"""
Implementation of ODE Risk minimization
<NAME>, ETH Zurich
based on code from
<NAME>, Machine Learning Research Group, University of Oxford
February 2019
"""
# Libraries
from odin.utils.trainable_models import TrainableModel
from odin.utils.gaussian_processes import GaussianProcess
from odin.utils.tensorflow_optimizer import ExtendedScipyOptimizerInterface
import numpy as np
import tensorflow as tf
from typing import Union, Tuple
import time
class ODERiskMinimization(object):
"""
Class that implements ODIN risk minimization
"""
def __init__(self, trainable: TrainableModel,
system_data: np.array, t_data: np.array,
gp_kernel: str = 'RBF',
optimizer: str = 'L-BFGS-B',
initial_gamma: float = 1e-6,
train_gamma: bool = True,
gamma_bounds: Union[np.array, list, Tuple] = (1e-6, 10.0),
state_bounds: np.array = None,
basinhopping: bool = True,
basinhopping_options: dict = None,
single_gp: bool = False,
state_normalization: bool = True,
time_normalization: bool = False,
tensorboard_summary_dir: str = None,
runtime_prof_dir: str = None):
"""
Constructor.
:param trainable: Trainable model class, as explained and implemented in
utils.trainable_models;
:param system_data: numpy array containing the noisy observations of
the state values of the system, size is [n_states, n_points];
:param t_data: numpy array containing the time stamps corresponding to
the observations passed as system_data;
:param gp_kernel: string indicating which kernel to use in the GP.
Valid options are 'RBF', 'Matern52', 'Matern32', 'RationalQuadratic',
'Sigmoid';
:param optimizer: string indicating which scipy optimizer to use. The
valid ones are the same that can be passed to scipy.optimize.minimize.
Notice that some of them will ignore bounds;
:param initial_gamma: initial value for the gamma parameter.
:param train_gamma: boolean, indicates whether or not to train the
variable gamma;
:param gamma_bounds: bounds for gamma (a lower bound of at least 1e-6
is always applied to overcome numerical instabilities);
:param state_bounds: bounds for the state optimization;
:param basinhopping: boolean, indicates whether to turn on the scipy
basinhopping;
:param basinhopping_options: dictionary containing options for the
basinhopping algorithm (same syntax as scipy's);
:param single_gp: boolean, indicates whether to use a single set of GP
hyperparameters shared across all states;
:param state_normalization: boolean, indicates whether to normalize the
state values before the optimization (notice the parameter values
theta won't change);
:param time_normalization: boolean, indicates whether to normalize the
time stamps before the optimization (notice the parameter values
theta won't change);
:param tensorboard_summary_dir: str, directory for TensorBoard summaries;
:param runtime_prof_dir: str, directory for runtime profiling output.
"""
# Save arguments
self.trainable = trainable
self.system_data = np.copy(system_data)
self.t_data = np.copy(t_data).reshape(-1, 1)
self.dim, self.n_p = system_data.shape
self.gp_kernel = gp_kernel
self.optimizer = optimizer
self.initial_gamma = initial_gamma
self.train_gamma = train_gamma
self.gamma_bounds = np.log(np.array(gamma_bounds))
self.basinhopping = basinhopping
self.basinhopping_options = {'n_iter': 10,
'temperature': 1.0,
'stepsize': 0.05}
self.state_normalization = state_normalization
if basinhopping_options:
self.basinhopping_options.update(basinhopping_options)
self.single_gp = single_gp
# Build bounds for the states and gamma
self._compute_state_bounds(state_bounds)
self._compute_gamma_bounds(gamma_bounds)
# Initialize utils
self._compute_standardization_data(state_normalization,
time_normalization)
# Build the necessary TensorFlow tensors
self._build_tf_data()
# Initialize the Gaussian Process for the derivative model
self.gaussian_process = GaussianProcess(self.dim, self.n_p,
self.gp_kernel, self.single_gp)
# Initialize logging variables
if tensorboard_summary_dir:
self.writer = tf.summary.FileWriter(tensorboard_summary_dir)
theta_sum = tf.summary.histogram('Theta_summary', self.trainable.theta)
else:
self.writer = None
self.runtime_prof_dir = runtime_prof_dir
# Initialization of TF operations
self.init = None
return
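# Example construction (illustrative sketch; 'MyTrainableModel' stands for
# any concrete TrainableModel subclass from odin.utils.trainable_models,
# and system_data is assumed to have shape [n_states, n_points]):
#     trainable = MyTrainableModel(...)
#     odin = ODERiskMinimization(trainable, system_data, t_data,
#                                gp_kernel='RBF', optimizer='L-BFGS-B',
#                                train_gamma=True, single_gp=False)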
def _compute_gamma_bounds(self, bounds: Union[np.array, list, Tuple])\
-> None:
"""
Builds the numpy array that defines the bounds for gamma.
:param bounds: of the form (lower_bound, upper_bound).
"""
self.gamma_bounds = np.array([1.0, 1.0])
if bounds is None:
self.gamma_bounds[0] = np.log(1e-6)
self.gamma_bounds[1] = np.inf
else:
self.gamma_bounds[0] = np.log(np.array(bounds[0]))
self.gamma_bounds[1] = np.log(np.array(bounds[1]))
return
def _compute_state_bounds(self, bounds: np.array) -> None:
"""
Builds the numpy array that defines the bounds for the states.
:param bounds: numpy array, sized [n_dim, 2], in which each row contains
respectively the lower and upper bound for one dimension.
"""
if bounds is None:
self.state_bounds = np.inf * np.ones([self.dim, 2])
self.state_bounds[:, 0] = - self.state_bounds[:, 0]
else:
self.state_bounds = np.array(bounds)
return
def _compute_standardization_data(self, state_normalization: bool,
time_normalization: bool) -> None:
"""
Compute the means and the standard deviations for data standardization,
used in the GP regression.
"""
# Compute mean and std dev of the state and time values
if state_normalization:
self.system_data_means = np.mean(self.system_data,
axis=1).reshape(self.dim, 1)
self.system_data_std_dev = np.std(self.system_data,
axis=1).reshape(self.dim, 1)
else:
self.system_data_means = np.zeros([self.dim, 1])
self.system_data_std_dev = np.ones([self.dim, 1])
if time_normalization:
self.t_data_mean = np.mean(self.t_data)
self.t_data_std_dev = np.std(self.t_data)
else:
self.t_data_mean = 0.0
self.t_data_std_dev = 1.0
if self.gp_kernel == 'Sigmoid':
self.t_data_mean = 0.0
# Normalize states and time
self.normalized_states = (self.system_data - self.system_data_means) / \
self.system_data_std_dev
self.normalized_t_data = (self.t_data - self.t_data_mean) / \
self.t_data_std_dev
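# Illustrative example: with system_data = [[1., 3.], [2., 6.]] and
# state_normalization=True, each state row is standardized to zero mean and
# unit variance, giving [[-1., 1.], [-1., 1.]]; as noted in the constructor
# docstring, the ODE parameters theta are not affected by this rescaling.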
return
def _build_tf_data(self) -> None:
"""
Initialize all the TensorFlow constants needed by the pipeline.
"""
self.system = tf.constant(self.normalized_states, dtype=tf.float64)
self.t = tf.constant(self.normalized_t_data, dtype=tf.float64)
self.system_means = tf.constant(self.system_data_means,
dtype=tf.float64,
shape=[self.dim, 1])
self.system_std_dev = tf.constant(self.system_data_std_dev,
dtype=tf.float64,
shape=[self.dim, 1])
self.t_mean = tf.constant(self.t_data_mean, dtype=tf.float64)
self.t_std_dev = tf.constant(self.t_data_std_dev, dtype=tf.float64)
self.n_points = tf.constant(self.n_p, dtype=tf.int32)
self.dimensionality = tf.constant(self.dim, dtype=tf.int32)
return
def _build_states_bounds(self) -> None:
"""
Builds the tensors containing the bounds on the normalized states used
in the constrained optimization.
"""
# Tile the bounds to get the right dimensions
state_lower_bounds = self.state_bounds[:, 0].reshape(self.dim, 1)
state_lower_bounds = np.tile(state_lower_bounds, [1, self.n_p])
import datetime as dt
from unittest import SkipTest
import numpy as np
from holoviews import Dimension, Image, Curve, RGB, HSV, Dataset, Table
from holoviews.core.util import date_range
from holoviews.core.data.interface import DataError
from .base import DatatypeContext, GriddedInterfaceTests, InterfaceTests
class ImageInterfaceTests(GriddedInterfaceTests, InterfaceTests):
"""
Tests for ImageInterface
"""
datatype = 'image'
data_type = np.ndarray
element = Image
__test__ = True
def test_canonical_vdim(self):
x = np.array([ 0. , 0.75, 1.5 ])
y = np.array([ 1.5 , 0.75, 0. ])
z = np.array([[ 0.06925999, 0.05800389, 0.05620127],
[ 0.06240918, 0.05800931, 0.04969735],
[ 0.05376789, 0.04669417, 0.03880118]])
dataset = Image((x, y, z), kdims=['x', 'y'], vdims=['z'])
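# ys are supplied in decreasing order, so the canonical (ascending-y)
# representation returned below is the input array with its rows flipped.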
canonical = np.array([[ 0.05376789, 0.04669417, 0.03880118],
[ 0.06240918, 0.05800931, 0.04969735],
[ 0.06925999, 0.05800389, 0.05620127]])
self.assertEqual(dataset.dimension_values('z', flat=False),
canonical)
def test_gridded_dtypes(self):
ds = self.dataset_grid
self.assertEqual(ds.interface.dtype(ds, 'x'), np.float64)
self.assertEqual(ds.interface.dtype(ds, 'y'), np.float64)
self.assertEqual(ds.interface.dtype(ds, 'z'), np.dtype(int))
def test_dataset_groupby_with_transposed_dimensions(self):
raise SkipTest('Image interface does not support multi-dimensional data.')
def test_dataset_dynamic_groupby_with_transposed_dimensions(self):
raise SkipTest('Image interface does not support multi-dimensional data.')
def test_dataset_slice_inverted_dimension(self):
raise SkipTest('Image interface does not support 1D data')
def test_sample_2d(self):
raise SkipTest('Image interface only supports Image type')
class BaseImageElementInterfaceTests(InterfaceTests):
"""
Tests for ImageInterface
"""
element = Image
__test__ = False
def init_grid_data(self):
self.xs = np.linspace(-9, 9, 10)
self.ys = np.linspace(0.5, 9.5, 10)
self.array = np.arange(10) * np.arange(10)[:, np.newaxis]
def init_data(self):
self.image = Image(np.flipud(self.array), bounds=(-10, 0, 10, 10))
def test_init_data_tuple(self):
xs = np.arange(5)
"""TNQMetro: Tensor-network based package for efficient quantum metrology computations."""
# Table of Contents
#
# 1 Functions for finite size systems......................................29
# 1.1 High level functions...............................................37
# 1.2 Low level functions...............................................257
# 1.2.1 Problems with exact derivative.............................1207
# 1.2.2 Problems with discrete approximation of the derivative.....2411
# 2 Functions for infinite size systems..................................3808
# 2.1 High level functions.............................................3816
# 2.2 Low level functions..............................................4075
# 3 Auxiliary functions..................................................5048
import itertools
import math
import warnings
import numpy as np
from ncon import ncon
########################################
# #
# #
# 1 Functions for finite size systems. #
# #
# #
########################################
#############################
# #
# 1.1 High level functions. #
# #
#############################
def fin(N, so_before_list, h, so_after_list, BC='O', L_ini=None, psi0_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True, D_psi0_max=100, D_psi0_max_forced=False):
"""
Optimization of the QFI over operator L (in MPO representation) and wave function psi0 (in MPS representation) and check of convergence in their bond dimensions. Function for finite size systems.
User has to provide information about the dynamics by specifying the quantum channel. It is assumed that the quantum channel is translationally invariant and is built from layers of quantum operations.
User has to provide one defining operation for each layer as a local superoperator. These local superoperators have to be input in order of their action on the system.
Parameter encoding is treated as a separate, stand-alone quantum operation. It is assumed that the parameter encoding acts only once and is unitary, so the user has to provide only its generator h.
Generator h has to be diagonal in the computational basis; in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
so_before_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k is the number of sites on which a particular local superoperator acts
List of local superoperators (in order) which act before unitary parameter encoding.
h: ndarray of a shape (d,d)
Generator of unitary parameter encoding. Dimension d is the dimension of local Hilbert space (dimension of physical index).
Generator h has to be diagonal in the computational basis; in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.
so_after_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k is the number of sites on which a particular local superoperator acts
List of local superoperators (in order) which act after unitary parameter encoding.
BC: 'O' or 'P', optional
Boundary conditions, 'O' for OBC, 'P' for PBC.
L_ini: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC, optional
Initial MPO for L.
psi0_ini: list of length N of ndarrays of a shape (Dl_psi0,Dr_psi0,d) for OBC (Dl_psi0, Dr_psi0 can vary between sites) or ndarray of a shape (D_psi0,D_psi0,d,N) for PBC, optional
Initial MPS for psi0.
imprecision: float, optional
Expected relative imprecision of the end results.
D_L_max: integer, optional
Maximal value of D_L (D_L is bond dimension for MPO representing L).
D_L_max_forced: bool, optional
True if D_L_max has to be reached, otherwise False.
L_herm: bool, optional
True if Hermitian gauge has to be imposed on MPO representing L, otherwise False.
D_psi0_max: integer, optional
Maximal value of D_psi0 (D_psi0 is bond dimension for MPS representing psi0).
D_psi0_max_forced: bool, optional
True if D_psi0_max has to be reached, otherwise False.
Returns:
result: float
Optimal value of figure of merit.
result_m: ndarray
Matrix describing the figure of merit as a function of bond dimensions of respectively L [rows] and psi0 [columns].
L: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC
Optimal L in MPO representation.
psi0: list of length N of ndarrays of a shape (Dl_psi0,Dr_psi0,d) for OBC (Dl_psi0, Dr_psi0 can vary between sites) or ndarray of a shape (D_psi0,D_psi0,d,N) for PBC
Optimal psi0 in MPS representation.
"""
if np.linalg.norm(h - np.diag(np.diag(h))) > 10**-10:
warnings.warn('Generator h has to be diagonal in the computational basis; in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.')
d = np.shape(h)[0]
ch = fin_create_channel(N, d, BC, so_before_list + so_after_list)
ch2 = fin_create_channel_derivative(N, d, BC, so_before_list, h, so_after_list)
result, result_m, L, psi0 = fin_gen(N, d, BC, ch, ch2, None, L_ini, psi0_ini, imprecision, D_L_max, D_L_max_forced, L_herm, D_psi0_max, D_psi0_max_forced)
return result, result_m, L, psi0
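# Example usage (illustrative sketch; the single-qubit dephasing superoperator
# below is an assumption made for this example and is written in the
# computational basis, in which h is diagonal):
#     p = 0.1
#     dephasing = np.diag([1., 1. - 2 * p, 1. - 2 * p, 1.])  # shape (d**2, d**2) with d = 2
#     h = np.diag([0., 1.])
#     result, result_m, L, psi0 = fin(8, [dephasing], h, [], BC='O')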
def fin_gen(N, d, BC, ch, ch2, epsilon=None, L_ini=None, psi0_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True, D_psi0_max=100, D_psi0_max_forced=False):
"""
Optimization of the figure of merit (usually interpreted as the QFI) over operator L (in MPO representation) and wave function psi0 (in MPS representation) and check of convergence when increasing their bond dimensions. Function for finite size systems.
User has to provide information about the dynamics by specifying a quantum channel ch and its derivative ch2 (or two channels separated by small parameter epsilon) as superoperators in MPO representation.
There are no constraints on the structure of the channel but the complexity of calculations highly depends on the channel's bond dimension.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
d: integer
Dimension of local Hilbert space (dimension of physical index).
BC: 'O' or 'P'
Boundary conditions, 'O' for OBC, 'P' for PBC.
ch: list of length N of ndarrays of a shape (Dl_ch,Dr_ch,d**2,d**2) for OBC (Dl_ch, Dr_ch can vary between sites) or ndarray of a shape (D_ch,D_ch,d**2,d**2,N) for PBC
Quantum channel as a superoperator in MPO representation.
ch2: list of length N of ndarrays of a shape (Dl_ch2,Dr_ch2,d**2,d**2) for OBC (Dl_ch2, Dr_ch2 can vary between sites) or ndarray of a shape (D_ch2,D_ch2,d**2,d**2,N) for PBC
Interpretation depends on whether epsilon is specified (2) or not (1, the default approach):
1) derivative of the quantum channel as a superoperator in the MPO representation,
2) the quantum channel as a superoperator in the MPO representation for the value of the estimated parameter shifted by epsilon with respect to ch.
epsilon: float, optional
If specified, it is interpreted as the value of the separation between the estimated parameters encoded in ch and ch2.
L_ini: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC, optional
Initial MPO for L.
psi0_ini: list of length N of ndarrays of a shape (Dl_psi0,Dr_psi0,d) for OBC (Dl_psi0, Dr_psi0 can vary between sites) or ndarray of a shape (D_psi0,D_psi0,d,N) for PBC, optional
Initial MPS for psi0.
imprecision: float, optional
Expected relative imprecision of the end results.
D_L_max: integer, optional
Maximal value of D_L (D_L is bond dimension for MPO representing L).
D_L_max_forced: bool, optional
True if D_L_max has to be reached, otherwise False.
L_herm: bool, optional
True if the Hermitian gauge has to be imposed on MPO representing L, otherwise False.
D_psi0_max: integer, optional
Maximal value of D_psi0 (D_psi0 is bond dimension for MPS representing psi0).
D_psi0_max_forced: bool, optional
True if D_psi0_max has to be reached, otherwise False.
Returns:
result: float
Optimal value of the figure of merit.
result_m: ndarray
Matrix describing the figure of merit as a function of bond dimensions of respectively L [rows] and psi0 [columns].
L: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC
Optimal L in MPO representation.
psi0: list of length N of ndarrays of a shape (Dl_psi0,Dr_psi0,d) for OBC (Dl_psi0, Dr_psi0 can vary between sites) or ndarray of a shape (D_psi0,D_psi0,d,N) for PBC
Optimal psi0 in MPS representation.
"""
if epsilon is None:
result, result_m, L, psi0 = fin_FoM_FoMD_optbd(N, d, BC, ch, ch2, L_ini, psi0_ini, imprecision, D_L_max, D_L_max_forced, L_herm, D_psi0_max, D_psi0_max_forced)
else:
result, result_m, L, psi0 = fin2_FoM_FoMD_optbd(N, d, BC, ch, ch2, epsilon, L_ini, psi0_ini, imprecision, D_L_max, D_L_max_forced, L_herm, D_psi0_max, D_psi0_max_forced)
return result, result_m, L, psi0
def fin_state(N, so_before_list, h, so_after_list, rho0, BC='O', L_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True):
"""
Optimization of the QFI over operator L (in MPO representation) and check of convergence when increasing its bond dimension. Function for finite size systems and fixed state of the system.
User has to provide information about the dynamics by specifying a quantum channel. It is assumed that the quantum channel is translationally invariant and is built from layers of quantum operations.
User has to provide one defining operation for each layer as a local superoperator. These local superoperators have to be input in order of their action on the system.
Parameter encoding is treated as a separate, stand-alone quantum operation. It is assumed that the parameter encoding acts only once and is unitary, so the user has to provide only its generator h.
Generator h has to be diagonal in the computational basis, or in other words it is assumed that local superoperators are expressed in the eigenbasis of h.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
so_before_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k is the number of sites on which a particular local superoperator acts
List of local superoperators (in order) which act before unitary parameter encoding.
h: ndarray of a shape (d,d)
Generator of unitary parameter encoding. Dimension d is the dimension of local Hilbert space (dimension of physical index).
Generator h has to be diagonal in the computational basis; in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.
so_after_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k is the number of sites on which a particular local superoperator acts
List of local superoperators (in order) which act after unitary parameter encoding.
rho0: list of length N of ndarrays of a shape (Dl_rho0,Dr_rho0,d,d) for OBC (Dl_rho0, Dr_rho0 can vary between sites) or ndarray of a shape (D_rho0,D_rho0,d,d,N) for PBC
Density matrix describing initial state of the system in MPO representation.
BC: 'O' or 'P', optional
Boundary conditions, 'O' for OBC, 'P' for PBC.
L_ini: list of length N of ndarrays of shape (Dl_L,Dr_L,d,d) for OBC, (Dl_L, Dr_L can vary between sites) or ndarray of shape (D_L,D_L,d,d,N) for PBC, optional
Initial MPO for L.
imprecision: float, optional
Expected relative imprecision of the end results.
D_L_max: integer, optional
Maximal value of D_L (D_L is bond dimension for MPO representing L).
D_L_max_forced: bool, optional
True if D_L_max has to be reached, otherwise False.
L_herm: bool, optional
True if Hermitian gauge has to be imposed on MPO representing L, otherwise False.
Returns:
result: float
Optimal value of figure of merit.
result_v: ndarray
Vector describing the figure of merit as a function of the bond dimension of L.
L: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC
Optimal L in the MPO representation.
"""
if np.linalg.norm(h - np.diag(np.diag(h))) > 10**-10:
warnings.warn('Generator h has to be diagonal in the computational basis; in other words, it is assumed that local superoperators are expressed in the eigenbasis of h.')
d = np.shape(h)[0]
ch = fin_create_channel(N, d, BC, so_before_list + so_after_list)
ch2 = fin_create_channel_derivative(N, d, BC, so_before_list, h, so_after_list)
rho = channel_acting_on_operator(ch, rho0)
rho2 = channel_acting_on_operator(ch2, rho0)
result, result_v, L = fin_state_gen(N, d, BC, rho, rho2, None, L_ini, imprecision, D_L_max, D_L_max_forced, L_herm)
return result, result_v, L
def fin_state_gen(N, d, BC, rho, rho2, epsilon=None, L_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True):
"""
Optimization of the figure of merit (usually interpreted as the QFI) over operator L (in MPO representation) and check of convergence when increasing its bond dimension. Function for finite size systems and a fixed state of the system.
User has to provide information about the dynamics by specifying the density matrix rho at the output of the quantum channel and its derivative rho2 (or two density matrices separated by small parameter epsilon) in the MPO representation.
There are no constraints on the structure of the channel, but the complexity of the calculations highly depends on the channel's bond dimension.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
d: integer
Dimension of local Hilbert space (dimension of physical index).
BC: 'O' or 'P'
Boundary conditions, 'O' for OBC, 'P' for PBC.
rho: list of length N of ndarrays of a shape (Dl_rho,Dr_rho,d,d) for OBC (Dl_rho, Dr_rho can vary between sites) or ndarray of a shape (D_rho,D_rho,d,d,N) for PBC
Density matrix at the output of the quantum channel in the MPO representation.
rho2: list of length N of ndarrays of a shape (Dl_rho2,Dr_rho2,d,d) for OBC (Dl_rho2, Dr_rho2 can vary between sites) or ndarray of a shape (D_rho2,D_rho2,d,d,N) for PBC
Interpretation depends on whether epsilon is specified (2) or not (1, the default approach):
1) derivative of the density matrix at the output of the quantum channel in MPO representation,
2) density matrix at the output of the quantum channel in MPO representation for the value of the estimated parameter shifted by epsilon with respect to rho.
epsilon: float, optional
If specified, it is interpreted as the value of the separation between the estimated parameters encoded in rho and rho2.
L_ini: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC, optional
Initial MPO for L.
imprecision: float, optional
Expected relative imprecision of the end results.
D_L_max: integer, optional
Maximal value of D_L (D_L is bond dimension for MPO representing L).
D_L_max_forced: bool, optional
True if D_L_max has to be reached, otherwise False.
L_herm: bool, optional
True if Hermitian gauge has to be imposed on MPO representing L, otherwise False.
Returns:
result: float
Optimal value of figure of merit.
result_v: ndarray
Vector describing figure of merit as a function of bond dimensions of L.
L: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC (Dl_L, Dr_L can vary between sites) or ndarray of a shape (D_L,D_L,d,d,N) for PBC
Optimal L in MPO representation.
"""
if epsilon is None:
result, result_v, L = fin_FoM_optbd(N, d, BC, rho, rho2, L_ini, imprecision, D_L_max, D_L_max_forced, L_herm)
else:
result, result_v, L = fin2_FoM_optbd(N, d, BC, rho, rho2, epsilon, L_ini, imprecision, D_L_max, D_L_max_forced, L_herm)
return result, result_v, L
############################
# #
# 1.2 Low level functions. #
# #
############################
def fin_create_channel(N, d, BC, so_list, tol=10**-10):
"""
Creates the MPO for a superoperator describing a translationally invariant quantum channel from a list of local superoperators. Function for finite size systems.
For OBC, the tensor-network length N has to be at least 2k-1, where k is the correlation length (the number of sites on which the largest local superoperator acts).
Local superoperators acting on more than 4 neighbouring sites are not currently supported.
Parameters:
N: integer
Number of sites in the chain of tensors (usually number of particles).
For OBC, the tensor-network length N has to be at least 2k-1, where k is the correlation length (the number of sites on which the largest local superoperator acts).
d: integer
Dimension of local Hilbert space (dimension of physical index).
BC: 'O' or 'P'
Boundary conditions, 'O' for OBC, 'P' for PBC.
so_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k is the number of sites on which a particular local superoperator acts
List of local superoperators in order of their action on the system.
Local superoperators acting on more than 4 neighbouring sites are not currently supported.
tol: float, optional
Factor which after multiplication by the highest singular value gives a cutoff on singular values that are treated as nonzero.
Returns:
ch: list of length N of ndarrays of shape (Dl_ch,Dr_ch,d**2,d**2) for OBC (Dl_ch, Dr_ch can vary between sites) or ndarray of shape (D_ch,D_ch,d**2,d**2,N) for PBC
Quantum channel as a superoperator in the MPO representation.
"""
if so_list == []:
if BC == 'O':
ch = np.eye(d**2,dtype=complex)
ch = ch[np.newaxis,np.newaxis,:,:]
ch = [ch]*N
elif BC == 'P':
ch = np.eye(d**2,dtype=complex)
ch = ch[np.newaxis,np.newaxis,:,:,np.newaxis]
ch = np.tile(ch,(1,1,1,1,N))
return ch
if BC == 'O':
ch = [0]*N
kmax = max([int(math.log(np.shape(so_list[i])[0],d**2)) for i in range(len(so_list))])
if N < 2*kmax-1:
warnings.warn('For OBC the tensor-network length N has to be at least 2k-1, where k is the correlation length (the number of sites on which the largest local superoperator acts).')
for x in range(N):
if x >= kmax and N-x >= kmax:
ch[x] = ch[x-1]
continue
for i in range(len(so_list)):
so = so_list[i]
k = int(math.log(np.shape(so)[0],d**2))
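# k is the number of sites this local superoperator acts on; for k > 1 it is
# split below into a chain of k MPO tensors by successive SVDs, truncating
# singular values smaller than s[0]*tol.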
if np.linalg.norm(so-np.diag(np.diag(so))) < 10**-10:
so = np.diag(so)
if k == 1:
bdchil = 1
bdchir = 1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
chi[:,:,nx,nx] = so[nx]
elif k == 2:
so = np.reshape(so,(d**2,d**2),order='F')
u,s,vh = np.linalg.svd(so)
s = s[s > s[0]*tol]
bdchi = np.shape(s)[0]
u = u[:,:bdchi]
vh = vh[:bdchi,:]
us = u @ np.diag(np.sqrt(s))
sv = np.diag(np.sqrt(s)) @ vh
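# us and sv each carry a factor sqrt(s), so contracting the shared bond
# between neighbouring sites reconstructs the original two-site superoperator;
# the edge sites (x == 0 and x == N-1) keep an open boundary bond of dimension 1.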
if x == 0:
bdchil = 1
bdchir = bdchi
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us[nx,:]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x > 0 and x < N-1:
bdchil = bdchi
bdchir = bdchi
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv[:,nx],us[nx,:]]
legs = [[-1],[-2]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-1:
bdchil = bdchi
bdchir = 1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv[:,nx]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif k == 3:
so = np.reshape(so,(d**2,d**4),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
sv1 = np.reshape(sv1,(bdchi1*d**2,d**2),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
if x == 0:
bdchil = 1
bdchir = bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us1[nx,:]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == 1:
bdchil = bdchi1
bdchir = bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us2[:,nx,:],us1[nx,:]]
legs = [[-1,-2],[-3]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x > 1 and x < N-2:
bdchil = bdchi2*bdchi1
bdchir = bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv2[:,nx],us2[:,nx,:],us1[nx,:]]
legs = [[-1],[-2,-3],[-4]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-2:
bdchil = bdchi2*bdchi1
bdchir = bdchi2
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv2[:,nx],us2[:,nx,:]]
legs = [[-1],[-2,-3]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-1:
bdchil = bdchi2
bdchir = 1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv2[:,nx]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif k == 4:
so = np.reshape(so,(d**2,d**6),order='F')
u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
s1 = s1[s1 > s1[0]*tol]
bdchi1 = np.shape(s1)[0]
u1 = u1[:,:bdchi1]
vh1 = vh1[:bdchi1,:]
us1 = u1 @ np.diag(np.sqrt(s1))
sv1 = np.diag(np.sqrt(s1)) @ vh1
sv1 = np.reshape(sv1,(bdchi1*d**2,d**4),order='F')
u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
s2 = s2[s2 > s2[0]*tol]
bdchi2 = np.shape(s2)[0]
u2 = u2[:,:bdchi2]
vh2 = vh2[:bdchi2,:]
us2 = u2 @ np.diag(np.sqrt(s2))
us2 = np.reshape(us2,(bdchi1,d**2,bdchi2),order='F')
sv2 = np.diag(np.sqrt(s2)) @ vh2
sv2 = np.reshape(sv2,(bdchi2*d**2,d**2),order='F')
u3,s3,vh3 = np.linalg.svd(sv2,full_matrices=False)
s3 = s3[s3 > s3[0]*tol]
bdchi3 = np.shape(s3)[0]
u3 = u3[:,:bdchi3]
vh3 = vh3[:bdchi3,:]
us3 = u3 @ np.diag(np.sqrt(s3))
us3 = np.reshape(us3,(bdchi2,d**2,bdchi3),order='F')
sv3 = np.diag(np.sqrt(s3)) @ vh3
if x == 0:
bdchil = 1
bdchir = bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us1[nx,:]]
legs = [[-1]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == 1:
bdchil = bdchi1
bdchir = bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us2[:,nx,:],us1[nx,:]]
legs = [[-1,-2],[-3]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == 2:
bdchil = bdchi2*bdchi1
bdchir = bdchi3*bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [us3[:,nx,:],us2[:,nx,:],us1[nx,:]]
legs = [[-1,-3],[-2,-4],[-5]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x > 2 and x < N-3:
bdchil = bdchi3*bdchi2*bdchi1
bdchir = bdchi3*bdchi2*bdchi1
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv3[:,nx],us3[:,nx,:],us2[:,nx,:],us1[nx,:]]
legs = [[-1],[-2,-4],[-3,-5],[-6]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-3:
bdchil = bdchi3*bdchi2*bdchi1
bdchir = bdchi3*bdchi2
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
for nx in range(d**2):
tensors = [sv3[:,nx],us3[:,nx,:],us2[:,nx,:]]
legs = [[-1],[-2,-4],[-3,-5]]
chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
elif x == N-2:
bdchil = bdchi3*bdchi2
bdchir = bdchi3
chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
import numpy as np
import pdb
def per_pkt_transmission(args, MM, TransmittedSymbols):
# Pass the channel and generate samples at the receiver
taus = np.sort(np.random.uniform(0,args.maxDelay,(1,MM-1)))[0]
taus[-1] = args.maxDelay
dd = np.zeros(MM)
for idx in np.arange(MM):
if idx == 0:
dd[idx] = taus[0]
elif idx == MM-1:
dd[idx] = 1 - taus[-1]
else:
dd[idx] = taus[idx] - taus[idx-1]
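# dd[idx] is the length of the idx-th sub-interval created by the staggered
# arrival times; the per-sample noise variance below scales as 1/dd[idx].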
# # # Generate the channel: phaseOffset = 0->0; 1->2pi/4; 2->2pi/2; 3->2pi
if args.phaseOffset == 0:
hh = np.ones([MM,1])
elif args.phaseOffset == 1:
hh = np.exp(1j*np.random.uniform(0,1,(MM,1)) * 2* np.pi/4)
elif args.phaseOffset == 2:
hh = np.exp(1j*np.random.uniform(0,1,(MM,1)) * 3* np.pi/4)
else:
hh = np.exp(1j*np.random.uniform(0,1,(MM,1)) * 4* np.pi /4)
# complex pass the complex channel
for idx in range(MM):
TransmittedSymbols[idx,:] = TransmittedSymbols[idx,:] * hh[idx][0]
# compute the received signal power and add noise
LL = len(TransmittedSymbols[0])
SignalPart = np.sum(TransmittedSymbols,0)
SigPower = np.sum(np.power(np.abs(SignalPart),2))/LL
# SigPower = np.max(np.power(np.abs(SignalPart),2))
EsN0 = np.power(10, args.EsN0dB/10.0)
noisePower = SigPower/EsN0
# Oversample the received signal
RepeatedSymbols = np.repeat(TransmittedSymbols, MM, axis = 1)
for idx in np.arange(MM):
extended = np.array([np.r_[np.zeros(idx), RepeatedSymbols[idx], np.zeros(MM-idx-1)]])
if idx == 0:
samples = extended
else:
samples = np.r_[samples, extended]
samples = np.sum(samples, axis=0)
# generate noise
for idx in np.arange(MM):
noise = np.random.normal(loc=0, scale=np.sqrt(noisePower/2/dd[idx]), size=LL+1)+1j*np.random.normal(loc=0, scale=np.sqrt(noisePower/2/dd[idx]), size=LL+1)
if idx == 0:
AWGNnoise = np.array([noise])
else:
AWGNnoise = np.r_[AWGNnoise, np.array([noise])]
AWGNnoise = np.reshape(AWGNnoise, (1,MM*(LL+1)), 'F')
samples = samples + AWGNnoise[0][0:-1]
# aligned_sample estimator
if args.Estimator == 1:
MthFiltersIndex = (np.arange(LL) + 1) * MM - 1
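# keep one matched-filter output per symbol, taken at the sub-interval where
# all MM packets overlap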
output = samples[MthFiltersIndex]
return output/MM
# ML estimator
if args.Estimator == 2:
noisePowerVec = noisePower/2./dd
HH = np.zeros([MM*(LL+1)-1, MM*LL])
for idx in range(MM*LL):
HH[np.arange(MM)+idx, idx] = hh[np.mod(idx,MM)]
CzVec = np.tile(noisePowerVec, [1, LL+1])
Cz = np.diag(CzVec[0][:-1])
## ------------------------------------- ML
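# generalized least-squares / ML estimate: x_hat = inv(H^H Cz^-1 H) H^H Cz^-1 y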
MUD = np.matmul(HH.conj().T, np.linalg.inv(Cz))
MUD = np.matmul(MUD, HH)
MUD = np.matmul(np.linalg.inv(MUD), HH.conj().T)
MUD = np.matmul(MUD, np.linalg.inv(Cz))
MUD = np.matmul(MUD, np.array([samples]).T)
## ------------------------------------- Estimate SUM
output = np.sum(np.reshape(MUD, [LL, MM]), 1)
return output/MM
# SP_ML estimator
if args.Estimator == 3:
noisePowerVec = noisePower/2./dd
output = BP_Decoding(samples, MM, LL, hh, noisePowerVec)
return output/MM
def BP_Decoding(samples, M, L, hh, noisePowerVec):
# Prepare the Gaussian messages (Eta,LambdaMat) obtained from the observation nodes
# Lambda
beta1 = np.c_[np.real(hh),np.imag(hh)]
beta2 = np.c_[-np.imag(hh),np.real(hh)]
Obser_Lamb_first = np.c_[np.matmul(beta1,np.transpose(beta1)),np.matmul(beta1,np.transpose(beta2))]
Obser_Lamb_second = np.c_[np.matmul(beta2,np.transpose(beta1)),np.matmul(beta1,np.transpose(beta1))]
Obser_Lamb = np.r_[Obser_Lamb_first,Obser_Lamb_second]
element = np.zeros([4,4])
element[0,0] = 1
ObserMat1 = np.tile(element,(2,2)) * Obser_Lamb # pos-by-pos multiplication
element = np.zeros([4,4])
element[0:2,0:2] = 1
ObserMat2 = np.tile(element,(2,2)) * Obser_Lamb # pos-by-pos multiplication
element = np.zeros([4,4])
element[0:3,0:3] = 1
ObserMat3 = np.tile(element,(2,2)) * Obser_Lamb # pos-by-pos multiplication
element = np.ones([4,4])
ObserMat4 = np.tile(element,(2,2)) * Obser_Lamb # pos-by-pos multiplication
element = np.zeros([4,4])
element[1:,1:] = 1
ObserMat5 = np.tile(element,(2,2)) * Obser_Lamb # pos-by-pos multiplication
element = np.zeros([4,4])
element[2:,2:] = 1
ObserMat6 = np.tile(element,(2,2)) * Obser_Lamb # pos-by-pos multiplication
element = np.zeros([4,4])
element[3:,3:] = 1
ObserMat7 = np.tile(element,(2,2)) * Obser_Lamb # pos-by-pos multiplication
# Eta = LambdaMat * mean
etaMat = np.matmul(np.r_[beta1,beta2],np.r_[np.real([samples]),np.imag([samples])])
# process the boundaries
etaMat[[1,2,3,5,6,7],0] = 0
etaMat[[2,3,6,7],1] = 0
etaMat[[3,7],2] = 0
etaMat[[0,4],-3] = 0
etaMat[[0,1,4,5],-2] = 0
etaMat[[0,1,2,4,5,6],-1] = 0
# ============================================================
# ============================================================
# ============================================================ right message passing
R_m3_eta = np.zeros([2*M, M*(L+1)-2])
R_m3_Lamb = np.zeros([2*M, 2*M, M*(L+1)-2])
for idx in range(M*(L+1)-2):
# ----------------------------- message m1(eta,Lamb) from bottom
m1_eta = etaMat[:,idx] / noisePowerVec[np.mod(idx,M)]
if idx == 0: # first boundary -- will only be used in the right passing
ObserMat = ObserMat1
elif idx == 1: # second boundary
ObserMat = ObserMat2
elif idx == 2:# third boundary
ObserMat = ObserMat3
elif idx == M*(L+1)-4: # second last boundary
ObserMat = ObserMat5
elif idx == M*(L+1)-3: # second last boundary
ObserMat = ObserMat6
elif idx == M*(L+1)-2: # last boundary -- will only be used in the left passing
ObserMat = ObserMat7
else:
ObserMat = ObserMat4
m1_Lamb = ObserMat / noisePowerVec[np.mod(idx,M)]
# ----------------------------- message m2: right message => product of bottom and left
if idx == 0: # first boundary
m2_eta = m1_eta
m2_Lamb = m1_Lamb
else:
m2_eta = m1_eta + R_m3_eta[:,idx-1]
m2_Lamb = m1_Lamb + R_m3_Lamb[:,:,idx-1]
# ----------------------------- message m3: sum
m2_Sigma = np.linalg.pinv(m2_Lamb) # find the matrix Sigma of m2
pos = [np.mod(idx+1,M), np.mod(idx+1,M)+M] # pos of two variables (real and imag) to be integrated
# convert m2_eta back to m2_mean to delete columns -> convert back and add zero columns -> get the new m3_eta
m2_mean = np.matmul(m2_Sigma, m2_eta) # m2_mean
m2_mean[pos] = 0 # set to zero and convert back to eta (see below)
m2_Sigma[pos,:] = 0 # delete the rows and columns of m2_Sigma
m2_Sigma[:,pos] = 0
m3_Lamb = np.linalg.pinv(m2_Sigma)
m3_eta = np.matmul(m3_Lamb, m2_mean)
# ----------------------------- store m3
R_m3_eta[:,idx] = m3_eta
R_m3_Lamb[:,:,idx] = m3_Lamb
# ============================================================
# ============================================================
# ============================================================ left message passing
L_m3_eta = np.zeros([2*M, M*(L+1)-1])
L_m3_Lamb = np.zeros([2*M, 2*M, M*(L+1)-1])
for idx in np.arange(M*(L+1)-2, 0, -1):
# ----------------------------- message m1: from bottom
m1_eta = etaMat[:,idx] / noisePowerVec[np.mod(idx,M)];
if idx == 0: # first boundary -- will only be used in the right passing
ObserMat = ObserMat1
elif idx == 1: # second boundary
ObserMat = ObserMat2
elif idx == 2: # third boundary
ObserMat = ObserMat3
elif idx == M*(L+1)-4: # second last boundary
ObserMat = ObserMat5
elif idx == M*(L+1)-3: # second last boundary
ObserMat = ObserMat6
elif idx == M*(L+1)-2: # last boundary -- will only be used in the left passing
ObserMat = ObserMat7
else:
ObserMat = ObserMat4
m1_Lamb = ObserMat / noisePowerVec[np.mod(idx,M)]
# ----------------------------- message m2: product
if idx == M*(L+1)-2: # last boundary
m2_eta = m1_eta
m2_Lamb = m1_Lamb
else:
m2_eta = m1_eta + L_m3_eta[:,idx+1]
m2_Lamb = m1_Lamb + L_m3_Lamb[:,:,idx+1]
# ----------------------------- message m3: sum
m2_Sigma = np.linalg.pinv(m2_Lamb) # find the matrix Sigma of m2
pos = [np.mod(idx,M), np.mod(idx,M)+M] # pos of two variables (real and imag) to be integrated
# convert m2_eta back to m2_mean to delete columns -> convert back and add zero columns -> get the new m3_eta
m2_mean = np.matmul(m2_Sigma, m2_eta) # m2_mean
m2_mean[pos] = 0 # set to zero and convert back to eta (see below)
# convert m2_Lambda back to m2_Sigma to delete rows/columns -> convert back and add zero rows/columns -> get the new m3_Lambda
m2_Sigma[pos,:] = 0
m2_Sigma[:,pos] = 0
m3_Lamb = np.linalg.pinv(m2_Sigma)
m3_eta = np.matmul(m3_Lamb, m2_mean)
# ----------------------------- store m3
L_m3_eta[:,idx] = m3_eta
L_m3_Lamb[:,:,idx] = m3_Lamb
# ------------------------- Marginalization & BP DECODING
Sum_mu = np.zeros(L) + 1j * 0
for ii in range(1, L+1):
idx = ii * M - 1
Res_Eta = etaMat[:, idx] / noisePowerVec[np.mod(idx,M)] + R_m3_eta[:,idx-1] + L_m3_eta[:,idx+1]
Res_Lamb = ObserMat4 / noisePowerVec[np.mod(idx,M)] + R_m3_Lamb[:,:,idx-1] + L_m3_Lamb[:,:,idx+1]
# Res_Eta = etaMat[:, idx] / noisePowerVec[np.mod(idx,M)]
# Res_Lamb = ObserMat4 / noisePowerVec[np.mod(idx,M)]
# compute (mu,Sigma) for a variable node
Res_Sigma = np.linalg.pinv(Res_Lamb)
Res_mu = np.matmul(Res_Sigma, Res_Eta)
# compute (mu,Sigma) for the sum
Sum_mu[ii-1] = np.sum(Res_mu[0:M]) + 1j *np.sum(Res_mu[M:])
return Sum_mu
def test():
from options import args_parser
MM = 4
LL = 1000
args = args_parser()
args.EsN0dB = 10
# Generate TransmittedSymbols
for m in range(MM):
symbols = 2 * np.random.randint(2, size=(2,LL)) - 1
ComplexSymbols = symbols[0,:] + symbols[1,:] * 1j
if m == 0:
TransmittedSymbols = np.array([ComplexSymbols])
else:
TransmittedSymbols = np.r_[TransmittedSymbols, np.array([ComplexSymbols])]
target = np.sum(TransmittedSymbols, 0)
# MSE of the aligned_sample estimator
args.Estimator = 1
output = per_pkt_transmission(args, MM, TransmittedSymbols)
MSE1 = np.mean(np.power(np.abs(output - target),2))
print('MSE1 = ', MSE1)
# MSE of the ML estimator
args.Estimator = 2
output = per_pkt_transmission(args, MM, TransmittedSymbols)
MSE2 = np.mean(np.power(np.abs(output - target),2))
import collections
import logging
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.cuda.random as trandom
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.data.sampler import RandomSampler, WeightedRandomSampler
from tqdm import tqdm
import constants as c
import loss_bank as lb
import model_bank as mb
import optimizer_bank as ob
import util
from dataset import CellDataset, PairDataset, SubimageDataset, SubimageControlDataset, DataPrefetcher
cudnn.benchmark = True
cudnn.deterministic = False
def _get_plate_group(g2rna, id_codes, prediction):
plate_group = dict()
for id_code, pred in zip(id_codes, prediction):
exp = id_code.split('_')[0]
plate = int(id_code.split('_')[1])
key = (exp, plate)
if key not in plate_group:
plate_group[key] = [0, 0, 0, 0]
sirna = np.argmax(pred)
for i in range(4):
if sirna in g2rna[i]:
plate_group[key][i] += 1
break
return plate_group
def get_plate_postprocessing(id_codes, prediction):
g2rna, masks = util.get_g2rna()
plate_group = _get_plate_group(g2rna, id_codes, prediction)
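# infer, for every plate, which of the four sirna groups dominates the raw
# predictions, then apply the corresponding mask with -inf so only classes
# consistent with that group survive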
for i, id_code in enumerate(id_codes):
exp = id_code.split('_')[0]
plate = int(id_code.split('_')[1])
key = (exp, plate)
group = np.argmax(plate_group[key])
prediction[i, masks[group]] = -np.inf
if prediction.shape[1] > c.N_CLASS:
prediction[:, c.N_CLASS:] = -np.inf
return prediction
def _balancing_label(prob):
idxs = np.dstack(np.unravel_index(np.argsort(prob.ravel()), prob.shape))[0][::-1]
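# idxs lists all (sample, class) index pairs sorted by decreasing probability;
# greedily assign each sample its best still-unassigned class so every class
# is used at most once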
pred = -np.ones(prob.shape[0])
used_idx = np.zeros(prob.shape[0])
used_rna = np.zeros(prob.shape[1])
for idx in idxs:
if used_idx[idx[0]] == 0 and used_rna[idx[1]] == 0:
pred[idx[0]] = idx[1]
used_idx[idx[0]] = 1
used_rna[idx[1]] = 1
return pred
def balancing_class_prediction(id_codes, prediction):
# at most 1 instance each class
prediction = get_plate_postprocessing(id_codes, prediction)
plates = set()
for id_code in id_codes:
plate = '_'.join(id_code.split('_')[:2])
plates.add(plate)
plates = sorted(plates)
y_pred = np.zeros(len(id_codes))
for plate in plates:
idx = [i for i, x in enumerate(id_codes) if x.startswith(plate)]
y_pred_i = _balancing_label(prediction[idx])
y_pred[idx] = y_pred_i
return y_pred
class Model:
def __init__(self, model_name='resnet', ckpt_path=None, ckpt_epoch=None,
ckpt_full_path=None, output_ckpt_path=None, cell_type=None, criterion='cross_entropy',
train_transform=list(), progress_func=tqdm, lr=0.0001, load_optimizer=True,
freeze_eval=True, precision=16, plate_group=None, train_control=False, optimizer='adam',
training=True, gaussian_sigma=0):
assert torch.cuda.is_available()
torch.manual_seed(c.SEED)
trandom.manual_seed_all(c.SEED)
self.freeze_eval = freeze_eval
self.device = torch.device('cuda')
self.progress_func = progress_func
self.train_transform = train_transform
self.eval_transform = []
self.cell_type = cell_type
self.plate_group = plate_group
self.criterion = criterion
self.train_control = train_control
self.gaussian_sigma = gaussian_sigma
if train_control:
n_class = c.N_CLASS + c.N_CLASS_CONTROL
else:
n_class = c.N_CLASS
if model_name.startswith('resnet2in2out'):
self.model = mb.Resnet2in2out(int(model_name[13:]), n_class)
elif model_name.startswith('resnet'):
self.model = mb.Resnet(int(model_name[6:]), n_class)
elif model_name.startswith('arcresnet'):
self.model = mb.Resnet(int(model_name[9:]), n_class)
elif model_name.startswith('resnext'):
self.model = mb.Resnet(int(model_name[7:]), n_class)
elif model_name.startswith('densenet'):
self.model = mb.Densenet(int(model_name[8:]), n_class)
elif model_name.startswith('efficientnet'):
if training:
self.model = mb.EfficientNet(model_name, n_class, nn.BatchNorm2d, mb.mish_efficientnet.swish)
else:
self.model = mb.EfficientNet(model_name, n_class, mb.mish_efficientnet.MovingBatchNorm2d,
mb.mish_efficientnet.swish)
elif model_name.startswith('mishefficientnet'):
if training:
self.model = mb.EfficientNet(model_name, n_class, nn.BatchNorm2d, mb.mish_efficientnet.mish)
else:
self.model = mb.EfficientNet(model_name, n_class, mb.mish_efficientnet.MovingBatchNorm2d,
mb.mish_efficientnet.mish)
elif model_name.startswith('arcefficientnet'):
self.model = mb.ArcEfficientNet(model_name[3:], n_class, nn.BatchNorm2d, mb.mish_efficientnet.swish)
else:
return
self.model.cuda()
# fixme: should be double - 64 bits, float - 32 bits, half - 16 bits
if precision == 32:
self.model.double()
elif precision == 16:
self.model.float()
elif precision == 8:
self.model.half()
else:
raise Exception('Precision %d not in (8, 16, 32)' % precision)
self.precision = precision
# training_params = []
# for name, param in self.model.named_parameters():
# if 'fc' not in name:
# param.requires_grad = False
# training_params.append(param)
if optimizer.lower().startswith('adam'):
self.optimizer = optim.Adam(self.model.parameters(), lr=lr)
elif optimizer.lower().startswith('ranger'):
self.optimizer = ob.Ranger(self.model.parameters(), lr=lr)
elif optimizer.lower().startswith('sgd'):
self.optimizer = optim.SGD(self.model.parameters(), lr=lr, weight_decay=lr * 0.05, momentum=0.9)
self.start_epoch = 0
self.loss_history = {'train': [], 'valid': [], 'test': []}
self.acc_history = {'train': [], 'valid': [], 'test': []}
self.pp_acc_history = {'train': [], 'valid': []}
self.ckpt_path = ckpt_path
self.ckpt_full_path = ckpt_full_path
self.ckpt_epoch = None
self.output_ckpt_path = output_ckpt_path
if output_ckpt_path:
os.makedirs(output_ckpt_path, exist_ok=True)
self._load_ckpt(ckpt_full_path, ckpt_path, ckpt_epoch, load_optimizer)
if self.start_epoch == 0:
logging.info('No checkpoint loaded.')
if optimizer.endswith('swa'):
self.optimizer = ob.StochasticWeightAverage(self.optimizer, swa_start=1, swa_freq=1, swa_lr=lr)
g2rna, masks = util.get_g2rna()
self.label2mask = []
for i in range(c.N_CLASS):
for j in range(4):
if i in g2rna[j]:
self.label2mask.append(masks[j])
break
assert len(self.label2mask) == c.N_CLASS
def _load_ckpt(self, ckpt_full_path, ckpt_path, ckpt_epoch, load_optimizer=True):
if ckpt_full_path is not None:
path = ckpt_full_path
elif ckpt_path is not None:
cell_str = self.cell_type + '_' if self.cell_type else ''
group_str = str(self.plate_group) + '_' if self.plate_group is not None else ''
epoch_str = str(ckpt_epoch) if ckpt_epoch else 'best'
path = os.path.join(ckpt_path, '%s%s%s.tar' % (cell_str, group_str, epoch_str))
if not os.path.exists(path):
path = os.path.join(ckpt_path, '%s%s.tar' % (cell_str, epoch_str))
else:
return False
if os.path.exists(path):
model_ckpt = torch.load(path)
try:
self.model.load_state_dict(model_ckpt['model'])
except RuntimeError:
weights = model_ckpt['model']
new_weights = collections.OrderedDict()
for k, v in weights.items():
new_weights['model.' + k] = v
self.model.load_state_dict(new_weights)
if load_optimizer:
self.optimizer.load_state_dict(model_ckpt['optimizer'])
self.start_epoch = model_ckpt['epoch'] + 1
self.ckpt_epoch = model_ckpt['epoch']
self.loss_history = model_ckpt['loss']
self.acc_history = model_ckpt['acc']
if 'pp_acc' in model_ckpt:
self.pp_acc_history = model_ckpt['pp_acc']
logging.info('Check point %s loaded', path)
return True
elif ckpt_path is not None:
os.makedirs(ckpt_path, exist_ok=True)
return False
def _save_ckpt(self, path, epoch):
torch.save({
'epoch': epoch,
'model': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'loss': self.loss_history,
'acc': self.acc_history,
'pp_acc': self.pp_acc_history,
}, path)
def _forward_batch(self, images, labels):
if len(images.size()) == 5:
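# a 5-D batch has shape (B, T, C, H, W): T augmented views per sample,
# flattened into the batch dimension for a single forward pass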
B, T, C, H, W = images.size()
outputs = self.model(images.view(-1, C, H, W))
if labels is not None:
labels = labels.view(-1)
else:
T = 1
outputs = self.model(images)
return outputs, labels, T
def _predict_batch(self, images):
if len(images.size()) == 5:
B, T, C, H, W = images.size()
outputs = self.model(images.view(-1, C, H, W))
outputs = outputs.view(B, T, -1)
outputs = outputs.mean(dim=1)
else:
outputs = self.model(images)
return outputs
def _train_epoch(self, dataloader, criterion):
running_loss = 0.0
running_corrects = 0
self.model.train()
prefetcher = DataPrefetcher(dataloader)
images, labels = prefetcher.next()
for _ in self.progress_func(range(len(dataloader))):
# zero the parameter gradients
self.optimizer.zero_grad()
with torch.set_grad_enabled(True):
outputs, labels, T = self._forward_batch(images, labels)
loss = criterion(outputs, labels)
_, preds = torch.max(outputs, 1)
# backward + optimize only if in training phase
loss.backward()
self.optimizer.step()
# statistics
running_loss += loss.item() * labels.size(0) / T
running_corrects += torch.sum(preds == labels).cpu().numpy() / T
images, labels = prefetcher.next()
assert images is None
epoch_loss = running_loss / len(dataloader.dataset)
epoch_acc = running_corrects / len(dataloader.dataset)
return epoch_loss, epoch_acc
def _eval_epoch(self, dataloader, criterion):
running_loss = 0.0
running_corrects = 0
running_pp_corrects = 0
if self.freeze_eval:
self.model.eval()
prefetcher = DataPrefetcher(dataloader)
images, labels = prefetcher.next()
for _ in self.progress_func(range(len(dataloader))):
# forward
with torch.set_grad_enabled(False):
outputs = self._predict_batch(images)
loss = criterion(outputs, labels)
outputs = outputs.cpu().numpy()
labels = labels.cpu().numpy()
preds = np.argmax(outputs, axis=1)
# statistics
running_loss += loss.item() * labels.shape[0]
running_corrects += np.sum(preds == labels)
for i, l in enumerate(labels):
# do not eval control data in eval_epoch for consistency
if l < c.N_CLASS:
outputs[i, self.label2mask[l]] = -np.inf
preds = np.argmax(outputs, axis=1)
running_pp_corrects += np.sum(preds == labels)
images, labels = prefetcher.next()
assert images is None
epoch_loss = running_loss / len(dataloader.dataset)
epoch_acc = running_corrects / len(dataloader.dataset)
epoch_pp_acc = running_pp_corrects / len(dataloader.dataset)
return epoch_loss, epoch_acc, epoch_pp_acc
def _eval_kld_epoch(self, dataloader, criterion):
running_loss = 0.0
running_kld_loss = 0.0
running_corrects = 0
running_pp_corrects = 0
if self.freeze_eval:
self.model.eval()
for images1, images2, labels, masks in self.progress_func(dataloader):
images1 = images1.to(self.device)
images2 = images2.to(self.device)
labels = labels.to(self.device)
# forward
with torch.set_grad_enabled(False):
outputs1 = self._predict_batch(images1)
outputs2 = self._predict_batch(images2)
outputs = ((outputs1 + outputs2) / 2)
loss = criterion(outputs, labels)
for i, mask in enumerate(masks):
outputs1[i, mask] = -np.inf
outputs2[i, mask] = -np.inf
kld_loss = nn.KLDivLoss(reduction='batchmean')(F.log_softmax(outputs1, dim=1),
F.softmax(outputs2, dim=1))
kld_loss += nn.KLDivLoss(reduction='batchmean')(F.log_softmax(outputs2, dim=1),
F.softmax(outputs1, dim=1))
kld_loss /= 2
outputs = outputs.cpu().numpy()
labels = labels.cpu().numpy()
preds = np.argmax(outputs, axis=1)
# statistics
running_loss += loss.item() * labels.shape[0]
running_kld_loss += kld_loss.item() * labels.shape[0]
running_corrects += np.sum(preds == labels)
for i, l in enumerate(labels):
# do not eval control data in eval_epoch for consistency
if l < c.N_CLASS:
outputs[i, self.label2mask[l]] = -np.inf
preds = np.argmax(outputs, axis=1)
running_pp_corrects += np.sum(preds == labels)
epoch_loss = running_loss / len(dataloader.dataset)
epoch_kld_loss = running_kld_loss / len(dataloader.dataset)
epoch_acc = running_corrects / len(dataloader.dataset)
epoch_pp_acc = running_pp_corrects / len(dataloader.dataset)
return epoch_loss, epoch_kld_loss, epoch_acc, epoch_pp_acc
@staticmethod
def _get_instance_weight(train_files):
exp_count = dict()
for f in train_files:
exp_now = ''
for exp in c.EXPS:
if exp in f:
exp_now = exp
break
if exp_now not in exp_count:
exp_count[exp_now] = 0
exp_count[exp_now] += 1
weights = []
for f in train_files:
exp_now = ''
for exp in c.EXPS:
if exp in f:
exp_now = exp
break
weights.append(1 / exp_count[exp_now])
return weights
def get_best_epoch(self, valid_exps):
best_epoch = [-1] * len(valid_exps)
best_loss = [np.inf] * len(valid_exps)
for i, loss_dict in enumerate(self.loss_history['valid']):
for j, exp in enumerate(valid_exps):
if isinstance(loss_dict, dict):
loss = loss_dict[exp]
else:
loss = loss_dict
if loss < best_loss[j]:
best_loss[j] = loss
best_epoch[j] = i
return best_loss, best_epoch
def train(self, train_files, train_labels, train_stats, valid_files, valid_labels, valid_stats,
test_files, test_labels, test_stats,
epochs=10, patient=5, batch_size=32, num_workers=6, valid_exps=c.EXPS, dataset_class=CellDataset,
balance_exp=False, eval_batch_size=32, eval_bn_batch_size=0, restore_loss=True):
tw = 0
for exp in valid_exps:
tw += c.TEST_COUNT[exp]
if restore_loss:
best_loss, best_epoch = self.get_best_epoch(valid_exps)
else:
best_epoch = [-1] * len(valid_exps)
best_loss = [np.inf] * len(valid_exps)
train_dataset = dataset_class(train_files, train_labels, train_stats, self.train_transform, 'train',
gaussian_sigma=self.gaussian_sigma)
if balance_exp:
sampler = WeightedRandomSampler(Model._get_instance_weight(train_dataset.files), len(train_dataset))
else:
sampler = RandomSampler(train_dataset)
train_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=num_workers, sampler=sampler,
pin_memory=True)
valid_loaders = dict()
test_loaders = dict()
bn_loaders = dict()
for exp in valid_exps:
idx = util.get_exp_index(exp, valid_files)
valid_loaders[exp] = DataLoader(
dataset_class(valid_files[idx], valid_labels[idx], np.array(valid_stats)[idx], self.eval_transform,
'valid', gaussian_sigma=self.gaussian_sigma), batch_size=eval_batch_size, shuffle=False,
num_workers=num_workers, pin_memory=True)
if eval_bn_batch_size > 0:
bn_loaders[exp] = DataLoader(
dataset_class(valid_files[idx], valid_labels[idx], np.array(valid_stats)[idx], self.eval_transform,
'valid', gaussian_sigma=self.gaussian_sigma), batch_size=eval_bn_batch_size,
shuffle=False, num_workers=num_workers, pin_memory=True)
idx = util.get_exp_index(exp, test_files)
test_loaders[exp] = DataLoader(
dataset_class(test_files[idx], test_labels[idx], np.array(test_stats)[idx], self.eval_transform,
import numpy as np
import random
import matplotlib.pyplot as plt
from environments.mujoco.ant_multitask_base import MultitaskAntEnv
class AntSemiCircleEnv(MultitaskAntEnv):
def __init__(
self,
task={},
n_tasks=2,
max_episode_steps=200,
modify_init_state_dist=False,
on_circle_init_state=False,
**kwargs
):
super(AntSemiCircleEnv, self).__init__(task, n_tasks, **kwargs)
# self.set_task(self.sample_tasks(1)[0])
self._max_episode_steps = max_episode_steps
self.modify_init_state_dist = modify_init_state_dist
self.on_circle_init_state = on_circle_init_state
def step(self, action):
self.do_simulation(action, self.frame_skip)
xposafter = np.array(self.get_body_com("torso"))
goal_reward = -np.sum(
np.abs(xposafter[:2] - self._goal)
) # make it happy, not suicidal
# goal_reward = -(np.sum((xposafter[:2] - self._goal) ** 2) ** 0.5)
ctrl_cost = 0.1 * np.square(action).sum()
# contact_cost = 0.5 * 1e-3 * np.sum(
# np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
# survive_reward = 0.0
# reward = goal_reward - ctrl_cost - contact_cost + survive_reward
# reward = goal_reward - ctrl_cost - contact_cost
reward = goal_reward - ctrl_cost
# reward = goal_reward
state = self.state_vector()
done = False
ob = self._get_obs()
return (
ob,
reward,
done,
dict(
reward_goal=goal_reward,
reward_ctrl=-ctrl_cost,
# reward_contact=-contact_cost,
task=self._task,
),
)
def reset_model(self):
qpos = self.init_qpos
# just for offline data collection:
if self.modify_init_state_dist:
qpos[:2] = np.array(
[np.random.uniform(-1.5, 1.5), np.random.uniform(-0.5, 1.5)]
)
if (
not self.on_circle_init_state
): # make sure initial state is not on semi-circle
# while 1 - self.goal_radius <= np.linalg.norm(qpos[:2]) <= 1 + self.goal_radius:
while (
0.8 <= np.linalg.norm(qpos[:2]) <= 1.2
): # TODO: uses privileged knowledge (R=0.2)
qpos[:2] = np.array(
[np.random.uniform(-1.5, 1.5), np.random.uniform(-0.5, 1.5)]
)
else:
qpos[:2] = np.array([0, 0])
qvel = self.init_qvel
self.set_state(qpos, qvel)
return self._get_obs()
def reward(self, state, action):
goal_reward = -np.sum(
np.abs(state[:2] - self._goal)
) # make it happy, not suicidal
ctrl_cost = 0.1 * np.square(action).sum()
# contact_cost = 0.5 * 1e-3 * np.sum(
# np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
# reward = goal_reward - ctrl_cost - contact_cost
reward = goal_reward - ctrl_cost
return reward
def set_goal(self, goal):
self._goal = np.asarray(goal)
def sample_tasks(self, num_tasks):
a = np.array([random.uniform(0, np.pi) for _ in range(num_tasks)])
r = 1
goals = np.stack((r * np.cos(a), r * np.sin(a)), axis=-1)
tasks = [{"goal": goal} for goal in goals]
return tasks
def get_task(self):
return self._goal
def _get_obs(self):
return np.concatenate(
[
self.sim.data.qpos.flat,
self.sim.data.qvel.flat,
# np.clip(self.sim.data.cfrc_ext, -1, 1).flat,
]
)
def viewer_setup(self):
self.viewer.cam.distance = self.model.stat.extent * 0.5
self.viewer.cam.elevation = -90
self.viewer.cam.azimuth = 90
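# Illustrative sketch (assumption: standalone helper, not used by the classes in this
# file): the goals produced by `sample_tasks` above lie on the upper unit semi-circle,
# because the angle is drawn uniformly from [0, pi] and mapped through (cos, sin) with
# radius r = 1. This reproduces that geometry so it can be checked without MuJoCo.
def _sketch_semicircle_goals(num_tasks=5, radius=1.0, seed=0):
    """Return `num_tasks` goal positions on the upper semi-circle of given radius."""
    rng = np.random.RandomState(seed)
    angles = rng.uniform(0.0, np.pi, size=num_tasks)
    goals = np.stack((radius * np.cos(angles), radius * np.sin(angles)), axis=-1)
    # every goal has norm == radius and a non-negative y coordinate
    assert np.allclose(np.linalg.norm(goals, axis=-1), radius)
    assert np.all(goals[:, 1] >= 0.0)
    return goals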
class SparseAntSemiCircleEnv(AntSemiCircleEnv):
def __init__(
self, task={}, n_tasks=2, max_episode_steps=200, goal_radius=0.2, **kwargs
):
self.goal_radius = goal_radius
super().__init__(task, n_tasks, max_episode_steps, **kwargs)
def sparsify_rewards(self, d):
non_goal_reward_keys = []
for key in d.keys():
if key.startswith("reward") and key != "reward_goal":
non_goal_reward_keys.append(key)
non_goal_rewards = np.sum(
[d[reward_key] for reward_key in non_goal_reward_keys]
)
sparse_goal_reward = 1.0 if self.is_goal_state() else 0.0
return non_goal_rewards + sparse_goal_reward
def step(self, action):
ob, reward, done, d = super().step(action)
sparse_reward = self.sparsify_rewards(d)
return ob, sparse_reward, done, d
def reward(self, state, action):
goal_reward = 1.0 if self.is_goal_state(state) else 0.0
ctrl_cost = 0.1 * np.square(action).sum()
# contact_cost = 0.5 * 1e-3 * np.sum(
# np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
# reward = goal_reward - ctrl_cost - contact_cost
reward = goal_reward - ctrl_cost
return reward
def is_goal_state(self, state=None):
if state is None:
state = np.array(self.get_body_com("torso"))
if np.linalg.norm(state[:2] - self._goal) <= self.goal_radius:
return True
else:
return False
def plot_env(self):
ax = plt.gca()
# plot half circle and goal position
angles = np.linspace(0, np.pi, num=100)
x, y = np.cos(angles), np.sin(angles)
"""
Implements the wire break test of https://github.com/BecCowley/Mquest/blob/083b9a3dc7ec9076705aca0e90bcb500d241be03/GUI/detectwirebreak.m
"""
import numpy
def istight(t, thresh=0.1):
# given a temperature profile, return an array of bools
# true = this level is within thresh of both its neighbors
gaps = numpy.absolute(numpy.diff(t))
import numpy as np
import matplotlib
matplotlib.use("Agg")
from rlkit.torch.multitask.gym_relabelers import ContinuousRelabeler
class AntDirectionRelabeler(ContinuousRelabeler): # todo: flip all the sin and cosines here
def sample_task(self):
return np.random.uniform(low=[0.0, -np.pi], high=[np.pi / 2, np.pi / 2],
size=2) # todo: is the second thing accurate? why is it pi/2
def reward_done(self, obs, action, latent, env_info=None):
theta = float(latent[0])
alpha = float(latent[1])
# return np.cos(alpha) * env_info['reward_run'] + np.sin(alpha) * (1 + env_info['reward_ctrl']), False
reward_run = env_info['torso_velocity'][0] * np.cos(theta) + env_info['torso_velocity'][1] * np.sin(theta)
return np.sin(alpha) * reward_run + np.cos(alpha) * (env_info['reward_ctrl']), False
def calculate_path_reward(self, path, latent):
env_infos = path['env_infos']
# done_rewards = np.array([env_info['reward_run'] for env_info in env_infos])
action_rewards = np.array([env_info['reward_ctrl'] for env_info in env_infos])
theta = float(latent[0])
alpha = float(latent[1])
torso_velocities = np.array([env_info['torso_velocity'][:2] for env_info in env_infos])
done_rewards = torso_velocities[:, 0] * np.cos(theta) + torso_velocities[:, 1] * np.sin(theta)
# return np.cos(alpha) * done_rewards + np.sin(alpha) * action_rewards
return np.sin(alpha) * done_rewards + np.cos(alpha) * action_rewards
def get_features(self, path, latent=None):
env_infos = path['env_infos']
action_rewards = np.array([env_info['reward_ctrl'] for env_info in env_infos])
theta = float(latent[0])
torso_velocities = np.array([env_info['torso_velocity'][:2] for env_info in env_infos])
done_rewards = torso_velocities[:, 0] * np.cos(theta) + torso_velocities[:, 1] * np.sin(theta)
return np.array([self.get_discounted_reward(done_rewards), self.get_discounted_reward(action_rewards)])
def get_weights(self, latents):
latents = np.array(latents)
# return np.concatenate([np.cos(latents).reshape([1, -1]), np.sin(latents).reshape([1, -1])], axis=0)
return np.concatenate([np.sin(latents).reshape([1, -1]), np.cos(latents).reshape([1, -1])], axis=0)
def get_reward_matrix(self, paths, latents):
# |paths| rows, and |latents| columns
# features = self.get_features_matrix(paths)
# weights = self.get_weights(latents)
# result = features.dot(weights)
# return result
return np.array([[self.get_discounted_path_reward(path, latent) for latent in latents] for path in paths])
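# Illustrative sketch (assumption: toy standalone function, not called by the relabelers
# in this file): several relabelers below compute the |paths| x |latents| reward matrix
# as a dot product between per-path features and per-latent weights instead of looping
# over every (path, latent) pair. For purely directional rewards the feature vector is
# the discounted (x, y) velocity sum and the weight vector for angle theta is
# (cos(theta), sin(theta)).
def _sketch_reward_matrix(discounted_xy_velocities, latent_angles):
    """discounted_xy_velocities: array (n_paths, 2); latent_angles: array (n_latents,)."""
    features = np.asarray(discounted_xy_velocities, dtype=float)        # (n_paths, 2)
    weights = np.stack([np.cos(latent_angles), np.sin(latent_angles)])  # (2, n_latents)
    return features.dot(weights)                                        # (n_paths, n_latents)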
# no energy
class AntDirectionRelabelerNoEnergy(ContinuousRelabeler):
def sample_task(self):
return np.random.uniform(low=[-np.pi], high=[np.pi], size=(1,))
def reward_done(self, obs, action, latent, env_info=None):
theta = float(latent[0])
# return np.cos(alpha) * env_info['reward_run'] + np.sin(alpha) * (1 + env_info['reward_ctrl']), False
reward_run = env_info['torso_velocity'][0] * np.cos(theta) + env_info['torso_velocity'][1] * np.sin(theta)
return reward_run, False
def calculate_path_reward(self, path, latent):
env_infos = path['env_infos']
# done_rewards = np.array([env_info['reward_run'] for env_info in env_infos])
# action_rewards = np.array([env_info['reward_ctrl'] for env_info in env_infos])
theta = float(latent[0])
# alpha = float(latent[1])
torso_velocities = np.array([env_info['torso_velocity'][:2] for env_info in env_infos])
done_rewards = torso_velocities[:, 0] * np.cos(theta) + torso_velocities[:, 1] * np.sin(theta)
# return np.cos(alpha) * done_rewards + np.sin(alpha) * action_rewards
return done_rewards
def get_features(self, path, latent=None):
env_infos = path['env_infos']
torso_vel = np.array([env_info['torso_velocity'][:2] for env_info in env_infos])
return np.array([self.get_discounted_reward(torso_vel[:, 0]), self.get_discounted_reward(torso_vel[:, 1])])
def get_weights(self, latents):
return np.array([np.cos(latents[:, 0]), np.sin(latents[:, 0])])
def get_reward_matrix(self, paths, latents):
# |paths| rows, and |latents| columns
features = self.get_features_matrix(paths)
weights = self.get_weights(latents)
result = features.dot(weights)
return result
# return np.array([[self.get_discounted_path_reward(path, latent) for latent in latents] for path in paths])
# latent controls direction, but contact and energy terms still there
class AntDirectionRelabelerNew(ContinuousRelabeler):
def __init__(self, type='360', **kwargs):
super().__init__(**kwargs)
assert type in {'90', '180', '360'}
self.type = type
if self.is_eval:
assert type == '360'
self.eval_latents = np.linspace(-np.pi, np.pi, 25, endpoint=False) + np.pi / 25.0
self.eval_latents = self.eval_latents.reshape(-1, 1)
self.curr_idx = 0
def sample_task(self):
if self.is_eval:
self.curr_idx = (self.curr_idx + 1) % len(self.eval_latents)
return self.eval_latents[self.curr_idx].copy()
if self.type == '90':
return np.random.uniform(low=[-np.pi / 4.0], high=[np.pi / 4.0], size=(1,))
elif self.type == '180':
return np.random.uniform(low=[-np.pi / 2.0], high=[np.pi / 2.0], size=(1,))
elif self.type == '360':
return np.random.uniform(low=[-np.pi], high=[np.pi], size=(1,))
else:
raise RuntimeError
def reward_done(self, obs, action, latent, env_info=None):
theta = float(latent[0])
# return np.cos(alpha) * env_info['reward_run'] + np.sin(alpha) * (1 + env_info['reward_ctrl']), False
reward_run = env_info['torso_velocity'][0] * np.cos(theta) + env_info['torso_velocity'][1] * np.sin(theta) \
+ env_info['reward_ctrl'] + env_info['reward_contact'] + 1
return reward_run, False
def calculate_path_reward(self, path, latent):
env_infos = path['env_infos']
ctrl_rewards = np.array([env_info['reward_ctrl'] for env_info in env_infos])
contact_rewards = np.array([env_info['reward_contact'] for env_info in env_infos])
theta = float(latent[0])
torso_velocities = np.array([env_info['torso_velocity'][:2] for env_info in env_infos])
rewards = torso_velocities[:, 0] * np.cos(theta) + torso_velocities[:, 1] * np.sin(theta) \
+ ctrl_rewards + contact_rewards + 1
return rewards
def get_features(self, path, latent=None):
env_infos = path['env_infos']
torso_vel = np.array([env_info['torso_velocity'][:2] for env_info in env_infos])
ctrl_rewards = np.array([env_info['reward_ctrl'] for env_info in env_infos])
contact_rewards = np.array([env_info['reward_contact'] for env_info in env_infos])
return np.array([self.get_discounted_reward(torso_vel[:, 0]),
self.get_discounted_reward(torso_vel[:, 1]),
self.get_discounted_reward(ctrl_rewards + contact_rewards + 1)])
def get_weights(self, latents):
latents = np.array(latents)
return np.array([np.cos(latents[:, 0]), np.sin(latents[:, 0]), np.ones(len(latents))])
def get_reward_matrix(self, paths, latents):
# |paths| rows, and |latents| columns
features = self.get_features_matrix(paths)
weights = self.get_weights(latents)
result = features.dot(weights)
return result
# return np.array([[self.get_discounted_path_reward(path, latent) for latent in latents] for path in paths])
def to_save_video(self, epoch):
"""
:return: boolean whether to save rgb_video for the epoch
"""
if epoch < 10:
return True
else:
return epoch % 10 == 0
class AntDirectionRelabelerNewSquared(AntDirectionRelabelerNew):
def reward_done(self, obs, action, latent, env_info=None):
theta = float(latent[0])
speed = np.linalg.norm(env_info['torso_velocity'][:2])
cosine = (env_info['torso_velocity'][:2] / speed).dot(np.array([np.cos(theta), np.sin(theta)]))
reward_run = speed * (max(0, cosine) ** 2) + env_info['reward_ctrl'] + env_info['reward_contact'] + 1
return reward_run, False
def calculate_path_reward(self, path, latent):
env_infos = path['env_infos']
ctrl_rewards = np.array([env_info['reward_ctrl'] for env_info in env_infos])
contact_rewards = np.array([env_info['reward_contact'] for env_info in env_infos])
theta = float(latent[0])
torso_velocities = np.array([env_info['torso_velocity'][:2] for env_info in env_infos])
speeds = np.linalg.norm(torso_velocities, axis=1, keepdims=True)
cosines = (torso_velocities / speeds).dot((np.array([np.cos(theta), np.sin(theta)])).reshape([-1,1])).flatten()
cosines[cosines < 0] = 0
rewards = speeds.flatten() * (cosines ** 2) + ctrl_rewards + contact_rewards + 1
return rewards
def get_features(self, path, latent=None):
return np.zeros([len(path), 1])
def get_reward_matrix(self, paths, latents):
return np.array([[self.get_discounted_path_reward(path, latent) for latent in latents] for path in paths])
class AntDirectionRelabelerNewSparse(AntDirectionRelabelerNew):
def reward_done(self, obs, action, latent, env_info=None):
theta = float(latent[0])
speed = np.linalg.norm(env_info['torso_velocity'][:2])
cosine = (env_info['torso_velocity'][:2] / speed).dot(np.array([np.cos(theta), np.sin(theta)]))
reward_run = speed * (cosine > 0.9659).astype(np.float32) + env_info['reward_ctrl'] + env_info['reward_contact'] + 1
return reward_run, False
def calculate_path_reward(self, path, latent):
env_infos = path['env_infos']
ctrl_rewards = np.array([env_info['reward_ctrl'] for env_info in env_infos])
contact_rewards = np.array([env_info['reward_contact'] for env_info in env_infos])
theta = float(latent[0])
torso_velocities = np.array([env_info['torso_velocity'][:2] for env_info in env_infos])
speeds = np.linalg.norm(torso_velocities, axis=1, keepdims=True)
cosines = (torso_velocities / speeds).dot((np.array([np.cos(theta), np.sin(theta)])).reshape([-1, 1])).flatten()
rewards = speeds.flatten() * (cosines > 0.9659).astype(np.float32) + ctrl_rewards + contact_rewards + 1
return rewards
def get_features(self, path, latent=None):
return np.zeros([len(path), 1])
def get_reward_matrix(self, paths, latents):
return np.array([[self.get_discounted_path_reward(path, latent) for latent in latents] for path in paths])
class DiscretizedAntDirectionRelabelerNoEnergy(AntDirectionRelabelerNoEnergy):
def __init__(self, index, num_bins=30, **kwargs):
low, high = -np.pi / 2, np.pi / 2
self.latent = np.array([low + (high - low) * index / num_bins])
super().__init__(**kwargs)
def sample_task(self):
return self.latent.copy()
# for debugging the discrepancy between sac + gym ant and sac_gher + our ant
class SingleLatentAntDirectionRelabelerNew(AntDirectionRelabelerNew):
def sample_task(self):
return np.array([0.0])
class AntDirectionRelabelerRestricted(AntDirectionRelabelerNew):
def sample_task(self):
return np.random.uniform(low=[-np.pi/4.0], high=[np.pi/4.0], size=(1,))
import numpy as np
import socket
from npsocket_sn import SocketNumpyArray
from multiagent.core import World, Agent, Landmark
from multiagent.scenario import BaseScenario
from math import *
import time
class Scenario(BaseScenario):
def __init__(self):
self.sock_sender = SocketNumpyArray()
self.sock_sender.initialize_sender('localhost', 9998)
self.n = None
self.x = None
self.y = None
self.theta = None
self.phero = None
# Target
self.target_x = 4.0
self.target_y = 0.0
self.target_index = 0
self.radius = 4
self.num_experiments = 20
def make_world(self):
world = World()
# set any world properties first
world.dim_c = 2
num_agents = 1
num_obstacle = 4
num_target = 1
world.collaborative = True
# add agents
world.agents = [Agent() for i in range(num_agents)]
for i, agent in enumerate(world.agents):
agent.name = 'agent %d' % i
agent.collide = True
agent.silent = True
agent.size = 0.1
# add obstacles
world.landmarks = [Landmark() for i in range(num_obstacle)]
for i, landmark in enumerate(world.landmarks):
landmark.name = 'obstacle %d' % i
landmark.collide = True
landmark.movable = False
landmark.size = 0.1
# add target
target = Landmark()
target.name = 'target'
target.collide = False
target.movable = False
target.size = 0.1
# Merge the landmarks (obstacles + target)
world.landmarks.append(target)
# make initial conditions
self.n = num_agents
self.x = [0.0, 0.0]*num_agents
self.y = [0.0, 0.0]*num_agents
self.theta = [0.0, 0.0]*num_agents
self.reset_world(world)
# Send initial information to pheromone system
self.sock_sender.send_number(self.n)
return world
def reset_world(self, world):
# random properties for agents
for i, agent in enumerate(world.agents):
agent.color = np.array([0.35, 0.35, 0.85])
# random properties for landmarks
for i, landmark in enumerate(world.landmarks):
landmark.color = np.array([0.25, 0.25, 0.25])
import scipy
import numpy as np
from numpy.testing import assert_equal, run_module_suite, assert_
import unittest
from qutip import num, rand_herm, expect, rand_unitary
def test_SparseHermValsVecs():
"""
Sparse eigs Hermitian
"""
# check using number operator
N = num(10)
spvals, spvecs = N.eigenstates(sparse=True)
for k in range(10):
# check that eigvals are in proper order
assert_equal(abs(spvals[k] - k) <= 1e-13, True)
# check that eigenvectors are right and in right order
assert_equal(abs(expect(N, spvecs[k]) - spvals[k]) < 5e-14, True)
# check output of only a few eigenvals/vecs
spvals, spvecs = N.eigenstates(sparse=True, eigvals=7)
assert_equal(len(spvals), 7)
assert_equal(spvals[0] <= spvals[-1], True)
for k in range(7):
assert_equal(abs(spvals[k] - k) < 1e-12, True)
spvals, spvecs = N.eigenstates(sparse=True, sort='high', eigvals=5)
assert_equal(len(spvals), 5)
assert_equal(spvals[0] >= spvals[-1], True)
vals = np.arange(9, 4, -1)
for k in range(5):
# check that eigvals are ordered from high to low
assert_equal(abs(spvals[k] - vals[k]) < 5e-14, True)
assert_equal(abs(expect(N, spvecs[k]) - vals[k]) < 1e-14, True)
# check using random Hermitian
H = rand_herm(10)
spvals, spvecs = H.eigenstates(sparse=True)
# check that sorting is lowest eigval first
assert_equal(spvals[0] <= spvals[-1], True)
# check that spvals equal expect vals
for k in range(10):
assert_equal(abs(expect(H, spvecs[k]) - spvals[k]) < 5e-14, True)
# check that output is real for Hermitian operator
assert_equal(np.isreal(spvals[k]), True)
def test_SparseValsVecs():
"""
Sparse eigs non-Hermitian
"""
U = rand_unitary(10)
spvals, spvecs = U.eigenstates(sparse=True)
assert_equal(np.real(spvals[0]) <= np.real(spvals[-1]), True)
from __future__ import print_function # Python 2/3 compatibility
import boto3
import json
import decimal
from boto3.dynamodb.conditions import Key, Attr
import numpy as np
### Helper class to convert a DynamoDB item to JSON.
##class DecimalEncoder(json.JSONEncoder):
## def default(self, o):
## if isinstance(o, decimal.Decimal):
## if o % 1 > 0:
## return float(o)
## else:
## return int(o)
## return super(DecimalEncoder, self).default(o)
dynamodb = boto3.resource('dynamodb', region_name='ap-southeast-1')
table = dynamodb.Table('NetworkData')
print("MoteID = 2, Data from 2017")
response = table.query(
KeyConditionExpression=Key('MoteID').eq(2) & Key('MoteTimestamp').begins_with('2017'),
ScanIndexForward = False
)
MoteID1 = []
MoteTimestamp1 = []
OpenPrice1 = []
for i in response['Items']:
MoteID1.append(float(i['MoteID']))
MoteTimestamp1.append(i['MoteTimestamp'])
OpenPrice1.append(float(i['StockData']['OpenPrice']))
# print(i['MoteID'], ":", i['MoteTimestamp'], ":", i['StockData']['OpenPrice'])
print("MoteID = 2, Data from 2016")
response = table.query(
KeyConditionExpression=Key('MoteID').eq(2) & Key('MoteTimestamp').begins_with('2016'),
ScanIndexForward = False
)
for i in response['Items']:
MoteID1.append(float(i['MoteID']))
MoteTimestamp1.append(i['MoteTimestamp'])
OpenPrice1.append(float(i['StockData']['OpenPrice']))
# print(i['MoteID'], ":", i['MoteTimestamp'], ":", i['StockData']['OpenPrice'])
# Sensor Data fabrication
MoteID1 = list(1/2*np.array(MoteID1))
MoteID2 = list(1/2*2*np.array(MoteID1))
MoteID3 = list(1/2*3*np.array(MoteID1))
"""Corona Library."""
import time
import datetime
import warnings
import pandas as pd
import numpy as np
from scipy.stats import gamma
import plotly.graph_objects as go
from plotly.offline import plot
from plotly.subplots import make_subplots
import plotly.io as pio
import scipy.stats
from IPython.display import display
import pkg_resources
import os
warnings.filterwarnings("ignore")
STATEDEF_EN = {0: "not infected", 1: "immun", 2: "infected",
3: "identified", 4: "dead (other)", 5: 'hospital',
6: 'intensive', 7: 'Covid-19 dead'}
STATEDEF_DE = {0: "nicht infiziert", 1: "immun", 2: "infiziert",
3: "identifiziert", 4: "tod (Sonstige)",
5: 'hospitalisiert', 6: 'ICU', 7: 'tod (Covid-19)'}
STATEDEF = STATEDEF_DE
def infection_profile(mean_serial=7.0, std_serial=3.4, nday=21):
"""Calc the infections profile."""
gamma_a = mean_serial**2/std_serial**2
gamma_scale = std_serial**2/mean_serial
xval = np.linspace(0, nday, num=nday+1, dtype=("int"))
yval = gamma.cdf(xval, a=gamma_a, scale=gamma_scale)
delay = np.zeros(nday+1)
delay[1:(nday+1)] = yval[1:(nday+1)] - yval[0:nday]
return xval, yval, delay
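# Illustrative usage sketch (assumption: helper added for documentation only): the
# serial-interval profile returned by `infection_profile` is a discretised gamma CDF
# plus its daily increments, so the increments sum to the CDF value at the last day.
def _sketch_infection_profile(mean_serial=7.0, std_serial=3.4, nday=21):
    xval, yval, delay = infection_profile(mean_serial, std_serial, nday)
    assert len(xval) == nday + 1 and len(delay) == nday + 1
    assert abs(delay.sum() - yval[-1]) < 1e-12   # increments add up to the CDF
    return delay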
def makepop(popname, n=1000000):
"""Generate population."""
if popname == "current":
germany = pkg_resources.resource_filename('covid19sim',
'population_germany.csv')
age, agegroup, gender, family, contacts, dr_day = readpop(
germany, n)
hnr = None
persons = None
elif popname == "household":
household = pkg_resources.resource_filename('covid19sim',
'population_household.csv')
age, agegroup, gender, contacts, dr_day, hnr, persons = \
read_campus(household, n)
else:
print("Unknown population")
return None, None, None, None, None, None
return age, agegroup, gender, contacts, dr_day, hnr, persons
def makeprofile_plot(mean_serial=7, mean_std=3.4, r0=2.7, re=0.9, isoday=4):
"""Plot the infections profile."""
inf1 = go.Figure()
inf2 = go.Figure()
x, y, z = infection_profile(mean_serial, mean_std)
inf1.add_trace(go.Scatter(x=x, y=r0*y, mode='lines',
name="ohne Maßnahmen"))
inf1.add_trace(go.Scatter(x=x, y=re*y, mode='lines', name="Lockdown"))
iso = np.where(x > isoday, 0.5 * r0*z, r0*z)
inf1.add_trace(go.Scatter(x=x, y=np.cumsum(iso), mode='lines',
name="50% Isolation nach " +
str(isoday) + "Tagen"))
inf2.add_trace(go.Scatter(x=x, y=r0*z, mode='lines+markers',
name="ohne Maßnahmen"))
inf2.add_trace(go.Scatter(x=x, y=re*z, mode='lines+markers',
name="Lockdown"))
inf2.add_trace(go.Scatter(x=x, y=iso, mode='lines+markers',
name="50% Isolation nach "+str(isoday) +
"Tagen"))
x, y, z = infection_profile(1, 0.9)
# inf1.add_trace(go.Scatter(x=x, y=y, mode='lines', name="Influenza"))
# inf2.add_trace(go.Bar(x=x, y=z, name="Influenza"))
inf1.update_layout(
title="Sekundärdinfizierte",
xaxis_title="Tage nach der Primärinfektion",
yaxis_title="Kumlierte Sekundärinfizierte",
legend_orientation="h",
font=dict(size=18)
)
inf2.update_layout(
title="Sekundärdinfizierte",
xaxis_title="Tage nach der Primärinfektion",
yaxis_title="Sekundärinfizierte",
legend_orientation="h",
font=dict(size=18)
)
plot(inf1)
plot(inf2)
inf1.write_image("cdf.png", width=1200, height=800)
inf2.write_image("pdf.png", width=1200, height=800)
return
def sim(age, drate, mean_serial=7.0, std_serial=3.4, nday=140,
day0cumrep=20,
prob_icu=0.005, mean_days_to_icu=12, mean_time_to_death=17,
mean_duration_icu=10, immunt0=0.0, ifr=0.5,
long_term_death=False, hnr=None, com_attack_rate=0.6,
simname="test", datadir=".", realized=None, rep_delay=8.7,
alpha=0.2, r_change=None, day0date=datetime.date(2020, 3, 15)):
"""Simulate model.
Parameters
----------
age : array of length n, age of each individual
drate : array of length n, daily mortality rate of each individual
mean_serial : mean of the gamma distribution for the infections profile
std_serial : std of the gamma distribution for the infections profile
nday : number of days to simulated
day0cumrep : number of cumulated reported at day0 (used to set day0)
prob_icu : mean probability that an infected person needs ICU care
mean_days_to_icu : mean days from infection to icucare
mean_duration_icu : mean days on icu
immunt0 : percentage immun at t0
ifr : infected fatality rate
long_term_death : Flag to simulate death from long term death rate
hnr : array of length n, household number
com_attack_rate : dictionary with infection probability within a community.
keys are the change dates
simname : name of the simulation
datadir : directory where all results are saved
realized : dataframe with realized data til now
rep_delay : delay between infection and report
alpha : factor between infected and reported
r_change : dictionary with individual r at change points, keys are the
dates, values are vectors of length n with individual r's
day0date : date of day 0
Returns
-------
state : array shape (n,nday) with the state of each indivial on every day
0 : not infected
1 : immun
2.: infected but not identified
3.: not used
4 : dead (long term mortality)
5 : not used
6 : ICU care
7 : dead (Covid-19)
statesum : array of shape (8, nday) with the number of individuals in each
state per day
infections : array of length nday
the number of infections
day0 : the simulation day on which the cumulative number of reported cases
first exceeds day0cumrep
re : array of length nday
the effective reproduction number per day
params : a copy of all input paramters as a data frame
results : daily results as a dataframe
"""
# This must be the first line
args = locals()
args["mean_age"] = np.mean(age)
tstart = time.time()
# replace dates
keylist = list(r_change.keys())
for key in keylist:
newkey = datetime.datetime.strptime(key, "%Y-%m-%d").date()
newkey = (newkey - day0date).days
r_change[newkey] = r_change[key]
del r_change[key]
keylist = list(com_attack_rate.keys())
for key in keylist:
newkey = datetime.datetime.strptime(key, "%Y-%m-%d").date()
newkey = (newkey - day0date).days
com_attack_rate[newkey] = com_attack_rate[key]
del com_attack_rate[key]
# Initialize r
daymin = min(r_change.keys())
r = r_change[daymin]
rmean = np.mean(r)
daymin = min(com_attack_rate.keys())
com_attack_now = com_attack_rate[daymin]
# Simulation name
r0aux = np.mean(r)
name = simname
n = len(age)
state = np.zeros(shape=(n), dtype="int")
# set nimmun individuals to immune and 20 individuals to infected
nimmun = int(immunt0*n)
state[np.random.choice(n, nimmun)] = 1
state[np.random.choice(n, 20)] = 2
nstate = 8
statesum = np.zeros(shape=(nstate, nday))
statesum[:, 0] = np.bincount(state, minlength=nstate)
# Precalculate profile infection
p = mean_serial**2/std_serial**2
b = std_serial**2/mean_serial
x = np.linspace(0, 28, num=29, dtype=("int"))
x = gamma.cdf(x, a=p, scale=b)
delay = x[1:29] - x[0:28]
delay = np.ascontiguousarray(delay[::-1])
# Precalculate time to icu
time_to_icu = np.random.poisson(lam=mean_days_to_icu, size=n)
time_to_death = np.random.poisson(lam=mean_time_to_death, size=n)
# individual prob icu
ind_prob_icu = drate/np.mean(drate) * prob_icu
# Precalculate time to icu
time_on_icu = np.random.poisson(lam=mean_duration_icu, size=n)
rans = np.random.random(size=n)
go_to_icu = rans < ind_prob_icu
rans = np.random.random(size=n)
go_dead = rans < (drate/np.mean(drate) * ifr)
# initialize arrays
infections = np.zeros(shape=nday)
rexternal = np.zeros(shape=nday)
newicu = np.zeros(shape=nday)
reported = np.zeros(shape=nday)
cuminfected = np.zeros(shape=nday)
infections[0] = np.sum(state == 2)
firstdayinfected = np.full(shape=n, fill_value=1000, dtype="int")
firstdayinfected[state == 2] = 0
firstdayicu = np.full(shape=n, fill_value=1000, dtype="int")
day0 = -1
burn = True
re = np.zeros(shape=nday)
# Precalculate profile infection
p = rep_delay**2/1**2
b = 1**2/rep_delay
x = np.linspace(0, 48, num=49, dtype=("int"))
x = gamma.cdf(x, a=p, scale=b)
pdf = x[1:49] - x[0:48]
# Precalculate community attack
if hnr is not None:
nhnr = np.max(hnr)+1
firstdayhnr = np.full(shape=n, fill_value=1000, dtype="int")
p = mean_serial**2/std_serial**2
b = std_serial**2/mean_serial
x = np.linspace(0, 28, num=29, dtype=("int"))
x = gamma.cdf(x, a=p, scale=b)
rans = np.random.random(n)
x = np.diff(x)
x = x / np.sum(x)
d = np.linspace(0, 27, num=28, dtype=("int"))
com_days_to_infection = np.random.choice(d, n, p=x)
ranscom = np.random.random(n)
for i in range(1, nday):
# New infections on day i
imin = max(0, i-28)
h = infections[imin: i]
newinf = np.sum(h*delay[-len(h):])
# unconditional deaths
if long_term_death:
rans = np.random.random(size=n)
state[(rans < drate) & (state != 7)] = 4
# Calculate the number of days infected
days_infected = i - firstdayinfected
# set all infected and identified cases with more than 28 days to immune
state[((days_infected > 28) & (state < 4)) |
(time_on_icu == (i - firstdayicu))] = 1
# for infected cases calculate the probability of icu admission
filt = (time_to_icu == days_infected) & go_to_icu & (state == 2)
state[filt] = 6
firstdayicu[filt] = i
newicu[i] = np.sum(filt)
state[(time_to_death < days_infected) & go_dead] = 7
# The new infections are mapped to households
if hnr is not None:
# Household infections
filt2 = (com_days_to_infection == (i - firstdayhnr[hnr])) &\
(state == 0) & (ranscom < com_attack_now)
# external infections
aux = n / newinf
rans = np.random.random(size=n) * aux
filt1 = (rans < r) & (state == 0)
filt = filt1 | filt2
state[filt] = 2
# Store the new infections in each household
newhnr = hnr[filt1]
firstdayhnr[newhnr] = np.where(firstdayhnr[newhnr] < i,
firstdayhnr[newhnr], i)
else:
# infection probabilities by case
aux = n / newinf
rans = np.random.random(size=n) * aux
filt = (rans < r) & (state == 0)
state[filt] = 2
# store first infections day
firstdayinfected[filt] = i
rexternal[i] = rmean
# number of new infections
infections[i] = np.sum(filt)
if newinf > 0:
re[i] = infections[i] / newinf
else:
re[i] = 0
statesum[:, i] = np.bincount(state, minlength=nstate)
for s in range(0, min(i, 35)):
reported[i] = reported[i] + infections[i-s] * pdf[s] * alpha
# find day0
if (np.sum(reported) > day0cumrep) and (day0 == -1):
day0 = i
# adjust r
if (day0 > -1) and ((i-day0) in r_change.keys()):
r = r_change[i-day0]
rmean = np.mean(r)
# change community attack rate
if (day0 > -1) and ((i-day0) in com_attack_rate.keys()):
com_attack_now = com_attack_rate[i-day0]
# return only simulation parameter and no populations parameters
argsnew = {}
for key, value in args.items():
if type(value) in [int, bool, float, str]:
argsnew[key] = value
params = pd.DataFrame.from_dict(argsnew, orient="index")
params = params.reset_index()
params.columns = ["Parameter", "Wert"]
agegroup = (age/10).astype(int)*10
# Write each dataframe to a different worksheet.
excelfile = os.path.join(datadir, name + ".xlsx")
writer = pd.ExcelWriter(excelfile, engine='xlsxwriter')
params.to_excel(writer, sheet_name="Parameter", index=False)
groupresults = pd.DataFrame({"Tag": [(x-day0) for x in range(0, nday)]})
groupresults["Datum"] = [day0date + datetime.timedelta(days=x-day0)
for x in range(0, nday)]
groupresults["neue Infektionen"] = infections
# reported cases (Meldefälle)
cuminfected = statesum[1]+statesum[2]+statesum[7]+statesum[6]+statesum[5]
# newinfections
newinfections = np.diff(cuminfected, prepend=0)
# reported
reported = np.empty_like(cuminfected)
reported[0] = 0
for t in range(1, len(newinfections)):
reported[t] = 0
for s in range(0, min(t, 27)):
reported[t] = reported[t] + newinfections[t-s] * pdf[s]
groupresults["Meldefälle"] = np.around(reported * alpha)
groupresults["Meldefälle (kum.)"] = groupresults["Meldefälle"].cumsum()
groupresults["Erwartete Neu-Intensiv"] = newicu
groupresults["R effektiv"] = re
groupresults["R extern"] = rexternal
for key, values in STATEDEF.items():
if max(statesum[key]) > 0:
groupresults[values] = statesum[key]
if realized is not None:
realcases = realized[['Meldedatum', 'Tote', 'Fälle', 'Fälle_kum',
'cumdeath', "Intensiv"]].copy()
realcases.rename(columns={"Meldedatum": "Datum", "cumdeath":
"kum. Tote (Ist)", "Fälle": "Meldefälle (Ist)",
"Fälle_kum": "kum. Meldefälle (Ist)",
"Intensiv": "Ist Intensiv"
}, inplace=True)
groupresults = groupresults.merge(realcases, on="Datum", how="left")
groupresults.rename(columns={
"neue Infektionen": "Erwartete Neu-Infektionen",
"Meldefälle": "Erwartete Neu-Meldefälle",
"Meldefälle (kum.)": "Erwartete Gesamt-Meldefälle",
"R effektiv": "Reproduktionszahl",
"nicht infiziert": "Nicht-Infizierte",
"immun": "Erwartete Genesene",
"infiziert": "Erwartete akt. Infizierte",
"tod (Covid-19)": "Erwartete Tote",
"Tote": "IST Neue Tote",
"Meldefälle (Ist)": "RKI Neu-Meldefälle",
"kum. Meldefälle (Ist)": "RKI Gesamt-Meldefälle",
'kum. Tote (Ist)': "IST Tote gesamt"
}, inplace=True)
results = {}
groupresults["Erwartete neue Tote"] = np.diff(groupresults["Erwartete Tote"],
prepend=0)
wasintensive = firstdayicu < 1000
for col in ['Erwartete Neu-Infektionen', 'Erwartete Neu-Meldefälle',
'ICU', "Erwartete Neu-Intensiv", 'Erwartete neue Tote']:
res = {}
peakd = np.argmax(groupresults[col])
res["Peaktag"] = np.array(groupresults.Datum)[peakd]
res["Peakwert"] = np.array(groupresults[col])[peakd]
res["Summe"] = np.sum(groupresults[col])
if col == 'Erwartete Neu-Infektionen':
res["Mittleres Alter"] = np.mean(age[state > 0])
res["Median Alter"] = np.median(age[state > 0])
if col == "Erwartete neue Tote":
res["Mittleres Alter"] = np.mean(age[state == 7])
res["Median Alter"] = np.median(age[state == 7])
if col == "<NAME>":
res["Mittleres Alter"] = np.mean(age[wasintensive])
res["Median Alter"] = np.median(age[wasintensive])
results[col] = res
results = pd.DataFrame.from_dict(results, orient="index")
display(results)
results.to_excel(writer, sheet_name='Ergebnisübersicht', index=False)
groupresults = groupresults[groupresults.Datum >=
datetime.date(2020, 3, 1)]
groupresults.to_excel(writer, sheet_name='Zustand pro Tag', index=False)
writer.save()
tanalyse = time.time()
print("Simulation time: " + str(tanalyse-tstart))
return state, statesum, infections, day0, re, argsnew, groupresults
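# Illustrative usage sketch (assumptions: a synthetic population instead of `makepop`,
# a writable `datadir`, and the xlsxwriter engine being installed, since `sim` writes
# "<simname>.xlsx"). `sim` expects per-person age and daily death rates, and
# `r_change` / `com_attack_rate` must be dictionaries keyed by ISO date strings that
# are converted to day offsets relative to `day0date` inside the function.
def _sketch_run_sim(n=10000):
    rng = np.random.RandomState(0)
    age = rng.randint(0, 100, size=n)
    drate = np.full(n, 3e-5)                      # crude flat daily mortality rate
    r_change = {"2020-03-15": np.full(n, 2.5),    # before lockdown
                "2020-03-23": np.full(n, 0.9)}    # after lockdown
    com_attack = {"2020-03-15": np.full(n, 0.6)}
    return sim(age, drate, r_change=r_change, com_attack_rate=com_attack,
               hnr=None, realized=None, simname="sketch", datadir=".")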
def read_campus(filename, n=1000000):
"""Generate popupulation from campus."""
campus = pd.read_csv(filename)
nrep = int(np.around(n/campus.shape[0]))
repid = np.array([x for x in range(0, nrep)], dtype="int")
replica = np.tile(repid, campus.shape[0])
age = np.repeat(np.array(campus.age), nrep)
gender = np.repeat(np.array(campus.gender), nrep)
persons = np.repeat(np.array(campus.Personenzahl - 1), nrep)
contacts = np.repeat(np.array(campus.contacts_mean), nrep)
agegroup = np.repeat(np.array(campus.agegroup), nrep)
dr_year = np.repeat(np.array(campus.deathrate), nrep)
hnr = np.repeat(np.array(campus.hnrnew), nrep)
nhnr = np.max(hnr)+1
hnr = hnr + replica * nhnr
# normalize contacts to a mean of one
contacts = contacts / np.sum(contacts)
# License: BSD 3-clause
# Authors: <NAME>,
# <NAME>
import numpy as np
from numpy.lib.stride_tricks import as_strided
import scipy.signal as sg
from scipy import linalg, fftpack
from numpy.testing import assert_almost_equal
def rolling_mean(X, window_size):
"""
Calculate the rolling mean
Parameters
----------
X : ndarray
Raw input signal
window_size : int
Length of rolling mean window
Returns
------
mean_averaged_X : ndarray
Rolling mean averaged X
"""
w = 1.0 / window_size * np.ones((window_size))
return np.correlate(X, w, 'valid')
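# Illustrative usage sketch (assumption: helper added for documentation only): a
# window_size-point rolling mean of a constant signal returns the same constant, and
# the output is shorter by window_size - 1 samples because of the 'valid' correlation
# mode used above.
def _sketch_rolling_mean():
    X = np.ones(100)
    smoothed = rolling_mean(X, window_size=8)
    assert smoothed.shape == (100 - 8 + 1,)
    assert np.allclose(smoothed, 1.0)
    return smoothed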
def voiced_unvoiced(X, window_size=256, window_step=128, copy=True):
"""
Voiced unvoiced detection from a raw signal
Based on code from:
https://www.clear.rice.edu/elec532/PROJECTS96/lpc/code.html
Other references:
http://www.seas.ucla.edu/spapl/code/harmfreq_MOLRT_VAD.m
Parameters
----------
X : ndarray
Raw input signal
window_size : int, optional (default=256)
The window size to use, in samples.
window_step : int, optional (default=128)
How far the window steps after each calculation, in samples.
copy : bool, optional (default=True)
Whether to make a copy of the input array or allow in place changes.
"""
X = np.array(X, copy=copy)
if len(X.shape) < 2:
X = X[None]
n_points = X.shape[1]
n_windows = n_points // window_step
# Padding
pad_sizes = [(window_size - window_step) // 2,
window_size - window_step // 2]
# TODO: Handling for odd window sizes / steps
X = np.hstack((np.zeros((X.shape[0], pad_sizes[0])), X,
np.zeros((X.shape[0], pad_sizes[1]))))
clipping_factor = 0.6
b, a = sg.butter(10, np.pi * 9 / 40)
voiced_unvoiced = np.zeros((n_windows, 1))
period = np.zeros((n_windows, 1))
for window in range(max(n_windows - 1, 1)):
XX = X.ravel()[window * window_step + np.arange(window_size)]
XX *= sg.hamming(len(XX))
XX = sg.lfilter(b, a, XX)
left_max = np.max(np.abs(XX[:len(XX) // 3]))
right_max = np.max(np.abs(XX[-len(XX) // 3:]))
clip_value = clipping_factor * np.min([left_max, right_max])
XX_clip = np.clip(XX, clip_value, -clip_value)
XX_corr = np.correlate(XX_clip, XX_clip, mode='full')
center = np.argmax(XX_corr)
right_XX_corr = XX_corr[center:]
prev_window = max([window - 1, 0])
if voiced_unvoiced[prev_window] > 0:
# Want it to be harder to turn off than turn on
strength_factor = .29
else:
strength_factor = .3
start = np.where(right_XX_corr < .3 * XX_corr[center])[0]
# 20 is hardcoded but should depend on samplerate?
start = np.max([20, start[0]])
search_corr = right_XX_corr[start:]
index = np.argmax(search_corr)
second_max = search_corr[index]
if (second_max > strength_factor * XX_corr[center]):
voiced_unvoiced[window] = 1
period[window] = start + index - 1
else:
voiced_unvoiced[window] = 0
period[window] = 0
return np.array(voiced_unvoiced), np.array(period)
def lpc_analysis(X, order=8, window_step=128, window_size=2 * 128,
emphasis=0.9, voiced_start_threshold=.9,
voiced_stop_threshold=.6, truncate=False, copy=True):
"""
Extract LPC coefficients from a signal
Based on code from:
http://labrosa.ee.columbia.edu/matlab/sws/
Parameters
----------
X : ndarray
Signals to extract LPC coefficients from
order : int, optional (default=8)
Order of the LPC coefficients. For speech, use the general rule that the
order is two times the expected number of formants plus 2.
This can be formulated as 2 + 2 * (fs // 2000). For signals with
approximately fs = 7000, this is 8 coefficients: 2 + 2 * (7000 // 2000).
window_step : int, optional (default=128)
The size (in samples) of the space between each window
window_size : int, optional (default=2 * 128)
The size of each window (in samples) to extract coefficients over
emphasis : float, optional (default=0.9)
The emphasis coefficient to use for filtering
voiced_start_threshold : float, optional (default=0.9)
Upper power threshold for estimating when speech has started
voiced_stop_threshold : float, optional (default=0.6)
Lower power threshold for estimating when speech has stopped
truncate : bool, optional (default=False)
Whether to cut the data at the last window or do zero padding.
copy : bool, optional (default=True)
Whether to copy the input X or modify in place
Returns
-------
lp_coefficients : ndarray
lp coefficients to describe the frame
per_frame_gain : ndarray
calculated gain for each frame
residual_excitation : ndarray
leftover energy which is not described by lp coefficents and gain
voiced_frames : ndarray
array of [0, 1] values which holds voiced/unvoiced decision for each
frame.
References
----------
<NAME> (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
"""
X = np.array(X, copy=copy)
if len(X.shape) < 2:
X = X[None]
n_points = X.shape[1]
n_windows = n_points // window_step
if not truncate:
pad_sizes = [(window_size - window_step) // 2,
window_size - window_step // 2]
# TODO: Handling for odd window sizes / steps
X = np.hstack((np.zeros((X.shape[0], pad_sizes[0])), X,
np.zeros((X.shape[0], pad_sizes[1]))))
else:
pad_sizes = [0, 0]
X = X[0, :n_windows * window_step]
lp_coefficients = np.zeros((n_windows, order + 1))
per_frame_gain = np.zeros((n_windows, 1))
residual_excitation = np.zeros(
((n_windows - 1) * window_step + window_size))
# Pre-emphasis high-pass filter
X = sg.lfilter([1, -emphasis], 1, X)
# stride_tricks.as_strided?
autocorr_X = np.zeros((n_windows, 2 * window_size - 1))
for window in range(max(n_windows - 1, 1)):
XX = X.ravel()[window * window_step + np.arange(window_size)]
WXX = XX * sg.hanning(window_size)
autocorr_X[window] = np.correlate(WXX, WXX, mode='full')
center = np.argmax(autocorr_X[window])
RXX = autocorr_X[window,
np.arange(center, window_size + order)]
R = linalg.toeplitz(RXX[:-1])
solved_R = linalg.pinv(R).dot(RXX[1:])
filter_coefs = np.hstack((1, -solved_R))
residual_signal = sg.lfilter(filter_coefs, 1, WXX)
gain = np.sqrt(np.mean(residual_signal ** 2))
lp_coefficients[window] = filter_coefs
per_frame_gain[window] = gain
assign_range = window * window_step + np.arange(window_size)
residual_excitation[assign_range] += residual_signal / gain
# Throw away first part in overlap mode for proper synthesis
residual_excitation = residual_excitation[pad_sizes[0]:]
return lp_coefficients, per_frame_gain, residual_excitation
def lpc_synthesis(lp_coefficients, per_frame_gain, residual_excitation=None,
voiced_frames=None, window_step=128, emphasis=0.9):
"""
Synthesize a signal from LPC coefficients
Based on code from:
http://labrosa.ee.columbia.edu/matlab/sws/
http://web.uvic.ca/~tyoon/resource/auditorytoolbox/auditorytoolbox/synlpc.html
Parameters
----------
lp_coefficients : ndarray
Linear prediction coefficients
per_frame_gain : ndarray
Gain coefficients
residual_excitation : ndarray or None, optional (default=None)
Residual excitations. If None, this will be synthesized with white noise.
voiced_frames : ndarray or None, optional (default=None)
Voiced frames. If None, all frames assumed to be voiced.
window_step : int, optional (default=128)
The size (in samples) of the space between each window
emphasis : float, optional (default=0.9)
The emphasis coefficient to use for filtering
Returns
-------
synthesized : ndarray
Sound vector synthesized from input arguments
References
----------
<NAME> (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
"""
# TODO: Incorporate better synthesis from
# http://eecs.oregonstate.edu/education/docs/ece352/CompleteManual.pdf
window_size = 2 * window_step
[n_windows, order] = lp_coefficients.shape
n_points = (n_windows + 1) * window_step
n_excitation_points = n_points + window_step + window_step // 2
random_state = np.random.RandomState(1999)
if residual_excitation is None:
# Need to generate excitation
if voiced_frames is None:
# No voiced/unvoiced info, so just use randn
voiced_frames = np.ones((lp_coefficients.shape[0], 1))
residual_excitation = np.zeros((n_excitation_points))
f, m = lpc_to_frequency(lp_coefficients, per_frame_gain)
t = np.linspace(0, 1, window_size, endpoint=False)
hanning = sg.hanning(window_size)
for window in range(n_windows):
window_base = window * window_step
index = window_base + np.arange(window_size)
if voiced_frames[window]:
sig = np.zeros_like(t)
cycles = np.cumsum(f[window][0] * t)
sig += sg.sawtooth(cycles, 0.001)
residual_excitation[index] += hanning * sig
residual_excitation[index] += hanning * 0.01 * random_state.randn(
window_size)
else:
n_excitation_points = residual_excitation.shape[0]
n_points = n_excitation_points + window_step + window_step // 2
residual_excitation = np.hstack((residual_excitation,
np.zeros(window_size)))
if voiced_frames is None:
voiced_frames = np.ones_like(per_frame_gain)
synthesized = np.zeros((n_points))
for window in range(n_windows):
window_base = window * window_step
oldbit = synthesized[window_base + np.arange(window_step)]
w_coefs = lp_coefficients[window]
if not np.all(w_coefs):
# Hack to make lfilter avoid
# ValueError: BUG: filter coefficient a[0] == 0 not supported yet
# when all coeffs are 0
w_coefs = [1]
g_coefs = voiced_frames[window] * per_frame_gain[window]
index = window_base + np.arange(window_size)
newbit = g_coefs * sg.lfilter([1], w_coefs,
residual_excitation[index])
synthesized[index] = np.hstack((oldbit, np.zeros(
(window_size - window_step))))
synthesized[index] += sg.hanning(window_size) * newbit
synthesized = sg.lfilter([1], [1, -emphasis], synthesized)
return synthesized
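# Illustrative round-trip sketch (assumption: a synthetic sine wave instead of a speech
# file): run `lpc_analysis` on a short tone and resynthesise it with `lpc_synthesis`.
# The reconstruction is only approximate; the point is to show how the pieces returned
# by the analysis plug into the synthesis call.
def _sketch_lpc_round_trip(fs=8000, seconds=0.5):
    t = np.arange(int(fs * seconds)) / float(fs)
    X = np.sin(2 * np.pi * 220 * t)
    lpc, gain, excitation = lpc_analysis(X, order=8, window_step=128,
                                         window_size=256)
    X_hat = lpc_synthesis(lpc, gain, residual_excitation=excitation,
                          window_step=128)
    return X, X_hat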
def soundsc(X, copy=True):
"""
Approximate implementation of soundsc from MATLAB without the audio playing.
Parameters
----------
X : ndarray
Signal to be rescaled
copy : bool, optional (default=True)
Whether to make a copy of input signal or operate in place.
Returns
-------
X_sc : ndarray
(-1, 1) scaled version of X as float32, suitable for writing
with scipy.io.wavfile
"""
X = np.array(X, copy=copy)
X = (X - X.min()) / (X.max() - X.min())
X = 2 * X - 1
return X.astype('float32')
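# Illustrative usage sketch (assumption: scipy.io.wavfile is available, which it is in
# a standard scipy installation): rescale a synthesised signal with `soundsc` and write
# it to disk as a float32 wav file.
def _sketch_write_wav(X, path="sketch.wav", fs=8000):
    from scipy.io import wavfile
    wavfile.write(path, fs, soundsc(X))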
def lpc_to_frequency(lp_coefficients, per_frame_gain):
"""
Extract resonant frequencies and magnitudes from LPC coefficients and gains.
Parameters
----------
lp_coefficients : ndarray
LPC coefficients, such as those calculated by ``lpc_analysis``
per_frame_gain : ndarray
Gain calculated for each frame, such as those calculated
by ``lpc_analysis``
Returns
-------
frequencies : ndarray
Resonant frequencies calculated from LPC coefficients and gain. Returned
frequencies are from 0 to 2 * pi
magnitudes : ndarray
Magnitudes of resonant frequencies
References
----------
<NAME> (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
"""
n_windows, order = lp_coefficients.shape
frame_frequencies = np.zeros((n_windows, (order - 1) // 2))
frame_magnitudes = np.zeros_like(frame_frequencies)
for window in range(n_windows):
w_coefs = lp_coefficients[window]
g_coefs = per_frame_gain[window]
roots = np.roots(np.hstack(([1], w_coefs[1:])))
# Roots doesn't return the same thing as MATLAB... agh
frequencies, index = np.unique(
np.abs(np.angle(roots)), return_index=True)
# Make sure 0 doesn't show up...
gtz = np.where(frequencies > 0)[0]
frequencies = frequencies[gtz]
index = index[gtz]
magnitudes = g_coefs / (1. - np.abs(roots))
sort_index = np.argsort(frequencies)
frame_frequencies[window, :len(sort_index)] = frequencies[sort_index]
frame_magnitudes[window, :len(sort_index)] = magnitudes[sort_index]
return frame_frequencies, frame_magnitudes
def sinusoid_analysis(X, input_sample_rate, resample_block=128, copy=True):
"""
Construct a sinusoidal model for the input signal.
Parameters
----------
X : ndarray
Input signal to model
input_sample_rate : int
The sample rate of the input signal
resample_block : int, optional (default=128)
Controls the step size of the sinusoidal model
Returns
-------
frequencies_hz : ndarray
Frequencies for the sinusoids, in Hz.
magnitudes : ndarray
Magnitudes of sinusoids returned in ``frequencies``
References
----------
<NAME> (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
"""
X = np.array(X, copy=copy)
resample_to = 8000
if input_sample_rate != resample_to:
if input_sample_rate % resample_to != 0:
raise ValueError("Input sample rate must be a multiple of 8k!")
# Should be able to use resample... ?
# resampled_count = round(len(X) * resample_to / input_sample_rate)
# X = sg.resample(X, resampled_count, window=sg.hanning(len(X)))
X = sg.decimate(X, input_sample_rate // resample_to)
step_size = 2 * round(resample_block / input_sample_rate * resample_to / 2.)
a, g, e = lpc_analysis(X, order=8, window_step=step_size,
window_size=2 * step_size)
f, m = lpc_to_frequency(a, g)
f_hz = f * resample_to / (2 * np.pi)
return f_hz, m
def slinterp(X, factor, copy=True):
"""
Slow-ish linear interpolation of a 1D numpy array. There must be some
better function to do this in numpy.
Parameters
----------
X : ndarray
1D input array to interpolate
factor : int
Integer factor to interpolate by
Return
------
X_r : ndarray
"""
sz = np.product(X.shape)
X = np.array(X, copy=copy)
X_s = np.hstack((X[1:], [0]))
X_r = np.zeros((factor, sz))
for i in range(factor):
X_r[i, :] = (factor - i) / float(factor) * X + (i / float(factor)) * X_s
return X_r.T.ravel()[:(sz - 1) * factor + 1]
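# Illustrative usage sketch (assumption: helper added for documentation only):
# interpolating a length-n array by `factor` yields (n - 1) * factor + 1 samples, with
# the original samples preserved every `factor` steps.
def _sketch_slinterp():
    X = np.array([0.0, 1.0, 0.0])
    X_i = slinterp(X, factor=4)
    assert len(X_i) == (len(X) - 1) * 4 + 1
    assert np.allclose(X_i[::4], X)
    return X_i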
def sinusoid_synthesis(frequencies_hz, magnitudes, input_sample_rate=16000,
resample_block=128):
"""
Create a time series based on input frequencies and magnitudes.
Parameters
----------
frequencies_hz : ndarray
Input signal to model
magnitudes : int
The sample rate of the input signal
input_sample_rate : int, optional (default=16000)
The sample rate parameter that the sinusoid analysis was run with
resample_block : int, optional (default=128)
Controls the step size of the sinusoidal model
Returns
-------
synthesized : ndarray
Sound vector synthesized from input arguments
References
----------
<NAME> (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
"""
rows, cols = frequencies_hz.shape
synthesized = np.zeros((1 + ((rows - 1) * resample_block),))
for col in range(cols):
mags = slinterp(magnitudes[:, col], resample_block)
freqs = slinterp(frequencies_hz[:, col], resample_block)
cycles = np.cumsum(2 * np.pi * freqs / float(input_sample_rate))
sines = mags * np.cos(cycles)
synthesized += sines
return synthesized
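# Illustrative round-trip sketch (assumption: a pure tone is used only so the sketch
# has no external audio dependency): `sinusoid_analysis` expects a signal sampled at a
# multiple of 8 kHz and returns per-frame resonant frequencies (Hz) and magnitudes,
# which `sinusoid_synthesis` turns back into a waveform ("sinewave speech").
def _sketch_sinewave_round_trip(fs=16000, seconds=0.5):
    t = np.arange(int(fs * seconds)) / float(fs)
    X = np.sin(2 * np.pi * 440 * t)
    freqs_hz, mags = sinusoid_analysis(X, input_sample_rate=fs)
    return sinusoid_synthesis(freqs_hz, mags, input_sample_rate=fs)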
def compress(X, n_components, window_size=128):
"""
Compress using the DCT
Parameters
----------
X : ndarray, shape=(n_samples,)
The input signal to compress. Should be 1-dimensional
n_components : int
The number of DCT components to keep. Setting n_components to about
.5 * window_size can give compression with fairly good reconstruction.
window_size : int
The input X is broken into windows of window_size, each of which are
then compressed with the DCT.
Returns
-------
X_compressed : ndarray, shape=(num_windows, window_size)
A 2D array of non-overlapping DCT coefficients. For use with uncompress
Reference
---------
http://nbviewer.ipython.org/github/craffel/crucialpython/blob/master/week3/stride_tricks.ipynb
"""
if len(X) % window_size != 0:
append = np.zeros((window_size - len(X) % window_size))
X = np.hstack((X, append))
num_frames = len(X) // window_size
X_strided = X.reshape((num_frames, window_size))
X_dct = fftpack.dct(X_strided, norm='ortho')
if n_components is not None:
X_dct = X_dct[:, :n_components]
return X_dct
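# Illustrative usage sketch (assumption: helper added for documentation only):
# `compress` splits the signal into non-overlapping windows and keeps only the first
# `n_components` DCT coefficients per window, so the output has shape
# (n_windows, n_components). `uncompress` below inverts the windowing; reconstruction
# quality depends on how many components are kept.
def _sketch_compress():
    X = np.sin(2 * np.pi * 5 * np.linspace(0, 1, 1024))
    X_dct = compress(X, n_components=64, window_size=128)
    assert X_dct.shape == (1024 // 128, 64)
    return X_dct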
def uncompress(X_compressed, window_size=128):
"""
Uncompress a DCT compressed signal (such as returned by ``compress``).
Parameters
----------
X_compressed : ndarray, shape=(n_samples, n_features)
Windowed and compressed array.
window_size : int, optional (default=128)
Size of the window used when ``compress`` was called.
Returns
-------
X_reconstructed : ndarray, shape=(n_samples)
Reconstructed version of X.
"""
if X_compressed.shape[1] % window_size != 0:
append = np.zeros((X_compressed.shape[0], window_size - X_compressed.shape[1] % window_size))
import math
import numpy as np
import pandas as pd
import tqdm
from scipy.sparse import coo_matrix
from typing import List, Optional
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from matplotlib.path import Path
from matplotlib.collections import PathCollection
import matplotlib.transforms as mtransforms
from matplotlib.lines import lineMarkers
from utils import Multidict, get_angle, get_lower_bounds
from database import Graph, CelestialGraph
#from solver import AngularGraphSolution
def visualize_graph_2d(graph: Graph, savePath=None):
fig = plt.figure()
axis = plt.subplot()
axis.axis('off')
_visualize_edges_2d(graph)
_visualize_vertices_2d(graph)
_visualize_celest_body_2d(axis, graph)
if savePath:
if savePath[-3:] == "ipe":
old_backend = matplotlib.get_backend()
matplotlib.use('module://backend_ipe')
save_format = "ipe"
plt.savefig(savePath, format=save_format)
matplotlib.use(old_backend)
else:
plt.savefig(savePath)
else:
plt.show()
def visualize_min_sum_sol_2d(solution: 'AngularGraphSolution'):
graph = solution.graph
fig = plt.figure()
axis = plt.subplot()
axis.axis('off')
_visualize_edges_2d(solution.graph)
_visualize_vertices_2d(solution.graph)
_visualize_celest_body_2d(axis, solution.graph)
# Make an edge order for vertices
vertex_order = Multidict()
ordered_times = solution.get_ordered_times()
for time_key in ordered_times.get_ordered_keys():
for edges in ordered_times[time_key]:
if edges[0] < edges[1]:
vertex_order[edges[0]] = edges
vertex_order[edges[1]] = edges
# Get minimum edge length (clamped below at 0.4)
min_length = max(np.array(
[
np.linalg.norm(solution.graph.vertices[i] - solution.graph.vertices[j])
for i, j in solution.graph.edges
]
).min(), 0.4)
# Draws the angle paths in a circular fashion
path_list = []
last_points = []
for vertex_key in vertex_order:
last_edge = None
last_direction = None
current_min_length = min_length * 0.3
last_point = None
for edge in vertex_order[vertex_key]:
if last_edge:
other_vertices = np.hstack([
np.setdiff1d(np.array(last_edge), np.array([vertex_key])),
np.setdiff1d(np.array(edge), np.array([vertex_key]))
])
angles = [get_angle(
graph.vertices[vertex_key],
graph.vertices[vertex_key] + [1, 0],
graph.vertices[other_vertex]) for other_vertex in other_vertices]
# If y-coord is below the current vertex we need to calculate the angle different
for i in range(len(angles)):
if graph.vertices[other_vertices[i]][1] < graph.vertices[vertex_key][1]:
angles[i] = 360 - angles[i]
# Calculate if we need to go from angle[0] to angle[1] or other way around
# to not create an arc over 180 degrees
diff = abs(angles[0] - angles[1])
if diff > 180:
diff = 360 - diff
normal_angle_direction = math.isclose((angles[0] + diff) % 360, angles[1], rel_tol=1e-5)
if not normal_angle_direction:
angles = reversed(angles)
# 1 shall be clockwise and -1 counter-clockwise direction
current_direction = 1 if normal_angle_direction else -1
if last_direction:
if current_direction != last_direction: # direction change happened
current_min_length *= 1.25
# Transform the arc to the right position
transform = mtransforms.Affine2D().scale(current_min_length, current_min_length)
transform = transform.translate(*graph.vertices[vertex_key])
arc = Path.arc(*angles)
arc_t = arc.transformed(transform)
if last_direction:
if current_direction != last_direction: # direction change happened
last_vertex = path_list[-1].vertices[-1] if last_direction == 1 else path_list[-1].vertices[0]
new_vertex = arc_t.vertices[0] if current_direction == 1 else arc_t.vertices[-1]
bridge_path = Path([last_vertex, new_vertex])
path_list.append(bridge_path)
last_direction = current_direction
path_list.append(arc_t)
last_point = path_list[-1].vertices[-1] if last_direction == 1 else path_list[-1].vertices[0]
last_points.append(last_point)
last_edge = edge
# Add these points to detect direction
last_points.append(last_point)
path_collection = PathCollection(path_list, edgecolor='r', facecolor='#00000000')
axis.add_collection(path_collection)
a_last_points = np.array([l for l in last_points if l is not None])
plt.plot(a_last_points[:, 0], a_last_points[:, 1], 'r.')
axis.autoscale()
plt.show()
def visualize_solution_2d(solution: 'AngularGraphSolution', title=None, show_used=True):
fig = plt.figure()
if title:
fig.suptitle(title)
#fig.subplots_adjust(hspace=0.3, wspace=0.3)
ordered_times = solution.get_ordered_times()
cells_needed = len(ordered_times)
row_num, col_num = _calculate_row_col_needed(cells_needed)
fig.set_size_inches(fig.get_size_inches()[1], fig.get_size_inches()[1])
i = 1
already_used = []
for time in ordered_times.get_ordered_keys():
axis = plt.subplot(row_num, col_num, i)
#if solution.solution_type in ["makespan"]:
plt.title("t = {0}".format(round(time, 2)))
axis.axis('off')
_visualize_edges_2d(
solution.graph,
ordered_times[time], already_used)
_visualize_vertices_2d(solution.graph)
if show_used:
already_used.extend(ordered_times[time])
_visualize_celest_body_2d(axis, solution.graph)
i += 1
fig.tight_layout()
plt.show()
def _visualize_edges_2d(graph: Graph, taken_edges=None, already_used=None):
if graph.vertices.dtype == np.dtype('O'):
graph.vertices = np.array([p for p in graph.vertices])
for edge in graph.edges:
plt.plot(graph.vertices[edge][:, 0], graph.vertices[edge][:, 1], color='black', marker=',', alpha=0.3)
if already_used:
for indices in already_used:
edge = np.array([graph.vertices[i] for i in indices])
plt.plot(edge[:, 0], edge[:, 1], "y-")
if taken_edges:
for indices in taken_edges:
edge = np.array([graph.vertices[i] for i in indices])
plt.plot(edge[:, 0], edge[:, 1], "r-")
def _visualize_vertices_2d(graph: Graph):
plt.plot(graph.vertices[:, 0], graph.vertices[:, 1], "b.")
def _visualize_celest_body_2d(axis, graph: Graph):
if isinstance(graph, CelestialGraph):
for body in graph.celestial_bodies:
# Add earth as celestial object
image = plt.imread("utils/figures/world-1303628_1920.png")
radius = 870
scale = len(image) / (radius*2)
extent = (
(body.position[0] - float(body.size)) * scale,
(body.position[0] + float(body.size)) * scale,
(body.position[1] - float(body.size)) * scale,
(body.position[1] + float(body.size)) * scale
)
im = axis.imshow(image, extent=extent)
pos = body.position
patch = patches.Circle(pos, radius=float(body.size), transform=axis.transData)
im.set_clip_path(patch)
axis.autoscale_view()
def _visualize_celest_body_2d_old(axis, graph: Graph):
if isinstance(graph, CelestialGraph):
for body in graph.celestial_bodies:
# Add earth as celestial object
image = plt.imread("utils/figures/720px-The_Earth_seen_from_Apollo_17.jpg")
radius = 320
scale = len(image) / (radius*2)
extent = (
(body.position[0] - float(body.size)) * scale,
(body.position[0] + float(body.size)) * scale,
(body.position[1] - float(body.size)) * scale,
(body.position[1] + float(body.size)) * scale
)
im = axis.imshow(image, extent=extent)
pos = body.position
patch = patches.Circle(pos, radius=float(body.size), transform=axis.transData)
im.set_clip_path(patch)
axis.autoscale_view()
def _calculate_row_col_needed(cells_needed: int):
    # Calculate the number of rows and columns needed
    # Aim is to get the grid as close to square as possible
    # Maybe later aim for a ratio near the display ratio?
quad_num = math.ceil(math.sqrt(cells_needed))
# Calculate how many rows are now actually needed
row_num = math.ceil(cells_needed / quad_num)
return row_num, quad_num
_sol_type_to_label = {"runtime": "Runtime", "min_sum": "MinSum", "local_min_sum": "LocalMinSum", "makespan": "Makespan"}
class VisTypes:
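    """Enumeration of the available visualization modes for solution scatter plots."""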
Absolute = 0
VsBest = 1
VsLB = 2
All = 3
LB_Runtime = 4
# From https://stackoverflow.com/questions/55767312/how-to-position-suptitle
def _make_space_above(axes, topmargin=1):
""" increase figure size to make topmargin (in inches) space for
titles, without changing the axes sizes"""
fig = axes.flatten()[0].figure
s = fig.subplotpars
w, h = fig.get_size_inches()
figh = h - (1-s.top)*h + topmargin
fig.subplots_adjust(bottom=s.bottom*h/figh, top=1-topmargin/figh)
fig.set_figheight(figh)
def visualize_solution_scatter(jobs: List['TaskJobs'], title,
path: Optional[str]=None, solution_type: Optional[str]=None,
logscale=False, ylabel=None, vis_type=VisTypes.Absolute,
loc=1, bbox_pos=None, top_margin=0.65,
show_legend=True):
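    """Collect solver results and lower bounds for the given jobs into a DataFrame
    and plot them, either as a single plot or as a grid of subplots depending on
    vis_type. If a path is given, the figure is saved (with ipe support) instead
    of being shown.
    """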
if solution_type is None:
solution_type = _get_dominant_solution_type(jobs)
    y_label = ylabel if ylabel else _sol_type_to_label[solution_type]
for s in tqdm.tqdm(jobs, desc="Calculate lower bounds"):
if s.solution is not None:
_get_LB(s.solution, solution_type)
df = pd.DataFrame(
[
{
"Solver": _get_solver_name(job),
"VertAmount": job.solution.graph.vert_amount,
"EdgeAmount": job.solution.graph.edge_amount,
"Graph_id": job.solution.graph.id,
"Runtime": float(job.solution.runtime) if job.prev_job is None else float(job.prev_job.solution.runtime+job.solution.runtime),
"MinSum": job.solution.min_sum,
"LocalMinSum": job.solution.local_min_sum,
"Makespan": job.solution.makespan,
"LB": _get_LB(job.solution, solution_type)}
for job in tqdm.tqdm(jobs, desc="Collect solution information") if job.solution is not None
])
# Then plot the data
if vis_type == VisTypes.All:
fig, axes = plt.subplots(nrows=2,ncols=2, sharex=True)
#fig.suptitle(title)
if isinstance(logscale, bool):
logscale = [logscale for i in range(4)]
if len(logscale) < 4:
logscale = logscale + [False for i in range(4-len(logscale))]
label_cols = 3
top_margin = top_margin+0.2
columns = _plot_data(df, solution_type, "Edge amount", y_label, logscale=logscale[0], vis_type=VisTypes.Absolute, ax=axes[0,0],)# show_legend=True)
columns = _plot_data(df, solution_type, "Edge amount", y_label, logscale=logscale[1], vis_type=VisTypes.VsBest, ax=axes[0,1],)# show_legend=True)
columns = _plot_data(df, solution_type, "Edge amount", y_label, logscale=logscale[2], vis_type=VisTypes.VsLB, ax=axes[1,0],)# show_legend=True)
columns = _plot_data(df, "runtime", "Edge amount", "Runtime", logscale=logscale[3], vis_type=VisTypes.Absolute, ax=axes[1,1],)# show_legend=True)
fig.set_size_inches(fig.get_size_inches()*1.5)
fig.tight_layout()
handles, labels = axes[1, 1].get_legend_handles_labels()
fig.legend(handles, labels, loc=loc, bbox_to_anchor=bbox_pos,\
ncol=label_cols)
_make_space_above(axes, top_margin)
#fig.legend([m_i[1] for m_i in columns], loc=loc, bbox_to_anchor=bbox_pos)
elif vis_type == VisTypes.LB_Runtime:
fig, axes = plt.subplots(nrows=1,ncols=2, sharex=True)
#fig.suptitle(title)
if isinstance(logscale, bool):
logscale = [logscale for i in range(2)]
        if len(logscale) < 2:
logscale = logscale + [False for i in range(2-len(logscale))]
label_cols = 3
top_margin = top_margin+0.25
columns = _plot_data(df, solution_type, "Edge amount", y_label, logscale=logscale[0], vis_type=VisTypes.VsLB, ax=axes[0],)# show_legend=True)
columns = _plot_data(df, "runtime", "Edge amount", "Runtime", logscale=logscale[1], vis_type=VisTypes.Absolute, ax=axes[1],)# show_legend=True)
fig.set_size_inches(fig.get_size_inches()*(1.3, 0.9))
fig.tight_layout()
handles, labels = axes[1].get_legend_handles_labels()
fig.legend(handles, labels, loc=loc, bbox_to_anchor=bbox_pos,\
ncol=label_cols)
_make_space_above(axes, top_margin)
else:
columns = _plot_data(df, solution_type, "Edge amount", y_label, logscale=logscale, vis_type=vis_type, show_legend=True)
plt.title(title)
if path is None:
plt.show()
else:
if path[-3:] == "ipe":
old_backend = matplotlib.get_backend()
matplotlib.use('module://backend_ipe')
save_format = "ipe"
plt.savefig(path, format=save_format)
matplotlib.use(old_backend)
else:
plt.savefig(path)
def _get_LB(sol: "AngularGraphSolution", solution_type):
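    """Return a lower bound on the objective for the given solution type, caching results per graph id."""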
graph = sol.graph
if solution_type == "local_min_sum":
try:
return _get_LB.local_min_sum_lbs[graph.id]
except KeyError:
lb = max(get_lower_bounds(graph))
_get_LB.local_min_sum_lbs[graph.id] = lb
return lb
if solution_type == "min_sum":
try:
return _get_LB.min_sum_lbs[graph.id]
except KeyError:
lb = sum(get_lower_bounds(graph))
_get_LB.min_sum_lbs[graph.id] = lb
return lb
if solution_type == "makespan":
try:
lb = _get_LB.makespan_lbs[graph.id]
except KeyError:
from solver.coloring_solver import Coloring_CP_Solver
from pyclustering.gcolor.dsatur import dsatur
if graph.edge_amount < 40:
solver = Coloring_CP_Solver()
colors = solver.solve(graph)
else:
dsatur_instance = dsatur(graph.ad_matrix)
dsatur_instance.process()
colors = dsatur_instance.get_colors()
lb = ((math.ceil(math.log2(max(colors)))-2) / 2) * 90
_get_LB.makespan_lbs[graph.id] = lb
if sol.makespan and lb > sol.makespan:
log_c_number = math.ceil(sol.makespan * 2 / 90) + 2
lb2 = ((math.ceil(log_c_number)-2) / 2) * 90
if lb > lb2:
_get_LB.makespan_lbs[graph.id] = lb2
lb = lb2
return lb
_get_LB.min_sum_lbs = {}
_get_LB.local_min_sum_lbs = {}
_get_LB.makespan_lbs = {}
def _get_dominant_solution_type(jobs: List['TaskJobs']):
sol_type = np.array([job.solution.solution_type for job in tqdm.tqdm(jobs, desc="Load solutions") if job.solution is not None])
types, counter = np.unique(sol_type, return_counts=True)
    max_index = np.argmax(counter)
    return types[max_index]
import numpy as np
import pandas as pd
import os
import time
from ldsc_polyfun import jackknife, regressions, sumstats, ldscore, parse
import logging
from copy import deepcopy
from tqdm import tqdm
from polyfun_utils import Logger, check_package_versions, set_snpid_index, configure_logger, get_file_name
from polyfun_utils import SNP_COLUMNS
from pyarrow import ArrowIOError
from pyarrow.lib import ArrowInvalid
from compute_ldscores_from_ld import compute_ldscores_chr
import tempfile
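# SNPs whose chi^2 exceeds max(0.001*N, MAX_CHI2) are excluded before running S-LDSC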
MAX_CHI2=80
def __filter__(fname, noun, verb, merge_obj):
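    """Read a list of IDs from fname and left-join it against merge_obj, logging the counts.

    Raises ValueError if nothing remains after merging.
    """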
merged_list = None
if fname:
f = lambda x,n: x.format(noun=noun, verb=verb, fname=fname, num=n)
x = parse.FilterFile(fname)
c = 'Read list of {num} {noun} to {verb} from {fname}'
logging.info(f(c, len(x.IDList)))
merged_list = merge_obj.loj(x.IDList)
len_merged_list = len(merged_list)
if len_merged_list > 0:
c = 'After merging, {num} {noun} remain'
logging.info(f(c, len_merged_list))
else:
error_msg = 'No {noun} retained for analysis'
raise ValueError(f(error_msg, 0))
return merged_list
def splash_screen():
print('*********************************************************************')
print('* PolyFun (POLYgenic FUNctionally-informed fine-mapping)')
print('* Version 1.0.0')
print('* (C) 2019-2021 <NAME>')
print('*********************************************************************')
print()
def check_args(args):
#verify that the requested computations are valid
mode_params = np.array([args.compute_h2_L2, args.compute_ldscores, args.compute_h2_bins])
if np.sum(mode_params)==0:
raise ValueError('must specify at least one of --compute-h2-L2, --compute-ldscores, --compute-h2-bins')
if args.compute_h2_L2 and args.compute_h2_bins and not args.compute_ldscores:
raise ValueError('cannot use both --compute-h2_L2 and --compute_h2_bins without also specifying --compute-ldscores')
if args.chr is not None:
if args.compute_h2_L2 or args.compute_h2_bins:
raise ValueError('--chr can only be specified when using only --compute-ldscores')
if args.bfile_chr is not None:
if not args.compute_ldscores:
raise ValueError('--bfile-chr can only be specified when using --compute-ldscores')
if args.ld_ukb:
if not args.compute_ldscores:
raise ValueError('--ld-ukb can only be specified when using --compute-ldscores')
if args.no_partitions:
if not args.compute_h2_L2:
raise ValueError('cannot specify --no-partitions without specifying --compute-h2-L2')
if args.compute_ldscores:
raise ValueError('cannot specify both --no-partitions and --compute-ldscores')
if args.compute_h2_bins:
raise ValueError('cannot specify both --no-partitions and --compute-h2-bins')
if args.compute_ldscores and args.compute_h2_bins and not args.compute_h2_L2:
raise ValueError('cannot use both --compute-ldscores and --compute_h2_bins without also specifying --compute-h2-L2')
#verify partitioning parameters
if args.skip_Ckmedian and (args.num_bins is None or args.num_bins<=0):
raise ValueError('You must specify --num-bins when using --skip-Ckmedian')
#verify LD-score related parameters
if args.ld_dir is not None and not args.ld_ukb:
raise ValueError('You cannot specify --ld-dir without also specifying --ld-ukb')
if args.bfile_chr is not None and args.ld_ukb:
raise ValueError('You can specify only one of --bfile-chr and --ld-ukb')
if args.compute_ldscores:
if args.bfile_chr is None and not args.ld_ukb:
raise ValueError('You must specify either --bfile-chr or --ld-ukb when you specify --compute-ldscores')
if not args.ld_ukb and (args.ld_wind_cm is None and args.ld_wind_kb is None and args.ld_wind_snps is None):
args.ld_wind_cm = 1.0
logging.warning('no ld-wind argument specified. PolyFun will use --ld-cm 1.0')
if not args.compute_ldscores:
if not (args.ld_wind_cm is None and args.ld_wind_kb is None and args.ld_wind_snps is None):
raise ValueError('--ld-wind parameters can only be specified together with --compute-ldscores')
if args.keep is not None:
raise ValueError('--keep can only be specified together with --compute-ldscores')
if args.chr is not None:
raise ValueError('--chr can only be specified together with --compute-ldscores')
if args.compute_h2_L2:
if args.sumstats is None:
raise ValueError('--sumstats must be specified when using --compute-h2-L2')
if args.ref_ld_chr is None:
raise ValueError('--ref-ld-chr must be specified when using --compute-h2-L2')
if args.w_ld_chr is None:
raise ValueError('--w-ld-chr must be specified when using --compute-h2-L2')
if args.compute_h2_bins:
if args.sumstats is None:
raise ValueError('--sumstats must be specified when using --compute-h2-bins')
if args.w_ld_chr is None:
raise ValueError('--w-ld-chr must be specified when using --compute-h2-bins')
if args.ref_ld_chr is not None and not args.compute_ldscores:
raise ValueError('--ref-ld-chr should not be specified when using --compute-h2-bins, unless you also use --compute-ldscores')
return args
def check_files(args):
#check that required input files exist
if args.compute_h2_L2:
if not os.path.exists(args.sumstats):
raise IOError('Cannot find sumstats file %s'%(args.sumstats))
for chr_num in range(1,23):
get_file_name(args, 'ref-ld', chr_num, verify_exists=True, allow_multiple=True)
get_file_name(args, 'w-ld', chr_num, verify_exists=True)
get_file_name(args, 'annot', chr_num, verify_exists=True, allow_multiple=True)
if args.compute_ldscores:
if args.chr is None: chr_range = range(1,23)
else: chr_range = range(args.chr, args.chr+1)
for chr_num in chr_range:
if args.bfile_chr is not None:
get_file_name(args, 'bim', chr_num, verify_exists=True)
get_file_name(args, 'fam', chr_num, verify_exists=True)
get_file_name(args, 'bed', chr_num, verify_exists=True)
if not args.compute_h2_L2:
get_file_name(args, 'snpvar_ridge', chr_num, verify_exists=True)
get_file_name(args, 'bins', chr_num, verify_exists=True)
if args.compute_h2_bins and not args.compute_ldscores:
for chr_num in range(1,23):
get_file_name(args, 'w-ld', chr_num, verify_exists=True)
if not args.compute_h2_L2:
get_file_name(args, 'bins', chr_num, verify_exists=True)
class PolyFun:
def __init__(self):
pass
def run_ldsc(self, args, use_ridge, nn, keep_large, evenodd_split, n_blocks=2):
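        """Run (optionally ridge-regularized) S-LDSC on the summary statistics and
        store the fitted Hsq object in self.hsqhat_ridge or self.hsqhat.
        """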
#prepare LDSC objects
log = Logger()
args.h2 = args.sumstats
args.ref_ld = None
args.w_ld = None
args.n_blocks = n_blocks
args.M = None
args.not_M_5_50 = True
        #if not ridge, then we'll use the LD-scores of our bins
if not use_ridge:
args = deepcopy(args)
args.ref_ld_chr = args.output_prefix+'.'
#read input data
if use_ridge or not args.compute_ldscores or True:
M_annot, w_ld_cname, ref_ld_cnames, df_sumstats, _ = sumstats._read_ld_sumstats(args, log, args.h2)
else:
#TODO: Don't reload files if we don't have to...
M_annot = self.M
w_ld_cname = 'w_ld'
ref_ld_cnames = self.df_bins.columns
try:
df_sumstats = pd.read_parquet(args.sumstats)
except (ArrowIOError, ArrowInvalid):
df_sumstats = pd.read_table(args.sumstats, sep='\s+')
###merge everything together...
#prepare LD-scores for S-LDSC run
ref_ld = np.array(df_sumstats[ref_ld_cnames], dtype=np.float32)
sumstats._check_ld_condnum(args, log, ref_ld_cnames)
if df_sumstats.shape[0] < 200000:
logging.warning('number of SNPs is smaller than 200k; this is almost always bad.')
n_snp = len(df_sumstats)
n_blocks = np.minimum(n_snp, args.n_blocks)
n_annot = len(ref_ld_cnames)
if n_annot<=1:
raise ValueError('Only one annotation found')
chisq_max = max(0.001*df_sumstats['N'].max(), MAX_CHI2)
#prepare chi2 statistics
s = lambda x: np.array(x).reshape((n_snp, 1))
chisq = s(df_sumstats.Z**2).astype(np.float32)
ii = np.ravel(chisq < chisq_max)
df_sumstats = df_sumstats.loc[ii, :]
if np.any(~ii):
logging.info('Removed {M} SNPs with chi^2 > {C} ({N} SNPs remain)'.format(
C=chisq_max, N=np.sum(ii), M=n_snp-np.sum(ii)))
n_snp = np.sum(ii) # lambdas are late-binding, so this works
ref_ld = np.array(df_sumstats[ref_ld_cnames], dtype=np.float32)
chisq = chisq[ii].reshape((n_snp, 1))
#Run S-LDSC
self.ref_ld_cnames = [c for c in ref_ld_cnames.str[:-2] if c not in SNP_COLUMNS]
hsqhat = regressions.Hsq(chisq,
ref_ld,
s(df_sumstats[w_ld_cname]),
s(df_sumstats.N),
M_annot, n_blocks=n_blocks, intercept=None,
twostep=None, old_weights=True,
chr_num=df_sumstats['CHR'],
loco=use_ridge, ridge_lambda=None,
standardize_ridge=True,
approx_ridge=True,
num_chr_sets=2,
evenodd_split=evenodd_split,
nn=nn,
keep_large=keep_large,
nnls_exact=args.nnls_exact
)
#save the results object
if use_ridge:
self.hsqhat_ridge = hsqhat
else:
self.hsqhat = hsqhat
def load_annotations_file(self, args, chr_num, use_ridge):
#load annotations file for this chromosome
if use_ridge:
annot_filenames = get_file_name(args, 'annot', chr_num, allow_multiple=True)
else:
annot_filenames = [get_file_name(args, 'bins', chr_num)]
#load annotation file(s)
df_annot_chr_list = []
for annot_filename in annot_filenames:
try:
df_annot_chr = pd.read_parquet(annot_filename)
except (ArrowIOError, ArrowInvalid):
df_annot_chr = pd.read_table(annot_filename)
df_annot_chr_list.append(df_annot_chr)
if len(df_annot_chr_list)==1:
df_annot_chr = df_annot_chr_list[0]
else:
for df in df_annot_chr_list[1:]:
for snp_col in SNP_COLUMNS:
if (df.shape[0] != df_annot_chr_list[0].shape[0]) or (np.any(df[snp_col] != df_annot_chr_list[0][snp_col])):
raise ValueError('Different annotation files of chromosome %d must be perfectly aligned'%(chr_num))
df.drop(columns=['CM'], inplace=True, errors='ignore')
df.drop(columns=SNP_COLUMNS, inplace=True, errors='raise')
df_annot_chr = pd.concat(df_annot_chr_list, axis=1)
#make sure all required columns were found
df_annot_chr.drop(columns=['CM'], inplace=True, errors='ignore')
found_missing_col = False
for colname in SNP_COLUMNS:
if colname not in df_annot_chr.columns:
logging.error('%s has a missing column: %s'%(annot_filename, colname))
found_missing_col = True
if found_missing_col:
raise ValueError('Missing columns found in %s'%(annot_filename))
#subset annotations if requested
if args.anno is not None:
anno_to_use = args.anno.split(',')
assert np.all(np.isin(anno_to_use, df_annot_chr.columns))
df_annot_chr = df_annot_chr[SNP_COLUMNS + anno_to_use]
        #if we have more annotations than ref-ld, it might mean that some annotations were removed, so remove them from here as well
if not np.all(np.isin(self.ref_ld_cnames, df_annot_chr.columns)):
raise ValueError('Annotation names in annotations file do not match the one in the LD-scores file')
if len(self.ref_ld_cnames) < len(df_annot_chr.columns) - len(SNP_COLUMNS):
df_annot_chr = df_annot_chr[SNP_COLUMNS + self.ref_ld_cnames]
#make sure that we get the same columns as the ones in the LD-score files
if not np.all([c for c in df_annot_chr.columns if c not in SNP_COLUMNS ]== self.ref_ld_cnames):
raise ValueError('Annotation names in annotations file do not match the one in the LD-scores file')
return df_annot_chr
def compute_snpvar_chr(self, args, chr_num, use_ridge):
#load annotations file from disk
df_annot_chr = self.load_annotations_file(args, chr_num, use_ridge)
#extract taus from a jknife object
if use_ridge:
hsqhat = self.hsqhat_ridge
jknife = hsqhat.jknife_ridge
#make sure that the chromosome exists in one set
found_chrom = np.any([chr_num in chr_set for chr_set in jknife.chromosome_sets])
if not found_chrom:
raise ValueError('not all chromosomes have a taus estimate - please make sure that the intersection of SNPs with sumstats and with annotations data spans all 22 human chromosomes')
#find the relevant set number
set_num=None
for chr_set_i, chr_set in enumerate(jknife.chromosome_sets):
if chr_num not in chr_set:
assert set_num is None
set_num = chr_set_i
if set_num is None:
raise ValueError('Could not find Ridge predictions for chromosome %d'%(chr_num))
#compute and return snpvar
taus = jknife.est_loco_ridge[set_num][:hsqhat.n_annot] / hsqhat.Nbar
else:
hsqhat = self.hsqhat
jknife = hsqhat.jknife
if len(jknife.est_loco) != 22:
raise ValueError('not all chromosomes have a taus estimate - please make sure that the intersection of SNPs with sumstats and with annotations data spans all 22 human chromosomes')
taus = jknife.est_loco[chr_num-1][:hsqhat.n_annot] / hsqhat.Nbar
#save the taus to disk
taus_output_file = get_file_name(args, ('taus_ridge' if use_ridge else 'taus_nn'), chr_num, verify_exists=False)
df_taus = pd.Series(taus, index=df_annot_chr.drop(columns=SNP_COLUMNS, errors='raise').columns)
df_taus.index.name = 'ANNOTATION'
df_taus.name = 'ANNOTATION_COEFFICIENT'
df_taus.to_csv(taus_output_file, header=True, index=True, sep='\t')
#compute and return the snp variances
df_snpvar_chr = df_annot_chr.drop(columns=SNP_COLUMNS, errors='raise').dot(taus)
df_snpvar_chr = df_snpvar_chr.to_frame(name='SNPVAR')
df_snpvar_chr = pd.concat((df_annot_chr[SNP_COLUMNS], df_snpvar_chr), axis=1)
return df_snpvar_chr
def compute_snpvar(self, args, use_ridge):
logging.info('Computing per-SNP h^2 for each chromosome...')
#iterate over chromosomes
df_snpvar_chr_list = []
for chr_num in tqdm(range(1,23)):
df_snpvar_chr = self.compute_snpvar_chr(args, chr_num, use_ridge=use_ridge)
df_snpvar_chr_list.append(df_snpvar_chr)
df_snpvar = pd.concat(df_snpvar_chr_list, axis=0)
df_snpvar.reset_index(inplace=True, drop=True)
#save snpvar to a class member
if use_ridge:
self.df_snpvar_ridge = df_snpvar
else:
self.df_snpvar = df_snpvar
def create_df_bins(self, bin_sizes, df_snpvar, df_snpvar_sorted=None, min_bin_size=10):
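        """Build a one-hot DataFrame that assigns every SNP to a bin, first merging
        bins smaller than min_bin_size into their neighbours.
        """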
#sort df_snpvar if needed
if df_snpvar_sorted is None:
df_snpvar_sorted = df_snpvar['SNPVAR'].sort_values()
assert bin_sizes.sum() == df_snpvar_sorted.shape[0]
#rearrange bins to prevent very small bins
bin_i = len(bin_sizes)-1
while True:
#if the current bin is large enough, proceed to the previous one
if bin_sizes[bin_i] >= min_bin_size:
bin_i -= 1
if bin_i==0: break
continue
            #Compare the weakest SNP in the current bin with the strongest SNP in the previous bin
bin_start_ind = bin_sizes[:bin_i].sum()
weakest_bin_snp = df_snpvar_sorted.iloc[::-1].iloc[bin_start_ind]
strongest_lastbin_snp = df_snpvar_sorted.iloc[::-1].iloc[bin_start_ind-1]
num_snps_to_transfer = np.minimum(min_bin_size-bin_sizes[bin_i], bin_sizes[bin_i-1])
bin_sizes[bin_i] += num_snps_to_transfer
bin_sizes[bin_i-1] -= num_snps_to_transfer
#if we emptied the previous bin, delete it
if bin_sizes[bin_i-1]==0:
bin_sizes = np.concatenate((bin_sizes[:bin_i-1], bin_sizes[bin_i:]))
bin_i -= 1
#if the current bin is large enough, move to the previous one
if bin_sizes[bin_i] >= min_bin_size:
bin_i -= 1
if bin_i==0: break
#create df_bins
ind=0
df_bins = pd.DataFrame(index=df_snpvar_sorted.index)
for bin_i, bin_size in enumerate(bin_sizes):
            snpvar_bin = np.zeros(df_bins.shape[0], dtype=bool)
snpvar_bin[ind : ind+bin_size] = True
df_bins['snpvar_bin%d'%(len(bin_sizes) - bin_i)] = snpvar_bin
ind += bin_size
assert np.all(df_bins.sum(axis=0) == bin_sizes)
df_bins = df_bins.iloc[:, ::-1]
assert df_bins.shape[0] == df_snpvar.shape[0]
assert np.all(df_bins.sum(axis=1)==1)
#reorder df_bins
df_bins = df_bins.loc[df_snpvar.index]
df_bins = pd.concat((df_snpvar[SNP_COLUMNS], df_bins), axis=1)
assert np.all(df_bins.index == df_snpvar.index)
return df_bins
def partition_snps_Ckmedian(self, args, use_ridge):
logging.info('Clustering SNPs into bins using the R Ckmeans.1d.dp package')
#try loading the Ckmeans.1d.dp package
try:
import rpy2
import rpy2.robjects.numpy2ri as numpy2ri
try:
from importlib import reload
reload(rpy2.robjects.numpy2ri)
except:
pass
import rpy2.robjects as ro
ro.conversion.py2ri = numpy2ri
numpy2ri.activate()
from rpy2.robjects.packages import importr
importr('Ckmeans.1d.dp')
median_seg_func = ro.r('Ckmedian.1d.dp')
mean_seg_func = ro.r('Ckmeans.1d.dp')
except:
logging.error('Could not load the R package Ckmeans.1d.dp. Either install it or rerun PolyFun with --skip-Ckmedian')
logging.error('')
raise
#access the right class member
if use_ridge:
df_snpvar = self.df_snpvar_ridge
else:
df_snpvar = self.df_snpvar
#sort df_snpvar
df_snpvar_sorted = df_snpvar['SNPVAR'].sort_values()
#perform the segmentation
if args.num_bins is None or args.num_bins<=0:
logging.info('Determining the optimal number of bins (if this is slow, consider using --num-bins 20 (or some other number))')
seg_obj = median_seg_func(df_snpvar_sorted.values, k=np.array([5,30]))
else:
seg_obj = median_seg_func(df_snpvar_sorted.values, k=args.num_bins)
        bin_sizes = np.array(seg_obj.rx2('size')).astype(int)
num_bins = len(bin_sizes)
logging.info('Ckmedian.1d.dp partitioned SNPs into %d bins'%(num_bins))
#define df_bins
df_bins = self.create_df_bins(bin_sizes, df_snpvar, df_snpvar_sorted=df_snpvar_sorted)
return df_bins
def partition_snps_Kmeans(self, args, use_ridge):
logging.info('Clustering SNPs into bins using K-means clustering with %d bins'%(args.num_bins))
#make sure that we can run K-means clustering
assert args.num_bins is not None and args.num_bins>0
try:
from sklearn.cluster import KMeans
except ImportError:
raise ImportError('sklearn not properly installed. Please reinstall it')
#access the right class member
if use_ridge: df_snpvar = self.df_snpvar_ridge
else: df_snpvar = self.df_snpvar
#perform K-means clustering
kmeans_obj = KMeans(n_clusters=args.num_bins)
kmeans_obj.fit(df_snpvar[['SNPVAR']])
assert kmeans_obj.cluster_centers_.shape[0] == args.num_bins
#Make sure that clusters are contiguous
bins_order = np.argsort(kmeans_obj.cluster_centers_[:,0])
for bin_i, cluster_label in enumerate(bins_order[:-1]):
next_cluster_label = bins_order[bin_i+1]
assert df_snpvar.loc[kmeans_obj.labels_==cluster_label, 'SNPVAR'].max() <= df_snpvar.loc[kmeans_obj.labels_==next_cluster_label, 'SNPVAR'].min()
#define bin_sizes
bin_sizes = np.bincount(kmeans_obj.labels_)[bins_order]
#define df_bins
df_bins = self.create_df_bins(bin_sizes, df_snpvar, df_snpvar_sorted=None)
return df_bins
def partition_snps_to_bins(self, args, use_ridge):
#if skip_ckmedian was specified, run regular K-means
if args.skip_Ckmedian:
self.df_bins = self.partition_snps_Kmeans(args, use_ridge=use_ridge)
else:
self.df_bins = self.partition_snps_Ckmedian(args, use_ridge=use_ridge)
def save_bins_to_disk(self, args):
logging.info('Saving SNP-bins to disk')
for chr_num in tqdm(range(1,23)):
#save bins file to disk
df_bins_chr = self.df_bins.query('CHR==%d'%(chr_num))
bins_chr_file = get_file_name(args, 'bins', chr_num, verify_exists=False)
df_bins_chr.to_parquet(bins_chr_file, index=False)
#save M files to disk
M_chr_file = get_file_name(args, 'M', chr_num, verify_exists=False)
M_chr = df_bins_chr.drop(columns=SNP_COLUMNS).sum(axis=0).values
np.savetxt(M_chr_file, M_chr.reshape((1, M_chr.shape[0])), fmt='%i')
def save_snpvar_to_disk(self, args, use_ridge, constrain_range):
if constrain_range:
logging.info('Saving constrained SNP variances to disk')
else:
logging.info('Saving SNP variances to disk')
#determine which df_snpvar to use
if use_ridge: df_snpvar = self.df_snpvar_ridge
else: df_snpvar = self.df_snpvar
#constrain the ratio between the largest and smallest snp-var
if constrain_range:
df_snpvar = df_snpvar.copy()
h2_total = df_snpvar['SNPVAR'].sum()
min_snpvar = df_snpvar['SNPVAR'].max() / args.q
df_snpvar.loc[df_snpvar['SNPVAR'] < min_snpvar, 'SNPVAR'] = min_snpvar
df_snpvar['SNPVAR'] *= h2_total / df_snpvar['SNPVAR'].sum()
assert np.isclose(df_snpvar['SNPVAR'].sum(), h2_total)
#merge snpvar with sumstats
try:
df_sumstats = pd.read_parquet(args.sumstats)
except (ArrowIOError, ArrowInvalid):
df_sumstats = pd.read_table(args.sumstats, sep='\s+')
df_sumstats.drop(columns=['SNP'], errors='ignore', inplace=True)
for col in ['CHR', 'BP', 'A1', 'A2']:
if col not in df_sumstats.columns:
raise ValueError('sumstats file has a missing column: %s'%(col))
df_snpvar = set_snpid_index(df_snpvar, copy=True)
df_sumstats = set_snpid_index(df_sumstats)
        snpvar_cols = df_snpvar.columns.copy()
        df_snpvar.drop(columns=['CHR', 'BP', 'A1', 'A2'], inplace=True)
        df_snpvar = df_snpvar.merge(df_sumstats, left_index=True, right_index=True)
        df_snpvar = df_snpvar[list(snpvar_cols) + [c for c in df_sumstats.columns if c not in list(snpvar_cols)]]
if df_snpvar.shape[0] < df_sumstats.shape[0]:
error_message = 'not all SNPs in the sumstats file and/or in the LD reference files are also in the annotations file'
if args.allow_missing:
logging.warning(error_message + '. Keeping %d/%d SNPs'%(df_snpvar.shape[0], df_sumstats.shape[0]))
else:
raise ValueError(error_message + '. If you wish to omit the missing SNPs, please use the flag --allow-missing')
#iterate over chromosomes
for chr_num in tqdm(range(1,23)):
#define output file name
output_fname = 'snpvar'
if use_ridge: output_fname += '_ridge'
if constrain_range: output_fname += '_constrained'
snpvar_chr_file = get_file_name(args, output_fname, chr_num, verify_exists=False)
#save snpvar to file
df_snpvar_chr = df_snpvar.query('CHR==%d'%(chr_num))
df_snpvar_chr.to_csv(snpvar_chr_file, index=False, sep='\t', compression='gzip', float_format='%0.4e')
def polyfun_h2_L2(self, args):
#run Ridge regression
self.run_ldsc(args, use_ridge=True, nn=False, evenodd_split=False, keep_large=False)
#compute per-SNP h^2 based on L2-regularized S-LDSC coefficients
self.compute_snpvar(args, use_ridge=True)
#save L2-regularized S-LDSC per-SNP h^2 to disk
self.save_snpvar_to_disk(args, use_ridge=True, constrain_range=True)
self.save_snpvar_to_disk(args, use_ridge=True, constrain_range=False)
        #partition SNPs into bins and save them to disk, unless explicitly requested not to
if not args.no_partitions:
self.partition_snps_to_bins(args, use_ridge=True)
self.save_bins_to_disk(args)
def load_bins_chr(self, args, chr_num):
bins_file = get_file_name(args, 'bins', chr_num)
df_bins_chr = pd.read_parquet(bins_file)
return df_bins_chr
def compute_ld_scores(self, args):
#define the range of chromosomes to iterate over
if args.chr is None:
chr_range = range(1,23)
else:
chr_range = range(args.chr, args.chr+1)
#iterate over chromosomes and compute LD-scores
###df_ldscores_chr_list = []
for chr_num in tqdm(chr_range, disable=len(chr_range)==1):
#load or extract the bins for the current chromosome
try:
df_bins_chr = self.df_bins.query('CHR==%d'%(chr_num))
except AttributeError:
df_bins_chr = self.load_bins_chr(args, chr_num)
#compute LD-scores for this chromosome
if args.ld_ukb:
if args.ld_dir is None: ld_dir = tempfile.mkdtemp()
else: ld_dir = args.ld_dir
df_bins_chr = set_snpid_index(df_bins_chr)
df_ldscores_chr = compute_ldscores_chr(df_bins_chr, ld_dir=ld_dir, use_ukb=True)
elif args.bfile_chr is not None:
df_ldscores_chr = self.compute_ldscores_plink_chr(args, chr_num, df_bins_chr)
else:
raise ValueError('no LDscore computation method specified')
#save the LD-scores to disk
ldscores_output_file = get_file_name(args, 'ldscores', chr_num, verify_exists=False)
df_ldscores_chr.to_parquet(ldscores_output_file, index=False)
# #add the ldscores to the LDscores list
# df_ldscores_chr_list.append(df_ldscores_chr)
# #concatenate all the LD-score dfs
# if len(df_ldscores_chr_list)==1:
# self.df_bin_ldscores = df_ldscores_chr_list[0]
# else:
# self.df_bin_ldscores = pd.concat(df_ldscores_chr_list, axis=0)
# self.df_bin_ldscores.reset_index(inplace=True, drop=True)
def compute_ldscores_plink_chr(self, args, chr_num, df_bins_chr):
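        """Compute LD-scores for a single chromosome from a PLINK bfile, keeping only
        SNPs that have bin annotations and aligning the annotations to the genotype data.
        """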
# read bim/snp
bim_file = get_file_name(args, 'bim', chr_num)
array_snps = parse.PlinkBIMFile(bim_file)
df_bim = array_snps.df
df_bim = set_snpid_index(df_bim)
#Remove annotations of SNPs that are not in the .bim file
df_bins_chr = set_snpid_index(df_bins_chr)
df_bins_chr = df_bins_chr.loc[df_bins_chr.index.isin(df_bim.index)]
#make sure that all SNPs have a bin
keep_snps = None
if np.any(~df_bim.index.isin(df_bins_chr.index)):
error_msg = 'Not all SNPs were assigned a bin (meaning some SNPS in the summary statistics and/or in the LD reference files are not in the annotation files)'
if args.allow_missing:
is_good_snp = df_bim.index.isin(df_bins_chr.index)
if not np.any(is_good_snp):
raise ValueError('No SNPs in chromosome %d have annotations'%(chr_num))
keep_snps = np.where(is_good_snp)[0]
logging.warning(error_msg)
                logging.warning('Keeping only %d/%d SNPs in chromosome %d that have annotations'%(len(keep_snps), df_bim.shape[0], chr_num))
else:
raise ValueError(error_msg + '. If you wish to omit the missing SNPs, please use the flag --allow-missing')
#find #individuals in bfile
fam_file = get_file_name(args, 'fam', chr_num)
df_fam = pd.read_table(fam_file, header=None, usecols=[5], sep='\s+')
n = df_fam.shape[0]
#find keep_indivs
if args.keep is None:
keep_indivs= None
else:
array_indivs = parse.PlinkFAMFile(args.bfile+'.fam')
keep_indivs = __filter__(args.keep, 'individuals', 'include', array_indivs)
logging.info('after applying --keep, %d individuals remain'%(len(keep_indivs)))
#read plink file
logging.info('Loading SNP file...')
bed_file = get_file_name(args, 'bed', chr_num)
geno_array = ldscore.PlinkBEDFile(bed_file, n, array_snps, keep_snps=keep_snps,
keep_indivs=keep_indivs, mafMin=None)
#remove omitted SNPs from df_bim
if len(geno_array.kept_snps) != df_bim.shape[0]:
assert np.all(np.array(geno_array.kept_snps) == np.sort(np.array(geno_array.kept_snps)))
assert geno_array.kept_snps[-1] < df_bim.shape[0]
df_bim = df_bim.iloc[geno_array.kept_snps]
#rearrange annotations to match the order of SNPs in the plink file
assert df_bins_chr.shape[0] >= df_bim.shape[0]
if (df_bins_chr.shape[0] > df_bim.shape[0]) or np.any(df_bins_chr.index != df_bim.index):
assert np.all(df_bim.index.isin(df_bins_chr.index))
df_bins_chr = df_bins_chr.loc[df_bim.index]
# determine block widths
num_wind_args = np.array((args.ld_wind_snps, args.ld_wind_kb, args.ld_wind_cm), dtype=bool)
if np.sum(num_wind_args) != 1:
raise ValueError('Must specify exactly one --ld-wind option')
if args.ld_wind_snps:
max_dist = args.ld_wind_snps
coords = np.array(list(range(geno_array.m)))
elif args.ld_wind_kb:
max_dist = args.ld_wind_kb*1000
coords = np.array(df_bim['BP'])
if len(np.unique(coords)) == 1:
raise ValueError('bim file has no basepair data --- please use a different ld-wind option')
elif args.ld_wind_cm:
max_dist = args.ld_wind_cm
coords = np.array(df_bim['CM'])
            if len(np.unique(coords)) == 1:
                raise ValueError('bim file has no CM data --- please use a different ld-wind option')
# pylint: disable=missing-function-docstring, missing-module-docstring/
import pytest
import numpy as np
from numpy.random import randint
from pyccel.epyccel import epyccel
from modules import arrays
#==============================================================================
# TEST: 1D ARRAYS OF INT-32
#==============================================================================
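# Each test compiles the pure-Python function with epyccel and checks that the
# compiled version produces the same array as the Python original.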
def test_array_int32_1d_scalar_add(language):
f1 = arrays.array_int32_1d_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_scalar_sub(language):
f1 = arrays.array_int32_1d_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_scalar_mul(language):
f1 = arrays.array_int32_1d_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_scalar_div(language):
f1 = arrays.array_int32_1d_scalar_div
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = 1, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_scalar_idiv(language):
f1 = arrays.array_int32_1d_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = 1, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_add(language):
f1 = arrays.array_int32_1d_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_sub(language):
f1 = arrays.array_int32_1d_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_mul(language):
f1 = arrays.array_int32_1d_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_idiv(language):
f1 = arrays.array_int32_1d_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_add_augassign(language):
f1 = arrays.array_int32_1d_add_augassign
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_sub_augassign(language):
f1 = arrays.array_int32_1d_sub_augassign
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="Numpy sum not yet implemented for C language"),
pytest.mark.c]),
pytest.param("fortran", marks = pytest.mark.fortran)
]
)
def test_array_int_1d_initialization_1(language):
f1 = arrays.array_int_1d_initialization_1
f2 = epyccel( f1 , language = language)
assert np.array_equal(f1(), f2())
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="Numpy sum not yet implemented for C language"),
pytest.mark.c]),
pytest.param("fortran", marks = pytest.mark.fortran)
]
)
def test_array_int_1d_initialization_2(language):
f1 = arrays.array_int_1d_initialization_2
f2 = epyccel( f1 , language = language)
assert np.array_equal(f1(), f2())
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="Numpy sum not yet implemented for C language"),
pytest.mark.c]),
pytest.param("fortran", marks = pytest.mark.fortran)
]
)
def test_array_int_1d_initialization_3(language):
f1 = arrays.array_int_1d_initialization_3
f2 = epyccel( f1 , language = language)
assert np.array_equal(f1(), f2())
#==============================================================================
# TEST: 2D ARRAYS OF INT-32 WITH C ORDERING
#==============================================================================
def test_array_int32_2d_C_scalar_add(language):
f1 = arrays.array_int32_2d_C_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_scalar_sub(language):
f1 = arrays.array_int32_2d_C_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_scalar_mul(language):
f1 = arrays.array_int32_2d_C_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_scalar_idiv(language):
f1 = arrays.array_int32_2d_C_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = 1, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_add(language):
f1 = arrays.array_int32_2d_C_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_sub(language):
f1 = arrays.array_int32_2d_C_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_mul(language):
f1 = arrays.array_int32_2d_C_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_idiv(language):
f1 = arrays.array_int32_2d_C_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
#==============================================================================
# TEST: 2D ARRAYS OF INT-32 WITH F ORDERING
#==============================================================================
def test_array_int32_2d_F_scalar_add(language):
f1 = arrays.array_int32_2d_F_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_scalar_sub(language):
f1 = arrays.array_int32_2d_F_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_scalar_mul(language):
f1 = arrays.array_int32_2d_F_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_scalar_idiv(language):
f1 = arrays.array_int32_2d_F_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = randint(low = 1, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_add(language):
f1 = arrays.array_int32_2d_F_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32, order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_sub(language):
f1 = arrays.array_int32_2d_F_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32, order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_mul(language):
f1 = arrays.array_int32_2d_F_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32, order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_idiv(language):
f1 = arrays.array_int32_2d_F_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32, order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
#==============================================================================
# TEST: 1D ARRAYS OF INT-64
#==============================================================================
def test_array_int_1d_scalar_add(language):
f1 = arrays.array_int_1d_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_scalar_sub(language):
f1 = arrays.array_int_1d_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_scalar_mul(language):
f1 = arrays.array_int_1d_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_scalar_idiv(language):
f1 = arrays.array_int_1d_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_add(language):
f1 = arrays.array_int_1d_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = np.array( [1,2,3] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_sub(language):
f1 = arrays.array_int_1d_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = np.array( [1,2,3] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_mul(language):
f1 = arrays.array_int_1d_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = np.array( [1,2,3] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_idiv(language):
f1 = arrays.array_int_1d_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = np.array( [1,2,3] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
#==============================================================================
# TEST: 2D ARRAYS OF INT-64 WITH C ORDERING
#==============================================================================
def test_array_int_2d_C_scalar_add(language):
f1 = arrays.array_int_2d_C_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_scalar_sub(language):
f1 = arrays.array_int_2d_C_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_scalar_mul(language):
f1 = arrays.array_int_2d_C_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_scalar_idiv(language):
f1 = arrays.array_int_2d_C_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_add(language):
f1 = arrays.array_int_2d_C_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_sub(language):
f1 = arrays.array_int_2d_C_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_mul(language):
f1 = arrays.array_int_2d_C_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_idiv(language):
f1 = arrays.array_int_2d_C_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_initialization(language):
f1 = arrays.array_int_2d_C_initialization
f2 = epyccel(f1, language = language)
x1 = np.zeros((2, 3), dtype=int)
x2 = np.ones_like(x1)
f1(x1)
f2(x2)
assert np.array_equal(x1, x2)
#==============================================================================
# TEST: 2D ARRAYS OF INT-64 WITH F ORDERING
#==============================================================================
def test_array_int_2d_F_scalar_add(language):
f1 = arrays.array_int_2d_F_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_F_scalar_sub(language):
f1 = arrays.array_int_2d_F_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_F_scalar_mul(language):
f1 = arrays.array_int_2d_F_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_F_scalar_idiv(language):
f1 = arrays.array_int_2d_F_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_F_add(language):
f1 = arrays.array_int_2d_F_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_F_sub(language):
f1 = arrays.array_int_2d_F_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_F_mul(language):
f1 = arrays.array_int_2d_F_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_F_idiv(language):
f1 = arrays.array_int_2d_F_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_F_initialization(language):
f1 = arrays.array_int_2d_F_initialization
f2 = epyccel(f1, language = language)
x1 = np.zeros((2, 3), dtype=int, order='F')
x2 = np.ones_like(x1)
f1(x1)
f2(x2)
assert np.array_equal(x1, x2)
#==============================================================================
# TEST: 1D ARRAYS OF REAL
#==============================================================================
def test_array_real_1d_scalar_add(language):
f1 = arrays.array_real_1d_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [1.,2.,3.] )
x2 = np.copy(x1)
a = 5.
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_1d_scalar_sub(language):
f1 = arrays.array_real_1d_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [1.,2.,3.] )
x2 = np.copy(x1)
a = 5.
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_1d_scalar_mul(language):
f1 = arrays.array_real_1d_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [1.,2.,3.] )
x2 = np.copy(x1)
a = 5.
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_1d_scalar_div(language):
f1 = arrays.array_real_1d_scalar_div
f2 = epyccel( f1 , language = language)
x1 = np.array( [1.,2.,3.] )
x2 = np.copy(x1)
a = 5.
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_1d_scalar_idiv(language):
f1 = arrays.array_real_1d_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [1.,2.,3.] )
x2 = np.copy(x1)
a = 5.
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_1d_add(language):
f1 = arrays.array_real_1d_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [1.,2.,3.] )
x2 = np.copy(x1)
a = np.array( [1.,2.,3.] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_1d_sub(language):
f1 = arrays.array_real_1d_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [1.,2.,3.] )
x2 = np.copy(x1)
a = np.array( [1.,2.,3.] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_1d_mul(language):
f1 = arrays.array_real_1d_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [1.,2.,3.] )
x2 = np.copy(x1)
a = np.array( [1.,2.,3.] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_1d_div(language):
f1 = arrays.array_real_1d_div
f2 = epyccel( f1 , language = language)
x1 = np.array( [1.,2.,3.] )
x2 = np.copy(x1)
a = np.array( [1.,2.,3.] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_1d_idiv(language):
f1 = arrays.array_real_1d_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [1.,2.,3.] )
x2 = np.copy(x1)
a = np.array( [1.,2.,3.] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
#==============================================================================
# TEST: 2D ARRAYS OF REAL WITH C ORDERING
#==============================================================================
def test_array_real_2d_C_scalar_add(language):
f1 = arrays.array_real_2d_C_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] )
x2 = np.copy(x1)
a = 5.
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_C_scalar_sub(language):
f1 = arrays.array_real_2d_C_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] )
x2 = np.copy(x1)
a = 5.
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_C_scalar_mul(language):
f1 = arrays.array_real_2d_C_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] )
x2 = np.copy(x1)
a = 5.
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_C_scalar_div(language):
f1 = arrays.array_real_2d_C_scalar_div
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] )
x2 = np.copy(x1)
a = 5.
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_C_add(language):
f1 = arrays.array_real_2d_C_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] )
x2 = np.copy(x1)
a = np.array( [[-1.,-2.,-3.], [-4.,-5.,-6.]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_C_sub(language):
f1 = arrays.array_real_2d_C_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] )
x2 = np.copy(x1)
a = np.array( [[-1.,-2.,-3.], [-4.,-5.,-6.]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_C_mul(language):
f1 = arrays.array_real_2d_C_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] )
x2 = np.copy(x1)
a = np.array( [[-1.,-2.,-3.], [-4.,-5.,-6.]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_C_div(language):
f1 = arrays.array_real_2d_C_div
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] )
x2 = np.copy(x1)
a = np.array( [[-1.,-2.,-3.], [-4.,-5.,-6.]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_C_array_initialization(language):
f1 = arrays.array_real_2d_C_array_initialization
f2 = epyccel(f1, language = language)
x1 = np.zeros((2, 3), dtype=float )
x2 = np.ones_like(x1)
f1(x1)
f2(x2)
assert np.array_equal(x1, x2)
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="array function doesn't handle list of variables. See #752"),
pytest.mark.c]),
pytest.param("fortran", marks = pytest.mark.fortran)
]
)
def test_array_real_3d_C_array_initialization_1(language):
f1 = arrays.array_real_3d_C_array_initialization_1
f2 = epyccel(f1, language = language)
x = np.random.random((3,2))
y = np.random.random((3,2))
a = np.array([x,y])
x1 = np.zeros_like(a)
    x2 = np.zeros_like(a)
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# Copyright 2020 Huawei Technologies Co., Ltd
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
# -------------------------------------------------------------------------
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from . import data_utils
from .dataset import Dataset
import itertools
import os
import sys
from .dictionary import Dictionary
from .indexed_dataset import IndexedDataset, IndexedInMemoryDataset, IndexedRawTextDataset # noqa: F401
class LanguagePairDataset(Dataset):
"""A pair of torch.utils.data.Datasets."""
def __init__(
self, src, src_sizes, src_dict,
tgt=None, tgt_sizes=None, tgt_dict=None,
left_pad_source=True, left_pad_target=False,
max_source_positions=1024, max_target_positions=1024,
pad_sequence=1, shuffle=True, gd_size=0,
):
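        # Source and target dictionaries must agree on the special symbols
        # (pad/eos/unk) so batches from both sides can be padded consistently.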
if tgt_dict is not None:
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
self.src = src
self.tgt = tgt
self.src_sizes = np.array(src_sizes)
self.tgt_sizes = np.array(tgt_sizes) if tgt_sizes is not None else None
self.src_dict = src_dict
self.tgt_dict = tgt_dict
self.left_pad_source = left_pad_source
self.left_pad_target = left_pad_target
self.max_source_positions = max_source_positions
self.max_target_positions = max_target_positions
self.pad_sequence = pad_sequence
self.shuffle = shuffle
self.gd_size = gd_size
def __getitem__(self, index):
return {
'id': index,
'source': self.src[index],
'target': self.tgt[index] if self.tgt is not None else None,
}
def __len__(self):
return len(self.src)
def collater(self, samples):
"""Merge a list of samples to form a mini-batch."""
return data_utils.collate(
samples, pad_idx=self.src_dict.pad(), eos_idx=self.src_dict.eos(),
left_pad_source=self.left_pad_source, left_pad_target=self.left_pad_target,
pad_sequence=self.pad_sequence,
gd_size = self.gd_size,
)
def num_tokens(self, index):
"""Return an example's length (number of tokens), used for batching."""
orig_size = max(self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0)
assert self.pad_sequence > 0, "Padding multiple has to be greater than 0"
size = 0
if self.pad_sequence > 1:
size = orig_size // self.pad_sequence * self.pad_sequence
if orig_size % self.pad_sequence > 0:
size += self.pad_sequence
else:
size = orig_size
return size
# return max(self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0)
def ordered_indices(self, seed=None, epoch=1):
"""Ordered indices for batching."""
if self.shuffle:
indices = np.random.RandomState(seed + epoch).permutation(len(self))
else:
indices = np.arange(len(self))
if self.tgt_sizes is not None:
indices = indices[np.argsort(self.tgt_sizes[indices], kind='mergesort')]
return indices[np.argsort(self.src_sizes[indices], kind='mergesort')]
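# A hedged sketch (not part of fairseq) of how LanguagePairDataset is typically
# consumed: indices from ordered_indices() are grouped into fixed-size batches
# and collated into padded tensors with collater(). The batch_size value and
# the helper name are illustrative assumptions.
def _example_batching(dataset, batch_size=32):
    indices = dataset.ordered_indices(seed=1, epoch=1)
    for start in range(0, len(indices), batch_size):
        samples = [dataset[int(i)] for i in indices[start:start + batch_size]]
        yield dataset.collater(samples)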
import logging
import multiprocessing
from collections import Iterable
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.decomposition import PCA
from . import _tsne
from .affinity import Affinities, NearestNeighborAffinities
from .quad_tree import QuadTree
EPSILON = np.finfo(np.float64).eps
log = logging.getLogger(__name__)
def _check_callbacks(callbacks):
if callbacks is not None:
# If list was passed, make sure all of them are actually callable
if isinstance(callbacks, Iterable):
if any(not callable(c) for c in callbacks):
raise ValueError('`callbacks` must contain callable objects!')
# The gradient descent method deals with lists
elif callable(callbacks):
callbacks = (callbacks,)
else:
raise ValueError('`callbacks` must be a callable object!')
return callbacks
def _handle_nice_params(optim_params: dict) -> None:
"""Convert the user friendly params into something the optimizer can
understand."""
# Handle callbacks
optim_params['callbacks'] = _check_callbacks(optim_params['callbacks'])
optim_params['use_callbacks'] = optim_params['callbacks'] is not None
# Handle negative gradient method
negative_gradient_method = optim_params['negative_gradient_method']
if callable(negative_gradient_method):
negative_gradient_method = negative_gradient_method
elif negative_gradient_method in {'bh', 'BH', 'barnes-hut'}:
negative_gradient_method = kl_divergence_bh
elif negative_gradient_method in {'fft', 'FFT', 'interpolation'}:
negative_gradient_method = kl_divergence_fft
else:
raise ValueError('Unrecognized gradient method. Please choose one of '
'the supported methods or provide a valid callback.')
optim_params['negative_gradient_method'] = negative_gradient_method
# Handle number of jobs
n_jobs = optim_params['n_jobs']
if n_jobs < 0:
n_cores = multiprocessing.cpu_count()
# Add negative number of n_jobs to the number of cores, but increment by
# one because -1 indicates using all cores, -2 all except one, and so on
n_jobs = n_cores + n_jobs + 1
# If the number of jobs, after this correction is still <= 0, then the user
# probably thought they had more cores, so we'll default to 1
if n_jobs <= 0:
log.warning('`n_jobs` received value %d but only %d cores are available. '
'Defaulting to single job.' % (optim_params['n_jobs'], n_cores))
n_jobs = 1
optim_params['n_jobs'] = n_jobs
class OptimizationInterrupt(InterruptedError):
def __init__(self, error: float, final_embedding: np.ndarray) -> None:
super().__init__()
self.error = error
self.final_embedding = final_embedding
class PartialTSNEEmbedding(np.ndarray):
"""A partial embedding is created when we take an existing `TSNEEmbedding`
and add new data to it. It differs from the typical embedding in that it
would be unwise to add even more data to only the subset of already
approximated data. Therefore, we don't allow this and save the computation
of a nearest neighbor index.
If we would like to add new data multiple times to the existing embedding,
we can simply do so on the original embedding.
"""
def __new__(cls, embedding, reference_embedding, P, gradient_descent_params):
obj = np.asarray(embedding, dtype=np.float64, order='C').view(PartialTSNEEmbedding)
obj.reference_embedding = reference_embedding
obj.P = P
obj.gradient_descent_params = gradient_descent_params
obj.kl_divergence = None
return obj
def optimize(self, n_iter, inplace=False, propagate_exception=False,
**gradient_descent_params):
# Typically we want to return a new embedding and keep the old one intact
if inplace:
embedding = self
else:
embedding = PartialTSNEEmbedding(
np.copy(self), self.reference_embedding, self.P, self.gradient_descent_params,
)
# If optimization parameters were passed to this function, prefer those
# over the defaults specified in the TSNE object
optim_params = dict(self.gradient_descent_params)
optim_params.update(gradient_descent_params)
_handle_nice_params(optim_params)
optim_params['n_iter'] = n_iter
try:
error, embedding = gradient_descent(
embedding=embedding, reference_embedding=self.reference_embedding,
P=self.P, **optim_params)
except OptimizationInterrupt as ex:
log.info('Optimization was interrupted with callback.')
if propagate_exception:
raise ex
error, embedding = ex.error, ex.final_embedding
embedding.kl_divergence = error
return embedding
class TSNEEmbedding(np.ndarray):
def __new__(cls, embedding, affinities, gradient_descent_params):
obj = np.asarray(embedding, dtype=np.float64, order='C').view(TSNEEmbedding)
obj.affinities = affinities # type: Affinities
obj.gradient_descent_params = gradient_descent_params # type: dict
obj.kl_divergence = None
return obj
def optimize(self, n_iter, inplace=False, propagate_exception=False,
**gradient_descent_params):
# Typically we want to return a new embedding and keep the old one intact
if inplace:
embedding = self
else:
embedding = TSNEEmbedding(np.copy(self), self.affinities,
self.gradient_descent_params)
# If optimization parameters were passed to this function, prefer those
# over the defaults specified in the TSNE object
optim_params = dict(self.gradient_descent_params)
optim_params.update(gradient_descent_params)
_handle_nice_params(optim_params)
optim_params['n_iter'] = n_iter
try:
error, embedding = gradient_descent(
embedding=embedding, P=self.affinities.P, **optim_params)
except OptimizationInterrupt as ex:
log.info('Optimization was interrupted with callback.')
if propagate_exception:
raise ex
error, embedding = ex.error, ex.final_embedding
embedding.kl_divergence = error
return embedding
def transform(self, X, perplexity=None, initialization='weighted',
early_exaggeration=2, early_exaggeration_iter=100,
initial_momentum=0.2, n_iter=300, final_momentum=0.4,
**gradient_descent_params):
embedding = self.prepare_partial(X, perplexity=perplexity, initialization=initialization)
optim_params = dict(gradient_descent_params)
try:
# Early exaggeration with lower momentum to allow points to move around
# more easily and find their neighbors
optim_params['momentum'] = initial_momentum
optim_params['exaggeration'] = early_exaggeration
optim_params['n_iter'] = early_exaggeration_iter
embedding.optimize(inplace=True, propagate_exception=True, **optim_params)
# Restore actual affinity probabilities and increase momentum to get
# final, optimized embedding
optim_params['momentum'] = final_momentum
optim_params['exaggeration'] = None
optim_params['n_iter'] = n_iter
embedding.optimize(inplace=True, propagate_exception=True, **optim_params)
except OptimizationInterrupt as ex:
log.info('Optimization was interrupted with callback.')
embedding = ex.final_embedding
return embedding
def prepare_partial(self, X, initialization='weighted', perplexity=None):
"""Get the initial positions of some new data to be fitted w.r.t. the
existing embedding.
Parameters
----------
X : np.ndarray
initialization : Optional[Union[str, np.ndarray]]
perplexity : Optional[float]
Returns
-------
PartialTSNEEmbedding
"""
P, neighbors, distances = self.affinities.to_new(
X, return_distances=True, perplexity=perplexity,
)
embedding = self.__generate_partial_coordinates(
X, initialization, neighbors, distances,
)
return PartialTSNEEmbedding(
embedding, reference_embedding=self, P=P,
gradient_descent_params=self.gradient_descent_params,
)
def __generate_partial_coordinates(self, X, initialization, neighbors, distances):
n_samples = X.shape[0]
n_components = self.shape[1]
# If initial positions are given in an array, use a copy of that
if isinstance(initialization, np.ndarray):
assert initialization.shape[0] == X.shape[0], \
'The provided initialization contains a different number of ' \
'samples (%d) than the data provided (%d).' % (
initialization.shape[0], X.shape[0])
embedding = np.array(initialization)
# Random initialization with isotropic normal distribution
elif initialization == 'random':
embedding = np.random.normal(0, 1e-2, (X.shape[0], n_components))
elif initialization == 'weighted':
embedding = np.zeros((n_samples, n_components))
for i in range(n_samples):
embedding[i] = np.average(self[neighbors[i]], axis=0, weights=distances[i])
else:
raise ValueError('Unrecognized initialization scheme `%s`.' % initialization)
return embedding
class TSNE:
def __init__(self, n_components=2, perplexity=30, learning_rate=100,
early_exaggeration_iter=250, early_exaggeration=12,
n_iter=750, late_exaggeration_iter=0, late_exaggeration=1.2,
theta=0.5, n_interpolation_points=3, min_num_intervals=10,
ints_in_interval=1, initialization='pca', metric='euclidean',
initial_momentum=0.5, final_momentum=0.8, n_jobs=1,
neighbors='exact', negative_gradient_method='bh',
callbacks=None, callbacks_every_iters=50):
self.n_components = n_components
self.perplexity = perplexity
self.learning_rate = learning_rate
self.early_exaggeration = early_exaggeration
self.early_exaggeration_iter = early_exaggeration_iter
self.n_iter = n_iter
self.late_exaggeration = late_exaggeration
self.late_exaggeration_iter = late_exaggeration_iter
self.theta = theta
self.n_interpolation_points = n_interpolation_points
self.min_num_intervals = min_num_intervals
self.ints_in_interval = ints_in_interval
self.initialization = initialization
self.metric = metric
self.initial_momentum = initial_momentum
self.final_momentum = final_momentum
self.n_jobs = n_jobs
self.neighbors_method = neighbors
self.negative_gradient_method = negative_gradient_method
self.callbacks = callbacks
self.callbacks_every_iters = callbacks_every_iters
def fit(self, X: np.ndarray) -> TSNEEmbedding:
"""Perform t-SNE dimensionality reduction.
Parameters
----------
X : np.ndarray
Returns
-------
TSNEEmbedding
"""
embedding = self.prepare_initial(X)
try:
# Early exaggeration with lower momentum to allow points to move around
# more easily and find their neighbors
embedding.optimize(
n_iter=self.early_exaggeration_iter, exaggeration=self.early_exaggeration,
momentum=self.initial_momentum, inplace=True, propagate_exception=True,
)
# Restore actual affinity probabilities and increase momentum to get
# final, optimized embedding
embedding.optimize(
n_iter=self.n_iter, momentum=self.final_momentum, inplace=True,
propagate_exception=True,
)
# Use the trick described in [4]_ to get more separated clusters of
# points by applying a late exaggeration phase
embedding.optimize(
n_iter=self.late_exaggeration_iter, exaggeration=self.late_exaggeration,
momentum=self.final_momentum, inplace=True, propagate_exception=True,
)
except OptimizationInterrupt as ex:
log.info('Optimization was interrupted with callback.')
embedding = ex.final_embedding
return embedding
def prepare_initial(self, X, initialization=None):
"""Prepare the initial embedding which can be optimized in steps.
Parameters
----------
X : np.ndarray
initialization : Optional[Union[np.ndarray, str]]
Returns
-------
TSNEEmbedding
"""
# Get some initial coordinates for the embedding
y_coords = self.generate_initial_coordinates(X, initialization=initialization)
# Compute the affinities for the input data
affinities = NearestNeighborAffinities(
X, self.perplexity, method=self.neighbors_method,
metric=self.metric, n_jobs=self.n_jobs,
)
gradient_descent_params = {
# Degrees of freedom of the Student's t-distribution. The
# suggestion degrees_of_freedom = n_components - 1 comes from [3]_.
'dof': max(self.n_components - 1, 1),
'negative_gradient_method': self.negative_gradient_method,
'learning_rate': self.learning_rate,
# By default, use the momentum used in unexaggerated phase
'momentum': self.final_momentum,
# Barnes-Hut params
'theta': self.theta,
# Interpolation params
'n_interpolation_points': self.n_interpolation_points,
'min_num_intervals': self.min_num_intervals,
'ints_in_interval': self.ints_in_interval,
'n_jobs': self.n_jobs,
# Callback params
'callbacks': self.callbacks,
'callbacks_every_iters': self.callbacks_every_iters,
}
return TSNEEmbedding(y_coords, affinities, gradient_descent_params)
def generate_initial_coordinates(self, X, initialization=None):
"""Get initial coordinates for the new embedding for the data set.
Parameters
----------
X : np.ndarray
initialization : Optional[Union[np.ndarray, str]]
Returns
-------
np.ndarray
"""
if initialization is None:
initialization = self.initialization
# If initial positions are given in an array, use a copy of that
if isinstance(initialization, np.ndarray):
assert initialization.shape[0] == X.shape[0], \
'The provided initialization contains a different number of ' \
'samples (%d) than the data provided (%d).' % (
initialization.shape[0], X.shape[0])
embedding = np.array(initialization)
variance = np.var(embedding, axis=0)
if any(variance > 1e-4):
log.warning(
'Variance of embedding is greater than 0.0001. Initial '
'embeddings with high variance may display poor convergence.')
return embedding
# Initialize the embedding using a PCA projection into the desired
# number of components
elif initialization == 'pca':
pca = PCA(n_components=self.n_components)
embedding = pca.fit_transform(X)
# The PCA embedding may have high variance, which leads to poor convergence
normalization = np.std(embedding, axis=0) * 100
embedding /= normalization
return embedding
# Random initialization with isotropic normal distribution
elif initialization == 'random':
return np.random.normal(0, 1e-2, (X.shape[0], self.n_components))
else:
raise ValueError('Unrecognized initialization scheme `%s`.' % initialization)
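# A hedged usage sketch of the classes above, on random data, illustrating the
# fit -> transform flow: embed a training set, then map unseen rows into the
# same space. The parameter values are arbitrary.
def _example_tsne_usage():
    x_train = np.random.normal(size=(500, 50))
    x_new = np.random.normal(size=(20, 50))
    tsne = TSNE(n_components=2, perplexity=30, negative_gradient_method='bh')
    embedding = tsne.fit(x_train)         # TSNEEmbedding, an np.ndarray subclass
    partial = embedding.transform(x_new)  # PartialTSNEEmbedding for the new rows
    return embedding, partial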
def kl_divergence_bh(embedding, P, dof, bh_params, reference_embedding=None,
should_eval_error=False, n_jobs=1, **_):
gradient = np.zeros_like(embedding, dtype=np.float64, order='C')
# In the event that we wish to embed new points into an existing embedding
# using simple optimization, we optimize the new embedding points
# w.r.t. the existing embedding. Otherwise, we want to optimize the
# embedding w.r.t. itself
if reference_embedding is None:
reference_embedding = embedding
# Compute negative gradient
tree = QuadTree(reference_embedding)
sum_Q = _tsne.estimate_negative_gradient_bh(
tree, embedding, gradient, **bh_params, dof=dof, num_threads=n_jobs)
del tree
# Compute positive gradient
sum_P, kl_divergence_ = _tsne.estimate_positive_gradient_nn(
P.indices, P.indptr, P.data, embedding, reference_embedding, gradient,
dof, num_threads=n_jobs, should_eval_error=should_eval_error,
)
gradient *= 2 * (dof + 1) / dof
# Computing positive gradients summed up only unnormalized q_ijs, so we
# have to include the normalization term separately
if should_eval_error:
kl_divergence_ += sum_P * np.log(sum_Q + EPSILON)
return kl_divergence_, gradient
def kl_divergence_fft(embedding, P, dof, fft_params, reference_embedding=None,
should_eval_error=False, n_jobs=1, **_):
gradient = np.zeros_like(embedding, dtype=np.float64, order='C')
# Compute negative gradient.
if embedding.ndim == 1 or embedding.shape[1] == 1:
if reference_embedding is not None:
sum_Q = _tsne.estimate_negative_gradient_fft_1d_with_reference(
embedding.ravel(), reference_embedding.ravel(), gradient.ravel(), **fft_params)
else:
sum_Q = _tsne.estimate_negative_gradient_fft_1d(
embedding.ravel(), gradient.ravel(), **fft_params)
elif embedding.shape[1] == 2:
if reference_embedding is not None:
sum_Q = _tsne.estimate_negative_gradient_fft_2d_with_reference(
embedding, reference_embedding, gradient, **fft_params)
else:
sum_Q = _tsne.estimate_negative_gradient_fft_2d(
embedding, gradient, **fft_params)
else:
raise RuntimeError('Interpolation based t-SNE for >2 dimensions is '
'currently unsupported (and generally a bad idea)')
# The positive gradient function always needs a reference embedding
if reference_embedding is None:
reference_embedding = embedding
# Compute positive gradient
sum_P, kl_divergence_ = _tsne.estimate_positive_gradient_nn(
P.indices, P.indptr, P.data, embedding, reference_embedding, gradient,
dof, num_threads=n_jobs, should_eval_error=should_eval_error,
)
gradient *= 2 * (dof + 1) / dof
if should_eval_error:
kl_divergence_ += sum_P * np.log(sum_Q + EPSILON)
return kl_divergence_, gradient
def gradient_descent(embedding, P, dof, n_iter, negative_gradient_method,
learning_rate, momentum, exaggeration=None, min_gain=0.01,
min_grad_norm=1e-8, theta=0.5, n_interpolation_points=3,
min_num_intervals=10, ints_in_interval=10,
reference_embedding=None, n_jobs=1, use_callbacks=False,
callbacks=None, callbacks_every_iters=50):
"""Perform batch gradient descent with momentum and gains.
Parameters
----------
embedding : np.ndarray
The current embedding Y in the desired space.
P : csr_matrix
Joint probability matrix P_{ij}.
dof : float
Degrees of freedom of the Student's t-distribution.
n_iter : int
Number of iterations to run the optimization for.
negative_gradient_method : Callable[..., Tuple[float, np.ndarray]]
The callable takes the embedding as argument and returns the KL
divergence (or an approximation to it) of the current embedding and the
gradient of the embedding, with which to update the point locations.
learning_rate : float
The learning rate for t-SNE. Typical values range from 1 to 1000.
Setting the learning rate too high will result in the crowding problem
where all the points form a ball in the center of the space.
momentum : float
The momentum generates a weight for previous gradients that decays
exponentially.
exaggeration : float
The exaggeration term is used to increase the attractive forces during
the first steps of the optimization. This enables points to move more
easily through others, helping them find their true neighbors more quickly.
min_gain : float
Minimum individual gain for each parameter.
min_grad_norm : float
If the gradient norm is below this threshold, the optimization will be
stopped. In practice, this almost never happens.
theta : float
This is the trade-off parameter between speed and accuracy of the
Barnes-Hut approximation of the negative forces. Setting a lower value
will produce more accurate results, while setting a higher value will
search through less of the space providing a rougher approximation.
Scikit-learn recommends values between 0.2-0.8. This value is ignored
unless the Barnes-Hut algorithm is used for gradients.
n_interpolation_points : int
The number of interpolation points to use for FFT accelerated
interpolation based tSNE. It is recommended to leave this value at its
default of 3, as otherwise the interpolation may suffer from the Runge
phenomenon. This value is ignored unless the interpolation based
algorithm is used.
min_num_intervals : int
The minimum number of intervals into which we split our embedding. A
larger value will produce better embeddings at the cost of performance.
This value is ignored unless the interpolation based algorithm is used.
ints_in_interval : float
Since the coordinate range of the embedding will certainly change
during optimization, this value tells us how many integer values should
appear in a single interval. The number of intervals affects the
embedding quality at the cost of performance. Fewer integers per interval
will result in a larger number of intervals.
reference_embedding : Optional[np.ndarray]
If we are adding points to an existing embedding, we have to compute
the gradients and errors w.r.t. the existing embedding.
n_jobs : int
Number of threads.
use_callbacks : bool
callbacks : Callable[[int, float, np.ndarray] -> bool]
The callback should accept three parameters, the first is the current
iteration, the second is the current KL divergence error and the last
is the current embedding. The callback should return a boolean value
indicating whether or not to stop optimization i.e. True to stop.
callbacks_every_iters : int
How often should the callback be called.
Returns
-------
float
The KL divergence of the optimized embedding.
np.ndarray
The optimized embedding Y.
Raises
------
OptimizationInterrupt
If the provided callback interrupts the optimization, this is raised.
"""
assert isinstance(embedding, np.ndarray), \
'`embedding` must be an instance of `np.ndarray`. Got `%s` instead' \
% type(embedding)
if reference_embedding is not None:
assert isinstance(reference_embedding, np.ndarray), \
'`reference_embedding` must be an instance of `np.ndarray`. Got ' \
'`%s` instead' % type(reference_embedding)
update = np.zeros_like(embedding)
gains = np.ones_like(embedding)
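# `update` accumulates the momentum term and `gains` holds the per-parameter
# adaptive step sizes (clipped below by min_gain), following the usual
# delta-bar-delta update used in t-SNE optimizers.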
#!/usr/bin/python3
import os
import sys
import numpy as np
class Reducer:
def __init__(self):
self.data = None
self.clusters = None
self.centroids = None
def compute_centroids(self, clusters, data):
ClustersCount = int(os.environ['ClustersCount'])
new_centroids = np.zeros((ClustersCount, data.shape[1]), dtype=float)
for i in range(new_centroids.shape[0]):
idx = np.where(clusters == i)
new_centroids[i] = np.mean(data[idx[0]], axis=0)
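# Note: if a cluster received no points, idx[0] is empty and np.mean returns
# NaN for that centroid (with a RuntimeWarning).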
return new_centroids
def emit_output(self):
tc = np.zeros((self.centroids.shape[0], 2 + self.centroids.shape[1]), dtype=float)
cc = np.arange(self.centroids.shape[0], dtype=int)
"""
Test functions for gufuncs_linalg module
Heavily inspired by (and in part ripped from) test_linalg
"""
from __future__ import division, print_function
################################################################################
# The following functions are implemented in the module "gufuncs_linalg"
#
# category "linalg"
# - inv (TestInv)
# - poinv (TestPoinv)
# - det (TestDet)
# - slogdet (TestDet)
# - eig (TestEig)
# - eigh (TestEigh)
# - eigvals (TestEigvals)
# - eigvalsh (TestEigvalsh)
# - cholesky
# - solve (TestSolve)
# - chosolve (TestChosolve)
# - svd (TestSVD)
# ** unimplemented **
# - qr
# - matrix_power
# - matrix_rank
# - pinv
# - lstsq
# - tensorinv
# - tensorsolve
# - norm
# - cond
#
# category "inspired by pdl"
# - quadratic_form
# - matrix_multiply3
# - add3 (TestAdd3)
# - multiply3 (TestMultiply3)
# - multiply3_add (TestMultiply3Add)
# - multiply_add (TestMultiplyAdd)
# - multiply_add2 (TestMultiplyAdd2)
# - multiply4 (TestMultiply4)
# - multiply4_add (TestMultiply4Add)
#
# category "others"
# - convolve
# - inner1d
# - innerwt
# - matrix_multiply
from nose.plugins.skip import Skip, SkipTest
import numpy as np
from numpy.testing import (TestCase, assert_, assert_equal, assert_raises,
assert_array_equal, assert_almost_equal,
run_module_suite)
from numpy import array, single, double, csingle, cdouble, dot, identity
from numpy import multiply, inf
import numpy.linalg._gufuncs_linalg as gula
old_assert_almost_equal = assert_almost_equal
def assert_almost_equal(a, b, **kw):
if a.dtype.type in (single, csingle):
decimal = 5
else:
decimal = 10
old_assert_almost_equal(a, b, decimal = decimal, **kw)
def assert_valid_eigen_no_broadcast(M, w, v, **kw):
lhs = gula.matrix_multiply(M, v)
rhs = w*v
assert_almost_equal(lhs, rhs, **kw)
def assert_valid_eigen_recurse(M, w, v, **kw):
"""check that w and v are valid eigenvalues/eigenvectors for matrix M
broadcast"""
if len(M.shape) > 2:
for i in range(M.shape[0]):
assert_valid_eigen_recurse(M[i], w[i], v[i], **kw)
else:
if len(M.shape) == 2:
assert_valid_eigen_no_broadcast(M, w, v, **kw)
else:
raise AssertionError('Not enough dimensions')
def assert_valid_eigen(M, w, v, **kw):
if np.any(np.isnan(M)):
raise AssertionError('nan found in matrix')
if np.any(np.isnan(w)):
raise AssertionError('nan found in eigenvalues')
if np.any(np.isnan(v)):
raise AssertionError('nan found in eigenvectors')
assert_valid_eigen_recurse(M, w, v, **kw)
def assert_valid_eigenvals_no_broadcast(M, w, **kw):
ident = np.eye(M.shape[0], dtype=M.dtype)
for i in range(w.shape[0]):
assert_almost_equal(gula.det(M - w[i]*ident), 0.0, **kw)
def assert_valid_eigenvals_recurse(M, w, **kw):
if len(M.shape) > 2:
for i in range(M.shape[0]):
assert_valid_eigenvals_recurse(M[i], w[i], **kw)
else:
if len(M.shape) == 2:
assert_valid_eigenvals_no_broadcast(M, w, **kw)
else:
raise AssertionError('Not enough dimensions')
def assert_valid_eigenvals(M, w, **kw):
if np.any(np.isnan(M)):
raise AssertionError('nan found in matrix')
if np.any(np.isnan(w)):
raise AssertionError('nan found in eigenvalues')
assert_valid_eigenvals_recurse(M, w, **kw)
class MatrixGenerator(object):
def real_matrices(self):
a = [[1, 2],
[3, 4]]
b = [[4, 3],
[2, 1]]
return a, b
def real_symmetric_matrices(self):
a = [[ 2, -1],
[-1, 2]]
b = [[4, 3],
[2, 1]]
return a, b
def complex_matrices(self):
a = [[1+2j, 2+3j],
[3+4j, 4+5j]]
b = [[4+3j, 3+2j],
[2+1j, 1+0j]]
return a, b
def complex_hermitian_matrices(self):
a = [[2, -1],
[-1, 2]]
b = [[4+3j, 3+2j],
[2-1j, 1+0j]]
return a, b
def real_matrices_vector(self):
a, b = self.real_matrices()
return [a], [b]
def real_symmetric_matrices_vector(self):
a, b = self.real_symmetric_matrices()
return [a], [b]
def complex_matrices_vector(self):
a, b = self.complex_matrices()
return [a], [b]
def complex_hermitian_matrices_vector(self):
a, b = self.complex_hermitian_matrices()
return [a], [b]
class GeneralTestCase(MatrixGenerator):
def test_single(self):
a, b = self.real_matrices()
self.do(array(a, dtype=single),
array(b, dtype=single))
def test_double(self):
a, b = self.real_matrices()
self.do(array(a, dtype=double),
array(b, dtype=double))
def test_csingle(self):
a, b = self.complex_matrices()
self.do(array(a, dtype=csingle),
array(b, dtype=csingle))
def test_cdouble(self):
a, b = self.complex_matrices()
self.do(array(a, dtype=cdouble),
array(b, dtype=cdouble))
def test_vector_single(self):
a, b = self.real_matrices_vector()
self.do(array(a, dtype=single),
array(b, dtype=single))
def test_vector_double(self):
a, b = self.real_matrices_vector()
self.do(array(a, dtype=double),
array(b, dtype=double))
def test_vector_csingle(self):
a, b = self.complex_matrices_vector()
self.do(array(a, dtype=csingle),
array(b, dtype=csingle))
def test_vector_cdouble(self):
a, b = self.complex_matrices_vector()
self.do(array(a, dtype=cdouble),
array(b, dtype=cdouble))
class HermitianTestCase(MatrixGenerator):
def test_single(self):
a, b = self.real_symmetric_matrices()
self.do(array(a, dtype=single),
array(b, dtype=single))
def test_double(self):
a, b = self.real_symmetric_matrices()
self.do(array(a, dtype=double),
array(b, dtype=double))
def test_csingle(self):
a, b = self.complex_hermitian_matrices()
self.do(array(a, dtype=csingle),
array(b, dtype=csingle))
def test_cdouble(self):
a, b = self.complex_hermitian_matrices()
self.do(array(a, dtype=cdouble),
array(b, dtype=cdouble))
def test_vector_single(self):
a, b = self.real_symmetric_matrices_vector()
self.do(array(a, dtype=single),
array(b, dtype=single))
def test_vector_double(self):
a, b = self.real_symmetric_matrices_vector()
self.do(array(a, dtype=double),
array(b, dtype=double))
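# A hedged sketch of how a concrete check plugs into the mix-in classes above.
# TestInv is listed in the module header but its body is not shown here, so the
# class below is an illustration rather than the original implementation.
class _ExampleTestInv(GeneralTestCase, TestCase):
    def do(self, a, b):
        # inv broadcasts over stacked matrices; multiplying back should recover
        # (a stack of) identity matrices up to rounding error.
        a_inv = gula.inv(a)
        ident = np.broadcast_to(identity(a.shape[-1]).astype(a.dtype), a.shape)
        assert_almost_equal(gula.matrix_multiply(a, a_inv), ident)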
from collections import defaultdict
from scipy import spatial
import numpy as np
import torch
def compute_struct_metric(pred_mask, masks):
masks = masks.float().squeeze()
pred_mask = torch.from_numpy(pred_mask).float().squeeze()
y = masks.mean();
if (y==0):
x = pred_mask.mean();
Q = 1.0 - x
elif (y==1):
x = pred_mask.mean();
Q = x
else:
alpha = 0.5
Q = alpha*S_object(pred_mask,masks)+(1-alpha)*S_region(pred_mask, masks);
if (Q<0):
Q=0;
return Q
def S_region(prediction, GT):
X, Y = centroid(GT);
# divide GT into 4 regions
[GT_1,GT_2,GT_3,GT_4,w1,w2,w3,w4] = divideGT(GT,X,Y);
# Divede prediction into 4 regions
[prediction_1,prediction_2,prediction_3,prediction_4] = Divideprediction(prediction,X,Y);
# Compute the ssim score for each regions
Q1 = ssim(prediction_1,GT_1);
Q2 = ssim(prediction_2,GT_2);
Q3 = ssim(prediction_3,GT_3);
Q4 = ssim(prediction_4,GT_4);
# Sum the 4 scores
Q = w1 * Q1 + w2 * Q2 + w3 * Q3 + w4 * Q4;
return Q
def centroid(GT):
"""
Compute the centroid of the ground-truth mask GT.
Usage:
[X, Y] = centroid(GT)
Input:
GT - Binary ground truth. Type: logical.
Output:
[X, Y] - The coordinates of the centroid.
"""
[rows,cols] = GT.shape;
if(GT.sum()==0):
X = round(cols / 2);
Y = round(rows / 2);
else:
total = GT.sum();
i = np.arange(cols)
j = np.arange(rows)
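# (Truncated here.) The centroid is typically computed as the foreground-weighted
# mean of the coordinates, e.g. X = round((GT.sum(0) * i).sum() / total) and
# Y = round((GT.sum(1) * j).sum() / total), as in the reference S-measure code.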
# Class definitions for equations to be tested
# from path import *
import numpy as np
import centpy
###############
# 1D equations
###############
# Burgers equation
class Burgers1d(centpy.Equation1d):
def initial_data(self):
return np.sin(self.x) + 0.5 * np.sin(0.5 * self.x)
def boundary_conditions(self, u):
u[0] = u[-4]
u[1] = u[-3]
u[-2] = u[2]
u[-1] = u[3]
def flux_x(self, u):
return 0.5 * u * u
def spectral_radius_x(self, u):
return np.abs(u)
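# A hedged, self-contained sketch of how an equation object such as Burgers1d
# can be advanced in time: a solver only needs initial_data, flux_x,
# spectral_radius_x and boundary_conditions. The Lax-Friedrichs step below is
# an illustration only -- it is not centpy's central scheme -- and dx/cfl are
# assumed to come from the parameter object passed to the equation.
def _example_lax_friedrichs_step(eq, u, dx, cfl=0.45):
    # time step from the CFL condition using the local wave speeds
    dt = cfl * dx / np.max(eq.spectral_radius_x(u))
    f = eq.flux_x(u)
    u_new = np.copy(u)
    u_new[1:-1] = 0.5 * (u[2:] + u[:-2]) - 0.5 * dt / dx * (f[2:] - f[:-2])
    eq.boundary_conditions(u_new)
    return u_new, dt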
# Euler equation
class Euler1d(centpy.Equation1d):
def initial_data(self):
u = np.zeros((self.J + 4, 3))
midpoint = int(self.J / 2) + 2
left_v = [1, 0, 1.0 / (self.gamma - 1.0)]
right_v = [0.125, 0.0, 0.1 / (self.gamma - 1.0)]
# Left side
u[:midpoint, :] = left_v
# Right side
u[midpoint:, :] = right_v
return u
def boundary_conditions(self, u):
left_v = [1, 0, 1.0 / (self.gamma - 1.0)]
right_v = [0.125, 0.0, 0.1 / (self.gamma - 1.0)]
# Left side
u[0] = left_v
u[1] = left_v
# Right side
u[-1] = right_v
u[-2] = right_v
def flux_x(self, u):
f = np.zeros_like(u)
rho = u[:, 0]
u_x = u[:, 1] / rho
E = u[:, 2]
p = (self.gamma - 1.0) * (E - 0.5 * rho * u_x ** 2)
f[:, 0] = rho * u_x
f[:, 1] = rho * u_x ** 2 + p
f[:, 2] = u_x * (E + p)
return f
def spectral_radius_x(self, u):
rho = u[:, 0]
u_x = u[:, 1] / rho
p = (self.gamma - 1.0) * (u[:, 2] - 0.5 * rho * u_x ** 2)
return np.abs(u_x) + np.sqrt(self.gamma * p / rho)
# MHD equation
class MHD1d(centpy.Equation1d):
def pressure(self, u):
return (
u[:, 6]
- 0.5 * ((u[:, 1] ** 2 + u[:, 2] ** 2 + u[:, 3] ** 2) / u[:, 0])
- 0.5 * (self.B1 ** 2 + u[:, 4] ** 2 + u[:, 5] ** 2)
)
def initial_data(self):
u = np.zeros((self.J + 4, 7))
midpoint = int(self.J / 2) + 2
# Left side
u[:midpoint, 0] = 1.0
u[:midpoint, 1] = 0.0
u[:midpoint, 2] = 0.0
u[:midpoint, 3] = 0.0
u[:midpoint, 4] = 1.0
u[:midpoint, 5] = 0.0
u[:midpoint, 6] = 1.0 + 25.0 / 32.0
# Right side
u[midpoint:, 0] = 0.125
u[midpoint:, 1] = 0.0
u[midpoint:, 2] = 0.0
u[midpoint:, 3] = 0.0
u[midpoint:, 4] = -1.0
u[midpoint:, 5] = 0.0
u[midpoint:, 6] = 0.1 + 25.0 / 32.0
return u
def boundary_conditions(self, u):
left_v = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0 + 25.0 / 32.0]
right_v = [0.125, 0.0, 0.0, 0.0, -1.0, 0.0, 0.1 + 25.0 / 32]
if self.odd:
u[0] = left_v
u[-1] = right_v
u[-2] = right_v
else:
u[0] = left_v
u[1] = left_v
u[-1] = right_v
def flux_x(self, u):
f = np.zeros_like(u)
B1 = self.B1
p_star = self.pressure(u) + 0.5 * (B1 ** 2 + u[:, 4] ** 2 + u[:, 5] ** 2)
f[:, 0] = u[:, 1]
f[:, 1] = u[:, 1] ** 2 / u[:, 0] + p_star
f[:, 2] = u[:, 1] * u[:, 2] / u[:, 0] - B1 * u[:, 4]
f[:, 3] = u[:, 1] * u[:, 3] / u[:, 0] - B1 * u[:, 5]
f[:, 4] = u[:, 1] * u[:, 4] / u[:, 0] - B1 * u[:, 2] / u[:, 0]
f[:, 5] = u[:, 1] * u[:, 5] / u[:, 0] - B1 * u[:, 3] / u[:, 0]
f[:, 6] = (u[:, 6] + p_star) * (u[:, 1] / u[:, 0]) - B1 * (
B1 * u[:, 1] + u[:, 2] * u[:, 4] + u[:, 3] * u[:, 5]
) / u[:, 0]
return f
def spectral_radius_x(self, u):
rho = u[:, 0]
u_x = u[:, 1] / rho
p = self.pressure(u)
A = 2.0 * p / rho
B = (self.B1 ** 2 + u[:, 4] ** 2 + u[:, 5] ** 2) / rho
cf = np.sqrt(
0.5 * (A + B + np.sqrt((A + B) ** 2 - 4.0 * A * self.B1 ** 2 / rho))
)
return np.abs(u_x) + cf
###############
# 2D equations
###############
# Scalar equation
class Scalar2d(centpy.Equation2d):
def initial_data(self):
return np.sin(self.xx.T + 0.5) * np.cos(2 * self.xx.T + self.yy.T)
def boundary_conditions(self, u):
# x-boundary
u[0] = u[-4]
u[1] = u[-3]
u[-2] = u[2]
u[-1] = u[3]
# y-boundary
u[:, 0] = u[:, -4]
u[:, 1] = u[:, -3]
u[:, -2] = u[:, 2]
u[:, -1] = u[:, 3]
def flux_x(self, u):
return np.sin(u)
def flux_y(self, u):
return 1.0 / 3.0 * u ** 3
def spectral_radius_x(self, u):
return np.abs(np.cos(u))
def spectral_radius_y(self, u):
return u**2
# Euler equation
class Euler2d(centpy.Equation2d):
# Helper functions and definitions for the equation
def pressure(self, u):
return (self.gamma - 1.0) * (
u[:, :, 3] - 0.5 * (u[:, :, 1] ** 2 + u[:, :, 2] ** 2) / u[:, :, 0]
)
def euler_data(self):
gamma = self.gamma
p_one = 1.5
p_two = 0.3
p_three = 0.029
p_four = 0.3
upper_right, upper_left, lower_right, lower_left = np.ones((4, 4))
upper_right[0] = 1.5
upper_right[1] = 0.0
upper_right[2] = 0.0
upper_right[3] = (
p_one / (gamma - 1.0)
+ 0.5 * (upper_right[1] ** 2 + upper_right[2] ** 2) / upper_right[0]
)
upper_left[0] = 0.5323
upper_left[1] = 1.206 * upper_left[0]
upper_left[2] = 0.0
upper_left[3] = (
p_two / (gamma - 1.0)
+ 0.5 * (upper_left[1] ** 2 + upper_left[2] ** 2) / upper_left[0]
)
lower_right[0] = 0.5323
lower_right[1] = 0.0
lower_right[2] = 1.206 * lower_right[0]
lower_right[3] = (
p_four / (gamma - 1.0)
+ 0.5 * (lower_right[1] ** 2 + lower_right[2] ** 2) / lower_right[0]
)
lower_left[0] = 0.138
lower_left[1] = 1.206 * lower_left[0]
lower_left[2] = 1.206 * lower_left[0]
lower_left[3] = (
p_three / (gamma - 1.0)
+ 0.5 * (lower_left[1] ** 2 + lower_left[2] ** 2) / lower_left[0]
)
return upper_right, upper_left, lower_right, lower_left
# Abstract class equation definitions
def initial_data(self):
u = np.empty((self.J + 4, self.K + 4, 4))
midJ = int(self.J / 2) + 2
midK = int(self.K / 2) + 2
one_matrix = np.ones(u[midJ:, midK:].shape)
upper_right, upper_left, lower_right, lower_left = self.euler_data()
u[midJ:, midK:] = upper_right * one_matrix
u[:midJ, midK:] = upper_left * one_matrix
u[midJ:, :midK] = lower_right * one_matrix
u[:midJ, :midK] = lower_left * one_matrix
return u
def boundary_conditions(self, u):
upper_right, upper_left, lower_right, lower_left = self.euler_data()
if self.odd:
j = slice(1, -2)
u[j, 0] = u[j, 1]
u[j, -2] = u[j, -3]
u[j, -1] = u[j, -3]
u[0, j] = u[1, j]
u[-2, j] = u[-3, j]
u[-1, j] = u[-3, j]
# one
u[-2, -2] = upper_right
u[-1, -2] = upper_right
u[-2, -1] = upper_right
u[-1, -1] = upper_right
# two
u[0, -2] = upper_left
u[0, -1] = upper_left
# three
u[0, 0] = lower_left
u[0, 1] = lower_left
u[1, 0] = lower_left
u[1, 1] = lower_left
# four
u[-2, 0] = lower_right
u[-1, 0] = lower_right
u[-2, 1] = lower_right
u[-1, 1] = lower_right
else:
j = slice(2, -1)
u[j, 0] = u[j, 2]
u[j, 1] = u[j, 2]
u[j, -1] = u[j, -2]
u[0, j] = u[2, j]
u[1, j] = u[2, j]
u[-1, j] = u[-2, j]
# one
u[-1, -2] = upper_right
u[-1, -1] = upper_right
# two
u[0, -2] = upper_left
u[0, -1] = upper_left
u[1, -2] = upper_left
u[1, -1] = upper_left
# three
u[0, 0] = lower_left
u[0, 1] = lower_left
u[1, 0] = lower_left
u[1, 1] = lower_left
# four
u[-1, 0] = lower_right
u[-1, 1] = lower_right
def flux_x(self, u):
f = np.empty_like(u)
p = self.pressure(u)
f[:, :, 0] = u[:, :, 1]
f[:, :, 1] = u[:, :, 1] ** 2 / u[:, :, 0] + p
f[:, :, 2] = u[:, :, 1] * u[:, :, 2] / u[:, :, 0]
f[:, :, 3] = (u[:, :, 3] + p) * u[:, :, 1] / u[:, :, 0]
return f
def flux_y(self, u):
g = np.empty_like(u)
p = self.pressure(u)
g[:, :, 0] = u[:, :, 2]
g[:, :, 1] = u[:, :, 1] * u[:, :, 2] / u[:, :, 0]
g[:, :, 2] = u[:, :, 2] ** 2 / u[:, :, 0] + p
g[:, :, 3] = (u[:, :, 3] + p) * u[:, :, 2] / u[:, :, 0]
return g
def spectral_radius_x(self, u):
j0 = centpy._helpers.j0
rho = u[j0, j0, 0]
vx = u[j0, j0, 1] / rho
vy = u[j0, j0, 2] / rho
p = (self.gamma - 1.0) * (u[j0, j0, 3] - 0.5 * rho * (vx ** 2 + vy ** 2))
c = np.sqrt(self.gamma * p / rho)
return np.abs(vx) + c
def spectral_radius_y(self, u):
j0 = centpy._helpers.j0
rho = u[j0, j0, 0]
vx = u[j0, j0, 1] / rho
vy = u[j0, j0, 2] / rho
p = (self.gamma - 1.0) * (u[j0, j0, 3] - 0.5 * rho * (vx ** 2 + vy ** 2))
c = np.sqrt(self.gamma * p / rho)
return np.abs(vy) + c
# MHD equation
class MHD2d(centpy.Equation2d):
# Helper functions for the equation
def pressure(self, u):
return (self.gamma - 1.0) * (
u[:, :, 7]
- 0.5 * (u[:, :, 1] ** 2 + u[:, :, 2] ** 2 + u[:, :, 3] ** 2) / u[:, :, 0]
- 0.5 * (u[:, :, 4] ** 2 + u[:, :, 5] ** 2 + u[:, :, 6] ** 2)
)
def pressure_star(self, u):
return self.pressure(u) + 0.5 * (
u[:, :, 4] ** 2 + u[:, :, 5] ** 2 + u[:, :, 6] ** 2
)
def initial_data(self):
u = np.zeros((self.J + 4, self.K + 4, 8))
x = self.xx.T
y = self.yy.T
gamma = self.gamma
dx = self.dx
dy = self.dy
u[:, :, 0] = gamma ** 2
u[:, :, 1] = u[:, :, 0] / dy * (np.cos(y + 0.5 * dy) - np.cos(y - 0.5 * dy))
u[:, :, 2] = 0.0
u[:, :, 3] = -u[:, :, 0] / dx * (np.cos(x + 0.5 * dx) - np.cos(x - 0.5 * dx))
# Utility Functions
# Authors: <NAME>
# Edited by: <NAME>
'''
Used by the user to define channels that are hard coded for analysis.
'''
# Imports necessary for this function
import numpy as np
import re
from itertools import combinations
def splitpatient(patient):
stringtest = patient.find('seiz')
if stringtest == -1:
stringtest = patient.find('sz')
if stringtest == -1:
stringtest = patient.find('aw')
if stringtest == -1:
stringtest = patient.find('aslp')
if stringtest == -1:
stringtest = patient.find('_')
if stringtest == -1:
print("Not sz, seiz, aslp, or aw! Please add additional naming possibilities, or tell data gatherers to rename datasets.")
else:
pat_id = patient[0:stringtest]
seiz_id = patient[stringtest:]
# remove any underscores
pat_id = re.sub('_', '', pat_id)
seiz_id = re.sub('_', '', seiz_id)
return pat_id, seiz_id
def returnindices(pat_id, seiz_id=None):
included_indices, onsetelecs, clinresult = returnnihindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnlaindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnummcindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnjhuindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returntngindices(
pat_id, seiz_id)
return included_indices, onsetelecs, clinresult
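# A short usage sketch of the lookup helpers above; the patient label is a
# made-up example in the naming convention handled by splitpatient.
def _example_lookup():
    pat_id, seiz_id = splitpatient('pt1sz2')   # -> ('pt1', 'sz2')
    included, onsetelecs, clinresult = returnindices(pat_id, seiz_id)
    return included, onsetelecs, clinresult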
def returntngindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
if pat_id == 'id001ac':
# included_indices = np.concatenate((np.arange(0,4), np.arange(5,55),
# np.arange(56,77), np.arange(78,80)))
included_indices = np.array([0, 1, 5, 6, 7, 8, 9, 10, 11, 12, 13,
15, 16, 17, 18, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 35, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48,
49, 50, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64, 65, 66, 68,
69, 70, 71, 72, 73, 74, 75, 76, 78, 79])
elif pat_id == 'id002cj':
# included_indices = np.array(np.arange(0,184))
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
30, 31, 32, 33, 34, 35, 36, 37, 38,
45, 46, 47, 48, 49, 50, 51, 52, 53,
60, 61, 62, 63, 64, 65, 66, 67, 70, 71, 72, 73, 74, 75, 76, 85, 86, 87, 88, 89,
90, 91, 92, 93, 100, 101, 102, 103, 104, 105,
106, 107, 108, 115, 116, 117, 118, 119,
120, 121, 122, 123, 129, 130, 131, 132, 133,
134, 135, 136, 137,
# np.arange(143, 156)
143, 144, 145, 146, 147,
148, 149, 150, 151, 157, 158, 159, 160, 161,
162, 163, 164, 165, 171, 172, 173, 174, 175,
176, 177, 178, 179, 180, 181, 182])
elif pat_id == 'id003cm':
included_indices = np.concatenate((np.arange(0,13), np.arange(25,37),
np.arange(40,50), np.arange(55,69), np.arange(70,79)))
elif pat_id == 'id004cv':
# removed OC'10, SC'5, CC'14/15
included_indices = np.concatenate((np.arange(0,23), np.arange(25,39),
np.arange(40,59), np.arange(60,110)))
elif pat_id == 'id005et':
included_indices = np.concatenate((np.arange(0,39), np.arange(39,47),
np.arange(52,62), np.arange(62,87)))
elif pat_id == 'id006fb':
included_indices = np.concatenate((np.arange(10,19), np.arange(40,50),
np.arange(115,123)))
elif pat_id == 'id008gc':
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 48, 49, 50, 51, 52, 53, 54, 56, 57, 58, 61, 62, 63, 64, 65,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93,
94, 95, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 110, 111])
elif pat_id == 'id009il':
included_indices = np.concatenate((np.arange(0,10), np.arange(10,152)))
elif pat_id == 'id010js':
included_indices = np.concatenate((np.arange(0,14),
np.arange(15,29), np.arange(30,42), np.arange(43,52),
np.arange(53,65), np.arange(66,75), np.arange(76,80),
np.arange(81,85), np.arange(86,94), np.arange(95,98),
np.arange(99,111),
np.arange(112,124)))
elif pat_id == 'id011ml':
included_indices = np.concatenate((np.arange(0,18), np.arange(21,68),
np.arange(69,82), np.arange(82,125)))
elif pat_id == 'id012pc':
included_indices = np.concatenate((np.arange(0,4), np.arange(9,17),
np.arange(18,28), np.arange(31,41), np.arange(44,56),
np.arange(57,69), np.arange(70,82), np.arange(83,96),
np.arange(97,153)))
elif pat_id == 'id013pg':
included_indices = np.array([2, 3, 4, 5, 15, 18, 19, 20, 21, 23, 24,
25, 30, 31, 32, 33, 34, 35, 36, 37, 38, 50, 51, 52, 53, 54, 55, 56,
57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 70, 71, 72, 73, 74, 75,
76, 77, 78])
elif pat_id == 'id014rb':
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
130, 131, 132, 133, 135, 136, 140, 141, 142, 143, 144, 145, 146,
147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
160, 161, 162, 163, 164])
elif pat_id == 'id015sf':
included_indices = np.concatenate((np.arange(0,37), np.arange(38,77),
np.arange(78,121)))
return included_indices, onsetelecs, clinresult
def returnnihindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
if pat_id == 'pt1':
included_indices = np.concatenate((np.arange(0, 36), np.arange(41, 43),
np.arange(45, 69), np.arange(71, 95)))
onsetelecs = set(['ATT1', 'ATT2', 'AD1', 'AD2', 'AD3', 'AD4',
'PD1', 'PD2', 'PD3', 'PD4'])
resectelecs = set(['ATT1', 'ATT2', 'ATT3', 'ATT4', 'ATT5', 'ATT6', 'ATT7', 'ATT8',
'AST1', 'AST2', 'AST3', 'AST4',
'PST1', 'PST2', 'PST3', 'PST4',
'AD1', 'AD2', 'AD3', 'AD4',
'PD1', 'PD2', 'PD3', 'PD4',
'PLT5', 'PLT6', 'SLT1'])
clinresult = 1
elif pat_id == 'pt2':
# [1:14 16:19 21:25 27:37 43 44 47:74]
included_indices = np.concatenate((np.arange(0, 14), np.arange(15, 19),
np.arange(
20, 25), np.arange(
26, 37), np.arange(
42, 44),
np.arange(46, 74)))
onsetelecs = set(['MST1', 'PST1', 'AST1', 'TT1'])
resectelecs = set(['TT1', 'TT2', 'TT3', 'TT4', 'TT6', 'TT6',
'G1', 'G2', 'G3', 'G4', 'G9', 'G10', 'G11', 'G12', 'G18', 'G19',
'G20', 'G26', 'G27',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 1
elif pat_id == 'pt3':
# [1:19 21:37 42:43 46:69 71:107]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 37),
np.arange(41, 43), np.arange(45, 69), np.arange(70, 107)))
onsetelecs = set(['SFP1', 'SFP2', 'SFP3',
'IFP1', 'IFP2', 'IFP3',
'MFP2', 'MFP3',
'OF1', 'OF2', 'OF3', 'OF4'])
resectelecs = set(['FG1', 'FG2', 'FG9', 'FG10', 'FG17', 'FG18', 'FG25',
'SFP1', 'SFP2', 'SFP3', 'SFP4', 'SFP5', 'SFP6', 'SFP7', 'SFP8',
'MFP1', 'MFP2', 'MFP3', 'MFP4', 'MFP5', 'MFP6',
'IFP1', 'IFP2', 'IFP3', 'IFP4',
'OF3', 'OF4'])
clinresult = 1
elif pat_id == 'pt4':
# [1:19 21:37 42:43 46:69 71:107]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 26),
np.arange(28, 36)))
onsetelecs = set([])
resectelecs = set([])
clinresult = -1
elif pat_id == 'pt5':
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 26),
np.arange(28, 36)))
onsetelecs = set([])
resectelecs = set([])
clinresult = -1
elif pat_id == 'pt6':
# [1:36 42:43 46 52:56 58:71 73:95]
included_indices = np.concatenate((np.arange(0, 36), np.arange(41, 43),
np.arange(45, 46), np.arange(51, 56), np.arange(57, 71), np.arange(72, 95)))
onsetelecs = set(['LA1', 'LA2', 'LA3', 'LA4',
'LAH1', 'LAH2', 'LAH3', 'LAH4',
'LPH1', 'LPH2', 'LPH3', 'LPH4'])
resectelecs = set(['LALT1', 'LALT2', 'LALT3', 'LALT4', 'LALT5', 'LALT6',
'LAST1', 'LAST2', 'LAST3', 'LAST4',
'LA1', 'LA2', 'LA3', 'LA4', 'LPST4',
'LAH1', 'LAH2', 'LAH3', 'LAH4',
'LPH1', 'LPH2'])
clinresult = 2
elif pat_id == 'pt7':
# [1:17 19:35 37:38 41:62 67:109]
included_indices = np.concatenate((np.arange(0, 17), np.arange(18, 35),
np.arange(36, 38), np.arange(40, 62), np.arange(66, 109)))
onsetelecs = set(['MFP1', 'LFP3',
'PT2', 'PT3', 'PT4', 'PT5',
'MT2', 'MT3',
'AT3', 'AT4',
'G29', 'G30', 'G39', 'G40', 'G45', 'G46'])
resectelecs = set(['G28', 'G29', 'G30', 'G36', 'G37', 'G38', 'G39',
'G41', 'G44', 'G45', 'G46',
'LFP1', 'LFP2', 'LSF3', 'LSF4'])
clinresult = 3
elif pat_id == 'pt8':
# [1:19 21 23 30:37 39:40 43:64 71:76]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 21),
np.arange(
22, 23), np.arange(
29, 37), np.arange(
38, 40),
np.arange(42, 64), np.arange(70, 76)))
onsetelecs = set(['G19', 'G23', 'G29', 'G30', 'G31',
'TO6', 'TO5',
'MST3', 'MST4',
'O8', 'O9'])
resectelecs = set(['G22', 'G23', 'G27', 'G28', 'G29', 'G30', 'G31',
'MST2', 'MST3', 'MST4', 'PST2', 'PST3', 'PST4'])
clinresult = 1
elif pat_id == 'pt10':
# [1:3 5:19 21:35 48:69]
included_indices = np.concatenate((np.arange(0, 3), np.arange(4, 19),
np.arange(20, 35), np.arange(47, 69)))
onsetelecs = set(['TT1', 'TT2', 'TT4', 'TT6',
'MST1',
'AST2'])
resectelecs = set(['G3', 'G4', 'G5', 'G6', 'G11', 'G12', 'G13', 'G14',
'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6', 'AST1', 'AST2', 'AST3', 'AST4'])
clinresult = 2
elif pat_id == 'pt11':
# [1:19 21:35 37 39 40 43:74 76:81 83:84]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 35),
np.arange(
36, 37), np.arange(
38, 40), np.arange(
42, 74),
np.arange(75, 81), np.arange(82, 84)))
onsetelecs = set(['RG29', 'RG30', 'RG31', 'RG37', 'RG38', 'RG39',
'RG44', 'RG45'])
resectelecs = set(['RG4', 'RG5', 'RG6', 'RG7', 'RG12', 'RG13', 'RG14', 'RG15',
'RG21', 'RG22', 'RG23', 'RG29', 'RG30', 'RG31', 'RG37', 'RG38', 'RG39', 'RG45', 'RG46', 'RG47'])
resectelecs = set(['RG4', 'RG5', 'RG6', 'RG7', 'RG12',
'RG13', 'RG14', 'RG15',
'RG21', 'RG22', 'RG23', 'RG29', 'RG30',
'RG31', 'RG37', 'RG38', 'RG39', 'RG45', 'RG46', 'RG47'])
clinresult = 1
elif pat_id == 'pt12':
# [1:15 17:33 38:39 42:61]
included_indices = np.concatenate((np.arange(0, 15), np.arange(16, 33),
np.arange(37, 39), np.arange(41, 61)))
onsetelecs = set(['AST1', 'AST2',
'TT2', 'TT3', 'TT4', 'TT5'])
resectelecs = set(['G19', 'G20', 'G21', 'G22', 'G23', 'G27', 'G28', 'G29', 'G30', 'G31',
'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 2
elif pat_id == 'pt13':
# [1:36 39:40 43:66 69:74 77 79:94 96:103 105:130]
included_indices = np.concatenate((np.arange(0, 36), np.arange(38, 40),
np.arange(
42, 66), np.arange(
68, 74), np.arange(
76, 77),
np.arange(78, 94), np.arange(95, 103), np.arange(104, 130)))
onsetelecs = set(['G1', 'G2', 'G9', 'G10', 'G17', 'G18'])
resectelecs = set(['G1', 'G2', 'G3', 'G4', 'G9', 'G10', 'G11',
'G17', 'G18', 'G19',
'AP2', 'AP3', 'AP4'])
clinresult = 1
elif pat_id == 'pt14':
# [1:19 21:37 41:42 45:61 68:78]
included_indices = np.concatenate((np.arange(0, 3), np.arange(6, 10),
np.arange(
11, 17), np.arange(
18, 19), np.arange(
20, 37),
np.arange(40, 42), np.arange(44, 61), np.arange(67, 78)))
onsetelecs = set(['MST1', 'MST2',
'TT1', 'TT2', 'TT3',
'AST1', 'AST2'])
resectelecs = set(['TT1', 'TT2', 'TT3', 'AST1', 'AST2',
'MST1', 'MST2', 'PST1'])
clinresult = 4
elif pat_id == 'pt15':
# [2:7 9:30 32:36 41:42 45:47 49:66 69 71:85];
included_indices = np.concatenate((np.arange(1, 7), np.arange(8, 30),
np.arange(
31, 36), np.arange(
40, 42), np.arange(
44, 47),
np.arange(48, 66), np.arange(68, 69), np.arange(70, 85)))
onsetelecs = set(['TT1', 'TT2', 'TT3', 'TT4',
'MST1', 'MST2', 'AST1', 'AST2', 'AST3'])
resectelecs = set(['G2', 'G3', 'G4', 'G5', 'G10', 'G11', 'G12', 'G13',
'TT1', 'TT2', 'TT3', 'TT4', 'TT5',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 1
elif pat_id == 'pt16':
# [1:19 21:37 42:43 46:53]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 37),
np.arange(41, 43), np.arange(45, 53)))
onsetelecs = set(['TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST3', 'MST4',
'G26', 'G27', 'G28', 'G18', 'G19', 'G20', 'OF4'])
resectelecs = set(['G18', 'G19', 'G20', 'G26', 'G27', 'G28',
'G29', 'G30', 'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'
])
clinresult = 1
elif pat_id == 'pt17':
# [1:19 21:37 42:43 46:51]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 37),
np.arange(41, 43), np.arange(45, 51)))
onsetelecs = set(['TT1', 'TT2'])
resectelecs = set(['G27', 'G28', 'G29', 'G30',
'TT', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 1
return included_indices, onsetelecs, clinresult
def returnlaindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
spreadelecs = None
if pat_id == 'la01':
# [1 3 7:8 11:13 17:19 22:26 32 34:35 37 42 50:55 58 ...
# 62:65 70:72 77:81 84:97 100:102 105:107 110:114 120:121 130:131];
# onset_electrodes = {'Y''1', 'X''4', ...
# 'T''5', 'T''6', 'O''1', 'O''2', 'B1', 'B2',...% rare onsets
# }
included_indices = np.concatenate((np.arange(0, 3), np.arange(6, 8), np.arange(10, 13),
np.arange(
16, 19), np.arange(
21, 26), np.arange(
31, 32),
np.arange(
33, 35), np.arange(
36, 37), np.arange(
41, 42),
np.arange(
49, 55), np.arange(
57, 58), np.arange(
61, 65),
np.arange(
69, 72), np.arange(
76, 81), np.arange(
83, 97),
np.arange(
99, 102), np.arange(
104, 107), np.arange(
109, 114),
np.arange(119, 121), np.arange(129, 131)))
onsetelecs = ["X'4", "T'5", "T'6", "O'1", "O'2", "B1", "B2"]
spreadelecs = ["P1", "P2", 'P6', "X1", "X8", "X9", "E'2", "E'3"
"T'1"]
if seiz_id == 'inter2':
included_indices = np.concatenate((np.arange(0, 1), np.arange(7, 16), np.arange(21, 28),
np.arange(
33, 36), np.arange(
39, 40), np.arange(
42, 44), np.arange(
46, 50),
np.arange(
56, 58), np.arange(
62, 65), np.arange(
66, 68), np.arange(
69, 75),
np.arange(76, 83), np.arange(85, 89), np.arange(96, 103),
np.arange(106, 109), np.arange(111, 115), np.arange(116, 117),
np.arange(119, 123), np.arange(126, 127), np.arange(130, 134),
np.arange(136, 137), np.arange(138, 144), np.arange(146, 153)))
if seiz_id == 'ictal2':
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 19), np.arange(20, 33),
np.arange(
34, 37), np.arange(
38, 40), np.arange(
42, 98),
np.arange(107, 136), np.arange(138, 158)))
onsetelecs = ["Y'1"]
clinresult = 1
elif pat_id == 'la02':
# [1:4 7 9 11:12 15:18 21:28 30:34 47 50:62 64:67 ...
# 70:73 79:87 90 95:99]
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 7), np.arange(8, 9),
np.arange(
10, 12), np.arange(
14, 18), np.arange(
20, 28),
np.arange(
29, 34), np.arange(
46, 47), np.arange(
49, 62),
np.arange(
63, 67), np.arange(
69, 73), np.arange(
78, 87),
np.arange(89, 90), np.arange(94, 99)))
onsetelecs = ["L'2", "L'3", "L'4"]
clinresult = 1
elif pat_id == 'la03':
# [1:3 6:33 36:68 77:163]
included_indices = np.concatenate((np.arange(0, 3), np.arange(5, 33),
np.arange(35, 68), np.arange(76, 163)))
onsetelecs = ["L7"]
clinresult = 2
elif pat_id == 'la04':
# [1:4 9:13 15:17 22 24:32 44:47 52:58 60 63:64 ...
# 67:70 72:74 77:84 88:91 94:96 98:101 109:111 114:116 121 123:129];
included_indices = np.concatenate((np.arange(0, 4), np.arange(8, 13),
np.arange(
14, 17), np.arange(
21, 22), np.arange(
23, 32),
np.arange(43, 47), np.arange(51, 58), np.arange(59, 60),
np.arange(62, 64), np.arange(66, 70), np.arange(71, 74),
np.arange(76, 84), np.arange(87, 91), np.arange(93, 96),
np.arange(97, 101), np.arange(108, 111), np.arange(113, 116),
np.arange(120, 121), np.arange(122, 129)))
# FIRST ABLATION WAS A FAILURE
onsetelecs = ["L'4", "G'1", # 2ND RESECTION REMOVED ALL OF M' ELECTRODES
"M'1", "M'2", "M'3", "M'4", "M'5", "M'6", "M'7",
"M'8", "M'9", "M'10", "M'11", "M'12", "M'13", "M'14", "M'15", "M'16"]
clinresult = 2
elif pat_id == 'la05':
# [2:4 7:15 21:39 42:82 85:89 96:101 103:114 116:121 ...
# 126:145 147:152 154:157 160:161 165:180 182:191];
included_indices = np.concatenate((np.arange(1, 4), np.arange(6, 15),
np.arange(
20, 39), np.arange(
41, 82), np.arange(
84, 89),
np.arange(95, 101), np.arange(102, 114), np.arange(115, 121),
np.arange(125, 145), np.arange(146, 152), np.arange(153, 157),
np.arange(159, 161), np.arange(164, 180), np.arange(181, 191)))
onsetelecs = ["T'1", "T'2", "D'1", "D'2"]
clinresult = 1
elif pat_id == 'la06':
# [1:4 7:12 14:17 19 21:33 37 46:47 50:58 61:62 70:73 77:82 ...
# 84:102 104:112 114:119];
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 12),
np.arange(
13, 17), np.arange(
18, 19), np.arange(
20, 33),
np.arange(36, 37), np.arange(45, 47), np.arange(49, 58),
np.arange(60, 62), np.arange(69, 73), np.arange(76, 82),
np.arange(83, 102), np.arange(103, 112), np.arange(113, 119)))
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim # try out sonnet instead?
from tensorflow.python.client import timeline
from tensorflow.examples.tutorials.mnist import input_data
from sklearn.utils import shuffle
import numpy as np
import os
from utils import *
tf.app.flags.DEFINE_string('logdir', '/tmp/test', 'location for saved embeedings')
tf.app.flags.DEFINE_string('datadir', '/tmp/mnist', 'location for data')
tf.app.flags.DEFINE_integer('batchsize', 50, 'batch size.')
tf.app.flags.DEFINE_integer('epochs', 50, 'number of times through dataset.')
tf.app.flags.DEFINE_float('lr', 0.0001, 'learning rate.')
FLAGS = tf.app.flags.FLAGS
def batch(ims, labels, batchsize):
ims, labels = shuffle(ims, labels)
shape = ims.shape
for i in range(len(labels)//batchsize):
yield (i, ims[i*batchsize:(i+1)*batchsize, ...],
labels[i*batchsize:(i+1)*batchsize, ...])
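# Note: the generator yields (step, image_batch, label_batch) tuples and silently
# drops the final partial batch when len(labels) is not a multiple of batchsize.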
def accuracy(p, y):
return tf.reduce_mean(tf.cast(tf.equal(tf.argmax(p, axis=1), y), tf.float32))
def main(_):
print('Get data')
mnist = input_data.read_data_sets(FLAGS.datadir, one_hot=False)
ims = np.reshape(mnist.train.images, [-1, 784]).astype(np.float32)
labels = np.reshape(mnist.train.labels, [-1]).astype(np.int64)
test_ims = np.reshape(mnist.test.images, [-1, 784]).astype(np.float32)
"""
shows deep stats for the MPD
- calculates similarity and multiplies it with the track popularity
- if the track was unknown, decrease its similarity weight
usage:
python deeper_stats.py path-to-mpd-data/
"""
#import sys
import json
import re
import collections
import os
import gzip
import pandas as pd
import numpy as np
#from sklearn.neighbors import NearestNeighbors
import pickle
from collections import defaultdict
import heapq
import math
MOST_POPULAR_WEIGHT = 0.000002
TITLE_WEIGHT = 0.01
ALBUM_WEIGHT = 0.1
ARTIST_WEIGHT = 0.01
#"challenge_track_predImprSim_256418_64.csv",
#"challenge_track_scoreImprSim_256418_64.csv",
#"challenge_track_names.csv",
#1.0,
relevantTrackKNNs = ["challenge_track_predNNSim_256418_64.csv", "challenge_track_predImprSim_256418_64.csv"]
relevantTrackScores = [ "challenge_track_score2NNSim_256418_64.csv", "challenge_track_scoreImprSim_256418_64.csv"]
relevantTrackNames = ["challenge_track_names.csv", "challenge_track_names.csv"]
trackWeights = [ 1.0, 1.0]
relevantAlbKNNs = ["challenge_track_predNNAlbFinal_250561_64.csv"]
relevantAlbScores = ["challenge_track_scoreNNAlbFinal_250561_64.csv"]
relevantAlbNames = ["challenge_album_names.csv"]
albWeights = [0.1]
trackDT = zip(relevantTrackKNNs, relevantTrackScores, relevantTrackNames, trackWeights)
albDT = zip(relevantAlbKNNs, relevantAlbScores, relevantAlbNames, albWeights)
def softmax(x):
"""
    Normalize the scores in x so that they sum to 1.
    Note: this is plain sum-normalization rather than an exponential softmax;
    the exponential version is kept below, commented out.
    """
    return x / x.sum()
#scoreMatExp = np.exp(np.asarray(x))
#return scoreMatExp / scoreMatExp.sum()
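# Illustrative contrast (assumed toy values, worked by hand) between the sum-normalization above
# and an exponential softmax: for x = np.array([1., 2., 3.]), x / x.sum() gives ~[0.167, 0.333, 0.5],
# while np.exp(x) / np.exp(x).sum() gives ~[0.090, 0.245, 0.665]; both sum to 1, but the
# exponential form concentrates much more weight on the largest score.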
def normalize_name(name):
name = name.lower()
name = re.sub(r"[.,\/#!$%\^\*;:{}=\_`~()@]", ' ', name)
name = re.sub(r'[^\x00-\x7F]','', name)
name = re.sub(r'\s+', ' ', name).strip()
return name
def processKNNs(params):
nnName, scoreName, labelName, weight = params
embeddingsVector = pd.read_csv(nnName, delimiter=";", header=None)
embeddingsScore = pd.read_csv(scoreName, delimiter=";", header=None)
embeddingsLabel = pd.read_csv(labelName, delimiter=";", header=None)
print(len(embeddingsVector), len(embeddingsLabel))
maxNames = len(embeddingsLabel)
maxVals = len(embeddingsVector)
embeddingsVector.drop(range(maxNames, maxVals) , inplace=True)
embeddingsScore.drop(range(maxNames, maxVals), inplace=True)
embeddingsScore *= weight
embeddingsVector["label"] = embeddingsLabel[0]
embeddingsVector.set_index("label", inplace=True)
embeddingsScore["label"] = embeddingsLabel[0]
embeddingsScore.set_index("label", inplace=True)
#print(embeddingsVector.iloc[0:5,0:5])
del embeddingsLabel
return embeddingsVector, embeddingsScore
def save_obj(obj, name ):
with open('obj/'+ name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name ):
with open('obj/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
def dsum(dct1, dct2):
    """Add each value in dct2 onto the matching key of dct1 (dct1 is expected to tolerate missing keys, e.g. a defaultdict) and return dct1."""
    for k, v in dct2.items():
        dct1[k] += v
return dct1
def process_mpd(path, filename, outFile, trackDT, albDT):
count = 0
fullpath = os.sep.join((path, filename))
f = open(fullpath)
js = f.read()
f.close()
mpd_slice = json.loads(js)
trackKNNs = dict()
albTrackKNNs = defaultdict(int)
tracks = pd.read_csv("trackCounter.csv", delimiter=";", header=0)
tracks.set_index("TrackID", inplace=True)
tracks.drop(tracks[tracks.Count < 2].index, inplace=True)
#tracks.sort_values("Count", axis=0, ascending=False, inplace=True)
#mostPopularTracks = tracks.iloc[:1000]
#tracks = tracks.Count.to_dict()
albumTrack = pd.read_csv("albumTrack.csv", delimiter=";", header=0)
#albumTrack.Count = albumTrack.Count.multiply( ALBUM_WEIGHT )
albumTrack.set_index("AlbumID", inplace=True)
albumTrack.drop(albumTrack[albumTrack.Count < 10].index, inplace=True)
albumTrack.sort_values("Count", axis=0, ascending=False, inplace=True)
print(albumTrack.head())
print(albumTrack.shape)
artistTrack = pd.read_csv("artistTrack.csv", delimiter=";", header=0)
#artistTrack.drop(artistTrack[artistTrack.Count < 1].index, inplace=True)
artistTrack.set_index("ArtistID", inplace=True)
artistTrack.drop(artistTrack[artistTrack.Count < 2].index, inplace=True)
#meanArtist = artistTrack.groupby(artistTrack.index)[['Count']].mean()
#artistTrack.Count = artistTrack.Count.multiply(ARTIST_WEIGHT)
print(artistTrack.head())
print(artistTrack.shape)
titleTrack = pd.read_csv("titleTrack.csv", delimiter=";", header=0, encoding = "ISO-8859-1", error_bad_lines=False)
#titleTrack.Count = titleTrack.Count.multiply( TITLE_WEIGHT )
titleTrack.set_index("title", inplace=True)
titleTrack.drop(titleTrack[titleTrack.Count < 2].index, inplace=True)
print(titleTrack.head())
print(titleTrack.shape)
audioTrackPD = pd.read_csv("trackAudioStd.csv", delimiter=",", header=0, index_col=0)
audioTrackPD.set_index("id", inplace=True)
adt = defaultdict(np.float32)
audioTrack = audioTrackPD.to_dict(into=adt)
del audioTrackPD
audioPlaylistAnalysis = pd.read_csv("analyseDistribution2.csv", sep=",", header=0, index_col=0)
audioPlaylistAnalysis = audioPlaylistAnalysis.loc[audioPlaylistAnalysis.LENGTH > 1]
audioPlaylistAnalysis.acousticness = audioPlaylistAnalysis.acousticness * 1 / np.log(audioPlaylistAnalysis.LENGTH)
audioPlaylistAnalysis.danceability = audioPlaylistAnalysis.danceability * 1 / np.log(audioPlaylistAnalysis.LENGTH)
audioPlaylistAnalysis.speechiness = audioPlaylistAnalysis.speechiness * 1 / np.log(audioPlaylistAnalysis.LENGTH)
audioPlaylistAnalysis.energy = audioPlaylistAnalysis.energy * 1 / np.log(audioPlaylistAnalysis.LENGTH)
    audioPlaylistAnalysis.liveness = audioPlaylistAnalysis.liveness * 1 / np.log(audioPlaylistAnalysis.LENGTH)
# coding: utf-8
# # Building your Recurrent Neural Network - Step by Step
#
# Welcome to Course 5's first assignment! In this assignment, you will implement your first Recurrent Neural Network in numpy.
#
# Recurrent Neural Networks (RNN) are very effective for Natural Language Processing and other sequence tasks because they have "memory". They can read inputs $x^{\langle t \rangle}$ (such as words) one at a time, and remember some information/context through the hidden layer activations that get passed from one time-step to the next. This allows a uni-directional RNN to take information from the past to process later inputs. A bidirectional RNN can take context from both the past and the future.
#
# **Notation**:
# - Superscript $[l]$ denotes an object associated with the $l^{th}$ layer.
# - Example: $a^{[4]}$ is the $4^{th}$ layer activation. $W^{[5]}$ and $b^{[5]}$ are the $5^{th}$ layer parameters.
#
# - Superscript $(i)$ denotes an object associated with the $i^{th}$ example.
# - Example: $x^{(i)}$ is the $i^{th}$ training example input.
#
# - Superscript $\langle t \rangle$ denotes an object at the $t^{th}$ time-step.
# - Example: $x^{\langle t \rangle}$ is the input x at the $t^{th}$ time-step. $x^{(i)\langle t \rangle}$ is the input at the $t^{th}$ timestep of example $i$.
#
# - Subscript $i$ denotes the $i^{th}$ entry of a vector.
# - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the activations in layer $l$.
#
# We assume that you are already familiar with `numpy` and/or have completed the previous courses of the specialization. Let's get started!
# Let's first import all the packages that you will need during this assignment.
# In[3]:
import numpy as np
from rnn_utils import *
# ## 1 - Forward propagation for the basic Recurrent Neural Network
#
# Later this week, you will generate music using an RNN. The basic RNN that you will implement has the structure below. In this example, $T_x = T_y$.
# <img src="images/RNN.png" style="width:500;height:300px;">
# <caption><center> **Figure 1**: Basic RNN model </center></caption>
# Here's how you can implement an RNN:
#
# **Steps**:
# 1. Implement the calculations needed for one time-step of the RNN.
# 2. Implement a loop over $T_x$ time-steps in order to process all the inputs, one at a time.
#
# Let's go!
#
# ## 1.1 - RNN cell
#
# A Recurrent neural network can be seen as the repetition of a single cell. You are first going to implement the computations for a single time-step. The following figure describes the operations for a single time-step of an RNN cell.
#
# <img src="images/rnn_step_forward.png" style="width:700px;height:300px;">
# <caption><center> **Figure 2**: Basic RNN cell. Takes as input $x^{\langle t \rangle}$ (current input) and $a^{\langle t - 1\rangle}$ (previous hidden state containing information from the past), and outputs $a^{\langle t \rangle}$ which is given to the next RNN cell and also used to predict $y^{\langle t \rangle}$ </center></caption>
#
# **Exercise**: Implement the RNN-cell described in Figure (2).
#
# **Instructions**:
# 1. Compute the hidden state with tanh activation: $a^{\langle t \rangle} = \tanh(W_{aa} a^{\langle t-1 \rangle} + W_{ax} x^{\langle t \rangle} + b_a)$.
# 2. Using your new hidden state $a^{\langle t \rangle}$, compute the prediction $\hat{y}^{\langle t \rangle} = softmax(W_{ya} a^{\langle t \rangle} + b_y)$. We provided you a function: `softmax`.
# 3. Store $(a^{\langle t \rangle}, a^{\langle t-1 \rangle}, x^{\langle t \rangle}, parameters)$ in cache
# 4. Return $a^{\langle t \rangle}$ , $y^{\langle t \rangle}$ and cache
#
# We will vectorize over $m$ examples. Thus, $x^{\langle t \rangle}$ will have dimension $(n_x,m)$, and $a^{\langle t \rangle}$ will have dimension $(n_a,m)$.
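# A quick standalone shape check (not part of the graded code) of the dimensions stated above,
# using toy sizes n_x=3, n_a=5, m=10 chosen only for illustration: W_ax x^<t> and W_aa a^<t-1>
# are both (n_a, m), and adding b_a of shape (n_a, 1) broadcasts to the hidden-state shape (n_a, m).
_n_x, _n_a, _m = 3, 5, 10
_shape = (np.random.randn(_n_a, _n_x) @ np.random.randn(_n_x, _m)
          + np.random.randn(_n_a, _n_a) @ np.random.randn(_n_a, _m)
          + np.random.randn(_n_a, 1)).shape
assert _shape == (_n_a, _m)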
# In[4]:
# GRADED FUNCTION: rnn_cell_forward
def rnn_cell_forward(xt, a_prev, parameters):
"""
Implements a single forward step of the RNN-cell as described in Figure (2)
Arguments:
xt -- your input data at timestep "t", numpy array of shape (n_x, m).
a_prev -- Hidden state at timestep "t-1", numpy array of shape (n_a, m)
parameters -- python dictionary containing:
Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)
Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)
Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
ba -- Bias, numpy array of shape (n_a, 1)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
Returns:
a_next -- next hidden state, of shape (n_a, m)
yt_pred -- prediction at timestep "t", numpy array of shape (n_y, m)
cache -- tuple of values needed for the backward pass, contains (a_next, a_prev, xt, parameters)
"""
# Retrieve parameters from "parameters"
Wax = parameters["Wax"]
Waa = parameters["Waa"]
Wya = parameters["Wya"]
ba = parameters["ba"]
by = parameters["by"]
### START CODE HERE ### (≈2 lines)
# compute next activation state using the formula given above
a_next = np.tanh(np.dot(Wax,xt) + np.dot(Waa,a_prev) + ba)
# compute output of the current cell using the formula given above
yt_pred = softmax(np.dot(Wya,a_next) + by)
### END CODE HERE ###
# store values you need for backward propagation in cache
cache = (a_next, a_prev, xt, parameters)
return a_next, yt_pred, cache
# In[5]:
np.random.seed(1)
xt = np.random.randn(3,10)
a_prev = np.random.randn(5,10)
Waa = np.random.randn(5,5)
Wax = np.random.randn(5,3)
Wya = np.random.randn(2,5)
ba = np.random.randn(5,1)
by = np.random.randn(2,1)
parameters = {"Waa": Waa, "Wax": Wax, "Wya": Wya, "ba": ba, "by": by}
a_next, yt_pred, cache = rnn_cell_forward(xt, a_prev, parameters)
print("a_next[4] = ", a_next[4])
print("a_next.shape = ", a_next.shape)
print("yt_pred[1] =", yt_pred[1])
print("yt_pred.shape = ", yt_pred.shape)
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **a_next[4]**:
# </td>
# <td>
# [ 0.59584544 0.18141802 0.61311866 0.99808218 0.85016201 0.99980978
# -0.18887155 0.99815551 0.6531151 0.82872037]
# </td>
# </tr>
# <tr>
# <td>
# **a_next.shape**:
# </td>
# <td>
# (5, 10)
# </td>
# </tr>
# <tr>
# <td>
# **yt[1]**:
# </td>
# <td>
# [ 0.9888161 0.01682021 0.21140899 0.36817467 0.98988387 0.88945212
# 0.36920224 0.9966312 0.9982559 0.17746526]
# </td>
# </tr>
# <tr>
# <td>
# **yt.shape**:
# </td>
# <td>
# (2, 10)
# </td>
# </tr>
#
# </table>
# ## 1.2 - RNN forward pass
#
# You can see an RNN as the repetition of the cell you've just built. If your input sequence of data is carried over 10 time steps, then you will copy the RNN cell 10 times. Each cell takes as input the hidden state from the previous cell ($a^{\langle t-1 \rangle}$) and the current time-step's input data ($x^{\langle t \rangle}$). It outputs a hidden state ($a^{\langle t \rangle}$) and a prediction ($y^{\langle t \rangle}$) for this time-step.
#
#
# <img src="images/rnn.png" style="width:800px;height:300px;">
# <caption><center> **Figure 3**: Basic RNN. The input sequence $x = (x^{\langle 1 \rangle}, x^{\langle 2 \rangle}, ..., x^{\langle T_x \rangle})$ is carried over $T_x$ time steps. The network outputs $y = (y^{\langle 1 \rangle}, y^{\langle 2 \rangle}, ..., y^{\langle T_x \rangle})$. </center></caption>
#
#
#
# **Exercise**: Code the forward propagation of the RNN described in Figure (3).
#
# **Instructions**:
# 1. Create a vector of zeros ($a$) that will store all the hidden states computed by the RNN.
# 2. Initialize the "next" hidden state as $a_0$ (initial hidden state).
# 3. Start looping over each time step, your incremental index is $t$ :
# - Update the "next" hidden state and the cache by running `rnn_cell_forward`
# - Store the "next" hidden state in $a$ ($t^{th}$ position)
# - Store the prediction in y
# - Add the cache to the list of caches
# 4. Return $a$, $y$ and caches
# In[6]:
# GRADED FUNCTION: rnn_forward
def rnn_forward(x, a0, parameters):
"""
Implement the forward propagation of the recurrent neural network described in Figure (3).
Arguments:
x -- Input data for every time-step, of shape (n_x, m, T_x).
a0 -- Initial hidden state, of shape (n_a, m)
parameters -- python dictionary containing:
Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)
Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)
Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
ba -- Bias numpy array of shape (n_a, 1)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
Returns:
a -- Hidden states for every time-step, numpy array of shape (n_a, m, T_x)
y_pred -- Predictions for every time-step, numpy array of shape (n_y, m, T_x)
caches -- tuple of values needed for the backward pass, contains (list of caches, x)
"""
# Initialize "caches" which will contain the list of all caches
caches = []
# Retrieve dimensions from shapes of x and parameters["Wya"]
n_x, m, T_x = x.shape
n_y, n_a = parameters["Wya"].shape
### START CODE HERE ###
# initialize "a" and "y" with zeros (≈2 lines)
a = np.zeros([n_a,m,T_x])
y_pred = np.zeros([n_y,m,T_x])
# Initialize a_next (≈1 line)
a_next = a0
# loop over all time-steps
for t in range(T_x):
# Update next hidden state, compute the prediction, get the cache (≈1 line)
a_next, yt_pred, cache = rnn_cell_forward(x[:,:,t],a_next,parameters)
# Save the value of the new "next" hidden state in a (≈1 line)
a[:,:,t] = a_next
# Save the value of the prediction in y (≈1 line)
y_pred[:,:,t] = yt_pred
# Append "cache" to "caches" (≈1 line)
caches.append(cache)
### END CODE HERE ###
# store values needed for backward propagation in cache
caches = (caches, x)
return a, y_pred, caches
# In[7]:
np.random.seed(1)
x = np.random.randn(3,10,4)
a0 = np.random.randn(5,10)
Waa = np.random.randn(5,5)
Wax = np.random.randn(5,3)
Wya = np.random.randn(2,5)
ba = np.random.randn(5,1)
by = np.random.randn(2,1)
parameters = {"Waa": Waa, "Wax": Wax, "Wya": Wya, "ba": ba, "by": by}
a, y_pred, caches = rnn_forward(x, a0, parameters)
print("a[4][1] = ", a[4][1])
print("a.shape = ", a.shape)
print("y_pred[1][3] =", y_pred[1][3])
print("y_pred.shape = ", y_pred.shape)
print("caches[1][1][3] =", caches[1][1][3])
print("len(caches) = ", len(caches))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **a[4][1]**:
# </td>
# <td>
# [-0.99999375 0.77911235 -0.99861469 -0.99833267]
# </td>
# </tr>
# <tr>
# <td>
# **a.shape**:
# </td>
# <td>
# (5, 10, 4)
# </td>
# </tr>
# <tr>
# <td>
# **y[1][3]**:
# </td>
# <td>
# [ 0.79560373 0.86224861 0.11118257 0.81515947]
# </td>
# </tr>
# <tr>
# <td>
# **y.shape**:
# </td>
# <td>
# (2, 10, 4)
# </td>
# </tr>
# <tr>
# <td>
# **cache[1][1][3]**:
# </td>
# <td>
# [-1.1425182 -0.34934272 -0.20889423 0.58662319]
# </td>
# </tr>
# <tr>
# <td>
# **len(cache)**:
# </td>
# <td>
# 2
# </td>
# </tr>
#
# </table>
# Congratulations! You've successfully built the forward propagation of a recurrent neural network from scratch. This will work well enough for some applications, but it suffers from vanishing gradient problems. So it works best when each output $y^{\langle t \rangle}$ can be estimated using mainly "local" context (meaning information from inputs $x^{\langle t' \rangle}$ where $t'$ is not too far from $t$).
#
# In the next part, you will build a more complex LSTM model, which is better at addressing vanishing gradients. The LSTM will be better able to remember a piece of information and keep it saved for many timesteps.
# ## 2 - Long Short-Term Memory (LSTM) network
#
# This following figure shows the operations of an LSTM-cell.
#
# <img src="images/LSTM.png" style="width:500;height:400px;">
# <caption><center> **Figure 4**: LSTM-cell. This tracks and updates a "cell state" or memory variable $c^{\langle t \rangle}$ at every time-step, which can be different from $a^{\langle t \rangle}$. </center></caption>
#
# Similar to the RNN example above, you will start by implementing the LSTM cell for a single time-step. Then you can iteratively call it from inside a for-loop to have it process an input with $T_x$ time-steps.
#
# ### About the gates
#
# #### - Forget gate
#
# For the sake of this illustration, let's assume we are reading words in a piece of text and want to use an LSTM to keep track of grammatical structures, such as whether the subject is singular or plural. If the subject changes from a singular word to a plural word, we need a way to get rid of our previously stored memory value of the singular/plural state. In an LSTM, the forget gate lets us do this:
#
# $$\Gamma_f^{\langle t \rangle} = \sigma(W_f[a^{\langle t-1 \rangle}, x^{\langle t \rangle}] + b_f)\tag{1} $$
#
# Here, $W_f$ are weights that govern the forget gate's behavior. We concatenate $[a^{\langle t-1 \rangle}, x^{\langle t \rangle}]$ and multiply by $W_f$. The equation above results in a vector $\Gamma_f^{\langle t \rangle}$ with values between 0 and 1. This forget gate vector will be multiplied element-wise by the previous cell state $c^{\langle t-1 \rangle}$. So if one of the values of $\Gamma_f^{\langle t \rangle}$ is 0 (or close to 0) then it means that the LSTM should remove that piece of information (e.g. the singular subject) in the corresponding component of $c^{\langle t-1 \rangle}$. If one of the values is 1, then it will keep the information.
#
# #### - Update gate
#
# Once we forget that the subject being discussed is singular, we need to find a way to update it to reflect that the new subject is now plural. Here is the formula for the update gate:
#
# $$\Gamma_u^{\langle t \rangle} = \sigma(W_u[a^{\langle t-1 \rangle}, x^{\{t\}}] + b_u)\tag{2} $$
#
# Similar to the forget gate, here $\Gamma_u^{\langle t \rangle}$ is again a vector of values between 0 and 1. This will be multiplied element-wise with $\tilde{c}^{\langle t \rangle}$, in order to compute $c^{\langle t \rangle}$.
#
# #### - Updating the cell
#
# To update the new subject we need to create a new vector of numbers that we can add to our previous cell state. The equation we use is:
#
# $$ \tilde{c}^{\langle t \rangle} = \tanh(W_c[a^{\langle t-1 \rangle}, x^{\langle t \rangle}] + b_c)\tag{3} $$
#
# Finally, the new cell state is:
#
# $$ c^{\langle t \rangle} = \Gamma_f^{\langle t \rangle}* c^{\langle t-1 \rangle} + \Gamma_u^{\langle t \rangle} *\tilde{c}^{\langle t \rangle} \tag{4} $$
#
#
# #### - Output gate
#
# To decide which outputs we will use, we will use the following two formulas:
#
# $$ \Gamma_o^{\langle t \rangle}= \sigma(W_o[a^{\langle t-1 \rangle}, x^{\langle t \rangle}] + b_o)\tag{5}$$
# $$ a^{\langle t \rangle} = \Gamma_o^{\langle t \rangle}* \tanh(c^{\langle t \rangle})\tag{6} $$
#
# Where in equation 5 you decide what to output using a sigmoid function, and in equation 6 you multiply that by the $\tanh$ of the new cell state $c^{\langle t \rangle}$ (a tiny numeric sketch of the cell-state update follows below).
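# A tiny numeric sketch (not part of the graded code) of the cell-state update in equation (4):
# with a forget gate near 0 and an update gate near 1, the old memory is mostly dropped and
# replaced by the candidate value. The numbers below are arbitrary toy values.
_c_prev, _c_tilde = np.array([4.2]), np.array([-1.3])
_gamma_f, _gamma_u = np.array([0.01]), np.array([0.99])
_c_next = _gamma_f * _c_prev + _gamma_u * _c_tilde          # equation (4)
assert np.isclose(_c_next[0], 0.01 * 4.2 + 0.99 * (-1.3))   # ≈ -1.245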
# ### 2.1 - LSTM cell
#
# **Exercise**: Implement the LSTM cell described in Figure (4).
#
# **Instructions**:
# 1. Concatenate $a^{\langle t-1 \rangle}$ and $x^{\langle t \rangle}$ in a single matrix: $concat = \begin{bmatrix} a^{\langle t-1 \rangle} \\ x^{\langle t \rangle} \end{bmatrix}$
# 2. Compute all the formulas 1-6. You can use `sigmoid()` (provided) and `np.tanh()`.
# 3. Compute the prediction $y^{\langle t \rangle}$. You can use `softmax()` (provided).
# In[8]:
# GRADED FUNCTION: lstm_cell_forward
def lstm_cell_forward(xt, a_prev, c_prev, parameters):
"""
Implement a single forward step of the LSTM-cell as described in Figure (4)
Arguments:
xt -- your input data at timestep "t", numpy array of shape (n_x, m).
a_prev -- Hidden state at timestep "t-1", numpy array of shape (n_a, m)
c_prev -- Memory state at timestep "t-1", numpy array of shape (n_a, m)
parameters -- python dictionary containing:
Wf -- Weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x)
bf -- Bias of the forget gate, numpy array of shape (n_a, 1)
Wi -- Weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x)
bi -- Bias of the update gate, numpy array of shape (n_a, 1)
Wc -- Weight matrix of the first "tanh", numpy array of shape (n_a, n_a + n_x)
bc -- Bias of the first "tanh", numpy array of shape (n_a, 1)
Wo -- Weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x)
bo -- Bias of the output gate, numpy array of shape (n_a, 1)
Wy -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
Returns:
a_next -- next hidden state, of shape (n_a, m)
c_next -- next memory state, of shape (n_a, m)
yt_pred -- prediction at timestep "t", numpy array of shape (n_y, m)
cache -- tuple of values needed for the backward pass, contains (a_next, c_next, a_prev, c_prev, xt, parameters)
Note: ft/it/ot stand for the forget/update/output gates, cct stands for the candidate value (c tilde),
c stands for the memory value
"""
# Retrieve parameters from "parameters"
Wf = parameters["Wf"]
bf = parameters["bf"]
Wi = parameters["Wi"]
bi = parameters["bi"]
Wc = parameters["Wc"]
bc = parameters["bc"]
Wo = parameters["Wo"]
bo = parameters["bo"]
Wy = parameters["Wy"]
by = parameters["by"]
# Retrieve dimensions from shapes of xt and Wy
n_x, m = xt.shape
n_y, n_a = Wy.shape
### START CODE HERE ###
# Concatenate a_prev and xt (≈3 lines)
concat = np.zeros([n_a+n_x,m])
concat[: n_a, :] = a_prev
concat[n_a :, :] = xt
# Compute values for ft, it, cct, c_next, ot, a_next using the formulas given figure (4) (≈6 lines)
ft = sigmoid(np.dot(Wf,concat) + bf)
it = sigmoid(np.dot(Wi,concat) + bi)
cct = np.tanh(np.dot(Wc,concat) + bc)
c_next = ft*c_prev + it*cct
ot = sigmoid(np.dot(Wo,concat) + bo)
a_next = ot*np.tanh(c_next)
# Compute prediction of the LSTM cell (≈1 line)
yt_pred = softmax(np.dot(Wy, a_next) + by)
### END CODE HERE ###
# store values needed for backward propagation in cache
cache = (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters)
return a_next, c_next, yt_pred, cache
# In[9]:
np.random.seed(1)
xt = np.random.randn(3,10)
a_prev = np.random.randn(5,10)
c_prev = np.random.randn(5,10)
Wf = np.random.randn(5, 5+3)
bf = np.random.randn(5,1)
Wi = np.random.randn(5, 5+3)
bi = np.random.randn(5,1)
Wo = np.random.randn(5, 5+3)
bo = np.random.randn(5,1)
Wc = np.random.randn(5, 5+3)
bc = np.random.randn(5,1)
Wy = np.random.randn(2,5)
by = np.random.randn(2,1)
parameters = {"Wf": Wf, "Wi": Wi, "Wo": Wo, "Wc": Wc, "Wy": Wy, "bf": bf, "bi": bi, "bo": bo, "bc": bc, "by": by}
a_next, c_next, yt, cache = lstm_cell_forward(xt, a_prev, c_prev, parameters)
print("a_next[4] = ", a_next[4])
print("a_next.shape = ", a_next.shape)
print("c_next[2] = ", c_next[2])
print("c_next.shape = ", c_next.shape)
print("yt[1] =", yt[1])
print("yt.shape = ", yt.shape)
print("cache[1][3] =", cache[1][3])
print("len(cache) = ", len(cache))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **a_next[4]**:
# </td>
# <td>
# [-0.66408471 0.0036921 0.02088357 0.22834167 -0.85575339 0.00138482
# 0.76566531 0.34631421 -0.00215674 0.43827275]
# </td>
# </tr>
# <tr>
# <td>
# **a_next.shape**:
# </td>
# <td>
# (5, 10)
# </td>
# </tr>
# <tr>
# <td>
# **c_next[2]**:
# </td>
# <td>
# [ 0.63267805 1.00570849 0.35504474 0.20690913 -1.64566718 0.11832942
# 0.76449811 -0.0981561 -0.74348425 -0.26810932]
# </td>
# </tr>
# <tr>
# <td>
# **c_next.shape**:
# </td>
# <td>
# (5, 10)
# </td>
# </tr>
# <tr>
# <td>
# **yt[1]**:
# </td>
# <td>
# [ 0.79913913 0.15986619 0.22412122 0.15606108 0.97057211 0.31146381
# 0.00943007 0.12666353 0.39380172 0.07828381]
# </td>
# </tr>
# <tr>
# <td>
# **yt.shape**:
# </td>
# <td>
# (2, 10)
# </td>
# </tr>
# <tr>
# <td>
# **cache[1][3]**:
# </td>
# <td>
# [-0.16263996 1.03729328 0.72938082 -0.54101719 0.02752074 -0.30821874
# 0.07651101 -1.03752894 1.41219977 -0.37647422]
# </td>
# </tr>
# <tr>
# <td>
# **len(cache)**:
# </td>
# <td>
# 10
# </td>
# </tr>
#
# </table>
# ### 2.2 - Forward pass for LSTM
#
# Now that you have implemented one step of an LSTM, you can iterate it with a for-loop to process a sequence of $T_x$ inputs.
#
# <img src="images/LSTM_rnn.png" style="width:500;height:300px;">
# <caption><center> **Figure 4**: LSTM over multiple time-steps. </center></caption>
#
# **Exercise:** Implement `lstm_forward()` to run an LSTM over $T_x$ time-steps.
#
# **Note**: $c^{\langle 0 \rangle}$ is initialized with zeros.
# In[10]:
# GRADED FUNCTION: lstm_forward
def lstm_forward(x, a0, parameters):
"""
    Implement the forward propagation of the recurrent neural network using an LSTM-cell described in Figure (4).
Arguments:
x -- Input data for every time-step, of shape (n_x, m, T_x).
a0 -- Initial hidden state, of shape (n_a, m)
parameters -- python dictionary containing:
Wf -- Weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x)
bf -- Bias of the forget gate, numpy array of shape (n_a, 1)
Wi -- Weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x)
bi -- Bias of the update gate, numpy array of shape (n_a, 1)
Wc -- Weight matrix of the first "tanh", numpy array of shape (n_a, n_a + n_x)
bc -- Bias of the first "tanh", numpy array of shape (n_a, 1)
Wo -- Weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x)
bo -- Bias of the output gate, numpy array of shape (n_a, 1)
Wy -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
Returns:
a -- Hidden states for every time-step, numpy array of shape (n_a, m, T_x)
y -- Predictions for every time-step, numpy array of shape (n_y, m, T_x)
caches -- tuple of values needed for the backward pass, contains (list of all the caches, x)
"""
# Initialize "caches", which will track the list of all the caches
caches = []
### START CODE HERE ###
# Retrieve dimensions from shapes of x and parameters['Wy'] (≈2 lines)
n_x, m, T_x = x.shape
n_y, n_a = parameters['Wy'].shape
# initialize "a", "c" and "y" with zeros (≈3 lines)
a = np.zeros([n_a, m, T_x])
c = np.zeros([n_a, m, T_x])
y = np.zeros([n_y, m, T_x])
# Initialize a_next and c_next (≈2 lines)
a_next = a0
c_next = np.zeros([n_a, m])
# loop over all time-steps
for t in range(T_x):
# Update next hidden state, next memory state, compute the prediction, get the cache (≈1 line)
a_next, c_next, yt, cache = lstm_cell_forward(x[:,:,t], a_next, c_next, parameters)
# Save the value of the new "next" hidden state in a (≈1 line)
a[:,:,t] = a_next
# Save the value of the prediction in y (≈1 line)
y[:,:,t] = yt
# Save the value of the next cell state (≈1 line)
c[:,:,t] = c_next
# Append the cache into caches (≈1 line)
caches.append(cache)
### END CODE HERE ###
# store values needed for backward propagation in cache
caches = (caches, x)
return a, y, c, caches
# In[11]:
np.random.seed(1)
x = np.random.randn(3,10,7)
# Use inception class to access these
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from keras.utils import plot_model
from sklearn.metrics import confusion_matrix, f1_score, roc_curve
from keras.models import load_model
class plots:
"""
Class for making plots for the inception model.
Functions
_plotCNN
_plotF1
_plotParamProb
_plotROC
"""
def _plotCNN(self, to_file='graph.png'):
plot_model(self.model_, to_file=to_file)
def _plotROC(self, data='test', save=False, to_file='roc.pdf', fontsize=20):
"""
        Function for plotting the ROC curve.
        To call:
            _plotROC(data, save, to_file, fontsize)
        Parameters:
            data        which dataset to use ('test' by default)
            save        (boolean) save image
            to_file     file to save image to
            fontsize    fontsize of axis labels
        """
try:
self.fpr_
self.tpr_
except:
self._getROC(data)
plt.figure(1)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.plot(self.fpr_, self.tpr_)
plt.xlabel(r'$\rm FPR$', fontsize=fontsize)
plt.ylabel(r'$\rm TPR$', fontsize=fontsize)
plt.tight_layout()
if save:
plt.savefig(to_file)
plt.close('all')
else:
plt.show()
def _plotF1(self, step=0.025, save=False, to_file='f1_score.pdf', fontsize=20):
"""
Function for plotting the F1 score as a function
of the threshold probability.
To call:
_plotF1(step, save=False, to_file, fontsize=20)
Parameters:
step stepsize to take (0.5 to 1.0)
save (boolean) save image
to_file file to save image to
fontsize fontsize of axis labels
"""
try:
self.threshold_
self.F1_
except:
self._getF1(step)
plt.figure(1)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.plot(self.threshold_, self.F1_)
plt.xlabel(r'$p_\mathrm{cutoff}$', fontsize=fontsize)
plt.ylabel(r'$F_{1} \, \mathrm{score}$', fontsize=fontsize)
plt.tight_layout()
if save:
plt.savefig(to_file)
plt.close('all')
else:
plt.show()
def _plotParamProb(self, param, kind='kde', gridsize=50, save=False, to_file="FluxProb.pdf", fontscale=1.25):
"""
Function for plotting a parameter of the second
component against its probability of being
complex, as measured by the model.
To call:
            _plotParamProb(param, kind, gridsize, save, to_file, fontscale)
Parameters:
param column name in self.dfComplex_
kind seaborn jointplot params: "kde", "hex", etc.
gridsize smoothing parameter
save (boolean) save image
            to_file     filepath to save image
fontscale axes label scaling
"""
try:
self.dfComplex_
except:
self._getComplexParams()
# ===================================================
# Dictionary for x-axis label
# ===================================================
label = {
"flux": r'$F_{2}$',
"depth": r'$\Delta \phi$',
"chi": r'$\Delta \chi$',
"sig": r'$\sigma_\mathrm{noise}$'
}
# ===================================================
# 1) Retrieve the flux of the second component
# 2) Retrieve the model's probability that the
# source is complex
# ===================================================
valu = pd.Series(self.dfComplex_[param], name=label[param])
prob = pd.Series(self.dfComplex_["prob"], name=r'$p_\mathrm{complex}$')
# ===================================================
# Create the plot
# ===================================================
sns.set(font_scale=fontscale)
sns.jointplot(valu, prob, kind=kind, gridsize=gridsize)
# ===================================================
# Save or display the image
# ===================================================
if save:
plt.savefig(to_file)
plt.close('all')
else:
plt.show()
def _plotBinaryParamProb(self, param, save=False, to_file='param_binary.pdf', fontsize=20,
s=10, alpha=0.05, cComplex='darkorange', cSimple='dodgerblue'):
plt.figure()
plt.scatter(self.dfSimple_[param], self.dfSimple_['prob'], color=cSimple, alpha=alpha, s=s)
plt.scatter(self.dfComplex_[param], self.dfComplex_['prob'], color=cComplex, alpha=alpha, s=s)
plt.xlabel(r'$\sigma$', fontsize=fontsize)
plt.ylabel(r'$p_\mathrm{complex}$', fontsize=fontsize)
if save:
plt.savefig(to_file)
plt.close('all')
else:
plt.show()
def _plotLoss(self, logfile=None, save=False, to_file='loss_vs_epoch.pdf', fontsize=20):
# ===================================================
# Load in the logfile or test to see if a
# logfile has already been loaded
# ===================================================
if logfile == None:
try:
self.dfLog_
except:
print('Please pass in the name of a logfile')
sys.exit(1)
else:
try:
self._loadLog(logfile)
except:
print('Failed to load logfile')
sys.exit(1)
# -------------- Initialize the Graph ---------
fig = plt.figure()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel(r'$\rm Epoch$', fontsize=fontsize)
plt.ylabel(r'$\rm Loss$', fontsize=fontsize)
plt.plot(self.dfLog_.index, self.dfLog_['loss'], label='Training Loss')
plt.plot(self.dfLog_.index, self.dfLog_['val_loss'], label='Validation Loss')
plt.legend(loc='best', fontsize=15)
if save:
plt.savefig(to_file)
plt.close()
else:
plt.show()
plt.close()
def _plotAcc(self, logfile=None, save=False, to_file='acc_vs_epoch.pdf', fontsize=20):
"""
Function for plotting the accuracy as a function of epoch.
To call:
            _plotAcc(logfile, save, to_file, fontsize)
        Parameters:
            logfile     log file to load (optional if one is already loaded)
            save        (boolean) save image
            to_file     file to save image to
            fontsize    fontsize of axis labels
"""
# ===================================================
# Load in the logfile or test to see if a
# logfile has already been loaded
# ===================================================
if logfile == None:
try:
self.dfLog_
except:
print('Please pass in the name of a logfile')
sys.exit(1)
else:
try:
self._loadLog(logfile)
except:
print('Failed to load logfile')
sys.exit(1)
# ===================================================
# Plot accuracy vs epoch
# ===================================================
fig = plt.figure()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.plot(self.dfLog_.index, self.dfLog_['binary_accuracy'], label='Training Binary Accuracy')
plt.plot(self.dfLog_.index, self.dfLog_['val_binary_accuracy'], label='Validation Binary Accuracy')
plt.xlabel('Epoch', fontsize=fontsize)
plt.ylabel('Binary Accuracy ', fontsize=fontsize)
plt.legend(loc='best', fontsize=15)
if save:
plt.savefig(to_file)
plt.close()
else:
plt.show()
plt.close()
'''
def _loadData(self, directory):
"""
Function for loading data arrays from a directory.
To call:
_loadModel(directory)
Parameters:
directory
"""
self.X_data = np.load(directory+'X_data.npy')
self.Y_data = np.load(directory+'label.npy')
#------ creation params --------
self.chi_data = np.load(directory+'chi.npy')
self.depth_data = np.load(directory+'depth.npy')
self.flux_data = np.load(directory+'flux.npy')
self.q_data = np.load(directory+'Q_data.npy')
self.s_data = np.load(directory+'S_data.npy')
self.sig_data = np.load(directory+'sig.npy')
self.u_data = np.load(directory+'U_data.npy')
'''
def _format_param_name(self, param_name):
"""
Function for formatting a string parameter name (chi, depth, etc....) to LateX
form for plot labels.
To call:
_format_param_name(param_name)
Parameters:
param_name
"""
if param_name == 'sigma':
return r'$\sigma$'
elif param_name == 'chi':
return r'$\Delta\chi$'
elif param_name == 'flux':
return r'$\Delta F$'
elif param_name == 'depth':
return r'$\Delta \phi$'
else:
return param_name
def _make_cut(self, param_array, param_name,num_cut=10,prob=0.5, save=False):
"""
Function for cutting along a single parameter value to test the model's performance over
a parameter range. For aid in finding parameter space that model works with certainty within.
Makes a plot showing the True Positive (TP) and True Negative (TN) rates as a function of the
supplied parameter.
To call:
_make_cut(param_array, param_name,num_cut, prob, save)
Parameters:
param_array
param_name
OPTIONAL:
num_cut -- number of cuts to make along the parameter
prob -- probability cutoff to classify as complex or simple
save -- True if want to save a .pdf
"""
cut_array = param_array
# ----------- sigma and other params are formatted differently, this handles either case ------
try:
cut_vals = np.linspace(0.,np.max(cut_array)[0]*.9,num_cut)
oned =False
except:
cut_vals = np.linspace(0.,np.max(cut_array)*.9,num_cut)
oned = True
matrix_vals = []
# --------- make a series of cuts and save results for plotting ----------
for c in cut_vals:
print (c)
#do the cut
float_check = type(0.1); tuple_check = type((0,1))
postcut = [];kept=[]
for i in range(len(cut_array)):
val = cut_array[i]
# ---------- once again handle tuples or floats depending on parameter format ----------
if type(val) == tuple_check:
if abs(val[0]-val[1]) >= c:
postcut.append(abs(val[0]-val[1]))
kept.append(i)
else:
if val >= c:
postcut.append(val)
kept.append(i)
try:
# -------- the subset of data --------------
X_new=np.array([self.X_data[k] for k in kept])
Y_new=np.array([self.Y_data[k] for k in kept])
# ----------- do predictions on the subset ----------
probs = self.model.predict(X_new)[:,1]
# --------- probability cutoff for simple vs complex -------------
predictions = np.where(probs > prob, 1, 0)
'''
#------------ Confusion Matrix -------------
[simple marked as simple simple marked as complex]
[complex marked as simple complex marked as complex]
'''
cm = confusion_matrix(Y_new, predictions)
print(cm)
matrix_vals.append(cm)
except:
print ('Nothing in that cutoff, continuing...')
fstring = self._format_param_name(param_name)
fig = plt.figure(1)
try:
plt.scatter(cut_vals,[float(matrix_vals[i][0,0])/(matrix_vals[i][0,0]+matrix_vals[i][0,1])*100. for i in range(len(matrix_vals))],label='True Simple',c='g')
except:
print ('No simple sources in subsample...')
try:
plt.scatter(cut_vals,[float(matrix_vals[i][1,1])/(matrix_vals[i][1,0]+matrix_vals[i][1,1])*100. for i in range(len(matrix_vals))],label='True Complex',c='b')
except:
print ('No complex sources in subsample...')
plt.xlabel(fstring)
plt.ylabel('Percent Correct')
plt.title(r'Percent Correct over '+fstring)
plt.legend(loc=(0.3,0.8),fontsize=5)
if save:
            plt.savefig(param_name + '_plot.png', bbox_inches='tight')
else:
plt.show()
plt.close()
def _make_2d_cut(self, param_arr1, arr_name1, param_arr2, arr_name2,num_cut=10,prob=0.5,save=False):
"""
Function for cutting along two parameter values to test the model's performance over
a parameter space. For aid in finding parameter space that model works with certainty within.
Makes a plot showing the True Positive (TP) and True Negative (TN) rates as a function of the
supplied parameters. Functions similarly to _make_cut() above.
To call:
_make_2d_cut(param_arr1, arr_name1, param_arr2, arr_name2, num_cut, prob, save)
Parameters:
param_arr1
arr_name1
param_arr2
arr_name2
OPTIONAL:
num_cut -- number of cuts to make along the parameter
prob -- probability cutoff to classify as complex or simple
save -- True if want to save a .pdf
"""
# ----------- sigma and other params are formatted differently, this handles either case ------
try:
cut_vals1 = np.linspace(0.,np.max(param_arr1)[0]*.9,num_cut)
except:
cut_vals1 = np.linspace(0.,np.max(param_arr1)*.9,num_cut)
try:
cut_vals2 = np.linspace(0.,np.max(param_arr2)[0]*.9,num_cut)
except:
cut_vals2 = np.linspace(0.,np.max(param_arr2)*.9,num_cut)
matrix_vals_c = np.zeros((len(cut_vals1),len(cut_vals2)))
matrix_vals_s = np.zeros((len(cut_vals1),len(cut_vals2)))
# --------- make a series of cuts and save results for plotting ----------
for i in range(len(cut_vals1)):
for j in range(len(cut_vals2)):
#do the cut
c1 = cut_vals1[i]; c2 = cut_vals2[j]
float_check = type(0.1); tuple_check = type((0,1))
postcut = [];kept=[]
for k in range(len(param_arr1)):
val1 = param_arr1[k]
val2 = param_arr2[k]
# ---------- once again handle tuples or floats depending on parameter format ----------
if type(val1) == tuple_check:
if abs(val1[0]-val1[1]) >= c1 and abs(val2[0]-val2[1]) >= c2:
kept.append(k)
else:
if val1 >= c1 and val2 >= c2:
kept.append(k)
try:
# -------- the subset of data --------------
X_new=np.array([self.X_data[k] for k in kept])
Y_new=np.array([self.Y_data[k] for k in kept])
# ----------- do predictions on the subset ----------
probs = self.model.predict(X_new)[:,1]
# --------- probability cutoff for simple vs complex -------------
                    predictions = np.where(probs > prob, 1, 0)
"""
Test DOE Driver and Generators.
"""
import unittest
import os
import os.path
import glob
import csv
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.paraboloid import Paraboloid
from openmdao.test_suite.components.paraboloid_distributed import DistParab
from openmdao.test_suite.groups.parallel_groups import FanInGrouped
from openmdao.utils.assert_utils import assert_near_equal
from openmdao.utils.general_utils import run_driver, printoptions
from openmdao.utils.testing_utils import use_tempdirs
from openmdao.utils.mpi import MPI
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
class ParaboloidArray(om.ExplicitComponent):
"""
Evaluates the equation f(x,y) = (x-3)^2 + x*y + (y+4)^2 - 3.
Where x and y are xy[0] and xy[1] respectively.
"""
def setup(self):
self.add_input('xy', val=np.array([0., 0.]))
self.add_output('f_xy', val=0.0)
def compute(self, inputs, outputs):
"""
f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3
"""
x = inputs['xy'][0]
y = inputs['xy'][1]
outputs['f_xy'] = (x - 3.0)**2 + x * y + (y + 4.0)**2 - 3.0
class ParaboloidDiscrete(om.ExplicitComponent):
def setup(self):
self.add_discrete_input('x', val=10, tags='xx')
self.add_discrete_input('y', val=0, tags='yy')
self.add_discrete_output('f_xy', val=0, tags='ff')
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
x = discrete_inputs['x']
y = discrete_inputs['y']
f_xy = (x - 3.0)**2 + x * y + (y + 4.0)**2 - 3.0
discrete_outputs['f_xy'] = int(f_xy)
class ParaboloidDiscreteArray(om.ExplicitComponent):
def setup(self):
self.add_discrete_input('x', val=np.ones((2, )), tags='xx')
self.add_discrete_input('y', val=np.ones((2, )), tags='yy')
self.add_discrete_output('f_xy', val=np.ones((2, )), tags='ff')
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
x = discrete_inputs['x']
y = discrete_inputs['y']
f_xy = (x - 3.0)**2 + x * y + (y + 4.0)**2 - 3.0
discrete_outputs['f_xy'] = f_xy.astype(np.int)
class TestErrors(unittest.TestCase):
def test_generator_check(self):
prob = om.Problem()
with self.assertRaises(TypeError) as err:
prob.driver = om.DOEDriver(om.FullFactorialGenerator)
self.assertEqual(str(err.exception),
"DOEDriver requires an instance of DOEGenerator, "
"but a class object was found: FullFactorialGenerator")
with self.assertRaises(TypeError) as err:
prob.driver = om.DOEDriver(om.Problem())
self.assertEqual(str(err.exception),
"DOEDriver requires an instance of DOEGenerator, "
"but an instance of Problem was found.")
def test_lhc_criterion(self):
with self.assertRaises(ValueError) as err:
om.LatinHypercubeGenerator(criterion='foo')
self.assertEqual(str(err.exception),
"Invalid criterion 'foo' specified for LatinHypercubeGenerator. "
"Must be one of ['center', 'c', 'maximin', 'm', 'centermaximin', "
"'cm', 'correlation', 'corr', None].")
@use_tempdirs
class TestDOEDriver(unittest.TestCase):
def setUp(self):
self.expected_fullfact3 = [
{'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
{'x': np.array([.5]), 'y': np.array([0.]), 'f_xy': np.array([19.25])},
{'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
{'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},
{'x': np.array([.5]), 'y': np.array([.5]), 'f_xy': np.array([23.75])},
{'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},
{'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
{'x': np.array([.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},
{'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
]
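        # Sanity check (added for illustration) of the table above: the Paraboloid component
        # computes f(x, y) = (x - 3)**2 + x*y + (y + 4)**2 - 3, so f(0, 0) = 9 + 0 + 16 - 3 = 22.0
        # and f(1, 1) = 4 + 1 + 25 - 3 = 27.0, matching the first and last expected cases.
        assert (0. - 3.) ** 2 + 0. * 0. + (0. + 4.) ** 2 - 3. == 22.0
        assert (1. - 3.) ** 2 + 1. * 1. + (1. + 4.) ** 2 - 3. == 27.0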
def test_no_generator(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.), promotes=['*'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_design_var('x', lower=-10, upper=10)
model.add_design_var('y', lower=-10, upper=10)
model.add_objective('f_xy')
prob.driver = om.DOEDriver()
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 0)
def test_list(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.setup()
# create a list of DOE cases
case_gen = om.FullFactorialGenerator(levels=3)
cases = list(case_gen(model.get_design_vars(recurse=True)))
# create DOEDriver using provided list of cases
prob.driver = om.DOEDriver(cases)
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.run_driver()
prob.cleanup()
expected = self.expected_fullfact3
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 9)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
def test_list_errors(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.setup()
# data does not contain a list
cases = {'desvar': 1.0}
with self.assertRaises(RuntimeError) as err:
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
self.assertEqual(str(err.exception), "Invalid DOE case data, "
"expected a list but got a dict.")
# data contains a list of non-list
cases = [{'desvar': 1.0}]
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case found, "
"expecting a list of name/value pairs:\n{'desvar': 1.0}")
# data contains a list of list, but one has the wrong length
cases = [
[['p1.x', 0.], ['p2.y', 0.]],
[['p1.x', 1.], ['p2.y', 1., 'foo']]
]
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case found, "
"expecting a list of name/value pairs:\n"
"[['p1.x', 1.0], ['p2.y', 1.0, 'foo']]")
# data contains a list of list, but one case has an invalid design var
cases = [
[['p1.x', 0.], ['p2.y', 0.]],
[['p1.x', 1.], ['p2.z', 1.]]
]
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case found, "
"'p2.z' is not a valid design variable:\n"
"[['p1.x', 1.0], ['p2.z', 1.0]]")
# data contains a list of list, but one case has multiple invalid design vars
cases = [
[['p1.x', 0.], ['p2.y', 0.]],
[['p1.y', 1.], ['p2.z', 1.]]
]
prob.driver = om.DOEDriver(generator=om.ListGenerator(cases))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case found, "
"['p1.y', 'p2.z'] are not valid design variables:\n"
"[['p1.y', 1.0], ['p2.z', 1.0]]")
def test_csv(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.setup()
# create a list of DOE cases
case_gen = om.FullFactorialGenerator(levels=3)
cases = list(case_gen(model.get_design_vars(recurse=True)))
# generate CSV file with cases
header = [var for (var, val) in cases[0]]
with open('cases.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for case in cases:
writer.writerow([val for _, val in case])
# create DOEDriver using generated CSV file
prob.driver = om.DOEDriver(om.CSVGenerator('cases.csv'))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.run_driver()
prob.cleanup()
expected = self.expected_fullfact3
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 9)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
def test_csv_array(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', [0., 1.]))
model.add_subsystem('p2', om.IndepVarComp('y', [0., 1.]))
model.add_subsystem('comp1', Paraboloid())
model.add_subsystem('comp2', Paraboloid())
model.connect('p1.x', 'comp1.x', src_indices=[0])
model.connect('p2.y', 'comp1.y', src_indices=[0])
model.connect('p1.x', 'comp2.x', src_indices=[1])
model.connect('p2.y', 'comp2.y', src_indices=[1])
model.add_design_var('p1.x', lower=0.0, upper=1.0)
model.add_design_var('p2.y', lower=0.0, upper=1.0)
prob.setup()
# create a list of DOE cases
case_gen = om.FullFactorialGenerator(levels=2)
cases = list(case_gen(model.get_design_vars(recurse=True)))
# generate CSV file with cases
header = [var for var, _ in cases[0]]
with open('cases.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for case in cases:
writer.writerow([val for _, val in case])
# create DOEDriver using generated CSV file
prob.driver = om.DOEDriver(om.CSVGenerator('cases.csv'))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.run_driver()
prob.cleanup()
expected = [
{'p1.x': np.array([0., 0.]), 'p2.y': np.array([0., 0.])},
{'p1.x': np.array([1., 0.]), 'p2.y': np.array([0., 0.])},
{'p1.x': np.array([0., 1.]), 'p2.y': np.array([0., 0.])},
{'p1.x': np.array([1., 1.]), 'p2.y': np.array([0., 0.])},
{'p1.x': np.array([0., 0.]), 'p2.y': np.array([1., 0.])},
{'p1.x': np.array([1., 0.]), 'p2.y': np.array([1., 0.])},
{'p1.x': np.array([0., 1.]), 'p2.y': np.array([1., 0.])},
{'p1.x': np.array([1., 1.]), 'p2.y': np.array([1., 0.])},
{'p1.x': np.array([0., 0.]), 'p2.y': np.array([0., 1.])},
{'p1.x': np.array([1., 0.]), 'p2.y': np.array([0., 1.])},
{'p1.x': np.array([0., 1.]), 'p2.y': np.array([0., 1.])},
{'p1.x': np.array([1., 1.]), 'p2.y': np.array([0., 1.])},
{'p1.x': np.array([0., 0.]), 'p2.y': np.array([1., 1.])},
{'p1.x': np.array([1., 0.]), 'p2.y': np.array([1., 1.])},
{'p1.x': np.array([0., 1.]), 'p2.y': np.array([1., 1.])},
{'p1.x': np.array([1., 1.]), 'p2.y': np.array([1., 1.])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 16)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
self.assertEqual(outputs['p1.x'][0], expected_case['p1.x'][0])
self.assertEqual(outputs['p2.y'][0], expected_case['p2.y'][0])
self.assertEqual(outputs['p1.x'][1], expected_case['p1.x'][1])
self.assertEqual(outputs['p2.y'][1], expected_case['p2.y'][1])
def test_csv_errors(self):
# test invalid file name
with self.assertRaises(RuntimeError) as err:
om.CSVGenerator(1.23)
self.assertEqual(str(err.exception),
"'1.23' is not a valid file name.")
# test file not found
with self.assertRaises(RuntimeError) as err:
om.CSVGenerator('nocases.csv')
self.assertEqual(str(err.exception),
"File not found: nocases.csv")
# create problem and a list of DOE cases
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.setup()
case_gen = om.FullFactorialGenerator(levels=2)
cases = list(case_gen(model.get_design_vars(recurse=True)))
# test CSV file with an invalid design var
header = [var for var, _ in cases[0]]
header[-1] = 'foobar'
with open('cases.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for case in cases:
writer.writerow([val for _, val in case])
prob.driver = om.DOEDriver(om.CSVGenerator('cases.csv'))
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case file, "
"'foobar' is not a valid design variable.")
# test CSV file with invalid design vars
header = [var + '_bad' for var, _ in cases[0]]
with open('cases.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for case in cases:
writer.writerow([val for _, val in case])
with self.assertRaises(RuntimeError) as err:
prob.run_driver()
self.assertEqual(str(err.exception), "Invalid DOE case file, "
"%s are not valid design variables." %
str(header))
# test CSV file with invalid values
header = [var for var, _ in cases[0]]
with open('cases.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for case in cases:
writer.writerow([np.ones((2, 2)) * val for _, val in case])
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= LooseVersion("1.14"):
opts = {'legacy': '1.13'}
else:
opts = {}
with printoptions(**opts):
# have to use regex to handle differences in numpy print formats for shape
msg = f"Error assigning p1.x = \[ 0. 0. 0. 0.\]: could not broadcast " \
f"input array from shape \(4.*\) into shape \(1.*\)"
with self.assertRaisesRegex(ValueError, msg):
prob.run_driver()
def test_uniform(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_design_var('x', lower=-10, upper=10)
model.add_design_var('y', lower=-10, upper=10)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(om.UniformGenerator(num_samples=5, seed=0))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
# all values should be between -10 and 10, check expected values for seed = 0
expected = [
{'x': np.array([0.97627008]), 'y': np.array([4.30378733])},
{'x': np.array([2.05526752]), 'y': np.array([0.89766366])},
{'x': np.array([-1.52690401]), 'y': np.array([2.91788226])},
{'x': np.array([-1.24825577]), 'y': np.array([7.83546002])},
{'x': np.array([9.27325521]), 'y': np.array([-2.33116962])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 5)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y'):
assert_near_equal(outputs[name], expected_case[name], 1e-4)
def test_full_factorial(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(generator=om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
expected = self.expected_fullfact3
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 9)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
def test_full_factorial_factoring(self):
class Digits2Num(om.ExplicitComponent):
"""
            Builds a 4-digit number from two 2-element vectors (e.g. x=[1., 2.], y=[3., 4.] gives f=1234).
            For single-digit integers it always gives a unique output number.
"""
def setup(self):
self.add_input('x', val=np.array([0., 0.]))
self.add_input('y', val=np.array([0., 0.]))
self.add_output('f', val=0.0)
def compute(self, inputs, outputs):
x = inputs['x']
y = inputs['y']
outputs['f'] = x[0] * 1000 + x[1] * 100 + y[0] * 10 + y[1]
prob = om.Problem()
model = prob.model
model.set_input_defaults('x', np.array([0.0, 0.0]))
model.set_input_defaults('y', np.array([0.0, 0.0]))
model.add_subsystem('comp', Digits2Num(), promotes=['*'])
model.add_design_var('x', lower=0.0, upper=np.array([1.0, 2.0]))
model.add_design_var('y', lower=0.0, upper=np.array([3.0, 4.0]))
model.add_objective('f')
prob.driver = om.DOEDriver(generator=om.FullFactorialGenerator(levels=2))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
objs = [int(cr.get_case(case).outputs['f']) for case in cases]
self.assertEqual(len(objs), 16)
# Testing uniqueness. If all elements are unique, it should be the same length as the
# number of cases
self.assertEqual(len(set(objs)), 16)
def test_full_factorial_array(self):
prob = om.Problem()
model = prob.model
model.set_input_defaults('xy', np.array([0., 0.]))
model.add_subsystem('comp', ParaboloidArray(), promotes=['*'])
model.add_design_var('xy', lower=np.array([-10., -50.]), upper=np.array([10., 50.]))
model.add_objective('f_xy')
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
expected = [
{'xy': np.array([-10., -50.])},
{'xy': np.array([0., -50.])},
{'xy': np.array([10., -50.])},
{'xy': np.array([-10., 0.])},
{'xy': np.array([0., 0.])},
{'xy': np.array([10., 0.])},
{'xy': np.array([-10., 50.])},
{'xy': np.array([0., 50.])},
{'xy': np.array([10., 50.])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 9)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
self.assertEqual(outputs['xy'][0], expected_case['xy'][0])
self.assertEqual(outputs['xy'][1], expected_case['xy'][1])
def test_full_fact_dict_levels(self):
# Specifying levels only for one DV, the other is defaulted
prob = om.Problem()
model = prob.model
expected = [
{'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
{'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
{'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},
{'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},
{'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
{'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
]
# size = prob.comm.size
# rank = prob.comm.rank
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(generator=om.FullFactorialGenerator(levels={"y": 3}))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 6)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
self.assertEqual(outputs['x'], expected_case['x'])
self.assertEqual(outputs['y'], expected_case['y'])
self.assertEqual(outputs['f_xy'], expected_case['f_xy'])
def test_generalized_subset(self):
# All DVs have the same number of levels
prob = om.Problem()
model = prob.model
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(generator=om.GeneralizedSubsetGenerator(levels=2, reduction=2))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
expected = [
{'x': np.array([0.0]), 'y': np.array([0.0]), 'f_xy': np.array([22.0])},
{'x': np.array([1.0]), 'y': np.array([1.0]), 'f_xy': np.array([27.0])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver')
self.assertEqual(len(cases), 2)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
def test_generalized_subset_dict_levels(self):
# Number of variables specified individually for all DVs (scalars).
prob = om.Problem()
model = prob.model
model.set_input_defaults('x', 0.0)
model.set_input_defaults('y', 0.0)
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(generator=om.GeneralizedSubsetGenerator(levels={'x': 3, 'y': 6}, reduction=2))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
expected = [
{'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.])},
{'x': np.array([0.]), 'y': np.array([0.4]), 'f_xy': np.array([25.36])},
{'x': np.array([0.]), 'y': np.array([0.8]), 'f_xy': np.array([29.04])},
{'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.])},
{'x': np.array([1.]), 'y': np.array([0.4]), 'f_xy': np.array([20.76])},
{'x': np.array([1.]), 'y': np.array([0.8]), 'f_xy': np.array([24.84])},
{'x': np.array([0.5]), 'y': np.array([0.2]), 'f_xy': np.array([20.99])},
{'x': np.array([0.5]), 'y': np.array([0.6]), 'f_xy': np.array([24.71])},
{'x': np.array([0.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver')
self.assertEqual(len(cases), 9)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertAlmostEqual(outputs[name][0], expected_case[name][0])
def test_generalized_subset_array(self):
# Number of levels specified individually for all DVs (arrays).
class Digits2Num(om.ExplicitComponent):
"""
Makes from two vectors with 2 elements a 4 digit number.
For singe digit integers always gives a unique output number.
"""
def setup(self):
self.add_input('x', val=np.array([0., 0.]))
self.add_input('y', val=np.array([0., 0.]))
self.add_output('f', val=0.0)
def compute(self, inputs, outputs):
x = inputs['x']
y = inputs['y']
outputs['f'] = x[0] * 1000 + x[1] * 100 + y[0] * 10 + y[1]
prob = om.Problem()
model = prob.model
model.set_input_defaults('x', np.array([0.0, 0.0]))
model.set_input_defaults('y', np.array([0.0, 0.0]))
model.add_subsystem('comp', Digits2Num(), promotes=['*'])
model.add_design_var('x', lower=0.0, upper=np.array([1.0, 2.0]))
model.add_design_var('y', lower=0.0, upper=np.array([3.0, 4.0]))
model.add_objective('f')
prob.driver = om.DOEDriver(generator=om.GeneralizedSubsetGenerator(levels={'x': 5, 'y': 8}, reduction=14))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
objs = [int(cr.get_case(case).outputs['f']) for case in cases]
self.assertEqual(len(objs), 104) # The number can be verified with standalone pyDOE2
# Testing uniqueness. If all elements are unique, it should be the same length as the number of cases
self.assertEqual(len(set(objs)), 104)
def test_plackett_burman(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(om.PlackettBurmanGenerator())
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
expected = [
{'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
{'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
{'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
{'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 4)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
def test_box_behnken(self):
upper = 10.
center = 1
prob = om.Problem()
model = prob.model
indep = model.add_subsystem('indep', om.IndepVarComp(), promotes=['*'])
indep.add_output('x', 0.0)
indep.add_output('y', 0.0)
indep.add_output('z', 0.0)
model.add_subsystem('comp', om.ExecComp('a = x**2 + y - z'), promotes=['*'])
model.add_design_var('x', lower=0., upper=upper)
model.add_design_var('y', lower=0., upper=upper)
model.add_design_var('z', lower=0., upper=upper)
model.add_objective('a')
prob.driver = om.DOEDriver(om.BoxBehnkenGenerator(center=center))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
# The Box-Behnken design for 3 factors involves three blocks, in each of
        # which 2 factors are varied through the 4 possible combinations of high & low.
# It also includes centre points (all factors at their central values).
# ref: https://en.wikipedia.org/wiki/Box-Behnken_design
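        # Illustrative sketch (assumes the standalone pyDOE2 package that the
        # BoxBehnkenGenerator wraps is available):
        #
        #     import pyDOE2
        #     design = pyDOE2.bbdesign(3, center=1)   # 3 factors, 1 center point
        #     design.shape                            # -> (13, 3), i.e. (3*4) + 1 runs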
self.assertEqual(len(cases), (3*4)+center)
expected = [
{'x': np.array([0.]), 'y': np.array([0.]), 'z': np.array([5.])},
{'x': np.array([10.]), 'y': np.array([0.]), 'z': np.array([5.])},
{'x': np.array([0.]), 'y': np.array([10.]), 'z': np.array([5.])},
{'x': np.array([10.]), 'y': np.array([10.]), 'z': np.array([5.])},
{'x': np.array([0.]), 'y': np.array([5.]), 'z': np.array([0.])},
{'x': np.array([10.]), 'y': np.array([5.]), 'z': np.array([0.])},
{'x': np.array([0.]), 'y': np.array([5.]), 'z': np.array([10.])},
{'x': np.array([10.]), 'y': np.array([5.]), 'z': np.array([10.])},
{'x': np.array([5.]), 'y': np.array([0.]), 'z': np.array([0.])},
{'x': np.array([5.]), 'y': np.array([10.]), 'z': np.array([0.])},
{'x': np.array([5.]), 'y': np.array([0.]), 'z': np.array([10.])},
{'x': np.array([5.]), 'y': np.array([10.]), 'z': np.array([10.])},
{'x': np.array([5.]), 'y': np.array([5.]), 'z': np.array([5.])},
]
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'z'):
self.assertEqual(outputs[name], expected_case[name])
def test_latin_hypercube(self):
samples = 4
bounds = np.array([
[-1, -10], # lower bounds for x and y
[1, 10] # upper bounds for x and y
])
xlb, xub = bounds[0][0], bounds[1][0]
ylb, yub = bounds[0][1], bounds[1][1]
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=xlb, upper=xub)
model.add_design_var('y', lower=ylb, upper=yub)
model.add_objective('f_xy')
prob.driver = om.DOEDriver()
prob.driver.options['generator'] = om.LatinHypercubeGenerator(samples=4, seed=0)
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
# the sample space for each variable should be divided into equal
# size buckets and each variable should have a value in each bucket
all_buckets = set(range(samples))
x_offset = - xlb
x_bucket_size = xub - xlb
x_buckets_filled = set()
y_offset = - ylb
y_bucket_size = yub - ylb
y_buckets_filled = set()
# expected values for seed = 0
expected = [
{'x': np.array([-0.19861831]), 'y': np.array([-6.42405317])},
{'x': np.array([0.2118274]), 'y': np.array([9.458865])},
{'x': np.array([0.71879361]), 'y': np.array([3.22947057])},
{'x': np.array([-0.72559325]), 'y': np.array([-2.27558409])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 4)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
x = outputs['x']
y = outputs['y']
bucket = int((x + x_offset) / (x_bucket_size / samples))
x_buckets_filled.add(bucket)
bucket = int((y + y_offset) / (y_bucket_size / samples))
y_buckets_filled.add(bucket)
assert_near_equal(x, expected_case['x'], 1e-4)
assert_near_equal(y, expected_case['y'], 1e-4)
self.assertEqual(x_buckets_filled, all_buckets)
self.assertEqual(y_buckets_filled, all_buckets)
def test_latin_hypercube_array(self):
samples = 4
bounds = np.array([
[-10, -50], # lower bounds for x and y
[10, 50] # upper bounds for x and y
])
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('xy', np.array([50., 50.])), promotes=['*'])
model.add_subsystem('comp', ParaboloidArray(), promotes=['*'])
model.add_design_var('xy', lower=bounds[0], upper=bounds[1])
model.add_objective('f_xy')
prob.driver = om.DOEDriver(om.LatinHypercubeGenerator(samples=4, seed=0))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
# the sample space for each variable should be divided into equal
# size buckets and each variable should have a value in each bucket
all_buckets = set(range(samples))
xlb, xub = bounds[0][0], bounds[1][0]
x_offset = - xlb
x_bucket_size = xub - xlb
x_buckets_filled = set()
ylb, yub = bounds[0][1], bounds[1][1]
y_offset = - ylb
y_bucket_size = yub - ylb
y_buckets_filled = set()
# expected values for seed = 0
expected = [
{'xy': np.array([-1.98618312, -32.12026584])},
{'xy': np.array([2.118274, 47.29432502])},
{'xy': np.array([7.18793606, 16.14735283])},
{'xy': np.array([-7.25593248, -11.37792043])},
]
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), 4)
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
x = outputs['xy'][0]
y = outputs['xy'][1]
bucket = int((x + x_offset) / (x_bucket_size / samples))
x_buckets_filled.add(bucket)
bucket = int((y + y_offset) / (y_bucket_size / samples))
y_buckets_filled.add(bucket)
assert_near_equal(x, expected_case['xy'][0], 1e-4)
assert_near_equal(y, expected_case['xy'][1], 1e-4)
self.assertEqual(x_buckets_filled, all_buckets)
self.assertEqual(y_buckets_filled, all_buckets)
def test_latin_hypercube_center(self):
samples = 4
upper = 10.
prob = om.Problem()
model = prob.model
indep = model.add_subsystem('indep', om.IndepVarComp())
indep.add_output('x', 0.0)
indep.add_output('y', 0.0)
model.add_subsystem('comp', Paraboloid())
model.connect('indep.x', 'comp.x')
model.connect('indep.y', 'comp.y')
model.add_design_var('indep.x', lower=0., upper=upper)
model.add_design_var('indep.y', lower=0., upper=upper)
model.add_objective('comp.f_xy')
prob.driver = om.DOEDriver(om.LatinHypercubeGenerator(samples=samples, criterion='c'))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
self.assertEqual(len(cases), samples)
# the sample space for each variable (0 to upper) should be divided into
# equal size buckets and each variable should have a value in each bucket
bucket_size = upper / samples
all_buckets = set(range(samples))
x_buckets_filled = set()
y_buckets_filled = set()
        # with criterion of 'center', each value should be in the center of its bucket
valid_values = [round(bucket_size * (bucket + 1 / 2), 3) for bucket in all_buckets]
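        # e.g. with upper=10 and samples=4: bucket_size = 2.5 and
        # valid_values = [1.25, 3.75, 6.25, 8.75]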
for case in cases:
outputs = cr.get_case(case).outputs
x = float(outputs['indep.x'])
y = float(outputs['indep.y'])
x_buckets_filled.add(int(x/bucket_size))
y_buckets_filled.add(int(y/bucket_size))
self.assertTrue(round(x, 3) in valid_values, '%f not in %s' % (x, valid_values))
self.assertTrue(round(y, 3) in valid_values, '%f not in %s' % (y, valid_values))
self.assertEqual(x_buckets_filled, all_buckets)
self.assertEqual(y_buckets_filled, all_buckets)
def test_record_bug(self):
# There was a bug that caused values to be recorded in driver_scaled form.
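        # With ref=3.0 on 'x' and scalers of 13.0 / -1 on 'z' / 'y' below, the
        # driver-scaled values would be x/3, 13*z and -y; the assertions at the
        # end check that the unscaled model values (10, 20, 30) were recorded.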
prob = om.Problem()
model = prob.model
ivc = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
ivc.add_output('x', val=1.)
model.add_subsystem('obj_comp', om.ExecComp('y=2*x'), promotes=['*'])
model.add_subsystem('con_comp', om.ExecComp('z=3*x'), promotes=['*'])
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.driver.recording_options['includes'] = ['*']
model.add_design_var('x', lower=0., upper=10., ref=3.0)
model.add_constraint('z', lower=2.0, scaler=13.0)
model.add_objective('y', scaler=-1)
prob.setup(check=True)
prob.run_driver()
cr = om.CaseReader("cases.sql")
final_case = cr.list_cases('driver', out_stream=None)[-1]
outputs = cr.get_case(final_case).outputs
assert_near_equal(outputs['x'], 10.0, 1e-7)
assert_near_equal(outputs['y'], 20.0, 1e-7)
assert_near_equal(outputs['z'], 30.0, 1e-7)
def test_discrete_desvar_list(self):
prob = om.Problem()
model = prob.model
# Add independent variables
indeps = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
indeps.add_discrete_output('x', 4)
indeps.add_discrete_output('y', 3)
# Add components
model.add_subsystem('parab', ParaboloidDiscrete(), promotes=['*'])
# Specify design variable range and objective
model.add_design_var('x')
model.add_design_var('y')
model.add_objective('f_xy')
samples = [[('x', 5), ('y', 1)],
[('x', 3), ('y', 6)],
[('x', -1), ('y', 3)],
]
# Setup driver for 3 cases at a time
prob.driver = om.DOEDriver(om.ListGenerator(samples))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
expected = [{'x': 5, 'y': 1, 'f_xy': 31},
{'x': 3, 'y': 6, 'f_xy': 115},
{'x': -1, 'y': 3, 'f_xy': 59},
]
self.assertEqual(len(cases), len(expected))
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
self.assertTrue(isinstance(outputs[name], int))
def test_discrete_desvar_alltypes(self):
# Make sure we can handle any allowed type for discrete variables.
class PassThrough(om.ExplicitComponent):
def setup(self):
self.add_discrete_input('x', val='abc')
self.add_discrete_output('y', val='xyz')
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
discrete_outputs['y'] = discrete_inputs['x']
prob = om.Problem()
model = prob.model
indeps = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
indeps.add_discrete_output('x', 'abc')
model.add_subsystem('parab', PassThrough(), promotes=['*'])
model.add_design_var('x')
model.add_constraint('y')
my_obj = Paraboloid()
samples = [[('x', 'abc'), ],
[('x', None), ],
[('x', my_obj, ), ]
]
prob.driver = om.DOEDriver(om.ListGenerator(samples))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
expected = ['abc', None]
for case, expected_value in zip(cases, expected):
outputs = cr.get_case(case).outputs
self.assertEqual(outputs['x'], expected_value)
# Can't read/write objects through SQL case.
self.assertEqual(prob['y'], my_obj)
def test_discrete_array_output(self):
prob = om.Problem()
model = prob.model
# Add independent variables
indeps = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
        indeps.add_discrete_output('x', np.ones((2, ), dtype=int))
        indeps.add_discrete_output('y', np.ones((2, ), dtype=int))
# Add components
model.add_subsystem('parab', ParaboloidDiscreteArray(), promotes=['*'])
# Specify design variable range and objective
model.add_design_var('x', np.array([5, 1]))
model.add_design_var('y', np.array([1, 4]))
model.add_objective('f_xy')
recorder = om.SqliteRecorder("cases.sql")
prob.driver.add_recorder(recorder)
prob.add_recorder(recorder)
prob.recording_options['record_inputs'] = True
prob.setup()
prob.run_driver()
prob.record("end")
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('problem', out_stream=None)
case = cr.get_case('end')
inputs = case.inputs
outputs = case.outputs
for name in ('x', 'y'):
self.assertTrue(isinstance(inputs[name], np.ndarray))
self.assertTrue(inputs[name].shape, (2,))
self.assertTrue(isinstance(outputs[name], np.ndarray))
self.assertTrue(outputs[name].shape, (2,))
def test_discrete_arraydesvar_list(self):
prob = om.Problem()
model = prob.model
# Add components
model.add_subsystem('parab', ParaboloidDiscreteArray(), promotes=['*'])
# Specify design variable range and objective
model.add_design_var('x')
model.add_design_var('y')
model.add_objective('f_xy')
samples = [[('x', np.array([5, 1])), ('y', np.array([1, 4]))],
[('x', np.array([3, 2])), ('y', np.array([6, -3]))],
[('x', np.array([-1, 0])), ('y', np.array([3, 5]))],
]
# Setup driver for 3 cases at a time
prob.driver = om.DOEDriver(om.ListGenerator(samples))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
        prob.set_val('x', np.ones((2, ), dtype=int))
        prob.set_val('y', np.ones((2, ), dtype=int))
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
expected = [{'x': np.array([5, 1]), 'y': np.array([1, 4]), 'f_xy': np.array([31, 69])},
{'x': np.array([3, 2]), 'y': np.array([6, -3]), 'f_xy': np.array([115, -7])},
{'x': np.array([-1, 0]), 'y': np.array([3, 5]), 'f_xy': np.array([59, 87])},
]
self.assertEqual(len(cases), len(expected))
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name][0], expected_case[name][0])
self.assertEqual(outputs[name][1], expected_case[name][1])
def test_discrete_desvar_csv(self):
prob = om.Problem()
model = prob.model
# Add independent variables
indeps = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
indeps.add_discrete_output('x', 4)
indeps.add_discrete_output('y', 3)
# Add components
model.add_subsystem('parab', ParaboloidDiscrete(), promotes=['*'])
# Specify design variable range and objective
model.add_design_var('x')
model.add_design_var('y')
model.add_objective('f_xy')
samples = '\n'.join([" x , y",
"5, 1",
"3, 6",
"-1, 3",
])
# this file contains design variable inputs in CSV format
with open('cases.csv', 'w') as f:
f.write(samples)
# Setup driver for 3 cases at a time
prob.driver = om.DOEDriver(om.CSVGenerator('cases.csv'))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
prob.cleanup()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver', out_stream=None)
expected = [{'x': 5, 'y': 1, 'f_xy': 31},
{'x': 3, 'y': 6, 'f_xy': 115},
{'x': -1, 'y': 3, 'f_xy': 59},
]
self.assertEqual(len(cases), len(expected))
for case, expected_case in zip(cases, expected):
outputs = cr.get_case(case).outputs
for name in ('x', 'y', 'f_xy'):
self.assertEqual(outputs[name], expected_case[name])
self.assertTrue(isinstance(outputs[name], int))
def test_desvar_indices(self):
prob = om.Problem()
prob.model.add_subsystem('comp', om.ExecComp('y=x**2',
x=np.array([1., 2., 3.]),
y=np.zeros(3)), promotes=['*'])
prob.model.add_design_var('x', lower=7.0, upper=11.0, indices=[0])
prob.model.add_objective('y', index=0)
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.setup()
prob.run_driver()
# Last value in fullfactorial DOE is 11, which gives 121.
assert_near_equal(prob.get_val('y'), np.array([121., 4., 9.]))
def test_multidimensional_inputs(self):
# Create a subsystem with multidimensional array inputs
matmul_comp = om.ExecComp('z = matmul(x,y)',
x=np.ones((3, 3)),
y=np.ones((3, 3)),
z=np.ones((3, 3)))
# Single execution test
prob = om.Problem()
prob.model.add_subsystem('matmul', matmul_comp, promotes=['*'])
prob.setup()
prob['x'] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
prob['y'] = np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]])
prob.run_model()
# DOE test
prob2 = om.Problem()
prob2.model.add_subsystem('matmul', matmul_comp, promotes=['*'])
prob2.model.add_design_var('x')
prob2.model.add_design_var('y')
prob2.model.add_objective('z')
prob2.setup()
case_list = [
[('x', prob['x']), ('y', prob['y'])]
]
prob2.driver = om.DOEDriver(case_list)
prob2.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob2.run_driver()
prob2.cleanup()
cr = om.CaseReader("cases.sql")
outputs = cr.get_case(0).outputs
for name in ('x', 'y', 'z'):
assert_near_equal(outputs[name], prob[name])
def test_multi_constraint_doe(self):
prob = om.Problem()
prob.model.add_subsystem('comp', om.ExecComp('y=x**2 + b',
x=np.array([1., 2., 3.]),
b=np.array([1., 2., 3.]),
y=np.zeros(3)), promotes=['*'])
prob.model.add_design_var('x', lower=7.0, upper=11.0, indices=[0])
prob.model.add_constraint('b', lower=7., indices=[0])
prob.model.add_constraint('b', upper=11., indices=[-1], alias='TEST')
prob.model.add_objective('y', index=0)
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
prob.run_driver()
cr = om.CaseReader("cases.sql")
cases = cr.list_cases('driver')
for case in cases:
outputs = cr.get_case(case).outputs
assert_near_equal(outputs['b'], np.array([1., 2, 3]))
@use_tempdirs
class TestDOEDriverListVars(unittest.TestCase):
def test_list_problem_vars(self):
# this passes if no exception is raised
prob = om.Problem()
model = prob.model
# Add independent variables
indeps = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
indeps.add_discrete_output('x', 4)
indeps.add_discrete_output('y', 3)
# Add components
model.add_subsystem('parab', ParaboloidDiscrete(), promotes=['*'])
# Specify design variable range and objective
model.add_design_var('x')
model.add_design_var('y')
model.add_objective('f_xy')
samples = [[('x', 5), ('y', 1)],
[('x', 3), ('y', 6)],
[('x', -1), ('y', 3)],
]
# Setup driver for 3 cases at a time
prob.driver = om.DOEDriver(om.ListGenerator(samples))
prob.setup(derivatives=False)
prob.run_driver()
prob.cleanup()
prob.list_problem_vars()
@unittest.skipUnless(MPI and PETScVector, "MPI and PETSc are required.")
@use_tempdirs
class TestParallelDOE4Proc(unittest.TestCase):
N_PROCS = 4
def setUp(self):
self.expected_fullfact3 = [
{'x': np.array([0.]), 'y': np.array([0.]), 'f_xy': np.array([22.00])},
{'x': np.array([.5]), 'y': np.array([0.]), 'f_xy': np.array([19.25])},
{'x': np.array([1.]), 'y': np.array([0.]), 'f_xy': np.array([17.00])},
{'x': np.array([0.]), 'y': np.array([.5]), 'f_xy': np.array([26.25])},
{'x': np.array([.5]), 'y': np.array([.5]), 'f_xy': np.array([23.75])},
{'x': np.array([1.]), 'y': np.array([.5]), 'f_xy': np.array([21.75])},
{'x': np.array([0.]), 'y': np.array([1.]), 'f_xy': np.array([31.00])},
{'x': np.array([.5]), 'y': np.array([1.]), 'f_xy': np.array([28.75])},
{'x': np.array([1.]), 'y': np.array([1.]), 'f_xy': np.array([27.00])},
]
def test_indivisible_error(self):
prob = om.Problem()
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.options['run_parallel'] = True
prob.driver.options['procs_per_model'] = 3
with self.assertRaises(RuntimeError) as context:
prob.setup()
self.assertEqual(str(context.exception),
"The total number of processors is not evenly divisible by the "
"specified number of processors per model.\n Provide a number of "
"processors that is a multiple of 3, or specify a number "
"of processors per model that divides into 4.")
def test_minprocs_error(self):
prob = om.Problem(FanInGrouped())
# require 2 procs for the ParallelGroup
prob.model._proc_info['sub'] = (2, None, 1.0)
# run cases on all procs
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.options['run_parallel'] = True
prob.driver.options['procs_per_model'] = 1
with self.assertRaises(RuntimeError) as context:
prob.setup()
self.assertEqual(str(context.exception),
"<model> <class FanInGrouped>: MPI process allocation failed: can't meet "
"min_procs required for the following subsystems: ['sub']")
def test_full_factorial(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 0.0), promotes=['x'])
model.add_subsystem('p2', om.IndepVarComp('y', 0.0), promotes=['y'])
model.add_subsystem('comp', Paraboloid(), promotes=['x', 'y', 'f_xy'])
model.add_design_var('x', lower=0.0, upper=1.0)
model.add_design_var('y', lower=0.0, upper=1.0)
model.add_objective('f_xy')
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3), procs_per_model=1,
run_parallel=True)
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.setup()
failed, output = run_driver(prob)
self.assertFalse(failed)
prob.cleanup()
expected = self.expected_fullfact3
size = prob.comm.size
rank = prob.comm.rank
# cases will be split across files for each proc
filename = "cases.sql_%d" % rank
expect_msg = "Cases from rank %d are being written to %s." % (rank, filename)
self.assertTrue(expect_msg in output)
cr = om.CaseReader(filename)
cases = cr.list_cases('driver', out_stream=None)
# cases recorded on this proc
num_cases = len(cases)
self.assertEqual(num_cases, len(expected) // size + (rank < len(expected) % size))
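        # with procs_per_model=1 the cases are dealt out round-robin, so this
        # rank holds cases rank, rank+size, rank+2*size, ... of the expected list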
for n in range(num_cases):
outputs = cr.get_case(cases[n]).outputs
idx = n * size + rank # index of expected case
self.assertEqual(outputs['x'], expected[idx]['x'])
self.assertEqual(outputs['y'], expected[idx]['y'])
self.assertEqual(outputs['f_xy'], expected[idx]['f_xy'])
# total number of cases recorded across all procs
num_cases = prob.comm.allgather(num_cases)
self.assertEqual(sum(num_cases), len(expected))
def test_fan_in_grouped_parallel_2x2(self):
# run cases in parallel with 2 procs per model
# (cases will be split between the 2 parallel model instances)
run_parallel = True
procs_per_model = 2
prob = om.Problem(FanInGrouped())
model = prob.model
model.add_design_var('x1', lower=0.0, upper=1.0)
model.add_design_var('x2', lower=0.0, upper=1.0)
model.add_objective('c3.y')
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.driver.options['run_parallel'] = run_parallel
prob.driver.options['procs_per_model'] = procs_per_model
prob.setup()
failed, output = run_driver(prob)
from openmdao.utils.mpi import multi_proc_exception_check
with multi_proc_exception_check(prob.comm):
self.assertFalse(failed)
prob.cleanup()
expected = [
{'x1': np.array([0.]), 'x2': np.array([0.]), 'c3.y': np.array([0.0])},
{'x1': np.array([.5]), 'x2': np.array([0.]), 'c3.y': np.array([-3.0])},
{'x1': np.array([1.]), 'x2': np.array([0.]), 'c3.y': np.array([-6.0])},
{'x1': np.array([0.]), 'x2': np.array([.5]), 'c3.y': np.array([17.5])},
{'x1': np.array([.5]), 'x2': np.array([.5]), 'c3.y': np.array([14.5])},
{'x1': np.array([1.]), 'x2': np.array([.5]), 'c3.y': np.array([11.5])},
{'x1': np.array([0.]), 'x2': np.array([1.]), 'c3.y': np.array([35.0])},
{'x1': np.array([.5]), 'x2': np.array([1.]), 'c3.y': np.array([32.0])},
{'x1': np.array([1.]), 'x2': np.array([1.]), 'c3.y': np.array([29.0])},
]
num_cases = 0
# we can run two models in parallel on our 4 procs
num_models = prob.comm.size // procs_per_model
# a separate case file will be written by rank 0 of each parallel model
# (the top two global ranks)
rank = prob.comm.rank
filename = "cases.sql_%d" % rank
if rank < num_models:
expect_msg = "Cases from rank %d are being written to %s." % (rank, filename)
self.assertTrue(expect_msg in output)
cr = om.CaseReader(filename)
cases = cr.list_cases('driver')
# cases recorded on this proc
num_cases = len(cases)
self.assertEqual(num_cases, len(expected) // num_models+(rank < len(expected) % num_models))
for n, case in enumerate(cases):
idx = n * num_models + rank # index of expected case
outputs = cr.get_case(case).outputs
for name in ('x1', 'x2', 'c3.y'):
self.assertEqual(outputs[name], expected[idx][name])
else:
self.assertFalse("Cases from rank %d are being written" % rank in output)
self.assertFalse(os.path.exists(filename))
# total number of cases recorded across all requested procs
num_cases = prob.comm.allgather(num_cases)
self.assertEqual(sum(num_cases), len(expected))
def test_fan_in_grouped_parallel_4x1(self):
# run cases in parallel with 1 proc per model
# (cases will be split between the 4 serial model instances)
run_parallel = True
procs_per_model = 1
prob = om.Problem(FanInGrouped())
model = prob.model
model.add_design_var('x1', lower=0.0, upper=1.0)
model.add_design_var('x2', lower=0.0, upper=1.0)
model.add_objective('c3.y')
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.driver.options['run_parallel'] = run_parallel
prob.driver.options['procs_per_model'] = procs_per_model
prob.setup()
failed, output = run_driver(prob)
self.assertFalse(failed)
prob.cleanup()
expected = [
{'x1': np.array([0.]), 'x2': np.array([0.]), 'c3.y': np.array([0.0])},
{'x1': np.array([.5]), 'x2': np.array([0.]), 'c3.y': np.array([-3.0])},
{'x1': np.array([1.]), 'x2': np.array([0.]), 'c3.y': np.array([-6.0])},
{'x1': np.array([0.]), 'x2': np.array([.5]), 'c3.y': np.array([17.5])},
{'x1': np.array([.5]), 'x2': np.array([.5]), 'c3.y': np.array([14.5])},
{'x1': np.array([1.]), 'x2': np.array([.5]), 'c3.y': np.array([11.5])},
{'x1': np.array([0.]), 'x2': np.array([1.]), 'c3.y': np.array([35.0])},
{'x1': np.array([.5]), 'x2': np.array([1.]), 'c3.y': np.array([32.0])},
{'x1': np.array([1.]), 'x2': np.array([1.]), 'c3.y': np.array([29.0])},
]
rank = prob.comm.rank
# there will be a separate case file for each proc, containing the cases
# run by the instance of the model that runs in serial mode on that proc
filename = "cases.sql_%d" % rank
expect_msg = "Cases from rank %d are being written to %s." % (rank, filename)
self.assertTrue(expect_msg in output)
# we are running 4 models in parallel, each using 1 proc
num_models = prob.comm.size // procs_per_model
cr = om.CaseReader(filename)
cases = cr.list_cases('driver', out_stream=None)
# cases recorded on this proc
num_cases = len(cases)
self.assertEqual(num_cases, len(expected) // num_models + (rank < len(expected) % num_models))
for n, case in enumerate(cases):
idx = n * num_models + rank # index of expected case
outputs = cr.get_case(case).outputs
self.assertEqual(outputs['x1'], expected[idx]['x1'])
self.assertEqual(outputs['x2'], expected[idx]['x2'])
self.assertEqual(outputs['c3.y'], expected[idx]['c3.y'])
# total number of cases recorded across all requested procs
num_cases = prob.comm.allgather(num_cases)
self.assertEqual(sum(num_cases), len(expected))
def test_fan_in_grouped_serial_2x2(self):
# do not run cases in parallel, but with 2 procs per model
# (all cases will run on each of the 2 model instances)
run_parallel = False
procs_per_model = 2
prob = om.Problem(FanInGrouped())
model = prob.model
model.add_design_var('x1', lower=0.0, upper=1.0)
model.add_design_var('x2', lower=0.0, upper=1.0)
model.add_objective('c3.y')
prob.driver = om.DOEDriver(om.FullFactorialGenerator(levels=3))
prob.driver.add_recorder(om.SqliteRecorder("cases.sql"))
prob.driver.options['run_parallel'] = run_parallel
prob.driver.options['procs_per_model'] = procs_per_model
prob.setup()
failed, output = run_driver(prob)
self.assertFalse(failed)
prob.cleanup()
expected = [
{'x1': np.array([0.]), 'x2': np.array([0.]), 'c3.y': np.array([0.0])},
            {'x1': np.array([.5]), 'x2': np.array([0.]), 'c3.y': np.array([-3.0])},
import torch
import numpy as np
import torch.nn as nn
from pruner.filter_pruner import FilterPruner
from model.MobileNetV2 import InvertedResidual
class FilterPrunerMBNetV2(FilterPruner):
def parse_dependency(self):
pass
def forward(self, x):
if isinstance(self.model, nn.DataParallel):
model = self.model.module
else:
model = self.model
self.activations = []
self.gradients = []
self.weight_grad = []
self.grad_index = 0
self.linear = None
# activation index to the instance of conv layer
self.activation_to_conv = {}
# retrieve next conv using activation index of conv
self.next_conv = {}
# retrieve next immediate bn layer using activation index of conv
self.bn_for_conv = {}
        # Chaining convolutions
# (use activation index to represent a conv)
self.chains = {}
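        # For example (hypothetical indices): a depthwise conv gets chained to the
        # pointwise conv right before it, and a residual block chains its last conv
        # to the last conv of the previous block, e.g. self.chains = {0: 1, 2: 5}.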
activation_index = 0
prev_blk_last_conv = -1
for l1, m1 in enumerate(model.features.children()):
skipped = False
if isinstance(m1, InvertedResidual):
if m1.use_res_connect:
skipped = True
# m1 is nn.Sequential now
m1 = m1.conv
# use for residual
tmp_x = x
            # At the beginning of an InvertedResidual block, record prev_conv for chaining purposes
if activation_index-1 >= 0:
prev_blk_last_conv = activation_index-1
cnt = 0
for l2, m2 in enumerate(m1.children()):
cnt += 1
x = m2(x)
h = x.shape[2]
w = x.shape[3]
if isinstance(m2, nn.Conv2d):
self.conv_in_channels[activation_index] = m2.weight.size(1)
self.conv_out_channels[activation_index] = m2.weight.size(0)
self.omap_size[activation_index] = (h, w)
self.cost_map[activation_index] = h * w * m2.weight.size(2) * m2.weight.size(3)
self.in_params[activation_index] = m2.weight.size(1) * m2.weight.size(2) * m2.weight.size(3)
self.cur_flops += h * w * m2.weight.size(0) * m2.weight.size(1) * m2.weight.size(2) * m2.weight.size(3)
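                    # Conv cost (MACs) = H_out * W_out * C_out * (C_in/groups) * K_h * K_w;
                    # weight.size(1) is already in_channels/groups for grouped convs.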
                    # If this is a full group conv (depthwise), it should be bound to the previous conv
if m2.groups == m2.out_channels and m2.groups == m2.in_channels:
assert activation_index-1 not in self.chains, 'Previous conv has already chained to some other convs!'
self.chains[activation_index-1] = activation_index
if self.rank_type == 'l1_weight':
if activation_index not in self.filter_ranks:
self.filter_ranks[activation_index] = torch.zeros(m2.weight.size(0), device=self.device)
values = (torch.abs(m2.weight.data)).sum(1).sum(1).sum(1)
# Normalize the rank by the filter dimensions
#values = values / (m2.weight.size(1) * m2.weight.size(2) * m2.weight.size(3))
self.filter_ranks[activation_index] = values
elif self.rank_type == 'l2_weight':
if activation_index not in self.filter_ranks:
self.filter_ranks[activation_index] = torch.zeros(m2.weight.size(0), device=self.device)
values = (torch.pow(m2.weight.data, 2)).sum(1).sum(1).sum(1)
# Normalize the rank by the filter dimensions
# values = values / (m2.weight.size(1) * m2.weight.size(2) * m2.weight.size(3))
self.filter_ranks[activation_index] = values
elif self.rank_type == 'l2_bn' or self.rank_type == 'l1_bn':
pass
else:
x.register_hook(self.compute_rank)
self.activations.append(x)
self.rates[activation_index] = self.conv_in_channels[activation_index] * self.cost_map[activation_index]
self.activation_to_conv[activation_index] = m2
if activation_index > 0:
self.next_conv[activation_index-1] = [activation_index]
activation_index += 1
elif isinstance(m2, nn.BatchNorm2d):
# activation-1 since we increased the index right after conv
self.bn_for_conv[activation_index-1] = m2
if self.rank_type == 'l2_bn':
if activation_index-1 not in self.filter_ranks:
self.filter_ranks[activation_index-1] = torch.zeros(m2.weight.size(0), device=self.device)
values = torch.pow(m2.weight.data, 2)
self.filter_ranks[activation_index-1] = values
elif self.rank_type == 'l2_bn_param':
if activation_index-1 not in self.filter_ranks:
self.filter_ranks[activation_index-1] = torch.zeros(m2.weight.size(0), device=self.device)
values = torch.pow(m2.weight.data, 2)
self.filter_ranks[activation_index-1] = values* self.in_params[activation_index-1]
if cnt == 0:
x = m1(x)
# After we parse through the block, if this block is with residual
if skipped:
x = tmp_x + x
if prev_blk_last_conv >= 0:
assert prev_blk_last_conv not in self.chains, 'Previous conv has already chained to some other convs!'
# activation-1 is the current convolution since we just increased the pointer
self.chains[prev_blk_last_conv] = activation_index-1
for m in model.classifier.modules():
if isinstance(m, nn.Linear):
self.linear = m
                self.base_flops = np.prod(m.weight.shape)
#This package contains all the routines that allow the user to mask pixels and columns
#out of the list of spectral orders. The main functionality is wrapped into mask_orders()
#which is defined at the bottom. Mask_orders() is the highest-level thing that's called.
#It does masking of all spectral orders in two steps (depending on which functionality was
#requested by the user when calling it):
#-An automatic sigma clipping.
#-Manual selection of columns using a GUI.
#The masked areas from these two operations are saved separately in the data folder.
#and can be loaded/altered upon new calls/passes through run.py.
#Most of the routines below are related to making the GUI work.
__all__ = [
"manual_masking",
"apply_mask_from_file",
"mask_orders",
"load_columns_from_file",
"write_columns_to_file",
"interpolate_over_NaNs",
"mask_maker"
]
def interpolate_over_NaNs(list_of_orders,cutoff=0.2,quiet=False,parallel=False):
#This is a helper function I had to dump here that is mostly unrelated to the GUI,
#but with healing NaNs. If there are too many NaNs in a column, instead of
#interpolating, just set the entire column to NaN. If an entire column is set to NaN,
#it doesn't need to be healed because the cross-correlation never sees it, and the pixel
#never contributes. It becomes like the column is beyond the edge of the wavelength range of
#the data.
import numpy as np
import tayph.functions as fun
import tayph.util as ut
from tayph.vartests import typetest
import astropy.io.fits as fits
if parallel: from joblib import Parallel, delayed
"""
This function loops through a list of orders, over the individual
spectra in each order, and interpolates over the NaNs. It uses the manual provided at
https://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array
which I duplicated in tayph.functions.
Parameters
----------
list_of_orders : list
The list of 2D orders for which NaNs need to be removed.
cutoff : float
If a column contains more NaNs than this value times its length, instead
of interpolating over those NaNs, the entire column is set to NaN.
Returns
-------
list_of_healed_orders : list
The corrected 2D orders.
"""
typetest(cutoff,float,'cutoff in masking.interpolate_over_NaNs()',)
if cutoff <= 0 or cutoff > 1:
        raise RuntimeError('Error in interpolate_over_NaNs: cutoff should be between 0 and 1 '
'(not including 0).')
N = len(list_of_orders)
if N == 0:
raise RuntimeError('Error in interpolate_over_NaNs: List of orders is empty.')
def interpolate_over_NaNs_parallel(i):
        order = list_of_orders[i]*1.0 #times 1.0 to copy it; otherwise the input array would be altered in place.
shape = np.shape(order)
nexp = shape[0]
npx = shape[1]
N_pixels = nexp*npx
list_of_masked_columns=[]
        N_nans_columns=0
N_nans_isolated=0
N_healed = 0
if np.sum(np.isnan(order)) > 0:
# N_nans_total+=np.sum(np.isnan(order))
#So this order contains NaNs.
#First we loop over all columns to try to find columns where the number
#of NaNs is greater than CUTOFF.
N_Nans = np.sum(np.isnan(order), axis=0)
list_of_masked_columns = np.where(N_Nans > (cutoff*nexp))[0]
N_nans_columns = len(list_of_masked_columns) * nexp
N_nans_isolated = np.sum(N_Nans[np.where(N_Nans <= (cutoff*nexp))[0]])
for k in range(nexp):
spectrum = order[k,:]
nans,x= fun.nan_helper(spectrum)
if np.sum(nans) > 0:
spectrum_healed = spectrum*1.0
#There are nans in this spectrum.
N_healed += np.sum(nans)
if len(x(~nans)) > 0:
spectrum_healed[nans]= np.interp(x(nans), x(~nans), spectrum[~nans])
#This heals all the NaNs, including the ones in all-NaN columns.
#These will be set back to NaN below.
else:#This happens if an entire order is masked.
spectrum_healed[nans]=0
order[k,:] = spectrum_healed
if len(list_of_masked_columns) > 0:
for l in list_of_masked_columns:
order[:,l]+=np.nan#Set the ones that were erroneously healed back to nan.
return (order, [N_nans_columns, N_nans_isolated, N_pixels, N_healed])
if parallel:
list_of_healed_orders, N_list = zip(*Parallel(n_jobs=len(list_of_orders))
(delayed(interpolate_over_NaNs_parallel)(i) for i in range(len(list_of_orders))))
else:
list_of_healed_orders, N_list = zip(*[interpolate_over_NaNs_parallel(i)
for i in range(len(list_of_orders))])
list_of_healed_orders = list(list_of_healed_orders)
N_nans_columns = np.sum(N_list, axis=0)[0]
N_nans_isolated = np.sum(N_list, axis=0)[1]
N_pixels = np.sum(N_list, axis=0)[2]
N_healed = np.sum(N_list, axis=0)[3]
if quiet == False:
ut.tprint(f'------Total number of pixels in {N} orders: {N_pixels}')
ut.tprint(f'------Number of NaNs in columns identified as bad (or previously masked): '
f'{N_nans_columns} ({np.round(N_nans_columns/N_pixels*100,2)}% of total)')
ut.tprint(f'------Number of NaNs in isolated pixels: {N_nans_isolated} '
f'({np.round(N_nans_isolated/N_pixels*100,2)}% of total)')
ut.tprint(f'------Total number of bad pixels identified: {N_nans_isolated+N_nans_columns} '
f'({np.round((N_nans_isolated+N_nans_columns)/N_pixels*100,2)}% of total)')
return(list_of_healed_orders)
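# Minimal usage sketch (hypothetical arrays; assumes numpy is imported as np and
# each order is an nexp x npx array):
#
#     orders = [np.random.rand(10, 200) for _ in range(3)]
#     orders[0][:, 50] = np.nan                  # a column that is entirely bad
#     orders[1][3, 17] = np.nan                  # an isolated bad pixel
#     healed = interpolate_over_NaNs(orders, cutoff=0.2)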
class mask_maker(object):
#This is my third home-made class: A GUI for masking pixels in the spectrum.
def __init__(self,list_of_wls,list_of_orders,list_of_saved_selected_columns,Nxticks,Nyticks,
nsigma=3.0):
"""
We initialize with a figure object, three axis objects (in a list)
the wls, the orders, the masks already made; and we do the first plot.
NOTICE: Anything that is potted in these things as INF actually used to be
a NaN that was masked out before.
"""
import numpy as np
import pdb
import tayph.functions as fun
import tayph.plotting as plotting
import tayph.drag_colour as dcb
import matplotlib.pyplot as plt
import itertools
from matplotlib.widgets import MultiCursor
import tayph.util as ut
from tayph.vartests import typetest,postest
import copy
#Upon initialization, we raise the keywords onto self.
self.N_orders = len(list_of_wls)
if len(list_of_wls) < 1 or len(list_of_orders) < 1:# or len(list_of_masks) <1:
raise Exception('Runtime Error in mask_maker init: lists of WLs, orders and/or masks '
'have less than 1 element.')
if len(list_of_wls) != len(list_of_orders):# or len(list_of_wls) != len(list_of_masks):
raise Exception('Runtime Error in mask_maker init: List of wls and list of orders have '
f'different length ({len(list_of_wls)} & {len(list_of_orders)}).')
typetest(Nxticks,int,'Nxticks in mask_maker init',)
typetest(Nyticks,int,'Nyticks in mask_maker init',)
typetest(nsigma,float,'Nsigma in mask_maker init',)
postest(Nxticks,varname='Nxticks in mask_maker init')
postest(Nyticks,varname='Nyticks in mask_maker init')
postest(nsigma,varname='Nsigma in mask_maker init')
self.list_of_wls = list_of_wls
self.list_of_orders = list_of_orders
self.list_of_selected_columns = list(list_of_saved_selected_columns)
#Normally, if there are no saved columns to load, list_of_saved_selected_columns is an
#empty list. However if it is set, then its automatically loaded into
#self.list_of_selected_columns upon init. Below there is a check to determine whether it
#was empty or not, and whether the list of columns has the same length as the list of
#orders.
if len(self.list_of_selected_columns) == 0:
for i in range(self.N_orders):
self.list_of_selected_columns.append([])#Make a list of empty lists.
#This will contain all columns masked by the user, on top of the things
#that are already masked by the program.
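                #For example (hypothetical values), after the user masks two columns in
                #order 1 this could look like [[], [511, 512], [], ...], one list per order.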
else:
if len(self.list_of_selected_columns) != self.N_orders:
raise Exception('Runtime Error in mask_maker init: Trying to restore previously '
'saved columns but the number of orders in the saved column file does not match '
'the number of orders provided.')
print('------Restoring previously saved columns in mask-maker')
#All checks are now complete. Lets prepare to do the masking.
# self.N = min([56,self.N_orders-1])#We start on order 56, or the last order if order 56
#doesn't exist.
self.N=0
#Set the current active order to order , and calculate the meanspec
#and residuals to be plotted, which are saved in self.
self.set_order(self.N)
#Sorry for the big self.spaghetti of code. This initializes the plot.
#Functions and vars further down in the class will deal with updating the plots
#as buttons are pressed. Some of this is copied from the construct_doppler_model
#function; but this time I made it part of the class.
#First define plotting and axis parameters for the colormesh below.
self.Nxticks = Nxticks
self.Nyticks = Nyticks
self.nsigma = nsigma
self.xrange = [0,self.npx-1]
self.yrange=[0,self.nexp-1]
self.x_axis= np.arange(self.npx, dtype=int) #fun.findgen(self.npx).astype(int)
self.y_axis = np.arange(self.nexp, dtype=int) #fun.findgen(self.nexp).astype(int)
        (self.x2, self.y2, self.z, self.wl_sel, self.y_axis_sel, self.xticks, self.yticks,
         void1, void2) = plotting.plotting_scales_2D(
            self.x_axis, self.y_axis, self.residual, self.xrange, self.yrange,
            Nxticks=self.Nxticks, Nyticks=self.Nyticks, nsigma=self.nsigma)
self.fig,self.ax = plt.subplots(3,1,sharex=True,figsize=(14,6))#Init the figure and 3 axes.
plt.subplots_adjust(left=0.05)#Make them more tight, we need all the space we can get.
plt.subplots_adjust(right=0.75)
        self.ax[0].set_title(f'Spectral order {self.N} '
            f'({round(np.min(self.wl), 1)} - {round(np.max(self.wl), 1)} nm)')
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Lu."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class LuOpTest(test.TestCase):
@property
def float_types(self):
return set((np.float64, np.float32, np.complex64, np.complex128))
def _verifyLuBase(self, x, lower, upper, perm, verification,
output_idx_type):
lower_np, upper_np, perm_np, verification_np = self.evaluate(
[lower, upper, perm, verification])
self.assertAllClose(x, verification_np)
self.assertShapeEqual(x, lower)
self.assertShapeEqual(x, upper)
self.assertAllEqual(x.shape[:-1], perm.shape.as_list())
# Check dtypes are as expected.
self.assertEqual(x.dtype, lower_np.dtype)
self.assertEqual(x.dtype, upper_np.dtype)
self.assertEqual(output_idx_type.as_numpy_dtype, perm_np.dtype)
# Check that the permutation is valid.
if perm_np.shape[-1] > 0:
perm_reshaped = np.reshape(perm_np, (-1, perm_np.shape[-1]))
for perm_vector in perm_reshaped:
self.assertAllClose(np.arange(len(perm_vector)), np.sort(perm_vector))
def _verifyLu(self, x, output_idx_type=dtypes.int64):
# Verify that Px = LU.
lu, perm = linalg_ops.lu(x, output_idx_type=output_idx_type)
# Prepare the lower factor of shape num_rows x num_rows
lu_shape = np.array(lu.shape.as_list())
batch_shape = lu_shape[:-2]
num_rows = lu_shape[-2]
num_cols = lu_shape[-1]
lower = array_ops.matrix_band_part(lu, -1, 0)
if num_rows > num_cols:
eye = linalg_ops.eye(
num_rows, batch_shape=batch_shape, dtype=lower.dtype)
lower = array_ops.concat([lower, eye[..., num_cols:]], axis=-1)
elif num_rows < num_cols:
lower = lower[..., :num_rows]
# Fill the diagonal with ones.
ones_diag = array_ops.ones(
np.append(batch_shape, num_rows), dtype=lower.dtype)
lower = array_ops.matrix_set_diag(lower, ones_diag)
# Prepare the upper factor.
upper = array_ops.matrix_band_part(lu, 0, -1)
verification = math_ops.matmul(lower, upper)
# Permute the rows of product of the Cholesky factors.
if num_rows > 0:
# Reshape the product of the triangular factors and permutation indices
# to a single batch dimension. This makes it easy to apply
# invert_permutation and gather_nd ops.
perm_reshaped = array_ops.reshape(perm, [-1, num_rows])
verification_reshaped = array_ops.reshape(verification,
[-1, num_rows, num_cols])
# Invert the permutation in each batch.
inv_perm_reshaped = map_fn.map_fn(array_ops.invert_permutation,
perm_reshaped)
batch_size = perm_reshaped.shape.as_list()[0]
# Prepare the batch indices with the same shape as the permutation.
# The corresponding batch index is paired with each of the `num_rows`
# permutation indices.
batch_indices = math_ops.cast(
array_ops.broadcast_to(
math_ops.range(batch_size)[:, None], perm_reshaped.shape),
dtype=output_idx_type)
permuted_verification_reshaped = array_ops.gather_nd(
verification_reshaped,
array_ops.stack([batch_indices, inv_perm_reshaped], axis=-1))
# Reshape the verification matrix back to the original shape.
verification = array_ops.reshape(permuted_verification_reshaped,
lu_shape)
self._verifyLuBase(x, lower, upper, perm, verification,
output_idx_type)
def testBasic(self):
data = np.array([[4., -1., 2.], [-1., 6., 0], [10., 0., 5.]])
for dtype in (np.float32, np.float64):
for output_idx_type in (dtypes.int32, dtypes.int64):
self._verifyLu(data.astype(dtype), output_idx_type=output_idx_type)
for dtype in (np.complex64, np.complex128):
for output_idx_type in (dtypes.int32, dtypes.int64):
complex_data = np.tril(1j * data, -1).astype(dtype)
complex_data += np.triu(-1j * data, 1).astype(dtype)
complex_data += data
self._verifyLu(complex_data, output_idx_type=output_idx_type)
def testPivoting(self):
# This matrix triggers partial pivoting because the first diagonal entry
# is small.
    data = np.array([[1e-9, 1., 0.], [1., 0., 0], [0., 1., 5]])
## The code is based on the implementation from:
## https://github.com/hongwang600/RelationDectection
##
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import random
from data import gen_data
from model import SimilarityModel
from utils import process_testing_samples, process_samples, ranking_sequence,\
copy_grad_data, get_grad_params
from evaluate import evaluate_model
from config import CONFIG as conf
embedding_dim = conf['embedding_dim']
hidden_dim = conf['hidden_dim']
batch_size = conf['batch_size']
model_path = conf['model_path']
num_cands = conf['num_cands']
device = conf['device']
lr = conf['learning_rate']
loss_margin = conf['loss_margin']
def sample_memory_data(sample_pool, sample_size):
if len(sample_pool) > 0:
sample_indexs = random.sample(range(len(sample_pool)),
min(sample_size, len(sample_pool)))
return [sample_pool[index] for index in sample_indexs]
else:
return []
def feed_samples(model, samples, loss_function, all_relations, device):
questions, relations, relation_set_lengths = process_samples(
samples, all_relations, device)
#print('got data')
ranked_questions, reverse_question_indexs = \
ranking_sequence(questions)
ranked_relations, reverse_relation_indexs =\
ranking_sequence(relations)
question_lengths = [len(question) for question in ranked_questions]
relation_lengths = [len(relation) for relation in ranked_relations]
#print(ranked_questions)
pad_questions = torch.nn.utils.rnn.pad_sequence(ranked_questions)
pad_relations = torch.nn.utils.rnn.pad_sequence(ranked_relations)
#print(pad_questions)
pad_questions = pad_questions.to(device)
pad_relations = pad_relations.to(device)
#print(pad_questions)
model.zero_grad()
model.init_hidden(device, sum(relation_set_lengths))
all_scores = model(pad_questions, pad_relations, device,
reverse_question_indexs, reverse_relation_indexs,
question_lengths, relation_lengths)
all_scores = all_scores.to('cpu')
pos_scores = []
neg_scores = []
start_index = 0
for length in relation_set_lengths:
pos_scores.append(all_scores[start_index].expand(length-1))
neg_scores.append(all_scores[start_index+1:start_index+length])
start_index += length
pos_scores = torch.cat(pos_scores)
neg_scores = torch.cat(neg_scores)
loss = loss_function(pos_scores, neg_scores,
torch.ones(sum(relation_set_lengths)-
len(relation_set_lengths)))
loss.backward()
# copied from facebook open scource. (https://github.com/facebookresearch/
# GradientEpisodicMemory/blob/master/model/gem.py)
def project2cone2(gradient, memories, margin=0.5):
"""
Solves the GEM dual QP described in the paper given a proposed
gradient "gradient", and a memory of task gradients "memories".
Overwrites "gradient" with the final projected update.
input: gradient, p-vector
input: memories, (t * p)-vector
output: x, p-vector
"""
memories_np = memories.cpu().view(-1).double().numpy()
gradient_np = gradient.cpu().contiguous().view(-1).double().numpy()
x = gradient_np - (np.dot(gradient_np, memories_np)/
                       np.dot(memories_np, memories_np)) * memories_np
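    # This is the closed-form solution of the dual QP for a single memory
    # constraint (the margin argument is not used here):
    #     x = g - (<g, m> / <m, m>) * m
    # i.e. the component of the gradient that conflicts with the memory
    # gradient is removed.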
import cv2
import numpy as np
from PIL import Image
from shapely.geometry import LineString as shape_string
from shapely.geometry import Polygon as shape_poly
def angle_between(p1, p2):
ang1 = np.arctan2(*p1[::-1])
ang2 = np.arctan2(*p2[::-1])
return (ang2 - ang1) % (2 * np.pi)
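# e.g. angle_between((1, 0), (0, 1)) == pi/2: the counter-clockwise angle from p1 to p2.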
def pad_to(im, size=3000):
row, col = im.shape[:2]
pad_r = (size - row) // 2
pad_c = (size - col) // 2
border = cv2.copyMakeBorder(
im, top=pad_r, bottom=pad_r, left=pad_c, right=pad_c, borderType=cv2.BORDER_CONSTANT, value=[0]
)
return border
def semmap_to_lightmap(sem):
def gkern(l=10, sig=5):
"""\
creates gaussian kernel with side length l and a sigma of sig
"""
ax = np.linspace(-(l - 1) / 2.0, (l - 1) / 2.0, l)
xx, yy = np.meshgrid(ax, ax)
kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig))
return kernel / np.sum(kernel)
kernel = gkern()
sem_map = np.array(sem)
kitchen_bathroom = np.logical_or(np.logical_or(sem_map == 10, sem_map == 1), sem_map == 19)
kitchen_bathroom_filtered = cv2.dilate(kitchen_bathroom.astype(np.float32), kernel.astype(np.float32))
kitchen_bathroom_filtered = cv2.filter2D(
kitchen_bathroom_filtered.astype(np.float32), -1, kernel.astype(np.float32)
)
return Image.fromarray((kitchen_bathroom_filtered * 255).astype(np.uint8))
class BBox(object):
def __init__(self, raw_pts=None, zmin=None, zmax=None, from_dict=None):
if raw_pts is None:
self.load_dict(from_dict)
else:
self.raw_pts = raw_pts
self.z = (zmin, zmax)
self.init_box()
def init_box(self):
self.center = self.raw_pts.mean(axis=0)
self.calc_nrm()
def get_scale(self):
return np.linalg.norm(self.edge_x), np.linalg.norm(self.edge_y), self.z[1] - self.z[0]
def calc_nrm(self):
if self.raw_pts[0, 0] > self.raw_pts[1, 0]:
id1, id2 = 1, 0
else:
id1, id2 = 0, 1
direction = self.raw_pts[id1, :] - self.raw_pts[id2, :]
dist = np.linalg.norm(direction)
des_len = 0.4
nrm = np.array([-direction[1] * des_len / dist, direction[0] * des_len / dist])
nrm_start = (self.raw_pts[id1] + self.raw_pts[id2]) / 2.0
cand1 = nrm_start + nrm
cand2 = nrm_start - nrm
if np.linalg.norm(cand1 - self.center) > np.linalg.norm(cand2 - self.center):
flip_factor = 1
else:
flip_factor = -1
nrm = nrm * flip_factor
self.nrm = nrm / np.linalg.norm(nrm)
# self.flip_factor = flip_factor
# return nrm
self.edge_y = direction * flip_factor
self.edge_x = np.linalg.norm(self.raw_pts[0, :] - self.raw_pts[3, :]) * self.nrm
def get_coords(self):
"""
Return the vertices of the bounding box, in order of BL,BR,TR,TL
"""
x = self.edge_x / 2.0
y = self.edge_y / 2.0
return np.array([self.center - x - y, self.center + x - y, self.center + x + y, self.center - x + y])
def get_rotation_angle(self, CAD_dir=(0, 1)):
# dir_y, dir_x = self.nrm / np.linalg.norm(self.nrm)
dir_y, dir_x = self.nrm[:]
# theta = np.arcsin(np.cross([1,0], [dir_x, dir_y]))
return angle_between(CAD_dir, (dir_x, dir_y))
def get_center(self):
return (*self.center, (self.z[0] + self.z[1]) / 2.0)
def get_nrm(self):
return self.nrm
def as_dict(self):
return {
"normal": tuple(self.nrm),
"center": tuple(self.center),
"edge_x": tuple(self.edge_x),
"edge_y": tuple(self.edge_y),
"raw_pts": self.raw_pts.tolist(),
"theta": self.get_rotation_angle(),
"z": tuple(self.z),
}
def load_dict(self, d):
self.nrm = np.array(d["normal"])
self.center = np.array(d["center"])
self.edge_x = np.array(d["edge_x"])
self.edge_y = np.array(d["edge_y"])
self.z = d["z"]
self.raw_pts = np.array(d["raw_pts"])
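# Minimal usage sketch (hypothetical values, not part of the original pipeline): build a
# BBox from four corner points plus a z-range and read back its derived quantities.
def _bbox_usage_example():
    pts = np.array([[0.0, 0.0], [2.0, 0.0], [2.0, 1.0], [0.0, 1.0]])
    box = BBox(raw_pts=pts, zmin=0.0, zmax=2.5)
    # (edge_x length, edge_y length, height), the footprint centre and the yaw angle
    return box.get_scale(), box.get_center(), box.get_rotation_angle()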
def polygon_to_bbox(Y, X, Z, rotation=None, flip_image=None, scale_factor=100.0):
pts = np.vstack((Y, X)).transpose()
center = pts.mean(axis=0)
dists = np.array([np.linalg.norm(pts[i] - pts[i - 1]) for i in range(4)])
max_idx = np.argmax(dists)
p0, p1 = pts[max_idx], pts[max_idx - 1]
edge_y = p1 - p0
edge_y_dir = edge_y / np.linalg.norm(edge_y)
edge_y_projected = np.array([np.dot(pts[i] - p1, edge_y_dir) for i in range(4)])
edge_y_raw = edge_y_dir * np.ptp(edge_y_projected)
mid_y = (p0 + p1) / 2.0
edge_x_crook = center - mid_y
edge_x_raw = 2 * (edge_x_crook - (edge_y_dir * np.dot(edge_x_crook, edge_y_dir)))
edge_x_dir = edge_x_raw / np.linalg.norm(edge_x_raw)
# so we have:
# edge_x, edge_y, center here
# Z is given as input
# we need to figure out normal direction (aka theta)
# and reorient X/Y accordingly
if rotation is None:
# this means that the object is wall/door/window
if np.linalg.norm(edge_x_raw) > np.linalg.norm(edge_y_raw):
edge_x = edge_y_raw
edge_y = edge_x_raw
nrm = edge_y_dir
else:
edge_y = edge_y_raw
edge_x = edge_x_raw
nrm = edge_x_dir
else:
# this means that this is a fixed furniture
forward_direction = np.array([1, 0])
rotated = np.matmul(
np.asarray([[np.cos(rotation), -np.sin(rotation)], [np.sin(rotation), np.cos(rotation)]]), forward_direction
)
fit_x = np.dot(edge_x_dir, rotated)
fit_y = np.dot(edge_y_dir, rotated)
if abs(fit_y) > abs(fit_x):
edge_x = edge_y_raw
edge_y = edge_x_raw
nrm = edge_y_dir * np.sign(fit_y)
else:
edge_y = edge_y_raw
edge_x = edge_x_raw
nrm = edge_x_dir * np.sign(fit_x)
if flip_image is not None:
if should_flip(center, nrm, flip_image):
nrm = -nrm
bbox_dict = {
"center": center / scale_factor,
"z": Z,
"edge_x": edge_x / scale_factor,
"edge_y": edge_y / scale_factor,
"normal": nrm,
"raw_pts": pts / scale_factor,
}
return BBox(None, None, None, bbox_dict)
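# Hypothetical example (values are made up): a 200 cm x 100 cm axis-aligned footprint with a
# z-range already in metres, converted with the default scale_factor of 100. rotation=None
# takes the wall/door/window branch above, and flip_image=None skips the flip check.
def _polygon_to_bbox_example():
    wall = polygon_to_bbox(Y=np.array([0.0, 200.0, 200.0, 0.0]),
                           X=np.array([0.0, 0.0, 100.0, 100.0]),
                           Z=(0.0, 2.5))
    return wall.get_scale()  # (1.0, 2.0, 2.5) in metres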
def should_flip(center, nrm, flip_image):
flip_image_np = np.asarray(flip_image).astype(int)
# plt.imshow(flip_image_np)
# normal direction
normal_dist = 0
width, height = flip_image_np.shape
for i in range(1000):
y, x = np.round(center + i * nrm).astype(int)
if x >= width or x < 0 or y >= height or y < 0:
normal_dist = 2000
break
if flip_image_np[x, y]:
normal_dist = i
break
# flipped direction
flip_dist = 0
for i in range(1000):
y, x = np.round(center - i * nrm).astype(int)
if x >= width or x < 0 or y >= height or y < 0:
flip_dist = 2000
break
if flip_image_np[x, y]:
flip_dist = i
break
y, x = np.round(center).astype(int)
# print(nrm, center, normal_dist, flip_dist)
return flip_dist > normal_dist
# def squash_to_size(xmin,xmax,ymin,ymax,scale):
def squash_to_size(bbox, scale):
size = random.choice(scale)
print("squashing object...")
x, y, _ = bbox.get_scale()
# print(bbox.get_scale())
if x < y:
        # assumed completion: rescale the shorter edge to the sampled target size
        bbox.edge_x = bbox.edge_x / np.linalg.norm(bbox.edge_x) * size
from typeguard import typechecked
from ai4good.models.model import Model, ModelResult
from ai4good.params.param_store import ParamStore
from ai4good.models.abm.initialise_parameters import Parameters
# from ai4good.webapp.cm_model_report_utils import *
import logging
from . import abm
import numpy as np
import pandas as pd
import math
@typechecked
class ABM(Model):
ID = 'agent-based-model'
def __init__(self, ps: ParamStore):
Model.__init__(self, ps)
def id(self) -> str:
return self.ID
def result_id(self, p: Parameters) -> str:
return p.sha1_hash()
def run(self, p: Parameters) -> ModelResult:
for i in range(p.number_of_steps):
p.track_states[i, :] = np.bincount(p.population[:, 1].astype(int), minlength=14)
if abm.epidemic_finish(np.concatenate((p.track_states[i, 1:6], p.track_states[i, 7:p.number_of_states])), i):
break
p.mild_rec = np.random.uniform(0, 1, p.total_population) > math.exp(0.2 * math.log(0.1)) # Liu et al 2020 The Lancet.
p.sev_rec = np.random.uniform(0, 1, p.total_population) > math.exp(math.log(63 / 153) / 12) # Cai et al.
p.pick_sick = np.random.uniform(0, 1, p.total_population) # Get random numbers to determine health states.
if (p.ACTIVATE_INTERVENTION and (i > 0)):
p.iat1 = i
p.ACTIVATE_INTERVENTION = False
p.smaller_movement_radius = 0.001
p.transmission_reduction = 0.25
p.foodpoints_location, p.foodpoints_numbers, p.foodpoints_sharing = abm.position_foodline(p.households_location, p.foodline_blocks[0], p.foodline_blocks[1])
p.local_interaction_space = abm.interaction_neighbours_fast(p.households_location, p.smaller_movement_radius, p.larger_movement_radius, p.overlapping_rages_radius, p.ethnical_corellations)
p.viol_rate = 0.05
            # assumed completion: flag rule violators with probability viol_rate
            p.population[:, 8] = np.where(np.random.rand(p.total_population) < p.viol_rate, 1, 0)
"""Tests for the module :mod:`esmvaltool.cmorizers.obs.utilities`."""
from unittest.mock import Mock
import dask.array as da
import iris
import numpy as np
import pytest
from cf_units import Unit
import esmvaltool.cmorizers.obs.utilities as utils
def np_to_da(array, lazy):
"""Convert numpy array to dask array."""
if not lazy:
return array
if array is None:
return array
return da.from_array(array)
def is_lazy(cube):
"""Check if data is lazy."""
if not cube.has_lazy_data():
return False
for coord in cube.coords(dim_coords=False):
if not coord.has_lazy_points():
return False
if coord.has_bounds():
if not coord.has_lazy_bounds():
return False
return True
def cubes_generator(lazy=True):
"""Generate a list of cubes via test parametrization."""
cube_datas = [
np.array([[0, 1], [-1, 0]], dtype=np.int),
np.array([[0.0, 1.0], [-1.0, 0.0]], dtype=np.float32),
np.array([[0.0, 1.0], [-1.0, 0.0]], dtype=np.float64),
np.ma.masked_equal([[0, 1], [2, 3]], 3).astype(np.int),
np.ma.masked_values([[0.0, 1.0], [2.0, 3.0]], 3.0).astype(np.float32),
        np.ma.masked_values([[0.0, 1.0], [2.0, 3.0]], 3.0).astype(np.float64),
    ]
import os, time, socket, datetime
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import pickle
from random import sample
import random
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
import cv2
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from tensorflow.keras.utils import to_categorical
def sim(filen,type,scenario,sample):
allimgs = []
y = []
if sample==1:
sd=1
elif sample==2:
sd=15
elif sample==3:
sd=25
elif sample==4:
sd=65
elif sample==5:
sd=1000
random.seed(sd)
for ii in range(1, 4):
if type==1:
with open(file='D:/python/LA/image2S{}.pkl'.format(ii),mode='br') as inpf:
imgs2 = pickle.load(inpf)
with open(file='D:/python/LA/image4S{}.pkl'.format(ii),mode='br') as inpf:
imgs4 = pickle.load(inpf)
elif type==2:
with open(file='D:/python/LA/image2SR{}.pkl'.format(ii),mode='br') as inpf:
imgs2 = pickle.load(inpf)
with open(file='D:/python/LA/image4SR{}.pkl'.format(ii),mode='br') as inpf:
imgs4 = pickle.load(inpf)
if ii==1:
imgs2=random.sample(imgs2,250)
imgs4=random.sample(imgs4,250)
elif ii==2:
imgs2 = imgs2 + random.sample(imgs2,40)
imgs4 = imgs4 + random.sample(imgs4,40)
elif ii==3:
imgs2 = imgs2 + random.sample(imgs2,26)
imgs4 = imgs4 + random.sample(imgs4,26)
allimgs = allimgs + imgs2 + imgs4
        y = y + list(np.ones([len(imgs2)+len(imgs4),])*(ii-1)) # generate the corresponding y labels; they must be 0, 1, 2
X = np.stack(allimgs)
Y = np.stack(y)
X_train, X_test, y_train, y_test = \
train_test_split(X, Y, test_size=0.2, random_state=2100, shuffle=True)
y_train_onehot = to_categorical(y_train)
y_test_onehot = to_categorical(y_test)
    # find the max and min values for normalization
    x_train_images=X_train.reshape(len(X_train),X_train.shape[1]*X_train.shape[2]).astype('float32') # data type float32
x_test_images=X_test.reshape(len(X_test),X_test.shape[1]*X_test.shape[2]).astype('float32')
    # min-max normalization using the pixel range of the first training image
x_train_normalize=(x_train_images-min(x_train_images[0,:]))/(max(x_train_images[0,:])-min(x_train_images[0,:]))
X_test_normalize=(x_test_images-min(x_train_images[0,:]))/(max(x_train_images[0,:])-min(x_train_images[0,:]))
    x_train_images4D=x_train_normalize.reshape(len(X_train),X_train.shape[1],X_train.shape[2],1).astype('float32') # data type float32
x_test_images4D=X_test_normalize.reshape(len(X_test),X_test.shape[1],X_test.shape[2],1).astype('float32')
y_train_OneHot=keras.utils.to_categorical(y_train)
y_test_OneHot=keras.utils.to_categorical(y_test)
if scenario==1:
par={'filter1':128,'filter2':256,'strides':1,'kernel': 4,'height':128,'width':128,'dropout1':0.5,'dropout2': 0.5,'Neural':512}
elif scenario==2:
par={'filter1':128,'filter2':256,'strides':1,'kernel': 5,'height':128,'width':128,'dropout1':0.5,'dropout2': 0.5,'Neural':512}
elif scenario==6:
par={'filter1':128,'filter2':128,'strides':1,'kernel': 4,'height':128,'width':128,'dropout1':0.5,'dropout2': 0.5,'Neural':512}
elif scenario==9:
par={'filter1':128,'filter2':128,'strides':1,'kernel': 5,'height':128,'width':128,'dropout1':0.5,'dropout2': 0.5,'Neural':512}
rep=50
test_acc = np.zeros((rep,9))
train_acc = np.zeros((rep,9))
acc = np.zeros((rep,2))
outf = open(r'D:/python/LA/final/LA_result_' + filen + '.txt','at') #append text
for i in range(0, rep):
par1={'Validation': 0.2, 'epoch': 40, 'Batch': 100}
result=DL_fun1(par,par1,x_train_images4D,y_train,x_test_images4D,y_test)
        h1_test = np.array(result['Test_acc'].iloc[0,0:3])
import numpy as np
import random, time
import tensorflow as tf
from worlds.game import *
from hrl.policy_bank_dqn import PolicyBankDQN
from common.schedules import LinearSchedule
from common.replay_buffer import create_experience_replay_buffer
from tester.saver import Saver
from os import listdir
from os.path import isfile, join
from reward_machines.reward_machine import RewardMachine
from baselines.hrl import MetaController
import csv
def run_hrl_baseline(sess, q, rm_file, meta_controllers, options, policy_bank, tester, curriculum, replay_buffer, beta_schedule, show_print, current_step, previous_test):
"""
Strategy:
        - I'll learn a tabular metacontroller over the possible subpolicies
        - Initialize a regular policy bank with the eventual subpolicies (e.g. Fa, Fb, Fc, Fb, Fd)
- Learn as usual
- Pick actions using the sequence
"""
# Initializing parameters
learning_params = tester.learning_params
testing_params = tester.testing_params
reward_machines = tester.get_reward_machines()
rm_id = tester.get_reward_machine_id_from_file(rm_file)
task_params = tester.get_task_params(rm_file)
task = Game(task_params)
actions = task.get_actions()
num_features = len(task.get_features())
meta_controller = meta_controllers[rm_id]
rm = reward_machines[rm_id]
num_steps = learning_params.max_timesteps_per_task
training_reward = 0
testing_reward = 0
is_test = 0
g = 1
N = 20 #episodes per q update
alpha = 0.8
gamma = 0.99
horizon_reward = 0
mc_u1 = 0
u = 0
mc_a =0
s=0
s_new=0
reward = 0
all_events = list()
# Starting interaction with the environment
if show_print: print("Executing", num_steps, "actions...")
t = 0
curriculum_stop = False
# Getting the initial state of the environment and the reward machine
s1, s1_features = task.get_state_and_features()
u1 = rm.get_initial_state()
while t < learning_params.max_timesteps_per_task and not curriculum_stop:
# selecting a macro action from the meta controller
mc_s1, mc_s1_features, mc_u1 = s1, s1_features, u1
mc_r = []
T = 1
u_pool = range(0,learning_params.pool_size)
pr = np.zeros([learning_params.pool_size,1])
pr_sum = 0
pr_select = np.zeros([learning_params.pool_size+1,1])
for u_ in u_pool:
pr_sum += np.exp(q[s][u_]*T)
for u_ in u_pool:
pr[u_] = np.exp(q[s][u_]*T)/pr_sum
for index in range(1,learning_params.pool_size):
for pr_pos in range(0,index):
pr_select[index] += pr[pr_pos]
pr_select[-1] = 1
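        # pr is a softmax distribution over the option pool (scores q[s][.] scaled by T) and
        # pr_select is its cumulative distribution; the draw below picks the next option by
        # inverse-CDF sampling.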
randn = random.random()
u_selected = -1
for u_ in u_pool:
if randn >= pr_select[u_][0] and randn <= pr_select[u_+1][0]:
u_selected = u_
break
u_new = u_selected
if reward>0:
testy = 0
q[s][u] = (1-alpha)*q[s][u] + alpha*(reward + gamma*np.amax(q[s_new][u_new]))
mc_a = u_new
u = u_new
if t%N==0:
horizon_reward = 0
else:
horizon_reward += reward
mc_option = meta_controller.get_option(mc_a) # tuple <rm_id,u_0>
mc_done = False
if show_print: print(mc_option)
# The selected option must be executed at least one step (i.e. len(mc_r) == 0)
#while len(mc_r) == 0:
# or not meta_controller.finish_option(mc_a, task.get_true_propositions()):
current_step += 1
# Choosing an action to perform
if random.random() < 0.15:
a = random.choice(actions)
else:
a = policy_bank.get_best_action(mc_option[0], mc_option[1], s1_features.reshape((1,num_features)))
# updating the curriculum
curriculum.add_step()
# Executing the action
if tester.game_type=="trafficworld":
events = task.get_true_propositions_action(a)
task.execute_action(a)
a = task.get_last_action() # due to MDP slip
else:
task.execute_action(a)
a = task.get_last_action() # due to MDP slip
events = task.get_true_propositions()
s2, s2_features = task.get_state_and_features()
all_events.append(events)
u2 = rm.get_next_state(u1, events)
reward = rm.get_reward(u1,u2,s1,a,s2)
training_reward += reward
s = np.where(s1_features==1)[0][0]
s_new = np.where(s2_features==1)[0][0]
sy = s%11+1
sx = (s-sy+1)/11+1
synew = s_new % 11+1
sxnew = (s_new - synew+1) / 11+1
a1=a
if reward>0:
reward
# updating the reward for the meta controller
mc_r.append(reward)
# Getting rewards and next states for each option
rewards, next_states = [],[]
for j in range(len(options)):
j_rewards, j_next_states = options[j].get_rewards_and_next_states(s1, a, s2, events)
rewards.append(j_rewards)
next_states.append(j_next_states)
# Mapping rewards and next states to specific policies in the policy bank
rewards = policy_bank.select_rewards(rewards)
next_policies = policy_bank.select_next_policies(next_states)
# Adding this experience to the experience replay buffer
replay_buffer.add(s1_features, a, s2_features, rewards, next_policies)
# Learning
if curriculum.get_current_step() > learning_params.learning_starts and curriculum.get_current_step() % learning_params.train_freq == 0:
if learning_params.prioritized_replay:
experience = replay_buffer.sample(learning_params.batch_size, beta=beta_schedule.value(curriculum.get_current_step()))
S1, A, S2, Rs, NPs, weights, batch_idxes = experience
else:
S1, A, S2, Rs, NPs = replay_buffer.sample(learning_params.batch_size)
weights, batch_idxes = None, None
abs_td_errors = policy_bank.learn(S1, A, S2, Rs, NPs, weights) # returns the absolute td_error
if learning_params.prioritized_replay:
new_priorities = abs_td_errors + learning_params.prioritized_replay_eps
replay_buffer.update_priorities(batch_idxes, new_priorities)
# Updating the target network
if curriculum.get_current_step() > learning_params.learning_starts and curriculum.get_current_step() % learning_params.target_network_update_freq == 0:
policy_bank.update_target_network()
# Printing
if show_print and (t+1) % learning_params.print_freq == 0:
print("Step:", t+1, "\tTotal reward:", training_reward)
# Testing
if tester.testing_params.test and curriculum.get_current_step() % tester.testing_params.test_freq == 0:
testing_reward, q = tester.run_test(curriculum.get_current_step(), sess, q, run_hrl_baseline_test, meta_controllers, policy_bank, num_features)
is_test = 1
# Restarting the environment (Game Over)
if task.is_env_game_over() or rm.is_terminal_state(u2):
# Restarting the game
task = Game(task_params)
s2, s2_features = task.get_state_and_features()
u2 = rm.get_initial_state()
mc_done = True
if curriculum.stop_task(t):
curriculum_stop = True
# checking the steps time-out
if curriculum.stop_learning():
curriculum_stop = True
# Moving to the next state
s1, s1_features, u1 = s2, s2_features, u2
t += 1
if t == learning_params.max_timesteps_per_task or curriculum_stop or mc_done:
break
# learning on the meta controller
mc_s2, mc_s2_features, mc_u2 = s1, s1_features, u
mc_reward = _get_discounted_reward(mc_r, learning_params.gamma)
mc_steps = len(mc_r)
#meta_controller.learn(mc_s1_features, mc_u1, mc_a, mc_reward, mc_s2_features, mc_u2, mc_done, mc_steps)
#meta_controller.show()
#input()
step_count = t
if is_test==0:
is_test_result = 0
testing_reward = previous_test
else:
is_test_result = 1
return training_reward, step_count, testing_reward, is_test_result, q
def _get_discounted_reward(r_all, gamma):
    discounted_r = 0
    for r in r_all[::-1]:
        discounted_r = r + gamma*discounted_r
    return discounted_r
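# Worked example (illustrative only): the reverse accumulation above evaluates
# r_0 + gamma*r_1 + gamma^2*r_2 + ...; for r_all = [1, 0, 1] and gamma = 0.9 this is
# 1 + 0.9*0 + 0.81*1 = 1.81.
assert abs(_get_discounted_reward([1, 0, 1], 0.9) - 1.81) < 1e-12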
def run_hrl_baseline_test(sess, q, reward_machines, task_params, rm_id, learning_params, testing_params, meta_controllers, policy_bank, num_features):
# Initializing parameters
meta_controller = meta_controllers[rm_id]
task = Game(task_params)
rm = reward_machines[rm_id]
s1, s1_features = task.get_state_and_features()
u1 = rm.get_initial_state()
horizon_reward = 0
reward = 0
# Starting interaction with the environment
r_total = 0
t = 0
N = 20
alpha = 0.8
gamma = 0.99
u = 0
s=0
s_new=0
u_pool = range(0, learning_params.pool_size)
while t < testing_params.num_steps:
# selecting a macro action from the meta controller
mc_s1, mc_s1_features, mc_u1 = s1, s1_features, u
T = 1
if random.random()<0.1:
mc_a = random.choice(u_pool)
else:
pr = np.zeros([learning_params.pool_size, 1])
pr_sum = 0
pr_select = np.zeros([learning_params.pool_size + 1, 1])
for u_ in u_pool:
pr_sum += np.exp(q[s][u_]*T)
for u_ in u_pool:
pr[u_] = np.exp(q[s][u_]*T) / pr_sum
for index in range(1, learning_params.pool_size):
for pr_pos in range(0, index):
pr_select[index] += pr[pr_pos]
pr_select[-1] = 1
randn = random.random()
u_selected = -1
for u_ in u_pool:
if randn >= pr_select[u_][0] and randn <= pr_select[u_+1][0]:
u_selected = u_
break
u_new = u_selected
q[s][u] = (1-alpha)*q[s][u] + alpha*(reward + gamma*np.amax(q[s_new][u_new]))
mc_a = u_new
mc_option = meta_controller.get_option(mc_a) # tuple <rm_id,u_0>
# The selected option must be executed at least one step
first = True
# Choosing an action to perform
a = policy_bank.get_best_action(mc_option[0], mc_option[1], s1_features.reshape((1,num_features)))
# Executing the action
task.execute_action(a)
a = task.get_last_action() # due to MDP slip
s2, s2_features = task.get_state_and_features()
events = task.get_true_propositions()
u2 = rm.get_next_state(u1, events)
reward = rm.get_reward(u1,u2,s1,a,s2)
r_total += reward * learning_params.gamma**t
s = np.where(s1_features==1)[0][0]
s_new = np.where(s2_features==1)[0][0]
# Moving to the next state
s1, s1_features, u1 = s2, s2_features, u2
t += 1
# Restarting the environment (Game Over)
if task.is_env_game_over() or rm.is_terminal_state(u2) or t == testing_params.num_steps:
break
if rm.is_terminal_state(u2):
return 1, q
else:
return 0, q
return r_total
def _get_option_files(folder):
return [f.replace(".txt","") for f in listdir(folder) if isfile(join(folder, f))]
def run_hrl_experiments(alg_name, tester, curriculum, num_times, show_print, use_rm):
"""
NOTE: To implement this baseline, we encode each option as a reward machine with one transition
- use_rm: Indicates whether to prune options using the reward machine
"""
# Setting up the saver
saver = Saver(alg_name, tester, curriculum)
learning_params = tester.learning_params
# Running the tasks 'num_times'
time_init = time.time()
step = 0
steps = list()
rewards = list()
plot_dict = dict()
for t in range(num_times):
tt=t+1
# Setting the random seed to 't'
random.seed(t)
sess = tf.Session()
testing_reward = 0
testing_step = 0
num_episodes = 0
total = 0
q = np.zeros([1681,8]) #second dimension is number of options
# Reseting default values
curriculum.restart()
# Creating the experience replay buffer
replay_buffer, beta_schedule = create_experience_replay_buffer(learning_params.buffer_size, learning_params.prioritized_replay, learning_params.prioritized_replay_alpha, learning_params.prioritized_replay_beta0, curriculum.total_steps if learning_params.prioritized_replay_beta_iters is None else learning_params.prioritized_replay_beta_iters)
# Loading options for this experiment
option_folder = "../experiments/%s/options/"%tester.get_world_name()
options = [] # NOTE: The policy bank also uses this list (in the same order)
option2file = []
for option_file in _get_option_files(option_folder): # NOTE: The option id indicates what the option does (e.g. "a&!n")
option = RewardMachine(join(option_folder, option_file + ".txt"))
options.append(option)
option2file.append(option_file)
# getting num inputs and outputs net
task_aux = Game(tester.get_task_params(curriculum.get_current_task()))
num_features = len(task_aux.get_features())
num_actions = len(task_aux.get_actions())
# initializing the meta controllers (one metacontroller per task)
meta_controllers = []
reward_machines = tester.get_reward_machines()
if tester.game_type == "trafficworld":
options[0] = options[0]
options[1] = options[6]
options[2] = options[2]
options[3] = options[3]
options[4] = options[7]
learning_params.pool_size = 5
elif tester.game_type == "officeworld":
options[0] = options[0]
options[1] = options[6]
options[2] = options[2]
options[3] = options[3]
options[4] = options[4]
options[5] = options[5]
learning_params.pool_size = 6
else:
options[0] = options[6]
options[1] = options[7]
options[2] = options[2]
options[3] = options[3]
learning_params.pool_size = 4
for i in range(len(reward_machines)):
rm = reward_machines[i]
num_states = len(rm.get_states())
policy_name = "Reward_Machine_%d"%i
mc = MetaController(sess, policy_name, options, option2file, rm, use_rm, learning_params, num_features, num_states, show_print)
meta_controllers.append(mc)
# initializing the bank of policies with one policy per option
policy_bank = PolicyBankDQN(sess, num_actions, num_features, learning_params, options)
# Task loop
while not curriculum.stop_learning():
if show_print: print("Current step:", curriculum.get_current_step(), "from", curriculum.total_steps)
rm_file = curriculum.get_next_task()
num_episodes += 1
# Running 'rm_file' for one episode
found_reward, step_count, testing_reward, is_test, q = run_hrl_baseline(sess, q, rm_file, meta_controllers, options, policy_bank, tester, curriculum, replay_buffer, beta_schedule, show_print, step, testing_reward)
step += step_count
steps.append(step)
rewards.append(found_reward)
total += found_reward
if (num_episodes%100==0):
print("run index:", +tt)
toprint = "Total training reward at "+str(step)+": "+str(total)
print(toprint)
if is_test:
testing_step += tester.testing_params.test_freq
if testing_step in plot_dict:
plot_dict[testing_step].append(testing_reward)
else:
plot_dict[testing_step] = [testing_reward]
tf.reset_default_graph()
sess.close()
# Backing up the results
saver.save_results()
# Showing results
prc_25 = list()
prc_50 = list()
prc_75 = list()
rewards_plot = list()
steps_plot = list()
# Buffers for plots
current_step = list()
current_25 = list()
current_50 = list()
current_75 = list()
steps_plot = list()
for step in plot_dict.keys():
if len(current_step) < 10:
current_25.append(np.percentile(np.array(plot_dict[step]),25))
current_50.append(np.percentile(np.array(plot_dict[step]),50))
current_75.append(np.percentile(np.array(plot_dict[step]),75))
current_step.append(sum(plot_dict[step])/len(plot_dict[step]))
else:
current_step.pop(0)
current_25.pop(0)
current_50.pop(0)
current_75.pop(0)
current_25.append(np.percentile(np.array(plot_dict[step]),25))
            current_50.append(np.percentile(np.array(plot_dict[step]),50))
            current_75.append(np.percentile(np.array(plot_dict[step]),75))
            current_step.append(sum(plot_dict[step])/len(plot_dict[step]))
import datetime
import logging
from abc import ABC
from typing import Union, List
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
class TimeSeries(ABC):
def __init__(self, **kwargs):
""" Abstract Baseclass representing TimeSeries like measurements
Parameters
----------
kwargs
"""
self.rdy_format_version = kwargs["rdy_format_version"]
kwargs.pop("rdy_format_version")
kwargs.pop("__class__")
for k, v in kwargs.items(): # Replaces None values arguments with empty lists
if v is None and k != "rdy_format_version":
kwargs[k] = np.array([])
else:
if type(v) == np.ndarray:
kwargs[k] = v
else:
kwargs[k] = np.array(v)
if k != "time" and len(kwargs["time"]) > 0 and len(v) == 0:
kwargs[k] = np.zeros(len(kwargs["time"]))
self.__dict__.update(kwargs)
self._time: np.ndarray = np.array(self.time) # Original unadjusted timestamps
self._timedelta: np.ndarray = np.diff(self._time)
if self.rdy_format_version and self.rdy_format_version <= 1.2:
self._time = (self._time * 1e9).astype(np.int64)
self.time = self._time.copy()
def __len__(self):
        # an uninitialised series has length 0; otherwise report the number of samples
        if np.array_equal(self.time, np.array(None)):
            return 0
        return len(self.time)
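# Hypothetical concrete subclass, shown only to illustrate the kwargs contract of the base
# class: __init__ expects 'rdy_format_version' and '__class__' entries in kwargs and converts
# every remaining field to a numpy array (None becomes an empty array).
class _ExampleSeries(TimeSeries):
    def __init__(self, time=None, acc_x=None, rdy_format_version: float = 1.3):
        super().__init__(**{"time": time,
                            "acc_x": acc_x,
                            "rdy_format_version": rdy_format_version,
                            "__class__": _ExampleSeries})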
# Authors:
#
# <NAME>
#
# License: BSD 3 clause
import warnings
import itertools
import numpy as np
import numpy.linalg as la
from scipy import sparse, stats
import pytest
from sklearn.utils import gen_batches
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_less
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_allclose_dense_sparse
from sklearn.utils._testing import skip_if_32bit
from sklearn.utils._testing import _convert_container
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing import Binarizer
from sklearn.preprocessing import KernelCenterer
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import normalize
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import minmax_scale
from sklearn.preprocessing import QuantileTransformer
from sklearn.preprocessing import quantile_transform
from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import maxabs_scale
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import robust_scale
from sklearn.preprocessing import add_dummy_feature
from sklearn.preprocessing import PowerTransformer
from sklearn.preprocessing import power_transform
from sklearn.preprocessing._data import _handle_zeros_in_scale
from sklearn.preprocessing._data import BOUNDS_THRESHOLD
from sklearn.exceptions import NotFittedError
from sklearn.base import clone
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_predict
from sklearn.svm import SVR
from sklearn.utils import shuffle
from sklearn import datasets
iris = datasets.load_iris()
# Make some data to be used many times
rng = np.random.RandomState(0)
n_features = 30
n_samples = 1000
offsets = rng.uniform(-1, 1, size=n_features)
scales = rng.uniform(1, 10, size=n_features)
X_2d = rng.randn(n_samples, n_features) * scales + offsets
X_1row = X_2d[0, :].reshape(1, n_features)
X_1col = X_2d[:, 0].reshape(n_samples, 1)
X_list_1row = X_1row.tolist()
X_list_1col = X_1col.tolist()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def _check_dim_1axis(a):
return np.asarray(a).shape[0]
def assert_correct_incr(i, batch_start, batch_stop, n, chunk_size,
n_samples_seen):
if batch_stop != n:
assert (i + 1) * chunk_size == n_samples_seen
else:
assert (i * chunk_size + (batch_stop - batch_start) ==
n_samples_seen)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
scaler = StandardScaler()
        # make sure an error is raised when the sample weights are more than 1-D
sample_weight_notOK = rng.randn(n_samples, 1) ** 2
with pytest.raises(ValueError):
scaler.fit(X, y, sample_weight=sample_weight_notOK)
@pytest.mark.parametrize(['Xw', 'X', 'sample_weight'],
[([[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [1, 2, 3], [4, 5, 6]],
[2., 1.]),
([[1, 0, 1], [0, 0, 1]],
[[1, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]],
np.array([1, 3])),
([[1, np.nan, 1], [np.nan, np.nan, 1]],
[[1, np.nan, 1], [np.nan, np.nan, 1],
[np.nan, np.nan, 1], [np.nan, np.nan, 1]],
np.array([1, 3])),
])
@pytest.mark.parametrize(
"array_constructor", ["array", "sparse_csr", "sparse_csc"]
)
def test_standard_scaler_sample_weight(
Xw, X, sample_weight, array_constructor):
with_mean = not array_constructor.startswith("sparse")
X = _convert_container(X, array_constructor)
Xw = _convert_container(Xw, array_constructor)
# weighted StandardScaler
yw = np.ones(Xw.shape[0])
scaler_w = StandardScaler(with_mean=with_mean)
scaler_w.fit(Xw, yw, sample_weight=sample_weight)
# unweighted, but with repeated samples
y = np.ones(X.shape[0])
scaler = StandardScaler(with_mean=with_mean)
scaler.fit(X, y)
X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]]
assert_almost_equal(scaler.mean_, scaler_w.mean_)
assert_almost_equal(scaler.var_, scaler_w.var_)
assert_almost_equal(scaler.transform(X_test), scaler_w.transform(X_test))
def test_standard_scaler_1d():
# Test scaling of dataset along single axis
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_almost_equal(scaler.mean_, X.ravel())
assert_almost_equal(scaler.scale_, np.ones(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.std(axis=0),
np.zeros_like(n_features))
else:
assert_almost_equal(scaler.mean_, X.mean())
assert_almost_equal(scaler.scale_, X.std())
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.)
assert scaler.n_samples_seen_ == X.shape[0]
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones((5, 1))
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_almost_equal(scaler.mean_, 1.)
assert_almost_equal(scaler.scale_, 1.)
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), .0)
assert scaler.n_samples_seen_ == X.shape[0]
@pytest.mark.parametrize("sparse_constructor",
[None, sparse.csc_matrix, sparse.csr_matrix])
@pytest.mark.parametrize("add_sample_weight", [False, True])
def test_standard_scaler_dtype(add_sample_weight, sparse_constructor):
# Ensure scaling does not affect dtype
rng = np.random.RandomState(0)
n_samples = 10
n_features = 3
if add_sample_weight:
sample_weight = np.ones(n_samples)
else:
sample_weight = None
with_mean = True
for dtype in [np.float16, np.float32, np.float64]:
X = rng.randn(n_samples, n_features).astype(dtype)
if sparse_constructor is not None:
X = sparse_constructor(X)
with_mean = False
scaler = StandardScaler(with_mean=with_mean)
X_scaled = scaler.fit(X, sample_weight=sample_weight).transform(X)
assert X.dtype == X_scaled.dtype
assert scaler.mean_.dtype == np.float64
assert scaler.scale_.dtype == np.float64
@pytest.mark.parametrize("scaler", [
StandardScaler(with_mean=False),
RobustScaler(with_centering=False),
])
@pytest.mark.parametrize("sparse_constructor",
[np.asarray, sparse.csc_matrix, sparse.csr_matrix])
@pytest.mark.parametrize("add_sample_weight", [False, True])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("constant", [0, 1., 100.])
def test_standard_scaler_constant_features(
scaler, add_sample_weight, sparse_constructor, dtype, constant):
if (isinstance(scaler, StandardScaler)
and constant > 1
and sparse_constructor is not np.asarray
and add_sample_weight):
# https://github.com/scikit-learn/scikit-learn/issues/19546
pytest.xfail("Computation of weighted variance is numerically unstable"
" for sparse data. See: #19546.")
if isinstance(scaler, RobustScaler) and add_sample_weight:
pytest.skip(f"{scaler.__class__.__name__} does not yet support"
f" sample_weight")
rng = np.random.RandomState(0)
n_samples = 100
n_features = 1
if add_sample_weight:
fit_params = dict(sample_weight=rng.uniform(size=n_samples) * 2)
else:
fit_params = {}
X_array = np.full(shape=(n_samples, n_features), fill_value=constant,
dtype=dtype)
X = sparse_constructor(X_array)
X_scaled = scaler.fit(X, **fit_params).transform(X)
if isinstance(scaler, StandardScaler):
# The variance info should be close to zero for constant features.
assert_allclose(scaler.var_, np.zeros(X.shape[1]), atol=1e-7)
# Constant features should not be scaled (scale of 1.):
assert_allclose(scaler.scale_, np.ones(X.shape[1]))
if hasattr(X_scaled, "toarray"):
assert_allclose(X_scaled.toarray(), X_array)
else:
assert_allclose(X_scaled, X)
if isinstance(scaler, StandardScaler) and not add_sample_weight:
# Also check consistency with the standard scale function.
X_scaled_2 = scale(X, with_mean=scaler.with_mean)
if hasattr(X_scaled_2, "toarray"):
            assert_allclose(X_scaled_2.toarray(), X_scaled.toarray())
else:
            assert_allclose(X_scaled_2, X_scaled)
def test_scale_1d():
# 1-d inputs
X_list = [1., 3., 5., 0.]
X_arr = np.array(X_list)
for X in [X_list, X_arr]:
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(), 0.0)
assert_array_almost_equal(X_scaled.std(), 1.0)
assert_array_equal(scale(X, with_mean=False, with_std=False), X)
@skip_if_32bit
def test_standard_scaler_numerical_stability():
# Test numerical stability of scaling
    # np.log(1e-5) is taken because its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std.
x = np.full(8, np.log(1e-5), dtype=np.float64)
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
with pytest.warns(None) as record:
scale(x)
assert len(record) == 0
assert_array_almost_equal(scale(x), np.zeros(8))
    # with 2 more samples, the std computation runs into numerical issues:
x = np.full(10, np.log(1e-5), dtype=np.float64)
warning_message = (
"standard deviation of the data is probably very close to 0"
)
with pytest.warns(UserWarning, match=warning_message):
x_scaled = scale(x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.full(10, 1e-100, dtype=np.float64)
with pytest.warns(None) as record:
x_small_scaled = scale(x)
assert len(record) == 0
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.full(10, 1e100, dtype=np.float64)
warning_message = (
"Dataset may contain too large values"
)
with pytest.warns(UserWarning, match=warning_message):
x_big_scaled = scale(x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
with pytest.warns(UserWarning, match=warning_message):
x_big_centered = scale(x_big, with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
n_features = 5
n_samples = 4
X = rng.randn(n_samples, n_features)
X[:, 0] = 0.0 # first feature is always of zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
assert scaler.n_samples_seen_ == n_samples
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert X_scaled is not X
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert X_scaled_back is not X
assert X_scaled_back is not X_scaled
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0])
# Check that the data hasn't been modified
assert X_scaled is not X
X_scaled = scaler.fit(X).transform(X, copy=False)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert X_scaled is X
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert X_scaled is not X
def test_scaler_float16_overflow():
# Test if the scaler will not overflow on float16 numpy arrays
rng = np.random.RandomState(0)
# float16 has a maximum of 65500.0. On the worst case 5 * 200000 is 100000
# which is enough to overflow the data type
X = rng.uniform(5, 10, [200000, 1]).astype(np.float16)
with np.errstate(over='raise'):
scaler = StandardScaler().fit(X)
X_scaled = scaler.transform(X)
# Calculate the float64 equivalent to verify result
X_scaled_f64 = StandardScaler().fit_transform(X.astype(np.float64))
# Overflow calculations may cause -inf, inf, or nan. Since there is no nan
# input, all of the outputs should be finite. This may be redundant since a
# FloatingPointError exception will be thrown on overflow above.
assert np.all(np.isfinite(X_scaled))
# The normal distribution is very unlikely to go above 4. At 4.0-8.0 the
# float16 precision is 2^-8 which is around 0.004. Thus only 2 decimals are
# checked to account for precision differences.
assert_array_almost_equal(X_scaled, X_scaled_f64, decimal=2)
def test_handle_zeros_in_scale():
s1 = np.array([0, 1e-16, 1, 2, 3])
s2 = _handle_zeros_in_scale(s1, copy=True)
assert_allclose(s1, np.array([0, 1e-16, 1, 2, 3]))
assert_allclose(s2, np.array([1, 1, 1, 2, 3]))
def test_minmax_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler()
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MinMaxScaler().fit(X[batch0])
scaler_incr = MinMaxScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std until the end of partial fits, and
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = StandardScaler(with_std=False).fit(X)
scaler_incr = StandardScaler(with_std=False)
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_)
assert scaler_batch.var_ == scaler_incr.var_ # Nones
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_incr = StandardScaler().partial_fit(X[batch0])
if chunk_size == 1:
assert_array_almost_equal(np.zeros(n_features, dtype=np.float64),
scaler_incr.var_)
assert_array_almost_equal(np.ones(n_features, dtype=np.float64),
scaler_incr.scale_)
else:
assert_array_almost_equal(np.var(X[batch0], axis=0),
scaler_incr.var_)
assert_array_almost_equal(np.std(X[batch0], axis=0),
scaler_incr.scale_) # no constants
# Test std until the end of partial fits, and
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_)
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
def test_standard_scaler_partial_fit_numerical_stability():
    # Test if the incremental computation introduces significant errors
    # for large datasets with values of large magnitude
rng = np.random.RandomState(0)
n_features = 2
n_samples = 100
offsets = rng.uniform(-1e15, 1e15, size=n_features)
scales = rng.uniform(1e3, 1e6, size=n_features)
X = rng.randn(n_samples, n_features) * scales + offsets
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler()
for chunk in X:
scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features))
    # Regardless of abs values, they must not differ by more than 6 significant digits
tol = 10 ** (-6)
assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol)
assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol)
# NOTE Be aware that for much larger offsets std is very unstable (last
# assert) while mean is OK.
# Sparse input
size = (100, 3)
scale = 1e20
X = rng.randint(0, 2, size).astype(np.float64) * scale
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
for X in [X_csr, X_csc]:
# with_mean=False is required with sparse input
scaler = StandardScaler(with_mean=False).fit(X)
scaler_incr = StandardScaler(with_mean=False)
for chunk in X:
# chunk = sparse.csr_matrix(data_chunks)
scaler_incr = scaler_incr.partial_fit(chunk)
        # Regardless of magnitude, they must not differ by more than 6 digits
tol = 10 ** (-6)
assert scaler.mean_ is not None
assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol)
@pytest.mark.parametrize("sample_weight", [True, None])
def test_partial_fit_sparse_input(sample_weight):
# Check that sparsity is not destroyed
X = np.array([[1.], [0.], [0.], [5.]])
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
if sample_weight:
sample_weight = rng.rand(X_csc.shape[0])
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
for X in [X_csr, X_csc]:
X_null = null_transform.partial_fit(
X, sample_weight=sample_weight).transform(X)
assert_array_equal(X_null.toarray(), X.toarray())
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.toarray(), X_null.toarray())
assert_array_equal(X_orig.toarray(), X.toarray())
@pytest.mark.parametrize("sample_weight", [True, None])
def test_standard_scaler_trasform_with_partial_fit(sample_weight):
# Check some postconditions after applying partial_fit and transform
X = X_2d[:100, :]
if sample_weight:
sample_weight = rng.rand(X.shape[0])
scaler_incr = StandardScaler()
for i, batch in enumerate(gen_batches(X.shape[0], 1)):
X_sofar = X[:(i + 1), :]
chunks_copy = X_sofar.copy()
if sample_weight is None:
scaled_batch = StandardScaler().fit_transform(X_sofar)
scaler_incr = scaler_incr.partial_fit(X[batch])
else:
scaled_batch = StandardScaler().fit_transform(
X_sofar, sample_weight=sample_weight[:i + 1])
scaler_incr = scaler_incr.partial_fit(
X[batch], sample_weight=sample_weight[batch])
scaled_incr = scaler_incr.transform(X_sofar)
assert_array_almost_equal(scaled_batch, scaled_incr)
assert_array_almost_equal(X_sofar, chunks_copy) # No change
right_input = scaler_incr.inverse_transform(scaled_incr)
assert_array_almost_equal(X_sofar, right_input)
zero = np.zeros(X.shape[1])
epsilon = np.finfo(float).eps
assert_array_less(zero, scaler_incr.var_ + epsilon) # as less or equal
assert_array_less(zero, scaler_incr.scale_ + epsilon)
if sample_weight is None:
            # (i+1) because the Scaler has already been fitted
assert (i + 1) == scaler_incr.n_samples_seen_
else:
assert (
np.sum(sample_weight[:i + 1]) ==
pytest.approx(scaler_incr.n_samples_seen_)
)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
with pytest.raises(ValueError):
scaler.fit(X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = MinMaxScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(X_scaled.min(axis=0),
np.zeros(n_features))
assert_array_almost_equal(X_scaled.max(axis=0),
np.zeros(n_features))
else:
assert_array_almost_equal(X_scaled.min(axis=0), .0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.)
assert scaler.n_samples_seen_ == X.shape[0]
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones((5, 1))
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert X_scaled.min() >= 0.
assert X_scaled.max() <= 1.
assert scaler.n_samples_seen_ == X.shape[0]
# Function interface
X_1d = X_1row.ravel()
min_ = X_1d.min()
max_ = X_1d.max()
assert_array_almost_equal((X_1d - min_) / (max_ - min_),
minmax_scale(X_1d, copy=True))
@pytest.mark.parametrize("sample_weight", [True, None])
def test_scaler_without_centering(sample_weight):
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
if sample_weight:
sample_weight = rng.rand(X.shape[0])
with pytest.raises(ValueError):
StandardScaler().fit(X_csr)
with pytest.raises(ValueError):
StandardScaler().fit(X_csc)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(
X, sample_weight=sample_weight)
X_scaled = scaler.transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
scaler_csr = StandardScaler(with_mean=False).fit(
X_csr, sample_weight=sample_weight)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert not np.any(np.isnan(X_csr_scaled.data))
scaler_csc = StandardScaler(with_mean=False).fit(
X_csc, sample_weight=sample_weight)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert not np.any(np.isnan(X_csc_scaled.data))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.n_samples_seen_,
scaler_csr.n_samples_seen_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(scaler.n_samples_seen_,
scaler_csc.n_samples_seen_)
if sample_weight is None:
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_var = \
mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_var, X_scaled.var(axis=0))
# Check that X has not been modified (copy)
assert X_scaled is not X
assert X_csr_scaled is not X_csr
X_scaled_back = scaler.inverse_transform(X_scaled)
assert X_scaled_back is not X
assert X_scaled_back is not X_scaled
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert X_csr_scaled_back is not X_csr
assert X_csr_scaled_back is not X_csr_scaled
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert X_csc_scaled_back is not X_csc
assert X_csc_scaled_back is not X_csc_scaled
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
@pytest.mark.parametrize("with_mean", [True, False])
@pytest.mark.parametrize("with_std", [True, False])
@pytest.mark.parametrize("array_constructor",
[np.asarray, sparse.csc_matrix, sparse.csr_matrix])
def test_scaler_n_samples_seen_with_nan(with_mean, with_std,
array_constructor):
X = np.array([[0, 1, 3],
[np.nan, 6, 10],
[5, 4, np.nan],
[8, 0, np.nan]],
dtype=np.float64)
X = array_constructor(X)
if sparse.issparse(X) and with_mean:
pytest.skip("'with_mean=True' cannot be used with sparse matrix.")
transformer = StandardScaler(with_mean=with_mean, with_std=with_std)
transformer.fit(X)
assert_array_equal(transformer.n_samples_seen_, np.array([3, 4, 2]))
def _check_identity_scalers_attributes(scaler_1, scaler_2):
assert scaler_1.mean_ is scaler_2.mean_ is None
assert scaler_1.var_ is scaler_2.var_ is None
assert scaler_1.scale_ is scaler_2.scale_ is None
assert scaler_1.n_samples_seen_ == scaler_2.n_samples_seen_
def test_scaler_return_identity():
# test that the scaler return identity when with_mean and with_std are
# False
X_dense = np.array([[0, 1, 3],
[5, 6, 0],
[8, 0, 10]],
dtype=np.float64)
X_csr = sparse.csr_matrix(X_dense)
X_csc = X_csr.tocsc()
transformer_dense = StandardScaler(with_mean=False, with_std=False)
X_trans_dense = transformer_dense.fit_transform(X_dense)
transformer_csr = clone(transformer_dense)
X_trans_csr = transformer_csr.fit_transform(X_csr)
transformer_csc = clone(transformer_dense)
X_trans_csc = transformer_csc.fit_transform(X_csc)
assert_allclose_dense_sparse(X_trans_csr, X_csr)
assert_allclose_dense_sparse(X_trans_csc, X_csc)
assert_allclose(X_trans_dense, X_dense)
for trans_1, trans_2 in itertools.combinations([transformer_dense,
transformer_csr,
transformer_csc],
2):
_check_identity_scalers_attributes(trans_1, trans_2)
transformer_dense.partial_fit(X_dense)
transformer_csr.partial_fit(X_csr)
transformer_csc.partial_fit(X_csc)
for trans_1, trans_2 in itertools.combinations([transformer_dense,
transformer_csr,
transformer_csc],
2):
_check_identity_scalers_attributes(trans_1, trans_2)
transformer_dense.fit(X_dense)
transformer_csr.fit(X_csr)
transformer_csc.fit(X_csc)
for trans_1, trans_2 in itertools.combinations([transformer_dense,
transformer_csr,
transformer_csc],
2):
_check_identity_scalers_attributes(trans_1, trans_2)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
X[:, 0] = 0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert not np.any(np.isnan(X_csr_scaled.data))
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert not np.any(np.isnan(X_csc_scaled.data))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert X_scaled is not X
assert X_csr_scaled is not X_csr
X_scaled_back = scaler.inverse_transform(X_scaled)
assert X_scaled_back is not X
assert X_scaled_back is not X_scaled
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert X_csr_scaled_back is not X_csr
assert X_csr_scaled_back is not X_csr_scaled
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert X_csc_scaled_back is not X_csc
assert X_csc_scaled_back is not X_csc_scaled
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
X_csc_copy = X_csc.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csc)
assert_array_equal(X_csc.toarray(), X_csc_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
# check scaling and fit with direct calls on sparse data
with pytest.raises(ValueError):
scale(X_csr, with_mean=True)
with pytest.raises(ValueError):
StandardScaler(with_mean=True).fit(X_csr)
with pytest.raises(ValueError):
scale(X_csc, with_mean=True)
with pytest.raises(ValueError):
StandardScaler(with_mean=True).fit(X_csc)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
with pytest.raises(ValueError):
scaler.transform(X_csr)
with pytest.raises(ValueError):
scaler.transform(X_csc)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
with pytest.raises(ValueError):
scaler.inverse_transform(X_transformed_csr)
X_transformed_csc = sparse.csc_matrix(scaler.transform(X))
with pytest.raises(ValueError):
scaler.inverse_transform(X_transformed_csc)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [[np.inf, 5, 6, 7, 8]]
with pytest.raises(ValueError, match="Input contains infinity "
"or a value too large"):
scale(X)
def test_robust_scaler_error_sparse():
X_sparse = sparse.rand(1000, 10)
scaler = RobustScaler(with_centering=True)
err_msg = "Cannot center sparse matrices"
with pytest.raises(ValueError, match=err_msg):
scaler.fit(X_sparse)
@pytest.mark.parametrize("with_centering", [True, False])
@pytest.mark.parametrize("with_scaling", [True, False])
@pytest.mark.parametrize("X", [np.random.randn(10, 3),
sparse.rand(10, 3, density=0.5)])
def test_robust_scaler_attributes(X, with_centering, with_scaling):
# check consistent type of attributes
if with_centering and sparse.issparse(X):
pytest.skip("RobustScaler cannot center sparse matrix")
scaler = RobustScaler(with_centering=with_centering,
with_scaling=with_scaling)
scaler.fit(X)
if with_centering:
assert isinstance(scaler.center_, np.ndarray)
else:
assert scaler.center_ is None
if with_scaling:
assert isinstance(scaler.scale_, np.ndarray)
else:
assert scaler.scale_ is None
def test_robust_scaler_col_zero_sparse():
    # check that the scaler works when there is no data materialized in a
    # column of a sparse matrix
X = np.random.randn(10, 5)
X[:, 0] = 0
X = sparse.csr_matrix(X)
scaler = RobustScaler(with_centering=False)
scaler.fit(X)
assert scaler.scale_[0] == pytest.approx(1)
X_trans = scaler.transform(X)
assert_allclose(X[:, 0].toarray(), X_trans[:, 0].toarray())
def test_robust_scaler_2d_arrays():
# Test robust scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
@pytest.mark.parametrize("density", [0, 0.05, 0.1, 0.5, 1])
@pytest.mark.parametrize("strictly_signed",
['positive', 'negative', 'zeros', None])
def test_robust_scaler_equivalence_dense_sparse(density, strictly_signed):
# Check the equivalence of the fitting with dense and sparse matrices
X_sparse = sparse.rand(1000, 5, density=density).tocsc()
if strictly_signed == 'positive':
X_sparse.data = np.abs(X_sparse.data)
elif strictly_signed == 'negative':
X_sparse.data = - np.abs(X_sparse.data)
elif strictly_signed == 'zeros':
X_sparse.data = np.zeros(X_sparse.data.shape, dtype=np.float64)
X_dense = X_sparse.toarray()
scaler_sparse = RobustScaler(with_centering=False)
scaler_dense = RobustScaler(with_centering=False)
scaler_sparse.fit(X_sparse)
scaler_dense.fit(X_dense)
assert_allclose(scaler_sparse.scale_, scaler_dense.scale_)
def test_robust_scaler_transform_one_row_csr():
# Check RobustScaler on transforming csr matrix with one row
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
single_row = np.array([[0.1, 1., 2., 0., -1.]])
scaler = RobustScaler(with_centering=False)
scaler = scaler.fit(X)
row_trans = scaler.transform(sparse.csr_matrix(single_row))
row_expected = single_row / scaler.scale_
assert_array_almost_equal(row_trans.toarray(), row_expected)
row_scaled_back = scaler.inverse_transform(row_trans)
assert_array_almost_equal(single_row, row_scaled_back.toarray())
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_iris_quantiles():
X = iris.data
scaler = RobustScaler(quantile_range=(10, 90))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(10, 90), axis=0)
q_range = q[1] - q[0]
assert_array_almost_equal(q_range, 1)
def test_quantile_transform_iris():
X = iris.data
# uniform output distribution
transformer = QuantileTransformer(n_quantiles=30)
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# normal output distribution
transformer = QuantileTransformer(n_quantiles=30,
output_distribution='normal')
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure it is possible to take the inverse of a sparse matrix
# which contain negative value; this is the case in the iris dataset
X_sparse = sparse.csc_matrix(X)
X_sparse_tran = transformer.fit_transform(X_sparse)
X_sparse_tran_inv = transformer.inverse_transform(X_sparse_tran)
assert_array_almost_equal(X_sparse.A, X_sparse_tran_inv.A)
def test_quantile_transform_check_error():
X = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[2, 4, 0, 0, 6, 8, 0, 10, 0, 0],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
X = sparse.csc_matrix(X)
X_neg = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[-2, 4, 0, 0, 6, 8, 0, 10, 0, 0],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
X_neg = sparse.csc_matrix(X_neg)
err_msg = "Invalid value for 'n_quantiles': 0."
with pytest.raises(ValueError, match=err_msg):
QuantileTransformer(n_quantiles=0).fit(X)
err_msg = "Invalid value for 'subsample': 0."
with pytest.raises(ValueError, match=err_msg):
QuantileTransformer(subsample=0).fit(X)
err_msg = ("The number of quantiles cannot be greater than "
"the number of samples used. Got 1000 quantiles "
"and 10 samples.")
with pytest.raises(ValueError, match=err_msg):
QuantileTransformer(subsample=10).fit(X)
transformer = QuantileTransformer(n_quantiles=10)
err_msg = "QuantileTransformer only accepts non-negative sparse matrices."
with pytest.raises(ValueError, match=err_msg):
transformer.fit(X_neg)
transformer.fit(X)
err_msg = "QuantileTransformer only accepts non-negative sparse matrices."
with pytest.raises(ValueError, match=err_msg):
transformer.transform(X_neg)
X_bad_feat = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
err_msg = ("X has 2 features, but QuantileTransformer is expecting "
"3 features as input.")
with pytest.raises(ValueError, match=err_msg):
transformer.inverse_transform(X_bad_feat)
transformer = QuantileTransformer(n_quantiles=10,
output_distribution='rnd')
# check that an error is raised at fit time
err_msg = ("'output_distribution' has to be either 'normal' or "
"'uniform'. Got 'rnd' instead.")
with pytest.raises(ValueError, match=err_msg):
transformer.fit(X)
# check that an error is raised at transform time
transformer.output_distribution = 'uniform'
transformer.fit(X)
X_tran = transformer.transform(X)
transformer.output_distribution = 'rnd'
err_msg = ("'output_distribution' has to be either 'normal' or 'uniform'."
" Got 'rnd' instead.")
with pytest.raises(ValueError, match=err_msg):
transformer.transform(X)
# check that an error is raised at inverse_transform time
err_msg = ("'output_distribution' has to be either 'normal' or 'uniform'."
" Got 'rnd' instead.")
with pytest.raises(ValueError, match=err_msg):
transformer.inverse_transform(X_tran)
# check that an error is raised if input is scalar
with pytest.raises(ValueError,
match='Expected 2D array, got scalar array instead'):
transformer.transform(10)
    # check that a warning is raised if n_quantiles > n_samples
transformer = QuantileTransformer(n_quantiles=100)
warn_msg = "n_quantiles is set to n_samples"
with pytest.warns(UserWarning, match=warn_msg) as record:
transformer.fit(X)
assert len(record) == 1
assert transformer.n_quantiles_ == X.shape[0]
def test_quantile_transform_sparse_ignore_zeros():
X = np.array([[0, 1],
[0, 0],
[0, 2],
[0, 2],
[0, 1]])
X_sparse = sparse.csc_matrix(X)
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5)
    # dense case -> warning raised
warning_message = ("'ignore_implicit_zeros' takes effect"
" only with sparse matrix. This parameter has no"
" effect.")
with pytest.warns(UserWarning, match=warning_message):
transformer.fit(X)
X_expected = np.array([[0, 0],
[0, 0],
[0, 1],
[0, 1],
[0, 0]])
X_trans = transformer.fit_transform(X_sparse)
assert_almost_equal(X_expected, X_trans.A)
# consider the case where sparse entries are missing values and user-given
# zeros are to be considered
X_data = np.array([0, 0, 1, 0, 2, 2, 1, 0, 1, 2, 0])
X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1])
X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8])
X_sparse = sparse.csc_matrix((X_data, (X_row, X_col)))
X_trans = transformer.fit_transform(X_sparse)
X_expected = np.array([[0., 0.5],
[0., 0.],
[0., 1.],
[0., 1.],
[0., 0.5],
[0., 0.],
[0., 0.5],
[0., 1.],
[0., 0.]])
assert_almost_equal(X_expected, X_trans.A)
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5)
X_data = np.array([-1, -1, 1, 0, 0, 0, 1, -1, 1])
X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1])
X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6])
X_sparse = sparse.csc_matrix((X_data, (X_row, X_col)))
X_trans = transformer.fit_transform(X_sparse)
X_expected = np.array([[0, 1],
[0, 0.375],
[0, 0.375],
[0, 0.375],
[0, 1],
[0, 0],
[0, 1]])
assert_almost_equal(X_expected, X_trans.A)
assert_almost_equal(X_sparse.A, transformer.inverse_transform(X_trans).A)
# check in conjunction with subsampling
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5,
subsample=8,
random_state=0)
X_trans = transformer.fit_transform(X_sparse)
assert_almost_equal(X_expected, X_trans.A)
assert_almost_equal(X_sparse.A, transformer.inverse_transform(X_trans).A)
def test_quantile_transform_dense_toy():
X = np.array([[0, 2, 2.6],
[25, 4, 4.1],
[50, 6, 2.3],
[75, 8, 9.5],
[100, 10, 0.1]])
transformer = QuantileTransformer(n_quantiles=5)
transformer.fit(X)
    # using a uniform output, each entry of X should be mapped between 0 and 1
    # and be equally spaced
X_trans = transformer.fit_transform(X)
X_expected = np.tile(np.linspace(0, 1, num=5), (3, 1)).T
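    # each column of X holds 5 distinct values, so with n_quantiles=5 the
    # sorted transformed values of every column should be exactly
    # [0, 0.25, 0.5, 0.75, 1], i.e. the tiled linspace built above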
assert_almost_equal(np.sort(X_trans, axis=0), X_expected)
X_test = np.array([
[-1, 1, 0],
[101, 11, 10],
])
X_expected = np.array([
[0, 0, 0],
[1, 1, 1],
])
assert_array_almost_equal(transformer.transform(X_test), X_expected)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
def test_quantile_transform_subsampling():
    # Test that subsampling the input yields consistent results. We check
    # that the computed quantiles are almost mapped to a [0, 1] vector with
    # equally spaced values. The infinity norm is checked to be smaller
    # than a given threshold. This is repeated 5 times.
# dense support
n_samples = 1000000
n_quantiles = 1000
X = np.sort(np.random.sample((n_samples, 1)), axis=0)
ROUND = 5
inf_norm_arr = []
for random_state in range(ROUND):
transformer = QuantileTransformer(random_state=random_state,
n_quantiles=n_quantiles,
subsample=n_samples // 10)
transformer.fit(X)
diff = (np.linspace(0, 1, n_quantiles) -
np.ravel(transformer.quantiles_))
inf_norm = np.max(np.abs(diff))
assert inf_norm < 1e-2
inf_norm_arr.append(inf_norm)
    # each random subsampling yields a unique approximation of the expected
    # linspace CDF
assert len(np.unique(inf_norm_arr)) == len(inf_norm_arr)
# sparse support
X = sparse.rand(n_samples, 1, density=.99, format='csc', random_state=0)
inf_norm_arr = []
for random_state in range(ROUND):
transformer = QuantileTransformer(random_state=random_state,
n_quantiles=n_quantiles,
subsample=n_samples // 10)
transformer.fit(X)
diff = (np.linspace(0, 1, n_quantiles) -
np.ravel(transformer.quantiles_))
inf_norm = np.max(np.abs(diff))
assert inf_norm < 1e-1
inf_norm_arr.append(inf_norm)
    # each random subsampling yields a unique approximation of the expected
    # linspace CDF
assert len(np.unique(inf_norm_arr)) == len(inf_norm_arr)
def test_quantile_transform_sparse_toy():
X = np.array([[0., 2., 0.],
[25., 4., 0.],
[50., 0., 2.6],
[0., 0., 4.1],
[0., 6., 0.],
[0., 8., 0.],
[75., 0., 2.3],
[0., 10., 0.],
[0., 0., 9.5],
[100., 0., 0.1]])
X = sparse.csc_matrix(X)
transformer = QuantileTransformer(n_quantiles=10)
transformer.fit(X)
X_trans = transformer.fit_transform(X)
assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.)
assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_trans_inv.toarray())
transformer_dense = QuantileTransformer(n_quantiles=10).fit(
X.toarray())
X_trans = transformer_dense.transform(X)
assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.)
assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.)
X_trans_inv = transformer_dense.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_trans_inv.toarray())
def test_quantile_transform_axis1():
X = np.array([[0, 25, 50, 75, 100],
[2, 4, 6, 8, 10],
[2.6, 4.1, 2.3, 9.5, 0.1]])
X_trans_a0 = quantile_transform(X.T, axis=0, n_quantiles=5)
X_trans_a1 = quantile_transform(X, axis=1, n_quantiles=5)
assert_array_almost_equal(X_trans_a0, X_trans_a1.T)
def test_quantile_transform_bounds():
    # Lower and upper bounds are manually mapped. We check that in the case
    # of a constant feature and a binary feature, the bounds are properly mapped.
X_dense = np.array([[0, 0],
[0, 0],
[1, 0]])
X_sparse = sparse.csc_matrix(X_dense)
# check sparse and dense are consistent
X_trans = QuantileTransformer(n_quantiles=3,
random_state=0).fit_transform(X_dense)
assert_array_almost_equal(X_trans, X_dense)
X_trans_sp = QuantileTransformer(n_quantiles=3,
random_state=0).fit_transform(X_sparse)
assert_array_almost_equal(X_trans_sp.A, X_dense)
assert_array_almost_equal(X_trans, X_trans_sp.A)
# check the consistency of the bounds by learning on 1 matrix
# and transforming another
X = np.array([[0, 1],
[0, 0.5],
[1, 0]])
X1 = np.array([[0, 0.1],
[0, 0.5],
[1, 0.1]])
transformer = QuantileTransformer(n_quantiles=3).fit(X)
X_trans = transformer.transform(X1)
assert_array_almost_equal(X_trans, X1)
# check that values outside of the range learned will be mapped properly.
X = np.random.random((1000, 1))
transformer = QuantileTransformer()
transformer.fit(X)
assert (transformer.transform([[-10]]) ==
transformer.transform([[np.min(X)]]))
assert (transformer.transform([[10]]) ==
transformer.transform([[np.max(X)]]))
assert (transformer.inverse_transform([[-10]]) ==
transformer.inverse_transform([[np.min(transformer.references_)]]))
assert (transformer.inverse_transform([[10]]) ==
transformer.inverse_transform([[np.max(transformer.references_)]]))
def test_quantile_transform_and_inverse():
X_1 = iris.data
X_2 = np.array([[0.], [BOUNDS_THRESHOLD / 10], [1.5], [2], [3], [3], [4]])
for X in [X_1, X_2]:
transformer = QuantileTransformer(n_quantiles=1000, random_state=0)
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv, decimal=9)
def test_quantile_transform_nan():
X = np.array([[np.nan, 0, 0, 1],
[np.nan, np.nan, 0, 0.5],
[np.nan, 1, 1, 0]])
transformer = QuantileTransformer(n_quantiles=10, random_state=42)
transformer.fit_transform(X)
# check that the quantile of the first column is all NaN
assert np.isnan(transformer.quantiles_[:, 0]).all()
# all other column should not contain NaN
assert not np.isnan(transformer.quantiles_[:, 1:]).any()
@pytest.mark.parametrize("array_type", ['array', 'sparse'])
def test_quantile_transformer_sorted_quantiles(array_type):
# Non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/15733
# Taken from upstream bug report:
# https://github.com/numpy/numpy/issues/14685
X = np.array([0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 1, 1, 9, 9, 9, 8, 8, 7] * 10)
X = 0.1 * X.reshape(-1, 1)
X = _convert_container(X, array_type)
n_quantiles = 100
qt = QuantileTransformer(n_quantiles=n_quantiles).fit(X)
    # Check that the estimated quantile thresholds are monotonically
    # increasing:
quantiles = qt.quantiles_[:, 0]
assert len(quantiles) == 100
assert all(np.diff(quantiles) >= 0)
def test_robust_scaler_invalid_range():
for range_ in [
(-1, 90),
(-2, -3),
(10, 101),
(100.5, 101),
(90, 50),
]:
scaler = RobustScaler(quantile_range=range_)
with pytest.raises(ValueError, match=r'Invalid quantile range: \('):
scaler.fit(iris.data)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert not np.any(np.isnan(X_scaled))
X_csr_scaled = scale(X_csr, with_mean=False)
assert not np.any(np.isnan(X_csr_scaled.data))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
with pytest.raises(ValueError):
scale(X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert X_scaled is not X
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# null scale
X_csr_scaled = scale(X_csr, with_mean=False, with_std=False, copy=True)
assert_array_almost_equal(X_csr.toarray(), X_csr_scaled.toarray())
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_1d_array():
X = iris.data[:, 1]
X_trans = robust_scale(X)
assert_array_almost_equal(np.median(X_trans), 0)
q = np.percentile(X_trans, q=(25, 75))
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
# Check RobustScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
    # NOTE: for such a small sample size, what we expect in the third column
    # depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
    # scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
    # would yield very different results!
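    # Worked example for the third column [0.5, -0.1, 1.1]: np.percentile
    # gives q25 = 0.2 and q75 = 0.8, so the IQR is 0.6 and the median is 0.5;
    # centring and scaling yields (0.5 - 0.5) / 0.6 = 0, (-0.1 - 0.5) / 0.6 = -1
    # and (1.1 - 0.5) / 0.6 = +1, matching X_expected below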
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_robust_scaler_unit_variance():
# Check RobustScaler with unit_variance=True on standard normal data with
# outliers
rng = np.random.RandomState(42)
X = rng.randn(1000000, 1)
X_with_outliers = np.vstack(
[X, np.ones((100, 1)) * 100, np.ones((100, 1)) * -100]
)
quantile_range = (1, 99)
robust_scaler = RobustScaler(
quantile_range=quantile_range, unit_variance=True
).fit(X_with_outliers)
X_trans = robust_scaler.transform(X)
assert robust_scaler.center_ == pytest.approx(0, abs=1e-3)
assert robust_scaler.scale_ == pytest.approx(1, abs=1e-2)
assert X_trans.std() == pytest.approx(1, abs=1e-2)
def test_maxabs_scaler_zero_variance_features():
# Check MaxAbsScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# function interface
X_trans = maxabs_scale(X)
assert_array_almost_equal(X_trans, X_expected)
# sparse data
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_trans_csr = scaler.fit_transform(X_csr)
X_trans_csc = scaler.fit_transform(X_csc)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans_csr.A, X_expected)
assert_array_almost_equal(X_trans_csc.A, X_expected)
X_trans_csr_inv = scaler.inverse_transform(X_trans_csr)
X_trans_csc_inv = scaler.inverse_transform(X_trans_csc)
assert_array_almost_equal(X, X_trans_csr_inv.A)
assert_array_almost_equal(X, X_trans_csc_inv.A)
def test_maxabs_scaler_large_negative_value():
# Check MaxAbsScaler on toy data with a large negative value
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_maxabs_scaler_transform_one_row_csr():
# Check MaxAbsScaler on transforming csr matrix with one row
X = sparse.csr_matrix([[0.5, 1., 1.]])
scaler = MaxAbsScaler()
scaler = scaler.fit(X)
X_trans = scaler.transform(X)
X_expected = sparse.csr_matrix([[1., 1., 1.]])
assert_array_almost_equal(X_trans.toarray(), X_expected.toarray())
X_scaled_back = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_scaled_back.toarray())
def test_maxabs_scaler_1d():
# Test scaling of dataset along single axis
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = MaxAbsScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)),
np.ones(n_features))
else:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert scaler.n_samples_seen_ == X.shape[0]
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones((5, 1))
scaler = MaxAbsScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert scaler.n_samples_seen_ == X.shape[0]
# function interface
X_1d = X_1row.ravel()
max_abs = np.abs(X_1d).max()
assert_array_almost_equal(X_1d / max_abs, maxabs_scale(X_1d, copy=True))
def test_maxabs_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d[:100, :]
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
        # Test the fitted attributes at the end of the process
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler()
scaler_incr_csr = MaxAbsScaler()
scaler_incr_csc = MaxAbsScaler()
for batch in gen_batches(n, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
X_csr = sparse.csr_matrix(X[batch])
scaler_incr_csr = scaler_incr_csr.partial_fit(X_csr)
X_csc = sparse.csc_matrix(X[batch])
scaler_incr_csc = scaler_incr_csc.partial_fit(X_csc)
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csc.max_abs_)
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
assert (scaler_batch.n_samples_seen_ ==
scaler_incr_csr.n_samples_seen_)
assert (scaler_batch.n_samples_seen_ ==
scaler_incr_csc.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csc.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
        # Test the attributes after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MaxAbsScaler().fit(X[batch0])
scaler_incr = MaxAbsScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
        # Test the attributes at the end of the sequence of partial fits
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert X_norm is not X
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert X_norm is X
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert X_norm is not X
assert isinstance(X_norm, sparse.csr_matrix)
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert X_norm1 is not X
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert X_norm2 is X
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert X_norm is not X
assert isinstance(X_norm, sparse.csr_matrix)
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert X_norm1 is not X
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert X_norm2 is X
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = abs(X_norm).max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert X_norm is not X
assert isinstance(X_norm, sparse.csr_matrix)
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max_sign():
# check that we normalize by a positive number even for negative data
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# check for mixed data where the value with
# largest magnitude is negative
X_dense[2, abs(X_dense[2, :]).argmax()] *= -1
X_all_neg = -np.abs(X_dense)
X_all_neg_sparse = sparse.csr_matrix(X_all_neg)
for X in (X_dense, X_all_neg, X_all_neg_sparse):
normalizer = Normalizer(norm='max')
X_norm = normalizer.transform(X)
assert X_norm is not X
X_norm = toarray(X_norm)
assert_array_equal(
np.sign(X_norm), np.sign(toarray(X)))
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
with pytest.raises(ValueError):
normalize([[0]], axis=2)
with pytest.raises(ValueError):
normalize([[0]], norm='l3')
rs = np.random.RandomState(0)
X_dense = rs.randn(10, 5)
X_sparse = sparse.csr_matrix(X_dense)
ones = np.ones((10))
for X in (X_dense, X_sparse):
for dtype in (np.float32, np.float64):
for norm in ('l1', 'l2'):
X = X.astype(dtype)
X_norm = normalize(X, norm=norm)
assert X_norm.dtype == dtype
X_norm = toarray(X_norm)
if norm == 'l1':
row_sums = np.abs(X_norm).sum(axis=1)
else:
X_norm_squared = X_norm**2
row_sums = X_norm_squared.sum(axis=1)
assert_array_almost_equal(row_sums, ones)
# Test return_norm
X_dense = np.array([[3.0, 0, 4.0], [1.0, 0.0, 0.0], [2.0, 3.0, 0.0]])
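    # sanity check on the expected norms for the first row [3, 0, 4]:
    # l1 -> |3| + |0| + |4| = 7, l2 -> sqrt(9 + 16) = 5, max -> 4, matching the
    # arrays asserted below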
for norm in ('l1', 'l2', 'max'):
_, norms = normalize(X_dense, norm=norm, return_norm=True)
if norm == 'l1':
assert_array_almost_equal(norms, np.array([7.0, 1.0, 5.0]))
elif norm == 'l2':
assert_array_almost_equal(norms, np.array([5.0, 1.0, 3.60555127]))
else:
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
X_sparse = sparse.csr_matrix(X_dense)
for norm in ('l1', 'l2'):
with pytest.raises(NotImplementedError):
normalize(X_sparse, norm=norm, return_norm=True)
_, norms = normalize(X_sparse, norm='max', return_norm=True)
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert np.sum(X_bin == 0) == 4
assert np.sum(X_bin == 1) == 2
X_bin = binarizer.transform(X)
assert sparse.issparse(X) == sparse.issparse(X_bin)
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert X_bin is not X
assert np.sum(X_bin == 0) == 2
assert np.sum(X_bin == 1) == 4
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert X_bin is not X
X_bin = toarray(X_bin)
assert np.sum(X_bin == 0) == 2
assert np.sum(X_bin == 1) == 4
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert X_bin is X
binarizer = Binarizer(copy=False)
X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64)
X_bin = binarizer.transform(X_float)
if init is not list:
assert X_bin is X_float
X_bin = toarray(X_bin)
assert np.sum(X_bin == 0) == 2
assert np.sum(X_bin == 1) == 4
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert np.sum(X_bin == 0) == 1
assert np.sum(X_bin == 1) == 5
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
with pytest.raises(ValueError):
binarizer.transform(sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
    K_fit = np.dot(X_fit, X_fit.T)
"""Tests for module optim fro OT optimization """
# Author: <NAME> <<EMAIL>>
#
# License: MIT License
import numpy as np
import ot
def test_conditional_gradient():
n_bins = 100 # nb bins
np.random.seed(0)
# bin positions
x = np.arange(n_bins, dtype=np.float64)
# Gaussian distributions
a = ot.datasets.make_1D_gauss(n_bins, m=20, s=5) # m= mean, s= std
b = ot.datasets.make_1D_gauss(n_bins, m=60, s=10)
# loss matrix
M = ot.dist(x.reshape((n_bins, 1)), x.reshape((n_bins, 1)))
M /= M.max()
def f(G):
return 0.5 * np.sum(G**2)
def df(G):
return G
reg = 1e-1
G, log = ot.optim.cg(a, b, M, reg, f, df, verbose=True, log=True)
np.testing.assert_allclose(a, G.sum(1))
np.testing.assert_allclose(b, G.sum(0))
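    # Note: these two checks hold because ot.optim.cg returns a feasible
    # transport plan, so its row sums must reproduce the source histogram a
    # and its column sums the target histogram b, regardless of the
    # regularisation (here f(G) = 0.5 * ||G||_F^2 with gradient df(G) = G)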
def test_conditional_gradient2():
n = 1000 # nb samples
mu_s = np.array([0, 0])
    cov_s = np.array([[1, 0], [0, 1]])
"""
code to evaluate snow models at catchment scale (e.g. Nevis or Clutha river)
options to call a series of models then compute summary statistics
reads in and computes statistics on MODIS data to evaluate against
requires that the dsc_snow model has been pre-run, either using the Fortran version or using run_snow_model
the Clark2009 model can be run on-the-fly or pre-run
<NAME>
"""
from __future__ import division
import netCDF4 as nc
import datetime as dt
import numpy as np
import pickle
from nz_snow_tools.eval.catchment_evaluation import *
from nz_snow_tools.util.utils import convert_date_hydro_DOY, trim_lat_lon_bounds, setup_nztm_dem
def load_dsc_snow_output_annual(catchment, output_dem, hydro_year_to_take, dsc_snow_output_folder, dsc_snow_dem_folder, run_opt,origin='bottomleft'):
"""
load output from dsc_snow model previously run from linux VM
:param catchment: string giving catchment area to run model on
:param output_dem: string identifying the grid to run model on
:param hydro_year_to_take: integer specifying the hydrological year to run model over. 2001 = 1/4/2000 to 31/3/2001
    :return: st_swe, st_melt, st_acc, out_dt. daily grids of SWE at day's end, total melt and accumulation over the previous day, and datetimes of output
"""
data_id = '{}_{}'.format(catchment, output_dem)
dsc_snow_output = nc.Dataset(dsc_snow_output_folder + '/{}_{}_{}.nc'.format(data_id, hydro_year_to_take, run_opt), 'r')
out_dt = nc.num2date(dsc_snow_output.variables['time'][:], dsc_snow_output.variables['time'].units)
st_swe = dsc_snow_output.variables['snow_water_equivalent'][:]
st_melt_total = dsc_snow_output.variables['ablation_total'][:]
st_acc_total = dsc_snow_output.variables['accumulation_total'][:]
if origin == 'topleft':
st_swe = np.flip(st_swe,axis=1)
st_melt_total = np.flip(st_melt_total, axis=1)
st_acc_total = np.flip(st_acc_total, axis=1)
# convert to daily sums
st_melt = np.concatenate((st_melt_total[:1, :], np.diff(st_melt_total, axis=0)))
st_acc = np.concatenate((st_melt_total[:1, :], np.diff(st_acc_total, axis=0)))
if origin == 'topleft':
topo_file = nc.Dataset(dsc_snow_dem_folder + '/{}_topo_no_ice_origintopleft.nc'.format(data_id), 'r')
mask = np.flipud(topo_file.variables['catchment'][:].astype('int'))
else:
topo_file = nc.Dataset(dsc_snow_dem_folder + '/{}_topo_no_ice.nc'.format(data_id), 'r')
mask = topo_file.variables['catchment'][:].astype('int')
mask = mask != 0 # convert to boolean
# mask out values outside of catchment
st_swe[:, mask == False] = np.nan
st_melt[:, mask == False] = np.nan
st_acc[:, mask == False] = np.nan
return st_swe * 1e3, st_melt * 1e3, st_acc * 1e3, out_dt, mask # convert to mm w.e.
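# Illustrative call (a sketch only; the folder paths are placeholders rather
# than values from this repository, the other arguments mirror the __main__
# block further down):
# st_swe, st_melt, st_acc, out_dt, mask = load_dsc_snow_output_annual(
#     'Clutha', 'nztm250m', 2016, '/path/to/dsc_snow_output',
#     '/path/to/input_DEM', 'jobst_ucc_5_topleft', origin='topleft')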
def load_subset_modis_annual(catchment, output_dem, year_to_take, modis_folder, dem_folder, modis_dem, mask_folder, catchment_shp_folder):
"""
load modis data from file and cut to catchment of interest
:param catchment: string giving catchment area to run model on
:param output_dem: string identifying the grid to run model on
:param year_to_take: integer specifying the hydrological year to run model over. 2001 = 1/4/2000 to 31/3/2001
:return: trimmed_fsca, modis_dt, trimmed_mask. The data, datetimes and catchment mask
"""
# load a file
nc_file = nc.Dataset(modis_folder + '/DSC_MOD10A1_{}_v0_nosparse_interp001.nc'.format(year_to_take))
    ndsi = nc_file.variables['NDSI_Snow_Cover_Cloudfree'][:]  # .astype('float32') # ndsi in %
# trim to only the catchment desired
if mask_folder is not None:
mask, trimmed_mask = load_mask_modis(catchment, None, mask_folder, None, modis_dem)
else: # if no catchment specified, just mask to the valid data points.
mask = np.ones(ndsi.shape[1:])
trimmed_mask = mask
# trimmed_fsca = trim_data_bounds(mask, lat_array, lon_array, fsca[183].copy(), y_centres, x_centres)
trimmed_ndsi = trim_data_to_mask(ndsi, mask)
trimmed_ndsi = trimmed_ndsi.astype(np.float32, copy=False)
trimmed_fsca = -1 + 1.45 * trimmed_ndsi # convert to snow cover fraction in % (as per Modis collection 5)
trimmed_fsca[trimmed_ndsi > 100] = np.nan # set all points with inland water or ocean(237 or 239) to nan
trimmed_fsca[trimmed_fsca > 100] = 100 # limit fsca to 100%
trimmed_fsca[trimmed_fsca < 0] = 0 # limit fsca to 0
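    # e.g. a cloud-free NDSI of 40 % maps to -1 + 1.45 * 40 = 57 % fSCA, while
    # an NDSI of 70 % gives 100.5 % and is clipped to 100 % by the line above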
# read date and convert into hydrological year
modis_dt = nc.num2date(nc_file.variables['time'][:], nc_file.variables['time'].units)
# mask out values outside of catchment
trimmed_fsca[:, trimmed_mask == 0] = np.nan
return trimmed_fsca, modis_dt, trimmed_mask
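# Illustrative call (sketch only; paths are placeholders). This mirrors the
# usage inside the __main__ block below:
# modis_fsca, modis_dt, modis_mask = load_subset_modis_annual(
#     'Clutha', 'nztm250m', 2016, '/path/to/modis', '/path/to/dem',
#     'modis_si_dem_250m', '/path/to/masks', '/path/to/catchment_shapefiles')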
if __name__ == '__main__':
origin = 'topleft'
    which_model = 'dsc_snow'  # string identifying the model to be run. options include 'clark2009' or 'dsc_snow'; future will include 'fsm'
clark2009run = True # boolean specifying if the run already exists
dsc_snow_opt = 'fortran' # string identifying which version of the dsc snow model to use output from 'python' or 'fortran'
    dsc_snow_opt2 = 'netCDF'  # string identifying which version of output from python model 'netCDF' or 'pickle'
catchment = 'Clutha' # string identifying catchment modelled
output_dem = 'nztm250m' # identifier for output dem
run_id = 'jobst_ucc_5_topleft' # string identifying fortran dsc_snow run. everything after the year
years_to_take = range(2000, 2016 + 1) # range(2016, 2016 + 1) # [2013 + 1] # range(2001, 2013 + 1)
modis_sc_threshold = 50 # value of fsca (in percent) that is counted as being snow covered
model_swe_sc_threshold = 5 # threshold for treating a grid cell as snow covered (mm w.e)
dsc_snow_output_folder = 'T:/DSC-Snow/runs/output/clutha_nztm250m_erebus'
clark2009_output_folder = 'T:/DSC-Snow/nz_snow_runs/baseline_clutha1'
mask_folder = 'T:/DSC-Snow/Masks'
catchment_shp_folder = 'Z:/GIS_DATA/Hydrology/Catchments'
modis_folder = 'T:/sync_to_data/MODIS_snow/NSDI_SI_cloudfilled'
dem_folder = 'Z:/GIS_DATA/Topography/DEM_NZSOS/'
modis_dem = 'modis_si_dem_250m'
met_inp_folder = 'T:/DSC-Snow/input_data_hourly'
dsc_snow_dem_folder = 'P:/Projects/DSC-Snow/runs/input_DEM'
output_folder = 'P:/Projects/DSC-Snow/runs/output/clutha_nztm250m_erebus'
# set up lists
ann_ts_av_sca_m = []
ann_ts_av_sca_thres_m = []
ann_hydro_days_m = []
ann_dt_m = []
ann_scd_m = []
ann_ts_av_sca = []
ann_ts_av_swe = []
# ann_ts_av_melt = []
# ann_ts_av_acc = []
ann_hydro_days = []
ann_dt = []
ann_scd = []
configs = []
for year_to_take in years_to_take:
print('loading modis data {}'.format(year_to_take))
# load modis data for evaluation
modis_fsca, modis_dt, modis_mask = load_subset_modis_annual(catchment, output_dem, year_to_take, modis_folder, dem_folder, modis_dem, mask_folder,
catchment_shp_folder)
modis_hydro_days = convert_date_hydro_DOY(modis_dt)
modis_sc = modis_fsca >= modis_sc_threshold
# print('calculating basin average sca')
# modis
        num_modis_gridpoints = np.sum(modis_mask)
import numpy as np
import matplotlib.pyplot as plt
from SimpleModelsModule import TestOptimisation
from plotting_code import make_onward_transmission_vector, make_population_tuple
import param_values as scenario
def sample_onward_transmission(onwards_dict):
close_contact = np.random.uniform(*onwards_dict['close_contact'])
symptomatic = np.random.uniform(*onwards_dict['symptomatic'])
asymptomatic = np.random.uniform(*onwards_dict['asymptomatic'])
return make_onward_transmission_vector(close_contact=close_contact,
symptomatic=symptomatic,
asymptomatic=asymptomatic)
def sample_prob_indication(prob_dict):
close_contact = np.random.uniform(*prob_dict['close_contact'])
symptomatic = np.random.uniform(*prob_dict['symptomatic'])
asymptomatic = np.random.uniform(*prob_dict['asymptomatic'])
return close_contact, symptomatic, asymptomatic
def sample_population(pop_dict, prob_indication):
close_contact = np.random.uniform(*pop_dict['close_contact'])
symp = np.random.uniform(*pop_dict['symptomatic'])
total_pop = pop_dict['total_population']
pop, exp_cases = make_population_tuple(num_close=close_contact,
num_symp=symp,
total_pop=total_pop,
presenting_proporition=1,
probability_by_indication=prob_indication)
return pop
def sample_capacitiy(cap_range):
rand_cap = np.random.uniform(*cap_range)
rounded_capacity = 10*int(rand_cap/10)
return rounded_capacity
def sample_present_prop(pres_range):
return np.random.uniform(*pres_range)
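# Sketch of how the samplers above combine into a single Monte Carlo draw;
# this is what run_analysis_save_plot below does on every repetition (the
# dictionary contents here are placeholder ranges, not calibrated values):
# probs = sample_prob_indication({'close_contact': [0.01, 0.05],
#                                 'symptomatic': [0.001, 0.01],
#                                 'asymptomatic': [0.00001, 0.0001]})
# onward = sample_onward_transmission({'close_contact': [0.25, 1],
#                                      'symptomatic': [1, 1.5],
#                                      'asymptomatic': [0.5, 1.5]})
# pop = sample_population({'close_contact': [10, 100],
#                          'symptomatic': [400, 800],
#                          'total_population': 100000}, probs)
# capacity = sample_capacitiy([400, 400])
# presenting = sample_present_prop([0.5, 0.95])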
def run_analysis_save_plot(priority, onward_transmission, pop, pre_prob, cap, prop_symp, reps, scenario_name,
plot_title=None, test_order=None):
onward_transmission = {key: value * 2 if len(value) == 1 else value for key, value in onward_transmission.items()}
pop_new = {}
for key, value in pop.items():
if key == 'total_population':
pop_new[key] = value
else:
pop_new[key] = value*2 if len(value) == 1 else value
pop = pop_new
pre_prob = {key: value * 2 if len(value) == 1 else value for key, value in pre_prob.items()}
cap = cap*2 if len(cap)==1 else cap
prop_symp = prop_symp*2 if len(prop_symp)==1 else prop_symp
onward_transmission_store = []
for i in range(reps):
print(i)
current_onward = sample_onward_transmission(onward_transmission)
current_prob = sample_prob_indication(pre_prob)
current_pop = sample_population(pop, current_prob)
current_cap = sample_capacitiy(cap)
current_pres = sample_present_prop(prop_symp)
test_optim = TestOptimisation(priority_queue=priority, onward_transmission=current_onward,
population=current_pop,
pre_test_probability=current_prob,
routine_capacity=current_cap,
symptomatic_testing_proportion=current_pres,
test_prioritsation_by_indication=test_order)
max_test = 1500
max_test_prop = max_test/current_cap
test_array, onward_transmission_array, positivity = test_optim.generate_onward_transmission_with_tests(
max_tests_proportion=max_test_prop)
        onward_transmission_array = 100 * onward_transmission_array / max(onward_transmission_array)  # max onward transmission should not depend on the symptomatic presenting proportion
test_array = np.array(test_array) / 100
onward_transmission_store.append(list(onward_transmission_array))
# plt.plot(test_array, onward_transmission_array)
# plt.show()
onward_transmission_full_array = np.array(onward_transmission_store)
median = np.percentile(onward_transmission_full_array, 50, axis=0)
low_ci = np.percentile(onward_transmission_full_array, 5, axis=0)
up_ci = np.percentile(onward_transmission_full_array, 95, axis=0)
low_ci2 = np.percentile(onward_transmission_full_array, 15, axis=0)
up_ci2 = np.percentile(onward_transmission_full_array, 85, axis=0)
low_ci3 = np.percentile(onward_transmission_full_array, 25, axis=0)
up_ci3 = np.percentile(onward_transmission_full_array, 75, axis=0)
low_ci4 = np.percentile(onward_transmission_full_array, 35, axis=0)
up_ci4 = np.percentile(onward_transmission_full_array, 65, axis=0)
low_ci5 = np.percentile(onward_transmission_full_array, 45, axis=0)
up_ci5 = np.percentile(onward_transmission_full_array, 55, axis=0)
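    # The paired percentiles above define nested 90/70/50/30/10 % percentile
    # bands around the median; filling them below with the same alpha builds a
    # graded uncertainty fan (inner bands are shaded by more overlapping fills)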
plt.plot(test_array, median, 'k')
plt.fill_between(test_array, low_ci, up_ci, alpha=.25, color='b')
plt.fill_between(test_array, low_ci2, up_ci2, alpha=.25, color='b')
plt.fill_between(test_array, low_ci3, up_ci3, alpha=.25, color='b')
plt.fill_between(test_array, low_ci4, up_ci4, alpha=.25, color='b')
plt.fill_between(test_array, low_ci5, up_ci5, alpha=.25, color='b')
# plt.plot(test_array, low_ci)
# plt.plot(test_array, up_ci)
plt.xlabel('Tests per 1000')
plt.ylim((65, 100))
plt.ylabel('Percentage of onwards transmission')
if plot_title:
plt.title(plot_title)
plt.savefig(f'MS_figures/Uncertainty/{scenario_name}.png')
plt.close()
print(f'{scenario_name} -- Done!')
#
#
#
# priority_queue = True
# onward_transmission_range = {'close_contact': [0.25, 1],
# 'symptomatic': [1, 1.5],
# 'asymptomatic': [.5, 1.5]}
# pop_distribution_range = {'close_contact': [10, 100],
# 'symptomatic': [400, 800],
# 'total_population': 100000}
# pre_prob_range = {'close_contact': [0.01, 0.05],
# 'symptomatic': [0.001, 0.01],
# 'asymptomatic': [0.00001, 0.0001]}
# prop_symp_range = [.5, .95]
# cap_range = [400, 400]
#
# run_analysis_save_plot(priority=priority_queue,
# onward_transmission=onward_transmission_range,
# pop=pop_distribution_range,
# pre_prob=pre_prob_range,
# cap=cap_range,
# prop_symp=prop_symp_range,
# reps=100,
# scenario_name='uncertainty_test')
#
cc_on, symp_on, asymp_on = scenario.onward_transmission_high
cc_prob, symp_prob, asymp_prob = scenario.test_prob_high
cc_pop, symp_pop = scenario.pop_high
total_pop = scenario.total_population
cc_on_outbreak, symp_on_outbreak, asymp_on_outbreak = scenario.onward_transmission_low
cc_prob_outbreak, symp_prob_outbreak, asymp_prob_outbreak = scenario.test_prob_low
cc_pop_outbreak, symp_pop_outbreak = scenario.pop_low
total_pop_outbreak = scenario.total_population
# # NO VARIATION TEMPLATE START
# priority_queue = True
# onward_transmission_range = {'close_contact': [cc_on],
# 'symptomatic': [symp_on],
# 'asymptomatic': [asymp_on]}
# pop_distribution_range = {'close_contact': [cc_pop],
# 'symptomatic': [symp_pop],
# 'total_population': total_pop}
# pre_prob_range = {'close_contact': [cc_prob],
# 'symptomatic': [symp_prob],
# 'asymptomatic': [asymp_prob]}
#
# prop_symp_range = [.5]
# cap_range = [scenarios.test_capacity_high]
#
# run_analysis_save_plot(priority=priority_queue,
# onward_transmission=onward_transmission_range,
# pop=pop_distribution_range,
# pre_prob=pre_prob_range,
# cap=cap_range,
# prop_symp=prop_symp_range,
# reps=100,
# scenario_name='presenting_prop_range_25_75',
# plot_title=None)
# # NO VARIATION TEMPLATE END
run_symp_presentation_range = True
run_pre_test_prob_range = True
run_pop_distribution_range = True
run_onward_transmission_range = True
run_test_number_uncertainty = True
if run_symp_presentation_range:
priority_queue = True
onward_transmission_range = {'close_contact': [cc_on],
'symptomatic': [symp_on],
'asymptomatic': [asymp_on]}
pop_distribution_range = {'close_contact': [cc_pop],
'symptomatic': [symp_pop],
'total_population': total_pop}
pre_prob_range = {'close_contact': [cc_prob],
'symptomatic': [symp_prob],
'asymptomatic': [asymp_prob]}
prop_symp_range = [.25, .75]
cap_range = [scenario.test_capacity_high]
run_analysis_save_plot(priority=priority_queue,
onward_transmission=onward_transmission_range,
pop=pop_distribution_range,
pre_prob=pre_prob_range,
cap=cap_range,
prop_symp=prop_symp_range,
reps=100,
scenario_name='presenting_prop_range_25_75_community_transmission_capacity_high',
plot_title='Presenting proportion between 25% and 75%',
test_order=scenario.priority_order[0])
cap_range = [scenario.test_capacity_low]
run_analysis_save_plot(priority=priority_queue,
onward_transmission=onward_transmission_range,
pop=pop_distribution_range,
pre_prob=pre_prob_range,
cap=cap_range,
prop_symp=prop_symp_range,
reps=100,
scenario_name='presenting_prop_range_25_75_community_transmission_capacity_low',
plot_title='Presenting proportion between 25% and 75%',
test_order=scenario.priority_order[0])
priority_queue = True
onward_transmission_range = {'close_contact': [cc_on_outbreak],
'symptomatic': [symp_on_outbreak],
'asymptomatic': [asymp_on_outbreak]}
pop_distribution_range = {'close_contact': [cc_pop_outbreak],
'symptomatic': [symp_pop_outbreak],
'total_population': total_pop_outbreak}
pre_prob_range = {'close_contact': [cc_prob_outbreak],
'symptomatic': [symp_prob_outbreak],
'asymptomatic': [asymp_prob_outbreak]}
prop_symp_range = [.25, .75]
cap_range = [scenario.test_capacity_high]
run_analysis_save_plot(priority=priority_queue,
onward_transmission=onward_transmission_range,
pop=pop_distribution_range,
pre_prob=pre_prob_range,
cap=cap_range,
prop_symp=prop_symp_range,
reps=100,
scenario_name='presenting_prop_range_25_75_outbreak_response_capacity_high',
plot_title='Presenting proportion between 25% and 75%',
test_order=scenario.priority_order[0])
cap_range = [scenario.test_capacity_low]
run_analysis_save_plot(priority=priority_queue,
onward_transmission=onward_transmission_range,
pop=pop_distribution_range,
pre_prob=pre_prob_range,
cap=cap_range,
prop_symp=prop_symp_range,
reps=100,
scenario_name='presenting_prop_range_25_75_outbreak_response_capacity_low',
plot_title='Presenting proportion between 25% and 75%',
test_order=scenario.priority_order[0])
if run_onward_transmission_range:
priority_queue = True
onward_transmission_range = {'close_contact': [cc_on*.8, cc_on*1.2],
'symptomatic': [symp_on*.8, symp_on*1.2],
'asymptomatic': [asymp_on*.8, asymp_on*1.2]}
pop_distribution_range = {'close_contact': [cc_pop],
'symptomatic': [symp_pop],
'total_population': total_pop}
pre_prob_range = {'close_contact': [cc_prob],
'symptomatic': [symp_prob],
'asymptomatic': [asymp_prob]}
prop_symp_range = [.5]
cap_range = [scenario.test_capacity_high]
run_analysis_save_plot(priority=priority_queue,
onward_transmission=onward_transmission_range,
pop=pop_distribution_range,
pre_prob=pre_prob_range,
cap=cap_range,
prop_symp=prop_symp_range,
reps=100,
scenario_name='onward_transmission_uncertainty',
plot_title='Onward transmission +/- 20%')
if run_pop_distribution_range:
priority_queue = True
onward_transmission_range = {'close_contact': [cc_on],
'symptomatic': [symp_on],
'asymptomatic': [asymp_on]}
pop_distribution_range = {'close_contact': [cc_pop*.8, cc_pop*1.2],
'symptomatic': [symp_pop*.8, symp_pop*1.2],
'total_population': total_pop}
pre_prob_range = {'close_contact': [cc_prob],
'symptomatic': [symp_prob],
'asymptomatic': [asymp_prob]}
prop_symp_range = [.5]
cap_range = [scenario.test_capacity_high]
run_analysis_save_plot(priority=priority_queue,
onward_transmission=onward_transmission_range,
pop=pop_distribution_range,
pre_prob=pre_prob_range,
cap=cap_range,
prop_symp=prop_symp_range,
reps=100,
scenario_name='pop_distribution_uncertainty',
plot_title='Close contact and symptomatic population +/- 20%')
if run_pre_test_prob_range:
priority_queue = True
onward_transmission_range = {'close_contact': [cc_on],
'symptomatic': [symp_on],
'asymptomatic': [asymp_on]}
pop_distribution_range = {'close_contact': [cc_pop],
'symptomatic': [symp_pop],
'total_population': total_pop}
pre_prob_range = {'close_contact': [cc_prob*.8, cc_prob*1.2],
'symptomatic': [symp_prob*.8, symp_prob*1.2],
'asymptomatic': [asymp_prob*.8, asymp_prob*1.2]}
prop_symp_range = [.5]
cap_range = [scenario.test_capacity_high]
run_analysis_save_plot(priority=priority_queue,
onward_transmission=onward_transmission_range,
pop=pop_distribution_range,
pre_prob=pre_prob_range,
cap=cap_range,
prop_symp=prop_symp_range,
reps=100,
scenario_name='test_prob_uncertainty',
plot_title='Pre-test probability of positive +/- 20%')
if run_test_number_uncertainty:
priority = True
total_population = scenario.total_population
# High prevelance
onward_transmission_vector_high = \
make_onward_transmission_vector(*scenario.onward_transmission_high)
test_prob_high = scenario.test_prob_high
population_high, cases_high = \
make_population_tuple(num_close=scenario.pop_high[0],
num_symp=scenario.pop_high[1],
total_pop=total_population,
presenting_proporition=1,
probability_by_indication=test_prob_high)
test_optim = TestOptimisation(priority_queue=priority,
onward_transmission=onward_transmission_vector_high,
population=population_high,
pre_test_probability=test_prob_high,
routine_capacity=400,
symptomatic_testing_proportion=1,
test_prioritsation_by_indication=None)
value_gap = .01
uncertainty_range_values = [0] + \
list(np.arange(0,.5,value_gap) +
value_gap)
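    # With value_gap = 0.01 this builds the grid [0, 0.01, 0.02, ..., 0.5]
    # (51 values): the leading 0 is the no-uncertainty baseline handled
    # separately in the loop below, and the remaining entries are
    # np.arange(0, 0.5, 0.01) shifted up by one step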
num_tests_list = []
transmission_list = []
max_num_tests_for_plot = np.inf
for uncertainty_value in uncertainty_range_values:
print(f"Running uncertainy value {uncertainty_value}")
if uncertainty_value == 0:
num_test_array, transmission, positivity = test_optim.generate_onward_transmission_with_tests()
num_tests_list.append(np.array(num_test_array))
transmission_list.append(np.array(transmission))
"""
Definition of pipeline processor nodes
Exposed classes
---------------
Preprocessing: ProcessorNode
Downsample and drop bad channels based on observed amplitude jumps
MNE: _InverseSolverNode
Minimum norm source estimation + dSPM and sLORETA
LinearFilter: ProcessorNode
Linear filtering
EnvelopeExtractor: ProcessorNode
Envelope extraction
Beamformer: _InverseSolverNode
LCMV beamformer source estimation
MCE: _InverseSolverNode
Minimum current source estimation
ICARejection: ProcessorNode
Artefacts rejection via ICA decomposition
AtlasViewer: ProcessorNode
Select source-level signals in regions of interest based on atlas
AmplitudeEnvelopeCorrelations: ProcessorNode
Connectivity estimation via amplitude envelopes correlation
Coherence: ProcessorNode
Connectivity estimation via coherence
MneGcs: ProcessorNode
Inverse solver for connectivity estimation via Geometric Correction Scheme
"""
import os
import pickle
import time
from datetime import datetime
import scipy as sc
from copy import deepcopy
import math
from vendor.nfb.pynfb.protocols.ssd.topomap_selector_ica import ICADialog
import numpy as np
import mne
from numpy.linalg import svd
from scipy.optimize import linprog
from sklearn.preprocessing import normalize
from mne.preprocessing import find_outliers
from mne.minimum_norm import apply_inverse_raw # , make_inverse_operator
from mne.minimum_norm import make_inverse_operator as mne_make_inverse_operator
from mne.minimum_norm import prepare_inverse_operator
from mne.beamformer import apply_lcmv_raw
from ..utils.make_lcmv import make_lcmv
from .node import ProcessorNode
from ..utils.matrix_functions import (
make_time_dimension_second,
put_time_dimension_back_from_second,
)
from ..utils.inverse_model import (
get_clean_forward,
make_inverse_operator,
get_mesh_data_from_forward_solution,
matrix_from_inverse_operator,
)
from ..utils.pipeline_signals import Communicate
from ..utils.pynfb import (
pynfb_ndarray_function_wrapper,
ExponentialMatrixSmoother,
)
from ..utils.channels import channel_labels_saver
from ..utils.aux_tools import nostdout
from .. import TIME_AXIS
from vendor.nfb.pynfb.signal_processing import filters
__all__ = (
"Preprocessing",
"MNE",
"LinearFilter",
"EnvelopeExtractor",
"Beamformer",
"MCE",
"ICARejection",
"AtlasViewer",
"AmplitudeEnvelopeCorrelations",
"Coherence",
"SeedCoherence",
"MneGcs",
)
class Preprocessing(ProcessorNode):
CHANGES_IN_THESE_REQUIRE_RESET = (
"collect_for_x_seconds",
"dsamp_factor",
"bad_channels",
)
UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = ("mne_info",)
SAVERS_FOR_UPSTREAM_MUTABLE_OBJECTS = {"mne_info": channel_labels_saver}
ALLOWED_CHILDREN = (
"ICARejection",
"SignalViewer",
"MCE",
"MNE",
"Beamformer",
"EnvelopeExtractor",
"LinearFilter",
"LSLStreamOutput",
"Coherence",
"FileOutput",
)
def __init__(
self, collect_for_x_seconds=60, dsamp_factor=1, bad_channels=[]
):
ProcessorNode.__init__(self)
self.collect_for_x_seconds = collect_for_x_seconds # type: int
self._samples_collected = None # type: int
self._enough_collected = None # type: bool
self._means = None # type: np.ndarray
self._mean_sums_of_squares = None # type: np.ndarray
self._bad_channel_indices = None # type: list[int]
self._interpolation_matrix = None # type: np.ndarray
self.dsamp_factor = dsamp_factor
self.viz_type = "sensor time series"
self.is_collecting_samples = False
self.bad_channels = bad_channels
self._reset_statistics()
def _initialize(self):
self._upstream_mne_info = self.traverse_back_and_find("mne_info")
self.mne_info = deepcopy(self._upstream_mne_info)
self.mne_info["bads"] += self.bad_channels
self._signal_sender.initialized.emit()
if self.dsamp_factor and self.dsamp_factor > 1:
filt_freq = self.mne_info["sfreq"] / self.dsamp_factor / 2
if self.mne_info["lowpass"] > filt_freq:
self.mne_info["lowpass"] = filt_freq
self._antialias_filter = filters.ButterFilter(
band=(None, filt_freq),
fs=self.mne_info["sfreq"],
n_channels=self.mne_info["nchan"],
)
self._antialias_filter.apply = pynfb_ndarray_function_wrapper(
self._antialias_filter.apply
)
self._left_n_pad = 0 # initial skip to keep decimation right
self.mne_info["sfreq"] /= self.dsamp_factor
def _update(self):
# Have we collected enough samples without the new input?
if self.is_collecting_samples:
enough_collected = (
self._samples_collected >= self._samples_to_be_collected
)
if not enough_collected:
if (
self.parent.output is not None
and self.parent.output.shape[TIME_AXIS] > 0
):
self._update_statistics()
elif not self._enough_collected: # We just got enough samples
self._enough_collected = True
standard_deviations = self._calculate_standard_deviations()
self._bad_channel_indices = find_outliers(standard_deviations)
if len(self._bad_channel_indices) > 0:
self.mne_info["bads"] = self._upstream_mne_info["bads"] + [
self.mne_info["ch_names"][i]
for i in self._bad_channel_indices
]
self.bad_channels = [
self.mne_info["ch_names"][i]
for i in self._bad_channel_indices
]
self._reset_statistics()
self._signal_sender.enough_collected.emit()
if self.dsamp_factor and self.dsamp_factor > 1:
in_data = self.parent.output
in_antialiased = self._antialias_filter.apply(in_data)
self.output = in_antialiased[
:, self._left_n_pad :: self.dsamp_factor
]
timestamps = self.traverse_back_and_find("timestamps")
self.timestamps = timestamps[self._left_n_pad :: self.dsamp_factor]
n_samp = in_data.shape[1]
self._left_n_pad = (n_samp - self._left_n_pad) % self.dsamp_factor
if self.output.size == 0:
# Empty output disables processing for children which
# decreases update time, so the next chunk will be small
# again and downsampled output will be zero again.
# Wait for at least dsamp_factor samples to avoid this
wait_time = (
self.dsamp_factor / self._upstream_mne_info["sfreq"]
)
time.sleep(wait_time)
else:
self.output = self.parent.output
def reset_bads(self):
self.mne_info["bads"] = self._upstream_mne_info["bads"]
self._bad_channel_indices = []
self.bad_channels = []
@property
def _samples_to_be_collected(self):
frequency = self._upstream_mne_info["sfreq"]
return int(math.ceil(self.collect_for_x_seconds * frequency))
def _on_critical_attr_change(self, key, old_val, new_val) -> bool:
if key == "collect_for_x_seconds":
self._reset_statistics()
output_history_is_no_longer_valid = False
elif key == "dsamp_factor":
self._initialize()
output_history_is_no_longer_valid = True
elif key == "bad_channels":
output_history_is_no_longer_valid = True
return output_history_is_no_longer_valid
def _reset_statistics(self):
self.is_collecting_samples = False
self._samples_collected = 0
self._enough_collected = False
self._means = 0
self._mean_sums_of_squares = 0
self._bad_channel_indices = []
def _update_statistics(self):
input_array = self.parent.output.astype(np.dtype("float64"))
# Using float64 is necessary because otherwise rounding errors
# in the recursive formula accumulate
n = self._samples_collected
m = input_array.shape[TIME_AXIS] # number of new samples
self._samples_collected += m
self._means = (
self._means * n + np.sum(input_array, axis=TIME_AXIS)
) / (n + m)
self._mean_sums_of_squares = (
self._mean_sums_of_squares * n
+ np.sum(input_array ** 2, axis=TIME_AXIS)
) / (n + m)
def _calculate_standard_deviations(self):
n = self._samples_collected
return np.sqrt(
n / (n - 1) * (self._mean_sums_of_squares - self._means ** 2)
)
def _on_input_history_invalidation(self):
self._reset_statistics()
def _check_value(self, key, value):
pass
class _InverseSolverNode(ProcessorNode):
ALLOWED_CHILDREN = (
"EnvelopeExtractor",
"SignalViewer",
"BrainViewer",
"AtlasViewer",
"LSLStreamOutput",
"FileOutput",
"SeedCoherence",
)
def __init__(self, fwd_path=None, subject=None, subjects_dir=None):
ProcessorNode.__init__(self)
self.fwd_path = fwd_path
self.subjects_dir = subjects_dir
self.subject = subject
def _get_forward_subject_and_subjects_dir(self):
if not (self.fwd_path and self.subject and self.subjects_dir):
self._signal_sender.open_fwd_dialog.emit()
def _set_channel_locations_in_root_data_info(self):
# bads should be set up and should include channels missing from fwd
data_info = deepcopy(self._upstream_mne_info)
fwd_info = self._fwd["info"]
DATA_CHNAMES = [c.upper() for c in data_info["ch_names"]]
DATA_BADS = [c.upper() for c in data_info["bads"]]
FWD_CHNAMES = [c.upper() for c in fwd_info["ch_names"]]
for i, c in enumerate(DATA_CHNAMES):
if c not in DATA_BADS:
try:
i_fwd_ch = FWD_CHNAMES.index(c)
data_info["chs"][i]["loc"] = fwd_info["chs"][i_fwd_ch][
"loc"
]
except Exception as exc:
self._logger.exception(exc)
self.root.montage_info = data_info
def _initialize(self):
mne_info = deepcopy(self.traverse_back_and_find("mne_info"))
self._upstream_mne_info = mne_info
self._get_forward_subject_and_subjects_dir()
# -------------- setup forward -------------- #
try:
self._fwd, self._missing_ch_names = get_clean_forward(
self.fwd_path, mne_info
)
except ValueError:
self.fwd_path = None
self.subject = None
self.subjects_dir = None
self._get_forward_subject_and_subjects_dir()
self._fwd, self._missing_ch_names = get_clean_forward(
self.fwd_path, mne_info
)
self._upstream_mne_info["bads"] = list(
set(self._upstream_mne_info["bads"] + self._missing_ch_names)
)
self._bad_channels = self._upstream_mne_info["bads"]
self._set_channel_locations_in_root_data_info()
class MNE(_InverseSolverNode):
SUPPORTED_METHODS = ["MNE", "dSPM", "sLORETA"]
UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = ("mne_info",)
CHANGES_IN_THESE_REQUIRE_RESET = (
"fwd_path",
"snr",
"method",
"subjects_dir",
"subject",
)
SAVERS_FOR_UPSTREAM_MUTABLE_OBJECTS = {"mne_info": channel_labels_saver}
def __init__(
self,
fwd_path=None,
snr=1.0,
method="MNE",
depth=None,
loose=1,
fixed=False,
subjects_dir=None,
subject=None,
):
_InverseSolverNode.__init__(
self, subjects_dir=subjects_dir, subject=subject, fwd_path=fwd_path
)
self.snr = snr
self._default_forward_model_file_path = None
self._upstream_mne_info = None
self.mne_info = None
self._fwd = None
# self._inverse_model_matrix = None
self.method = method
self.loose = loose
self.depth = depth
self.fixed = fixed
self.viz_type = "source time series"
def _initialize(self):
_InverseSolverNode._initialize(self)
self.inverse_operator = make_inverse_operator(
self._fwd,
self._upstream_mne_info,
depth=self.depth,
loose=self.loose,
fixed=self.fixed,
)
self._lambda2 = 1.0 / self.snr ** 2
self.inverse_operator = prepare_inverse_operator(
self.inverse_operator,
nave=100,
lambda2=self._lambda2,
method=self.method,
)
self._inverse_model_matrix = matrix_from_inverse_operator(
inverse_operator=self.inverse_operator,
mne_info=self._upstream_mne_info,
snr=self.snr,
method=self.method,
)
frequency = self._upstream_mne_info["sfreq"]
# channel_count = self._inverse_model_matrix.shape[0]
channel_count = self._fwd["nsource"]
channel_labels = [
"vertex #{}".format(i + 1) for i in range(channel_count)
]
self.mne_info = mne.create_info(channel_labels, frequency)
def _update(self):
mne_info = self._upstream_mne_info
bads = mne_info["bads"]
if bads != self._bad_channels:
self._logger.info(
"Found new bad channels {};".format(bads)
+ "updating inverse operator"
)
self.inverse_operator = make_inverse_operator(
self._fwd,
mne_info,
depth=self.depth,
loose=self.loose,
fixed=self.fixed,
)
self.inverse_operator = prepare_inverse_operator(
self.inverse_operator,
nave=100,
lambda2=self._lambda2,
method=self.method,
)
self._bad_channels = bads
input_array = self.parent.output
raw_array = mne.io.RawArray(input_array, mne_info, verbose="ERROR")
raw_array.pick_types(eeg=True, meg=False, stim=False, exclude="bads")
data = raw_array.get_data()
self.output = self._apply_inverse_model_matrix(data)
# stc = apply_inverse_raw(
# raw_array,
# self.inverse_operator,
# lambda2=self._lambda2,
# method=self.method,
# prepared=True,
# )
# self.output = stc.data
def _on_input_history_invalidation(self):
# The methods implemented in this node do not rely on past inputs
pass
def _check_value(self, key, value):
if key == "method":
if value not in self.SUPPORTED_METHODS:
raise ValueError(
"Method {} is not supported.".format(value)
+ " Use one of: {}".format(self.SUPPORTED_METHODS)
)
if key == "snr":
if value <= 0:
raise ValueError(
"snr (signal-to-noise ratio) must be a positive number."
)
def _on_critical_attr_change(self, key, old_val, new_val) -> bool:
self.initialize()
output_history_is_no_longer_valid = True
return output_history_is_no_longer_valid
def _apply_inverse_model_matrix(self, input_array: np.ndarray):
W = self._inverse_model_matrix # VERTICES x CHANNELS
output_array = W.dot(make_time_dimension_second(input_array))
return put_time_dimension_back_from_second(output_array)
class LinearFilter(ProcessorNode):
UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = ("mne_info",)
CHANGES_IN_THESE_REQUIRE_RESET = ("lower_cutoff", "upper_cutoff")
SAVERS_FOR_UPSTREAM_MUTABLE_OBJECTS = {
"mne_info": lambda info: (info["nchan"],)
}
ALLOWED_CHILDREN = (
"MNE",
"MCE",
"Beamformer",
"SignalViewer",
"EnvelopeExtractor",
"LSLStreamOutput",
"FileOutput",
)
def __init__(self, lower_cutoff: float = 1, upper_cutoff: float = 50):
ProcessorNode.__init__(self)
self.lower_cutoff = lower_cutoff
self.upper_cutoff = upper_cutoff
self._linear_filter = None # type: filters.ButterFilter
self.viz_type = None
def _initialize(self):
self.viz_type = self.parent.viz_type
mne_info = self.traverse_back_and_find("mne_info")
frequency = mne_info["sfreq"]
channel_count = mne_info["nchan"]
if not (self.lower_cutoff is None and self.upper_cutoff is None):
band = (self.lower_cutoff, self.upper_cutoff)
self._linear_filter = filters.ButterFilter(
band, fs=frequency, n_channels=channel_count
)
self._linear_filter.apply = pynfb_ndarray_function_wrapper(
self._linear_filter.apply
)
else:
self._linear_filter = None
def _update(self):
input_data = self.parent.output
if self._linear_filter is not None:
self.output = self._linear_filter.apply(input_data)
else:
self.output = input_data
def _check_value(self, key, value):
if value is None:
pass
elif key == "lower_cutoff":
if (
hasattr(self, "upper_cutoff")
and self.upper_cutoff is not None
and value > self.upper_cutoff
):
raise ValueError(
"Lower cutoff can`t be set higher that the upper cutoff"
)
if value < 0:
raise ValueError("Lower cutoff must be a positive number")
elif key == "upper_cutoff":
if (
hasattr(self, "upper_cutoff")
and self.lower_cutoff is not None
and value < self.lower_cutoff
):
raise ValueError(
"Upper cutoff can`t be set lower that the lower cutoff"
)
if value < 0:
raise ValueError("Upper cutoff must be a positive number")
def _on_input_history_invalidation(self):
# Reset filter delays
if self._linear_filter is not None:
self._linear_filter.reset()
def _on_critical_attr_change(self, key, old_val, new_val) -> bool:
self.initialize()
output_history_is_no_longer_valid = True
return output_history_is_no_longer_valid
class EnvelopeExtractor(ProcessorNode):
UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = ("mne_info",)
CHANGES_IN_THESE_REQUIRE_RESET = ("method", "factor")
SUPPORTED_METHODS = ("Exponential smoothing",)
SAVERS_FOR_UPSTREAM_MUTABLE_OBJECTS = {
"mne_info": lambda info: (info["nchan"],)
}
ALLOWED_CHILDREN = ("SignalViewer", "LSLStreamOutput", "FileOutput")
def __init__(self, factor=0.9, method="Exponential smoothing"):
ProcessorNode.__init__(self)
self.method = method
self.factor = factor
self._envelope_extractor = None # type: ExponentialMatrixSmoother
self.viz_type = None
def _initialize(self):
channel_count = self.traverse_back_and_find("mne_info")["nchan"]
self._envelope_extractor = ExponentialMatrixSmoother(
factor=self.factor, column_count=channel_count
)
self._envelope_extractor.apply = pynfb_ndarray_function_wrapper(
self._envelope_extractor.apply
)
self.viz_type = self.parent.viz_type
if self.parent.viz_type == "source time series":
self.ALLOWED_CHILDREN = (
"BrainViewer",
"LSLStreamOutput",
"FileOutput",
)
elif self.parent.viz_type == "connectivity":
self.ALLOWED_CHILDREN = (
"ConnectivityViewer",
"LSLStreamOutput",
"FileOutput",
)
def _update(self):
input_data = self.parent.output
self.output = self._envelope_extractor.apply(np.abs(input_data))
def _check_value(self, key, value):
if key == "factor":
if value <= 0 or value >= 1:
raise ValueError("Factor must be a number between 0 and 1")
if key == "method":
if value not in self.SUPPORTED_METHODS:
raise ValueError(
"Method {} is not supported."
+ " Use one of: {}".format(value, self.SUPPORTED_METHODS)
)
def _on_critical_attr_change(self, key, old_val, new_val) -> bool:
self.initialize()
output_history_is_no_longer_valid = True
return output_history_is_no_longer_valid
def _on_input_history_invalidation(self):
self._envelope_extractor.reset()
class Beamformer(_InverseSolverNode):
"""Adaptive and nonadaptive beamformer"""
SUPPORTED_OUTPUT_TYPES = ("power", "activation")
UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = ("mne_info",)
CHANGES_IN_THESE_REQUIRE_RESET = (
"reg",
"output_type",
"is_adaptive",
"fixed_orientation",
"fwd_path",
"whiten",
"subject",
"subjects_dir",
"forgetting_factor_per_second",
)
SAVERS_FOR_UPSTREAM_MUTABLE_OBJECTS = {"mne_info": channel_labels_saver}
def __init__(
self,
output_type="power",
is_adaptive=False,
fixed_orientation=True,
forgetting_factor_per_second=0.99,
reg=0.05,
whiten=False,
fwd_path=None,
subject=None,
subjects_dir=None,
):
_InverseSolverNode.__init__(
self, subjects_dir=subjects_dir, subject=subject
)
self.output_type = output_type  # type: str
self.is_adaptive = is_adaptive # type: bool
self.fixed_orientation = fixed_orientation # type: bool
self.mne_info = None # type: mne.Info
self.whiten = whiten
self.reg = reg
self.forgetting_factor_per_second = forgetting_factor_per_second
self.fwd_path = fwd_path
self._default_forward_model_file_path = None # type: str
self._channel_indices = None # type: list
self._gain_matrix = None # type: np.ndarray
self._data_cov = None # type: np.ndarray
self._forgetting_factor_per_sample = None # type: float
self.viz_type = "source time series"
self._noise_cov = None
def _initialize(self):
# self.fwd_dialog_signal_sender.open_dialog.emit()
# raise Exception("BAD FORWARD + DATA COMBINATION!")
# raise Exception
_InverseSolverNode._initialize(self)
self._gain_matrix = self._fwd["sol"]["data"]
G = self._gain_matrix
# ------------------------------------------- #
Rxx = G.dot(G.T) / 1e22
goods = mne.pick_types(
self._upstream_mne_info, eeg=True, meg=False, exclude="bads"
)
ch_names = [self._upstream_mne_info["ch_names"][i] for i in goods]
self._data_cov = mne.Covariance(
Rxx,
ch_names,
self._upstream_mne_info["bads"],
self._upstream_mne_info["projs"],
nfree=1,
)
if self.whiten:
self._noise_cov = mne.Covariance(
G.dot(G.T),
ch_names,
self._upstream_mne_info["bads"],
self._upstream_mne_info["projs"],
nfree=1,
)
else:
self._noise_cov = None
frequency = self._upstream_mne_info["sfreq"]
self._forgetting_factor_per_sample = np.power(
self.forgetting_factor_per_second, 1 / frequency
)
n_vert = self._fwd["nsource"]
channel_labels = ["vertex #{}".format(i + 1) for i in range(n_vert)]
# downstream info
self.mne_info = mne.create_info(channel_labels, frequency)
self._initialized_as_adaptive = self.is_adaptive
self._initialized_as_fixed = self.fixed_orientation
self.fwd_surf = mne.convert_forward_solution(
self._fwd, surf_ori=True, force_fixed=False
)
self._compute_filters(self._upstream_mne_info)
#gsogoyan 2.12.2019
with open('../../matrix.pickle', 'wb') as f:
pickle.dump(G, f)
#end of
def _update(self):
t1 = time.time()
input_array = self.parent.output
raw_array = mne.io.RawArray(
input_array, self._upstream_mne_info, verbose="ERROR"
)
raw_array.pick_types(eeg=True, meg=False, stim=False, exclude="bads")
raw_array.set_eeg_reference(ref_channels="average", projection=True)
t2 = time.time()
self._logger.timing(
"Prepare arrays in {:.1f} ms".format((t2 - t1) * 1000)
)
if self.is_adaptive:
self._update_covariance_matrix(input_array)
t1 = time.time()
self._compute_filters(raw_array.info)
t2 = time.time()
self._logger.timing(
"Assembled lcmv instance in {:.1f} ms".format((t2 - t1) * 1000)
)
self._filters["source_nn"] = []
t1 = time.time()
stc = apply_lcmv_raw(
raw=raw_array, filters=self._filters, max_ori_out="signed"
)
t2 = time.time()
self._logger.timing(
"Applied lcmv inverse in {:.1f} ms".format((t2 - t1) * 1000)
)
output = stc.data
t1 = time.time()
if self.fixed_orientation is True:
if self.output_type == "power":
output = output ** 2
else:
vertex_count = self.fwd_surf["nsource"]
output = np.sum(
np.power(output, 2).reshape((vertex_count, 3, -1)), axis=1
)
if self.output_type == "activation":
output = np.sqrt(output)
self.output = output
t2 = time.time()
self._logger.timing("Finalized in {:.1f} ms".format((t2 - t1) * 1000))
def _compute_filters(self, info):
self._filters = make_lcmv(
info=info,
forward=self.fwd_surf,
data_cov=self._data_cov,
reg=self.reg,
noise_cov=self._noise_cov, # data whiten
pick_ori="max-power",
weight_norm="unit-noise-gain",
reduce_rank=False,
)
def _on_critical_attr_change(self, key, old_val, new_val) -> bool:
# Only change adaptiveness or fixed_orientation requires reinit
# if (self._initialized_as_adaptive is not self.is_adaptive
# or self._initialized_as_fixed is not self.fixed_orientation):
# if old_val != new_val: # we don't expect numpy arrays here
if key in ("reg",):
self._compute_filters(self._upstream_mne_info)
else:
self.initialize()
output_history_is_no_longer_valid = True
return output_history_is_no_longer_valid
def _on_input_history_invalidation(self):
# Only adaptive version relies on history
if self._initialized_as_adaptive is True:
self.initialize()
def _check_value(self, key, value):
if key == "output_type":
if value not in self.SUPPORTED_OUTPUT_TYPES:
raise ValueError(
"Method {} is not supported."
+ " Use one of: {}".format(
value, self.SUPPORTED_OUTPUT_TYPES
)
)
if key == "reg":
if value <= 0:
raise ValueError(
"reg (covariance regularization coefficient)"
" must be a positive number"
)
if key == "is_adaptive":
if not isinstance(value, bool):
raise ValueError(
"Beamformer type (adaptive vs nonadaptive) is not set"
)
def _update_covariance_matrix(self, input_array):
t1 = time.time()
alpha = self._forgetting_factor_per_sample
sample_count = input_array.shape[TIME_AXIS]
self._logger.timing("Number of samples: {}".format(sample_count))
new_Rxx_data = self._data_cov.data
raw_array = mne.io.RawArray(
input_array, self._upstream_mne_info, verbose="ERROR"
)
raw_array.pick_types(eeg=True, meg=False, stim=False, exclude="bads")
raw_array.set_eeg_reference(ref_channels="average", projection=True)
input_array_nobads = raw_array.get_data()
t2 = time.time()
self._logger.timing(
"Prepared covariance update in {:.2f} ms".format((t2 - t1) * 1000)
)
samples = make_time_dimension_second(input_array_nobads).T
new_Rxx_data = alpha * new_Rxx_data + (1 - alpha) * samples.T.dot(
samples
)
t3 = time.time()
self._logger.timing(
"Updated matrix data in {:.2f} ms".format((t3 - t2) * 1000)
)
self._data_cov = mne.Covariance(
new_Rxx_data,
self._data_cov.ch_names,
raw_array.info["bads"],
raw_array.info["projs"],
nfree=1,
)
t4 = time.time()
self._logger.timing(
"Created instance of covariance"
+ " in {:.2f} ms".format((t4 - t4) * 1000)
)
# TODO: implement this function
def pynfb_filter_based_processor_class(pynfb_filter_class):
"""
Returns a ProcessorNode subclass with the functionality of
pynfb_filter_class
pynfb_filter_class: subclass of pynfb.signal_processing.filters.BaseFilter
Sample usage 1:
LinearFilter = pynfb_filter_based_processor_class(filters.ButterFilter)
linear_filter = LinearFilter(band, fs, n_channels, order)
Sample usage 2
(this would correspond to a different implementation of this function):
LinearFilter = pynfb_filter_based_processor_class(filters.ButterFilter)
linear_filter = LinearFilter(band, order)
In this case LinearFilter should provide
fs and n_channels parameters to filters.ButterFilter automatically
"""
class PynfbFilterBasedProcessorClass(ProcessorNode):
def _on_input_history_invalidation(self):
pass
def _check_value(self, key, value):
pass
@property
def CHANGES_IN_THESE_REQUIRE_RESET(self):
pass
@property
def UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION(self):
pass
def _reset(self):
pass
def __init__(self):
pass
def _initialize(self):
pass
def _update(self):
pass
return PynfbFilterBasedProcessorClass
class MCE(_InverseSolverNode):
UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = ()
CHANGES_IN_THESE_REQUIRE_RESET = (
"fwd_path",
"n_comp",
"subjects_dir",
"subject",
)
def __init__(
self,
fwd_path=None,
n_comp=30,
subjects_dir=None,
subject=None,
):
_InverseSolverNode.__init__(
self, subjects_dir=subjects_dir, subject=subject, fwd_path=fwd_path
)
self.fwd_path = fwd_path
self.n_comp = n_comp
self.mne_info = None
self._upstream_mne_info = None
self.input_data = []
self.output = []
self.viz_type = "source time series"
def _initialize(self):
# self.fwd_dialog_signal_sender.open_dialog.emit()
_InverseSolverNode._initialize(self)
fwd_fix = mne.convert_forward_solution(
self._fwd, surf_ori=True, force_fixed=False
)
self._gain_matrix = fwd_fix["sol"]["data"]
self._logger.info("Computing SVD of the forward operator")
U, S, V = svd(self._gain_matrix)
Sn = np.zeros([self.n_comp, V.shape[0]])
Sn[: self.n_comp, : self.n_comp] = np.diag(S[: self.n_comp])
self.Un = U[:, : self.n_comp]
self.A_non_ori = Sn @ V
# ---------------------------------------------------- #
# -------- leadfield dims -------- #
N_SEN = self._gain_matrix.shape[0]
# -------------------------------- #
# ------------------------ noise-covariance ------------------------ #
cov_data = np.identity(N_SEN)
ch_names = np.array(self._upstream_mne_info["ch_names"])[
mne.pick_types(self._upstream_mne_info, eeg=True, meg=False)
]
ch_names = list(ch_names)
noise_cov = mne.Covariance(
cov_data,
ch_names,
self._upstream_mne_info["bads"],
self._upstream_mne_info["projs"],
nfree=1,
)
# ------------------------------------------------------------------ #
self.mne_inv = mne_make_inverse_operator(
self._upstream_mne_info,
fwd_fix,
noise_cov,
depth=0.8,
loose=1,
fixed=False,
verbose="ERROR",
)
self.Sn = Sn
self.V = V
channel_count = self._fwd["nsource"]
channel_labels = [
"vertex #{}".format(i + 1) for i in range(channel_count)
]
self.mne_info = mne.create_info(
channel_labels, self._upstream_mne_info["sfreq"]
)
self._upstream_mne_info = self._upstream_mne_info
def _update(self):
input_array = self.parent.output
# last_slice = last_sample(input_array)
last_slice = np.mean(input_array, axis=1)
n_src = self.mne_inv["nsource"]
n_times = input_array.shape[1]
output_mce = np.empty([n_src, n_times])
raw_slice = mne.io.RawArray(
np.expand_dims(last_slice, axis=1)
"""Module defining analytic polynomial beams."""
import numpy as np
from pyuvsim import AnalyticBeam
from numpy.polynomial.chebyshev import chebval
from . import utils
def stokes_matrix(pol_index):
"""
Calculate Pauli matrices for pseudo-Stokes conversion.
Source code adapted from `pyuvdata`.
Derived from https://arxiv.org/pdf/1401.2095.pdf, the Pauli
indices are reordered from the quantum mechanical
convention to an order which gives the ordering of the pseudo-Stokes vector
['pI', 'pQ', 'pU', 'pV'].
Parameters
----------
pol_index : int
Polarization index for which the Pauli matrix is generated, the index
must lie between 0 and 3 ('pI': 0, 'pQ': 1, 'pU': 2, 'pV':3).
Returns
-------
pauli_mat: array of float
Pauli matrix for pol_index. Shape: (2, 2)
"""
if pol_index == 0:
pauli_mat = np.array([[1.0, 0.0], [0.0, 1.0]])
elif pol_index == 1:
pauli_mat = np.array([[1.0, 0.0], [0.0, -1.0]])
elif pol_index == 2:
pauli_mat = np.array([[0.0, 1.0], [1.0, 0.0]])
elif pol_index == 3:
pauli_mat = np.array([[0.0, -1.0j], [1.0j, 0.0]])
else:
raise ValueError("'pol_index' most be an integer between 0 and 3")
return pauli_mat
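# Illustrative sketch (added example, not part of the original module): print
# the Pauli matrix associated with each pseudo-Stokes component in the
# ['pI', 'pQ', 'pU', 'pV'] ordering used above.
def _example_stokes_matrices():
    for index, name in enumerate(["pI", "pQ", "pU", "pV"]):
        print(name)
        print(stokes_matrix(index))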
def construct_mueller(jones, pol_index1, pol_index2):
"""
Generate Mueller components. Source code adapted from `pyuvdata`.
Following https://arxiv.org/pdf/1802.04151.pdf. Using equation:
Mij = Tr(J sigma_i J^* sigma_j)
where sigma_i and sigma_j are Pauli matrices.
Parameters
----------
jones: array of float
Jones matrices containing the electric field for the dipole arms
or linear polarizations. Shape: (Npixels, 2, 2) for Healpix beams or
(Naxes1 * Naxes2, 2, 2) otherwise.
pol_index1: int
Polarization index referring to the first index of Mij (i).
pol_index2: int
Polarization index referring to the second index of Mij (j).
Returns
-------
mueller: array of float
Mueller array containing the Mij values, shape: (Npixels,) for Healpix beams
or (Naxes1 * Naxes2,) otherwise.
"""
pauli_mat1 = stokes_matrix(pol_index1)
pauli_mat2 = stokes_matrix(pol_index2)
mueller = 0.5 * np.einsum(
"...ab,...bc,...cd,...ad", pauli_mat1, jones, pauli_mat2, np.conj(jones)
)
mueller = np.abs(mueller)
return mueller
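# Illustrative sketch (added example, not part of the original module): with an
# identity Jones matrix at every pixel, the 'pI' Mueller element reduces to
# 0.5 * Tr(I) = 1 for each pixel.
def _example_mueller_identity(npix=4):
    jones = np.tile(np.eye(2, dtype=np.complex128), (npix, 1, 1))
    return construct_mueller(jones, 0, 0)  # array of ones, shape (npix,)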
def efield_to_pstokes(efield_beam, npix, Nfreqs):
"""
Convert E-field to pseudo-stokes power. Source code adapted from `pyuvdata`.
Following https://arxiv.org/pdf/1802.04151.pdf, using the equation:
M_ij = Tr(sigma_i J sigma_j J^*)
where sigma_i and sigma_j are Pauli matrices.
Parameters
----------
efield_beam: array_like, complex
The E-field to convert to pStokes power beam.
Must have shape (2, 1, 2, Nfreq, npix).
npix: int
The npix number of the HEALPix maps of the efield_beam.
Nfreqs: int
The number of frequencies of the efield_beam.
Returns
-------
power_data: array_like, complex
The pseudo-Stokes power beam computed from efield_beam.
Shape (1, 1, 4, Nfreq, npix)
"""
# construct jones matrix containing the electric field
pol_strings = ["pI", "pQ", "pU", "pV"]
power_data = np.zeros((1, 1, 4, Nfreqs, npix), dtype=np.complex128)
for fq_i in range(Nfreqs):
jones = np.zeros((npix, 2, 2), dtype=np.complex128)
pol_strings = ["pI", "pQ", "pU", "pV"]
jones[:, 0, 0] = efield_beam[0, 0, 0, fq_i, :]
jones[:, 0, 1] = efield_beam[0, 0, 1, fq_i, :]
jones[:, 1, 0] = efield_beam[1, 0, 0, fq_i, :]
jones[:, 1, 1] = efield_beam[1, 0, 1, fq_i, :]
for pol_i in range(len(pol_strings)):
power_data[:, :, pol_i, fq_i, :] = construct_mueller(jones, pol_i, pol_i)
return power_data
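# Illustrative sketch (added example, not part of the original module): a unit
# E-field beam of shape (2, 1, 2, Nfreqs, npix) maps to a pseudo-Stokes power
# beam of shape (1, 1, 4, Nfreqs, npix).
def _example_pstokes_shapes(nfreqs=3, npix=16):
    efield = np.zeros((2, 1, 2, nfreqs, npix), dtype=np.complex128)
    efield[0, 0, 0] = 1.0  # unit response of the first feed
    efield[1, 0, 1] = 1.0  # unit response of the second feed
    return efield_to_pstokes(efield, npix, nfreqs).shape  # (1, 1, 4, nfreqs, npix)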
def modulate_with_dipole(az, za, freqs, ref_freq, beam_vals, fscale):
"""
Take a beam pattern and modulate it to turn it into an approximate E-field beam.
This is achieved by taking the beam pattern (assumed to be the square-root of a
power beam) and multiplying it by an zenith, azimuth and frequency -dependent
complex dipole matrix (a polarised dipole pattern), with elements:
```
dipole = q(za_s) * (1. + p(za_s) * 1.j) * [[-sin(az), cos(az)], [cos(az), sin(az)]]
```
where q and p are functions defined elsewhere in this file, and za_s is the
zenith angle stretched by a power law.
Parameters
----------
az: array_like
Array of azimuth values, in radians. 1-dimensional, of the same size ('Naz')
as za.
za: array_like
Array of zenith-angle values, in radians. 1-dimensional, of the same size ('Nza')
as az.
freqs: array_like
Array of frequencies at which the beam pattern has been computed. Size 'Nfreqs'.
ref_freq: float
The reference frequency for the beam width scaling power law.
beam_vals: array_like, complex
Array of beam values, with shape (Nfreqs, Naz). This will normally be the
square-root of a power beam.
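fscale: array_like
Array of frequency-dependent beam-width scaling factors, of size 'Nfreqs'
(computed in PolyBeam.interp as (freqs / ref_freq) ** spectral_index).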
Returns
-------
pol_efield_beam : array_like, complex
Array of polarized beam values, with shape (2, 1, 2, Nfreqs, Naz), where 2 =
(phi, theta) directions, 1 = number of spectral windows, and 2 = N_feed is the
number of linearly-polarised feeds, assumed to be the 'n' and 'e' directions.
"""
# Form the beam.
# initial dipole matrix, shape (2, 2, az.size)
dipole = np.array([[-np.sin(az), np.cos(az)], [np.cos(az), np.sin(az)]])
# stretched zenith angle, shape (Nfreq, za.size)
za_scale = za[np.newaxis, :] / fscale[:, np.newaxis]
# phase component, shape za_scale.shape = (Nfreq, za.size)
ph = q(za_scale) * (1.0 + p(za_scale) * 1.0j)
# shape (2, 2, 1, az.size)
dipole_mod = ph[np.newaxis, np.newaxis, ...] * dipole[:, :, np.newaxis, :]
# shape (2, 1, 2, Nfreq, az.size)
pol_efield_beam = (
dipole_mod[:, np.newaxis, ...]
* beam_vals[np.newaxis, np.newaxis, np.newaxis, ...]
)
# Correct it for frequency dependency.
# extract modulus and phase of the beams
modulus = np.abs(pol_efield_beam)
phase = np.angle(pol_efield_beam)
# assume linear shift of phase along frequency
shift = -np.pi / 18e6 * (freqs[:, np.newaxis] - ref_freq) # shape (Nfreq, 1)
# shift the phase
phase += shift[np.newaxis, np.newaxis, np.newaxis, :, :]
# upscale the modulus
modulus = np.power(modulus, 0.6) # ad-hoc
# map the phase to [-pi; +pi]
phase = utils.wrap2pipi(phase)
# reconstruct
pol_efield_beam = modulus * np.exp(1j * phase)
return pol_efield_beam
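# Illustrative sketch (added example, not part of the original module): apply
# the dipole modulation to a flat (all-ones) beam on a small az/za grid. The
# spectral index of -0.7 is a made-up value used only to build fscale, mirroring
# what PolyBeam.interp does.
def _example_dipole_modulation():
    az = np.linspace(0.0, 2.0 * np.pi, 8)
    za = np.linspace(0.0, np.pi / 2.0, 8)
    freqs = np.array([100e6, 150e6])
    ref_freq = 1e8
    fscale = (freqs / ref_freq) ** -0.7
    beam_vals = np.ones((freqs.size, az.size))
    pol_beam = modulate_with_dipole(az, za, freqs, ref_freq, beam_vals, fscale)
    return pol_beam.shape  # (2, 1, 2, 2, 8)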
def p(za):
"""
Models the general behavior of the phase of the 'Fagnoni beam', and its first ring.
(the first ring is the θ < π/11 region)
Parameters
----------
za: array_like
Array of zenith-angle values, in radians.
Returns
-------
res: array_like
Array of the same size as za, in radians.
"""
# "manual" fit on the 100 MHz Fagnoni beam
res = np.pi * np.sin(np.pi * za) # general behavior (kind of...)
res[np.where(za < np.pi / 11)] = 0 # first ring
return res
def q(za):
"""
Models the 'second ring' of the phase of the 'Fagnoni beam'.
(the second ring is the π/6 < θ < π/11 region)
Parameters
----------
za: array_like
Array of zenith-angle values, in radians.
Returns
-------
res: array_like
Complex array of the same size as za.
"""
# "manual" fit on the 100MHz beam
res = np.ones(za.shape, dtype=np.complex128)
res[np.where(np.logical_and(np.pi / 6 > za, za > np.pi / 11))] = 1j
return res
class PolyBeam(AnalyticBeam):
"""
Analytic, azimuthally-symmetric beam model based on Chebyshev polynomials.
The frequency-dependence of the beam is implemented by scaling source zenith
angles when the beam is interpolated, using a power law.
See HERA memo:
http://reionization.org/wp-content/uploads/2013/03/HERA081_HERA_Primary_Beam_Chebyshev_Apr2020.pdf
Parameters
----------
beam_coeffs : array_like
Co-efficients of the Chebyshev polynomial.
spectral_index : float, optional
Spectral index of the frequency-dependent power law scaling to
apply to the width of the beam. Default: 0.0.
ref_freq : float, optional
Reference frequency for the beam width scaling power law, in Hz.
Default: 1e8.
polarized : bool, optional
Whether to multiply the axisymmetric beam model by a dipole
modulation factor to emulate a polarized beam response. If False,
the axisymmetric representation will be put in the (phi, n)
and (theta, e) elements of the Jones matrix returned by the
`interp()` method. Default: False.
"""
def __init__(
self, beam_coeffs=None, spectral_index=0.0, ref_freq=1e8, polarized=False
):
self.ref_freq = ref_freq
self.spectral_index = spectral_index
self.polarized = polarized
self.data_normalization = "peak"
self.freq_interp_kind = None
self.Nspws = 1
# Polarization conventions
self.beam_type = "efield"
self.Nfeeds = 2 # n and e feeds
self.pixel_coordinate_system = "az_za" # az runs from East to North
self.feed_array = ["N", "E"]
self.x_orientation = "east"
# Beam data
self.beam_coeffs = beam_coeffs
def peak_normalize(self):
"""Normalize the beam to have peak of unity."""
# Not required
pass
def interp(self, az_array, za_array, freq_array, reuse_spline=None):
"""
Evaluate the primary beam at given az, za locations (in radians).
Parameters
----------
az_array : array_like
Azimuth values in radians (same length as za_array). The azimuth
here has the UVBeam convention: North of East(East=0, North=pi/2)
za_array : array_like
Zenith angle values in radians (same length as az_array).
freq_array : array_like
Frequency values to evaluate at.
reuse_spline : bool, optional
Does nothing for analytic beams. Here for compatibility with UVBeam.
Returns
-------
interp_data : array_like
Array of beam values, shape (Naxes_vec, Nspws, Nfeeds or Npols,
Nfreqs or freq_array.size if freq_array is passed,
Npixels/(Naxis1, Naxis2) or az_array.size if az/za_arrays are passed)
interp_basis_vector : array_like
Array of interpolated basis vectors (or self.basis_vector_array
if az/za_arrays are not passed), shape: (Naxes_vec, Ncomponents_vec,
Npixels/(Naxis1, Naxis2) or az_array.size if az/za_arrays are passed)
"""
# Check that coordinates have same length
if az_array.size != za_array.size:
raise ValueError(
"Azimuth and zenith angle coordinate arrays must have same length."
)
# Empty data array
interp_data = np.zeros(
(2, 1, 2, freq_array.size, az_array.size), dtype=np.complex128
)
# Frequency scaling
fscale = (freq_array / self.ref_freq) ** self.spectral_index
# Transformed zenith angle, also scaled with frequency
x = 2.0 * np.sin(za_array[np.newaxis, ...] / fscale[:, np.newaxis]) - 1.0
# Primary beam values from Chebyshev polynomial
beam_values = chebval(x, self.beam_coeffs)
central_val = chebval(-1.0, self.beam_coeffs)
beam_values /= central_val # ensure normalized to 1 at za=0
# Set beam Jones matrix values (see Eq. 5 of Kohn+ arXiv:1802.04151)
# Axes: [phi, theta] (az and za) / Feeds: [n, e]
# interp_data shape: (Naxes_vec, Nspws, Nfeeds or Npols, Nfreqs, Naz)
if self.polarized:
interp_data = modulate_with_dipole(
az_array, za_array, freq_array, self.ref_freq, beam_values, fscale
)
else:
interp_data[1, 0, 0, :, :] = beam_values # (theta, n)
interp_data[0, 0, 1, :, :] = beam_values # (phi, e)
interp_basis_vector = None
if self.beam_type == "power":
# Cross-multiplying feeds, adding vector components
pairs = [(i, j) for i in range(2) for j in range(2)]
power_data = np.zeros((1, 1, 4) + beam_values.shape, dtype=float)
for pol_i, pair in enumerate(pairs):
power_data[:, :, pol_i] = (
interp_data[0, :, pair[0]] * np.conj(interp_data[0, :, pair[1]])
) + (interp_data[1, :, pair[0]] * np.conj(interp_data[1, :, pair[1]]))
interp_data = power_data
return interp_data, interp_basis_vector
def __eq__(self, other):
"""Evaluate equality with another object."""
if not isinstance(other, self.__class__):
return False
return self.beam_coeffs == other.beam_coeffs
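# Illustrative sketch (added example, not part of the original module): build a
# PolyBeam from made-up Chebyshev coefficients and evaluate it at a few sky
# positions and frequencies.
def _example_polybeam_usage():
    beam = PolyBeam(beam_coeffs=[1.0, -0.5, 0.2], spectral_index=-0.7, ref_freq=1e8)
    az = np.linspace(0.0, 2.0 * np.pi, 5)
    za = np.linspace(0.0, np.pi / 2.0, 5)
    freqs = np.array([100e6, 120e6])
    jones, _ = beam.interp(az, za, freqs)
    return jones.shape  # (2, 1, 2, Nfreqs, Naz) = (2, 1, 2, 2, 5)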
class PerturbedPolyBeam(PolyBeam):
"""A :class:`PolyBeam` in which the shape of the beam has been modified.
The perturbations can be applied to the mainlobe, sidelobes, or
the entire beam. While the underlying :class:`PolyBeam` depends on
frequency via the `spectral_index` kwarg, the perturbations themselves do
not have a frequency dependence unless explicitly stated.
Mainlobe: A Gaussian of width FWHM is subtracted and then a new
Gaussian with width `mainlobe_width` is added back in. This perturbs
the width of the primary beam mainlobe, but leaves the sidelobes mostly
unchanged.
Sidelobes: The baseline primary beam model, PB, is modulated by a (sine)
Fourier series at angles beyond some zenith angle. There is an angle-
only modulation (set by `perturb_coeffs` and `perturb_scale`), and a
frequency-only modulation (set by `freq_perturb_coeffs` and
`freq_perturb_scale`).
Entire beam: May be sheared, stretched, and rotated.
Parameters
----------
beam_coeffs : array_like
Co-efficients of the baseline Chebyshev polynomial.
perturb_coeffs : array_like, optional
Array of floats with the coefficients of a (sine-only) Fourier
series that will be used to modulate the base Chebyshev primary
beam model.
perturb_scale : float, optional
Overall scale of the primary beam modulation. Must be less than 1,
otherwise the primary beam can go negative.
mainlobe_width : float
Width of the mainlobe, in radians. This determines the width of the
Gaussian mainlobe model that is subtracted, as well as the location
of the transition between the mainlobe and sidelobe regimes.
mainlobe_scale : float, optional
Factor to apply to the FWHM of the Gaussian that is used to rescale
the mainlobe.
transition_width : float, optional
Width of the smooth transition between the range of angles
considered to be in the mainlobe vs in the sidelobes, in radians.
xstretch, ystretch : float, optional
Stretching factors to apply to the beam in the x and y directions,
which introduces beam ellipticity, as well as an overall
stretching/shrinking. Default: 1.0 (no ellipticity or stretching).
rotation : float, optional
Rotation of the beam in the x-y plane, in degrees. Only has an
effect if xstretch != ystretch.
freq_perturb_coeffs : array_like, optional
Array of floats with the coefficients of a sine and cosine Fourier
series that will be used to modulate the base Chebyshev primary
beam model in the frequency direction. Default: None.
freq_perturb_scale : float, optional
Overall scale of the primary beam modulation in the frequency
direction. Must be less than 1, otherwise the primary beam can go
negative. Default: 0.
perturb_zeropoint : float, optional
If specified, override the automatic zero-point calculation for
the angle-dependent sidelobe perturbation. Default: None (use the
automatically-calculated zero-point).
spectral_index : float, optional
Spectral index of the frequency-dependent power law scaling to
apply to the width of the beam.
ref_freq : float, optional
Reference frequency for the beam width scaling power law, in Hz.
**kwargs
Any other parameters are used to initialize superclass :class:`PolyBeam`.
"""
def __init__(
self,
beam_coeffs=None,
perturb_coeffs=None,
perturb_scale=0.1,
mainlobe_width=0.3,
mainlobe_scale=1.0,
transition_width=0.05,
xstretch=1.0,
ystretch=1.0,
rotation=0.0,
freq_perturb_coeffs=None,
freq_perturb_scale=0.0,
perturb_zeropoint=None,
**kwargs
):
# Initialize base class
super().__init__(beam_coeffs=beam_coeffs, **kwargs)
# Check for valid input parameters
if mainlobe_width is None:
raise ValueError("Must specify a value for 'mainlobe_width' kwarg")
# Set sidelobe perturbation parameters
if perturb_coeffs is None:
perturb_coeffs = []
if freq_perturb_coeffs is None:
freq_perturb_coeffs = []
self.perturb_coeffs = np.array(perturb_coeffs)
self.freq_perturb_coeffs = np.array(freq_perturb_coeffs)
# Set all other parameters
self.perturb_scale = perturb_scale
self.freq_perturb_scale = freq_perturb_scale
self.mainlobe_width = mainlobe_width
self.mainlobe_scale = mainlobe_scale
self.transition_width = transition_width
self.xstretch, self.ystretch = xstretch, ystretch
self.rotation = rotation
# Calculate normalization of sidelobe perturbation functions on
# fixed grid (ensures rescaling is deterministic/independent of input
# to the interp() method)
za = np.linspace(0.0, np.pi / 2.0, 1000) # rad
freqs = np.linspace(100.0, 200.0, 1000) * 1e6 # Hz
p_za = self._sidelobe_modulation_za(za, scale=1.0, zeropoint=0.0)
p_freq = self._sidelobe_modulation_freq(freqs, scale=1.0, zeropoint=0.0)
# Rescale p_za to the range [-0.5, +0.5]
self._scale_pza, self._zeropoint_pza = 0.0, 0.0
if self.perturb_coeffs.size > 0:
self._scale_pza = 2.0 / (np.max(p_za) - np.min(p_za))
self._zeropoint_pza = -0.5 - 2.0 * np.min(p_za) / (
np.max(p_za) - np.min(p_za)
)
# Override calculated zeropoint with user-specified value
if perturb_zeropoint is not None:
self._zeropoint_pza = perturb_zeropoint
# Rescale p_freq to the range [-0.5, +0.5]
self._scale_pfreq, self._zeropoint_pfreq = 0.0, 0.0
if self.freq_perturb_coeffs.size > 0:
self._scale_pfreq = 2.0 / (np.max(p_freq) - np.min(p_freq))
self._zeropoint_pfreq = -0.5 - 2.0 * np.min(p_freq) / (
np.max(p_freq) - np.min(p_freq)
)
# Sanity checks
if self.perturb_scale >= 1.0:
raise ValueError(
"'perturb_scale' must be less than 1; otherwise "
"the beam can go negative."
)
if self.freq_perturb_scale >= 1.0:
raise ValueError(
"'freq_perturb_scale' must be less than 1; "
"otherwise the beam can go negative."
)
def _sidelobe_modulation_za(self, za_array, scale=1.0, zeropoint=0.0):
"""Calculate sidelobe modulation factor for a set of zenith angle values.
Parameters
----------
za_array : array_like
Array of zenith angles, in radians.
scale : float, optional
Multiplicative rescaling factor to be applied to the modulation
function. Default: 1.
zeropoint : float, optional
Zero-point correction to be applied to the modulation function.
Default: 0.
"""
# Construct sidelobe perturbations (angle-dependent)
p_za = 0
if self.perturb_coeffs.size > 0:
# Build Fourier (sine) series
f_fac = 2.0 * np.pi / (np.pi / 2.0) # Fourier series with period pi/2
for n in range(self.perturb_coeffs.size):
p_za += self.perturb_coeffs[n] * np.sin(f_fac * n * za_array)
return p_za * scale + zeropoint
def _sidelobe_modulation_freq(self, freq_array, scale=1.0, zeropoint=0.0):
"""Calculate sidelobe modulation factor for a set of frequency values.
Parameters
----------
freq_array : array_like
Array of frequencies, in Hz.
scale : float, optional
Multiplicative rescaling factor to be applied to the modulation
function. Default: 1.
zeropoint : float, optional
Zero-point correction to be applied to the modulation function.
Default: 0.
"""
# Construct sidelobe perturbations (frequency-dependent)
p_freq = 0
if self.freq_perturb_coeffs.size > 0:
# Build Fourier series (sine + cosine)
f_fac = 2.0 * np.pi / (100.0e6) # Fourier series with period 100 MHz
for n in range(self.freq_perturb_coeffs.size):
if n == 0:
fn = 1.0 + 0.0 * freq_array
elif n % 2 == 0:
fn = np.sin(f_fac * ((n + 1) // 2) * freq_array)
else:
fn = np.cos(f_fac * ((n + 1) // 2) * freq_array)
p_freq += self.freq_perturb_coeffs[n] * fn
return p_freq * scale + zeropoint
def interp(self, az_array, za_array, freq_array, reuse_spline=None):
"""Evaluate the primary beam after shearing/stretching/rotation."""
# Apply shearing, stretching, or rotation
if self.xstretch != 1.0 or self.ystretch != 1.0:
# Convert sheared Cartesian coords to circular polar coords
# xstretch scales the x direction, ystretch the y direction, a is the rotation angle
# Notation: phi = az, theta = za. Subscript 's' are transformed coords
a = self.rotation * np.pi / 180.0
X = za_array * np.cos(az_array)
Y = za_array * np.sin(az_array)
Xs = (X * np.cos(a) - Y * np.sin(a)) / self.xstretch
Ys = (X * np.sin(a) + Y * np.cos(a)) / self.ystretch
# Updated polar coordinates
theta_s = np.sqrt(Xs ** 2.0 + Ys ** 2.0)
phi_s = np.zeros_like(theta_s)
""" NN training by contrastive divergence
"""
def contrastivedivergence(model, data, validata=None, ncd=1, maxepoch=100,
nadj=10, momentum=.5, batchsize=10, finetune=6):
"""unfold and train fnn model by contrastive divergence
Args:
model: deep FFN model
data: features in rows, observations in columns.
cd: number of contrastive divergence steps
maxepoch: hard limit of learning iterations default is 100
nadj: period of learning rate adjustment in units of epochs
momentum: fraction of previous change in weight carried over to
next weight update step
Returns: exit condition and trained unfolded model.
Exit conditions are 0) learning converged, 1) learning not
converged, and -1) learning cannot be performed.
Training will modify model.
"""
import numpy as np
from crpm.activationfunctions import activation
from crpm.ffn_bodyplan import get_bodyplan
from crpm.ffn_bodyplan import copy_bodyplan
from crpm.ffn_bodyplan import push_bodyplanlayer
from crpm.ffn_bodyplan import init_ffn
#init exit condition to default
exitcond = 0
#get model bodyplan
bodyplan = get_bodyplan(model)
#get number of model layers
nlayer = len(model)
#copy bodyplan
unfolded_bodyplan = copy_bodyplan(bodyplan)
#push layers in reversed order to create a symmetric bodyplan
for layer in reversed(bodyplan[:-1]):
push_bodyplanlayer(unfolded_bodyplan, layer)
#create unfolded model from symmetric bodyplan
smodel = init_ffn(unfolded_bodyplan)
#print(smodel)
#return symmetric model if maxepoch = 0
if maxepoch<1:
return exitcond, smodel
#define minibatches
#get number of observations in data
nobv = data.shape[1]
#calculate number of minibatches needed
batchsize = int(batchsize)
nbatch = nobv//batchsize
#get randomized observation index
data = data.T
np.random.shuffle(data)
data = data.T
#alpha norm scales learning rate by max force relative to weight
alpha_norm = 10**(-finetune)
#alpha_norm = 1E-8#7#5E-6
#initialize previous layer activity with input data for layer 0
prevlayeractivity = data
#do the same for the validation data
validprevlayeractivity = validata
if validata is None:
#use last 20% of batches for validation
vbatch = nbatch//5
nbatch = nbatch - vbatch
prevlayeractivity = data[:, 0:nbatch*batchsize]
validprevlayeractivity = data[:, nbatch*batchsize:]
# loop over first half of symmetric model begining with layer 1
for layerindex in range(1, nlayer):
#encoding index is = layerindex
#decoding index is = 2*nlayer - layerindex +1
decodeindex = 2*nlayer-(layerindex+1)
#define layers
vislayer = smodel[decodeindex]
hidlayer = smodel[layerindex]
#get number of nodes per layer
nv = vislayer["n"]
nh = hidlayer["n"]
#initialize connecting weights ±4sqrt(6/(nv+nh))
hidlayer["weight"] = ((np.random.rand(nh, nv)-1/2)*
8*np.sqrt(6/(nh+nv)))
#determine appropriate RBM type
vtype = vislayer["activation"]
htype = hidlayer["activation"]
rbmtype = None
#1. binary
if vtype == "logistic" and htype == "logistic":
rbmtype = "binary"
#define activity for visible layer
def vsample():
"""returns logistic visible layer activity given hiddenlayer state"""
stimulus = np.add(hidlayer["weight"].T.dot(hstate), vislayer["bias"])
return activation("logistic", stimulus)
#define activity for hidden layer
def hsample():
"""returns logistic hidden layer activity and stocastic binary state given visible layer activity"""
stimulus = np.add(hidlayer["weight"].dot(vact), hidlayer["bias"])
hact = activation("logistic", stimulus)
return hact, hact > np.random.random(hact.shape)
#define free energy equation for binary-binary RBM
def feng(act):
#visible bias term: dim (1,m)
#vbterm = -np.sum(np.multiply(act, vislayer["bias"]), axis=0)
vbterm = -vislayer["bias"].T.dot(act)
#hidden layer stimulus : dim (nh,m)
stimulus = np.add(hidlayer["weight"].dot(act), hidlayer["bias"])
# init hidden term : dim (nh,m)
#hidden_term = activation("vacuum",stimulus)
#for exp(stim) term numerical stability
#first calc where stimulus is negative
#xidx = np.where(stimulus < 0)
#hidden term function for negative stimulus
#hidden_term[xidx] = np.log(1+np.exp(stimulus[xidx]))
#then calc where stimulus is not negative
#xidx = np.where(stimulus >= 0)
#hidden term function for not negative stimulus
#hidden_term[xidx] = stimulus[xidx]+np.log(1+np.exp(-stimulus[xidx]))
hidden_term = np.where(stimulus < 0,
np.log(1+np.exp(stimulus)),
stimulus+np.log(1+np.exp(-stimulus)))
#sum over hidden units to get true hidden_term : dim (1,m)
hidden_term = np.sum(hidden_term, axis=0)
#free energy = sum over samples (visible_bias_term - hidden_term)
return np.sum(vbterm - hidden_term)
#2. Gaussian-Bernoulli
if vtype == "linear" and htype == "logistic":
rbmtype = "gaussian-bernoulli"
#Get standard deviation for real-valued visible units
sigma = np.std(prevlayeractivity, axis=1, keepdims=True)
#define activity for visible layer
def vsample():
"""returns linear plus gaussian noise visible layer activity given hidden layer state"""
stimulus = np.add(hidlayer["weight"].T.dot(hstate)*sigma, vislayer["bias"])
return np.random.normal(loc=stimulus, scale=sigma)
#define activity for hidden layer
def hsample():
"""returns logistic hidden layer activity and stocastic binary state given scaled visible layer activity"""
stimulus = np.add(hidlayer["weight"].dot(vact/sigma), hidlayer["bias"])
act = activation("logistic",stimulus)
return act, act > np.random.random(act.shape)
#define free energy equation for Gaussian - Bernoulli RBM
def feng(act):
#hidden layer stimulus : dim (nh,m)
stimulus = np.add(hidlayer["weight"].dot(act), hidlayer["bias"])
# init hidden term : dim (nh,m)
#hidden_term = activation("vacuum",stimulus)
#for exp(stim) term numerical stability
#first calc where stimulus is negative
#xidx = np.where(stimulus < 0)
#hidden term function for negative stimulus
#hidden_term[xidx] = np.log(1+np.exp(stimulus[xidx]))
#then calc where stimulus is not negative
#xidx = np.where(stimulus >= 0)
#hidden term function for not negative stimulus
#hidden_term[xidx] = stimulus[xidx]+np.log(1+np.exp(-stimulus[xidx]))
hidden_term = np.where(stimulus < 0,
np.log(1+np.exp(stimulus)),
stimulus+np.log(1+np.exp(-stimulus)))
"""
Wrappers around qcodes.utils.dataset.doNd functions that live-plot
data during the sweep.
"""
import re
import sys
import inspect
import functools
import itertools
import numpy as np
from typing import Any, Optional, Union, Tuple, List, Mapping
from dataclasses import dataclass, field
from qcodes import config
from qcodes.dataset.data_set import DataSet
from qcodes.dataset.descriptions.param_spec import ParamSpecBase
from qcodes.instrument.parameter import _BaseParameter
import qcodes.utils.dataset.doNd as doNd
from ..plot import PlotWindow, PlotItem, PlotDataItem, ImageItem, TableWidget
from ..plot.plot_tools import save_figure
from ..logging import get_logger
# Get access to module level variables
this = sys.modules[__name__]
this.current = None
logger = get_logger("tools.doNd")
# Utility functions for parsing parameter names from plot titles
_param = r"(\w+)\s+\([^)]+\)"
_single_id = r"(\d+)(?:-(\d+))?(?:, (?=\d))?"
_id = r"(\(id:\s+(?:\d+(?:-\d+)?(?:, (?=\d))?)+\))"
_id_re = re.compile(_single_id, re.IGNORECASE)
_plot_title_re = re.compile(r"("+_param+r"\s+v\.(?:<br>|\s)+"+_param+r")\s+"+_id, re.MULTILINE|re.IGNORECASE)
_single_param_title_re = re.compile(r"("+_param+r")\s*"+_id, re.MULTILINE)
def _get_window(append, size=(1000, 600)):
"""
Return a handle to a plot window to use for this plot.
If append is False, create a new plot window, otherwise return
a handle to the given window, or the last created window.
Args:
append (Union[bool, PlotWindow]): If true, return the last
created plot window, if PlotWindow, return that window, otherwise
a new window will be created.
size (Tuple[int, int]): The size in px of the new plot window. If append
is not false, this parameter has no effect.
"""
# Set up a plotting window
if append is None or append is False:
win = PlotWindow()
win.win_title = 'ID: '
win.resize(*size)
elif isinstance(append, PlotWindow):
# Append to the given window
win = append
elif isinstance(append, bool):
# Append to the last trace if true
win = PlotWindow.getWindows()[-1]
else:
raise ValueError("Unknown argument to append. Either give a plot window"
" or true to append to the last plot")
return win
def _explode_ids(ids_str: str) -> List[int]:
"""
Explode a list of ids from a plot title into a list of all
ids.
"""
ids = []
for match in _id_re.finditer(ids_str):
start, stop = match.groups()
if stop is None:
ids.append(int(start))
else:
ids.extend(range(int(start), int(stop)+1))
return tuple(ids)
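# Illustrative sketch (added example, not part of the original module): the id
# part of a plot title expands ranges into individual ids.
def _example_explode_ids():
    return _explode_ids("(id: 1-3, 5)")  # (1, 2, 3, 5)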
def _reduce_ids(ids: List[int]):
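    """Collapse a sorted list of ids into compact range strings, e.g. [1, 2, 3, 5] -> ["1-3", "5"]."""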
strings = []
i = 1
r = 0
while i < len(ids):
if ids[i] == ids[i-1]+1:
i += 1
else:
if i-1 == r:
strings.append(f"{ids[r]}")
else:
strings.append(f"{ids[r]}-{ids[i-1]}")
r = i
i += 1
if i-1 == r:
strings.append(f"{ids[r]}")
else:
strings.append(f"{ids[r]}-{ids[i-1]}")
return strings
def _parse_title(title) -> Tuple[str, Tuple[str], Tuple[int]]:
match = _plot_title_re.fullmatch(title)
if not match:
# Might be a single title re
match = _single_param_title_re.fullmatch(title)
if not match:
return None
paramstr, param_name, ids = match.groups()
ids = _explode_ids(ids)
return(paramstr, (param_name,), ids)
paramstr, param1_name, param2_name, ids = match.groups()
ids = _explode_ids(ids)
return (paramstr, (param1_name, param2_name), ids)
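# Illustrative sketch (added example, not part of the original module): a title
# of the assumed "param (unit) v. param (unit) (id: ...)" form parses into the
# parameter string, the parameter names and the run ids.
def _example_parse_title():
    return _parse_title("current (A) v. gate (V) (id: 12-13)")
    # ("current (A) v. gate (V)", ("current", "gate"), (12, 13))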
def _compatible_plot_item(win: PlotWindow,
p_bot: ParamSpecBase,
p_left: Optional[ParamSpecBase] = None) -> Optional[PlotItem]:
"""
Returns a compatible plot item if found
"""
if p_left is not None:
axes = (p_bot.name, p_left.name)
else:
axes = (p_bot.name, )
for item in win.items:
if isinstance(item, PlotItem):
_, params, _ = _parse_title(item.plot_title)
if params == axes:
return item
return None
def _register_subscriber():
"""
Register live plotting in the qcodes config object.
"""
if "qcm" not in config.subscription.subscribers:
logger.info("Registering qcm as a default subscriber")
config.subscription.subscribers["qcm"] = {
'factory': 'qcodes_measurements.tools.doNd.subscriber',
'factory_kwargs': {},
'subscription_kwargs': {
'min_wait': 10,
'min_count': 0,
'callback_kwargs': {}
}
}
config.subscription.default_subscribers.append("qcm")
# Dataclass holding live-plotting state
@dataclass(frozen=False)
class LivePlotWindow:
plot_window: Optional[PlotWindow]
stack: bool = False
append: bool = False
dataset: DataSet = None
datacount: Mapping[str, int] = field(default_factory=dict)
table_items: Mapping[str, Union[int, float]] = None
plot_items: Mapping[str, Union[PlotDataItem, ImageItem]] = field(default_factory=dict)
plot_params: List[_BaseParameter] = None
def do_nothing(new_data, data_len, state):
"""
Function that does nothing
"""
return
def update_plots(new_data, data_len, state):
"""
Function that updates plots when live plotting
"""
write_count = this.current.dataset.cache._write_status
# Don't update if we haven't started measuring yet
if not write_count or any(wc == 0 for wc in write_count.values()): return
run_desc = this.current.dataset.description
data_cache = this.current.dataset.cache.data()
params = run_desc.interdeps
shapes = run_desc.shapes
plot_items = this.current.plot_items.items()
table_items = this.current.table_items.items() if this.current.table_items is not None else ()
for param, plotitem in itertools.chain(plot_items, table_items):
# Keep track of how much of the plot we've written, and only update
# parameters that are being measured.
if param not in write_count:
continue
if param not in this.current.datacount:
this.current.datacount[param] = write_count[param]
elif write_count[param] == this.current.datacount[param]:
continue
else:
this.current.datacount[param] = write_count[param]
# Update plots
if shapes[param] == (1,):
val = data_cache[param][param][0]
if isinstance(val, (float, np.float16, np.float32, np.float64)):
val = np.format_float_scientific(val)
else:
val = str(val)
this.current.table_items[param].append(val)
elif len(shapes[param]) == 1:
paramspec = params[param]
setpoint_param = params.dependencies[paramspec][0]
plotitem.setData(data_cache[param][setpoint_param.name][:write_count[param]],
data_cache[param][param][:write_count[param]])
else:
paramspec = params[param]
bot_axis = params.dependencies[paramspec][0]
left_axis = params.dependencies[paramspec][1]
data = data_cache[param][param]
# Check if we are in the first column or if we need to clear nans
if np.isnan(data[-1,-1]) or write_count[param] < shapes[param][1]:
meanval = data.flat[:write_count[param]].mean()
data.flat[write_count[param]:] = meanval
# Update axis scales as data comes in
if plotitem.no_xscale:
# Set Y-scale until we have the entire first column
if plotitem.no_yscale and write_count[param] >= shapes[param][1]:
ldata = data_cache[param][left_axis.name]
ymin, ymax = ldata[0, 0], ldata[0, -1]
plotitem.setpoint_y = np.linspace(ymin, ymax, shapes[param][1])
plotitem.no_yscale = False
plotitem.rescale()
elif plotitem.no_yscale and write_count[param] >= 2:
ldata = data_cache[param][left_axis.name]
ymin, step = ldata[0, 0], ldata[0, 1]-ldata[0, 0]
plotitem.setpoint_y = np.linspace(ymin, ymin + step*shapes[param][1], shapes[param][1], endpoint=False)
plotitem.rescale()
# Set X-scale
if write_count[param]/shapes[param][1] > 1:
bdata = data_cache[param][bot_axis.name]
xmin, step = bdata[0, 0], bdata[1, 0]-bdata[0, 0]
plotitem.setpoint_x = np.linspace(xmin, xmin + step*shapes[param][0], shapes[param][0], endpoint=False)
plotitem.no_xscale = False
plotitem.rescale()
# Rescale x-axis when we have all values in case of F.P. error
if write_count[param] == shapes[param][0]*shapes[param][1]:
bdata = data_cache[param][bot_axis.name]
xmin, xmax = bdata[0, 0], bdata[-1, 0]
plotitem.setpoint_x = np.linspace(xmin, xmax, shapes[param][0])
plotitem.no_xscale = False
plotitem.rescale()
# Update the plot
plotitem.update(data)
# Update table items if requested, expanding parameters that weren't measured
# if necessary.
if this.current.table_items:
nItems = max(len(x) for x in this.current.table_items.values())
for item in this.current.table_items:
if len(this.current.table_items[item]) < nItems:
this.current.table_items[item].append("")
col_titles = this.current.plot_window.table.getHorizontalHeaders()
if len(col_titles) < nItems:
col_titles.append(str(this.current.dataset.run_id))
this.current.plot_window.table.setData(this.current.table_items)
this.current.plot_window.table.setHorizontalHeaderLabels(col_titles)
# Done update
return
def subscriber(dataset, **kwargs):
"""
Attach a plot window to the dataset and supply an update
method that will update the live plots.
"""
# First, check if we actually want to do anything. If not, we return
# a blank function
if this.current is None or this.current.plot_window is None:
logger.info(f"Live plotting disabled for {dataset.run_id}.")
this.current.dataset = dataset
return do_nothing
# Update the plot title
window_run_ids = _explode_ids(f"({this.current.plot_window.win_title})")
if not window_run_ids or window_run_ids is None:
window_run_ids = (dataset.run_id,)
else:
window_run_ids = window_run_ids + (dataset.run_id,)
run_id_str = ', '.join(_reduce_ids(window_run_ids))
this.current.plot_window.win_title = f"ID: {run_id_str}"
# Otherwise, register parameters into the window
this.current.dataset = dataset
win = this.current.plot_window
win.run_id = dataset.run_id
run_desc = dataset.description
params = run_desc.interdeps
shapes = run_desc.shapes
if this.current.plot_params is None:
this.current.plot_params = set(params.names)
else:
this.current.plot_params = set(p.fullname for p in this.current.plot_params)
for param in itertools.chain(params.dependencies, params.standalones):
name = param.name
if name not in this.current.plot_params:
logger.info("Parameter %s not in list of plot parameters %r", name, this.current.plot_params)
continue
# Figure out the shape of the parameter
if shapes[name] == (1,):
logger.info("Adding 0D parameter %s", name)
if win.table is None:
table = TableWidget(sortable=False)
t_widget = win.scene().addWidget(table)
t_widget.setMinimumSize(300, 0)
win.addItem(t_widget)
this.current.table_items = {}
elif this.current.table_items is None:
this.current.table_items = win.table.getData()
if name not in this.current.table_items:
if this.current.table_items:
nVals = len(next(iter(this.current.table_items.values())))
else:
nVals = 0
this.current.table_items[name] = [""]*nVals
win.table.setHorizontalHeaderLabels(list(str(s) for s in window_run_ids))
elif len(shapes[name]) == 1:
logger.info("Adding 1D parameter %s with shape %r", name, shapes[name])
bot_axis = params.dependencies[param][0]
# If we need to stack or append, find the right plot
plotitem = None
if this.current.stack:
try:
plotitem = next(iter(i for i in win.items if isinstance(i, PlotItem)))
except StopIteration:
pass
elif this.current.append:
plotitem = _compatible_plot_item(win, bot_axis, param)
if plotitem is None:
logger.warning("Append requested but appropriate plotitem not found."
" Making a new one.")
# Couldn't find an appropriate plotitem - make a new one
if plotitem is None:
plotitem = win.addPlot(name=name,
title=(f"{bot_axis.name} ({bot_axis.label}) v.<br>"
f"{param.name} ({param.label}) "
f"(id: {run_id_str})"))
plotitem.bot_axis.paramspec = bot_axis
plotitem.left_axis.paramspec = param
else:
# Update ID string
paramstr, _, _ = _parse_title(plotitem.plot_title)
plotitem.plot_title = f"{paramstr} (id: {run_id_str})"
# Add new trace to the plot
plotdata = plotitem.plot(setpoint_x=[],
pen=(255, 0, 0),
name=param.name)
this.current.plot_items[param.name] = plotdata
elif len(shapes[name]) == 2:
logger.info("Adding 2D parameter %s with shape %r", name, shapes[name])
bot_axis = params.dependencies[param][0]
left_axis = params.dependencies[param][1]
plotitem = None
if this.current.stack:
logger.warning("Can't stack 2D param %r. Will create a new plot instead.", name)
if this.current.append:
plotitem = _compatible_plot_item(win, bot_axis, left_axis)
if plotitem is None:
logger.warning("Append requested but appropriate plotitem not found."
" Making a new one.")
# Couldn't find an appropriate plotitem - make a new one
if plotitem is None:
plotitem = win.addPlot(name=name,
title=(f"{bot_axis.name} ({bot_axis.label}) v.<br>"
f"{left_axis.name} ({left_axis.label}) "
f"(id: {run_id_str})"))
plotitem.bot_axis.paramspec = bot_axis
plotitem.left_axis.paramspec = left_axis
else:
# Update ID string
paramstr, _, _ = _parse_title(plotitem.plot_title)
plotitem.plot_title = f"{paramstr} (id: {run_id_str})"
# Add new trace to the plot
# Initially the axes are set to some random range, this will be filled
# in once the first column is taken.
plotdata = plotitem.plot(setpoint_x= | np.linspace(0, 1, shapes[name][0]) | numpy.linspace |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 4 10:34:22 2019
A class based formulation of other analyses.
It is structured as:
Dataset
_| |_
| |
Analysis Forecast
________________|________________
| | |
SubxForecast EC45Forecast Seas5Forecast
Dataset initialises the majority of the variables, handles data loading, copying
and subsetting, and provides deseasonalising and data reduction methods.
Analysis adds a preprocessing method for era5 data, and some additional variable setup
Forecast adds an error correction method, and forecast-specific variable setup
SubxForecast, EC45Forecast, and Seas5Forecast add filetype specific data processing.
@author: josh
"""
import iris
import copy as cp
import datetime as dt
import iris.coord_categorisation as iccat
from iris.analysis.cartography import cosine_latitude_weights
import numpy as np
import cf_units
import os
class Dataset:
def __init__(self,field,dates,leads=None):
"""
Dataset is the base class shared by all analysis and forecast data sets. It defines
all functions that are generic between datasets. Not normally used directly.
Args:
* field - A string used to identify which fields to load from file.
        *dates - a list or tuple of 2 datetime.datetime objects specifying the
first and last datetime to include in the data
*leads - used by the Forecast class only, a list or tuple of 2 floats,
specifying minimum and maximum lead times in days to include.
"""
self.field=field
self.dates=dates
self._d_l,self._d_u=dates
self.leads=leads
#Only data of the same forecast hour is currently supported.
assert dates[0].hour==dates[1].hour
self.hour=[dates[0].hour]
#Name of the primary time coordinate
self.T="time"
#The expected position of the primary time coordinate in the cube
self.t=0
#The day of year associated with 'dates'
self.calendar_bounds=[d.timetuple().tm_yday for d in dates]
self.type=Dataset
#A dictionary that can contain any number of iris CubeLists, each
#labelled with a keyword. The load_data method generates a "data" and
#a "clim" CubeList
self.data={}
#Used by the get_climatology method
self.dist_means=None
self.distribution=None
#The time unit to use
self.U=cf_units.Unit(f"Days since {cf_units.EPOCH}",\
calendar=cf_units.CALENDAR_GREGORIAN)
#Constraints applied to the data at different points.
self.constraints={
#keep only data with a valid time coordinate
"load":iris.Constraint(cube_func=lambda cube: cube.coords(self.T)!=[]),
#keep only data that falls within the calendar_bounds
"calendar":iris.Constraint(coord_values={"day_of_year":lambda cell:\
self._in_calendar_bounds(cell)}),
#keep only data for the right hour
"hour":iris.Constraint(coord_values={"hour":lambda cell:\
np.isin(cell,self.hour)[0]}),
#keep only data that falls within the dates
"data":iris.Constraint(coord_values={self.T:lambda cell:\
self._d_l<=cell<=self._d_u}),
#keep only data that falls outside the dates
"clim":iris.Constraint(coord_values={self.T:lambda cell:\
(self._d_l>cell)or (cell>self._d_u)})
}
self._setup()
def _setup(self):
"""empty method used by derived classes."""
pass
def set_path(self,path):
"""set the path from which to load data"""
if os.path.isdir(path):
self.path=path
else:
raise(ValueError("Not a valid path."))
def copy(self):
"""A method which returns a copy of the Dataset"""
copy=self.type(self.field,self.dates,self.leads)
copy.dist_means=self.dist_means
copy.distribution=self.distribution
copy.data=cp.deepcopy(self.data)
return copy
def add_constraints(self,constr_dict):
"""add a dictionary of constraints 'constr_dict' to the constraints
attribute. Any previously defined keywords will be overwritten."""
for key in constr_dict:
self.constraints[key]=constr_dict[key]
def load_data(self,strict=True):
"""Load data from self.path as a list of iris cubes, preprocess it,
and split it into two CubeLists "data" and "clim".
"""
CL=iris.cube.CubeList()
fs=[self.path+f for f in os.listdir(self.path) if f.endswith(".nc")]
for f in fs:
CL.append(iris.load_cube(f,constraint=self.constraints["load"]))
self.data=CL
self._clean_loaded_data()
a=self.data.extract(self.constraints["data"])
c=self.data.extract(self.constraints["clim"])
if strict:
if a is None: raise(ValueError("No data after applying constraints."))
if c is None: raise(ValueError("No climatology data after applying constraints."))
self.data={"data":a,"clim":c}
def _clean_loaded_data(self):
"""empty method used by derived classes."""
pass
def _in_calendar_bounds(self,x):
"""Evaluates whether a real number x lies between the calendar_bounds
of the dataset, wrapping around the end of the year if necessary."""
c0,c1=self.calendar_bounds
if c1<c0:
ans=(x<=c1) or (x>=c0)
else:
ans=(x<=c1) and (x>=c0)
return ans
def restrict_area(self,region):
"""A convenience method that restricts the spatial extent of the
Dataset to one of a few preset domains, defined by a string "region".
"""
if region.lower()=="europe":
lons=[-15,20]
lats=[32,60]
elif region.lower()=="france":
lons=[-5,8]
lats=[42,51]
elif region.lower()=="north_atlantic":
lons=[-80,40]
lats=[30,90]
else: raise(ValueError(f"Unrecognised region {region}."))
#We use this over intersection, because it works for cubelists
area_constr=iris.Constraint(longitude=lambda x: lons[0]<=x<=lons[1],\
latitude=lambda x: lats[0]<=x<=lats[1])
for key in self.data:
self.data[key]=self.data[key].extract(area_constr)
def add_cat_coord(self,iccat_function,coordname,base_coord):
"""Adds a categorical coordinate to all cubes in Dataset.data, defined
by 'iccat_function' relative to 'base_coord', and called 'coordname'.
Note that the name of the new coord is defined internally by
        iccat_function; coordname serves only to gracefully handle the case when
that coordinate already exists."""
for key in self.data:
for i,entry in enumerate(self.data[key]):
if entry.coords(coordname)==[]:
iccat_function(entry,base_coord)
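    # Illustrative sketch (not part of the original source): add a "month"
    # coordinate derived from "time" using iris.coord_categorisation (iccat),
    # assuming ds is an already-loaded Dataset instance.
    #     >>> ds.add_cat_coord(iccat.add_month, "month", "time")
    # Cubes that already carry a "month" coordinate are skipped.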
def change_units(self,unit_str=None,cf_unit=None):
"""Changes the units of all cubes in the Dataset to a new unit given
either by a valid cf_units.Unit string specifier 'unit_str', or a
cf_units.Unit object, 'cf_unit'."""
if unit_str is not None and cf_unit is not None:
raise(ValueError("Only one unit can be provided."))
elif unit_str is not None:
unit=cf_units.Unit(unit_str)
elif cf_unit is not None:
unit=cf_unit
else: raise(ValueError("A unit must be provided."))
for key in self.data:
for i,entry in enumerate(self.data[key]):
entry.convert_units(unit)
def change_dates(self,newdates):
"""
Redefines the 'dates' attribute to the list of 2 datetimes 'newdates',
reapplying the "data" and "clim" constraints to match
**currently quite slow for large cubelists**
"""
self.dates=newdates
self._d_l,self._d_u=self.dates
self.calendar_bounds=[d.timetuple().tm_yday for d in self.dates]
CL_data=iris.cube.CubeList()
CL_clim=iris.cube.CubeList()
for key in self.data:
a=self.data[key].extract(self.constraints["data"])
if a != []:
CL_data.append(a)
a=self.data[key].extract(self.constraints["clim"])
if a != []:
CL_clim.append(a)
CL_data=iris.cube.CubeList([c for C in CL_data for c in C])
CL_clim=iris.cube.CubeList([c for C in CL_clim for c in C])
self.data["data"]=CL_data.concatenate()
self.data["clim"]=CL_clim.concatenate()
def change_calendar(self,newcalendar):
for key in self.data:
for i,entry in enumerate(self.data[key]):
newunit=cf_units.Unit(\
entry.coord("time").units.origin,calendar=newcalendar)
self.data[key][i].coord("time").unit=newunit
def aggregate_by(self,coords,bins,aggregator=iris.analysis.MEAN):
"""Aggregates the coordinates of all cubes in Dataset into user defined
bins.
Args:
*coords - A list of strings which are the coordinates
to be aggregated over.
*bins - A corresponding list of lists 'bins'. bins[i]
should contain the bounding values over which to group coords[i].
Kwargs:
*aggregator -A valid iris.analysis.Aggregator object which specifies
how to aggregate entries together.
"""
binlabels=[]
for j,coord in enumerate(coords):
binlabels.append(f"bin{j}")
for key in self.data:
for i,entry in enumerate(self.data[key]):
for j,(coord,b) in enumerate(zip(coords,bins)):
#remove potential old bins:
if self.data[key][i].coords(f"bin{j}")!=[]:
self.data[key][i].remove_coord(f"bin{j}")
if self.data[key][i].coords(coord)==[]:
raise(ValueError("No such coordinate in cube!"))
label=np.digitize(entry.coord(coord).points,b)
coord_dim=entry.coord_dims(entry.coord(coord))
entry.add_aux_coord(iris.coords.AuxCoord(label,\
var_name=f"bin{j}"),data_dims=coord_dim)
self.data[key][i]=entry.aggregated_by(binlabels,aggregator)
for j,coord in enumerate(coords):
if self.data[key][i].coords(coord)!=[]:
self.data[key][i].remove_coord(f"bin{j}")
def collapse_over(self,coord,aggregator=iris.analysis.MEAN):
"""Collapses all cubes in Dataset over a single coordinate.
Args:
*coords - A string which is the coordinate to collapse.
Kwargs:
*aggregator -A valid iris.analysis.Aggregator object which specifies
how to collapse the coordinate.
"""
for key in self.data:
for i,entry in enumerate(self.data[key]):
self.data[key][i]=self.data[key][i].collapsed(coord,aggregator)
def apply_coslat_mean(self,mask=None):
"""Collapses the latitude and longitude coordinates of all cubes in
Dataset, using a cosine latitude weighting.
Kwargs:
*mask:
A cube with matching latitude and longitude coordinates to
the cubes in Dataset. Each gridpoint in 'mask' should vary between
0 (totally masked) to 1 (totally unmasked).
"""
for key in self.data:
for i,entry in enumerate(self.data[key]):
weights = cosine_latitude_weights(entry)
#include the land sea mask in the weighting if one was passed.
if mask is not None:
weights=weights*mask.data
self.data[key][i]=entry.collapsed(["latitude","longitude"],\
iris.analysis.MEAN,weights=weights)
def regrid_to(self,dataset=None,cube=None,regridder=iris.analysis.Linear()):
"""regrids every cube in Dataset to match either those of another
Dataset object, or an iris.Cube object."""
if cube is None and dataset is None:
raise(ValueError("No reference for regridding provided!"))
elif cube is None:
ref_cube=dataset.data["data"][0]
else:
ref_cube=cube
for key in self.data:
for i,entry in enumerate(self.data[key]):
self.data[key][i]=entry.regrid(ref_cube,regridder)
def apply(self,func,*args,in_place=True,keys=None,**kwargs):
"""A method which applies a function to every cube in Dataset
Args:
*func - A function of the type func(cube,*args,**kwargs).
Kwargs:
        in_place - A boolean, specifying whether func modifies the cube in
        place. If True, the return value of func is ignored; if False, cube is
        set equal to func(cube), unless the output is None, in which case cube
        is removed from the CubeList.
"""
if keys is None:
keys=self.data
for key in keys:
for i,entry in enumerate(self.data[key]):
result=func(entry,*args,**kwargs)
if in_place:
pass
else:
if result is not None:
self.data[key][i]=result
else:
self.data[key].remove(self.data[key][i])
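    # Illustrative sketch (not part of the original source): apply a function
    # that returns a new cube, so in_place must be False ("realization" is an
    # assumed ensemble coordinate name here).
    #     >>> ds.apply(lambda cube: cube.collapsed("realization", iris.analysis.MEAN),
    #     ...          in_place=False)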
def apply_constraint(self,constraint,keys=None):
"""Apply a constraint to all cubes in Dataset"""
if keys is None:
keys=self.data
for key in keys:
self.data[key]=self.data[key].extract(constraint)
def get_climatology(self,percentiles):
"""Finds the distribution of all values in the Dataset.
Args:
* percentiles - A numpy array ([p_1,...,p_N]) where 0<=p_i<=100,
which defines the percentiles of the data distribution to calculate.
"""
self.percentiles=percentiles
lat,lon=self.data["clim"][0].shape[-2:]
dist=np.zeros([1,lat,lon])
#We call the whole cubelist into memory
self.data["clim"].realise_data()
dist=np.concatenate([f.data.reshape([-1,lat,lon]) for f in self.data["clim"]])
self.distribution=np.percentile(dist,percentiles,axis=0)
self.distribution[0]-=0.01
means=np.zeros([len(percentiles)-1,lat,lon])
for i in range(len(percentiles)-1):
for j in range(lat):
for k in range(lon):
means[i,j,k]=dist[np.digitize(dist[:,j,k],\
self.distribution[:,j,k],right=True)==i+1,j,k].mean()
#interpolates empty bins as being halfway between the distribution bounds
for i,j,k in np.argwhere(np.isnan(means)):
means[i,j,k]=self.distribution[i:i+2,j,k].mean()
self.dist_means=means
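    # Illustrative sketch (not part of the original source): a 5%-step
    # climatology computed from the "clim" cubes of a loaded Dataset ds.
    #     >>> ds.get_climatology(np.arange(0, 101, 5))
    #     >>> ds.distribution.shape   # (21, n_lat, n_lon) percentile levels
    #     >>> ds.dist_means.shape     # (20, n_lat, n_lon) per-bin means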
def get_seasonal_cycle(self,N=4,period=365.25,keys=None):
"""Fits N sine modes to the data series, with frequencies of n/(365.25 days)
for n in [1,...,N], in order to calculate a smooth seasonal cycle.
Kwargs:
*keys - A list of keys to self.data, specifying which data to use
to calculate the cycle. If keys is None, all data in the dataset
will be used.
"""
#Default is to include all data
if keys is None: keys = [key for key in self.data]
self.deseasonaliser=_Deseasonaliser(self.data,keys,N,period)
self.deseasonaliser.fit_cycle()
def remove_seasonal_cycle(self,deseasonaliser=None,strict_t_ax=False):
if deseasonaliser is None:
if self.deseasonaliser is None:
raise(ValueError("No _Deseasonaliser object found."))
else:
deseasonaliser=self.deseasonaliser
if deseasonaliser.coeffs is None:
deseasonaliser.fit_cycle()
for key in self.data:
for i,cube in enumerate(self.data[key]):
cycle=deseasonaliser.evaluate_cycle(cube.coord("time"),strict=strict_t_ax)
if cycle.shape!=cube.shape:
dim_map=[cube.coord_dims(coord)[0] for coord in \
["time","latitude","longitude"]]
cycle=iris.util.broadcast_to_shape(cycle,cube.shape,dim_map)
self.data[key][i].data=cube.data-cycle
def set_time_axis_first(self,tname="time"):
for key in self.data:
for entry in self.data[key]:
t_ax=entry.coord_dims(tname)[0]
if t_ax!=0:
ax=np.arange(entry.ndim)
entry.transpose([t_ax,*ax[ax!=t_ax]])
class _Deseasonaliser:
def __init__(self,data,keys,N,period=365.25,coeffs=None):
self.raw_data=[]
self.t=[]
self.t_unit=None
self.tref=None
self.keys=keys
self.N=N
self.pnum=2*(N+1)
self.period=period
self.coeffs=None
for key in keys:
for cube in data[key]:
self.raw_data.append(cube.data)
if self.t_unit is not None:
if self.t_unit!=cube.coord("time").units:
raise(ValueError("Clashing time units in data."))
else:
self.t_unit=cube.coord("time").units
self.t.append(cube.coord("time").points)
i=cube.coord_dims("time")[0]
self.raw_data=np.concatenate(self.raw_data,axis=i)
self.t=np.concatenate(self.t,axis=i)
self._setup_data()
self.lat,self.lon=self.raw_data.shape[1:]
def _setup_data(self):
self.raw_data=self.raw_data[np.argsort(self.t)]
self.t.sort()
self.tref=self.t[0]
self.t=(self.t-self.tref)%self.period
#intelligently guesses initial parameters
def _guess_p(self,tstd):
p=np.zeros(self.pnum)
for i in range(0,self.N):
p[2+2*i]=tstd/(i+1.0)
return p
def _change_calendar(self,new_calendar):
self.t_unit=cf_units.Unit(self.t_unit.origin,calendar=new_calendar)
#defines multimode sine function for fitting
def _evaluate_fit(self,x,p,N):
ans=p[0]*x+p[1]
for i in range(0,N):
            ans += p[2*i+2] * np.sin(2 * np.pi * (i+1)/self.period * x + p[2*i+3])
return ans
#defines error function for optimisation
def _get_residual(self,p,y,x,N):
return y - self._evaluate_fit(x,p,N)
def fit_cycle(self):
from scipy.optimize import leastsq
fit_coeffs=np.zeros([self.pnum,self.lat,self.lon])
for i in range(self.lat):
for j in range(self.lon):
griddata=self.raw_data[:,i,j]
tstd=griddata.std()
p0=self._guess_p(tstd)
plsq=leastsq(self._get_residual,p0,args=(griddata,self.t,self.N))
fit_coeffs[:,i,j]=plsq[0]
self.coeffs=fit_coeffs
def evaluate_cycle(self,t,strict=False):
t=t.copy()
if self.coeffs is None:
raise(ValueError("No coefficients for fitting have been calculated yet."))
if t.units!=self.t_unit:
if t.units.is_convertible(self.t_unit):
t.convert_units(self.t_unit)
elif (t.units.origin==self.t_unit.origin) and (not strict):
t.units=cf_units.Unit(t.units.origin,calendar=self.t_unit.calendar)
else:
raise(ValueError("Units of time series to evaluate are \
incompatible with units of fitted time series."))
t=t.points
t=(t-self.tref)%self.period
cycle=np.zeros([len(t),self.lat,self.lon])
for i in range(self.lat):
for j in range(self.lon):
cycle[:,i,j]=self._evaluate_fit(t,self.coeffs[:,i,j],self.N)
return cycle
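# Illustrative sketch (not part of the original source): the fitted cycle is a
# linear trend p[0]*t + p[1] plus N sine modes, evaluated per grid point.
# Assuming ds is a loaded Dataset:
#     >>> ds.get_seasonal_cycle(N=4)      # least-squares fit of the cycle
#     >>> ds.remove_seasonal_cycle()      # subtract the evaluated cycle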
"""Analysis is a subclass of Dataset that deals with reanalysis. At the moment
specific to era5, but that should be changed if more analyses start being used."""
class Analysis(Dataset):
def _setup(self):
self.path="/mnt/seasonal/reanalysis/era5/"+self.field+"/"
self.type=Analysis
def _clean_loaded_data(self):
for i in range(len(self.data)):
self.data[i].metadata.attributes.clear()
self.data[i].coord("latitude").points=\
self.data[i].coord("latitude").points.astype(np.float32)
self.data[i].coord("longitude").points=\
self.data[i].coord("longitude").points.astype(np.float32)
self.data=self.data.concatenate_cube()
try:
self.data.coord(self.T).convert_units(self.U)
        except Exception:
print(f"Warning: could not convert {self.T} to {self.U}, simply renaming calendar.")
new_T=cf_units.Unit(self.data.coord(self.T).units.origin,self.U.calendar)
self.data.coord(self.T).units=new_T
try:
self.data.coord(self.T).convert_units(self.U)
            except Exception:
                raise(ValueError("Unsuccessful attempt to change time units."))
iccat.add_hour(self.data,self.T)
self.data=self.data.extract(self.constraints["hour"])
iccat.add_day_of_year(self.data,self.T)
self.data=self.data.extract(self.constraints["calendar"])
self.data=iris.cube.CubeList([self.data])
class Forecast(Dataset):
def _setup(self):
self.T="forecast_reference_time"
self.S="forecast_period"
self.R="realisation"
self._l_l,self._l_u=self.leads
self.type=Forecast
self.t=1
self._fsetup()
self.constraints["lead"]=iris.Constraint(coord_values={self.S:\
lambda cell:(self._l_l<=cell)and (cell<=self._l_u)})
self.constraints["ens"]=iris.Constraint(coord_values={self.R:\
lambda cell: cell.point<self.max_ens})
#Used by derived classes
def _fsetup(self):
pass
def get_quantile_correction(self,analysis):
if self.dist_means is None:
raise(ValueError("Must get forecast climatology first."))
if analysis.dist_means is None:
raise(ValueError("Must get analysis climatology first."))
if not np.all(analysis.percentiles == self.percentiles):
raise(ValueError("These datasets have incomparable climatologies."))
self.quantile_correction=analysis.dist_means-self.dist_means
def apply_quantile_correction(self):
lat,lon=self.data["data"][0].shape[-2:]
for i,entry in enumerate(self.data["data"]):
shape=entry.data.shape
data=entry.data.reshape([-1,lat,lon])
for x in range(lat):
for y in range(lon):
which_bin=np.digitize(data[:,x,y],self.distribution[:,x,y],right=True)
which_bin[which_bin==0]+=1 #cold outliers put in 0-5% bin
which_bin[which_bin==len(self.percentiles)]-=1 #warm outliers in 95-100% bin
which_bin-=1 #indexing from zero
correction=self.quantile_correction[:,x,y][which_bin]
data[:,x,y]+=correction
data=data.reshape(shape)
self.data["data"][i].data=data
self.data["data"][i].long_name="corrected "+self.data["data"][i].name()
class SubxForecast(Forecast):
def _fsetup(self):
self.path="/mnt/seasonal/subx/"+self.field+"/"
self.R="realization"
self.max_ens=11
self.type=SubxForecast
def _clean_loaded_data(self):
CL=iris.cube.CubeList()
for i,cube in enumerate(self.data):
for entry in cube.slices_over(self.T):
entry.coord(self.T).convert_units(self.U)
T_ref=entry.coord(self.T)
S=entry.coord(self.S).points
t_coord=iris.coords.AuxCoord(S+T_ref.points[0],standard_name="time")
t_coord.units=T_ref.units
entry.add_aux_coord(t_coord,data_dims=1)
iccat.add_hour(entry,"time")
iccat.add_day_of_year(entry,"time")
CL.append(entry)
CL.sort(key=lambda cube:cube.coord(self.T).points[0])
self.data=CL
self.data=self.data.extract(self.constraints["calendar"])
self.data=self.data.extract(self.constraints["lead"])
self.data=self.data.extract(self.constraints["hour"])
self.data=self.data.extract(self.constraints["ens"])
def remove_masked(self):
for key in self.data:
self.data[key].realise_data()
masked=[]
for entry in self.data[key]:
if not | np.all(entry.data.mask==False) | numpy.all |
import numpy as np
import gd
import utils
gamma = 0.001
stepsize = 1
maxiter = 100000
plot = False
Q = np.diag([gamma, 1])
def f(x):
return 0.5 * x.T@Q@x
def fp(x):
return Q@x
def f_2d(x1, x2):
return 0.5 * gamma * x1**2 + 0.5 * x2**2
x0 = | np.array([1.0, 1.0]) | numpy.array |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 20 09:17:52 2019
@author: <NAME>, https://github.com/zhaofenqiang
Contact: <EMAIL>
"""
import numpy as np
from interp_numpy import resampleSphereSurf, bilinearResampleSphereSurfImg
# from utils import get_neighs_order
def get_rot_mat_zyz(z1, y2, z3):
"""
    ZYZ Euler rotation: apply z3 about the z-axis first, then y2 about the
    y-axis, lastly z1 about the z-axis, i.e. R = Rz(z1) @ Ry(y2) @ Rz(z3).
"""
return np.array([[np.cos(z1) * np.cos(y2) * np.cos(z3) - np.sin(z1) * np.sin(z3), -np.cos(z1) * np.cos(y2) * np.sin(z3) - np.sin(z1) * np.cos(z3), np.cos(z1) * np.sin(y2)],
[np.cos(z1) * np.sin(z3) + np.sin(z1) * np.cos(y2) * np.cos(z3), -np.sin(z1) * np.cos(y2) * np.sin(z3) + np.cos(z1) * np.cos(z3), np.sin(z1) * np.sin(y2)],
                     [-np.sin(y2) * np.cos(z3), np.sin(y2) * np.sin(z3), np.cos(y2)]])
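# Illustrative check (not part of the original source): zero angles should give
# the identity rotation.
#     >>> np.allclose(get_rot_mat_zyz(0.0, 0.0, 0.0), np.eye(3))
#     True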
import numpy as np
import mrcfile
def normalize(x, percentile = True, pmin=4.0, pmax=96.0, axis=None, clip=False, eps=1e-20):
"""Percentile-based image normalization."""
if percentile:
mi = | np.percentile(x,pmin,axis=axis,keepdims=True) | numpy.percentile |
"""Functions for importing and analyzing traffic traces"""
from __future__ import division
import math
import collections
import time
import dateutil
import types
import numpy as np
from scipy.stats import chisquare
from icarus.tools import TruncatedZipfDist
__all__ = [
'frequencies',
'one_timers',
'trace_stats',
'zipf_fit',
'parse_url_list',
'parse_wikibench',
'parse_squid',
'parse_youtube_umass',
'parse_common_log_format'
]
def frequencies(data):
"""Extract frequencies from traces. Returns array of sorted frequencies
Parameters
----------
data : array-like
An array of generic data (i.e. URLs of web pages)
Returns
-------
frequencies : array of int
The frequencies of the data sorted in descending order
Notes
-----
This function does not return the mapping between data elements and their
frequencies, it only returns frequencies.
This function can be used to get frequencies to pass to the *zipf_fit*
function given a set of data, e.g. content request traces.
"""
return np.asarray(sorted(collections.Counter(data).values(), reverse=True))
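# Illustrative sketch (not part of the original source): sorted frequencies of
# a small synthetic trace, ready to be passed to zipf_fit below.
#     >>> frequencies(['a', 'a', 'a', 'b', 'b', 'c'])
#     array([3, 2, 1])
#     >>> alpha, p = zipf_fit(frequencies(['a', 'a', 'a', 'b', 'b', 'c']))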
def one_timers(data):
"""Return fraction of contents requested only once (i.e., one-timers)
Parameters
----------
data : array-like
An array of generic data (i.e. URLs of web pages)
Returns
-------
one_timers : float
Fraction of content objects requested only once.
"""
n_items = 0
n_onetimers = 0
counter = collections.Counter(data)
    for i in counter.values():
n_items += 1
if i == 1:
n_onetimers += 1
return n_onetimers / n_items
def trace_stats(data):
"""Print full stats of a trace
Parameters
----------
data : array-like
An array of generic data (i.e. URLs of web pages)
Return
------
stats : dict
Metrics of the trace
"""
if isinstance(data, types.GeneratorType):
data = collections.deque(data)
freqs = frequencies(data)
alpha, p = zipf_fit(freqs)
n_reqs = len(data)
n_contents = len(freqs)
n_onetimers = len(freqs[freqs == 1])
return dict(n_contents=n_contents,
n_reqs=n_reqs,
n_onetimers=n_onetimers,
alpha=alpha,
p=p,
onetimers_contents_ratio=n_onetimers / n_contents,
onetimers_reqs_ratio=n_onetimers / n_reqs,
mean_reqs_per_content=n_reqs / n_contents
)
def zipf_fit(obs_freqs, need_sorting=False):
"""Returns the value of the Zipf's distribution alpha parameter that best
fits the data provided and the p-value of the fit test.
Parameters
----------
obs_freqs : array
The array of observed frequencies sorted in descending order
need_sorting : bool, optional
If True, indicates that obs_freqs is not sorted and this function will
sort it. If False, assume that the array is already sorted
Returns
-------
alpha : float
The alpha parameter of the best Zipf fit
p : float
The p-value of the test
Notes
-----
This function uses the method described in
http://stats.stackexchange.com/questions/6780/how-to-calculate-zipfs-law-coefficient-from-a-set-of-top-frequencies
"""
try:
from scipy.optimize import minimize_scalar
except ImportError:
raise ImportError("Cannot import scipy.optimize minimize_scalar. "
"You either don't have scipy install or you have a "
"version too old (required 0.12 onwards)")
obs_freqs = np.asarray(obs_freqs)
if need_sorting:
# Sort in descending order
obs_freqs = -np.sort(-obs_freqs)
n = len(obs_freqs)
def log_likelihood(alpha):
return np.sum(obs_freqs * (alpha * np.log( | np.arange(1.0, n + 1) | numpy.arange |
import copy
import sys
sys.path.append('SetsClustering')
from multiprocessing import Process ,Manager
import numpy as np
import LinearProgrammingInTheDarkClassVersion as LPD
from multiprocessing import Pool
from jgrapht.algorithms.shortestpaths import johnson_allpairs
import jgrapht
from SetsClustering import Utils, PointSet, KMeansAlg
from SetsClustering import KMeansForSetsSensitivityBounder as SensBounder
from SetsClustering import Coreset as CS
from scipy.spatial.distance import cdist
import seaborn as sns
from copy import deepcopy
import itertools
from scipy.ndimage import convolve
from timeit import default_timer as timer
from tqdm import tqdm
import dill
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.pylab as pl
from scipy.linalg import null_space
import scipy.ndimage as ndi
from scipy.spatial import ConvexHull
import argparse, os, pickle
from scipy.io import netcdf
POWER = 4
FORCE_NEIGHBORING = 20
import psutil
CPUS = psutil.cpu_count()
# import multiprocessing
# # from pathos.multiprocessing import ProcessingPool as Pool
# # from sklearn.externals.joblib import Parallel, delayed
# from multiprocessing import Process
parser = argparse.ArgumentParser(description='Initial Location Generator')
parser.add_argument('-d', type=str, default=None, help='Directory containing all maps')
parser.add_argument('-pp', default=False, action='store_true', help='preprocess map')
parser.add_argument('-ft', default='.nc', type=str, help='Type of map file')
parser.add_argument('-nf', default=1, type=int, help='Number of files describing a map of velocities')
parser.add_argument('-eps_g', default=None, type=float, help=r'resolution of the \varepsilon-grid')
parser.add_argument('-eps_b', default=0.08, type=float,
help=r'epsilon approximation for each of the patches of the currents')
parser.add_argument('-k', default=10, type=int, help='Desired number of drifters')
parser.add_argument('-bs', default=2, type=int, help='size of the blob prior to the clustering phase')
parser.add_argument('-coreset_sample_size', default=1000, type=int,
help='The size of the coreset for the clustering phase')
parser.add_argument('-time', default=False, action='store_true', help='Apply our system over time')
parser.add_argument('-tol', default=0.2, type=float, help='Tolerance for minimum volume ellipsoid')
parser.add_argument('-resume', default=False, action='store_true', help='In case the code is killed, you can resume from the last map')
parser.add_argument('-show', default=False, action='store_true', help='Show only our segmentation and clustering. Must have preprocessed these data before')
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
NORMAL = '\033[0m'
plt.rcParams.update({'font.size': 16})
manager = Manager()
def removeInclusionsJob(lst, ids, path_str):
global resdict
for i in range(len(lst)):
resdict[ids[i]] = True
if lst[i] in path_str:
resdict[ids[i]] = False
def removeInclusions(unified_paths, file_path='', file_prefix=''):
global manager
global resdict
global A
unified_paths_strings = [str(x[0]).strip('[]') for x in unified_paths]
unified_paths_strings.sort(key=(lambda x: len(x.split(','))))
lst = [list(grp) for i, grp in itertools.groupby(unified_paths_strings, key=(lambda x: len(x.split(','))))]
sizes = np.cumsum([len(x) for x in lst])
unique_ids = [list(range(sizes[i-1], sizes[i]) if i > 0 else range(sizes[i])) for i in range(len(sizes))]
if len(unified_paths_strings) > 10000:
with Manager() as manager:
proc_list = []
resdict = manager.dict()
for i, item in enumerate(lst):
if i != (len(lst) - 1):
proc_list.append(
Process(target=removeInclusionsJob,
args=(item, unique_ids[i], '\n'.join(unified_paths_strings[sizes[i]:])))
)
proc_list[-1].start()
for proc in proc_list:
proc.join()
mask = [x[1] for x in resdict.items()]
else:
resdict = dict()
for i, item in enumerate(lst):
if i != (len(lst) - 1):
removeInclusionsJob(item, unique_ids[i], '\n'.join(unified_paths_strings[sizes[i]:]))
mask = [x[1] for x in resdict.items()]
mask.extend([True for _ in range(len(lst[-1]))])
np.save('{}mask_unified_paths_{}.npy'.format(file_path, file_prefix), mask)
return [[int(y) for y in x.split(', ')] for x in list(itertools.compress(unified_paths_strings, mask))]
def removeDuplicates(list_1):
list2 = list(set(list_1))
list2.sort(key=list_1.index)
return list2
def makedir(dir_path):
try:
os.mkdir(dir_path)
except OSError as error:
print(error)
def saveVels(data, file_path, smoothed=True):
if smoothed:
file_path += 'Smoothed_Vel/'
else:
file_path += 'Original_Vel/'
makedir(file_path)
temp = np.tile(data[:, :, 0][:, :, np.newaxis], 10)
temp.dump(file_path + 'matrix_vel_x.dat')
temp = np.tile(data[:, :, 1][:, :, np.newaxis], 10)
temp.dump(file_path + 'matrix_vel_y.dat')
def readNetCDFFile(file_path, over_time):
file2read = netcdf.NetCDFFile(file_path, 'r')
U = file2read.variables['u'].data # velocity in x-axis
V = file2read.variables['v'].data # velocity in y-axis
mask = np.logical_and(np.abs(U) <= 1e3, np.abs(V) <= 1e3)
V = np.multiply(V, mask)
U = np.multiply(U, mask)
if not over_time:
U = U[0, :, :, :]
V = V[0, :, :, :]
return U,V
def innerFunction(current_possible_combs, unique_keys):
global resdict
for i, element in enumerate(current_possible_combs):
resdict[unique_keys[i]] = (removeDuplicates(element[0][0] + element[1][0]), element[0][1] + element[1][1])
def getAllPossiblePaths(list1, list2):
global CPUS
global manager
global resdict
if len(list1) * len(list2) > 10000:
manager = Manager()
resdict = manager.dict()
all_possible_combs = np.array_split(list(itertools.product(list1, list2)), CPUS)
unique_ids = np.array_split(np.arange(sum([x.size for x in all_possible_combs])), CPUS)
proc_list = []
for i, item in enumerate(all_possible_combs):
proc_list.append(
Process(target=innerFunction, args=(item, unique_ids[i]))
)
proc_list[-1].start()
for proc in proc_list:
proc.join()
temp = list(resdict.values())
else:
temp = []
for element in itertools.product(list1, list2):
temp.append((removeDuplicates(element[0][0] + element[1][0]), element[0][1] + element[1][1]))
return temp
class CurrentEstimation(object):
def __init__(self, grid, k=10, epsilon_grid=0.06, tolerance=0.001, epsilon_body=2, is_grid=True, is_data_vectorized=True,
blob_size=3, sens_file_name='sens.npz', coreset_sample_size = int(1e3), save_mode=True,
matrix_of_velocities=True, save_path='', file_prefix='', show=False, verbose=False):
self.grid = grid
self.is_grid=is_grid
self.d = (self.grid.ndim - 1) if matrix_of_velocities else self.grid.ndim
self.epsilon_grid = epsilon_grid
self.epsilon_body = epsilon_body
self.tolerance = tolerance
self.g = jgrapht.create_graph(directed=True)
self.cost_func = (lambda x: self.grid[tuple(x.astype("int") if is_grid else x)]) # create a simple membership cost function
self.iocsAlg = None
self.segments = []
self.eps_star = None
self.bodies = []
self.full_bodies = []
self.is_data_vectorized = is_data_vectorized
self.k = k
self.blob_size = blob_size
self.coreset_sample_size = coreset_sample_size
self.save_mode = save_mode
self.binary_grid = None
self.matrix_of_velocities = matrix_of_velocities
self.sens_file_name = sens_file_name
self.ellipsoids = []
self.convex_hulls = []
self.verbose = verbose
self.save_path = save_path
self.file_prefix = file_prefix
self.show = show
def polynomialGridSearchParallelizedVersion(self):
with Pool() as pool:
pass
def checkIfContained(self, point):
for i,body in enumerate((self.full_bodies if self.epsilon_body == 0 else self.bodies)):
if body.ndim > 1:
temp_in_body = np.equal(body, point).all(1).any()
temp_in_CH = False
if self.convex_hulls[i] is not None:
temp_in_CH = np.all(self.convex_hulls[i][:,:-1].dot(point) <= -self.convex_hulls[i][:,-1])
if temp_in_body or temp_in_CH:
return True
else:
if np.linalg.norm(body - point) == 0:
return True
return False
def IOCS(self, p):
cost_func = lambda x: 0.85 <= np.dot(np.nan_to_num(self.grid[tuple(p)]/np.linalg.norm(self.grid[tuple(p)])),
np.nan_to_num(self.grid[tuple(x)]/np.linalg.norm(self.grid[tuple(x)]))) \
<= 1 and 0.5 <= np.linalg.norm(self.grid[tuple(p)])/np.linalg.norm(self.grid[tuple(x)]) <= 2
self.iocsAlg = LPD.LinearProgrammingInTheDark(P=self.grid,cost_func=cost_func, point=p,
d=self.d, epsilon=self.tolerance, hull_hyper=None,
matrix_of_vecs=True)
if self.iocsAlg.lower_d <= 1:
if self.iocsAlg.lower_d == 0:
self.bodies.append(p)
self.full_bodies.append(p)
self.ellipsoids.append(None)
self.convex_hulls.append(None)
else:
idxs = np.where(self.iocsAlg.oracle.flattened_data == 1)[0]
Z = np.empty((idxs.shape[0], p.shape[0]))
Z[:, self.iocsAlg.irrelevant_dims] = p[self.iocsAlg.irrelevant_dims]
Z[:, self.iocsAlg.dims_to_keep[0]] = \
np.arange(*(self.iocsAlg.oracle.bounding_box[self.iocsAlg.dims_to_keep].flatten() +
np.array([0, 1])).tolist())[idxs]
self.bodies.append(Z)
self.full_bodies.append(Z)
self.ellipsoids.append(None)
self.convex_hulls.append(None)
elif self.iocsAlg.get_all_points:
idxs = np.where(self.iocsAlg.oracle.flattened_data == 1)[0]
Z = self.iocsAlg.oracle.coordinates[:-1, idxs].T
self.bodies.append(Z)
self.full_bodies.append(Z)
self.ellipsoids.append(None)
self.convex_hulls.append(None)
else:
self.ellipsoids.append(self.iocsAlg.computeAMVEE() + (p, ))
if self.epsilon_body > 0:
s = timer()
self.approximateBody(self.ellipsoids[-1][0][-1], self.ellipsoids[-1][0][-2],
idx_dims_retrieve=self.ellipsoids[-1][-3], dims_value=self.ellipsoids[-1][-1],
rest_dims=self.ellipsoids[-1][-2])
else:
self.attainWholeBody(self.ellipsoids[-1][0][-1], self.ellipsoids[-1][0][-2],
idx_dims_retrieve=self.ellipsoids[-1][-3], dims_value=self.ellipsoids[-1][-1],
rest_dims=self.ellipsoids[-1][-2])
def polynomialGridSearch(self):
dims = list(self.grid.shape[:-1] if self.matrix_of_velocities else self.grid.shape)
for i in range(len(dims)):
dims[i] = np.arange(0, dims[i], int(np.round(dims[i] * self.epsilon_grid)))
try:
X = np.array(np.meshgrid(*dims)).T.reshape(-1, len(dims))
return X
except MemoryError:
raise MemoryError("Cant handle this much data! Lower your epsilon or simply run the parallelized version")
@staticmethod
def semiBinarizeGrid(grid, kernel_size=None):
# Apply Mean-Filter
kernel = np.ones(tuple([grid.ndim if kernel_size is None else kernel_size for i in range(grid.ndim)]),
np.float32) / (kernel_size ** grid.ndim if kernel_size is not None else grid.ndim ** grid.ndim)
return convolve(grid, kernel, mode='constant', cval=0)
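    # Illustrative sketch (not part of the original source): a 3x3 mean filter
    # on a 2-D grid smears an isolated unit cell over its neighbourhood.
    #     >>> g = np.zeros((5, 5)); g[2, 2] = 1.0
    #     >>> CurrentEstimation.semiBinarizeGrid(g, kernel_size=3)[2, 2]
    #     0.1111111...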
def generateEpsilonStar(self, degree=None):
if degree is None:
degree = self.epsilon_body
Z = np.arange(0, 2*np.pi, degree * np.pi)
V = np.array(np.meshgrid(*[Z for i in range(self.d)])).T.reshape(-1, self.d)
V = np.divide(V, np.linalg.norm(V, axis=1)[:, np.newaxis], out=np.zeros_like(V), where=(V != 0))
V = np.unique(np.around(np.unique(V[1:], axis=0), self.d+1), axis=0)
return V
@staticmethod
def run_dill_encoded(payload):
fun, args = dill.loads(payload)
return fun(*args)
@staticmethod
def apply_async(pool, fun, args):
payload = dill.dumps((fun, args))
return pool.apply_async(CurrentEstimation.run_dill_encoded, (payload,))
def attainWholeBody(self, E, c, idx_dims_retrieve=None, dims_value=None, rest_dims=None):
if self.iocsAlg.oracle.checkIfInsidePixelStyleNumpyVer(np.round(c)) > 1.0:
raise ValueError('Something is wrong with the ellipsoid!')
bounding_box = self.iocsAlg.oracle.bounding_box
indices = np.vstack(map(np.ravel, np.meshgrid(*[np.arange(bounding_box[x, 0], bounding_box[x, 1]+1)
for x in range(bounding_box.shape[0])]))).T
body = []
temp = 0
for idx in indices:
if self.iocsAlg.oracle.checkIfInsidePixelStyleNumpyVer(idx) == 1 and np.linalg.norm(E.dot(idx - c)) <= 1 \
and not self.checkIfContained(idx):
temp += 1
if np.linalg.norm(self.grid[tuple(idx)]) > 1e-10:
body.append(idx)
if len(body) > 0:
self.full_bodies.append(np.vstack(body))
def approximateBody(self, E, c, idx_dims_retrieve=None, dims_value=None, rest_dims=None):
bounding_box = self.iocsAlg.oracle.bounding_box
indices_of_lengths = np.argsort([x[0] - x[1] for x in bounding_box])
coeffs = np.zeros((indices_of_lengths.shape[0],))
for i in range(coeffs.shape[0]):
if i == (coeffs.shape[0] - 1):
coeffs[indices_of_lengths[i]] = 1
else:
coeffs[indices_of_lengths[i]] = max(((bounding_box[indices_of_lengths[i],1] -
bounding_box[indices_of_lengths[i],0]) * self.epsilon_body),1)
V = np.vstack(map(np.ravel, np.meshgrid(*[np.arange(start=x[0], stop=x[1],
step=coeffs[j]) for (j,x) in enumerate(bounding_box)]))).T
V = np.unique(V.astype("int"), axis=0)
body = []
for v in V:
if (self.iocsAlg.oracle.checkIfInsidePixelStyleNumpyVer(v) <= 1.0) and\
(np.linalg.norm(E.dot(v - c)) <= np.sqrt(1 + (1 + self.iocsAlg.eps) * E.shape[0])) and\
(np.linalg.norm(self.grid[tuple(v)]) > 0) and (not self.checkIfContained(v)):
body.append(v)
if len(body) > 0:
self.bodies.append(np.vstack(body))
if len(body) > (self.d + 1):
try:
self.convex_hulls.append(ConvexHull(self.bodies[-1]).equations)
except:
self.convex_hulls.append(None)
else:
self.convex_hulls.append(None)
def createBlobs(self, body):
if body.ndim == 1:
return [PointSet.PointSet(body[np.newaxis,:])]
elif body.shape[0] < self.blob_size:
return [PointSet.PointSet(body)]
else:
blob = []
for x_val in np.unique(body[:,0]):
idxs = np.where(body[:, 0] == x_val)[0]
if body[idxs].shape[0] < self.blob_size:
blob.extend([PointSet.PointSet(body[idxs])])
else:
splitted_array = np.array_split(body[idxs], int(body[idxs].shape[0] / self.blob_size))
blob.extend([PointSet.PointSet(x) for x in splitted_array])
return blob
def clusteringAssignment(self, set_P, Q):
assignments_per_point = []
assignments_per_blob = []
for P in set_P:
dists = cdist(P.P, Q)
cols_idxs = np.argmin(dists, axis=1)
min_idx = np.argmin(np.min(dists, axis=1))
assignments_per_point.extend([cols_idxs[min_idx] for p in P.P])
assignments_per_blob.append(cols_idxs[min_idx])
return assignments_per_point, assignments_per_blob
def clusterWaves(self, continue_from=0,return_full_bodies=True):
P = []
blobs = []
if self.epsilon_body != 0:
for body in self.bodies:
P = []
# need to make a way to make sure that there is a trade-off between the first 3 entries and last two
if body.ndim == 1:
body = body[np.newaxis, :]
for point in body:
a = self.grid[tuple(point.astype("int"))]
b = np.linalg.norm(a)
P.append(
np.hstack((point*FORCE_NEIGHBORING, np.divide(a,b, out=np.zeros_like(a), where=b!=0)
* np.linalg.norm(point))))
blobs.extend(self.createBlobs(np.array(deepcopy(P))))
else:
for body in self.full_bodies:
# need to make a way to make sure that there is a trade-off between the first 3 entries and last two
P = []
if body.ndim == 1:
body = body[np.newaxis, :]
for point in body:
P.append(
np.hstack((point*FORCE_NEIGHBORING, self.grid[tuple(point.astype("int"))] /
np.linalg.norm(self.grid[tuple(point.astype("int"))]) * np.linalg.norm(point))))
blobs.extend(self.createBlobs(np.array(deepcopy(P))))
set_P_indiced = [(P, idx) for (idx, P) in enumerate(blobs)] # taking the full!
if continue_from > 0 or self.show:
sensitivity = np.load(self.save_path + self.file_prefix + self.sens_file_name)['s']
print("Loaded sensitivity for sets clustering!")
else:
k_means_sens_bounder = SensBounder.KMeansForSetsSensitivityBounder(set_P_indiced, self.k, None, None)
sensitivity = k_means_sens_bounder.boundSensitivity()
if self.save_mode:
np.savez(self.save_path + self.file_prefix + self.sens_file_name, s=sensitivity)
print('Sum of sensitivity is {}'.format(np.sum(sensitivity)))
print("Saved sensitivity for sets clustering!")
if continue_from <= 1 and not self.show:
k_means_alg = KMeansAlg.KMeansAlg(blobs[0].d, self.k)
coreset = CS.Coreset()
C = coreset.computeCoreset(set_P_indiced, sensitivity, int(self.coreset_sample_size))
_, Q, _ = k_means_alg.computeKmeans(C[0], False)
np.savez('{}Optimal_clustering_{}.npz'.format(self.save_path, self.file_prefix), Q=Q)
else:
Q = np.load('{}Optimal_clustering_{}.npz'.format(self.save_path,self.file_prefix))['Q']
print("Loaded optimal clustering of coreset")
assignments_per_point, assignments_per_blob = self.clusteringAssignment(blobs, Q)
return np.array(blobs), np.array(assignments_per_blob), assignments_per_point
def addConnections(self, pairs, g_all, i, j, list_of_vertices, shift_idx_root, shift_idx_leaf, is_leaf=None,
enable_weights=False, connections=[]):
dists = np.linalg.norm(self.clustered_bodies[i][pairs[:,0]] - self.clustered_bodies[j][pairs[:,1]], axis=1)
pairs_of_interest = pairs[np.where(dists <= 2)[0]]
if len(pairs_of_interest) != 0:
if enable_weights:
for pair in pairs_of_interest:
root_of_path_of_interest = self.clustered_bodies[i][pair[0]]
leaf_of_path_of_interest = self.clustered_bodies[j][pair[1]]
direction = root_of_path_of_interest - leaf_of_path_of_interest
direction = direction / np.linalg.norm(direction)
target_direction = self.grid[tuple(root_of_path_of_interest.astype("int"))]
alpha = np.dot(direction, target_direction/np.linalg.norm(target_direction))
if alpha > 0.7:
try:
g_all.add_edge(int(pair[0] + shift_idx_root), int(pair[1] + shift_idx_leaf))
list_of_vertices = np.delete(list_of_vertices, np.where(list_of_vertices == (pair[1]+shift_idx_leaf)))
if is_leaf is not None:
is_leaf = np.delete(is_leaf, np.where(is_leaf == (pair[0] + shift_idx_root)))
except:
continue
else:
roots = np.unique(pairs_of_interest[:, 0])
for root in roots:
try:
idxs_of_interest = np.where(pairs_of_interest[:, 0] == root)[0]
pairs_of_interest_per_root = pairs_of_interest[idxs_of_interest, :]
root_of_path_of_interest = self.clustered_bodies[i][root][np.newaxis, :]
leaf_of_path_of_interest = self.clustered_bodies[j][pairs_of_interest_per_root[:, 1]]
directions = leaf_of_path_of_interest - root_of_path_of_interest
directions = np.divide(directions,
np.linalg.norm(directions, axis=1)[:, np.newaxis],
out=np.zeros_like(directions),
where=np.linalg.norm(directions, axis=1)[:, np.newaxis]!=0, casting="unsafe")
target_direction = self.grid[tuple(root_of_path_of_interest.flatten().astype("int"))]
alpha = np.dot(directions, target_direction / np.linalg.norm(target_direction))
l = np.argmax(alpha)
if alpha[l] >= 0.7:
g_all.add_edge(int(root + shift_idx_root),
int(pairs_of_interest[idxs_of_interest[l]][1] + shift_idx_leaf))
list_of_vertices = \
np.delete(list_of_vertices,
np.where(list_of_vertices == (pairs_of_interest[idxs_of_interest[l]][1]
+ shift_idx_leaf)))
if is_leaf is not None:
is_leaf = np.delete(is_leaf, np.where(is_leaf == (root + shift_idx_root)))
connections.append((i, int(root), j, int(pairs_of_interest[idxs_of_interest[l]][1])))
except:
continue
return g_all, list_of_vertices, is_leaf, connections
def containedInMap(self, point):
temp = point + self.grid[tuple(point.astype("int"))]
if np.any(temp < 0) or np.any(temp >= np.array(list(self.grid.shape[:-1]))):
return False
return True
def attainDiameterOfSetOfPoints(self, P):
return np.max(np.linalg.norm(P - P[np.argmax(np.linalg.norm(P - np.mean(P, axis=0)[np.newaxis, :],
axis=1))][np.newaxis, :], axis=1))
def avoidRedundantConnection(self, point, P, orig_idxs):
norms = np.linalg.norm(P - point[np.newaxis, :], axis=1)
idxs = np.argsort(norms)
temp = P - point[np.newaxis, :]
temp = np.around(np.multiply(temp[idxs], (1 / norms[idxs])[:, np.newaxis]), 2)
_, idx2 = np.unique(temp, axis=0, return_index=True)
return orig_idxs[idxs[idx2]]
def generateGraph(self, is_full=True, enable_weights=False, enable_all=False):
leaves = []
roots = []
all_others = []
roots_all = np.array([])
leaves_all = np.array([])
idx_shift = 0
g_all = jgrapht.create_graph(directed=True, weighted=False)
graphs = [jgrapht.create_graph(directed=True, weighted=False) for i in range(self.k)]
counter_bad_vertices = np.zeros((self.k, ))
cnt = 0
for body_idx,body in enumerate(self.clustered_bodies):
idxs_leafs = np.arange(body.shape[0])
idxs_roots = np.arange(body.shape[0])
idxs_all_others = np.arange(body.shape[0])
for i in range(idx_shift, idx_shift + body.shape[0]):
graphs[body_idx].add_vertex(i-idx_shift)
g_all.add_vertex(i)
for i, point in enumerate(body):
temp = body-point[np.newaxis, :]
norms = np.linalg.norm(temp, axis=1)[:, np.newaxis]
if is_full:
norms = norms.flatten()
neighbors = np.where(np.logical_and(norms.flatten() <= | np.sqrt(2) | numpy.sqrt |
import collections
import sys
import pickle
import math
import logging
import numpy as np
import scipy.stats.distributions as dists
from sklearn.linear_model import LinearRegression
"""
Classes in the module implement trend detection techniques.
For uniform interface, all classes must implement the following functions:
get_result(): returns the relevant figure of merit based on the current
state of the model
update(kwargs): updates the model with new information;
required keyword arguments may differ between models
"""
class LinearRegressionModel(object):
def __init__(self, config):
self.counts = []
self.averaged_counts = []
self.min_points = int(config['min_points'])
try:
self.averaging_window_size = int(config["averaging_window_size"])
except KeyError:
self.averaging_window_size = 1
try:
self.norm_by_mean = bool(config['norm_by_mean'])
except KeyError:
self.norm_by_mean = False
try:
self.regression_window_size = int(config['regression_window_size'])
except KeyError:
self.regression_window_size = None
self.regression = LinearRegression()
def update(self, **kwargs):
count = kwargs["count"]
self.counts.append( count )
size = self.averaging_window_size
if len(self.counts) >= size:
self.averaged_counts.append( sum(self.counts[-size:])/float(size) )
else:
self.averaged_counts.append(0)
def get_result(self):
""" Run a linear fit on the averaged count,
which will be the raw counts if not otherwise specified. """
if len(self.averaged_counts) < self.min_points:
return 0
if self.regression_window_size is not None:
y = | np.array(self.averaged_counts[-self.regression_window_size:]) | numpy.array |
"""
Least-squares fitting and nearest rotation matrix
"""
import numpy as np
import scipy.linalg as la
from .trafo import Transformation
from .rotation import Rotation, Quaternion, map_to_quat
def qfit(target, source):
"""Least-squares fitting of source onto target using unit quaternions.
Parameters
----------
target : (N, 3) array
3D point cloud onto which the source will be transformed
source : (N, 3) array
3D point cloud that will be transformed so as to fit the target
optimally in a least-squares sense
Returns
-------
R : (3, 3) array
Optimal rotation matrix
t : (3, ) array
Optimal translation vector
"""
assert target.ndim == 2
assert np.shape(target)[1] == 3
assert np.shape(target) == np.shape(source)
x = target.mean(0)
y = source.mean(0)
A = | np.dot((target-x).T, source-y) | numpy.dot |
import os
import numpy as np
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D
from time import sleep
def warm_up_exercise():
A = np.eye(5)
return A
def plot_data(x, y):
figure = pyplot.figure()
pyplot.plot(x, y, 'ro', ms=10, mec='k')
pyplot.ylabel('Profit in $10,000s')
pyplot.xlabel('Population of City in 10,000s')
def plot_cost_hist(J_hist, rate):
pyplot.plot(np.arange(len(J_hist)), J_hist, '-', ms=10, mec='k')
pyplot.ylabel('Cost J')
pyplot.xlabel('Iterations')
pyplot.plot(np.arange(len(J_hist)), J_hist, '-')
def compute_cost(X, y, theta):
m = y.shape[0]
J = (1/(2*m)) * np.sum( np.square((np.dot(X, theta) - y)) )
return J
def gradient_descent(X, y, theta, alpha, num_iters):
m = y.shape[0]
adjusted_theta = theta.copy()
J_hist = []
for i in range(0, num_iters):
adjusted_theta -= (alpha / m) * (np.dot(X, adjusted_theta) - y).dot(X)
J_hist.append(compute_cost(X, y, adjusted_theta))
return J_hist, adjusted_theta
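# Illustrative sketch (not part of the original source): fit y = 1 + 2*x after
# prepending the bias column expected by compute_cost/gradient_descent.
#     >>> x = np.arange(5, dtype=float)
#     >>> X = np.stack([np.ones_like(x), x], axis=1)
#     >>> y = 1 + 2 * x
#     >>> J_hist, theta = gradient_descent(X, y, np.zeros(2), 0.1, 1500)
#     >>> np.round(theta, 2)
#     array([1., 2.])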
def predict(X, theta):
pred = np.dot(X, theta)
print("For population =", X[1]*10000, ' we predict a profit of {:.2f}\n'.format(pred*10000))
def visualize_cost(X, y, trained_theta):
# Step over theta0 values in range -10,10 with 100 steps
# theta0_vals => (100 x 1)
theta0_vals = np.linspace(-10,10,100)
    # Step over theta1 values in range -1,4 with 100 steps
    # theta1_vals => (100 x 1)
theta1_vals = np.linspace(-1,4,100)
# Create a matrix of costs at different values of theta0 and theta1
# J_vals => (100 x 100)
J_vals = np.zeros((theta0_vals.shape[0], theta1_vals.shape[0]))
for i, theta0 in enumerate(theta0_vals):
for j, theta1 in enumerate(theta1_vals):
J_vals[i, j] = compute_cost(X, y, [theta0, theta1])
J_vals = J_vals.T
figure = pyplot.figure(figsize=(12, 5))
# First parameter controls position in sub plot
# projection controls 3d-ness
axis = figure.add_subplot(121, projection='3d')
# cmap='viridis' makes it colorful
axis.plot_surface(theta0_vals, theta1_vals, J_vals, cmap='viridis')
pyplot.xlabel('theta0')
pyplot.ylabel('theta1')
axis.set_zlabel('Cost J')
pyplot.title('Cost at different thetas')
axis = pyplot.subplot(122)
# Levels controls number and positions of the contour lines, should be int or array-like object
pyplot.contour(theta0_vals, theta1_vals, J_vals, linewidths=2, cmap='viridis', levels=np.logspace(-2, 3, 20))
axis.set_xlabel('theta0')
axis.set_ylabel('theta1')
pyplot.plot(trained_theta[0], trained_theta[1], 'ro', ms=10, lw=2)
pyplot.title('Minimum value of cost J')
def normalizeFeatures(X):
# X => (m x n) : m = num. examples, n = num. features
X_norm = X.copy()
m = X_norm.shape[0]
# X.shape => (m x n)
# np.zeros(X.shape[1]) => np.zeros(n) => (n x 1)
mu = np.zeros(X.shape[1])
sigma = np.zeros(X.shape[1])
for feature in range(X.shape[1]):
mu[feature] = np.mean(X[:, feature])
        sigma[feature] = np.std(X[:, feature])
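    # A hedged sketch of the likely remainder (assumption): normalize each
    # feature with the statistics computed above and return all three pieces.
    #     X_norm = (X_norm - mu) / sigma
    #     return X_norm, mu, sigma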
import numpy as np
import math
from .utils import *
# A lot of comments and documentation is directly copied from <NAME> (https://github.com/raphaelvallat/entropy)
# Permutation Entropy
"""Permutation Entropy.
Parameters
----------
x : np.array
One-dimensional time series of shape (n_times)
order : int
Order of permutation entropy. Default is 3.
delay : int
Time delay (lag). Default is 1.
normalize : bool
If True, divide by log2(order!) to normalize the entropy between 0
    and 1. Otherwise, return the permutation entropy in bits. Default is True.
Returns
-------
pe : float
Permutation Entropy.
Notes
-----
The permutation entropy is a complexity measure for time-series first
introduced by Bandt and Pompe in 2002.
The permutation entropy of a signal :math:`x` is defined as:
.. math:: H = -\\sum p(\\pi)\\log_2(p(\\pi))
where the sum runs over all :math:`n!` permutations :math:`\\pi` of order
:math:`n`. This is the information contained in comparing :math:`n`
consecutive values of the time series. It is clear that
:math:`0 ≤ H(n) ≤ \\log_2(n!)` where the lower bound is attained for an
increasing or decreasing sequence of values, and the upper bound for a
completely random system where all :math:`n!` possible permutations appear
with the same probability.
The embedded matrix :math:`Y` is created by:
.. math::
y(i)=[x_i,x_{i+\\text{delay}}, ...,x_{i+(\\text{order}-1) *
\\text{delay}}]
.. math:: Y=[y(1),y(2),...,y(N-(\\text{order}-1))*\\text{delay})]^T
References
----------
Bandt, Christoph, and <NAME>. "Permutation entropy: a
natural complexity measure for time series." Physical review letters
88.17 (2002): 174102.
Examples
--------
Permutation entropy with order 2
>>> from OrdinalEntroPy import *
>>> import numpy as np
>>> x = [4, 7, 9, 10, 6, 11, 3]
>>> # Return a value in bit between 0 and log2(factorial(order))
>>> print(PE(x, order=2, normalize=False))
0.9182958340544896
Normalized permutation entropy with order 3
>>> from OrdinalEntroPy import *
>>> import numpy as np
>>> x = [4, 7, 9, 10, 6, 11, 3]
>>> # Return a value comprised between 0 and 1.
>>> print(PE(x, order=3, normalize=True))
0.5887621559162939
"""
def PE(values,order=3,delay=1,normalize=True):
    # get all the permutations
str_permutations = get_str_permutation_ordinal(values,order,delay)
# get set of indices for each unique permutation
permutation_indexes = get_permutation_index(str_permutations)
# get frequency of each permutation pattern
permutation_frequency = get_permutation_frequency(permutation_indexes,len(values),order)
# get shannon entropy of frequencies
entropy = get_shanon_entropy(permutation_frequency)
#Normalize
if normalize:
entropy = entropy/math.log2(math.factorial(order))
return entropy
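# A hedged, self-contained reference (an assumption for illustration, written
# independently of the .utils helpers used above): it computes permutation
# entropy directly from ordinal patterns and should reproduce the docstring
# examples, so it can be used to sanity-check PE.
def _pe_reference(x, order=3, delay=1, normalize=True):
    x = np.asarray(x, dtype=float)
    n = len(x) - (order - 1) * delay
    # ordinal pattern (argsort) of each delay-embedded window
    patterns = np.array([np.argsort(x[i:i + (order - 1) * delay + 1:delay])
                         for i in range(n)])
    _, counts = np.unique(patterns, axis=0, return_counts=True)
    p = counts / counts.sum()
    entropy = -np.sum(p * np.log2(p))
    return entropy / math.log2(math.factorial(order)) if normalize else entropy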
"""Dispersion Entropy.
Parameters
----------
x : np.array
One-dimensional time series of shape (n_times)
order : int
Order of permutation entropy. Default is 3.
classes : int
Number of classes. Default is 3.
delay : int
Time delay (lag). Default is 1.
normalize : bool
    If True, divide by log2(classes**order) to normalize the entropy between 0
    and 1. Otherwise, return the dispersion entropy in bits. Default is True.
Returns
-------
de : float
    Dispersion Entropy.
Notes
-----
Dispersion Entropy (DE) was introduced in the year 2016 by Azami and Rostaghi
to quantify the complexity of time series.
The Dispersion entropy of a signal :math:`x` is defined as:
.. math:: H = -\\sum p(\\pi)\\log_2(p(\\pi))
where the sum runs over all :math:`classes**order` permutations :math:`\\pi` of order
:math:`n` and consisting of classes :math:`c`. This is the information contained in comparing :math:`n`
consecutive values of the time series. It is clear that
:math:`0 ≤ H(n) ≤ \\log_2(classes^{order})` where the lower bound is attained for an
increasing or decreasing sequence of values, and the upper bound for a
completely random system where all :math:`classes**order` possible dispersion patterns appear
with the same probability.
The embedded matrix :math:`Y` is created by:
.. math::
y(i)=[x_i,x_{i+\\text{delay}}, ...,x_{i+(\\text{order}-1) *
\\text{delay}}]
.. math:: Y=[y(1),y(2),...,y(N-(\\text{order}-1))*\\text{delay})]^T
References
----------
<NAME> and <NAME>, "Dispersion Entropy: A Measure for Time-Series Analysis,"
in IEEE Signal Processing Letters, vol. 23, no. 5, pp. 610-614, May 2016, doi: 10.1109/LSP.2016.2542881.
Examples
--------
Dispersion entropy with order=3 and classes=3
>>> from OrdinalEntroPy import *
>>> import numpy as np
>>> np.random.seed(1234567)
>>> x = np.random.rand(3000)
>>> # Return a value comprised between 0 and 1
>>> print(DE(x, order=3,classes=3,normalize=True))
0.9830685145488814
"""
# Dispersion Entropy
def DE(values,order=3,classes=3,delay=1,normalize=True):
    # map the time series to classes using the cumulative distribution function
mapped_values = get_ncdf_values(values,classes)
    # get all the permutations
str_permutations = get_str_permutation(mapped_values,order,delay)
# get set of indices for each unique permutation
permutation_indexes = get_permutation_index(str_permutations)
# get frequency of each permutation pattern
permutation_frequency = get_permutation_frequency(permutation_indexes,len(values),order)
# get shannon entropy of frequencies
entropy = get_shanon_entropy(permutation_frequency)
if normalize:
entropy = entropy/math.log2(classes**order)
return entropy
# Reverse Dispersion Entropy
def RDE(values,order=3,classes=3,delay=1,normalize=True):
mapped_values = get_ncdf_values(values,classes)
    # get all the permutations
str_permutations = get_str_permutation(mapped_values,order,delay)
permutation_indexes = get_permutation_index(str_permutations)
permutation_frequency = get_permutation_frequency(permutation_indexes,len(values),order)
entropy = np.square(permutation_frequency).sum() - (1/(classes**order))
if normalize:
entropy = entropy/(1 - (1/(classes**order)))
return entropy
# Reverse Permutation Entropy
def RPE(values,order=3,delay=1,normalize=True):
str_permutations = get_str_permutation_ordinal(values,order,delay)
permutation_indexes = get_permutation_index(str_permutations)
#print(set(str_permutations))
permutation_frequency = get_permutation_frequency(permutation_indexes,len(values),order)
#print(permutation_frequency)
    entropy = np.square(permutation_frequency)
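    # A hedged sketch of the likely remainder (assumption), mirroring RDE above
    # with classes**order replaced by the number of ordinal patterns, order!:
    #     entropy = np.square(permutation_frequency).sum() - (1/math.factorial(order))
    #     if normalize:
    #         entropy = entropy/(1 - (1/math.factorial(order)))
    #     return entropy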
"""
Name: FissionsAdd
brief: Adding fission particles to phase vectors for MCDC-TNT
Author: <NAME> (OR State Univ - <EMAIL>) CEMeNT
Date: Nov 18th 2021
"""
import numpy as np
def FissionsAdd(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, p_alive,
fis_count, nu_new_neutrons, fission_event_index, num_part, particle_speed, rands):
"""
    Add fission-produced neutrons to the phase space vectors after a sample event.
Parameters
----------
p_pos_x : vector double
PSV: x position of phase space particles (index is particle value).
p_pos_y : vector double
PSV: y position of phase space particles (index is particle value).
p_pos_z : vector double
PSV: z position of phase space particles (index is particle value).
p_mesh_cell : vector int
PSV: mesh cell location of a given particle.
p_dir_y : vector double
PSV: y direction unit value of phase space particles (index is particle value).
p_dir_z : vector double
PSV: z direction unit value of phase space particles (index is particle value).
p_dir_x : vector double
PSV: x direction unit value of phase space particles (index is particle value).
p_speed : vector double
PSV: speed (energy) or a particle (index is particle).
p_time : vector double
PSV: particle clock.
p_alive : vector bool
PSV: is it alive?
fis_count : int
        how many fissions were recorded in the sample event.
nu_new_neutrons : int
how many neutrons produced per fission.
fission_event_index : vector int
        indices of particles that underwent fission after the sample event.
num_part : int
        number of particles currently under transport (indexed from 1).
particle_speed : double
speed of fissioned particles.
rands : vector double
        produced from an RNG; its length must be fis_count*nu*2.
Returns
-------
Phase space variables with new fissions added.
"""
k=0 #index for fission temp vectors
for i in range(fis_count):
for j in range(nu_new_neutrons):
# Position
p_pos_x[k+num_part] = p_pos_x[fission_event_index[i]]
p_mesh_cell[k+num_part] = p_mesh_cell[fission_event_index[i]]
p_pos_y[k+num_part] = p_pos_y[fission_event_index[i]]
p_pos_z[k+num_part] = p_pos_z[fission_event_index[i]]
# print("fission particle produced")
# print("from particle {0} and indexed as particle {1}".format(fission_event_index[i], k+num_part))
# print("produced at: {0}".format(p_pos_x[k+num_part]))
# Direction
# Sample polar and azimuthal angles uniformly
mu = 2.0*rands[4*i+2*j] - 1.0
            azi = 2.0*np.pi*rands[4*i+2*j+1]  # azimuth uniform over [0, 2*pi)
# Convert to Cartesian coordinate
c = (1.0 - mu**2)**0.5
p_dir_y[k+num_part] = np.cos(azi)*c
p_dir_z[k+num_part] = np.sin(azi)*c
p_dir_x[k+num_part] = mu
# Speed
p_speed[k+num_part] = particle_speed
# Time
p_time[k+num_part] = p_time[fission_event_index[i]]
# Flags
p_alive[k+num_part] = True
k+=1
return(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, p_alive, k)
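# A hedged usage note (assumption, not part of the original module): the rands
# bank holds two uniform samples (mu and azimuth) per produced neutron, so a
# caller would size it as, e.g.,
#     rng = np.random.default_rng(42)
#     rands = rng.random(fis_count * nu_new_neutrons * 2)
# and the phase-space vectors need room for num_part + fis_count*nu_new_neutrons
# entries. Note that the 4*i + 2*j indexing above assumes nu_new_neutrons == 2.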
def test_FissionsAdd():
L = 1
dx = .25
N_m = 4
num_part = 3
    p_pos_x = np.array([.55, 3, 5])
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'cellPoseUI.ui'
import numpy as np
import sys, os, pathlib, warnings, datetime, tempfile, glob, time, threading
from natsort import natsorted
from PyQt5 import QtCore, QtGui, QtWidgets, Qt
import pyqtgraph as pg
import cv2
from scellseg.guis import guiparts, iopart, menus, plot
from scellseg import models, utils, transforms, dynamics, dataset, io
from scellseg.dataset import DatasetShot, DatasetQuery
from scellseg.contrast_learning.dataset import DatasetPairEval
from skimage.measure import regionprops
from tqdm import trange
from math import floor, ceil
from torch.utils.data import DataLoader
try:
import matplotlib.pyplot as plt
MATPLOTLIB = True
except:
MATPLOTLIB = False
class Ui_MainWindow(QtGui.QMainWindow):
"""UI Widget Initialize and UI Layout Initialize,
With any bug or problem, please do connact us from Github Issue"""
def __init__(self, image=None):
super(Ui_MainWindow, self).__init__()
if image is not None:
self.filename = image
iopart._load_image(self, self.filename)
self.now_pyfile_path = os.path.dirname(os.path.abspath(__file__)).replace('\\', '/')
def setupUi(self, MainWindow, image=None):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1420, 800)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(self.now_pyfile_path + "/assets/logo.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.CrossCursor)
menus.mainmenu(self)
menus.editmenu(self)
menus.helpmenu(self)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_2.setContentsMargins(6, 6, 6, 6)
self.verticalLayout_2.setSpacing(6)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.splitter = QtWidgets.QSplitter(self.centralwidget)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.splitter2 = QtWidgets.QSplitter()
self.splitter2.setOrientation(QtCore.Qt.Horizontal)
self.splitter2.setObjectName("splitter2")
self.scrollArea = QtWidgets.QScrollArea(self.splitter)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
# self.scrollAreaWidgetContents.setFixedWidth(500)
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 1500, 848))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
# self.TableModel = QtGui.QStandardItemModel(self.tableRow, self.tableCol)
# self.TableModel.setHorizontalHeaderLabels(["INDEX", "NAME"])
# self.TableView = QtGui.QTableView()
# self.TableView.setModel(self.TableModel)
self.mainLayout = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)
self.mainLayout.setContentsMargins(0, 0, 0, 0)
self.mainLayout.setSpacing(0)
self.mainLayout.setObjectName("mainLayout")
self.previous_button = QtWidgets.QPushButton("previous image [Ctrl + ←]")
self.load_folder = QtWidgets.QPushButton("load image folder ")
self.next_button = QtWidgets.QPushButton("next image [Ctrl + →]")
self.previous_button.setShortcut(Qt.QKeySequence.MoveToPreviousWord)
self.next_button.setShortcut(Qt.QKeySequence.MoveToNextWord)
self.mainLayout.addWidget(self.previous_button, 1, 1, 1, 1)
self.mainLayout.addWidget(self.load_folder, 1, 2, 1, 1)
self.mainLayout.addWidget(self.next_button, 1, 3, 1, 1)
self.previous_button.clicked.connect(self.PreImBntClicked)
self.next_button.clicked.connect(self.NextImBntClicked)
self.load_folder.clicked.connect(self.OpenDirBntClicked)
# leftside cell list widget
self.listView = QtWidgets.QTableView()
self.myCellList = []
self.listmodel = Qt.QStandardItemModel(0,1)
self.listmodel.setHorizontalHeaderLabels(["Annotation"])
# self.listmodel.setHorizontalHeaderItem(0, QtWidgets.QTableWidgetItem())
self.listView.horizontalHeader().setDefaultAlignment(QtCore.Qt.AlignLeft)
# self.listView.horizontalHeader().setStyle("background-color: #F0F0F0")
# self.listView.horizontalHeader().setVisible(False)
self.listView.verticalHeader().setVisible(False)
for i in range(len(self.myCellList)):
self.listmodel.setItem(i,Qt.QStandardItem(self.myCellList[i]))
self.listView.horizontalHeader().setDefaultSectionSize(140)
self.listView.setMaximumWidth(120)
self.listView.setModel(self.listmodel)
self.listView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.listView.AdjustToContents
self.listView.customContextMenuRequested.connect(self.show_menu)
# self.listView.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.listView.clicked.connect(self.showChoosen)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.toolBox = QtWidgets.QToolBox(self.splitter)
self.toolBox.setObjectName("toolBox")
self.toolBox.setMaximumWidth(340)
self.page = QtWidgets.QWidget()
self.page.setFixedWidth(340)
self.page.setObjectName("page")
self.gridLayout = QtWidgets.QGridLayout(self.page)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(6)
self.gridLayout.setObjectName("gridLayout")
# cross-hair/Draw area
self.vLine = pg.InfiniteLine(angle=90, movable=False)
self.hLine = pg.InfiniteLine(angle=0, movable=False)
self.layer_off = False
self.masksOn = True
self.win = pg.GraphicsLayoutWidget()
self.state_label = pg.LabelItem("Scellseg has been initialized!")
self.win.addItem(self.state_label, 3, 0)
self.win.scene().sigMouseClicked.connect(self.plot_clicked)
self.win.scene().sigMouseMoved.connect(self.mouse_moved)
self.make_viewbox()
bwrmap = make_bwr()
self.bwr = bwrmap.getLookupTable(start=0.0, stop=255.0, alpha=False)
self.cmap = []
# spectral colormap
self.cmap.append(make_spectral().getLookupTable(start=0.0, stop=255.0, alpha=False))
# single channel colormaps
for i in range(3):
self.cmap.append(make_cmap(i).getLookupTable(start=0.0, stop=255.0, alpha=False))
if MATPLOTLIB:
self.colormap = (plt.get_cmap('gist_ncar')(np.linspace(0.0, .9, 1000)) * 255).astype(np.uint8)
else:
self.colormap = ((np.random.rand(1000, 3) * 0.8 + 0.1) * 255).astype(np.uint8)
self.is_stack = True # always loading images of same FOV
# if called with image, load it
# if image is not None:
# self.filename = image
# iopart._load_image(self, self.filename)
self.setAcceptDrops(True)
self.win.show()
self.show()
self.splitter2.addWidget(self.listView)
self.splitter2.addWidget(self.win)
self.mainLayout.addWidget(self.splitter2,0,1,1,3)
self.label_2 = QtWidgets.QLabel(self.page)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 7, 0, 1, 1)
self.brush_size = 3
self.BrushChoose = QtWidgets.QComboBox()
self.BrushChoose.addItems(["1", "3", "5", "7", "9", "11", "13", "15", "17", "19"])
self.BrushChoose.currentIndexChanged.connect(self.brush_choose)
self.gridLayout.addWidget(self.BrushChoose, 7, 1, 1, 1)
# turn on single stroke mode
self.sstroke_On = True
self.SSCheckBox = QtWidgets.QCheckBox(self.page)
self.SSCheckBox.setObjectName("SSCheckBox")
self.SSCheckBox.setChecked(True)
self.SSCheckBox.toggled.connect(self.toggle_sstroke)
self.gridLayout.addWidget(self.SSCheckBox, 8, 0, 1, 1)
self.eraser_button = QtWidgets.QCheckBox(self.page)
self.eraser_button.setObjectName("Edit mask")
self.eraser_button.setChecked(False)
self.eraser_button.toggled.connect(self.eraser_model_change)
self.eraser_button.setToolTip("Right-click to add pixels\nShift+Right-click to delete pixels")
self.gridLayout.addWidget(self.eraser_button, 9, 0, 1, 1)
self.CHCheckBox = QtWidgets.QCheckBox(self.page)
self.CHCheckBox.setObjectName("CHCheckBox")
self.CHCheckBox.toggled.connect(self.cross_hairs)
self.gridLayout.addWidget(self.CHCheckBox, 10, 0, 1, 1)
self.MCheckBox = QtWidgets.QCheckBox(self.page)
self.MCheckBox.setChecked(True)
self.MCheckBox.setObjectName("MCheckBox")
self.MCheckBox.setChecked(True)
self.MCheckBox.toggled.connect(self.toggle_masks)
self.gridLayout.addWidget(self.MCheckBox, 11, 0, 1, 1)
self.OCheckBox = QtWidgets.QCheckBox(self.page)
self.outlinesOn = True
self.OCheckBox.setChecked(True)
self.OCheckBox.setObjectName("OCheckBox")
self.OCheckBox.toggled.connect(self.toggle_masks)
self.gridLayout.addWidget(self.OCheckBox, 12, 0, 1, 1)
self.scale_on = True
self.SCheckBox = QtWidgets.QCheckBox(self.page)
self.SCheckBox.setObjectName("SCheckBox")
self.SCheckBox.setChecked(True)
self.SCheckBox.toggled.connect(self.toggle_scale)
self.gridLayout.addWidget(self.SCheckBox, 13, 0, 1, 1)
self.autosaveOn = True
self.ASCheckBox = QtWidgets.QCheckBox(self.page)
self.ASCheckBox.setObjectName("ASCheckBox")
self.ASCheckBox.setChecked(True)
self.ASCheckBox.toggled.connect(self.toggle_autosave)
self.ASCheckBox.setToolTip("If ON, masks/npy/list will be autosaved")
self.gridLayout.addWidget(self.ASCheckBox, 14, 0, 1, 1)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem, 15, 0, 1, 2)
# self.eraser_combobox = QtWidgets.QComboBox()
# self.eraser_combobox.addItems(["Pixal delete", "Pixal add"])
# self.gridLayout.addWidget(self.eraser_combobox, 8, 1, 1, 1)
self.RGBChoose = guiparts.RGBRadioButtons(self, 3, 1)
self.RGBDropDown = QtGui.QComboBox()
self.RGBDropDown.addItems(["rgb", "gray", "spectral", "red", "green", "blue"])
self.RGBDropDown.currentIndexChanged.connect(self.color_choose)
self.gridLayout.addWidget(self.RGBDropDown, 3, 0, 1, 1)
self.saturation_label = QtWidgets.QLabel("Saturation")
self.gridLayout.addWidget(self.saturation_label, 0, 0, 1, 1)
self.autobtn = QtGui.QCheckBox('Auto-adjust')
self.autobtn.setChecked(True)
self.autobtn.toggled.connect(self.toggle_autosaturation)
self.gridLayout.addWidget(self.autobtn, 0, 1, 1, 1)
self.currentZ = 0
self.zpos = QtGui.QLineEdit()
self.zpos.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.zpos.setText(str(self.currentZ))
self.zpos.returnPressed.connect(self.compute_scale)
self.zpos.setFixedWidth(20)
# self.gridLayout.addWidget(self.zpos, 0, 2, 1, 1)
self.slider = guiparts.RangeSlider(self)
self.slider.setMaximum(255)
self.slider.setMinimum(0)
self.slider.setHigh(255)
self.slider.setLow(0)
self.gridLayout.addWidget(self.slider, 2, 0, 1, 4)
self.slider.setObjectName("rangeslider")
self.page_2 = QtWidgets.QWidget()
self.page_2.setFixedWidth(340)
self.page_2.setObjectName("page_2")
self.gridLayout_2 = QtWidgets.QGridLayout(self.page_2)
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_2.setObjectName("gridLayout_2")
page2_l = 0
self.useGPU = QtWidgets.QCheckBox(self.page_2)
self.useGPU.setObjectName("useGPU")
self.gridLayout_2.addWidget(self.useGPU, page2_l, 0, 1, 1)
self.check_gpu()
page2_l += 1
self.label_4 = QtWidgets.QLabel(self.page_2)
self.label_4.setObjectName("label_4")
self.gridLayout_2.addWidget(self.label_4, page2_l, 0, 1, 1)
self.ModelChoose = QtWidgets.QComboBox(self.page_2)
self.ModelChoose.setObjectName("ModelChoose")
self.project_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + os.path.sep + ".")
self.model_dir = os.path.join(self.project_path, 'assets', 'pretrained_models')
print('self.model_dir', self.model_dir)
self.ModelChoose.addItem("")
self.ModelChoose.addItem("")
self.ModelChoose.addItem("")
self.gridLayout_2.addWidget(self.ModelChoose, page2_l, 1, 1, 1)
page2_l += 1
self.label_5 = QtWidgets.QLabel(self.page_2)
self.label_5.setObjectName("label_5")
self.gridLayout_2.addWidget(self.label_5, page2_l, 0, 1, 1)
self.jCBChanToSegment = QtWidgets.QComboBox(self.page_2)
self.jCBChanToSegment.setObjectName("jCBChanToSegment")
self.jCBChanToSegment.addItems(["gray", "red", "green", "blue"])
self.jCBChanToSegment.setCurrentIndex(0)
self.gridLayout_2.addWidget(self.jCBChanToSegment, page2_l, 1, 1, 1)
page2_l += 1
self.label_6 = QtWidgets.QLabel(self.page_2)
self.label_6.setObjectName("label_6")
self.gridLayout_2.addWidget(self.label_6, page2_l, 0, 1, 1)
self.jCBChan2 = QtWidgets.QComboBox(self.page_2)
self.jCBChan2.setObjectName("jCBChan2")
self.jCBChan2.addItems(["none", "red", "green", "blue"])
self.jCBChan2.setCurrentIndex(0)
self.gridLayout_2.addWidget(self.jCBChan2, page2_l, 1, 1, 1)
page2_l += 1
self.model_choose_btn = QtWidgets.QPushButton("Model file")
self.model_choose_btn.clicked.connect(self.model_file_dir_choose)
self.gridLayout_2.addWidget(self.model_choose_btn, page2_l, 0, 1, 1)
self.model_choose_btn = QtWidgets.QPushButton("Reset pre-trained")
self.model_choose_btn.clicked.connect(self.reset_pretrain_model)
self.gridLayout_2.addWidget(self.model_choose_btn, page2_l, 1, 1, 1)
page2_l += 1
self.label_null = QtWidgets.QLabel("")
self.gridLayout_2.addWidget(self.label_null, page2_l, 0, 1, 1)
slider_image_path = self.now_pyfile_path + '/assets/slider_handle.png'
self.sliderSheet = [
'QSlider::groove:vertical {',
'background-color: #D3D3D3;',
'position: absolute;',
'left: 4px; right: 4px;',
'}',
'',
'QSlider::groove:horizontal{',
'background-color:#D3D3D3;',
'position: absolute;',
'top: 4px; bottom: 4px;',
'}',
'',
'QSlider::handle:vertical {',
'height: 10px;',
'background-color: {0:s};'.format('#A9A9A9'),
'margin: 0 -4px;',
'}',
'',
'QSlider::handle:horizontal{',
'width: 10px;',
'border-image: url({0:s});'.format(slider_image_path),
'margin: -4px 0px -4px 0px;',
'}',
'QSlider::sub-page:horizontal',
'{',
'background-color: {0:s};'.format('#A9A9A9'),
'}',
'',
'QSlider::add-page {',
'background-color: {0:s};'.format('#D3D3D3'),
'}',
'',
'QSlider::sub-page {',
'background-color: {0:s};'.format('#D3D3D3'),
'}',
]
page2_l += 1
self.label_seg = QtWidgets.QLabel("Run seg for image in window")
self.gridLayout_2.addWidget(self.label_seg, page2_l, 0, 1, 4)
self.label_seg.setObjectName('label_seg')
page2_l += 1
self.label_3 = QtWidgets.QLabel(self.page_2)
self.label_3.setObjectName("label_3")
self.gridLayout_2.addWidget(self.label_3, page2_l, 0, 1, 4)
page2_l += 1
self.prev_selected = 0
self.diameter = 30
# self.Diameter = QtWidgets.QSpinBox(self.page_2)
self.Diameter = QtWidgets.QLineEdit(self.page_2)
self.Diameter.setObjectName("Diameter")
self.Diameter.setText(str(self.diameter))
self.Diameter.setFixedWidth(100)
self.Diameter.editingFinished.connect(self.compute_scale)
self.gridLayout_2.addWidget(self.Diameter, page2_l, 0, 1, 2)
self.SizeButton = QtWidgets.QPushButton(self.page_2)
self.SizeButton.setObjectName("SizeButton")
self.gridLayout_2.addWidget(self.SizeButton, page2_l, 1, 1, 1)
self.SizeButton.clicked.connect(self.calibrate_size)
self.SizeButton.setEnabled(False)
page2_l += 1
self.label_mode = QtWidgets.QLabel("Inference mode")
self.gridLayout_2.addWidget(self.label_mode, page2_l, 0, 1, 1)
self.NetAvg = QtWidgets.QComboBox(self.page_2)
self.NetAvg.setObjectName("NetAvg")
self.NetAvg.addItems(["run 1 net (fast)", "+ resample (slow)"])
self.gridLayout_2.addWidget(self.NetAvg, page2_l, 1, 1, 1)
page2_l += 1
self.invert = QtWidgets.QCheckBox(self.page_2)
self.invert.setObjectName("invert")
self.gridLayout_2.addWidget(self.invert, page2_l, 0, 1, 1)
page2_l += 1
self.ModelButton = QtWidgets.QPushButton(' Run segmentation ')
self.ModelButton.setObjectName("runsegbtn")
self.ModelButton.clicked.connect(self.compute_model)
self.gridLayout_2.addWidget(self.ModelButton, page2_l, 0, 1, 2)
self.ModelButton.setEnabled(False)
page2_l += 1
self.label_7 = QtWidgets.QLabel(self.page_2)
self.label_7.setObjectName("label_7")
self.gridLayout_2.addWidget(self.label_7, page2_l, 0, 1, 1)
self.threshold = 0.4
self.threshslider = QtWidgets.QSlider(self.page_2)
self.threshslider.setOrientation(QtCore.Qt.Horizontal)
self.threshslider.setObjectName("threshslider")
self.threshslider.setMinimum(1.0)
self.threshslider.setMaximum(30.0)
self.threshslider.setValue(31 - 4)
self.threshslider.valueChanged.connect(self.compute_cprob)
self.threshslider.setEnabled(False)
self.threshslider.setStyleSheet('\n'.join(self.sliderSheet))
self.gridLayout_2.addWidget(self.threshslider, page2_l, 1, 1, 1)
self.threshslider.setToolTip("Value: " + str(self.threshold))
page2_l += 1
self.label_8 = QtWidgets.QLabel(self.page_2)
self.label_8.setObjectName("label_8")
self.gridLayout_2.addWidget(self.label_8, page2_l, 0, 1, 1)
self.probslider = QtWidgets.QSlider(self.page_2)
self.probslider.setOrientation(QtCore.Qt.Horizontal)
self.probslider.setObjectName("probslider")
self.probslider.setStyleSheet('\n'.join(self.sliderSheet))
self.gridLayout_2.addWidget(self.probslider, page2_l, 1, 1, 1)
self.probslider.setMinimum(-6.0)
self.probslider.setMaximum(6.0)
self.probslider.setValue(0.0)
self.cellprob = 0.5
self.probslider.valueChanged.connect(self.compute_cprob)
self.probslider.setEnabled(False)
self.probslider.setToolTip("Value: " + str(self.cellprob))
page2_l += 1
self.label_batchseg = QtWidgets.QLabel("Batch segmentation")
self.label_batchseg.setObjectName('label_batchseg')
self.gridLayout_2.addWidget(self.label_batchseg, page2_l, 0, 1, 4)
page2_l += 1
self.label_bz = QtWidgets.QLabel("Batch size")
self.gridLayout_2.addWidget(self.label_bz, page2_l, 0, 1, 1)
self.bz_line = QtWidgets.QLineEdit()
self.bz_line.setPlaceholderText('Default: 8')
self.bz_line.setFixedWidth(120)
self.gridLayout_2.addWidget(self.bz_line, page2_l, 1, 1, 1)
page2_l += 1
self.dataset_inference_bnt = QtWidgets.QPushButton("Data path")
self.gridLayout_2.addWidget(self.dataset_inference_bnt, page2_l, 0, 1, 1)
self.dataset_inference_bnt.clicked.connect(self.batch_inference_dir_choose)
self.batch_inference_bnt = QtWidgets.QPushButton("Run batch")
self.batch_inference_bnt.setObjectName("binferbnt")
self.batch_inference_bnt.clicked.connect(self.batch_inference)
self.gridLayout_2.addWidget(self.batch_inference_bnt, page2_l, 1, 1, 1)
self.batch_inference_bnt.setEnabled(False)
page2_l += 1
self.label_getsingle = QtWidgets.QLabel("Get single instance")
self.label_getsingle.setObjectName('label_getsingle')
self.gridLayout_2.addWidget(self.label_getsingle, page2_l,0,1,2)
page2_l += 1
self.single_dir_bnt = QtWidgets.QPushButton("Data path")
self.single_dir_bnt.clicked.connect(self.single_dir_choose)
self.gridLayout_2.addWidget(self.single_dir_bnt, page2_l,0,1,1)
self.single_cell_btn = QtWidgets.QPushButton("Run batch")
self.single_cell_btn.setObjectName('single_cell_btn')
self.single_cell_btn.clicked.connect(self.get_single_cell)
self.gridLayout_2.addWidget(self.single_cell_btn, page2_l,1,1,1)
self.single_cell_btn.setEnabled(False)
page2_l += 1
spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem2, page2_l, 0, 1, 2)
self.page_3 = QtWidgets.QWidget()
self.page_3.setFixedWidth(340)
self.page_3.setObjectName("page_3")
self.progress = QtWidgets.QProgressBar()
self.progress.setProperty("value", 0)
self.progress.setAlignment(QtCore.Qt.AlignCenter)
self.progress.setObjectName("progress")
self.gridLayout_3 = QtWidgets.QGridLayout(self.page_3)
self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
self.gridLayout_3.setObjectName("gridLayout_3")
self.ftuseGPU = QtWidgets.QCheckBox("Use GPU")
self.ftuseGPU.setObjectName("ftuseGPU")
self.gridLayout_3.addWidget(self.ftuseGPU, 0, 0, 1, 2)
self.check_ftgpu()
self.ftdirbtn = QtWidgets.QPushButton("Dataset path")
self.ftdirbtn.clicked.connect(self.fine_tune_dir_choose)
self.gridLayout_3.addWidget(self.ftdirbtn, 0, 2, 1, 2)
self.label_10 = QtWidgets.QLabel("Model architecture")
self.gridLayout_3.addWidget(self.label_10, 1, 0, 1, 2)
self.ftmodelchooseBnt = QtWidgets.QComboBox()
self.ftmodelchooseBnt.addItems(["scellseg", "cellpose", "hover"])
self.gridLayout_3.addWidget(self.ftmodelchooseBnt, 1, 2, 1, 2)
self.label_11 = QtWidgets.QLabel("Chan to segment")
self.gridLayout_3.addWidget(self.label_11, 2, 0, 1, 2)
self.chan1chooseBnt = QtWidgets.QComboBox()
self.chan1chooseBnt.addItems(["gray", "red", "green", "blue"])
self.chan1chooseBnt.setCurrentIndex(0)
self.gridLayout_3.addWidget(self.chan1chooseBnt, 2, 2, 1, 2)
self.label_12 = QtWidgets.QLabel("Chan2 (optional)")
self.gridLayout_3.addWidget(self.label_12, 3, 0, 1, 2)
self.chan2chooseBnt = QtWidgets.QComboBox()
self.chan2chooseBnt.addItems(["none", "red", "green", "blue"])
self.chan2chooseBnt.setCurrentIndex(0)
self.gridLayout_3.addWidget(self.chan2chooseBnt, 3, 2, 1, 2)
self.label_13 = QtWidgets.QLabel("Fine-tune strategy")
self.gridLayout_3.addWidget(self.label_13, 4, 0, 1, 2)
self.stmodelchooseBnt = QtWidgets.QComboBox()
self.stmodelchooseBnt.addItems(["contrastive", "classic"])
self.gridLayout_3.addWidget(self.stmodelchooseBnt, 4, 2, 1, 2)
self.label_14 = QtWidgets.QLabel("Epoch")
self.gridLayout_3.addWidget(self.label_14, 5, 0, 1, 2)
self.epoch_line = QtWidgets.QLineEdit()
self.epoch_line.setPlaceholderText('Default: 100')
self.gridLayout_3.addWidget(self.epoch_line, 5, 2, 1, 2)
self.label_ftbz = QtWidgets.QLabel("Batch size")
self.gridLayout_3.addWidget(self.label_ftbz, 6, 0, 1, 2)
self.ftbz_line = QtWidgets.QLineEdit()
self.ftbz_line.setPlaceholderText('Default: 8')
self.gridLayout_3.addWidget(self.ftbz_line, 6, 2, 1, 2)
self.ftbnt = QtWidgets.QPushButton("Start fine-tuning")
self.ftbnt.setObjectName('ftbnt')
self.ftbnt.clicked.connect(self.fine_tune)
self.gridLayout_3.addWidget(self.ftbnt, 7, 0, 1, 4)
self.ftbnt.setEnabled(False)
spacerItem3 = QtWidgets.QSpacerItem(20, 320, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem3, 8, 0, 1, 1)
#initialize scroll size
self.scroll = QtGui.QScrollBar(QtCore.Qt.Horizontal)
# self.scroll.setMaximum(10)
# self.scroll.valueChanged.connect(self.move_in_Z)
# self.gridLayout_3.addWidget(self.scroll)
spacerItem2 = QtWidgets.QSpacerItem(20, 320, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem2)
self.toolBox.addItem(self.page, "")
self.toolBox.addItem(self.page_3, "")
self.toolBox.addItem(self.page_2, "")
self.verticalLayout_2.addWidget(self.splitter)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
self.toolBox.setCurrentIndex(2)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.centralwidget.setFocusPolicy(QtCore.Qt.StrongFocus)
self.reset()
def show_menu(self, point):
# print(point.x())
# item = self.listView.itemAt(point)
# print(item)
temp_cell_idx = self.listView.rowAt(point.y())
self.list_select_cell(temp_cell_idx+1)
# print(self.myCellList[temp_cell_idx])
if self.listView.rowAt(point.y()) >= 0:
self.contextMenu = QtWidgets.QMenu()
self.actionA = QtGui.QAction("Delete this cell", self)
self.actionB = QtGui.QAction("Edit this cell", self)
self.contextMenu.addAction(self.actionA)
self.contextMenu.addAction(self.actionB)
self.contextMenu.popup(QtGui.QCursor.pos())
self.actionA.triggered.connect(lambda: self.remove_cell(temp_cell_idx + 1))
self.actionB.triggered.connect(lambda: self.edit_cell(temp_cell_idx + 1))
self.contextMenu.show()
def edit_cell(self, index):
self.select_cell(index)
self.eraser_button.setChecked(True)
self.toolBox.setCurrentIndex(0)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Scellseg"))
self.CHCheckBox.setText(_translate("MainWindow", "Crosshair on [C]"))
self.MCheckBox.setText(_translate("MainWindow", "Masks on [X]"))
self.label_2.setText(_translate("MainWindow", "Brush size"))
self.OCheckBox.setText(_translate("MainWindow", "Outlines on [Z]"))
# self.ServerButton.setText(_translate("MainWindow", "send manual seg. to server"))
self.toolBox.setItemText(self.toolBox.indexOf(self.page), _translate("MainWindow", "View and Draw"))
self.SizeButton.setText(_translate("MainWindow", "Calibrate diam"))
self.label_3.setText(_translate("MainWindow", "Cell diameter (pixels):"))
self.useGPU.setText(_translate("MainWindow", "Use GPU"))
self.SCheckBox.setText(_translate("MainWindow", "Scale disk on [S]"))
self.ASCheckBox.setText(_translate("MainWindow", "Autosave [P]"))
self.SSCheckBox.setText(_translate("MainWindow", "Single stroke"))
self.eraser_button.setText(_translate("MainWindow", "Edit mask [E]"))
self.ModelChoose.setItemText(0, _translate("MainWindow", "scellseg"))
self.ModelChoose.setItemText(1, _translate("MainWindow", "cellpose"))
self.ModelChoose.setItemText(2, _translate("MainWindow", "hover"))
self.invert.setText(_translate("MainWindow", "Invert grayscale"))
self.label_4.setText(_translate("MainWindow", "Model architecture"))
self.label_5.setText(_translate("MainWindow", "Chan to segment"))
self.label_6.setText(_translate("MainWindow", "Chan2 (optional)"))
self.toolBox.setItemText(self.toolBox.indexOf(self.page_2), _translate("MainWindow", "Inference"))
self.label_7.setText(_translate("MainWindow", "Model match TH"))
self.label_8.setText(_translate("MainWindow", "Cell prob TH"))
self.toolBox.setItemText(self.toolBox.indexOf(self.page_3), _translate("MainWindow", "Fine-tune"))
# self.menuFile.setTitle(_translate("MainWindow", "File"))
# self.menuEdit.setTitle(_translate("MainWindow", "Edit"))
# self.menuHelp.setTitle(_translate("MainWindow", "Help"))
self.ImFolder = ''
self.ImNameSet = []
self.CurImId = 0
self.CurFolder = os.getcwd()
self.DefaultImFolder = self.CurFolder
def setWinTop(self):
print('get')
def OpenDirDropped(self, curFile=None):
# dir dropped callback func
if self.ImFolder != '':
self.ImNameSet = []
self.ImNameRowSet = os.listdir(self.ImFolder)
# print(self.ImNameRowSet)
for tmp in self.ImNameRowSet:
ext = os.path.splitext(tmp)[-1]
if ext in ['.png', '.jpg', '.jpeg', '.tif', '.tiff', '.jfif'] and '_mask' not in tmp:
self.ImNameSet.append(tmp)
self.ImNameSet.sort()
self.ImPath = self.ImFolder + r'/' + self.ImNameSet[0]
ImNameSetNosuffix = [os.path.splitext(imNameSeti)[0] for imNameSeti in self.ImNameSet]
# pix = QtGui.QPixmap(self.ImPath)
# self.ImShowLabel.setPixmap(pix)
if curFile is not None:
curFile = os.path.splitext(curFile)[0]
try:
self.CurImId = ImNameSetNosuffix.index(curFile)
print(self.CurImId)
except:
curFile = curFile.replace('_cp_masks', '')
curFile = curFile.replace('_masks', '')
self.CurImId = ImNameSetNosuffix.index(curFile)
print(self.CurImId)
return
# self.state_label.setText("", color='#FF6A56')
else:
self.CurImId = 0
iopart._load_image(self, filename=self.ImPath)
self.initialize_listView()
else:
print('Please Find Another File Folder')
def OpenDirBntClicked(self):
# dir choosing callback function
self.ImFolder = QtWidgets.QFileDialog.getExistingDirectory(None, "select folder", self.DefaultImFolder)
if self.ImFolder != '':
self.ImNameSet = []
self.ImNameRowSet = os.listdir(self.ImFolder)
# print(self.ImNameRowSet)
for tmp in self.ImNameRowSet:
ext = os.path.splitext(tmp)[-1]
if ext in ['.png', '.jpg', '.jpeg', '.tif', '.tiff', '.jfif'] and '_mask' not in tmp:
self.ImNameSet.append(tmp)
self.ImNameSet.sort()
print(self.ImNameSet)
self.ImPath = self.ImFolder + r'/' + self.ImNameSet[0]
# pix = QtGui.QPixmap(self.ImPath)
# self.ImShowLabel.setPixmap(pix)
self.CurImId = 0
iopart._load_image(self, filename=self.ImPath)
self.initialize_listView()
else:
print('Please Find Another File Folder')
def PreImBntClicked(self):
self.auto_save()
# show previous image
self.ImFolder = self.ImFolder
self.ImNameSet = self.ImNameSet
self.CurImId = self.CurImId
self.ImNum = len(self.ImNameSet)
print(self.ImFolder, self.ImNameSet)
self.CurImId = self.CurImId - 1
        if self.CurImId >= 0:  # the first image has no previous one
self.ImPath = self.ImFolder + r'/' + self.ImNameSet[self.CurImId]
iopart._load_image(self, filename=self.ImPath)
self.initialize_listView()
if self.CurImId < 0:
self.CurImId = 0
self.state_label.setText("This is the first image", color='#FF6A56')
def NextImBntClicked(self):
self.auto_save()
# show next image
self.ImFolder = self.ImFolder
self.ImNameSet = self.ImNameSet
self.CurImId = self.CurImId
self.ImNum = len(self.ImNameSet)
if self.CurImId < self.ImNum - 1:
self.ImPath = self.ImFolder + r'/' + self.ImNameSet[self.CurImId + 1]
iopart._load_image(self, filename=self.ImPath)
self.initialize_listView()
self.CurImId = self.CurImId + 1
else:
self.state_label.setText("This is the last image", color='#FF6A56')
def eraser_model_change(self):
if self.eraser_button.isChecked() == True:
self.outlinesOn = False
self.OCheckBox.setChecked(False)
# self.OCheckBox.setEnabled(False)
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.CrossCursor)
# self.cur_size = self.brush_size * 6
# cursor = Qt.QPixmap("./assets/eraser.png")
# cursor_scaled = cursor.scaled(self.cur_size, self.cur_size)
# cursor_set = Qt.QCursor(cursor_scaled, self.cur_size/2, self.cur_size/2)
# QtWidgets.QApplication.setOverrideCursor(cursor_set)
self.update_plot()
else:
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.CrossCursor)
def showChoosen(self, item):
temp_cell_idx = int(item.row())
self.list_select_cell(int(temp_cell_idx) + 1)
def save_cell_list(self):
self.listView.selectAll()
self.myCellList = []
for item in self.listView.selectedIndexes():
data = item.data()
self.myCellList.append(data)
self.cell_list_name = os.path.splitext(self.filename)[0] + "_instance_list.txt"
np.savetxt(self.cell_list_name, np.array(self.myCellList), fmt="%s")
self.listView.clearSelection()
def save_cell_list_menu(self):
self.listView.selectAll()
self.myCellList = []
for item in self.listView.selectedIndexes():
data = item.data()
self.myCellList.append(data)
self.cell_list_name = os.path.splitext(self.filename)[0] + "_instance_list.txt"
np.savetxt(self.cell_list_name, np.array(self.myCellList), fmt="%s")
self.state_label.setText("Saved outlines", color='#39B54A')
self.listView.clearSelection()
def help_window(self):
HW = guiparts.HelpWindow(self)
HW.show()
def gui_window(self):
EG = guiparts.ExampleGUI(self)
EG.show()
def toggle_autosave(self):
if self.ASCheckBox.isChecked():
self.autosaveOn = True
else:
self.autosaveOn = False
print('self.autosaveOn', self.autosaveOn)
def toggle_sstroke(self):
if self.SSCheckBox.isChecked():
self.sstroke_On = True
else:
self.sstroke_On = False
print('self.sstroke_On', self.sstroke_On)
def toggle_autosaturation(self):
if self.autobtn.isChecked():
self.compute_saturation()
self.update_plot()
def cross_hairs(self):
if self.CHCheckBox.isChecked():
self.p0.addItem(self.vLine, ignoreBounds=True)
self.p0.addItem(self.hLine, ignoreBounds=True)
else:
self.p0.removeItem(self.vLine)
self.p0.removeItem(self.hLine)
def plot_clicked(self, event):
if event.double():
if event.button() == QtCore.Qt.LeftButton:
print("will initialize the range")
if (event.modifiers() != QtCore.Qt.ShiftModifier and
event.modifiers() != QtCore.Qt.AltModifier):
try:
self.p0.setYRange(0,self.Ly+self.pr)
except:
self.p0.setYRange(0,self.Ly)
self.p0.setXRange(0,self.Lx)
def mouse_moved(self, pos):
# print('moved')
items = self.win.scene().items(pos)
for x in items:
if x == self.p0:
mousePoint = self.p0.mapSceneToView(pos)
if self.CHCheckBox.isChecked():
self.vLine.setPos(mousePoint.x())
self.hLine.setPos(mousePoint.y())
# else:
# QtWidgets.QApplication.restoreOverrideCursor()
# QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.DefaultCursor)
def color_choose(self):
self.color = self.RGBDropDown.currentIndex()
self.view = 0
self.RGBChoose.button(self.view).setChecked(True)
self.update_plot()
def update_ztext(self):
zpos = self.currentZ
try:
zpos = int(self.zpos.text())
except:
print('ERROR: zposition is not a number')
self.currentZ = max(0, min(self.NZ - 1, zpos))
self.zpos.setText(str(self.currentZ))
self.scroll.setValue(self.currentZ)
def calibrate_size(self):
model_type = self.ModelChoose.currentText()
pretrained_model = os.path.join(self.model_dir, model_type)
self.initialize_model(pretrained_model=pretrained_model, gpu=self.useGPU.isChecked(),
model_type=model_type)
diams, _ = self.model.sz.eval(self.stack[self.currentZ].copy(), invert=self.invert.isChecked(),
channels=self.get_channels(), progress=self.progress)
diams = np.maximum(5.0, diams)
print('estimated diameter of cells using %s model = %0.1f pixels' %
(self.current_model, diams))
self.state_label.setText('Estimated diameter of cells using %s model = %0.1f pixels' %
(self.current_model, diams), color='#969696')
self.Diameter.setText('%0.1f'%diams)
self.diameter = diams
self.compute_scale()
self.progress.setValue(100)
def enable_buttons(self):
# self.X2Up.setEnabled(True)
# self.X2Down.setEnabled(True)
self.ModelButton.setEnabled(True)
self.SizeButton.setEnabled(True)
self.saveSet.setEnabled(True)
self.savePNG.setEnabled(True)
self.saveOutlines.setEnabled(True)
self.saveCellList.setEnabled(True)
self.saveAll.setEnabled(True)
self.loadMasks.setEnabled(True)
self.loadManual.setEnabled(True)
self.loadCellList.setEnabled(True)
self.toggle_mask_ops()
self.update_plot()
self.setWindowTitle('Scellseg @ ' + self.filename)
def add_set(self):
if len(self.current_point_set) > 0:
# print(self.current_point_set)
# print(np.array(self.current_point_set).shape)
self.current_point_set = np.array(self.current_point_set)
while len(self.strokes) > 0:
self.remove_stroke(delete_points=False)
if len(self.current_point_set) > 8:
col_rand = np.random.randint(1000)
color = self.colormap[col_rand, :3]
median = self.add_mask(points=self.current_point_set, color=color)
if median is not None:
self.removed_cell = []
self.toggle_mask_ops()
self.cellcolors.append(color)
self.ncells += 1
self.add_list_item()
self.ismanual = np.append(self.ismanual, True)
# if self.NZ == 1:
# # only save after each cell if single image
# iopart._save_sets(self)
self.current_stroke = []
self.strokes = []
self.current_point_set = []
self.update_plot()
def add_mask(self, points=None, color=None):
# loop over z values
median = []
if points.shape[1] < 3:
points = np.concatenate((np.zeros((points.shape[0], 1), np.int32), points), axis=1)
zdraw = np.unique(points[:, 0])
zrange = np.arange(zdraw.min(), zdraw.max() + 1, 1, int)
zmin = zdraw.min()
pix = np.zeros((2, 0), np.uint16)
        mall = np.zeros((len(zrange), self.Ly, self.Lx), bool)
k = 0
for z in zdraw:
iz = points[:, 0] == z
vr = points[iz, 1]
vc = points[iz, 2]
# get points inside drawn points
mask = np.zeros((np.ptp(vr) + 4, np.ptp(vc) + 4), np.uint8)
pts = np.stack((vc - vc.min() + 2, vr - vr.min() + 2), axis=-1)[:, np.newaxis, :]
mask = cv2.fillPoly(mask, [pts], (255, 0, 0))
ar, ac = np.nonzero(mask)
ar, ac = ar + vr.min() - 2, ac + vc.min() - 2
# get dense outline
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
pvc, pvr = contours[-2][0].squeeze().T
vr, vc = pvr + vr.min() - 2, pvc + vc.min() - 2
# concatenate all points
ar, ac = np.hstack((np.vstack((vr, vc)), np.vstack((ar, ac))))
# if these pixels are overlapping with another cell, reassign them
ioverlap = self.cellpix[z][ar, ac] > 0
if (~ioverlap).sum() < 8:
print('ERROR: cell too small without overlaps, not drawn')
return None
elif ioverlap.sum() > 0:
ar, ac = ar[~ioverlap], ac[~ioverlap]
# compute outline of new mask
mask = np.zeros((np.ptp(ar) + 4, np.ptp(ac) + 4), np.uint8)
mask[ar - ar.min() + 2, ac - ac.min() + 2] = 1
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
pvc, pvr = contours[-2][0].squeeze().T
vr, vc = pvr + ar.min() - 2, pvc + ac.min() - 2
self.draw_mask(z, ar, ac, vr, vc, color)
median.append(np.array([np.median(ar), np.median(ac)]))
mall[z - zmin, ar, ac] = True
pix = np.append(pix, np.vstack((ar, ac)), axis=-1)
mall = mall[:, pix[0].min():pix[0].max() + 1, pix[1].min():pix[1].max() + 1].astype(np.float32)
ymin, xmin = pix[0].min(), pix[1].min()
if len(zdraw) > 1:
mall, zfill = interpZ(mall, zdraw - zmin)
for z in zfill:
mask = mall[z].copy()
ar, ac = np.nonzero(mask)
ioverlap = self.cellpix[z + zmin][ar + ymin, ac + xmin] > 0
if (~ioverlap).sum() < 5:
print('WARNING: stroke on plane %d not included due to overlaps' % z)
elif ioverlap.sum() > 0:
mask[ar[ioverlap], ac[ioverlap]] = 0
ar, ac = ar[~ioverlap], ac[~ioverlap]
# compute outline of mask
outlines = utils.masks_to_outlines(mask)
vr, vc = np.nonzero(outlines)
vr, vc = vr + ymin, vc + xmin
ar, ac = ar + ymin, ac + xmin
self.draw_mask(z + zmin, ar, ac, vr, vc, color)
self.zdraw.append(zdraw)
return median
def move_in_Z(self):
if self.loaded:
self.currentZ = min(self.NZ, max(0, int(self.scroll.value())))
self.zpos.setText(str(self.currentZ))
self.update_plot()
def make_viewbox(self):
# intialize the main viewport widget
# print("making viewbox")
self.p0 = guiparts.ViewBoxNoRightDrag(
parent=self,
lockAspect=True,
name="plot1",
border=[100, 100, 100],
invertY=True
)
# self.p0.setBackgroundColor(color='#292929')
self.brush_size = 3
self.win.addItem(self.p0, 0, 0)
self.p0.setMenuEnabled(False)
self.p0.setMouseEnabled(x=True, y=True)
self.img = pg.ImageItem(viewbox=self.p0, parent=self, axisOrder='row-major')
self.img.autoDownsample = False
# self.null_image = np.ones((200,200))
# self.img.setImage(self.null_image)
self.layer = guiparts.ImageDraw(viewbox=self.p0, parent=self)
self.layer.setLevels([0, 255])
self.scale = pg.ImageItem(viewbox=self.p0, parent=self)
self.scale.setLevels([0, 255])
self.p0.scene().contextMenuItem = self.p0
# self.p0.setMouseEnabled(x=False,y=False)
self.Ly, self.Lx = 512, 512
self.p0.addItem(self.img)
self.p0.addItem(self.layer)
self.p0.addItem(self.scale)
# guiparts.make_quadrants(self)
def get_channels(self):
channels = [self.jCBChanToSegment.currentIndex(), self.jCBChan2.currentIndex()]
return channels
def compute_saturation(self):
# compute percentiles from stack
self.saturation = []
self.slider._low = np.percentile(self.stack[0].astype(np.float32), 1)
self.slider._high = np.percentile(self.stack[0].astype(np.float32), 99)
for n in range(len(self.stack)):
print('n,', n)
self.saturation.append([np.percentile(self.stack[n].astype(np.float32), 1),
np.percentile(self.stack[n].astype(np.float32), 99)])
def keyReleaseEvent(self, event):
# print('self.loaded', self.loaded)
if self.loaded:
# self.p0.setMouseEnabled(x=True, y=True)
if (event.modifiers() != QtCore.Qt.ControlModifier and
event.modifiers() != QtCore.Qt.ShiftModifier and
event.modifiers() != QtCore.Qt.AltModifier) and not self.in_stroke:
updated = False
if len(self.current_point_set) > 0:
if event.key() == QtCore.Qt.Key_Return:
self.add_set()
if self.NZ > 1:
if event.key() == QtCore.Qt.Key_Left:
self.currentZ = max(0, self.currentZ - 1)
self.zpos.setText(str(self.currentZ))
elif event.key() == QtCore.Qt.Key_Right:
self.currentZ = min(self.NZ - 1, self.currentZ + 1)
self.zpos.setText(str(self.currentZ))
else:
if event.key() == QtCore.Qt.Key_M:
self.MCheckBox.toggle()
if event.key() == QtCore.Qt.Key_O:
self.OCheckBox.toggle()
if event.key() == QtCore.Qt.Key_C:
self.CHCheckBox.toggle()
if event.key() == QtCore.Qt.Key_S:
self.SCheckBox.toggle()
if event.key() == QtCore.Qt.Key_E:
self.eraser_button.toggle()
self.toolBox.setCurrentIndex(0)
if event.key() == QtCore.Qt.Key_P:
self.ASCheckBox.toggle()
if event.key() == QtCore.Qt.Key_PageDown:
self.view = (self.view + 1) % (len(self.RGBChoose.bstr))
print('self.view ', self.view)
self.RGBChoose.button(self.view).setChecked(True)
elif event.key() == QtCore.Qt.Key_PageUp:
self.view = (self.view - 1) % (len(self.RGBChoose.bstr))
print('self.view ', self.view)
self.RGBChoose.button(self.view).setChecked(True)
# can change background or stroke size if cell not finished
if event.key() == QtCore.Qt.Key_Up:
self.color = (self.color - 1) % (6)
print('self.color', self.color)
self.RGBDropDown.setCurrentIndex(self.color)
elif event.key() == QtCore.Qt.Key_Down:
self.color = (self.color + 1) % (6)
print('self.color', self.color)
self.RGBDropDown.setCurrentIndex(self.color)
if (event.key() == QtCore.Qt.Key_BracketLeft or
event.key() == QtCore.Qt.Key_BracketRight):
count = self.BrushChoose.count()
gci = self.BrushChoose.currentIndex()
if event.key() == QtCore.Qt.Key_BracketLeft:
gci = max(0, gci - 1)
else:
gci = min(count - 1, gci + 1)
self.BrushChoose.setCurrentIndex(gci)
self.brush_choose()
self.state_label.setText("Brush size: %s"%(2*gci+1), color='#969696')
if not updated:
self.update_plot()
elif event.modifiers() == QtCore.Qt.ControlModifier:
if event.key() == QtCore.Qt.Key_Z:
self.undo_action()
if event.key() == QtCore.Qt.Key_0:
self.clear_all()
def keyPressEvent(self, event):
if event.modifiers() == QtCore.Qt.ControlModifier:
if event.key() == QtCore.Qt.Key_1:
self.toolBox.setCurrentIndex(0)
if event.key() == QtCore.Qt.Key_2:
self.toolBox.setCurrentIndex(1)
if event.key() == QtCore.Qt.Key_3:
self.toolBox.setCurrentIndex(2)
if event.key() == QtCore.Qt.Key_Minus or event.key() == QtCore.Qt.Key_Equal:
self.p0.keyPressEvent(event)
def chanchoose(self, image):
if image.ndim > 2:
if self.jCBChanToSegment.currentIndex() == 0:
image = image.astype(np.float32).mean(axis=-1)[..., np.newaxis]
else:
chanid = [self.jCBChanToSegment.currentIndex() - 1]
if self.jCBChan2.currentIndex() > 0:
chanid.append(self.jCBChan2.currentIndex() - 1)
image = image[:, :, chanid].astype(np.float32)
return image
def initialize_model(self, gpu=False, pretrained_model=False, model_type='scellseg',
diam_mean=30., net_avg=False, device=None, nclasses=3,
residual_on=True, style_on=True, concatenation=False, update_step=1,
last_conv_on=True, attn_on=False, dense_on=False, style_scale_on=True,
task_mode='cellpose', model=None):
self.current_model = model_type
self.model = models.sCellSeg(gpu=gpu, pretrained_model=pretrained_model, model_type=model_type,
diam_mean=diam_mean, net_avg=net_avg, device=device, nclasses=nclasses,
residual_on=residual_on, style_on=style_on, concatenation=concatenation, update_step=update_step,
last_conv_on=last_conv_on, attn_on=attn_on, dense_on=dense_on, style_scale_on=style_scale_on,
task_mode=task_mode, model=model)
def set_compute_thread(self):
self.seg_thread = threading.Thread(target = self.compute_model)
        self.seg_thread.setDaemon(True)
self.seg_thread.start()
def compute_model(self):
self.progress.setValue(0)
self.update_plot()
self.state_label.setText("Running...", color='#969696')
QtWidgets.qApp.processEvents() # force update gui
if True:
tic = time.time()
self.clear_all()
self.flows = [[], [], []]
pretrained_model = os.path.join(self.model_dir, self.ModelChoose.currentText())
self.initialize_model(pretrained_model=pretrained_model, gpu=self.useGPU.isChecked(),
model_type=self.ModelChoose.currentText())
print('using model %s' % self.current_model)
self.progress.setValue(10)
do_3D = False
if self.NZ > 1:
do_3D = True
data = self.stack.copy()
else:
data = self.stack[0].copy()
channels = self.get_channels()
# print(channels)
self.diameter = float(self.Diameter.text())
self.update_plot()
try:
# net_avg = self.NetAvg.currentIndex() == 0
resample = self.NetAvg.currentIndex() == 1 # we need modify from here
min_size = ((30. // 2) ** 2) * np.pi * 0.05
try:
finetune_model = self.model_file_path[0]
print('ft_model', finetune_model)
except:
finetune_model = None
# inference
masks, flows, _ = self.model.inference(finetune_model=finetune_model, net_avg=False,
query_images=data, channel=channels,
diameter=self.diameter,
resample=resample, flow_threshold=self.threshold,
cellprob_threshold=self.cellprob,
min_size=min_size, eval_batch_size=8,
postproc_mode=self.model.postproc_mode,
progress=self.progress)
self.state_label.setText(
'%d cells found with scellseg net in %0.3fs' % (
len(np.unique(masks)[1:]), time.time() - tic),
color='#39B54A')
# self.state_label.setStyleSheet("color:green;")
self.update_plot()
self.progress.setValue(75)
self.flows[0] = flows[0].copy()
self.flows[1] = (np.clip(utils.normalize99(flows[2].copy()), 0, 1) * 255).astype(np.uint8)
if not do_3D:
masks = masks[np.newaxis, ...]
self.flows[0] = transforms.resize_image(self.flows[0], masks.shape[-2], masks.shape[-1],
interpolation=cv2.INTER_NEAREST)
self.flows[1] = transforms.resize_image(self.flows[1], masks.shape[-2], masks.shape[-1])
if not do_3D:
self.flows[2] = np.zeros(masks.shape[1:], dtype=np.uint8)
self.flows = [self.flows[n][np.newaxis, ...] for n in range(len(self.flows))]
else:
self.flows[2] = (flows[1][0] / 10 * 127 + 127).astype(np.uint8)
if len(flows) > 2:
self.flows.append(flows[3])
self.flows.append(np.concatenate((flows[1], flows[2][np.newaxis, ...]), axis=0))
print()
self.progress.setValue(80)
z = 0
self.masksOn = True
self.outlinesOn = True
self.MCheckBox.setChecked(True)
self.OCheckBox.setChecked(True)
iopart._masks_to_gui(self, masks, outlines=None)
self.progress.setValue(100)
self.first_load_listView()
# self.toggle_server(off=True)
if not do_3D:
self.threshslider.setEnabled(True)
self.probslider.setEnabled(True)
self.masks_for_save = masks
except Exception as e:
print('NET ERROR: %s' % e)
self.progress.setValue(0)
return
else: # except Exception as e:
print('ERROR: %s' % e)
print('Finished inference')
def batch_inference(self):
self.progress.setValue(0)
# print('threshold', self.threshold, self.cellprob)
# self.update_plot()
if True:
tic = time.time()
self.clear_all()
model_type =self.ModelChoose.currentText()
pretrained_model = os.path.join(self.model_dir, model_type)
self.initialize_model(pretrained_model=pretrained_model, gpu=self.useGPU.isChecked(),
model_type=model_type)
print('using model %s' % self.current_model)
self.progress.setValue(10)
channels = self.get_channels()
self.diameter = float(self.Diameter.text())
try:
# net_avg = self.NetAvg.currentIndex() < 2
# resample = self.NetAvg.currentIndex() == 1
min_size = ((30. // 2) ** 2) * np.pi * 0.05
try:
finetune_model = self.model_file_path[0]
print('ft_model', finetune_model)
except:
finetune_model = None
try:
dataset_path = self.batch_inference_dir
except:
dataset_path = None
# batch inference
bz = 8 if self.bz_line.text() == '' else int(self.bz_line.text())
save_name = self.current_model + '_' + dataset_path.split('\\')[-1]
utils.set_manual_seed(5)
try:
shotset = dataset.DatasetShot(eval_dir=dataset_path, class_name=None, image_filter='_img',
mask_filter='_masks',
channels=channels, task_mode=self.model.task_mode, active_ind=None,
rescale=True)
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/Loading1.png'),
resize=self.resize, X2=0)
self.state_label.setText("Running...", color='#969696')
QtWidgets.qApp.processEvents() # force update gui
except:
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/Loading4.png'),
resize=self.resize, X2=0)
self.state_label.setText("Please choose right data path",
color='#FF6A56')
print("Please choose right data path")
self.batch_inference_bnt.setEnabled(False)
return
queryset = dataset.DatasetQuery(dataset_path, class_name=None, image_filter='_img',
mask_filter='_masks')
query_image_names = queryset.query_image_names
diameter = shotset.md
print('>>>> mean diameter of this style,', round(diameter, 3))
self.model.net.save_name = save_name
self.img.setImage(iopart.imread(self.now_pyfile_path + '/assets/Loading2.png'), autoLevels=False, lut=None)
self.state_label.setText("Running...", color='#969696')
QtWidgets.qApp.processEvents() # force update gui
# flow_threshold was set to 0.4, and cellprob_threshold was set to 0.5
try:
masks, flows, _ = self.model.inference(finetune_model=finetune_model, net_avg=False,
query_image_names=query_image_names, channel=channels,
diameter=diameter,
resample=False, flow_threshold=0.4,
cellprob_threshold=0.5,
min_size=min_size, eval_batch_size=bz,
postproc_mode=self.model.postproc_mode,
progress=self.progress)
except RuntimeError:
iopart._initialize_image_portable(self,
iopart.imread(self.now_pyfile_path + '/assets/Loading4.png'),
resize=self.resize, X2=0)
self.state_label.setText("Batch size is too big, please set smaller",
color='#FF6A56')
print("Batch size is too big, please set smaller")
return
# save output images
diams = np.ones(len(query_image_names)) * diameter
imgs = [io.imread(query_image_name) for query_image_name in query_image_names]
io.masks_flows_to_seg(imgs, masks, flows, diams, query_image_names,
[channels for i in range(len(query_image_names))])
io.save_to_png(imgs, masks, flows, query_image_names, labels=None, aps=None,
task_mode=self.model.task_mode)
self.masks_for_save = masks
except:
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/Loading4.png'), resize=self.resize,
X2=0)
self.state_label.setText("Please choose right data path",
color='#FF6A56')
return
else:  # unreachable branch kept from an earlier except-clause; `e` is not defined here
print('ERROR: batch inference was not run')
self.img.setImage(iopart.imread(self.now_pyfile_path + '/assets/Loading3.png'), autoLevels=False, lut=None)
self.state_label.setText('Finished inference in %0.3fs!'%(time.time() - tic), color='#39B54A')
self.batch_inference_bnt.setEnabled(False)
def compute_cprob(self):
rerun = False
if self.cellprob != self.probslider.value():
rerun = True
self.cellprob = self.probslider.value()
if self.threshold != (31 - self.threshslider.value()) / 10.:
rerun = True
self.threshold = (31 - self.threshslider.value()) / 10.
if not rerun:
return
if self.threshold == 3.0 or self.NZ > 1:
thresh = None
print('computing masks with cell prob=%0.3f, no flow error threshold' %
(self.cellprob))
else:
thresh = self.threshold
print('computing masks with cell prob=%0.3f, flow error threshold=%0.3f' %
(self.cellprob, thresh))
maski = dynamics.get_masks(self.flows[3].copy(), iscell=(self.flows[4][-1] > self.cellprob),
flows=self.flows[4][:-1], threshold=thresh)
if self.NZ == 1:
maski = utils.fill_holes_and_remove_small_masks(maski)
maski = transforms.resize_image(maski, self.cellpix.shape[-2], self.cellpix.shape[-1],
interpolation=cv2.INTER_NEAREST)
self.masksOn = True
self.outlinesOn = True
self.MCheckBox.setChecked(True)
self.OCheckBox.setChecked(True)
if maski.ndim < 3:
maski = maski[np.newaxis, ...]
print('%d cells found' % (len(np.unique(maski)[1:])))
iopart._masks_to_gui(self, maski, outlines=None)
self.threshslider.setToolTip("Value: " + str(self.threshold))
self.probslider.setToolTip("Value: " + str(self.cellprob))
self.first_load_listView()
self.show()
def reset(self):
# ---- start sets of points ---- #
self.selected = 0
self.X2 = 0
self.resize = -1
self.onechan = False
self.loaded = False
self.channel = [0, 1]
self.current_point_set = []
self.in_stroke = False
self.strokes = []
self.stroke_appended = True
self.ncells = 0
self.zdraw = []
self.removed_cell = []
self.cellcolors = [np.array([255, 255, 255])]
# -- set menus to default -- #
self.color = 0
self.RGBDropDown.setCurrentIndex(self.color)
self.view = 0
self.RGBChoose.button(self.view).setChecked(True)
self.BrushChoose.setCurrentIndex(1)
self.CHCheckBox.setChecked(False)
self.OCheckBox.setEnabled(True)
self.SSCheckBox.setChecked(True)
# -- zero out image stack -- #
self.opacity = 128 # how opaque masks should be
self.outcolor = [200, 200, 255, 200]
self.NZ, self.Ly, self.Lx = 1, 512, 512
if self.autobtn.isChecked():
self.saturation = [[0, 255] for n in range(self.NZ)]
self.currentZ = 0
self.flows = [[], [], [], [], [[]]]
self.stack = np.zeros((1, self.Ly, self.Lx, 3))
# masks matrix
self.layers = 0 * np.ones((1, self.Ly, self.Lx, 4), np.uint8)
# image matrix with a scale disk
self.radii = 0 * np.ones((self.Ly, self.Lx, 4), np.uint8)
self.cellpix = np.zeros((1, self.Ly, self.Lx), np.uint16)
self.outpix = np.zeros((1, self.Ly, self.Lx), np.uint16)
self.ismanual = np.zeros(0, bool)  # np.bool is removed in recent NumPy; use builtin bool
self.update_plot()
self.filename = []
self.loaded = False
def first_load_listView(self):
self.listmodel = Qt.QStandardItemModel(self.ncells,1)
# self.listmodel = Qt.QStringListModel()
self.listmodel.setHorizontalHeaderLabels(["Annotation"])
self.myCellList = ['instance_' + str(i) for i in range(1, self.ncells + 1)]
for i in range(len(self.myCellList)):
self.listmodel.setItem(i,Qt.QStandardItem(self.myCellList[i]))
self.listView.setModel(self.listmodel)
def initialize_listView(self):
if self.filename != []:
if os.path.isfile(os.path.splitext(self.filename)[0] + '_instance_list.txt'):
self.list_file_name = str(os.path.splitext(self.filename)[0] + '_instance_list.txt')
self.myCellList_array = np.loadtxt(self.list_file_name, dtype=str)
self.myCellList = self.myCellList_array.tolist()
if len(self.myCellList) == self.ncells:
self.listmodel = Qt.QStandardItemModel(self.ncells, 1)
self.listmodel.setHorizontalHeaderLabels(["Annotation"])
for i in range(len(self.myCellList)):
self.listmodel.setItem(i,Qt.QStandardItem(self.myCellList[i]))
self.listView.setModel(self.listmodel)
else:
self.listmodel = Qt.QStandardItemModel(self.ncells, 1)
# self.listmodel = Qt.QStringListModel()
self.listmodel.setHorizontalHeaderLabels(["Annotation"])
self.myCellList = ['instance_' + str(i) for i in range(1, self.ncells + 1)]
for i in range(len(self.myCellList)):
self.listmodel.setItem(i,Qt.QStandardItem(self.myCellList[i]))
self.listView.setModel(self.listmodel)
else:
self.myCellList = ['instance_' + str(i) for i in range(1, self.ncells + 1)]
self.listmodel = Qt.QStandardItemModel(self.ncells, 1)
# self.listmodel = Qt.QStringListModel()
self.listmodel.setHorizontalHeaderLabels(["Annotation"])
for i in range(len(self.myCellList)):
self.listmodel.setItem(i,Qt.QStandardItem(self.myCellList[i]))
self.listView.setModel(self.listmodel)
def initinal_p0(self):
# self.p0.removeItem(self.img)
self.p0.removeItem(self.layer)
self.p0.removeItem(self.scale)
# self.img.deleteLater()
self.layer.deleteLater()
self.scale.deleteLater()
# self.img = pg.ImageItem(viewbox=self.p0, parent=self, axisOrder='row-major')
# self.img.autoDownsample = False
self.layer = guiparts.ImageDraw(viewbox=self.p0, parent=self)
self.layer.setLevels([0, 255])
self.scale = pg.ImageItem(viewbox=self.p0, parent=self)
self.scale.setLevels([0, 255])
self.p0.scene().contextMenuItem = self.p0
# self.p0.addItem(self.img)
self.p0.addItem(self.layer)
self.p0.addItem(self.scale)
def add_list_item(self):
# print(self.ncells)
# self.myCellList = self.listmodel.data()
self.listView.selectAll()
self.myCellList = []
for item in self.listView.selectedIndexes():
data = item.data()
self.myCellList.append(data)
temp_nums = []
for celli in self.myCellList:
if 'instance_' in celli:
temp_nums.append(int(celli.split('instance_')[-1]))
if len(temp_nums) == 0:
now_cellIdx = 0
else:
now_cellIdx = np.max(np.array(temp_nums))
self.myCellList.append('instance_' + str(now_cellIdx+1))
# self.myCellList.append('instance_' + str(self.ncells))
self.listmodel = Qt.QStandardItemModel(self.ncells, 1)
# self.listmodel = Qt.QStringListModel()
self.listmodel.setHorizontalHeaderLabels(["Annotation"])
for i in range(len(self.myCellList)):
self.listmodel.setItem(i, Qt.QStandardItem(self.myCellList[i]))
self.listView.setModel(self.listmodel)
def delete_list_item(self, index):
# self.myCellList = self.listmodel.data()
self.listView.selectAll()
self.myCellList = []
for item in self.listView.selectedIndexes():
data = item.data()
self.myCellList.append(data)
self.last_remove_index = index
self.last_remove_item = self.myCellList.pop(index - 1)
self.listmodel = Qt.QStandardItemModel(self.ncells, 1)
# self.listmodel = Qt.QStringListModel()
self.listmodel.setHorizontalHeaderLabels(["Annotation"])
for i in range(len(self.myCellList)):
self.listmodel.setItem(i, Qt.QStandardItem(self.myCellList[i]))
self.listView.setModel(self.listmodel)
def check_gpu(self, torch=True):
# also decide whether or not to use torch
self.useGPU.setChecked(False)
self.useGPU.setEnabled(False)
if models.use_gpu():
self.useGPU.setEnabled(True)
self.useGPU.setChecked(True)
def check_ftgpu(self, torch=True):
# also decide whether or not to use torch
self.ftuseGPU.setChecked(False)
self.ftuseGPU.setEnabled(False)
if models.use_gpu():
self.ftuseGPU.setEnabled(True)
self.ftuseGPU.setChecked(True)
def clear_all(self):
self.prev_selected = 0
self.selected = 0
# self.layers_undo, self.cellpix_undo, self.outpix_undo = [],[],[]
self.layers = 0 * np.ones((self.NZ, self.Ly, self.Lx, 4), np.uint8)
self.cellpix = np.zeros((self.NZ, self.Ly, self.Lx), np.uint16)
self.outpix = np.zeros((self.NZ, self.Ly, self.Lx), np.uint16)
self.cellcolors = [np.array([255, 255, 255])]
self.ncells = 0
self.initialize_listView()
print('removed all cells')
self.toggle_removals()
self.update_plot()
def list_select_cell(self, idx):
self.prev_selected = self.selected
self.selected = idx
# print(idx)
# print(self.prev_selected)
if self.selected > 0:
self.layers[self.cellpix == idx] = np.array([255, 255, 255, 255])
if idx < self.ncells + 1 and self.prev_selected > 0 and self.prev_selected != idx:
self.layers[self.cellpix == self.prev_selected] = np.append(self.cellcolors[self.prev_selected],
self.opacity)
# if self.outlinesOn:
# self.layers[self.outpix == idx] = np.array(self.outcolor).astype(np.uint8)
self.update_plot()
def select_cell(self, idx):
self.prev_selected = self.selected
self.selected = idx
self.listView.selectRow(idx - 1)
# print('the prev-selected is ', self.prev_selected)
if self.selected > 0:
self.layers[self.cellpix == idx] = np.array([255, 255, 255, self.opacity])
print('idx', self.prev_selected, idx)
if idx < self.ncells + 1 and self.prev_selected > 0 and self.prev_selected != idx:
self.layers[self.cellpix == self.prev_selected] = np.append(self.cellcolors[self.prev_selected],
self.opacity)
# if self.outlinesOn:
# self.layers[self.outpix==idx] = np.array(self.outcolor)
self.update_plot()
def unselect_cell(self):
if self.selected > 0:
idx = self.selected
if idx < self.ncells + 1:
self.layers[self.cellpix == idx] = np.append(self.cellcolors[idx], self.opacity)
if self.outlinesOn:
self.layers[self.outpix == idx] = np.array(self.outcolor).astype(np.uint8)
# [0,0,0,self.opacity])
self.update_plot()
self.selected = 0
def remove_cell(self, idx):
# remove from manual array
# self.selected = 0
for z in range(self.NZ):
cp = self.cellpix[z] == idx
op = self.outpix[z] == idx
# remove from mask layer
self.layers[z, cp] = np.array([0, 0, 0, 0])
# remove from self.cellpix and self.outpix
self.cellpix[z, cp] = 0
self.outpix[z, op] = 0
# reduce other pixels by -1
self.cellpix[z, self.cellpix[z] > idx] -= 1
self.outpix[z, self.outpix[z] > idx] -= 1
self.update_plot()
if self.NZ == 1:
self.removed_cell = [self.ismanual[idx - 1], self.cellcolors[idx], np.nonzero(cp), np.nonzero(op)]
self.redo.setEnabled(True)
# remove cell from lists
self.ismanual = np.delete(self.ismanual, idx - 1)
del self.cellcolors[idx]
del self.zdraw[idx - 1]
self.ncells -= 1
print('removed cell %d' % (idx - 1))
self.delete_list_item(index=idx)
if self.ncells == 0:
self.ClearButton.setEnabled(False)
if self.NZ == 1:
iopart._save_sets(self)
# self.select_cell(0)
def merge_cells(self, idx):
self.prev_selected = self.selected
self.selected = idx
if self.selected != self.prev_selected:
for z in range(self.NZ):
ar0, ac0 = np.nonzero(self.cellpix[z] == self.prev_selected)
ar1, ac1 = np.nonzero(self.cellpix[z] == self.selected)
touching = np.logical_and((ar0[:, np.newaxis] - ar1) == 1,
(ac0[:, np.newaxis] - ac1) == 1).sum()
print(touching)
ar = np.hstack((ar0, ar1))
ac = np.hstack((ac0, ac1))
if touching:
mask = np.zeros((np.ptp(ar) + 4, np.ptp(ac) + 4), np.uint8)
mask[ar - ar.min() + 2, ac - ac.min() + 2] = 1
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
pvc, pvr = contours[-2][0].squeeze().T
vr, vc = pvr + ar.min() - 2, pvc + ac.min() - 2
else:
vr0, vc0 = np.nonzero(self.outpix[z] == self.prev_selected)
vr1, vc1 = np.nonzero(self.outpix[z] == self.selected)
vr = np.hstack((vr0, vr1))
vc = np.hstack((vc0, vc1))
color = self.cellcolors[self.prev_selected]
self.draw_mask(z, ar, ac, vr, vc, color, idx=self.prev_selected)
self.remove_cell(self.selected)
print('merged two cells')
self.update_plot()
iopart._save_sets(self)
self.undo.setEnabled(False)
self.redo.setEnabled(False)
def undo_remove_cell(self):
if len(self.removed_cell) > 0:
z = 0
ar, ac = self.removed_cell[2]
vr, vc = self.removed_cell[3]
color = self.removed_cell[1]
self.draw_mask(z, ar, ac, vr, vc, color)
self.toggle_mask_ops()
self.cellcolors.append(color)
self.ncells += 1
self.add_list_item()
self.ismanual = np.append(self.ismanual, self.removed_cell[0])
self.zdraw.append([])
print('added back removed cell')
self.update_plot()
iopart._save_sets(self)
self.removed_cell = []
self.redo.setEnabled(False)
def fine_tune(self):
tic = time.time()
dataset_dir = self.fine_tune_dir
self.state_label.setText("%s"%(dataset_dir), color='#969696')
if not isinstance(dataset_dir, str):  # TODO: turn this print into a GUI warning
print('dataset_dir is not provided')
train_epoch = 100 if self.epoch_line.text() == '' else int(self.epoch_line.text())
ft_bz = 8 if self.ftbz_line.text() == '' else int(self.ftbz_line.text())
contrast_on = 1 if self.stmodelchooseBnt.currentText() == 'contrastive' else 0
model_type = self.ftmodelchooseBnt.currentText()
task_mode, postproc_mode, attn_on, dense_on, style_scale_on = utils.process_different_model(model_type) # task_mode mean different instance representation
pretrained_model = os.path.join(self.model_dir, model_type)
channels = [self.chan1chooseBnt.currentIndex(), self.chan2chooseBnt.currentIndex()]
print(dataset_dir, train_epoch, channels)
utils.set_manual_seed(5)
try:
print('ft_bz', ft_bz)
shotset = DatasetShot(eval_dir=dataset_dir, class_name=None, image_filter='_img', mask_filter='_masks',
channels=channels,
train_num=train_epoch * ft_bz, task_mode=task_mode, rescale=True)
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/Loading1.png'), resize=self.resize, X2=0)
self.state_label.setText("Running...", color='#969696')
QtWidgets.qApp.processEvents() # force update gui
except ValueError:
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/Loading4.png'), resize=self.resize, X2=0)
self.state_label.setText("Please choose right data path",
color='#FF6A56')
print("Please choose right data path")
self.ftbnt.setEnabled(False)
return
shot_gen = DataLoader(dataset=shotset, batch_size=ft_bz, num_workers=0, pin_memory=True)
diameter = shotset.md
print('>>>> mean diameter of this style,', round(diameter, 3))
lr = {'downsample': 0.001, 'upsample': 0.001, 'tasker': 0.001, 'alpha': 0.1}
lr_schedule_gamma = {'downsample': 0.5, 'upsample': 0.5, 'tasker': 0.5, 'alpha': 0.5}
step_size = int(train_epoch * 0.25)
print('step_size', step_size)
self.initialize_model(pretrained_model=pretrained_model, gpu=self.ftuseGPU.isChecked(), model_type=model_type)
self.model.net.pretrained_model = pretrained_model
save_name = model_type + '_' + os.path.basename(dataset_dir)
self.model.net.contrast_on = contrast_on
if contrast_on:
self.model.net.pair_gen = DatasetPairEval(positive_dir=dataset_dir, use_negative_masks=False, gpu=self.ftuseGPU.isChecked(),
rescale=True)
self.model.net.save_name = save_name + '-cft'
else:
self.model.net.save_name = save_name + '-ft'
try:
print('Fine-tuning now... please wait')
self.img.setImage(iopart.imread(self.now_pyfile_path + '/assets/Loading2.png'), autoLevels=False, lut=None)
self.state_label.setText("Running...", color='#969696')
QtWidgets.qApp.processEvents() # force update gui
self.model.finetune(shot_gen=shot_gen, lr=lr, lr_schedule_gamma=lr_schedule_gamma, step_size=step_size, savepath=dataset_dir)
except RuntimeError:
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/Loading4.png'), resize=self.resize, X2=0)
self.state_label.setText("Batch size is too big, please set smaller",
color='#FF6A56')
print("Batch size is too big, please set smaller")
return
print('Finished fine-tuning')
self.img.setImage(iopart.imread(self.now_pyfile_path + '/assets/Loading3.png'), autoLevels=False, lut=None)
self.state_label.setText("Finished in %0.3fs, model saved at %s/fine-tune/%s" %(time.time()-tic, dataset_dir, self.model.net.save_name), color='#39B54A')
self.ftbnt.setEnabled(False)
self.fine_tune_dir = ''
def get_single_cell(self):
tic = time.time()
try:
data_path = self.single_cell_dir
except:
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/Loading4.png'), resize=self.resize, X2=0)
self.state_label.setText("Please choose right data path",
color='#FF6A56')
print("Please choose right data path")
return
try:
image_names = io.get_image_files(data_path, '_masks', imf='_img')
mask_names, _ = io.get_label_files(image_names, '_img_cp_masks', imf='_img')
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/Loading1.png'), resize=self.resize, X2=0)
self.state_label.setText("Running...", color='#969696')
QtWidgets.qApp.processEvents() # force update gui
except:
iopart._initialize_image_portable(self, iopart.imread(self.now_pyfile_path + '/assets/Loading4.png'), resize=self.resize, X2=0)
self.state_label.setText("Please choose right data path",
color='#FF6A56')
print("Please choose right data path")
self.single_cell_btn.setEnabled(False)
return
sta = 256
save_dir = os.path.join(os.path.dirname(data_path), 'single')
utils.make_folder(save_dir)
imgs = [io.imread(os.path.join(data_path, image_name)) for image_name in image_names]
masks = [io.imread(os.path.join(data_path, mask_name)) for mask_name in mask_names]
self.img.setImage(iopart.imread(self.now_pyfile_path + '/assets/Loading2.png'), autoLevels=False, lut=None)
self.state_label.setText("Running...", color='#969696')
QtWidgets.qApp.processEvents() # force update gui
for n in trange(len(masks)):
maskn = masks[n]
props = regionprops(maskn)
i_max = maskn.max() + 1
for i in range(1, i_max):
maskn_ = np.zeros_like(maskn)
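# NOTE: the original snippet ends here; the lines below are a hedged sketch
# (an assumption, not the original code) of cropping each instance to a
# sta x sta window centred on its bounding box and saving it under save_dir.
maskn_[maskn == i] = 1
minr, minc, maxr, maxc = props[i - 1].bbox  # assumes regionprops order follows label order
cy, cx = (minr + maxr) // 2, (minc + maxc) // 2
y0, x0 = max(0, cy - sta // 2), max(0, cx - sta // 2)
crop_img = imgs[n][y0:y0 + sta, x0:x0 + sta]
crop_mask = maskn_[y0:y0 + sta, x0:x0 + sta]
io.imsave(os.path.join(save_dir, 'single_%d_%d_img.tif' % (n, i)), crop_img)
io.imsave(os.path.join(save_dir, 'single_%d_%d_masks.tif' % (n, i)), crop_mask)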
import numpy as np
from collections.abc import Iterable
import os
import os.path as osp
from .globals import dir_path
import ctypes
ctypes.CDLL(osp.join(dir_path, "libamirstan_plugin.so"))
import tensorrt as trt
def create_gridanchordynamic_plugin(layer_name,
base_size,
stride,
scales=np.array([1.]),
ratios=np.array([1.]),
scale_major=True,
center_x=-1,
center_y=-1,
base_anchors=None):
creator = trt.get_plugin_registry().get_plugin_creator(
'GridAnchorDynamicPluginDynamic', '1', '')
pfc = trt.PluginFieldCollection()
pf_base_size = trt.PluginField("base_size",
np.array([base_size], dtype=np.int32),
trt.PluginFieldType.INT32)
pfc.append(pf_base_size)
pf_stride = trt.PluginField("stride", np.array([stride], dtype=np.int32),
trt.PluginFieldType.INT32)
pfc.append(pf_stride)
pf_scales = trt.PluginField("scales",
np.array(scales).astype(np.float32),
trt.PluginFieldType.FLOAT32)
pfc.append(pf_scales)
pf_ratios = trt.PluginField("ratios",
np.array(ratios).astype(np.float32),
trt.PluginFieldType.FLOAT32)
pfc.append(pf_ratios)
pf_scale_major = trt.PluginField(
"scale_major", np.array([int(scale_major)], dtype=np.int32),
trt.PluginFieldType.INT32)
pfc.append(pf_scale_major)
pf_center_x = trt.PluginField("center_x",
np.array([center_x], dtype=np.int32),
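# hedged completion of the truncated call and of the remaining plugin fields
# (assumed from the identical fields above and the standard TensorRT creator
# API; not part of the original snippet):
trt.PluginFieldType.INT32)
pfc.append(pf_center_x)
pf_center_y = trt.PluginField("center_y",
np.array([center_y], dtype=np.int32),
trt.PluginFieldType.INT32)
pfc.append(pf_center_y)
if base_anchors is not None:
    pf_base_anchors = trt.PluginField("base_anchors",
    np.array(base_anchors).astype(np.float32),
    trt.PluginFieldType.FLOAT32)
    pfc.append(pf_base_anchors)
return creator.create_plugin(layer_name, pfc)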
'''
Testing of the zoo
'''
import pytest
import numpy as np
np.random.seed(100)
from freelunch.zoo import animal, particle, krill
from freelunch.util import BadObjectiveFunctionScores, InvalidSolutionUpdate
animals = [particle, krill]
def test_animal():
location_1 = np.array([1,1,1])
fitness_1 = 2
location_2 = np.array([0,0,0])
fitness_2 = 0
location_3 = np.array([2,2,2])
fitness_3 = 10
friend = animal(dna=location_1, fitness=fitness_1)
assert(np.all(friend.dna == location_1))
assert(friend.fitness == 2)
assert(np.all(friend.best_pos == location_1))
assert(friend.best == 2)
friend.move(location_2, fitness_2)
assert(np.all(friend.dna == location_2))
assert(friend.fitness == 0)
assert(np.all(friend.best_pos == location_2))
assert(friend.best == 0)
friend.move(location_3, fitness_3)
assert(np.all(friend.dna == location_3))
assert(friend.fitness == 10)
assert(np.all(friend.best_pos == location_2))
assert(friend.best == 0)
with pytest.raises(ValueError):
friend.move(location_3, np.inf)
with pytest.raises(ValueError):
friend.move(location_3, np.nan)
with pytest.raises(ValueError):
friend.move(location_3, [])
with pytest.raises(InvalidSolutionUpdate):
friend.move(np.array([np.inf,1,1]), 1)
with pytest.raises(InvalidSolutionUpdate):
friend.move(np.array([np.nan,1,1]), 1)
with pytest.raises(InvalidSolutionUpdate):
friend.move(np.array([1+2j,1,1]), 1)
friend = animal(dna=location_1, fitness=fitness_1)
friend2 = animal(dna=location_2, fitness=fitness_2)
assert(friend2 < friend)
assert(friend > friend2)
friend2._fitness = None # Or will throw error
assert(friend < friend2)
assert(not (friend2 < friend))
assert(friend2 > friend)
assert(not (friend > friend2))
friend._fitness = None # Or will throw error
with pytest.raises(BadObjectiveFunctionScores):
friend < friend2
with pytest.raises(BadObjectiveFunctionScores):
friend > friend2
@pytest.mark.parametrize('creature', animals)
def test_particle(creature):
location_1 = np.array([1,1,1])
vel = np.random.randn(1,3)
fitness_1 = 2
location_2 = np.array([0,0,0])
fitness_2 = 0
location_3 = np.array([2,2,2])
fitness_3 = 10
friend = creature(pos=location_1, vel=vel, fitness=fitness_1)
assert(np.all(friend.dna == location_1))
assert(friend.fitness == 2)
assert(np.all(friend.best_pos == location_1))
assert(friend.best == 2)
friend.move(location_2, vel, fitness_2)
assert(np.all(friend.dna == location_2))
assert(friend.fitness == 0)
assert(np.all(friend.best_pos == location_2))
assert(friend.best == 0)
friend.move(location_3, vel, fitness_3)
assert(np.all(friend.dna == location_3))
assert(friend.fitness == 10)
assert(np.all(friend.best_pos == location_2))
assert(friend.best == 0)
with pytest.raises(ValueError):
friend.move(location_3, vel, np.inf)
with pytest.raises(ValueError):
friend.move(location_3, vel, np.nan)
with pytest.raises(ValueError):
friend.move(location_3,vel, [])
with pytest.raises(InvalidSolutionUpdate):
friend.move(np.array([np.inf,1,1]), vel, 1)
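# The snippet is truncated here; the remaining checks below mirror test_animal
# above and are a hedged reconstruction (an assumption, not the original code).
with pytest.raises(InvalidSolutionUpdate):
    friend.move(np.array([np.nan,1,1]), vel, 1)
with pytest.raises(InvalidSolutionUpdate):
    friend.move(np.array([1+2j,1,1]), vel, 1)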
import numpy as np
import scipy.signal
from frontend.acoustic.basic import smooth
from frontend.acoustic.pitch import read_tools_f0, write_tools_f0, hz_to_midi_note, midi_note_to_hz
from frontend.control import htk_lab_file, notes_file
import matplotlib.pyplot as plt # just for plots
def main():
fn_base = './data/triangler'
master_tuning = 0.0 # cents
# filenames
fn_tools_f0 = fn_base + '.tools.f0'
fn_lab = fn_base + '.lab'
fn_notes = fn_base + '.notes' # XXX: these should be notes with times modified by note timing model!!! maybe add check that sees if vowel onset matches note onsets
fn_out_tools_f0 = fn_base + '.tools.f0.tuned'
# read f0
f0, hoptime = read_tools_f0(fn_tools_f0)
f0_orig = np.array(f0, copy=True) # XXX: just for plotting
# read lab
phn_items = htk_lab_file.read(fn_lab)
# read notes
notes = notes_file.read_old_fmt(fn_notes)
# check note onsets match vowel onsets
# i.e. we should use note times after they were modified by note timing model
raise NotImplementedError # todo
# compute smoothed derivate
assert np.all(f0 > 0), 'expected continuous input pitch'
f0 = hz_to_midi_note(f0)
# 50ms derivative filter length, rounded up to next odd integer
wl = int(np.round(50.0e-3/hoptime))
wl = wl + 1 - (wl % 2)
# derivative by sinusoidal FIR filter
#deriv_filter = -np.sin(np.linspace(0, 1, wl)*2*np.pi)/13.5 # correlate
#deriv_filter = np.sin(np.linspace(0, 1, wl)*2*np.pi)/13.5 # convolve
#assert wl == 11, '13.5 factor is tuned for wl=11 only; use Savitzky-Golay filter for any wl'
deriv_filter = scipy.signal.savgol_coeffs(wl, polyorder=3, deriv=1, use='conv')
#df0 = np.correlate(f0, deriv_filter, 'same')
df0 = np.convolve(f0, deriv_filter, 'same')
#wf0 = 1 / (1 + np.abs(df0)/50)
## XXX: is /50 too much? wf0 (after smoothing is like 0.94 ~ 1.0)
#sl = int(np.round(150e-3/hoptime))
#from frontend.acoustic.basic import smooth_moving
#wf0 = smooth_moving(wf0, sl)
# XXX: wf0 is not used!
if 0:
deriv_filter2 = scipy.signal.savgol_coeffs(wl, polyorder=3, deriv=1, use='conv')
#deriv_filter2 /= np.sqrt(np.mean(deriv_filter2**2)) # XXX: not norm, but RMS normalize
df0_2 = np.convolve(f0, deriv_filter2, 'same')
#df0_2 /= np.sqrt(np.mean(deriv_filter2**2))
df0_2 /= np.linalg.norm(deriv_filter2)/np.sqrt(wl)
ax = plt.subplot(2, 1, 1)
ax.plot(deriv_filter)
ax.plot(deriv_filter2)
ax = plt.subplot(2, 1, 2)
ax.plot(df0)
ax.plot(df0_2)
plt.show()
return
# frame-wise weighting based on phonetics
# i.e. when computing average f0 along a note,
# weight vowels and syllabic consonants more than consonants,
# silences are not considered alltogether
L = len(f0)
w_phn = np.ones(L)
for b_time, e_time, phn in phn_items:
b = int(np.round(b_time/hoptime))
e = int(np.round(e_time/hoptime))
b = np.clip(b, 0, L-1)
e = np.clip(e, 0, L)
if phn in ['sil', 'pau', 'br']:
w_phn[b:e] = 0.0
elif phn in ['a', 'e', 'i', 'o', 'u', 'N']:
w_phn[b:e] = 2.0
if 0:
plt.plot(w_phn)
plt.show()
# add sub-note segments, besides notes
# ...
# XXX: alternatively, do two passes; 1) notes, 2) sub-note segments
# compute transposition
def approx_segment_avg_f0(f0, df0, w_phn, f0_tar, b, e):
# window to reduce influence of edges of note (25% fade in/out)
n = e - b
w_e = scipy.signal.tukey(n, 0.5)
# weighting depending on derivative (stable f0 is weighted higher); range: 1/15 (big derivative) - 1 (zero derivative, flat pitch)
w_d = 1/np.clip(1 + 27*np.abs(df0[b:e]), None, 15)
if 0:
ax = plt.subplot(2, 1, 1)
ax.plot(13.5*np.abs(df0[b:e]))
ax = plt.subplot(2, 1, 2)
ax.plot(w_d)
plt.show()
import sys
sys.exit()
# weighting depending on phonetic regions (vowels and syllabic consonants are weighted higher than consonants, silences are excluded)
if np.sum(w_phn[b:e]) > 0: # avoid zero weighting
w_p = w_phn[b:e]
else:
w_p = 1
# weighting depending on difference from target (big deviations from target are weighted less); range: ~1/24 (big deviation, e.g. 2 octaves) - 1 (<= +/- 1 semitone deviation)
w_t = 1/np.clip(np.abs(f0[b:e] - f0_tar), 1, None)
# weighted average
w = w_e*w_d*w_p*w_t
avg_f0_segment = np.sum(f0[b:e]*w)/np.sum(w)
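# hedged: the helper presumably returns the weighted average (this return is
# not part of the original snippet, which is cut off here)
return avg_f0_segment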
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
import numpy as np
import warnings
from pandas.core import common as com
from pandas.types.common import (is_integer,
is_float,
is_object_dtype,
is_integer_dtype,
is_float_dtype,
is_scalar,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_period_dtype,
is_bool_dtype,
pandas_dtype,
_ensure_int64,
_ensure_object)
from pandas.types.dtypes import PeriodDtype
from pandas.types.generic import ABCSeries
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import get_freq_code as _gfc
from pandas.tseries.index import DatetimeIndex, Int64Index, Index
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.base import DatelikeOps, DatetimeIndexOpsMixin
from pandas.tseries.tools import parse_time_string
import pandas.tseries.offsets as offsets
import pandas._period as period
from pandas._period import (Period, IncompatibleFrequency,
get_period_field_arr, _validate_end_alias,
_quarter_to_myear)
from pandas.core.base import _shared_docs
from pandas.indexes.base import _index_shared_docs, _ensure_index
from pandas import compat
from pandas.util.decorators import (Appender, Substitution, cache_readonly,
deprecate_kwarg)
from pandas.lib import infer_dtype
import pandas.tslib as tslib
from pandas.compat import zip, u
import pandas.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(target_klass='PeriodIndex or list of Periods'))
def _field_accessor(name, alias, docstring=None):
def f(self):
base, mult = _gfc(self.freq)
return get_period_field_arr(alias, self._values, base)
f.__name__ = name
f.__doc__ = docstring
return property(f)
def dt64arr_to_periodarr(data, freq, tz):
if data.dtype != np.dtype('M8[ns]'):
raise ValueError('Wrong dtype: %s' % data.dtype)
freq = Period._maybe_convert_freq(freq)
base, mult = _gfc(freq)
return period.dt64arr_to_periodarr(data.view('i8'), base, tz)
# --- Period index sketch
_DIFFERENT_FREQ_INDEX = period._DIFFERENT_FREQ_INDEX
def _period_index_cmp(opname, nat_result=False):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if isinstance(other, Period):
func = getattr(self._values, opname)
other_base, _ = _gfc(other.freq)
if other.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
result = func(other.ordinal)
elif isinstance(other, PeriodIndex):
if other.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
result = getattr(self._values, opname)(other._values)
mask = self._isnan | other._isnan
if mask.any():
result[mask] = nat_result
return result
elif other is tslib.NaT:
result = np.empty(len(self._values), dtype=bool)
result.fill(nat_result)
else:
other = Period(other, freq=self.freq)
func = getattr(self._values, opname)
result = func(other.ordinal)
if self.hasnans:
result[self._isnan] = nat_result
return result
return wrapper
class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in
time such as particular years, quarters, months, etc. A value of 1 is the
period containing the Gregorian proleptic datetime Jan 1, 0001 00:00:00.
This ordinal representation is from the scikits.timeseries project.
For instance,
# construct period for day 1/1/1 and get the first second
i = Period(year=1,month=1,day=1,freq='D').asfreq('S', 'S')
i.ordinal
===> 1
Index keys are boxed to Period objects which carries the metadata (eg,
frequency information).
Parameters
----------
data : array-like (1-dimensional), optional
Optional period-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or period object, optional
One of pandas period strings or corresponding objects
start : starting value, period-like, optional
If data is None, used as the start point in generating regular
period data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end value, period-like, optional
If periods is none, generated index will extend to first conforming
period on or just past end argument
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
tz : object, default None
Timezone for converting datetime64 data to Periods
dtype : str or PeriodDtype, default None
Examples
--------
>>> idx = PeriodIndex(year=year_arr, quarter=q_arr)
>>> idx2 = PeriodIndex(start='2000', end='2010', freq='A')
"""
_box_scalars = True
_typ = 'periodindex'
_attributes = ['name', 'freq']
_datetimelike_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',
'weekofyear', 'week', 'dayofweek', 'weekday',
'dayofyear', 'quarter', 'qyear', 'freq',
'days_in_month', 'daysinmonth',
'to_timestamp', 'asfreq', 'start_time', 'end_time',
'is_leap_year']
_is_numeric_dtype = False
_infer_as_myclass = True
freq = None
__eq__ = _period_index_cmp('__eq__')
__ne__ = _period_index_cmp('__ne__', nat_result=True)
__lt__ = _period_index_cmp('__lt__')
__gt__ = _period_index_cmp('__gt__')
__le__ = _period_index_cmp('__le__')
__ge__ = _period_index_cmp('__ge__')
def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None,
periods=None, copy=False, name=None, tz=None, dtype=None,
**kwargs):
if periods is not None:
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
raise ValueError('Periods must be a number, got %s' %
str(periods))
if name is None and hasattr(data, 'name'):
name = data.name
if dtype is not None:
dtype = pandas_dtype(dtype)
if not is_period_dtype(dtype):
raise ValueError('dtype must be PeriodDtype')
if freq is None:
freq = dtype.freq
elif freq != dtype.freq:
msg = 'specified freq and dtype are different'
raise IncompatibleFrequency(msg)
if data is None:
if ordinal is not None:
data = np.asarray(ordinal, dtype=np.int64)
else:
data, freq = cls._generate_range(start, end, periods,
freq, kwargs)
else:
ordinal, freq = cls._from_arraylike(data, freq, tz)
data = np.array(ordinal, dtype=np.int64, copy=copy)
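# The constructor is truncated here; in pandas of this vintage it typically
# ends by boxing the ordinals (an assumption, not part of the original snippet):
return cls._simple_new(data, name=name, freq=freq)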
import numpy as np
import pickle
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
def get_barycentric_coords(tensor):
"""
:param tensor: 3x3 anisotropic Reynolds stress tensor
:return: x and y barycentric coordinates
"""
# Compute barycentric coordinates from the eigenvalues of a 3x3 matrix
eigenvalues_RST = np.linalg.eigvals(tensor)
eigenvalues_RST = np.flip(np.sort(eigenvalues_RST))
# Barycentric coordinates
c1 = eigenvalues_RST[0] - eigenvalues_RST[1]
c2 = 2. * (eigenvalues_RST[1] - eigenvalues_RST[2])
c3 = 3. * eigenvalues_RST[2] + 1.
check_bary = c1 + c2 + c3 # should sum up to 1
# Define the corners
xi1 = 1.2
eta1 = -np.sqrt(3.) / 2.
xi2 = -0.8
eta2 = -np.sqrt(3.) / 2.
xi3 = 0.2
eta3 = np.sqrt(3.) / 2.
x_bary = c1 * xi1 + c2 * xi2 + c3 * xi3
y_bary = c1 * eta1 + c2 * eta2 + c3 * eta3
return x_bary, y_bary, eigenvalues_RST
def get_barycentric_coords2(tensor):
"""
:param tensor: 3x3 anisotropic Reynolds stress tensor
:return: x and y barycentric coordinates, new realizable tensor
"""
# Compute barycentric coordinates from the eigenvalues of a 3x3 matrix
eigenvalues_RST_unsorted = np.linalg.eigvals(tensor)
# Sort eigenvalues based on magnitude
eig_sorted = np.flip(np.argsort(eigenvalues_RST_unsorted))
eigenvalues_RST = eigenvalues_RST_unsorted[eig_sorted]
# Barycentric coordinates
c1 = eigenvalues_RST[0] - eigenvalues_RST[1]
c2 = 2. * (eigenvalues_RST[1] - eigenvalues_RST[2])
c3 = 3. * eigenvalues_RST[2] + 1.
check_bary = c1 + c2 + c3 # should sum up to 1
# Define the corners
xi1 = 1.2
eta1 = -np.sqrt(3.) / 2.
xi2 = -0.8
eta2 = -np.sqrt(3.) / 2.
xi3 = 0.2
eta3 = np.sqrt(3.) / 2.
x_bary = c1 * xi1 + c2 * xi2 + c3 * xi3
y_bary = c1 * eta1 + c2 * eta2 + c3 * eta3
if y_bary < eta1:
# print('\t apply realizability filter')
x_new = x_bary + (xi3 - x_bary) / (eta3 - y_bary) * (eta1 - y_bary)
y_new = eta1
# Solve linear system a beta = y
a = np.array([[xi1, -xi1 + 2*xi2, -2*xi2+3*xi3],
[eta1, -eta1 + 2*eta2, -2*eta2+3*eta3],
[1., 1., 1.]])
y = np.array([[x_new - xi3], [y_new - eta3], [0.]])
bary_realizable = np.reshape(np.linalg.solve(a, y), [3])
c1 = bary_realizable[0] - bary_realizable[1]
c2 = 2. * (bary_realizable[1] - bary_realizable[2])
c3 = 3. * bary_realizable[2] + 1.
check_bary = c1 + c2 + c3 # should sum up to 1
# Define the corners
xi1 = 1.2
eta1 = -np.sqrt(3.) / 2.
xi2 = -0.8
eta2 = -np.sqrt(3.) / 2.
xi3 = 0.2
eta3 = np.sqrt(3.) / 2.
x_bary = c1 * xi1 + c2 * xi2 + c3 * xi3
y_bary = c1 * eta1 + c2 * eta2 + c3 * eta3
else:
# Enforce general realizability -> sum eigenvalues = 0
# Solve linear system a beta = y
a = np.array([[xi1, -xi1 + 2 * xi2, -2 * xi2 + 3 * xi3],
[eta1, -eta1 + 2 * eta2, -2 * eta2 + 3 * eta3],
[1., 1., 1.]])
y = np.array([[x_bary - xi3], [y_bary - eta3], [0.]])
bary_realizable = np.reshape(np.linalg.solve(a, y), [3])
# Compute new tensor with the realizable barycentric coordinates and eigenvalues
eig_sorted_reverse = []
for i in eigenvalues_RST_unsorted:
eig_sorted_reverse.append(np.where(eigenvalues_RST == i)[0][0])
tensor_eigvect = np.linalg.eig(tensor)[1]
vectors = np.vstack([tensor_eigvect[0], tensor_eigvect[1], tensor_eigvect[2]])
bary_realizable_sorted = bary_realizable[eig_sorted_reverse]
tensor_new = np.dot(vectors, np.dot(np.diag(bary_realizable_sorted), np.linalg.inv(vectors)))
return x_bary, y_bary, tensor_new
def get_barycentric_color(x_bary, y_bary):
# Define the corners
xi1 = 1.2
eta1 = -np.sqrt(3.) / 2.
xi2 = -0.8
eta2 = -np.sqrt(3.) / 2.
xi3 = 0.2
eta3 = np.sqrt(3.) / 2.
# Set color range and colormap
steps = 900
phi_range = np.linspace(0, -2. * np.pi, steps)
norm = plt.Normalize()
colors = plt.cm.hsv(norm(phi_range))
# Determine centroid of barycentric map in [x, y]
centroid = [xi3, eta3 - (xi1 - xi2) * np.sqrt(3.) / 3.]
# Determine polar coordinates of the input bary x and y
radius = np.sqrt((x_bary - centroid[0])**2 + (y_bary - centroid[1])**2)
delta_phi_1C = np.arctan2(eta1 - centroid[1], xi1 - centroid[0])
phi = np.arctan2(y_bary - centroid[1], x_bary - centroid[0])
# Correct for angles in top half of bary map
if phi >= 0.:
phi = - (2. * np.pi - phi)
# set phi zero equal to the angle of the 1C corner
phi = phi - delta_phi_1C
# Correct for angles between phi= 0 and the 1C corner
if phi >= 0.:
phi = - (2. * np.pi - phi)
color_index = steps - np.searchsorted(np.flip(phi_range), phi, side="left") - 1
# Determine reference radius
if -120./180. * np.pi < phi <= 0.:
lhs = np.array([[(y_bary - centroid[1]) / (x_bary - centroid[0]), -1.],
[(eta1 - eta2) / (xi1 - xi2), -1.]])
rhs = np.array([[-centroid[1] + centroid[0] * (y_bary - centroid[1]) / (x_bary - centroid[0])],
[-eta2 + xi2 * (eta1 - eta2) / (xi1 - xi2)]])
coords_side = np.linalg.solve(lhs, rhs)
max_radius = np.sqrt((coords_side[0] - centroid[0])**2 + (coords_side[1] - centroid[1])**2)
elif -240./180. * np.pi < phi <= -120./180. * np.pi:
lhs = np.array([[(y_bary - centroid[1]) / (x_bary - centroid[0]), -1.],
[(eta3 - eta2) / (xi3 - xi2), -1.]])
rhs = np.array([[-centroid[1] + centroid[0] * (y_bary - centroid[1]) / (x_bary - centroid[0])],
[-eta2 + xi2 * (eta3 - eta2) / (xi3 - xi2)]])
coords_side = np.linalg.solve(lhs, rhs)
max_radius = np.sqrt((coords_side[0] - centroid[0]) ** 2 + (coords_side[1] - centroid[1]) ** 2)
else:
lhs = np.array([[(y_bary - centroid[1]) / (x_bary - centroid[0]), -1.],
[(eta1 - eta3) / (xi1 - xi3), -1.]])
rhs = np.array([[-centroid[1] + centroid[0] * (y_bary - centroid[1]) / (x_bary - centroid[0])],
[-eta3 + xi3 * (eta1 - eta3) / (xi1 - xi3)]])
coords_side = np.linalg.solve(lhs, rhs)
max_radius = np.sqrt((coords_side[0] - centroid[0]) ** 2 + (coords_side[1] - centroid[1]) ** 2)
# Select color
bary_colors = colors[color_index, :]
if radius / max_radius < 1.0:
bary_colors[3] *= (radius / max_radius)**(1./3)
else:
max_radius = (eta3 - centroid[1])
problem = radius / (eta3 - centroid[1])
# print('Radius outside barycentric map')
# Return colors as [R, B, G, alpha], because that is what matplotlib needs
return bary_colors[[0, 2, 1, 3]]
def area(x1, y1, x2, y2, x3, y3):
return abs((x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)) / 2.0)
def isInside(x1, y1, x2, y2, x3, y3, x, y):
# Calculate area of triangle ABC
A = area(x1, y1, x2, y2, x3, y3)
# Calculate area of triangle PBC
A1 = area(x, y, x2, y2, x3, y3)
# Calculate area of triangle PAC
A2 = area(x1, y1, x, y, x3, y3)
# Calculate area of triangle PAB
A3 = area(x1, y1, x2, y2, x, y)
# Check if sum of A1, A2 and A3
# is same as A
if np.isclose(A, A1 + A2 + A3):  # float-safe comparison instead of exact equality
return True
else:
return False
# Plot a sample of the barycentric map
plot_map = True
if plot_map:
# Sample barycentric map
# Define the corners
xi1 = 1.2
eta1 = -np.sqrt(3.) / 2.
xi2 = -0.8
eta2 = -np.sqrt(3.) / 2.
xi3 = 0.2
eta3 = np.sqrt(3.) / 2.
x_sample = np.random.uniform(xi2, xi1, 10000)
y_sample = np.random.uniform(eta1, eta3, 10000)
xy_bary = []
for i in range(1000):
check_inside = isInside(xi2, eta2, xi3, eta3, xi1, eta1, x_sample[i], y_sample[i])
if check_inside:
xy_bary.append([x_sample[i], y_sample[i]])
# add the corners
xy_bary.append([xi1, eta1])
xy_bary.append([xi2, eta2])
xy_bary.append([xi3, eta3])
res = []
for i in range(len(xy_bary)):
color_i = get_barycentric_color(xy_bary[i][0], xy_bary[i][1])
res.append(color_i)
res = np.array(res)
plt.figure(dpi=150)
points = np.fliplr(np.array(xy_bary))
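# The demo is cut off here; a hedged sketch of the intended scatter plot of the
# sampled barycentric points coloured by `res` (an assumption, not the original code):
plt.scatter(points[:, 1], points[:, 0], c=res, s=12)
plt.gca().set_aspect('equal')
plt.show()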
#! /usr/bin/env python
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import scipy.interpolate as spi
from astropy.io import fits as pf
import matplotlib.pyplot as plt
import multiprocessing as mp
from ..lib import manageevent as me
from ..lib import sort_nicely as sn
from ..lib import centroid, suntimecorr, utc_tt
import time, os, sys, shutil
import hst_scan as hst  # re-enabled: hst.read, hst.groupFrames, etc. are used below (adjust the import path to the package layout if needed)
from importlib import reload
reload(hst)
def reduceWFC3(eventname, eventdir, madVariable=False, madVarSet=False, isplots=False):
'''
Reduces data images and calculates optimal spectra.
Parameters
----------
isplots : Set True to produce plots
Returns
-------
None
Remarks
-------
Requires eventname_params file to initialize event object
Steps
-----
1. Read in all data frames and header info
2. Record JD, scan direction, etc
3. Group images by frame, batch, and orbit number
4. Calculate centroid of direct image(s)
5. Calculate trace and 1D+2D wavelength solutions
6. Make flats, apply flat field correction
7. Manually mask regions
8. Apply light-time correction
9. Compute difference frames
10. Compute scan length
11. Perform outlier rejection of BG region
12. Background subtraction
13. Compute 2D drift, apply rough (integer-pixel) correction
14. Full-frame outlier rejection for time-series stack of NDRs
15. Apply sub-pixel 2D drift correction
16. Extract spectrum through summation
17. Compute median frame
18. Optimal spectral extraction
19. Save results, plot figures
History
-------
Written by <NAME> January 2017
'''
evpname = eventname + '_params'
#exec 'import ' + evpname + ' as evp' in locals()
#exec('import ' + evpname + ' as evp', locals())
exec('import ' + evpname + ' as evp', globals())
reload(evp)
t0 = time.time()
# Initialize event object
# All parameters are specified in this file
ev = evp.event_init()
try:
aux = evp.aux_init()
except:
print("Need to update event file to include auxiliary object.")
return
ev.eventdir = eventdir
# Create directories
if not os.path.exists(ev.eventdir):
os.makedirs(ev.eventdir)
if not os.path.exists(ev.eventdir+"/figs"):
os.makedirs(ev.eventdir+"/figs")
# Copy ev_params file
shutil.copyfile(evpname + '.py', ev.eventdir+'/'+evpname+'.py')
# Reset attribute for MAD variable (added by <NAME>)
if madVariable:
setattr(ev,madVariable,madVarSet)
ev.madVarStr = madVariable
ev.madVariable = madVarSet
# Object
ev.obj_list = [] #Do not rename ev.obj_list!
if ev.objfile == None:
#Retrieve files within specified range
for i in range(ev.objstart,ev.objend):
ev.obj_list.append(ev.loc_sci + ev.filebase + str(i).zfill(4) + ".fits")
elif ev.objfile == 'all':
#Retrieve all files from science directory
for fname in os.listdir(ev.loc_sci):
ev.obj_list.append(ev.loc_sci +'/'+ fname)
ev.obj_list = sn.sort_nicely(ev.obj_list)
else:
#Retrieve filenames from list
files = np.genfromtxt(ev.objfile, dtype=str, comments='#')
for fname in files:
ev.obj_list.append(ev.loc_sci +'/'+ fname)
# handle = open(ev.objfile)
# for line in handle:
# print(line)
# ev.obj_list.append(ev.loc_sci + line)
# handle.close()
ev.n_files = len(ev.obj_list)
#Determine image size and filter/grism
hdulist = pf.open(ev.obj_list[0].rstrip())
nx = hdulist['SCI',1].header['NAXIS1']
ny = hdulist['SCI',1].header['NAXIS2']
ev.grism = hdulist[0].header['FILTER']
ev.detector = hdulist[0].header['DETECTOR']
ev.flatoffset = [[-1*hdulist['SCI',1].header['LTV2'], -1*hdulist['SCI',1].header['LTV1']]]
n_reads = hdulist['SCI',1].header['SAMPNUM']
hdulist.close()
# Record JD and exposure times
print('Reading data & headers, recording JD and exposure times...')
ywindow = ev.ywindow[0]
xwindow = ev.xwindow[0]
subny = ywindow[1] - ywindow[0]
subnx = xwindow[1] - xwindow[0]
subdata = np.zeros((ev.n_files,n_reads,subny,subnx))
suberr = np.zeros((ev.n_files,n_reads,subny,subnx))
data_mhdr = []
data_hdr = []
ev.jd = np.zeros(ev.n_files)
ev.exptime = np.zeros(ev.n_files)
for m in range(ev.n_files):
data, err, hdr, mhdr = hst.read(ev.obj_list[m].rstrip())
subdata[m] = data[0,:,ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]]
suberr [m] = err [0,:,ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]]
data_mhdr.append(mhdr[0])
data_hdr.append(hdr[0])
ev.jd[m] = 2400000.5 + 0.5*(data_mhdr[m]['EXPSTART'] + data_mhdr[m]['EXPEND'])
ev.exptime[m] = data_mhdr[m]['EXPTIME']
# Assign scan direction
ev.scandir = np.zeros(ev.n_files, dtype=int)
ev.n_scan0 = 0
ev.n_scan1 = 0
try:
scan0 = data_mhdr[0]['POSTARG2']
scan1 = data_mhdr[1]['POSTARG2']
for m in range(ev.n_files):
if data_mhdr[m]['POSTARG2'] == scan0:
ev.n_scan0 += 1
elif data_mhdr[m]['POSTARG2'] == scan1:
ev.scandir[m] = 1
ev.n_scan1 += 1
else:
print('WARNING: Unknown scan direction for file ' + str(m) + '.')
print("# of files in scan direction 0: " + str(ev.n_scan0))
print("# of files in scan direction 1: " + str(ev.n_scan1))
except:
ev.n_scan0 = ev.n_files
print("Unable to determine scan direction, assuming unidirectional.")
# Group frames into frame, batch, and orbit number
ev.framenum, ev.batchnum, ev.orbitnum = hst.groupFrames(ev.jd)
# Determine read noise and gain
ev.readNoise = np.mean((data_mhdr[0]['READNSEA'],
data_mhdr[0]['READNSEB'],
data_mhdr[0]['READNSEC'],
data_mhdr[0]['READNSED']))
print('Read noise: ' + str(ev.readNoise))
print('Gain: ' + str(ev.gain))
#ev.v0 = (ev.readNoise/ev.gain)**2 #Units of ADU
ev.v0 = ev.readNoise**2 #Units of electrons
# Calculate centroid of direct image(s)
ev.img_list = []
if isinstance(ev.directfile, str) and ev.directfile.endswith('.fits'):
ev.img_list.append(ev.loc_cal + ev.directfile)
else:
#Retrieve filenames from list
handle = open(ev.directfile)
for line in handle:
ev.img_list.append(ev.loc_cal + line)
handle.close()
ev.n_img = len(ev.img_list)
ev.centroid, ev.directim = hst.imageCentroid(ev.img_list, ev.centroidguess, ev.centroidtrim, ny, ev.obj_list[0])
"""
# Calculate theoretical centroids along spatial scan direction
ev.centroids = []
for j in range(ev.n_img):
ev.centroids.append([])
for i in range(ev.n_spec):
# Can assume that scan direction is only in y direction (no x component)
# because we will apply drift correction to make it so
ev.centroids[j].append([np.zeros(subny)+ev.centroid[j][0],ev.centroid[j][1]])
# Calculate trace
print("Calculating 2D trace and wavelength assuming " + ev.grism + " filter/grism...")
ev.xrange = []
for i in range(ev.n_spec):
ev.xrange.append(np.arange(ev.xwindow[i][0],ev.xwindow[i][1]))
ev.trace2d = []
ev.wave2d = []
for j in range(ev.n_img):
ev.trace2d.append([])
ev.wave2d.append([])
for i in range(ev.n_spec):
ev.trace2d[j].append(hst.calcTrace(ev.xrange[i], ev.centroids[j][i], ev.grism))
ev.wave2d[j].append(hst.calibrateLambda(ev.xrange[i], ev.centroids[j][i], ev.grism)/1e4) #wavelength in microns
if ev.detector == 'IR':
print("Calculating slit shift values using last frame...")
i = 0 #Use first spectrum
j = -1 #Use last image
#spectrum = subdata[j]
spectrum = pf.getdata(ev.obj_list[j])
ev.slitshift, ev.shift_values, ev.yfit = hst.calc_slitshift2(spectrum, ev.xrange[i], ev.ywindow[i], ev.xwindow[i])
ev.wavegrid = ev.wave2d
ev.wave = []
for j in range(ev.n_img):
ev.wave.append([])
for i in range(ev.n_spec):
ev.wave[j].append(np.mean(ev.wavegrid[j][i],axis=0))
else:
# Assume no slitshift for UVIS
ev.yfit = range(ev.ywindow[0][1] - ev.ywindow[0][0])
ev.slitshift = np.zeros(ev.ywindow[0][1] - ev.ywindow[0][0])
ev.shift_values = np.zeros(len(ev.yfit))
# Make list of master flat field frames
subflat = np.ones((ev.n_img,ev.n_spec,subny,subnx))
flatmask = np.ones((ev.n_img,ev.n_spec,subny,subnx))
if ev.flatfile == None:
print('No flat frames found.')
flat_hdr = None
flat_mhdr = None
else:
print('Loading flat frames...')
for j in range(ev.n_img):
tempflat, tempmask = hst.makeflats(ev.flatfile, ev.wavegrid[j], ev.xwindow, ev.ywindow, ev.flatoffset, ev.n_spec, ny, nx, sigma=ev.flatsigma)
for i in range(ev.n_spec):
subflat[j][i] = tempflat[i][ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]]
flatmask[j][i] = tempmask[i][ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]]
# Manually mask regions [specnum, colstart, colend]
if hasattr(ev, 'manmask'):
print("\rMasking manually identified bad pixels.")
for j in range(ev.n_img):
for i in range(len(ev.manmask)):
ind, colstart, colend, rowstart, rowend = ev.manmask[i]
n = ind % ev.n_spec
flatmask[j][n][rowstart:rowend,colstart:colend] = 0 #ev.window[:,ind][0]:ev.window[:,ind][1]
# Calculate reduced image
for m in range(ev.n_files):
#Select appropriate flat, mask, and slitshift
if ev.n_img == (np.max(ev.orbitnum)+1):
j = int(ev.orbitnum[m])
else:
j = 0
for n in range(n_reads):
subdata[m][n] /= subflat[j][0]
"""
# Read in drift2D from previous iteration
# np.save("drift2D.npy",ev.drift2D)
#try:
# drift2D = np.load("drift2D.npy")
#except:
# print("drift2D.npy not found.")
drift2D = np.zeros((ev.n_files,n_reads-1,2))
# Calculate centroids for each grism frame
ev.centroids = np.zeros((ev.n_files,n_reads-1,2))
for m in range(ev.n_files):
for n in range(n_reads-1):
ev.centroids[m,n] = np.array([ev.centroid[0][0]+drift2D[m,n,0],
ev.centroid[0][1]+drift2D[m,n,1]])
#ev.centroids[m,n] = np.array([np.zeros(subny)+ev.centroid[0][0]+drift2D[m,n,0],
# np.zeros(subnx)+ev.centroid[0][1]+drift2D[m,n,1]])
# Calculate trace
print("Calculating 2D trace and wavelength assuming " + ev.grism + " filter/grism...")
ev.xrange = np.arange(ev.xwindow[0][0],ev.xwindow[0][1])
trace2d = np.zeros((ev.n_files,n_reads-1,subny,subnx))
wave2d = np.zeros((ev.n_files,n_reads-1,subny,subnx))
for m in range(ev.n_files):
for n in range(n_reads-1):
trace2d[m,n] = hst.calcTrace(ev.xrange, ev.centroids[m,n], ev.grism)
wave2d[m,n] = hst.calibrateLambda(ev.xrange, ev.centroids[m,n], ev.grism)/1e4 #wavelength in microns
# Assume no slitshift
ev.yfit = range(ev.ywindow[0][1] - ev.ywindow[0][0])
ev.slitshift = np.zeros(ev.ywindow[0][1] - ev.ywindow[0][0])
ev.shift_values = np.zeros(len(ev.yfit))
ev.wave = np.mean(wave2d, axis=2)
print("Wavelength Range: %.3f - %.3f" % (np.min(ev.wave), np.max(ev.wave)))
#iwmax = np.where(ev.wave[0][0]>1.65)[0][0]
#print(ev.wave[0,0])
#print(ev.wave[0,1])
#print(ev.centroids)
# Make list of master flat field frames
subflat = np.ones((ev.n_files,subny,subnx))
flatmask = np.ones((ev.n_files,subny,subnx))
if ev.flatfile == None:
print('No flat frames found.')
flat_hdr = None
flat_mhdr = None
else:
print('Loading flat frames...')
print(ev.flatfile)
for m in range(ev.n_files):
tempflat, tempmask = hst.makeflats(ev.flatfile, [np.mean(wave2d[m],axis=0)], ev.xwindow, ev.ywindow, ev.flatoffset, ev.n_spec, ny, nx, sigma=ev.flatsigma)
#tempflat = [pf.getdata(ev.flatfile)]
#tempmask = [np.ones(tempflat[0].shape)]
subflat[m] = tempflat[0][ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]]
flatmask[m] = tempmask[0][ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]]
# Manually mask regions [specnum, colstart, colend]
if hasattr(ev, 'manmask'):
print("\rMasking manually identified bad pixels.")
for m in range(ev.n_files):
for i in range(len(ev.manmask)):
ind, colstart, colend, rowstart, rowend = ev.manmask[i]
flatmask[m][rowstart:rowend,colstart:colend] = 0
#FINDME: Change flat field
#subflat[:,:,28] /= 1.015
#subflat[:,:,50] /= 1.015
#subflat[:,:,70] *= 1.01
"""
plt.figure(2)
plt.clf()
plt.imshow(np.copy(subdata[10,-1]),origin='lower',aspect='auto',
vmin=0,vmax=25000,cmap=plt.cm.RdYlBu_r)
plt.ylim(65,95)
plt.show()
"""
# Calculate reduced image
subdata /= subflat[:,np.newaxis]
#subdata /= np.mean(subflat,axis=0)[np.newaxis,np.newaxis]
"""
# FINDME
# Perform self flat field calibration
# drift2D_int = np.round(edrift2D,0)
# Identify frames outside SAA
iNoSAA = np.where(np.round(drift2D[:,0,0],0)==0)[0]
# Select subregion with lots of photons
normdata = np.copy(subdata[iNoSAA,-1,69:91,15:147])
normmask = flatmask[iNoSAA,69:91,15:147]
normdata[np.where(normmask==0)] = 0
# Normalize flux in each row to remove ramp/transit/variable scan rate
normdata /= np.sum(normdata,axis=2)[:,:,np.newaxis]
# Divide by mean spectrum to remove wavelength dependence
normdata /= np.mean(normdata,axis=(0,1))[np.newaxis,np.newaxis,:]
# Average frames to get flat-field correction
flat_norm = np.mean(normdata,axis=0)
flat_norm[np.where(np.mean(normmask,axis=0)<1)] = 1
'''
normdata /= np.mean(normdata,axis=(1,2))[:,np.newaxis,np.newaxis]
flat_window = np.median(normdata,axis=0)
medflat = np.median(flat_window, axis=0)
flat_window /= medflat
flat_window /= np.median(flat_window,axis=1)[:,np.newaxis]
flat_norm = flat_window/np.mean(flat_window)
'''
plt.figure(3)
plt.clf()
plt.imshow(np.copy(subdata[10,-1]),origin='lower',aspect='auto',
vmin=0,vmax=25000,cmap=plt.cm.RdYlBu_r)
plt.ylim(65,95)
ff = np.load('ff.npy')
subff = ff[ywindow[0]:ywindow[1],xwindow[0]:xwindow[1]]
#subdata[:,:,69:91,15:147] /= flat_norm
subdata /= subff
plt.figure(4)
plt.clf()
plt.imshow(subdata[10,-1],origin='lower',aspect='auto',vmin=0,vmax=25000,cmap=plt.cm.RdYlBu_r)
plt.ylim(65,95)
plt.figure(1)
plt.clf()
plt.imshow(flat_norm,origin='lower',aspect='auto')
plt.colorbar()
plt.tight_layout()
plt.pause(0.1)
ev.flat_norm = flat_norm
return ev
"""
"""
if isplots:
# Plot normalized flat fields
plt.figure(1000, figsize=(12,8))
plt.clf()
plt.suptitle('Master Flat Frames')
for i in range(ev.n_spec):
for j in range(ev.n_img):
#plt.subplot(ev.n_spec,ev.n_img,i*ev.n_img+j+1)
plt.subplot(2,np.ceil(ev.n_img/2.),i*ev.n_img+j+1)
plt.title(str(j) +','+ str(i))
plt.imshow(subflat[j][i], origin='lower')
plt.tight_layout()
plt.savefig(ev.eventdir + '/figs/fig1000-Flats.png')
# Plot masks
plt.figure(1001, figsize=(12,8))
plt.clf()
plt.suptitle('Mask Frames')
for i in range(ev.n_spec):
for j in range(ev.n_img):
#plt.subplot(ev.n_spec,ev.n_img,i*ev.n_img+j+1)
plt.subplot(2,np.ceil(ev.n_img/2.),i*ev.n_img+j+1)
plt.title(str(j) +','+ str(i))
plt.imshow(flatmask[j][i], origin='lower')
plt.tight_layout()
plt.savefig(ev.eventdir + '/figs/fig1001-Masks.png')
if ev.detector == 'IR':
# Plot Slit shift
plt.figure(1004, figsize=(12,8))
plt.clf()
plt.suptitle('Model Slit Tilts/Shifts')
plt.plot(ev.shift_values, ev.yfit, '.')
plt.plot(ev.slitshift, range(ev.ywindow[0][0],ev.ywindow[0][1]), 'r-', lw=2)
plt.xlim(-1,1)
plt.savefig(ev.eventdir + '/figs/fig1004-SlitTilt.png')
plt.pause(0.1)
"""
ev.ra = data_mhdr[0]['RA_TARG']*np.pi/180
ev.dec = data_mhdr[0]['DEC_TARG']*np.pi/180
if ev.horizonsfile != None:
# Apply light-time correction, convert to BJD_TDB
# Horizons file created for HST around time of observations
print("Converting times to BJD_TDB...")
ev.bjd_corr = suntimecorr.suntimecorr(ev.ra, ev.dec, ev.jd, ev.horizonsfile)
bjdutc = ev.jd + ev.bjd_corr/86400.
ev.bjdtdb = utc_tt.utc_tt(bjdutc,ev.leapdir)
print('BJD_corr range: ' + str(ev.bjd_corr[0]) + ', ' + str(ev.bjd_corr[-1]))
else:
print("No Horizons file found.")
ev.bjdtdb = ev.jd
if n_reads > 1:
ev.n_reads = n_reads
# Subtract pairs of subframes
diffdata = np.zeros((ev.n_files,ev.n_reads-1,subny,subnx))
differr = np.zeros((ev.n_files,ev.n_reads-1,subny,subnx))
for m in range(ev.n_files):
for n in range(n_reads-1):
#diffmask[m,n] = np.copy(flatmask[j][0])
#diffmask[m,n][np.where(suberr[m,n ] > diffthresh*np.std(suberr[m,n ]))] = 0
#diffmask[m,n][np.where(suberr[m,n+1] > diffthresh*np.std(suberr[m,n+1]))] = 0
diffdata[m,n] = subdata[m,n+1]-subdata[m,n]
differr[m,n] = np.sqrt(suberr[m,n+1]**2 + suberr[m,n]**2)
#!/usr/bin/env python3
import gym
import numpy as np
from itertools import product
import matplotlib.pyplot as plt
def print_policy(Q, env):
""" This is a helper function to print a nice policy from the Q function"""
moves = [u'←', u'↓', u'→', u'↑']
if not hasattr(env, 'desc'):
env = env.env
dims = env.desc.shape
policy = np.chararray(dims, unicode=True)
policy[:] = ' '
for s in range(len(Q)):
idx = np.unravel_index(s, dims)
policy[idx] = moves[np.argmax(Q[s])]
if env.desc[idx] in ['H', 'G']:
policy[idx] = u'·'
print('\n'.join([''.join([u'{:2}'.format(item) for item in row])
for row in policy]))
def plot_V(Q, env):
""" This is a helper function to plot the state values from the Q function"""
fig = plt.figure()
if not hasattr(env, 'desc'):
env = env.env
dims = env.desc.shape
V = np.zeros(dims)
for s in range(len(Q)):
idx = np.unravel_index(s, dims)
V[idx] = np.max(Q[s])
if env.desc[idx] in ['H', 'G']:
V[idx] = 0.
plt.imshow(V, origin='upper',
extent=[0, dims[0], 0, dims[1]], vmin=.0, vmax=.6,
cmap=plt.cm.RdYlGn, interpolation='none')
for x, y in product(range(dims[0]), range(dims[1])):
plt.text(y + 0.5, dims[0] - x - 0.5, '{:.3f}'.format(V[x, y]),
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
def plot_Q(Q, env):
""" This is a helper function to plot the Q function """
from matplotlib import colors, patches
fig = plt.figure()
ax = fig.gca()
if not hasattr(env, 'desc'):
env = env.env
dims = env.desc.shape
up = np.array([[0, 1], [0.5, 0.5], [1, 1]])
down = np.array([[0, 0], [0.5, 0.5], [1, 0]])
left = np.array([[0, 0], [0.5, 0.5], [0, 1]])
right = np.array([[1, 0], [0.5, 0.5], [1, 1]])
tri = [left, down, right, up]
pos = [[0.2, 0.5], [0.5, 0.2], [0.8, 0.5], [0.5, 0.8]]
cmap = plt.cm.RdYlGn
norm = colors.Normalize(vmin=.0, vmax=.6)
ax.imshow(np.zeros(dims), origin='upper', extent=[0, dims[0], 0, dims[1]], vmin=.0, vmax=.6, cmap=cmap)
ax.grid(which='major', color='black', linestyle='-', linewidth=2)
for s in range(len(Q)):
idx = np.unravel_index(s, dims)
x, y = idx
if env.desc[idx] in ['H', 'G']:
ax.add_patch(patches.Rectangle((y, 3 - x), 1, 1, color=cmap(.0)))
plt.text(y + 0.5, dims[0] - x - 0.5, '{:.2f}'.format(.0),
horizontalalignment='center',
verticalalignment='center')
continue
for a in range(len(tri)):
ax.add_patch(patches.Polygon(tri[a] + np.array([y, 3 - x]), color=cmap(Q[s][a])))
plt.text(y + pos[a][0], dims[0] - 1 - x + pos[a][1], '{:.2f}'.format(Q[s][a]),
horizontalalignment='center', verticalalignment='center',
fontsize=9, fontweight=('bold' if Q[s][a] == np.max(Q[s]) else 'normal'))
plt.xticks([])
plt.yticks([])
def sarsa(env, name, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)):
Q = np.zeros((env.observation_space.n, env.action_space.n))
# TODO: implement the sarsa algorithm
Q.fill(0.5)
# make terminal states zero
Q[((env.desc == b'H') | (env.desc == b'G')).flatten(), :] = 0
average_train_len = list()
current_average = 0
for i in range(num_ep):
state = env.reset()
done = False
if np.random.uniform(0, 1) <= epsilon:
action = env.action_space.sample()
else:
action = np.argmax(Q[state, :])
counter = 0
while not done:
new_state, reward, done, _ = env.step(action)
            if np.random.uniform(0, 1) <= epsilon:
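                # --- hedged sketch: the original file is truncated at this point. The lines below
                # --- complete the loop with the textbook SARSA update (epsilon-greedy next action
                # --- plus temporal-difference step); they are an assumption, not recovered code.
                next_action = env.action_space.sample()
            else:
                next_action = np.argmax(Q[new_state, :])
            # TD update: Q(s,a) <- Q(s,a) + alpha * (r + gamma * Q(s',a') - Q(s,a))
            Q[state, action] += alpha * (reward + gamma * Q[new_state, next_action] - Q[state, action])
            state, action = new_state, next_action
            counter += 1
        # running average of episode lengths (the bookkeeping lists above suggest this intent)
        current_average += counter
        average_train_len.append(current_average / (i + 1))
    return Q

# Example usage (gym environment id is an assumption; adjust to the installed gym version):
# env = gym.make('FrozenLake-v0')
# Q = sarsa(env, 'FrozenLake')
# print_policy(Q, env); plot_V(Q, env); plot_Q(Q, env); plt.show()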
"""
Filename: visualization.py
Purpose: Set of go-to plotting functions
Author: <NAME>
Date created: 28.11.2018
Possible problems:
1.
"""
import os
import numpy as np
from tractor.galaxy import ExpGalaxy
from tractor import EllipseE
from tractor.galaxy import ExpGalaxy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm, SymLogNorm
from matplotlib.patches import Ellipse
from matplotlib.patches import Rectangle
from skimage.segmentation import find_boundaries
from astropy.visualization import hist
from scipy import stats
import config as conf
import matplotlib.cm as cm
import random
from time import time
from astropy.io import fits
import logging
logger = logging.getLogger('farmer.visualization')
# Random discrete color generator
colors = cm.rainbow(np.linspace(0, 1, 1000))
cidx = np.arange(0, 1000)
random.shuffle(cidx)
colors = colors[cidx]
def plot_background(brick, idx, band=''):
fig, ax = plt.subplots(figsize=(20,20))
vmin, vmax = brick.background_images[idx].min(), brick.background_images[idx].max()
vmin = -vmax
img = ax.imshow(brick.background_images[idx], cmap='RdGy', norm=SymLogNorm(linthresh=0.03))
# plt.colorbar(img, ax=ax)
out_path = os.path.join(conf.PLOT_DIR, f'B{brick.brick_id}_{band}_background.pdf')
ax.axis('off')
ax.margins(0,0)
fig.savefig(out_path, dpi = 300, overwrite=True, pad_inches=0.0)
plt.close()
logger.info(f'Saving figure: {out_path}')
def plot_mask(brick, idx, band=''):
fig, ax = plt.subplots(figsize=(20,20))
img = ax.imshow(brick.masks[idx])
out_path = os.path.join(conf.PLOT_DIR, f'B{brick.brick_id}_{band}_mask.pdf')
ax.axis('off')
ax.margins(0,0)
fig.savefig(out_path, dpi = 300, overwrite=True, pad_inches=0.0)
plt.close()
logger.info(f'Saving figure: {out_path}')
def plot_brick(brick, idx, band=''):
fig, ax = plt.subplots(figsize=(20,20))
backlevel, noisesigma = brick.backgrounds[idx]
vmin, vmax = np.max([backlevel + noisesigma, 1E-5]), brick.images[idx].max()
# vmin, vmax = brick.images[idx].min(), brick.images[idx].max()
if vmin > vmax:
logger.warning(f'{band} brick not plotted!')
return
vmin = -vmax
norm = SymLogNorm(linthresh=0.03)
img = ax.imshow(brick.images[idx], cmap='RdGy', origin='lower', norm=norm)
# plt.colorbar(img, ax=ax)
out_path = os.path.join(conf.PLOT_DIR, f'B{brick.brick_id}_{band}_brick.pdf')
ax.axis('off')
ax.margins(0,0)
fig.savefig(out_path, dpi = 300, overwrite=True, pad_inches=0.0)
plt.close()
logger.info(f'Saving figure: {out_path}')
def plot_blob(myblob, myfblob):
fig, ax = plt.subplots(ncols=4, nrows=1+myfblob.n_bands, figsize=(5 + 5*myfblob.n_bands, 10), sharex=True, sharey=True)
back = myblob.backgrounds[0]
mean, rms = back[0], back[1]
noise = np.random.normal(mean, rms, size=myfblob.dims)
tr = myblob.solution_tractor
norm = LogNorm(np.max([mean + rms, 1E-5]), myblob.images.max(), clip='True')
img_opt = dict(cmap='Greys', norm=norm)
# img_opt = dict(cmap='RdGy', vmin=-5*rms, vmax=5*rms)
mmask = myblob.masks[0].copy()
mmask[mmask==1] = np.nan
ax[0, 0].imshow(myblob.images[0], **img_opt)
ax[0, 0].imshow(mmask, alpha=0.5, cmap='Greys')
ax[0, 1].imshow(myblob.solution_model_images[0] + noise, **img_opt)
ax[0, 2].imshow(myblob.images[0] - myblob.solution_model_images[0], cmap='RdGy', vmin=-5*rms, vmax=5*rms)
ax[0, 3].imshow(myblob.solution_chi_images[0], cmap='RdGy', vmin = -7, vmax = 7)
ax[0, 0].set_ylabel(f'Detection ({myblob.bands[0]})')
ax[0, 0].set_title('Data')
ax[0, 1].set_title('Model')
ax[0, 2].set_title('Data - Model')
ax[0, 3].set_title('$\chi$-map')
band = myblob.bands[0]
for j, src in enumerate(myblob.solution_catalog):
try:
mtype = src.name
except:
mtype = 'PointSource'
flux = src.getBrightness().getFlux(band)
chisq = myblob.solved_chisq[j]
topt = dict(color=colors[j], transform = ax[0, 3].transAxes)
ystart = 0.99 - j * 0.4
ax[0, 3].text(1.05, ystart - 0.1, f'{j}) {mtype}', **topt)
ax[0, 3].text(1.05, ystart - 0.2, f' F({band}) = {flux:4.4f}', **topt)
ax[0, 3].text(1.05, ystart - 0.3, f' $\chi^{2}$ = {chisq:4.4f}', **topt)
objects = myblob.bcatalog[j]
e = Ellipse(xy=(objects['x'], objects['y']),
width=6*objects['a'],
height=6*objects['b'],
angle=objects['theta'] * 180. / np.pi)
e.set_facecolor('none')
e.set_edgecolor('red')
ax[0, 0].add_artist(e)
try:
for i in np.arange(myfblob.n_bands):
back = myfblob.backgrounds[i]
mean, rms = back[0], back[1]
noise = np.random.normal(mean, rms, size=myfblob.dims)
tr = myfblob.solution_tractor
# norm = LogNorm(np.max([mean + rms, 1E-5]), myblob.images.max(), clip='True')
# img_opt = dict(cmap='Greys', norm=norm)
img_opt = dict(cmap='RdGy', vmin=-5*rms, vmax=5*rms)
ax[i+1, 0].imshow(myfblob.images[i], **img_opt)
ax[i+1, 1].imshow(myfblob.solution_model_images[i] + noise, **img_opt)
ax[i+1, 2].imshow(myfblob.images[i] - myfblob.solution_model_images[i], cmap='RdGy', vmin=-5*rms, vmax=5*rms)
ax[i+1, 3].imshow(myfblob.solution_chi_images[i], cmap='RdGy', vmin = -7, vmax = 7)
ax[i+1, 0].set_ylabel(myfblob.bands[i])
band = myfblob.bands[i]
for j, src in enumerate(myfblob.solution_catalog):
try:
mtype = src.name
except:
mtype = 'PointSource'
flux = src.getBrightness().getFlux(band)
chisq = myfblob.solution_chisq[j, i]
Nres = myfblob.n_residual_sources[i]
topt = dict(color=colors[j], transform = ax[i+1, 3].transAxes)
ystart = 0.99 - j * 0.4
ax[i+1, 3].text(1.05, ystart - 0.1, f'{j}) {mtype}', **topt)
ax[i+1, 3].text(1.05, ystart - 0.2, f' F({band}) = {flux:4.4f}', **topt)
ax[i+1, 3].text(1.05, ystart - 0.3, f' $\chi^{2}$ = {chisq:4.4f}', **topt)
if Nres > 0:
ax[i+1, 3].text(1.05, ystart - 0.4, f'{Nres} residual sources found!', **topt)
res_x = myfblob.residual_catalog[i]['x']
res_y = myfblob.residual_catalog[i]['y']
for x, y in zip(res_x, res_y):
ax[i+1, 3].scatter(x, y, marker='+', color='r')
for s, src in enumerate(myfblob.solution_catalog):
x, y = src.pos
color = colors[s]
for i in np.arange(1 + myfblob.n_bands):
for j in np.arange(4):
ax[i,j].plot([x, x], [y - 10, y - 5], c=color)
ax[i,j].plot([x - 10, x - 5], [y, y], c=color)
except:
logger.warning('Could not plot multiwavelength diagnostic figures')
[[ax[i,j].set(xlim=(0,myfblob.dims[1]), ylim=(0,myfblob.dims[0])) for i in np.arange(myfblob.n_bands+1)] for j in np.arange(4)]
#fig.suptitle(f'Solution for {blob_id}')
fig.subplots_adjust(wspace=0.01, hspace=0, right=0.8)
if myblob._is_itemblob:
sid = myblob.bcatalog['source_id'][0]
fig.savefig(os.path.join(conf.PLOT_DIR, f'{myblob.brick_id}_B{myblob.blob_id}_S{sid}.pdf'))
else:
fig.savefig(os.path.join(conf.PLOT_DIR, f'{myblob.brick_id}_B{myblob.blob_id}.pdf'))
plt.close()
def plot_srcprofile(blob, src, sid, bands=None):
if bands is None:
band = conf.MODELING_NICKNAME
nickname = conf.MODELING_NICKNAME
bidx = [0,]
bands = [band,]
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{conf.MODELING_NICKNAME}_srcprofile.pdf')
elif (len(bands) == 1) & (bands[0] == conf.MODELING_NICKNAME):
band = conf.MODELING_NICKNAME
nickname = conf.MODELING_NICKNAME
bidx = [0,]
bands = [band,]
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{conf.MODELING_NICKNAME}_srcprofile.pdf')
else:
bidx = [blob._band2idx(b, bands=blob.bands) for b in bands]
if bands[0].startswith(conf.MODELING_NICKNAME):
nickname = conf.MODELING_NICKNAME
else:
nickname = conf.MULTIBAND_NICKNAME
if len(bands) > 1:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{nickname}_srcprofile.pdf')
else:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{bands[0]}_srcprofile.pdf')
import matplotlib.backends.backend_pdf
pdf = matplotlib.backends.backend_pdf.PdfPages(outpath)
for idx, band in zip(bidx, bands):
if band == conf.MODELING_NICKNAME:
zpt = conf.MODELING_ZPT
rband = conf.MODELING_NICKNAME
elif band.startswith(conf.MODELING_NICKNAME):
band_name = band[len(conf.MODELING_NICKNAME)+1:]
# zpt = conf.MULTIBAND_ZPT[blob._band2idx(band_name)]
if band_name == conf.MODELING_NICKNAME:
zpt = conf.MODELING_ZPT
rband = band
else:
zpt = conf.MULTIBAND_ZPT[blob._band2idx(band_name)]
rband = conf.MODELING_NICKNAME + '_' + band_name
else:
zpt = conf.MULTIBAND_ZPT[blob._band2idx(band)]
rband = conf.MODELING_NICKNAME + '_' + band
# information
bid = blob.blob_id
bsrc = blob.bcatalog[blob.bcatalog['source_id'] == sid]
ra, dec = bsrc['RA'][0], bsrc['DEC'][0]
if nickname == conf.MODELING_NICKNAME:
xp0, yp0 = bsrc['x_orig'][0] - blob.subvector[1], bsrc['y_orig'][0] - blob.subvector[0]
else:
xp0, yp0 = bsrc['x_orig'][0] - blob.subvector[1] - blob.mosaic_origin[1] + conf.BRICK_BUFFER, bsrc['y_orig'][0] - blob.subvector[0] - blob.mosaic_origin[0] + conf.BRICK_BUFFER
xp, yp = src.pos[0], src.pos[1]
xps, yps = xp, yp
flux, flux_err = bsrc[f'FLUX_{band}'][0], bsrc[f'FLUXERR_{band}'][0]
mag, mag_err = bsrc[f'MAG_{band}'][0], bsrc[f'MAGERR_{band}'][0]
n_blob = bsrc['N_BLOB'][0]
chi2 = bsrc[f'CHISQ_{band}'][0]
snr = bsrc[f'SNR_{band}'][0]
is_resolved = False
if src.name not in ('PointSource', 'SimpleGalaxy'):
is_resolved = True
col = np.array(bsrc.colnames)[np.array([tcoln.startswith('REFF') for tcoln in bsrc.colnames])][0]
rband = col[len('REFF_'):]
reff, reff_err = np.exp(bsrc[f'REFF_{rband}'][0])*conf.PIXEL_SCALE, np.exp(bsrc[f'REFF_{rband}'][0])*bsrc[f'REFF_ERR_{rband}'][0]*2.303*conf.PIXEL_SCALE
ab, ab_err = bsrc[f'AB_{rband}'][0], bsrc[f'AB_ERR_{rband}'][0]
if ab == -99.0:
ab = -99
ab_err = -99
theta, theta_err = bsrc[f'THETA_{rband}'][0], bsrc[f'THETA_ERR_{rband}'][0]
if 'Sersic' in src.name:
nre, nre_err = bsrc[f'N_{rband}'][0], bsrc[f'N_ERR_{rband}'][0]
# images
img = blob.images[idx]
wgt = blob.weights[idx]
err = 1. / np.sqrt(wgt)
mask = blob.masks[idx]
seg = blob.segmap.copy()
seg[blob.segmap != sid] = 0
mod = blob.solution_model_images[idx]
chi = blob.solution_tractor.getChiImage(idx)
chi[blob.segmap != sid] = 0
res = img - mod
rms = np.median(blob.background_rms_images[idx])
xpix, ypix = np.nonzero(seg)
dx, dy = (np.max(xpix) - np.min(xpix)) / 2., (np.max(ypix) - np.min(ypix)) / 2.
buff = np.min([conf.BLOB_BUFFER, 10.])
xlim, ylim = np.array([-(dx + buff), (dx + buff)]) * conf.PIXEL_SCALE, np.array([-(dy + buff), (dy + buff)]) * conf.PIXEL_SCALE
h, w = np.shape(img)
dw, dh = w - xp - 1, h - yp - 1
extent = np.array([-xp, dw, -yp, dh]) * conf.PIXEL_SCALE
xp0, yp0 = (xp0 - xp) * conf.PIXEL_SCALE, (yp0 - yp) * conf.PIXEL_SCALE
xp, yp = 0., 0.
if is_resolved:
aeff = reff #* conf.PIXEL_SCALE
beff = reff / ab #* conf.PIXEL_SCALE
xa = xp + np.cos(np.deg2rad(90-theta)) * np.array([-1, 1]) * aeff
ya = yp + np.sin(np.deg2rad(90-theta)) * np.array([-1, 1]) * aeff
xb = xp + np.cos(np.deg2rad(theta)) * np.array([-1, 1]) * beff
yb = yp + np.sin(np.deg2rad(theta)) * np.array([1, -1]) * beff
# tests
res_seg = res[blob.segmap==sid].flatten()
try:
k2, p_norm = stats.normaltest(res_seg)
except:
k2, p_norm = -99, -99
chi_seg = chi[blob.segmap==sid].flatten()
chi_sig = np.std(chi_seg)
chi_mu = np.mean(chi_seg)
# plotting
fig, ax = plt.subplots(ncols=4, nrows=4, figsize=(15, 15))
# row 1 -- image, info
if rms > 0.95*np.nanmax(img):
normmin = 1.05*np.nanmin(abs(img))
else:
normmin = rms
normmax = 0.95*np.nanmax(img)
if normmin != normmax:
norm = LogNorm(normmin, normmax, clip='True')
else:
norm=None
ax[0,0].imshow(img, norm=norm, cmap='Greys', extent=extent)
ax[0,0].text(0.05, 1.03, band, transform=ax[0,0].transAxes)
ax[0,0].scatter(xp0, yp0, c='purple', marker='+', alpha=0.5)
ax[0,0].scatter(xp, yp, c='royalblue', marker='x', alpha=0.9)
ax[0,0].set(xlim=xlim, ylim=ylim)
ax[0,1].axis('off')
ax[0,2].axis('off')
ax[0,3].axis('off')
ax[0,1].text(0, 0.90,
s = f'Source: {sid} | Blob: {bid} | Brick: {blob.brick_id} | RA: {ra:6.6f}, Dec: {dec:6.6f}',
transform=ax[0,1].transAxes)
if is_resolved:
if 'Sersic' in src.name:
ax[0,1].text(0, 0.70,
s = f'{src.name} with Reff: {reff:3.3f}+/-{reff_err:3.3f}, n: {nre:3.3f}+/-{nre_err:3.3f}, A/B: {ab:3.3f}+/-{ab_err:3.3f}, Theta: {theta:3.3f}+/-{theta_err:3.3f}',
transform=ax[0,1].transAxes)
else:
ax[0,1].text(0, 0.70,
s = f'{src.name} with Reff: {reff:3.3f}+/-{reff_err:3.3f}, A/B: {ab:3.3f}+/-{ab_err:3.3f}, and Theta: {theta:3.3f}+/-{theta_err:3.3f}',
transform=ax[0,1].transAxes)
else:
ax[0,1].text(0, 0.70,
s = f'{src.name}',
transform=ax[0,1].transAxes)
ax[0,1].text(0, 0.50,
s = f'{band} | {flux:3.3f}+/-{flux_err:3.3f} uJy | {mag:3.3f}+/-{mag_err:3.3f} AB | S/N = {snr:3.3f}',
transform=ax[0,1].transAxes)
ax[0,1].text(0, 0.30,
s = f'Chi2/N: {chi2:3.3f} | N_blob: {n_blob} | '+r'$\mu(\chi)$'+f'={chi_mu:3.3f}, '+r'$\sigma(\chi)$'+f'={chi_sig:3.3f} | K2-test: {k2:3.3f}',
transform=ax[0,1].transAxes)
# row 2 -- image, weights, mask, segment
ax[1,0].imshow(img, vmin=-3*rms, vmax=3*rms, cmap='RdGy', extent=extent)
ax[1,0].text(0.05, 1.03, 'Image', transform=ax[1,0].transAxes)
ax[1,0].set(xlim=xlim, ylim=ylim)
ax[1,0].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
ax[1,1].imshow(err, cmap='Greys', extent=extent)
ax[1,1].text(0.05, 1.03, r'med($\sigma$)'+f'={rms*10**(-0.4 * (zpt - 23.9)):5.5f} uJy', transform=ax[1,1].transAxes)
ax[1,1].set(xlim=xlim, ylim=ylim)
ax[1,1].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
ax[1,2].imshow(mask, cmap='Greys', extent=extent)
ax[1,2].text(0.05, 1.03, 'Blob', transform=ax[1,2].transAxes)
ax[1,2].set(xlim=xlim, ylim=ylim)
ax[1,2].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
ax[1,3].imshow(~seg, cmap='Greys', extent=extent)
ax[1,3].text(0.05, 1.03, 'Segment', transform=ax[1,3].transAxes)
ax[1,3].set(xlim=xlim, ylim=ylim)
ax[1,3].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
ax[1,0].scatter(xp, yp, c='royalblue', marker='x')
ax[1,2].scatter(xp0, yp0, c='purple', marker='+', alpha=0.5)
ax[1,2].scatter(xp, yp, c='royalblue', marker='x', alpha=0.9)
ax[1,3].scatter(xp0, yp0, c='purple', marker='+', alpha=0.5)
ax[1,3].scatter(xp, yp, c='royalblue', marker='x', alpha=0.9)
ax[1,2].plot()
# row 3 -- image, model, residual, chi
ax[2,0].imshow(img/err, vmin=-3, vmax=3, cmap='RdGy', extent=extent)
ax[2,0].text(0.05, 1.03, 'S/N', transform=ax[2,0].transAxes)
ax[2,0].set(xlim=xlim, ylim=ylim)
ax[2,0].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
# ax[2,1].imshow(mod, vmin=-3*rms, vmax=3*rms, cmap='RdGy', extent=extent)
ax[2,1].imshow(mod, norm=norm, cmap='Greys', extent=extent)
ax[2,1].text(0.05, 1.03, 'Model', transform=ax[2,1].transAxes)
ax[2,1].set(xlim=xlim, ylim=ylim)
ax[2,1].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
ax[2,2].imshow(res, vmin=-3*rms, vmax=3*rms, cmap='RdGy', extent=extent)
ax[2,2].text(0.05, 1.03, 'Residual', transform=ax[2,2].transAxes)
ax[2,2].set(xlim=xlim, ylim=ylim)
ax[2,2].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
ax[2,3].imshow(chi, vmin=-3, vmax=3, cmap='RdGy', extent=extent)
ax[2,3].text(0.05, 1.03, r'$\chi$', transform=ax[2,3].transAxes)
ax[2,3].set(xlim=xlim, ylim=ylim)
ax[2,3].contour(img, levels=np.arange(2*rms, np.min([5*rms, np.max(img)]), rms), colors='royalblue', extent=extent, alpha=0.5)
if is_resolved:
ax[2,0].plot(xa, ya, c='royalblue', alpha=0.7)
ax[2,0].plot(xb, yb, c='royalblue', alpha=0.7)
ax[2,1].plot(xa, ya, c='royalblue', alpha=0.7)
ax[2,1].plot(xb, yb, c='royalblue', alpha=0.7)
ax[2,2].plot(xa, ya, c='royalblue', alpha=0.7)
ax[2,2].plot(xb, yb, c='royalblue', alpha=0.7)
ax[2,3].plot(xa, ya, c='royalblue', alpha=0.7)
ax[2,3].plot(xb, yb, c='royalblue', alpha=0.7)
else:
ax[2,0].scatter(xp, yp, c='royalblue', marker='x', alpha=0.7)
ax[2,1].scatter(xp, yp, c='royalblue', marker='x', alpha=0.7)
ax[2,2].scatter(xp, yp, c='royalblue', marker='x', alpha=0.7)
ax[2,3].scatter(xp, yp, c='royalblue', marker='x', alpha=0.7)
# row 4 -- psf, x-slice, y-slice, hist
psfmodel = blob.psfimg[band]
xax = np.arange(-np.shape(psfmodel)[0]/2 + 0.5, np.shape(psfmodel)[0]/2 + 0.5)
[ax[3,0].plot(xax * 0.15, psfmodel[x], c='royalblue', alpha=0.5) for x in np.arange(0, np.shape(psfmodel)[0])]
ax[3,0].axvline(0, ls='dotted', c='k')
ax[3,0].set(xlim=(-5, 5), yscale='log', ylim=(1E-6, 1E-1), xlabel='arcsec')
ax[3,0].text(0.05, 1.03, 'PSF', transform=ax[3,0].transAxes)
# x slice
imgx = blob.images[idx][:, int(xps)]
errx = 1./np.sqrt(blob.weights[idx][:, int(xps)])
modx = blob.solution_model_images[idx][:, int(xps)]
sign = 1
if bsrc[f'RAWFLUX_{band}'][0] < 0:
sign = -1
modxlo = blob.solution_model_images[idx][:, int(xps)] / bsrc[f'RAWFLUX_{band}'][0] * (bsrc[f'RAWFLUX_{band}'][0] - sign * bsrc[f'RAWFLUXERR_{band}'][0])
modxhi = blob.solution_model_images[idx][:, int(xps)] / bsrc[f'RAWFLUX_{band}'][0] * (bsrc[f'RAWFLUX_{band}'][0] + sign * bsrc[f'RAWFLUXERR_{band}'][0])
resx = imgx - modx
# y slice
imgy = blob.images[idx][int(yps), :]
erry = 1./np.sqrt(blob.weights[idx][int(yps), :])
mody = blob.solution_model_images[idx][int(yps), :]
if bsrc[f'RAWFLUX_{band}'][0] < 0:
sign = -1
modylo = blob.solution_model_images[idx][int(yps), :] / bsrc[f'RAWFLUX_{band}'][0] * (bsrc[f'RAWFLUX_{band}'][0] - sign * bsrc[f'RAWFLUXERR_{band}'][0])
modyhi = blob.solution_model_images[idx][int(yps), :] / bsrc[f'RAWFLUX_{band}'][0] * (bsrc[f'RAWFLUX_{band}'][0] + sign * bsrc[f'RAWFLUXERR_{band}'][0])
resy = imgy - mody
ylim = (0.9*np.min([np.min(imgx), np.min(imgy)]), 1.1*np.max([np.max(imgx), np.max(imgy)]))
xax = np.linspace(extent[2], extent[3]+conf.PIXEL_SCALE, len(imgx))
ax[3,2].errorbar(xax, imgx, yerr=errx, c='k')
ax[3,2].plot(xax, modx, c='r')
ax[3,2].fill_between(xax, modxlo, modxhi, color='r', alpha=0.3)
ax[3,2].plot(xax, resx, c='g')
ax[3,2].axvline(0, ls='dotted', c='k')
ax[3,2].set(ylim =ylim, xlabel='arcsec', xlim=xlim)
ax[3,2].text(0.05, 1.03, 'Y', transform=ax[3,2].transAxes)
yax = np.linspace(extent[0], extent[1]+conf.PIXEL_SCALE, len(imgy))
ax[3,1].errorbar(yax, imgy, yerr=erry, c='k')
ax[3,1].plot(yax, mody, c='r')
ax[3,1].fill_between(yax, modylo, modyhi, color='r', alpha=0.3)
ax[3,1].plot(yax, resy, c='g')
ax[3,1].axvline(0, ls='dotted', c='k')
ax[3,1].set(ylim=ylim, xlabel='arcsec', xlim=xlim)
ax[3,1].text(0.05, 1.03, 'X', transform=ax[3,1].transAxes)
hist(chi_seg, ax=ax[3,3], bins='freedman', histtype='step', density=True)
ax[3,3].axvline(0, ls='dotted', color='grey')
ax[3,3].text(0.05, 1.03, 'Residual '+r'$\sigma(\chi)$'+f'={chi_sig:3.3f}', transform=ax[3,3].transAxes)
ax[3,3].set(xlim=(-10, 10), xlabel=r'$\chi$')
ax[3,3].axvline(chi_mu, c='royalblue', ls='dashed')
ax[3,3].axvline(0, c='grey', ls='dashed', alpha=0.3)
ax[3,3].axvline(chi_mu-chi_sig, c='royalblue', ls='dotted')
ax[3,3].axvline(chi_mu+chi_sig, c='royalblue', ls='dotted')
ax[3,3].axvline(-1, c='grey', ls='dotted', alpha=0.3)
ax[3,3].axvline(1, c='grey', ls='dotted', alpha=0.3)
pdf.savefig(fig)
plt.close()
logger.info(f'Saving figure: {outpath}')
pdf.close()
def plot_apertures(blob, band=None):
pass
def plot_iterblob(blob, tr, iteration, bands=None):
if blob._is_itemblob:
sid = blob.bcatalog['source_id'][0]
if bands is None:
band = conf.MODELING_NICKNAME
nickname = conf.MODELING_NICKNAME
bidx = [0,]
bands = [band,]
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{conf.MODELING_NICKNAME}_{blob._level}_{blob._sublevel}_{iteration}_iterblob.pdf')
else:
bidx = [blob._band2idx(b, bands=blob.bands) for b in bands]
if bands[0].startswith(conf.MODELING_NICKNAME):
nickname = conf.MODELING_NICKNAME
else:
nickname = conf.MULTIBAND_NICKNAME
if len(bands) > 1:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{nickname}_{blob._level}_{blob._sublevel}_{iteration}_iterblob.pdf')
else:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{bands[0]}_{blob._level}_{blob._sublevel}_{iteration}_iterblob.pdf')
else:
if bands is None:
band = conf.MODELING_NICKNAME
nickname = conf.MODELING_NICKNAME
bidx = [0,]
bands = [band,]
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_{conf.MODELING_NICKNAME}_{blob._level}_{blob._sublevel}_{iteration}_iterblob.pdf')
else:
bidx = [blob._band2idx(b, bands=blob.bands) for b in bands]
if bands[0].startswith(conf.MODELING_NICKNAME):
nickname = conf.MODELING_NICKNAME
else:
nickname = conf.MULTIBAND_NICKNAME
if len(bands) > 1:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_{nickname}_{blob._level}_{blob._sublevel}_{iteration}_iterblob.pdf')
else:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_{bands[0]}_{blob._level}_{blob._sublevel}_{iteration}_iterblob.pdf')
import matplotlib.backends.backend_pdf
pdf = matplotlib.backends.backend_pdf.PdfPages(outpath)
for idx, band in zip(bidx, bands):
if band == conf.MODELING_NICKNAME:
zpt = conf.MODELING_ZPT
elif band.startswith(conf.MODELING_NICKNAME):
band_name = band[len(conf.MODELING_NICKNAME)+1:]
zpt = conf.MULTIBAND_ZPT[blob._band2idx(band_name)]
else:
zpt = conf.MULTIBAND_ZPT[blob._band2idx(band)]
cat = tr.getCatalog()
xp, yp = [src.pos[0] for src in cat], [src.pos[1] for src in cat]
back = blob.background_images[idx]
back_rms = blob.background_rms_images[idx]
mean, rms = np.nanmean(back), np.nanmean(back_rms)
img_opt = dict(cmap='RdGy', vmin=-5*rms, vmax=5*rms)
# image
img = blob.images[idx]
# model
mod = tr.getModelImage(idx)
# residual
res = img - mod
# chi2
chi2 = tr.getChiImage(idx)
fig, ax = plt.subplots(ncols=4)
ax[0].imshow(img, **img_opt)
ax[1].imshow(mod, **img_opt)
ax[2].imshow(res, **img_opt)
ax[3].imshow(chi2, cmap='RdGy', vmin=-5, vmax=5)
fig.suptitle(f'Blob {blob.blob_id} | {band} | iter: {iteration}')
[ax[i].scatter(xp, yp, marker='x', c='royalblue') for i in np.arange(4)]
[ax[i].set_title(title, fontsize=20) for i, title in enumerate(('Image', 'Model', 'Image-Model', '$\chi^{2}$'))]
pdf.savefig(fig)
plt.close()
logger.info(f'Saving figure: {outpath}')
pdf.close()
def plot_modprofile(blob, band=None):
if band is None:
band = conf.MODELING_NICKNAME
idx = 0
else:
idx = blob._band2idx(band, bands=blob.bands)
psfmodel = blob.psfimg[band]
back = blob.background_images[idx]
back_rms = blob.background_rms_images[idx]
mean, rms = np.nanmean(back), np.nanmean(back_rms)
noise = np.random.normal(mean, rms, size=blob.dims)
tr = blob.solution_tractor
norm = LogNorm(mean + 3*rms, blob.images[idx].max(), clip='True')
img_opt = dict(cmap='Greys', norm=norm)
img_opt = dict(cmap='RdGy', vmin=-5*rms, vmax=5*rms)
xlim = (-np.shape(blob.images[idx])[1]/2, np.shape(blob.images[idx])[1]/2)
fig, ax = plt.subplots(ncols = 5, nrows = 2, figsize=(20,10))
ax[1,0].imshow(blob.images[idx], **img_opt)
ax[1,1].imshow(blob.solution_model_images[idx], **img_opt)
residual = blob.images[idx] - blob.solution_model_images[idx]
ax[1,2].imshow(residual, **img_opt)
xax = np.arange(-np.shape(blob.images[idx])[1]/2, np.shape(blob.images[idx])[1]/2)
[ax[0,0].plot(xax * 0.15, blob.images[idx][x], c='royalblue', alpha=0.5) for x in np.arange(0, np.shape(blob.images[idx])[0])]
ax[0,0].axvline(0, ls='dotted', c='k')
ax[0,0].set(yscale='log', xlabel='arcsec')
xax = np.arange(-np.shape(blob.solution_model_images[idx])[1]/2, np.shape(blob.solution_model_images[idx])[1]/2)
[ax[0,1].plot(xax * 0.15, blob.solution_model_images[idx][x], c='royalblue', alpha=0.5) for x in np.arange(0, np.shape(blob.solution_model_images[idx])[0])]
ax[0,1].axvline(0, ls='dotted', c='k')
ax[0,1].set(yscale='log', xlabel='arcsec')
xax = np.arange(-np.shape(residual)[1]/2, np.shape(residual)[1]/2)
[ax[0,2].plot(xax * 0.15, residual[x], c='royalblue', alpha=0.5) for x in np.arange(0, np.shape(residual)[0])]
ax[0,2].axvline(0, ls='dotted', c='k')
ax[0,2].set(yscale='log', xlabel='arcsec')
norm = LogNorm(1e-5, 0.1*np.nanmax(psfmodel), clip='True')
img_opt = dict(cmap='Blues', norm=norm)
ax[1,3].imshow(psfmodel, norm=norm, extent=0.15 *np.array([-np.shape(psfmodel)[0]/2, np.shape(psfmodel)[0]/2, -np.shape(psfmodel)[0]/2, np.shape(psfmodel)[0]/2,]))
ax[1,3].set(xlim=xlim, ylim=xlim)
xax = np.arange(-np.shape(psfmodel)[0]/2 + 0.5, np.shape(psfmodel)[0]/2 + 0.5)
[ax[0,3].plot(xax * 0.15, psfmodel[x], c='royalblue', alpha=0.5) for x in np.arange(0, np.shape(psfmodel)[0])]
ax[0,3].axvline(0, ls='dotted', c='k')
ax[0,3].set(xlim=xlim, yscale='log', ylim=(1E-6, 1E-1), xlabel='arcsec')
for j, src in enumerate(blob.solution_catalog):
try:
mtype = src.name
except:
mtype = 'PointSource'
flux = src.getBrightness().getFlux(band)
chisq = blob.solution_chisq[j, idx]
band = band.replace(' ', '_')
if band == conf.MODELING_NICKNAME:
zpt = conf.MODELING_ZPT
elif band.startswith(conf.MODELING_NICKNAME):
band_name = band[len(conf.MODELING_NICKNAME)+1:]
zpt = conf.MULTIBAND_ZPT[blob._band2idx(band_name)]
else:
zpt = conf.MULTIBAND_ZPT[blob._band2idx(band)]
mag = zpt - 2.5 * np.log10(flux)
topt = dict(color=colors[j], transform = ax[0, 3].transAxes)
ystart = 0.99 - j * 0.5
ax[0, 4].text(1.05, ystart - 0.1, f'{j}) {mtype}', **topt)
ax[0, 4].text(1.05, ystart - 0.2, f' F({band}) = {flux:4.4f}', **topt)
ax[0, 4].text(1.05, ystart - 0.3, f' M({band}) = {mag:4.4f}', **topt)
ax[0, 4].text(1.05, ystart - 0.4, f' zpt({band}) = {zpt:4.4f}', **topt)
ax[0, 4].text(1.05, ystart - 0.5, f' $\chi^{2}$ = {chisq:4.4f}', **topt)
ax[0, 4].axis('off')
ax[1, 4].axis('off')
for i in np.arange(3):
ax[0, i].set(xlim=(0.15*xlim[0], 0.15*xlim[1]), ylim=(np.nanmedian(blob.images[idx]), blob.images[idx].max()))
# ax[1, i].set(xlim=(-15, 15), ylim=(-15, 15))
ax[0, 3].set(xlim=(0.15*xlim[0], 0.15*xlim[1]))
if blob._is_itemblob:
sid = blob.bcatalog['source_id'][0]
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{band}_debugprofile.pdf')
else:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_{band}_debugprofile.pdf')
logger.info(f'Saving figure: {outpath}')
fig.savefig(outpath)
plt.close()
def plot_xsection(blob, band, src, sid):
if band is None:
band = conf.MODELING_NICKNAME
idx = 0
else:
idx = blob._band2idx(band, bands=blob.bands)
back = blob.background_images[idx]
back_rms = blob.background_rms_images[idx]
mean, rms = np.nanmean(back), np.nanmean(back_rms)
fig, ax = plt.subplots(ncols=2)
posx, posy = src.pos[0], src.pos[1]
try:
# x slice
imgx = blob.images[idx][:, int(posx)]
errx = 1/np.sqrt(blob.weights[idx][:, int(posx)])
modx = blob.solution_model_images[idx][:, int(posx)]
resx = imgx - modx
# y slice
imgy = blob.images[idx][int(posy), :]
erry = 1/np.sqrt(blob.weights[idx][int(posy), :])
mody = blob.solution_model_images[idx][int(posy), :]
resy = imgy - mody
except:
plt.close()
logger.warning('Could not make plot -- object may have escaped?')
return
# idea: show areas outside segment in grey
ylim = (0.9*np.min([np.min(imgx), np.min(imgy)]), 1.1*np.max([np.max(imgx), np.max(imgy)]))
xax = np.arange(-np.shape(blob.images[idx])[0]/2, np.shape(blob.images[idx])[0]/2) * conf.PIXEL_SCALE
ax[0].errorbar(xax, imgx, yerr=errx, c='k')
ax[0].plot(xax, modx, c='r')
ax[0].plot(xax, resx, c='g')
ax[0].axvline(0, ls='dotted', c='k')
ax[0].set(ylim =ylim, xlabel='arcsec')
yax = np.arange(-np.shape(blob.images[idx])[1]/2, np.shape(blob.images[idx])[1]/2) * conf.PIXEL_SCALE
ax[1].errorbar(yax, imgy, yerr=erry, c='k')
ax[1].plot(yax, mody, c='r')
ax[1].plot(yax, resy, c='g')
ax[1].axvline(0, ls='dotted', c='k')
ax[1].set(ylim=ylim, xlabel='arcsec')
# for j, src in enumerate(blob.solution_catalog):
# try:
# mtype = src.name
# except:
# mtype = 'PointSource'
# flux = src.getBrightness().getFlux(band)
# chisq = blob.solution_chisq[j, idx]
# band = band.replace(' ', '_')
# if band == conf.MODELING_NICKNAME:
# zpt = conf.MODELING_ZPT
# else:
# zpt = conf.MULTIBAND_ZPT[idx]
# mag = zpt - 2.5 * np.log10(flux)
# topt = dict(color=colors[j], transform = ax[0, 3].transAxes)
# ystart = 0.99 - j * 0.4
# ax[0, 4].text(1.05, ystart - 0.1, f'{j}) {mtype}', **topt)
# ax[0, 4].text(1.05, ystart - 0.2, f' F({band}) = {flux:4.4f}', **topt)
# ax[0, 4].text(1.05, ystart - 0.3, f' M({band}) = {mag:4.4f}', **topt)
# ax[0, 4].text(1.05, ystart - 0.4, f' $\chi^{2}$ = {chisq:4.4f}', **topt)
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{band}_xsection.pdf')
logger.info(f'Saving figure: {outpath}')
fig.savefig(outpath)
plt.close()
def plot_detblob(blob, fig=None, ax=None, band=None, level=0, sublevel=0, final_opt=False, init=False):
if band is None:
idx = 0
band = ''
else:
# print(blob.bands)
# print(band)
idx = np.argwhere(np.array(blob.bands) == band)[0][0]
back = blob.background_images[idx]
rms = blob.background_rms_images[idx]
mean, rms = np.nanmean(back), np.nanmean(rms)
noise = np.random.normal(mean, rms, size=blob.dims)
tr = blob.solution_tractor
norm = LogNorm(np.max([mean + rms, 1E-5]), blob.images.max(), clip='True')
img_opt = dict(cmap='Greys', norm=norm)
img_opt = dict(cmap='RdGy', vmin=-5*rms, vmax=5*rms)
# Init
if fig is None:
plt.ioff()
fig, ax = plt.subplots(figsize=(24,48), ncols=6, nrows=13)
# Detection image
ax[0,0].imshow(blob.images[idx], **img_opt)
[ax[0,i].axis('off') for i in np.arange(1, 6)]
if blob.n_sources == 1:
objects = blob.bcatalog
e = Ellipse(xy=(objects['x'], objects['y']),
width=6*objects['a'],
height=6*objects['b'],
angle=objects['theta'] * 180. / np.pi)
e.set_facecolor('none')
e.set_edgecolor(colors[0])
ax[0, 0].add_artist(e)
else:
for j, src in enumerate(blob.bcatalog):
objects = blob.bcatalog[j]
e = Ellipse(xy=(objects['x'], objects['y']),
width=6*objects['a'],
height=6*objects['b'],
angle=objects['theta'] * 180. / np.pi)
e.set_facecolor('none')
e.set_edgecolor(colors[j])
ax[0, 0].add_artist(e)
ax[0,1].text(0.1, 0.9, f'Blob #{blob.blob_id}', transform=ax[0,1].transAxes)
ax[0,1].text(0.1, 0.8, f'{blob.n_sources} source(s)', transform=ax[0,1].transAxes)
[ax[1,i+1].set_title(title, fontsize=20) for i, title in enumerate(('Model', 'Model+Noise', 'Image-Model', '$\chi^{2}$', 'Residuals'))]
if blob._is_itemblob:
sid = blob.bcatalog['source_id'][0]
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_S{sid}_{conf.MODELING_NICKNAME}_{band}.pdf')
else:
outpath = os.path.join(conf.PLOT_DIR, f'T{blob.brick_id}_B{blob.blob_id}_{conf.MODELING_NICKNAME}_{band}.pdf')
fig.savefig(outpath)
logger.info(f'Saving figure: {outpath}')
elif final_opt:
nrow = 4 * level + 2* sublevel + 2
[[ax[i,j].axis('off') for i in np.arange(nrow+1, 11)] for j in np.arange(0, 6)]
ax[11,0].axis('off')
residual = blob.images[idx] - blob.pre_solution_model_images[idx]
ax[11,1].imshow(blob.pre_solution_model_images[idx], **img_opt)
ax[11,2].imshow(blob.pre_solution_model_images[idx] + noise, **img_opt)
ax[11,3].imshow(residual, cmap='RdGy', vmin=-5*rms, vmax=5*rms)
ax[11,4].imshow(blob.tr.getChiImage(idx), cmap='RdGy', vmin = -5, vmax = 5)
bins = np.linspace(np.nanmin(residual), np.nanmax(residual), 30)
minx, maxx = 0, 0
for i, src in enumerate(blob.bcatalog):
res_seg = residual[blob.segmap==src['source_id']].flatten()
ax[11,5].hist(res_seg, bins=20, histtype='step', color=colors[i], density=True)
resmin, resmax = np.nanmin(res_seg), np.nanmax(res_seg)
if resmin < minx:
minx = resmin
if resmax > maxx:
maxx = resmax
ax[11,5].set_xlim(minx, maxx)
ax[11,5].axvline(0, c='grey', ls='dotted')
ax[11,5].set_ylim(bottom=0)
ax[12,0].axis('off')
residual = blob.images[idx] - blob.solution_model_images[idx]
ax[12,1].imshow(blob.solution_model_images[idx], **img_opt)
ax[12,2].imshow(blob.solution_model_images[idx] + noise, **img_opt)
ax[12,3].imshow(residual, cmap='RdGy', vmin=-5*rms, vmax=5*rms)
ax[12,4].imshow(blob.tr.getChiImage(idx), cmap='RdGy', vmin = -5, vmax = 5)
ax[12,1].set_ylabel('Solution')
        bins = np.linspace(np.nanmin(residual), np.nanmax(residual), 30)
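        # --- hedged sketch: the original function is truncated here. The lines below mirror the
        # --- pre-solution residual-histogram block above for the solution row; they are an
        # --- assumption, not recovered code (the final figure save is omitted because the output
        # --- path logic is not visible in this excerpt).
        minx, maxx = 0, 0
        for i, src in enumerate(blob.bcatalog):
            res_seg = residual[blob.segmap == src['source_id']].flatten()
            ax[12, 5].hist(res_seg, bins=20, histtype='step', color=colors[i], density=True)
            resmin, resmax = np.nanmin(res_seg), np.nanmax(res_seg)
            if resmin < minx:
                minx = resmin
            if resmax > maxx:
                maxx = resmax
        ax[12, 5].set_xlim(minx, maxx)
        ax[12, 5].axvline(0, c='grey', ls='dotted')
        ax[12, 5].set_ylim(bottom=0)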
import json
import torch
from torch.utils.data import Dataset
import numpy as np
from utils import constants
import random
class SyntaxDataset(Dataset):
def __init__(self, fname_all, transition_file=None, transition_system=None, tokenizer=None, rel_size = 20):
# self.fname = fname
self.max_rel = rel_size
self.model = transition_system
self.transition_file = transition_file
self.transition_system = {act: i for (act, i) in zip(transition_system[0], transition_system[1])}
# self.transition_system[None] = -2
self.act_counts = {self.transition_system[act]: 0 for act in self.transition_system.keys()}
self.tokenizer = tokenizer
self.max_sent_len = 0
self.words, self.word_st, self.pos, self.heads, self.rels = [], [], [], [], []
self.actions = []
self.mappings = []
self.relations_in_order = []
self.language_starts = [0]
if isinstance(fname_all,list):
for fname, tf in zip(fname_all, transition_file):
self.load_data(fname, tf)
else:
self.load_data(fname_all, transition_file)
#self.n_instances = len(self.words)
# save agenda actions in the same format as the rest
def load_data(self, fname, transition_file):
# self.labeled_actions = []
index = 0
with open(fname, 'r') as file, open(transition_file, 'r') as file2:
for line, action in zip(file.readlines(), file2.readlines()):
sentence = json.loads(line)
#print(sentence)
tranisiton = json.loads(action)
self.words += [self.list2tensor([word['word_id'] for word in sentence])]
tokens, mapping = self.tokenize([word['word'] for word in sentence])
self.word_st += [tokens]
self.mappings += [mapping]
length = len([word['word_id'] for word in sentence])
if length > self.max_sent_len:
self.max_sent_len = length
# self.check_lens([word['word_id'] for word in sentence],tranisiton['transition'])
self.pos += [self.list2tensor([word['tag1_id'] for word in sentence])]
self.heads += [self.list2tensor([word['head'] for word in sentence])]
self.rels += [self.list2tensor([word['rel_id'] for word in sentence])]
self.actions += [self.actionsequence2tensor(tranisiton['transition'])]
self.relations_in_order += [self.list2tensor(tranisiton['relations'])]
# self.labeled_actions += [self.labeled_act2tensor(tranisiton['labeled_actions'])]
index += 1
new_start = self.language_starts[-1] + index
self.language_starts.append(new_start)
self.n_instances = index
def check_lens(self, words, actions):
n = len(words)
ids = [self.transition_system[act] for act in actions]
# should be 2n-1, there's one extra "null" action for implementation purposes
print(len(ids) == 2 * n - 1)
return len(ids) == 2 * n - 1
def tokenize(self, wordlist):
wordlist = wordlist # + ["<EOS>"]
#print("wordlen {}".format(len(wordlist)))
encoded = self.tokenizer(wordlist, is_split_into_words=True, return_tensors="pt",
return_attention_mask=False,
return_token_type_ids=False,
add_special_tokens=True)
#print("encoded {}".format(len(encoded)))
# print(encoded)
# kj
enc = [self.tokenizer.encode(x, add_special_tokens=False) for x in wordlist]
idx = 0
token_mapping = []
token_mapping2 = []
for token in enc:
tokenout = []
for ids in token:
tokenout.append(idx)
idx += 1
token_mapping.append(tokenout[0])
token_mapping.append(tokenout[-1])
token_mapping2.append(tokenout)
# print(self.tokenizer.eos_token_id)
# jk
return encoded['input_ids'].squeeze(0), torch.LongTensor(token_mapping).to(device=constants.device)
def hypergraph2tensor(self, hypergraph):
all_graphs = []
for left in hypergraph:
i1, j1 = left
tmp = torch.LongTensor([i1, j1]).to(device=constants.device)
all_graphs.append(tmp)
return torch.stack(all_graphs).to(device=constants.device)
def actionsequence2tensor(self, actions):
acts = []
if self.model == constants.agenda:
return self.hypergraph2tensor(actions)
elif self.model == constants.easy_first:
for item in actions:
edge = item[1]
head = edge[0]
mod = edge[1]
acts.append(head)
acts.append(mod)
return torch.LongTensor(acts).to(device=constants.device)
else:
ids = [self.transition_system[act] for act in actions]
for id in ids:
self.act_counts[id] += 1
return torch.LongTensor(ids).to(device=constants.device)
def labeled_act2tensor(self, labeled_actions):
ret = []
for (act, lab) in labeled_actions:
act_id = torch.LongTensor(self.transition_system[act]).to(device=constants.device)
lab_tens = torch.LongTensor(lab).to(device=constants.device)
ret.append((act_id, lab_tens))
return ret
@staticmethod
def list2tensor(data):
return torch.LongTensor(data).to(device=constants.device)
@staticmethod
def get_n_instances(fname):
with open(fname, 'r') as file:
count = 0
for _ in file.readlines():
count += 1
return count
def __len__(self):
return self.n_instances
def __getitem__(self, index):
# print(self.actions[index])
# return (self.words[index], self.pos[index]), \
# (self.heads[index], self.rels[index]), self.actions[index]
return (self.word_st[index], self.pos[index]), (self.heads[index], self.rels[index]), \
(self.actions[index], self.relations_in_order[index]), (self.mappings[index], self.words[index])
class LanguageBatchSampler(torch.utils.data.Sampler):
def __init__(self, language_start_indicies, batch_size, shuffle):
self.batch_size = batch_size
self.shuffle = shuffle
self.language_lengths = []
for i in range(1, len(language_start_indicies)):
len_lang = language_start_indicies[i] - language_start_indicies[i - 1]
self.language_lengths.append(len_lang)
langlengths = np.array(self.language_lengths)
        langlengths = np.power(langlengths, 0.3)
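        # --- hedged sketch: the original class is truncated here. The completion below assumes
        # --- the 0.3-power lengths are temperature-smoothed sampling weights over languages
        # --- (a common multilingual batching trick); it is not recovered from the original source.
        self.language_probs = langlengths / langlengths.sum()
        self.language_bounds = list(zip(language_start_indicies[:-1], language_start_indicies[1:]))

    def __len__(self):
        return sum(self.language_lengths) // self.batch_size

    def __iter__(self):
        # Each batch is drawn from a single language, chosen with the smoothed probabilities,
        # so low-resource languages are up-sampled relative to their raw size.
        for _ in range(len(self)):
            lang = np.random.choice(len(self.language_bounds), p=self.language_probs)
            lo, hi = self.language_bounds[lang]
            if self.shuffle:
                batch = np.random.randint(lo, hi, size=self.batch_size)
            else:
                batch = np.arange(lo, min(lo + self.batch_size, hi))
            yield batch.tolist()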
# coding=utf-8
# Copyright 2020 The Real-World RL Suite Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for real-world environments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import operator
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import realworldrl_suite.environments as rwrl
from realworldrl_suite.environments import realworld_env
NUM_DUMMY = 5
class EnvTest(parameterized.TestCase):
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testLoadEnv(self, domain_name, task_name):
"""Ensure it is possible to load the environment."""
env = rwrl.load(domain_name=domain_name, task_name=task_name)
env.reset()
self.assertIsNotNone(env)
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testSafetyConstraintsPresent(self, domain_name, task_name):
"""Ensure observations contain 'constraints' when safety is specified."""
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
safety_spec={'enable': True})
env.reset()
step = env.step(0)
self.assertIn('constraints', step.observation.keys())
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testSafetyCoeff(self, domain_name, task_name):
"""Ensure observations contain 'constraints' when safety is specified."""
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
safety_spec={'enable': True, 'safety_coeff': 0.1})
env.reset()
step = env.step(0)
self.assertIn('constraints', step.observation.keys())
for c in [2, -1]:
with self.assertRaises(ValueError):
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
safety_spec={'enable': True, 'safety_coeff': c})
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testSafetyObservationsDisabled(self, domain_name, task_name):
"""Ensure safety observations can be disabled."""
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
safety_spec={
'enable': True,
'observations': False
})
env.reset()
step = env.step(0)
self.assertNotIn('constraints', step.observation.keys())
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testDelayActionsNoDelay(self, domain_name, task_name):
"""Ensure there is no action delay if not specified."""
env = rwrl.load(domain_name=domain_name, task_name=task_name)
env.reset()
action_spec = env.action_spec()
# Send zero action and make sure it is immediately executed.
zero_action = np.zeros(shape=action_spec.shape, dtype=action_spec.dtype)
env.step(copy.copy(zero_action))
np.testing.assert_array_equal(env.physics.control(), zero_action)
# Send one action and make sure it is immediately executed.
one_action = np.ones(shape=action_spec.shape, dtype=action_spec.dtype)
if hasattr(action_spec, 'minimum'):
one_action = np.minimum(action_spec.maximum, one_action)
env.step(copy.copy(one_action))
np.testing.assert_array_equal(env.physics.control(), one_action)
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testDelayActionsDelay(self, domain_name, task_name):
"""Ensure there is action delay as specified."""
actions_delay = np.random.randint(low=1, high=10)
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
delay_spec={
'enable': True,
'actions': actions_delay
})
env.reset()
action_spec = env.action_spec()
zero_action = np.zeros(shape=action_spec.shape, dtype=action_spec.dtype)
one_action = np.ones(shape=action_spec.shape, dtype=action_spec.dtype)
if hasattr(action_spec, 'minimum'):
one_action = np.minimum(action_spec.maximum, one_action)
# Perform first action that fills up the buffer.
env.step(copy.copy(zero_action))
# Send one action and make sure zero action is still executed.
for _ in range(actions_delay):
env.step(copy.copy(one_action))
np.testing.assert_array_equal(env.physics.control(), zero_action)
# Make sure we finally perform the delayed one action.
env.step(copy.copy(zero_action))
np.testing.assert_array_equal(env.physics.control(), one_action)
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testDelayObservationsNoDelay(self, domain_name, task_name):
"""Ensure there is no observation delay if not specified."""
env = rwrl.load(domain_name=domain_name, task_name=task_name)
env.reset()
action_spec = env.action_spec()
one_action = np.ones(shape=action_spec.shape, dtype=action_spec.dtype)
if hasattr(action_spec, 'minimum'):
one_action = np.minimum(action_spec.maximum, one_action)
obs1 = env._task.get_observation(env._physics)
env.step(copy.copy(one_action))
obs2 = env._task.get_observation(env._physics)
# Make sure subsequent observations are different.
array_equality = []
for key in obs1:
array_equality.append((obs1[key] == obs2[key]).all())
self.assertIn(False, array_equality)
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testDelayObservationsDelay(self, domain_name, task_name):
"""Ensure there is observation delay as specified."""
observations_delay = np.random.randint(low=1, high=10)
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
delay_spec={
'enable': True,
'observations': observations_delay
})
obs1 = env.reset()[3]
action_spec = env.action_spec()
one_action = np.ones(shape=action_spec.shape, dtype=action_spec.dtype)
# Make sure subsequent observations are the same (clearing the buffer).
for _ in range(observations_delay):
obs2 = env.step(copy.copy(one_action))[3]
for key in obs1:
np.testing.assert_array_equal(obs1[key], obs2[key])
# Make sure we finally observe a different observation.
obs2 = env.step(copy.copy(one_action))[3]
array_equality = []
for key in obs1:
array_equality.append((obs1[key] == obs2[key]).all())
self.assertIn(False, array_equality)
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testNoiseGaussianActions(self, domain_name, task_name):
"""Ensure there is an additive Gaussian noise to the action."""
noise = 0.5
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
noise_spec={'gaussian': {
'enable': True,
'actions': noise
}})
env.reset()
action_spec = env.action_spec()
zero_action = np.zeros(shape=action_spec.shape, dtype=action_spec.dtype)
# Perform zero action.
env.step(copy.copy(zero_action))
# Verify that a non-zero action was actually performed.
np.testing.assert_array_compare(operator.__ne__, env.physics.control(),
zero_action)
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testAddedDummyObservations(self, domain_name, task_name):
"""Ensure there is an additive Gaussian noise to the observation."""
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
dimensionality_spec={
'enable': True,
'num_random_state_observations': 5,
})
env.reset()
# Get observation from realworld task.
obs = env._task.get_observation(env._physics)
for i in range(5):
self.assertIn('dummy-{}'.format(i), obs.keys())
for i in range(6, 10):
self.assertNotIn('dummy-{}'.format(i), obs.keys())
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testAddedDummyObservationsFlattened(self, domain_name, task_name):
"""Ensure there is an additive Gaussian noise to the observation."""
base_env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
safety_spec={'enable': True},
environment_kwargs=dict(flat_observation=True))
base_env.reset()
mod_env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
dimensionality_spec={
'enable': True,
'num_random_state_observations': NUM_DUMMY,
},
safety_spec={'enable': True},
environment_kwargs=dict(flat_observation=True))
mod_env.reset()
# Get observation from realworld task.
base_obs = base_env.step(0)
mod_obs = mod_env.step(0)
self.assertEqual(mod_obs.observation.shape[0],
base_obs.observation.shape[0] + NUM_DUMMY)
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testNoiseGaussianObservationsFlattening(self, domain_name, task_name):
"""Ensure there is an additive Gaussian noise to the observation."""
noise = 0.5
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
noise_spec={'gaussian': {
'enable': True,
'observations': noise
}},
environment_kwargs={'flat_observation': True})
env.reset()
env.step(0)
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testNoiseGaussianObservations(self, domain_name, task_name):
"""Ensure there is an additive Gaussian noise to the observation."""
noise = 0.5
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
noise_spec={'gaussian': {
'enable': True,
'observations': noise
}})
env.reset()
# Get observation from realworld cartpole.
obs1 = env._task.get_observation(env._physics)
# Get observation from underlying cartpole.
obs2 = collections.OrderedDict()
if domain_name == 'cartpole':
obs2['position'] = env.physics.bounded_position()
obs2['velocity'] = env.physics.velocity()
elif domain_name == 'humanoid':
obs2['joint_angles'] = env.physics.joint_angles()
obs2['head_height'] = env.physics.head_height()
obs2['extremities'] = env.physics.extremities()
obs2['torso_vertical'] = env.physics.torso_vertical_orientation()
obs2['com_velocity'] = env.physics.center_of_mass_velocity()
obs2['velocity'] = env.physics.velocity()
elif domain_name == 'manipulator':
arm_joints = [
'arm_root', 'arm_shoulder', 'arm_elbow', 'arm_wrist', 'finger',
'fingertip', 'thumb', 'thumbtip'
]
obs2['arm_pos'] = env.physics.bounded_joint_pos(arm_joints)
obs2['arm_vel'] = env.physics.joint_vel(arm_joints)
obs2['touch'] = env.physics.touch()
obs2['hand_pos'] = env.physics.body_2d_pose('hand')
obs2['object_pos'] = env.physics.body_2d_pose(env._task._object)
obs2['object_vel'] = env.physics.joint_vel(env._task._object_joints)
obs2['target_pos'] = env.physics.body_2d_pose(env._task._target)
elif domain_name == 'quadruped':
obs2['egocentric_state'] = env.physics.egocentric_state()
obs2['torso_velocity'] = env.physics.torso_velocity()
obs2['torso_upright'] = env.physics.torso_upright()
obs2['imu'] = env.physics.imu()
obs2['force_torque'] = env.physics.force_torque()
elif domain_name == 'walker':
obs2['orientations'] = env.physics.orientations()
obs2['height'] = env.physics.torso_height()
obs2['velocity'] = env.physics.velocity()
else:
raise ValueError('Unknown environment name: %s' % domain_name)
# Verify that the observations are different (noise added).
for key in obs1:
np.testing.assert_array_compare(operator.__ne__, obs1[key], obs2[key])
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testNoiseDroppedObservationsFlattening(self, domain_name, task_name):
"""Ensure there is an additive Gaussian noise to the observation."""
prob = 1.
steps = np.random.randint(low=3, high=10)
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
noise_spec={
'dropped': {
'enable': True,
'observations_prob': prob,
'observations_steps': steps,
}
},
environment_kwargs={'flat_observation': True}
)
env.reset()
env.step(np.array(0)) # Scalar actions aren't tolerated with noise.
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testNoiseDroppedObservationsValues(self, domain_name, task_name):
"""Ensure observations drop values."""
steps = np.random.randint(low=3, high=10)
prob = 1.
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
noise_spec={
'dropped': {
'enable': True,
'observations_prob': prob,
'observations_steps': steps,
}
})
action_spec = env.action_spec()
one_action = np.ones(shape=action_spec.shape, dtype=action_spec.dtype)
for step in range(steps):
# Verify that values are dropping for the first `steps` steps.
if step == 1:
# Cancel the dropped values after the first sequence.
env._task._noise_dropped_obs_steps = 0.
obs = env.step(copy.copy(one_action))[3]
for key in obs:
if isinstance(obs[key], np.ndarray):
np.testing.assert_array_equal(obs[key], np.zeros(obs[key].shape))
else:
np.testing.assert_array_equal(obs[key], 0.)
obs = env.step(copy.copy(one_action))[3]
# Ensure observation is not filled with zeros.
for key in obs:
obs[key] += np.random.normal()
# Pass the observation through the base class that is in charge of dropping values.
obs = realworld_env.Base.get_observation(env._task, env._physics, obs)
for key in obs:
# Verify that values have stopped dropping.
if isinstance(obs[key], np.ndarray):
np.testing.assert_array_compare(operator.__ne__, obs[key],
np.zeros(obs[key].shape))
else:
np.testing.assert_array_compare(operator.__ne__, obs[key], 0.)
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testNoiseDroppedActionsValues(self, domain_name, task_name):
"""Ensure observations drop values."""
steps = np.random.randint(low=3, high=10)
prob = 1.
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
noise_spec={
'dropped': {
'enable': True,
'actions_prob': prob,
'actions_steps': steps,
}
})
env.reset()
action_spec = env.action_spec()
    one_action = np.ones(shape=action_spec.shape, dtype=action_spec.dtype)
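    # --- hedged sketch: the original test is truncated here. A minimal completion that exercises
    # --- the dropped-actions path; the exact assertion depends on how the suite implements
    # --- dropped actions (e.g. zeroed vs. held), which is not visible in this excerpt.
    for _ in range(steps):
      env.step(copy.copy(one_action))
    # e.g., if dropped actions are replaced by zeros for the first `steps` steps, one would expect:
    # np.testing.assert_array_equal(env.physics.control(), np.zeros_like(one_action))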
"""
pyrad.proc.process_intercomp
============================
Functions used in the inter-comparison between radars
.. autosummary::
:toctree: generated/
process_time_stats
process_time_stats2
process_time_avg
process_weighted_time_avg
process_time_avg_flag
process_colocated_gates
process_intercomp
process_intercomp_time_avg
process_fields_diff
process_intercomp_fields
"""
from copy import deepcopy
from warnings import warn
import datetime
import numpy as np
import scipy
from netCDF4 import num2date
import pyart
from ..io.io_aux import get_datatype_fields, get_fieldname_pyart
from ..io.io_aux import get_save_dir, make_filename
from ..io.read_data_other import read_colocated_gates, read_colocated_data
from ..io.read_data_other import read_colocated_data_time_avg
from ..io.read_data_radar import interpol_field
from ..util.radar_utils import time_avg_range, get_range_bins_to_avg
from ..util.radar_utils import find_colocated_indexes
def process_time_stats(procstatus, dscfg, radar_list=None):
"""
computes the temporal statistics of a field
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
period : float. Dataset keyword
the period to average [s]. If -1 the statistics are going to be
performed over the entire data. Default 3600.
start_average : float. Dataset keyword
when to start the average [s from midnight UTC]. Default 0.
lin_trans: int. Dataset keyword
If 1 apply linear transformation before averaging
use_nan : bool. Dataset keyword
If true non valid data will be used
nan_value : float. Dataset keyword
The value of the non valid data. Default 0
stat: string. Dataset keyword
Statistic to compute: Can be mean, std, cov, min, max. Default
mean
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
field_name = get_fieldname_pyart(datatype)
break
ind_rad = int(radarnr[5:8])-1
start_average = dscfg.get('start_average', 0.)
period = dscfg.get('period', 3600.)
lin_trans = dscfg.get('lin_trans', 0)
use_nan = dscfg.get('use_nan', 0)
nan_value = dscfg.get('nan_value', 0.)
stat = dscfg.get('stat', 'mean')
if procstatus == 0:
return None, None
if procstatus == 1:
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if field_name not in radar.fields:
warn(field_name+' not available.')
return None, None
# Prepare auxiliary radar
field = deepcopy(radar.fields[field_name])
if stat in ('mean', 'std', 'cov'):
if lin_trans:
field['data'] = np.ma.power(10., 0.1*field['data'])
if use_nan:
field['data'] = np.ma.asarray(field['data'].filled(nan_value))
if stat in ('std', 'cov'):
sum2_dict = pyart.config.get_metadata('sum_squared')
sum2_dict['data'] = field['data']*field['data']
else:
if use_nan:
field['data'] = np.ma.asarray(field['data'].filled(nan_value))
npoints_dict = pyart.config.get_metadata('number_of_samples')
npoints_dict['data'] = np.ma.asarray(
np.logical_not(np.ma.getmaskarray(field['data'])), dtype=int)
radar_aux = deepcopy(radar)
radar_aux.fields = dict()
radar_aux.add_field(field_name, field)
radar_aux.add_field('number_of_samples', npoints_dict)
if stat in ('std', 'cov'):
radar_aux.add_field('sum_squared', sum2_dict)
# first volume: initialize start and end time of averaging
if dscfg['initialized'] == 0:
avg_par = dict()
if period != -1:
date_00 = dscfg['timeinfo'].replace(
hour=0, minute=0, second=0, microsecond=0)
avg_par.update(
{'starttime': date_00+datetime.timedelta(
seconds=start_average)})
avg_par.update(
{'endtime': avg_par['starttime']+datetime.timedelta(
seconds=period)})
else:
avg_par.update({'starttime': dscfg['timeinfo']})
avg_par.update({'endtime': dscfg['timeinfo']})
avg_par.update({'timeinfo': dscfg['timeinfo']})
dscfg['global_data'] = avg_par
dscfg['initialized'] = 1
if dscfg['initialized'] == 0:
return None, None
dscfg['global_data']['timeinfo'] = dscfg['timeinfo']
# no radar object in global data: create it
if 'radar_out' not in dscfg['global_data']:
if period != -1:
# get start and stop times of new radar object
(dscfg['global_data']['starttime'],
dscfg['global_data']['endtime']) = (
time_avg_range(
dscfg['timeinfo'], dscfg['global_data']['starttime'],
dscfg['global_data']['endtime'], period))
# check if volume time older than starttime
if dscfg['timeinfo'] > dscfg['global_data']['starttime']:
dscfg['global_data'].update({'radar_out': radar_aux})
else:
dscfg['global_data'].update({'radar_out': radar_aux})
return None, None
# still accumulating: add field to global field
if (period == -1 or
dscfg['timeinfo'] < dscfg['global_data']['endtime']):
if period == -1:
dscfg['global_data']['endtime'] = dscfg['timeinfo']
field_interp = interpol_field(
dscfg['global_data']['radar_out'], radar_aux, field_name)
npoints_interp = interpol_field(
dscfg['global_data']['radar_out'], radar_aux,
'number_of_samples')
if use_nan:
field_interp['data'] = np.ma.asarray(
field_interp['data'].filled(nan_value))
dscfg['global_data']['radar_out'].fields[
'number_of_samples']['data'] += np.ma.asarray(
npoints_interp['data'].filled(fill_value=1),
dtype=int)
else:
dscfg['global_data']['radar_out'].fields[
'number_of_samples']['data'] += np.ma.asarray(
npoints_interp['data'].filled(fill_value=0),
dtype=int)
if stat in ('mean', 'std', 'cov'):
masked_sum = np.ma.getmaskarray(
dscfg['global_data']['radar_out'].fields[
field_name]['data'])
valid_sum = np.logical_and(
np.logical_not(masked_sum),
np.logical_not(np.ma.getmaskarray(field_interp['data'])))
dscfg['global_data']['radar_out'].fields[
field_name]['data'][masked_sum] = (
field_interp['data'][masked_sum])
dscfg['global_data']['radar_out'].fields[
field_name]['data'][valid_sum] += (
field_interp['data'][valid_sum])
if stat in ('cov', 'std'):
dscfg['global_data']['radar_out'].fields[
'sum_squared']['data'][masked_sum] = (
field_interp['data'][masked_sum] *
field_interp['data'][masked_sum])
dscfg['global_data']['radar_out'].fields[
'sum_squared']['data'][valid_sum] += (
field_interp['data'][valid_sum] *
field_interp['data'][valid_sum])
elif stat == 'max':
dscfg['global_data']['radar_out'].fields[
field_name]['data'] = np.maximum(
dscfg['global_data']['radar_out'].fields[
field_name]['data'].filled(fill_value=-1.e300),
field_interp['data'].filled(fill_value=-1.e300))
dscfg['global_data']['radar_out'].fields[
field_name]['data'] = np.ma.masked_values(
dscfg['global_data']['radar_out'].fields[
field_name]['data'], -1.e300)
elif stat == 'min':
dscfg['global_data']['radar_out'].fields[
field_name]['data'] = np.minimum(
dscfg['global_data']['radar_out'].fields[
field_name]['data'].filled(fill_value=1.e300),
field_interp['data'].filled(fill_value=1.e300))
dscfg['global_data']['radar_out'].fields[
field_name]['data'] = np.ma.masked_values(
dscfg['global_data']['radar_out'].fields[
field_name]['data'], 1.e300)
return None, None
# we have reached the end of the accumulation period: do the averaging
# and start a new object (only reachable if period != -1)
if stat in ('mean', 'std', 'cov'):
field_mean = (
dscfg['global_data']['radar_out'].fields[field_name]['data'] /
dscfg['global_data']['radar_out'].fields[
'number_of_samples']['data'])
if stat == 'mean':
if lin_trans:
dscfg['global_data']['radar_out'].fields[
field_name]['data'] = 10.*np.ma.log10(field_mean)
else:
dscfg['global_data']['radar_out'].fields[
field_name]['data'] = field_mean
elif stat in ('std', 'cov'):
field_std = np.ma.sqrt(
dscfg['global_data']['radar_out'].fields[
'sum_squared']['data'] /
dscfg['global_data']['radar_out'].fields[
'number_of_samples']['data']-field_mean*field_mean)
if stat == 'std':
if lin_trans:
dscfg['global_data']['radar_out'].fields[
field_name]['data'] = 10.*np.ma.log10(field_std)
else:
dscfg['global_data']['radar_out'].fields[
field_name]['data'] = field_std
else:
if lin_trans:
dscfg['global_data']['radar_out'].fields[
field_name]['data'] = 10.*np.ma.log10(
field_std/field_mean)
else:
dscfg['global_data']['radar_out'].fields[
field_name]['data'] = field_std/field_mean
new_dataset = {
'radar_out': deepcopy(dscfg['global_data']['radar_out']),
'timeinfo': dscfg['global_data']['endtime']}
dscfg['global_data']['starttime'] += datetime.timedelta(
seconds=period)
dscfg['global_data']['endtime'] += datetime.timedelta(seconds=period)
# remove old radar object from global_data dictionary
dscfg['global_data'].pop('radar_out', None)
# get start and stop times of new radar object
dscfg['global_data']['starttime'], dscfg['global_data']['endtime'] = (
time_avg_range(
dscfg['timeinfo'], dscfg['global_data']['starttime'],
dscfg['global_data']['endtime'], period))
# check whether the volume time is newer than starttime
if dscfg['timeinfo'] > dscfg['global_data']['starttime']:
dscfg['global_data'].update({'radar_out': radar_aux})
return new_dataset, ind_rad
# no more files to process: if there is global data, pack it up
if procstatus == 2:
if dscfg['initialized'] == 0:
return None, None
if 'radar_out' not in dscfg['global_data']:
return None, None
if stat in ('mean', 'std', 'cov'):
field_mean = (
dscfg['global_data']['radar_out'].fields[field_name]['data'] /
dscfg['global_data']['radar_out'].fields[
'number_of_samples']['data'])
if stat == 'mean':
if lin_trans:
dscfg['global_data']['radar_out'].fields[
field_name]['data'] = 10.*np.ma.log10(field_mean)
else:
dscfg['global_data']['radar_out'].fields[
field_name]['data'] = field_mean
elif stat in ('std', 'cov'):
field_std = np.ma.sqrt(
dscfg['global_data']['radar_out'].fields[
'sum_squared']['data'] /
dscfg['global_data']['radar_out'].fields[
'number_of_samples']['data']-field_mean*field_mean)
if stat == 'std':
if lin_trans:
dscfg['global_data']['radar_out'].fields[
field_name]['data'] = 10.*np.ma.log10(field_std)
else:
dscfg['global_data']['radar_out'].fields[
field_name]['data'] = field_std
else:
dscfg['global_data']['radar_out'].fields[
field_name]['data'] = field_std/field_mean
new_dataset = {
'radar_out': deepcopy(dscfg['global_data']['radar_out']),
'timeinfo': dscfg['global_data']['endtime']}
return new_dataset, ind_rad
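# Illustrative sketch (not part of the original module): how the 'mean'/'std'/'cov' results
# above are recovered from the three accumulated fields (running sum, sum of squares and
# number_of_samples). Plain numpy stand-ins replace the masked radar fields:
import numpy as np
field_sum = np.array([12.0, 30.0])      # accumulated (possibly linearised) values
field_sum2 = np.array([50.0, 310.0])    # accumulated squared values ('sum_squared')
nsamples = np.array([3, 3])             # 'number_of_samples'
mean = field_sum / nsamples
std = np.sqrt(field_sum2 / nsamples - mean * mean)
cov = std / mean                        # 'cov' here is the coefficient of variation, std/mean
# with lin_trans the mean/std would finally be converted back via 10. * np.log10(...)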
def process_time_stats2(procstatus, dscfg, radar_list=None):
"""
computes temporal statistics (median, mode or percentile) of a field
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
period : float. Dataset keyword
the period to average [s]. If -1 the statistics are going to be
performed over the entire data. Default 3600.
start_average : float. Dataset keyword
when to start the average [s from midnight UTC]. Default 0.
stat: string. Dataset keyword
Statistic to compute: Can be median, mode, percentileXX
use_nan : bool. Dataset keyword
If true non valid data will be used
nan_value : float. Dataset keyword
The value of the non valid data. Default 0
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
field_name = get_fieldname_pyart(datatype)
break
ind_rad = int(radarnr[5:8])-1
start_average = dscfg.get('start_average', 0.)
period = dscfg.get('period', 3600.)
use_nan = dscfg.get('use_nan', 0)
nan_value = dscfg.get('nan_value', 0.)
stat = dscfg.get('stat', 'median')
if 'percentile' in stat:
percentile = float(stat.replace('percentile', ''))
if procstatus == 0:
return None, None
if procstatus == 1:
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if field_name not in radar.fields:
warn(field_name+' not available.')
return None, None
# prepare auxiliary radar
field = deepcopy(radar.fields[field_name])
if use_nan:
field['data'] = np.ma.asarray(field['data'].filled(nan_value))
npoints_dict = pyart.config.get_metadata('number_of_samples')
npoints_dict['data'] = np.ma.asarray(
np.logical_not(np.ma.getmaskarray(field['data'])), dtype=int)
radar_aux = deepcopy(radar)
radar_aux.fields = dict()
radar_aux.add_field(field_name, field)
radar_aux.add_field('number_of_samples', npoints_dict)
# first volume: initialize start and end time of averaging
if dscfg['initialized'] == 0:
avg_par = dict()
if period != -1:
date_00 = dscfg['timeinfo'].replace(
hour=0, minute=0, second=0, microsecond=0)
avg_par.update(
{'starttime': date_00+datetime.timedelta(
seconds=start_average)})
avg_par.update(
{'endtime': avg_par['starttime']+datetime.timedelta(
seconds=period)})
else:
avg_par.update({'starttime': dscfg['timeinfo']})
avg_par.update({'endtime': dscfg['timeinfo']})
avg_par.update({'timeinfo': dscfg['timeinfo']})
dscfg['global_data'] = avg_par
dscfg['initialized'] = 1
if dscfg['initialized'] == 0:
return None, None
dscfg['global_data']['timeinfo'] = dscfg['timeinfo']
# no radar object in global data: create it
if 'radar_out' not in dscfg['global_data']:
if period != -1:
# get start and stop times of new radar object
(dscfg['global_data']['starttime'],
dscfg['global_data']['endtime']) = (
time_avg_range(
dscfg['timeinfo'], dscfg['global_data']['starttime'],
dscfg['global_data']['endtime'], period))
# check whether the volume time is newer than starttime
if dscfg['timeinfo'] > dscfg['global_data']['starttime']:
dscfg['global_data'].update({'radar_out': radar_aux})
dscfg['global_data'].update(
{'field_data': np.atleast_3d(
radar_aux.fields[field_name]['data'])})
else:
dscfg['global_data'].update({'radar_out': radar_aux})
dscfg['global_data'].update(
{'field_data': np.atleast_3d(
radar_aux.fields[field_name]['data'])})
return None, None
# still accumulating: add field to global field
if (period == -1 or
dscfg['timeinfo'] < dscfg['global_data']['endtime']):
if period == -1:
dscfg['global_data']['endtime'] = dscfg['timeinfo']
field_interp = interpol_field(
dscfg['global_data']['radar_out'], radar_aux, field_name)
npoints_interp = interpol_field(
dscfg['global_data']['radar_out'], radar_aux,
'number_of_samples')
if use_nan:
field_interp['data'] = np.ma.asarray(
field_interp['data'].filled(nan_value))
dscfg['global_data']['radar_out'].fields[
'number_of_samples']['data'] += np.ma.asarray(
npoints_interp['data'].filled(fill_value=1),
dtype=int)
else:
dscfg['global_data']['radar_out'].fields[
'number_of_samples']['data'] += np.ma.asarray(
npoints_interp['data'].filled(fill_value=0),
dtype=int)
dscfg['global_data']['field_data'] = np.ma.append(
dscfg['global_data']['field_data'],
np.atleast_3d(field_interp['data']), axis=2)
return None, None
# we have reached the end of the accumulation period: do the averaging
# and start a new object (only reachable if period != -1)
if stat == 'median':
dscfg['global_data']['radar_out'].fields[
field_name]['data'] = np.ma.median(
dscfg['global_data']['field_data'], axis=2)
elif stat == 'mode':
mode_data, _ = scipy.stats.mode(
dscfg['global_data']['field_data'].filled(fill_value=np.nan),
axis=2, nan_policy='omit')
dscfg['global_data']['radar_out'].fields[field_name]['data'] = (
np.ma.masked_invalid(np.squeeze(mode_data, axis=2)))
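# Illustrative sketch (not part of the original modules): process_time_stats2 stacks every
# interpolated volume along a third axis ('field_data') and reduces over that axis at the
# end of the period. The same reductions on a plain array stack of three 2x2 "volumes":
import numpy as np
import scipy.stats
stack = np.dstack([np.full((2, 2), v) for v in (1., 2., 2.)])       # shape (2, 2, 3)
median_field = np.ma.median(np.ma.masked_invalid(stack), axis=2)    # stat == 'median'
mode_field, _ = scipy.stats.mode(stack, axis=2, nan_policy='omit')  # stat == 'mode'
mode_field = np.asarray(mode_field).reshape(2, 2)                   # older SciPy keeps the reduced axis
pctl95_field = np.nanpercentile(stack, 95., axis=2)                 # stat == 'percentile95'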
"""
Author: <NAME>
Email: <EMAIL>
Description: Part of the code package corresponding to my master thesis. This file implements a 2d
optimal control problem example.
Year: 2019/2020
"""
########################################################################################################################
# importing stuff
########################################################################################################################
import numpy as np
import random
import logging
import re
from matplotlib import pyplot as plt
from collections import deque
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import time
from valueFunctionApprox.myValueFunctionApproximation import MyValueFunctionApproximation
from misc.myLoggingFunctions import createSetupFile
from misc.myLoggingFunctions import depositeIntermediateTrainingResults
import sys
sys.path.append('../functionApproximation/cpd')
from myALSOptimiser import MyALSRbfOptimiser
########################################################################################################################
# definition of functions
########################################################################################################################
def terminalCostFunc(xList, domainBox):
"""
@param[in] ### xList ### list of length two containing data points, where
xList[0] corresponds to the x1 data, and xList[1] corresponds to the x2 data
NOTE:
> data can be given in scalar or in matrix form!
@param[in] ### domainBox ### list of two tuples representing the boundary of the domain the vehicle should
not leave
@return ### retVal ### a scalar or a matrix modeling the cost of achieving the states
given in xList at the final time
"""
# take care of the following:
# > python uses different logical operators for boolean arrays and for boolean scalars - no
# single construct covers both cases ...
# > hence two cases are distinguished: is xList a list of two scalars or a list of two arrays ...
tmp = 100.0
if np.isscalar(xList[0]) == True:
if (domainBox[0][0] <= xList[0] <= domainBox[0][1]) and (domainBox[1][0] <= xList[1] <= domainBox[1][1]):
return 0.0
else:
return tmp
else:
retVal = tmp * np.ones(xList[0].shape)
tmp1 = np.logical_and((domainBox[0][0] <= xList[0]), (xList[0] <= domainBox[0][1]))
tmp2 = np.logical_and((domainBox[1][0] <= xList[1]), (xList[1] <= domainBox[1][1]))
retVal[np.logical_and(tmp1, tmp2)] = 0
return retVal
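# Quick illustration of the two branches above (values assume the domainBox = [(-2, 2), (-2, 2)]
# set further down in this file):
#   terminalCostFunc([0.5, 0.5], [(-2, 2), (-2, 2)]) -> 0.0
#   terminalCostFunc([np.array([0.5, 3.0]), np.array([0.5, 0.0])], [(-2, 2), (-2, 2)]) -> array([0., 100.])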
def stageCostFunc(xList, u, domainBox):
"""
@param[in] ### xList ### as in terminalCostFunc
@param[in] ### u ### a single control value (a scalar, not a vector or a matrix)
@param[in] ### domainBox ### as in terminalCostFunc
@return ### retVal ### a scalar or a matrix modeling the cost of performing the control u given the states xList
"""
return xList[0] ** 2 + xList[1] ** 2 + u ** 2 + terminalCostFunc(xList, domainBox)
# return 1000 * xList[0] ** 2 + xList[1] ** 2 + 0.000001 * u ** 2 + terminalCostFunc(xList, domainBox)
def mySdeCoeffsFunc(xList, u, sig1, sig2):
"""
@param[in] ### xList ### as above
@param[in] ### u ### as above
@param[in] ### sig1 ### volatility parameter of the sde
@param[in] ### sig2 ### volatility parameter of the sde
@return ### retVal1, retVal2 ### vectors or matrices containing the coefficients of the
sde given the input parameters
"""
if np.isscalar(xList[0]) == True:
retVal1 = np.zeros(2)
retVal2 = np.zeros(2)
retVal1[0] = xList[1]
retVal1[1] = u
retVal2[0] = sig1
retVal2[1] = sig2
return retVal1, retVal2
else:
retVal1 = np.zeros((2, xList[0].shape[0]))
retVal2 = np.zeros((2, xList[1].shape[0]))
retVal1[0, :] = xList[1]
retVal1[1, :] = u
retVal2[0, :] = sig1
retVal2[1, :] = sig2
return retVal1, retVal2
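# The coefficients returned above define the controlled double integrator
#   dX1 = X2 dt + sig1 dW1,    dX2 = u dt + sig2 dW2,
# which the routines below discretise with a weak Euler step
#   X_{i+1} = X_i + a(X_i, u) * dt + sqrt(dt) * b * xi,
# where xi is drawn uniformly from sampleSpace = {+-e1, +-e2}, hence prob = 1 / (2 * dim).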
def my2dExample_a(L, rank, degrs, signature, subSignature, path2SubSignDir, path2FigDir,\
path2ModDir, path2PerfDataDir):
"""
@param[in] ### L ### number of data points to be used
@param[in] ### rank ### rank of cpd decomposition which will be used
@param[in] ### degrs ### list of numbers corresponding to the degrees for the cpd decomposition along each of
the coordinate axes.
@param[in] ### signature ### string corresponding to the signature of the training session
@param[in] ### subSignature ### string corresponding to the sub-signature of the training session
@param[in] ### path2SubSignDir ### string corresponding to the path of the directory where information/data
of the corresponding subsession should be stored.
@param[in] ### path2FigDir ### string corresponding to the path of the directory where figures
of the corresponding subsession should be stored.
@param[in] ### path2ModDir ### string corresponding to the path of the directory where the model
of the corresponding subsession should be stored.
@param[in] ### path2PerfDataDir ### string corresponding to the path of the directory where performance data
of the corresponding subsession should be stored.
NOTE:
> at every time point the same ansatz function is used for the approximation of the value function!
"""
random.seed(0)
np.random.seed(0)
#################################################################
# ### initialisations
#################################################################
####################### TEMPORAL AND SPATIAL INITIALISATIONS ########################
dim = 2
boundingBox = []
boundingBox.append((-4.0, 4.0))
boundingBox.append((-4.0, 4.0))
sampleBox = []
sampleBox.append((-4.0, 4.0))
sampleBox.append((-4.0, 4.0))
numControls = 21
controlVec = np.linspace(-1, 1, numControls)
t0 = 0
T = 3.0
numTimeIntervals = 30
numTimePts = numTimeIntervals + 1
dt = (T - t0) / numTimeIntervals
sqrtDt = np.sqrt(dt)
sampleSpace = np.array([np.array([1, 0]), np.array([-1, 0]), np.array([0, 1]), np.array([0, -1])])
####################### USER CHECK ########################
# print('### check parameters ###')
# print(' > t0 = ' + str(t0))
# print(' > T = ' + str(T))
# print(' > numTimeIntervals = ' + str(numTimeIntervals))
# print(' > numTimePts = ' + str(numTimePts))
# print(' > dt = ' + str(dt))
# print(' > sqrt(dt) = ' + str(sqrtDt))
# print(' > numControls = ' + str(numControls))
# print('########################')
# input(' ... hit ENTER to continue ...')
####################### MODEL PARAMETERS ########################
prob = 1.0 / (2 * dim)
sigma1 = 5e-2
sigma2 = 5e-2
domainBox = []
domainBox.append((-2, 2))
domainBox.append((-2, 2))
#################################################################
# ### start function approximation procedure
#################################################################
# fix ALS parameters (there are default parameters ... )
maxNumALSIter = 200
eta = 1e-4
epsALS = 1e-5
# fix CG parameters (there are default parameters ... )
maxNumCGIter = 50
epsCG = 1e-5
resNormFrac = 1e-1
# fix parameters regarding the value iteration
maxNumValIter = 1
#################################################################
# ### write parameters into to dict - for logging purposes
#################################################################
# sde parameters
sdeParamsDict = {}
sdeParamsDict['sigma1'] = sigma1
sdeParamsDict['sigma2'] = sigma2
sdeParamsDict['domainBox_x1_left'] = domainBox[0][0]
sdeParamsDict['domainBox_x1_right'] = domainBox[0][1]
sdeParamsDict['domainBox_x2_left'] = domainBox[1][0]
sdeParamsDict['domainBox_x2_right'] = domainBox[1][1]
# parameters of the control problem
ctrlProbDict = {}
ctrlProbDict['numTimePts'] = numTimePts
ctrlProbDict['startTime'] = t0
ctrlProbDict['endTime'] = T
ctrlProbDict['numControls'] = numControls
ctrlProbDict['controlLwrBnd'] = controlVec[0]
ctrlProbDict['controlUprBnd'] = controlVec[-1]
# value iteration parameters
valIterParamsDict = {}
valIterParamsDict['sampleBox_x1_left'] = sampleBox[0][0]
valIterParamsDict['sampleBox_x1_right'] = sampleBox[0][1]
valIterParamsDict['sampleBox_x2_left'] = sampleBox[1][0]
valIterParamsDict['sampleBox_x2_right'] = sampleBox[1][1]
valIterParamsDict['maxNumValIter'] = maxNumValIter
# cpd parameters
cpdParamsDict = {}
cpdParamsDict['rank'] = rank
for d in range(0, dim):
cpdParamsDict['degrCrdnt' + str(d + 1)] = degrs[d]
# als parameters
alsParamsDict = {}
alsParamsDict['boundingBox_x1_left'] = boundingBox[0][0]
alsParamsDict['boundingBox_x1_right'] = boundingBox[0][1]
alsParamsDict['boundingBox_x2_left'] = boundingBox[1][0]
alsParamsDict['boundingBox_x2_right'] = boundingBox[1][1]
alsParamsDict['eta'] = eta
alsParamsDict['maxNumALSIter'] = maxNumALSIter
alsParamsDict['numDataPoints'] = L
alsParamsDict['descentBound'] = epsALS
cgParamsDict = {}
cgParamsDict['maxNumCGIter'] = maxNumCGIter
cgParamsDict['residualBound'] = epsCG
cgParamsDict['residualFraction'] = resNormFrac
#################################################################
# ### initialise model function
#################################################################
modelFunc = MyValueFunctionApproximation(t0, T, numTimePts - 1, dim, boundingBox, lambda x : terminalCostFunc(x, domainBox))
apprParams = []
for i in range(0, numTimePts - 1):
apprParams.append((rank, degrs))
modelFunc.initialise(apprParams)
totNumParams = 0
for d in range(0, dim):
totNumParams += rank * degrs[d]
#################################################################
# ### start value function approximation procedure
#################################################################
logging.info('------------------------------------------------------------------------------------------')
logging.info('> start optimisation procedure corresponding to the sub signature # ' + subSignature + ' #')
print('------------------------------------------------------------------------------------------')
print('> start optimisation procedure corresponding to the sub signature # ' + subSignature + ' #')
xDataList = []
for i in range(0, numTimePts - 1):
tmpXData = np.zeros((dim, L))
tmpXData[0, :] = np.random.uniform(sampleBox[0][0], sampleBox[0][1], L)
tmpXData[1, :] = np.random.uniform(sampleBox[1][0], sampleBox[1][1], L)
xDataList.append(tmpXData)
# initialise object of optimiser class
optimiser = MyALSRbfOptimiser(eta = eta, maxNumALSIter = maxNumALSIter, epsALS = epsALS)
# provide a data structure where certain results of the optimisation procedure will be stored ...
optimData = np.zeros((maxNumValIter, numTimePts - 1, 3))
stateData = np.zeros((numTimePts, dim, L))
controlData = np.zeros((numTimePts - 1, L))
##########################################################################
depositeIntermediateTrainingResults(modelFunc, path2ModDir, 0)
for k in range(0, maxNumValIter):
logging.info('start policy iteration number # ' + str(k) + ' #')
print('start policy iteration number # ' + str(k) + ' #')
#################################################################
# ### update phase
#################################################################
# RECALL:
# > there are numTimePts - 1 ansatz functions which have to be optimised!
# > the value function at the final time point equals the terminal cost function
for i in range(numTimePts - 2, -1, -1):
################################################################################
print('# update - time point t' + str(i))
logging.info('# update - time point t' + str(i))
################################################################################
# reinitialise data
xData = np.zeros((dim, L))
xData = xDataList[i].copy()
yData = np.zeros(L)
stgCost = np.zeros(L)
################################################################################
f0 = np.zeros((numControls, dim, L))
f1 = np.zeros((numControls, dim, L))
f2 = np.zeros((numControls, dim, L))
f3 = np.zeros((numControls, dim, L))
controlData = np.zeros(L)
sampleIdx = np.zeros(L)
sampleIdx = np.random.choice([0, 1, 2, 3], L, replace = True)
sampleData = np.zeros((2, L))
sampleData = sampleSpace[sampleIdx].transpose()
tmpVec = np.zeros((numControls, L))
for j in range(0, numControls):
a1 = np.zeros((dim, L))
b1 = np.zeros((dim, L))
stgCost = np.zeros(L)
a1, b1 = mySdeCoeffsFunc([xData[0, :], xData[1, :]], controlVec[j], sigma1, sigma2)
f0[j, :, :] = xData + a1 * dt + sqrtDt * b1 * np.asarray([1, 0]).reshape(2, 1)
f1[j, :, :] = xData + a1 * dt + sqrtDt * b1 * np.asarray([0, 1]).reshape(2, 1)
f2[j, :, :] = xData + a1 * dt + sqrtDt * b1 * np.asarray([0, -1]).reshape(2, 1)
f3[j, :, :] = xData + a1 * dt + sqrtDt * b1 * np.asarray([-1, 0]).reshape(2, 1)
stgCost = stageCostFunc([xData[0, :], xData[1, :]], controlVec[j], domainBox)
tmpVec[j, :] = stgCost * dt \
+ prob * (modelFunc.evaluate(i + 1, [f0[j, 0, :], f0[j, 1, :]], L) \
+ modelFunc.evaluate(i + 1, [f1[j, 0, :], f1[j, 1, :]], L) \
+ modelFunc.evaluate(i + 1, [f2[j, 0, :], f2[j, 1, :]], L) \
+ modelFunc.evaluate(i + 1, [f3[j, 0, :], f3[j, 1, :]], L))
################################################################################
# compute target values
yData = tmpVec.min(axis = 0)
# start optimisation process
costFuncValList, mseApprErrList, lInfApprErrList, cgPerformanceList = \
optimiser.myRbfCgOptimiser(L, xData, yData, modelFunc.getPartialModelFunc(i), \
path2ModDir, 'modelFunc_t' + str(i), \
maxNumCGIter = maxNumCGIter, epsCG = epsCG, resNormFrac = resNormFrac, \
warmUp = False, verbose = False, write2File = False)
depositeIntermediateTrainingResults(modelFunc, path2ModDir, k + 1)
####################################################################################################################
# ### post processing
####################################################################################################################
# create setup file
logging.info('> create setup file')
createSetupFile(sdeParamsDict, ctrlProbDict, valIterParamsDict, alsParamsDict, cgParamsDict, cpdParamsDict, path2SubSignDir)
# write performance data 2 file
# writePerformanceData2File(numTimePts, optimData, pathNames[0])
def simulateFromFile_2d_a(sessId, subSessId, iterId):
"""
this function reads a model from file (more exact: its parameter) and simulates the dynamics of the system
following the policy given by the approximation to the value function
@param[in] ### sessId ### integer ID of the training session whose model files should be loaded
@param[in] ### subSessId ### integer ID of the training sub-session
whose model files should be loaded
@param[in] ### iterId ### integer ID of the iteration level whose model files should be loaded
"""
sign = str(sessId)
subSign = str(subSessId)
path2Dir = '../../results/valueIteration/2d_a_' + sign + '/2d_a_' + sign + '_' + subSign
#################################################################
# ### initialisations
#################################################################
####################### TEMPORAL AND SPATIAL INITIALISATIONS ########################
dim = 2
boundingBox = []
boundingBox.append((-4.0, 4.0))
boundingBox.append((-4.0, 4.0))
# sampleBox = []
# sampleBox.append((-3.0, 3.0))
# sampleBox.append((-3.0, 3.0))
numControls = 21
controlVec = np.linspace(-1, 1, numControls)
t0 = 0
T = 3.0
numTimeIntervals = 30
numTimePts = numTimeIntervals + 1
dt = (T - t0) / numTimeIntervals
sqrtDt = np.sqrt(dt)
sampleSpace = np.array([np.array([1, 0]), np.array([-1, 0]), np.array([0, 1]), np.array([0, -1])])
####################### MODEL PARAMETERS ########################
prob = 1.0 / (2 * dim)
sigma1 = 5e-2
sigma2 = 5e-2
domainBox = []
domainBox.append((-2, 2))
domainBox.append((-2, 2))
####################### USER CHECK ########################
# print('### check parameters ###')
# print(' > t0 = ' + str(t0))
# print(' > T = ' + str(T))
# print(' > numTimeIntervals = ' + str(numTimeIntervals))
# print(' > numTimePts = ' + str(numTimePts))
# print(' > dt = ' + str(dt))
# print(' > sqrt(dt) = ' + str(sqrtDt))
# print(' > numControls = ' + str(numControls))
# print('########################')
# proceed by pressing enter
# input(' ... hit ENTER to continue ...')
###########################################################
#################################################################
# ### initialise model function
#################################################################
modelFunc = MyValueFunctionApproximation(t0, T, numTimePts - 1, dim, \
boundingBox, lambda x : terminalCostFunc(x, domainBox))
modelFunc.readParamsFromFile(path2Dir + '/model' + '/iter_' + str(iterId), 'modelData_t')
for i in range(0, numTimePts - 1):
modelFunc.partialModelFuncList[i].setBoundingBox(boundingBox)
#################################################################
# ### provide data structures needed in the simulation process
#################################################################
numEvalPaths = 5
f0 = np.zeros((numControls, dim, numEvalPaths))
f1 = np.zeros((numControls, dim, numEvalPaths))
f2 = np.zeros((numControls, dim, numEvalPaths))
f3 = np.zeros((numControls, dim, numEvalPaths))
sampleIdx = np.zeros((numTimePts, numEvalPaths))
sampleIdx = np.random.choice([0, 1, 2, 3], (numTimePts, numEvalPaths), replace = True)
sampleData = np.zeros((dim, numEvalPaths))
X = np.zeros((numTimePts, dim, numEvalPaths))
U = np.zeros((numTimePts - 1, numEvalPaths))
# choose initial value
X[0, 0, :] = 1.2
X[0, 1, :] = 0.5
for i in range(0, numTimePts - 1):
####################################################
#
# simulate using approximative value function
#
####################################################
# the same as above using the approximation to the value function
tmpVec = np.zeros((numControls, numEvalPaths))
sampleData = sampleSpace[sampleIdx[i, :]].transpose()
for j in range(0, numControls):
a = np.zeros((dim, numEvalPaths))
b = np.zeros((dim, numEvalPaths))
a, b = mySdeCoeffsFunc([X[i, 0, :], X[i, 1, :]], controlVec[j], sigma1, sigma2)
f0[j, :, :] = X[i, :, :] + a * dt + sqrtDt * b * np.asarray([1, 0]).reshape(2, 1)
f1[j, :, :] = X[i, :, :] + a * dt + sqrtDt * b * np.asarray([0, 1]).reshape(2, 1)
f2[j, :, :] = X[i, :, :] + a * dt + sqrtDt * b * np.asarray([0, -1]).reshape(2, 1)
f3[j, :, :] = X[i, :, :] + a * dt + sqrtDt * b * np.asarray([-1, 0]).reshape(2, 1)
stgCost = stageCostFunc([X[i, 0, :], X[i, 1, :]], controlVec[j], domainBox)
tmpVec[j, :] = 0.25 * (stgCost * dt + modelFunc.evaluate(i + 1, [f0[j, 0, :], f0[j, 1, :]], numEvalPaths)) \
+ 0.25 * (stgCost * dt + modelFunc.evaluate(i + 1, [f1[j, 0, :], f1[j, 1, :]], numEvalPaths)) \
+ 0.25 * (stgCost * dt + modelFunc.evaluate(i + 1, [f2[j, 0, :], f2[j, 1, :]], numEvalPaths)) \
+ 0.25 * (stgCost * dt + modelFunc.evaluate(i + 1, [f3[j, 0, :], f3[j, 1, :]], numEvalPaths))
# determine optimal control
U[i, :] = controlVec[tmpVec.argmin(axis = 0)]
a = np.zeros((2, numEvalPaths))
b = np.zeros((2, numEvalPaths))
# ... and finally the subsequent state
a, b = mySdeCoeffsFunc([X[i, 0, :], X[i, 1, :]], U[i, :], sigma1, sigma2)
tmpMtrx = np.zeros(b.shape)
for row in range(0, dim):
tmpMtrx[row, :] = sampleData[row, :] * b[row, :]
X[i + 1, :, :] = X[i, :, :] + a * dt + sqrtDt * tmpMtrx
##########################################################################################################
# ### produce plot showing a variety of paths
fig1 = plt.figure(1)
for l in range(0, numEvalPaths):
plt.plot(X[:, 0, l], X[:, 1, l])
plt.title('Solution paths over time')
plt.xlabel('Position')
plt.ylabel('Velocity')
plt.xlim(0, 2)
plt.ylim(-1, 1)
# plt.xlim(boundingBox[0][0], boundingBox[0][1])
# plt.ylim(boundingBox[1][0], boundingBox[1][1])
plt.savefig(path2Dir + '/figures/' + 'solutionPaths.png')
plt.close(fig1)
# ### produce a plot showing the control paths over time
fig2 = plt.figure(2)
for l in range(0, numEvalPaths):
plt.plot(np.arange(0, numTimePts - 1), U[:, l])  # assumed continuation: control path of sample l
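# Minimal sketch (illustrative only) of the backward value-iteration backup performed in
# my2dExample_a above: for each control, stage cost times dt plus the expected next-step
# surrogate value over the weak-Euler successors; the minimum over controls gives the
# regression targets. Hypothetical 1-d stand-ins replace modelFunc / the true dynamics here.
import numpy as np
controls = np.linspace(-1., 1., 5)
x = np.array([0.3, -0.7])                    # two sampled states
V_next = lambda z: z ** 2                    # stand-in for modelFunc.evaluate(i + 1, .)
dt, sig, prob = 0.1, 5e-2, 0.5               # prob = 1 / (2 * dim) with dim = 1
q = np.empty((controls.size, x.size))
for j, u in enumerate(controls):
    stage = x ** 2 + u ** 2                  # stand-in stage cost
    x_up = x + u * dt + np.sqrt(dt) * sig    # the two random-walk successors
    x_dn = x + u * dt - np.sqrt(dt) * sig
    q[j] = stage * dt + prob * (V_next(x_up) + V_next(x_dn))
y_target = q.min(axis=0)                     # targets used to refit the value surrogate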
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import pytest
import mars.tensor as mt
import mars.dataframe as md
from mars.core import tile
from mars.learn.utils import shuffle
from mars.learn.utils.shuffle import LearnShuffle
def test_shuffle_expr():
a = mt.random.rand(10, 3, chunk_size=2)
b = md.DataFrame(mt.random.rand(10, 5), chunk_size=2)
new_a, new_b = shuffle(a, b, random_state=0)
assert new_a.op is new_b.op
assert isinstance(new_a.op, LearnShuffle)
assert new_a.shape == a.shape
assert new_b.shape == b.shape
assert b.index_value.key != new_b.index_value.key
new_a, new_b = tile(new_a, new_b)
assert len(new_a.chunks) == 10
assert np.isnan(new_a.chunks[0].shape[0])
assert len(new_b.chunks) == 15
assert np.isnan(new_b.chunks[0].shape[0])
assert new_b.chunks[0].index_value.key != new_b.chunks[1].index_value.key
assert new_a.chunks[0].op.seeds == new_b.chunks[0].op.seeds
c = mt.random.rand(10, 5, 3, chunk_size=2)
d = md.DataFrame(mt.random.rand(10, 5), chunk_size=(2, 5))
new_c, new_d = shuffle(c, d, axes=(0, 1), random_state=0)
assert new_c.op is new_d.op
assert isinstance(new_c.op, LearnShuffle)
assert new_c.shape == c.shape
assert new_d.shape == d.shape
assert d.index_value.key != new_d.index_value.key
assert not np.all(new_d.dtypes.index[:-1] < new_d.dtypes.index[1:])
pd.testing.assert_series_equal(d.dtypes, new_d.dtypes.sort_index())
new_c, new_d = tile(new_c, new_d)
assert len(new_c.chunks) == 5 * 1 * 2
assert np.isnan(new_c.chunks[0].shape[0])
assert len(new_d.chunks) == 5
assert np.isnan(new_d.chunks[0].shape[0])
assert new_d.chunks[0].shape[1] == 5
assert new_d.chunks[0].index_value.key != new_d.chunks[1].index_value.key
pd.testing.assert_series_equal(new_d.chunks[0].dtypes.sort_index(), d.dtypes)
assert new_c.chunks[0].op.seeds == new_d.chunks[0].op.seeds
assert len(new_c.chunks[0].op.seeds) == 1
assert new_c.chunks[0].op.reduce_sizes == (5,)
with pytest.raises(ValueError):
a = mt.random.rand(10, 5)
b = mt.random.rand(10, 4, 3)
shuffle(a, b, axes=1)
with pytest.raises(TypeError):
shuffle(a, b, unknown_param=True)
assert isinstance(shuffle(mt.random.rand(10, 5)), mt.Tensor)
def _sort(data, axes):
cur = data
for ax in axes:
if ax < data.ndim:
cur = np.sort(cur, axis=ax)
return cur
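# _sort above canonicalises data along the shuffled axes so that, in the execution tests, a
# shuffled result can be compared against the original input irrespective of the permutation.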
#!/usr/bin/env python
# coding: utf-8
########################################
# clustered Gaussian surrogate #
########################################
#Author: <NAME>
#<EMAIL>
#Last update: 2021-Apr-28
########################################
# System information #
########################################
#Print the python version and the name/input arguments
print('usage: python cGP_constrained.py RND_SEED(random seed) N_SEQUENTIAL(sequential sample size) EXPLORATION_RATE(exploration rate) NO_CLUSTER (1=simple GP;0=cGP) N_COMPONENTS(maximal # of components in cGP) N_NEIGHBORS(# of neighbors in k-NN classification) N_PILOT(pilot sample size, or passing a pilot sample file name) F_TRUTH_PY(location of f_truth.py, no extension needed) ACQUISITION(name of acquistion function.flexible) OBJ_NAME(output model name, saving the surrogate model)')
print('example1: python cGP_constrained.py 123 30 1.0 0 4 3 10')
print("example2: python cGP_constrained.py 123 60 0.5 0 2 3 samples_sobol_10.txt 'f_truth1' 'test1'")
print("example3: python cGP_constrained.py 123 40 0.8 0 'cluster' 'classify' samples_random_10.txt 'f_truth' 'acquisition' 'test1' 'model1'")
#Clean up
import sys
sys.modules[__name__].__dict__.clear()
import sys
#Warnings supression
import warnings
warnings.filterwarnings('ignore')
print("Python version: ", sys.version)
print(sys.argv)
#Print the numpy version and set the random seed
import numpy as np
print('numpy version: ', np.__version__)
from numpy import int64
from numpy import int
from numpy import float
from numpy import bool
RND_SEED = int(sys.argv[1])
np.random.seed(RND_SEED)
print('Seed=',RND_SEED)
#Get a random string stamp for this specific run, used for the filename of image export.
import random
import string
def get_random_string(length):
return ''.join(random.choice(string.ascii_lowercase) for i in range(length))
rdstr=get_random_string(8)
print('random stamp for this run:',rdstr)
#Print the GPy version
import GPy
print('GPy version: ', GPy.__version__)
#Print the sklearn version
import sklearn
print('sklearn version: ', sklearn.__version__)
from sklearn.gaussian_process import GaussianProcessRegressor
import importlib
if len(sys.argv) >=9:
task_name = str(sys.argv[8])
else:
task_name = 'f_truth'
from sklearn.cluster import KMeans
from sklearn.mixture import BayesianGaussianMixture
from sklearn.neighbors import KNeighborsClassifier
########################################
# Model specification #
########################################
#How many pilot and sequential samples do we allow to get?
#N_PILOT is the pilot samples we start with, usually a small number would do.
#N_SEQUENTIAL is the number of sequential (noisy) samples we should draw from the black-box function.
if sys.argv[7].isdigit():
N_PILOT = int(sys.argv[7])
N_SEQUENTIAL = int(sys.argv[2])
#Which method should we use for the Bayesian optimization scheme?
#'FREQUENTIST' method means that the (hyper-)parameters are estimated by using some frequestist optimization like lbfgs.
#'BAYESIAN' method means that the paramteres are estimated by putting a prior(Gamma)-posterior mechnism, the estimated value would be posterior mean.
METHOD = 'FREQUENTIST'
#Following 3 parameters are only for HMC Bayesian sampling, you have to choose METHOD = 'BAYESIAN' to use these parameters.
N_BURNIN = 500
N_MCMCSAMPLES = 500
N_INFERENCE = 300
#Exploration rate is the probability (between 0 and 1) of following the next step produced by acquisition function.
EXPLORATION_RATE = float(sys.argv[3])
#Do you want a cluster GP? If NO_CLUSTER = True, a simple GP will be used.
NO_CLUSTER = bool(int(sys.argv[4]))
print('NO_CLUSTER:',NO_CLUSTER,sys.argv[4])
#Do you want to amplify the weight/role of response X when doing clustering?
X_AMPLIFY = 1.
#Do you want to subtract an amount from the response X when doing clustering?
X_TRANSLATE = []
#Do you want to amplify the weight/role of response Y when doing clustering?
Y_AMPLIFY = 1.
#Do you want to subtract an amount from the response Y when doing clustering?
Y_TRANSLATE = 0.
#What is the maximal number of cluster by your guess? This option will be used only if NO_CLUSTER=False.
if sys.argv[6].isdigit():
N_NEIGHBORS = int(sys.argv[6])
print('\n > Classify: KNeighbors:',N_NEIGHBORS,'-neighbors')
clf_XY = KNeighborsClassifier(n_neighbors=N_NEIGHBORS)
else:
CLASSIFY_FUN = str(sys.argv[6])
print('\n > Classify: ',CLASSIFY_FUN)
clf_mdl = importlib.import_module(CLASSIFY_FUN)
clf_mdl_names = [x for x in clf_mdl.__dict__ if not x.startswith("_")]
globals().update({k: getattr(clf_mdl, k) for k in clf_mdl_names})
clf_XY = f_Classify()
#When deciding cluster components, how many neighbors shall we look into and get their votes? This option will be used only if NO_CLUSTER=False.
if sys.argv[5].isdigit():
N_COMPONENTS = int(sys.argv[5])
print('\n > Cluster: BayesianGaussianMixture:',N_COMPONENTS,' components')
dgm_XY = BayesianGaussianMixture(
#weight_concentration_prior_type="dirichlet_distribution",
weight_concentration_prior_type="dirichlet_process",
n_components=N_COMPONENTS,#pick a big number, DGM will automatically adjust
random_state=0)
#dgm_XY = KMeans(n_clusters=N_COMPONENTS, random_state=0))
else:
CLUSTER_FUN = str(sys.argv[5])
print('\n > Cluster: ',CLUSTER_FUN)
dgm_mdl = importlib.import_module(CLUSTER_FUN)
dgm_mdl_names = [x for x in dgm_mdl.__dict__ if not x.startswith("_")]
globals().update({k: getattr(dgm_mdl, k) for k in dgm_mdl_names})
dgm_XY = f_Cluster(RND_SEED)
#This is a GPy parameter, whether you want to normalize the response before/after fitting. Don't change unless necessary.
GPy_normalizer = True
#Whether we should sample repetitive locations in the sequential sampling procedure.
#If True, we would keep identical sequential samples no matter what. (Preferred if we believe a lot of noise)
#If False, we would re-sample when we run into identical sequential samples. (Default)
#In a acquisition maximization step, this is achieved by setting the acquisition function at repetitive samples to -Inf
#In a random search step, this is achieved by repeat the random selection until we got a new location.
REPEAT_SAMPLE = False
USE_SKLEARN = True
ALPHA_SKLEARN = 1e-5
#Value added to the diagonal of the kernel matrix during fitting.
SKLEARN_normalizer = True
########################################
# Import model specification #
########################################
mdl = importlib.import_module(task_name)
if "__all__" in mdl.__dict__:
names = mdl.__dict__["__all__"]
else:
names = [x for x in mdl.__dict__ if not x.startswith("_")]
globals().update({k: getattr(mdl, k) for k in names})
bounds = get_bounds(1)
print('bounds',bounds.shape)
inp_dim=bounds.shape[0]
########################################
# Import acquisition function #
########################################
if len(sys.argv)>=10:
acquisition_name=str(sys.argv[9])
else:
acquisition_name='expected_improvement'
print('>>>Acquisition function: ',acquisition_name)
acq = importlib.import_module('acquisition')
if "__all__" in acq.__dict__:
names = acq.__dict__["__all__"]
else:
names = [x for x in acq.__dict__ if not x.startswith("_")]
globals().update({k: getattr(acq, k) for k in names})
acq_fun = getattr(acq,acquisition_name)
########################################
# Kernel specification #
########################################
#Which kernel you want to use for your model? Such a kernel must be implemented as a GPy/sklearn kernel class.
if USE_SKLEARN==True:
from sklearn.gaussian_process import *
KERNEL_TEMPLATE = sklearn.gaussian_process.kernels.Matern(length_scale=np.ones(inp_dim,), length_scale_bounds=(1e-05, 100000.0), nu=3/2) + sklearn.gaussian_process.kernels.WhiteKernel(noise_level=1.0, noise_level_bounds=(1e-05, 100000.0))
#KERNEL_TEMPLATE = sklearn.gaussian_process.kernels.Matern(length_scale=np.ones(inp_dim,), length_scale_bounds=(1e-05, 100000.0), nu=1/2)
else:
KERNEL_TEMPLATE = GPy.kern.Matern32(input_dim=inp_dim, variance=1., lengthscale=1.) + GPy.kern.White(input_dim=inp_dim)
#KERNEL_TEMPLATE = GPy.kern.Exponential(input_dim=inp_dim, variance=1., lengthscale=1.)
#Do you want to penalize boundary sample points? If so, how?
from datetime import datetime
# datetime object containing current date and time
samplestartingtime = datetime.now()
########################################
# Draw pilot samples #
########################################
#This cell only provides a pilot sample.
#Prepare pilot samples (X,Y)
print('Example : ',EXAMPLE_NAME)
if not sys.argv[7].isdigit():
print('\n>>>>>>>>>>Load pilot samples from external file: ',sys.argv[7],'...<<<<<<<<<<\n')
X_sample = np.loadtxt( sys.argv[7] )
N_PILOT = X_sample.shape[0]
else:
print('\n>>>>>>>>>>Sampling ',N_PILOT,' pilot samples...<<<<<<<<<<\n')
X_sample = np.zeros((N_PILOT,bounds.shape[0]))
for j in range(bounds.shape[0]):
X_sample[:,j] = np.random.uniform(bounds[j,0],bounds[j,1],size=(N_PILOT,1)).ravel()
Y_sample = np.zeros((N_PILOT,1))
Y_sample = np.zeros((N_PILOT,1))
for k in range(N_PILOT):
Y_sample[k,0] = f_truth(X_sample[k,:].reshape(1,-1))
Y_sample[k,0] = censor_function(Y_sample[k,0])
#print('Pilot X',X_sample)
#print('Pilot Y',Y_sample)
#The cGP procedure consists of following steps
#Step 1. For observations, we can do a (unsupervised) (X,Y)-clustering and label them, different components are generated.
#Step 2. For predictive locations, we can do a (supervised) k-nearest neighbor classification, and predict at each location based on which component it belongs to.
#Step 3. We compute the acquisition function and then proceed to the next sample, after adding the new sample we repeat Step 1 and 2.
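# How steps 1-2 are wired together below (illustrative; the concrete estimators dgm_XY and
# clf_XY were configured above from the command line):
#   XY = np.concatenate((X_AMPLIFY*(X - X_TRANSLATE), Y_AMPLIFY*(Y - Y_TRANSLATE)), axis=1)
#   labels = dgm_XY.fit_predict(XY)     # step 1: unsupervised clustering in (X, Y)-space
#   clf_XY.fit(X, labels)               # step 2: classifier trained on X only
#   clf_XY.predict(x_new)               # -> component whose local GP is evaluated at x_new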
#Prepare an up-to-date X_TRANSLATE, as the empirical mean of the X_sample
if len(X_TRANSLATE)>0:
X_TRANSLATE = np.mean(X_sample,axis=0)
else:
X_TRANSLATE = np.mean(X_sample,axis=0)*0
#Prepare an up-to-date Y_TRANSLATE, as the empirical mean of the Y_sample
if Y_TRANSLATE != 0:
Y_TRANSLATE = np.mean(Y_sample)
#print(Y_sample - Y_TRANSLATE)
print(np.concatenate((X_sample,Y_AMPLIFY*(Y_sample-Y_TRANSLATE)),axis=1))
#Prepare initial clusters, with XY-joint.
XY_sample = np.concatenate((X_AMPLIFY*(X_sample-X_TRANSLATE),Y_AMPLIFY*(Y_sample-Y_TRANSLATE)),axis=1)
XY_label = dgm_XY.fit_predict(XY_sample)
print('\n Initial labels for (X,Y)-joint clustering',XY_label)
#Make copies of X_sample for X-only fitting and XY-joint fitting.
X_sample_XY = np.copy(X_sample)
Y_sample_XY = np.copy(Y_sample)
#Prepare initial labels
clf_XY.fit(X_sample_XY, XY_label)
#This is an artifact, we need to have at least d samples to fit a d-dimensional GP model (for its mean and variance)
for c in np.unique(XY_label):
if sum(XY_label==c)<=X_sample_XY.shape[1]+2:
occ = np.bincount(XY_label)
XY_label[np.where(XY_label==c)] = np.argmax(occ)
########################################
# Draw sequential samples #
########################################
from scipy import stats
from scipy.optimize import SR1
from scipy.optimize import minimize
VERBOSE = False
#Prepare sequential samples (X,Y)
print('\n>>>>>>>>>>Sampling ',N_SEQUENTIAL,' sequential samples...<<<<<<<<<<\n')
X_sample = X_sample_XY
Y_sample = Y_sample_XY
cluster_label = XY_label
def get_KER():
return KERNEL_TEMPLATE
#This recode function relabels the distinct values, in increasing order, as 0, 1, 2, ..., e.g. [1, 1, 3, 3, 0] ==> [1, 1, 2, 2, 0].
def recode(label):
level = np.unique(np.array(label))
ck = 0
for j in level:
label[label==j]=ck
ck=ck+1
return label
#Main loop that guides us in sampling sequential samples
component_label = np.unique(np.array(cluster_label))
for it in range(N_SEQUENTIAL):
print('\n>>>>>>>>>> ***** STEP ',it+1,'/',N_SEQUENTIAL,'***** <<<<<<<<<<')
print('\n>>>>>>>>>> +++++ N_PROC disabled +++++ <<<<<<<<<<')
#Step 1. For observations, we can do a (unsupervised) (X,Y)-clustering and label them, different components are generated.
#Create the (X,Y) joint sample to conduct (unsupervised clustering)
#if len(X_TRANSLATE)>0:
# X_TRANSLATE = np.mean(X_sample,axis=0)
#if Y_TRANSLATE != 0:
# Y_TRANSLATE = np.mean(Y_sample)
#The cluster must be based on adjusted response value Y.
XY_sample = np.concatenate((X_AMPLIFY*(X_sample-X_TRANSLATE),Y_AMPLIFY*(Y_sample-Y_TRANSLATE).reshape(-1,1)),axis=1)
if NO_CLUSTER:
print('>>NO CLUSTER, a GP surrogate.')
cluster_label = np.zeros(XY_sample.shape[0])
else:
#print('>>CLUSTERED, a cGP surrogate. ',len(component_label),' components in surrogate model.')
cluster_label = dgm_XY.fit_predict(XY_sample)#cluster_label
if VERBOSE: print('dgm label', cluster_label)
#Again, we need to ensure that every cluster has at least d (dimension of covariate) samples.
for c in np.unique(cluster_label):
if sum(cluster_label==c)<=X_sample.shape[1]+2:
occ = np.bincount(cluster_label)
cluster_label[np.where(cluster_label==c)] = np.argmax(occ)
if VERBOSE: print('merged label',cluster_label)
cluster_label = recode(cluster_label)
if VERBOSE: print('All labels are recoded: ',cluster_label)
#Step 2. For predictive locations, we can do a (supervised) k-nearest neighbor classification, and predict at each location based on which component it belongs to.
#Refresh the Classifier
if sys.argv[6].isdigit():
clf_XY = KNeighborsClassifier(n_neighbors=N_NEIGHBORS)
else:
clf_XY = f_Classify()
clf_XY.fit(X_sample,cluster_label)
#Step 3. We either randomly search one location or compute the acquisition function and then proceed to the next sample, after adding the new sample we repeat Step 1 and 2.
coin = np.random.uniform(0,1,1)
if coin<EXPLORATION_RATE:
component_label = np.unique(np.array(cluster_label))
if not NO_CLUSTER:
print('>>CLUSTERED, a cGP surrogate.',len(component_label),' components in surrogate model.');
print('>>>>Find next sample: acquisition proposal.')
opt_x = np.zeros((1, X_sample.shape[1]))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Filename: matrix.py
# @Date: 2019-06-10-13-56
# @Author: <NAME>
# @Contact: <EMAIL>
import numpy as np
import numpy.random as npr
from scipy import linalg
from numpy.core.umath_tests import inner1d
from mimo.abstractions import Distribution
class MatrixNormal(Distribution):
def __init__(self, M=None, U=None, V=None):
self.M = M
self._U = U
self._V = V
@property
def params(self):
return self.M, self.U, self.V
@params.setter
def params(self, values):
self.M, self.U, self.V = values
@property
def dcol(self):
return self.M.shape[1]
@property
def drow(self):
return self.M.shape[0]
@property
def U(self):
return self._U
@U.setter
def U(self, value):
self._U = value
self._U_chol = None
@property
def V(self):
return self._V
@V.setter
def V(self, value):
self._V = value
self._V_chol = None
@property
def sigma(self):
return np.kron(self.V, self.U)
@property
def U_chol(self):
if not hasattr(self, '_U_chol') or self._U_chol is None:
self._U_chol = np.linalg.cholesky(self.U)
return self._U_chol
@property
def V_chol(self):
if not hasattr(self, '_V_chol') or self._V_chol is None:
self._V_chol = np.linalg.cholesky(self.V)
return self._V_chol
@property
def sigma_chol(self):
if not hasattr(self, '_sigma_chol') or self._sigma_chol is None:
self._sigma_chol = np.linalg.cholesky(self.sigma)
return self._sigma_chol
def rvs(self, size=None):
if size is None:
aux = npr.normal(size=self.drow * self.dcol).dot(self.sigma_chol.T)
return self.M + np.reshape(aux, (self.drow, self.dcol), order='F')
else:
# draw `size` i.i.d. samples; each row of `aux` is one Fortran-vectorised matrix
aux = npr.normal(size=(size, self.drow * self.dcol)).dot(self.sigma_chol.T)
return self.M + np.reshape(aux, (size, self.dcol, self.drow)).swapaxes(1, 2)
def mean(self):
return self.M
def mode(self):
return self.M
def log_likelihood(self, x):
# apply vector operator with Fortran convention
xr = np.reshape(x, (-1, self.drow * self.dcol), order='F')
mr = np.reshape(self.M, (self.drow * self.dcol), order='F')
# Gaussian likelihood on vector dist.
bads = np.isnan(np.atleast_2d(xr)).any(axis=1)
xc = np.nan_to_num(xr).reshape((-1, self.drow * self.dcol)) - mr
xs = linalg.solve_triangular(self.sigma_chol, xc.T, lower=True)
out = - 0.5 * self.drow * self.dcol * np.log(2. * np.pi) -\
np.sum(np.log(np.diag(self.sigma_chol))) - 0.5 * inner1d(xs.T, xs.T)
out[bads] = 0
return out
def get_statistics(self, data):
if isinstance(data, np.ndarray):
idx = ~np.isnan(data).any(1)
data = data[idx]
xxT = np.einsum('nk,nh->kh', data, data)
x = data.sum(0)
n = data.shape[0]
return np.array([x, xxT, n])
else:
return sum(list(map(self.get_statistics, data)), self._empty_statistics())
def get_weighted_statistics(self, data, weights):
if isinstance(data, np.ndarray):
idx = ~np.isnan(data).any(1)
data = data[idx]
weights = weights[idx]
xxT = np.einsum('nk,n,nh->kh', data, weights, data)
x = weights.dot(data)
n = weights.sum()
return np.array([x, xxT, n])
else:
return sum(list(map(self.get_weighted_statistics, data, weights)), self._empty_statistics())
def _empty_statistics(self):
return np.array([np.zeros((self.drow * self.dcol, )),
np.zeros((self.drow * self.dcol, self.drow * self.dcol)), 0])
def log_partition(self):
return 0.5 * self.drow * self.dcol * np.log(2. * np.pi) +\
self.drow * np.sum(np.log(np.diag(self.V_chol)))
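# Notes on the conventions above (illustrative, not part of the original module):
# > for X ~ MN(M, U, V) the Fortran-order vectorisation satisfies vec(X) ~ N(vec(M), kron(V, U)),
#   which is what the `sigma` property encodes; the empirical covariance of many rvs() draws,
#   reshaped with order='F', approaches np.kron(V, U).
# > log_partition above keeps only the V factor; the full normaliser of the vectorised Gaussian
#   would also carry the U factor, i.e.
#   0.5*drow*dcol*log(2*pi) + drow*sum(log(diag(V_chol))) + dcol*sum(log(diag(U_chol))).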
# -*- coding: utf-8 -*-
import sys
import gzip as gz
import pickle as pk
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
import URP
import NBR6123
#=============================================================================
ALIM = 40. # limit for acceleration plots (mG)
DLIM = 500. # limit for displacement plots (mm)
RLIM = 1. # limit for rotation plots (degrees)
ELIM = 1000 # limit for energy plots (kJ)
MMAX = 12 # maximum number of modes to be considered
#=============================================================================
def read_batch(dirname, prefix):
batchpickle = dirname + prefix + '.pk'
try:
with open(batchpickle, 'rb') as target:
print('Loading "{0}" ... '.format(dirname))
return pk.load(target)
except:
sys.exit('Could not read batch file {0}!'.format(batchpickle))
#=============================================================================
def get_taps(b):
print(' Direction = {0:0>3}deg: "{1}"... '.format(b.wnd, b.file))
with gz.GzipFile(b.file, 'rb') as target:
return pk.load(target)
#=============================================================================
def unpack_batch(batch):
wnd = batch['wnd']
Cat = batch['Cat']
Vm = batch['Vm']
dPa = batch['dPa']
Td = batch['Td']
if batch.ndim == 2:
wnd = wnd.values.astype('int')
Cat = Cat.values.astype('int')
Vm = Vm.values.astype('float')
dPa = dPa.values.astype('float')
Td = Td.values.astype('float')
return wnd, Cat, Vm, dPa, Td
#=============================================================================
def unpack_master(master):
tap = master['tap'].astype('int')
A = master['A'].astype('float')
XT = master[['X', 'Y', 'Z' ]].values.astype('float')
CT = master[['cx','cy','cz']].values.astype('float')
if master.ndim == 2:
tap = tap.values.astype('int')
A = A.values.astype('float')
return tap, A, XT, CT
#=============================================================================
def build_3dof(Bx, By, Hz, rho, gamma, NS):
# 1. Define storey coordinates and masses
dz = Hz/NS
Si = np.linspace(1, NS, NS)
ZS = np.linspace(dz, Hz, NS)
X0 = np.zeros(ZS.shape)
Mz = (Bx*By*dz*rho)*np.ones(NS)
Iz = (Bx**2 + By**2)*Mz/12
XS = np.vstack(( X0, X0, ZS)).T # stifness center for each floor
MS = np.vstack(( Mz, Mz, Iz)).T.reshape((3*NS,)) # masses and inertia
# 2. Prepare the 3 modal shapes
NQ = 3
QS = np.zeros((3*NS,NQ))
for k in range(NQ):
QS[k:3*NS:3, k] = (ZS/Hz)**gamma[k]
# 5. Normalize modal shapes for unitary modal masses
Mk = np.sum(MS*QS[:,k]*QS[:,k])
QS[:,k] = QS[:,k] / np.sqrt(Mk)
return Si, XS, MS, QS
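# Illustrative call with hypothetical values (a 30 m x 20 m x 90 m building, 150 kg/m3, shape
# exponents gamma for sway x, sway y and torsion, 30 storeys):
#   Si, XS, MS, QS = build_3dof(Bx=30., By=20., Hz=90., rho=150., gamma=[1.2, 1.2, 1.6], NS=30)
# After the normalisation loop above, each shape has unit modal mass: np.sum(MS*QS[:, k]**2) == 1.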
#=============================================================================
def interp_3dof(XT, XS, QS):
# 1. Get taps coordinates (with offset over XT and ZS already applyed)
# Note: verify if extrapolation will be necessary beyond ZS.
NT = XT.shape[0]
NS = XS.shape[0]
ZT = np.array(XT[:,2], dtype=np.float)
ZS = np.array(XS[:,2], dtype=np.float)
top = False
if (ZS.max() < ZT.max()):
ZS = np.append(ZS, ZT.max()+0.1)
top = True
bot = False
if (ZS.min() > ZT.min()):
ZS = np.append(ZT.min()-0.1, ZS)
bot = True
QT = np.empty((3*NT,MMAX))
# 2. Loop over valid modes
for k in range(MMAX):
qTk = np.zeros((NT, 3))
qSk = QS[:,k].reshape(NS, 3)
# 3. If necessary, extrapolate modal shape as constant
if top:
qSk = np.vstack((qSk, qSk[-1,:]))
if bot:
qSk = np.vstack((qSk[ 0,:], qSk))
# 4. Perform mode interpolation at each tap
r = griddata(ZS, qSk[:,2], ZT, method='linear')
qTk[:,0] = griddata(ZS, qSk[:,0], ZT, method='linear') - r*XT[:,1]
qTk[:,1] = griddata(ZS, qSk[:,1], ZT, method='linear') + r*XT[:,0]
QT[:,k] = qTk.reshape(3*NT,)
return QT
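# The tap-level shapes above follow rigid-floor kinematics: for a floor rotation r about z,
# qx(tap) = qx(floor) - r*y_tap and qy(tap) = qy(floor) + r*x_tap, i.e. the small-rotation
# counterpart of the offset applied in offset_taps below.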
#=============================================================================
def view_3dof(Bx, By, Hz, fk, XT, QT, XS, QS, scale):
# 1. Loop over all modes
NT = XT.shape[0]
NS = XS.shape[0]
BX = np.vstack((np.array([-Bx/2., -Bx/2., Bx/2., Bx/2., -Bx/2.]),
np.array([-By/2., By/2., By/2., -By/2., -By/2.]),
np.array([ 0.00 , 0.00, 0.00, 0.00, 0.00 ]))).T
gray = [0.8,0.8,0.8]
blue = [0.0,0.0,1.0]
for k in range(MMAX):
text = r'MODE {0}: $f_k$ = {1:5.2f}Hz'.format(k+1, fk[k])
plt.figure(figsize=(15, 8))
plt.suptitle(text, fontsize=18)
ax1 = plt.subplot(131)
ax1.set_xlim([-1.0*Bx, 1.0*Bx])
ax1.set_ylim([-1.0*Bx, 1.0*Bx])
ax1.set_aspect('equal', adjustable='box')
ax1.grid(True)
ax2 = plt.subplot(132)
ax2.grid(True)
ax2.set_xlim([-1.0*Bx, 1.0*Bx])
ax2.set_ylim([-0.05*Hz, 1.1*Hz])
ax2.set_xticks([-40, -20, 0, 20, 40])
ax2.set_aspect('equal', adjustable='box')
ax3 = plt.subplot(133)
ax3.grid(True)
ax3.set_xlim([-1.0*Bx, 1.0*Bx])
ax3.set_ylim([-0.05*Hz, 1.1*Hz])
ax3.set_xticks([-40, -20, 0, 20, 40])
ax3.set_aspect('equal', adjustable='box')
# 2. Plot frames
qkT = QT[:,k].reshape(NT,3)
sc = scale/np.abs([qkT[:,0], qkT[:,1]]).max()
qk = sc*QS[:,k].reshape(NS,3)
for xs, qs in zip(XS, qk):
off = np.array([qs[0], qs[1], 0.0, -qs[2]])
BS = offset_taps(BX, [], off)
ZS = xs[2]*np.ones((5,))
ax1.plot(BX[:,0], BX[:,1], color=gray, linewidth=2)
ax1.plot(BS[:,0], BS[:,1], color=blue, linewidth=2)
ax2.plot(BX[:,0], ZS, color=gray, linewidth=2)
ax2.plot(BS[:,0], ZS, color=blue, linewidth=2)
ax3.plot(BX[:,1], ZS, color=gray, linewidth=2)
ax3.plot(BS[:,1], ZS, color=blue, linewidth=2)
# 3. Plot taps
if len(XT) == 0: continue
qk = sc*QT[:,k].reshape(NT,3)
Xoff = XT + qk
ax1.plot(Xoff[:,0], Xoff[:,1], 'ro')
ax2.plot(Xoff[:,0], Xoff[:,2], 'ro')
ax3.plot(Xoff[:,1], Xoff[:,2], 'ro')
plt.savefig('MODE_{0:0>2}.png'.format(k+1),
format='png', dpi=300, bbox_inches='tight')
return
#=============================================================================
def offset_taps(XT, CT, offset):
# 1. Prepare translation vector and rotation matrix
D = offset[0:3].reshape(3,1)
rz = offset[3]
R = np.array([[ np.cos(rz), np.sin(rz), 0.0],
[-np.sin(rz), np.cos(rz), 0.0],
[ 0.0, 0.0, 1.0]], dtype='float')
# 2. Apply transformation to coordinates
if len(CT):
return (np.dot(R,(XT.T + D))).T, (np.dot(R, CT.T)).T
else:
return (np.dot(R,(XT.T + D))).T
#=============================================================================
def aero_coeffs(graph, batch, structure, C_eqv=[], plot=False):
dirname, prefix, m, zts = graph
# 1. Preliminaries, and get NBR6123 drag coefficients
stpref = structure[ 'prefix']
Bx = structure[ 'Bx']
By = structure[ 'By']
Hz = structure[ 'Hz']
H0 = structure[ 'H0']
H1 = structure[ 'H1']
offT = structure[['dxT', 'dyT', 'dzT', 'rzT']].values.astype('float')
offT[3] = np.pi*offT[3]/180
alpha = batch['wnd'].values.astype('int64')
Cm = np.empty((len(batch),3))
Cs = np.empty((len(batch),3))
Cx, Cy = NBR6123.drag(Bx, By, Hz, case='low')
if Bx > By: Ct = 0.075*Cy*Bx/By
else: Ct = 0.075*Cx*By/Bx
# 2. Iterate over all wind directions
for ib, b in batch.iterrows():
wnd, Cat, Vm, dPa, Td = unpack_batch(b)
# 3. Open pressure data for wind direction
master, Cp = get_taps(b)
tap, A, XT, CT = unpack_master(master)
XT, CT = offset_taps(XT, CT, offT) ##########
A[(XT[:,2] < H0) | (XT[:,2] > H1)] = 0
# 4. Time series for total forces and moments at ground level
CAx = A*CT[:,0]
CAy = A*CT[:,1]
CAt = XT[:,0]*CAy - XT[:,1]*CAx
Cx0 = -Cp.dot(CAx)/(By*(H1-H0))
Cy0 = -Cp.dot(CAy)/(Bx*(H1-H0))
Ct0 = -Cp.dot(CAt)/(By*Bx*(H1-H0))
# 5. Reset mean and rms collection of coefficients
Cm[ib,0] = Cx0.mean()
Cm[ib,1] = Cy0.mean()
Cm[ib,2] = Ct0.mean()
Cs[ib,0] = Cx0.std()
Cs[ib,1] = Cy0.std()
Cs[ib,2] = Ct0.std()
# 6. Plot results if required
if plot:
xticks = 30*np.arange(13)
grid = {'linestyle':'-','linewidth':'0.2'}
leg2 = {'loc':4, 'fancybox':True, 'ncol':2, 'fontsize':10}
leg3 = {'loc':4, 'fancybox':True, 'ncol':3, 'fontsize':10}
# 6.1 Plot mean coefficients
plt.figure(figsize=(14, 8))
yticks = [-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0]
tticks = [-0.5, -0.4, -0.3, -0.2, -0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
ax1 = plt.subplot(221)
ax1.plot(alpha, Cm[:,0], color='b', marker='o')
if len(C_eqv) > 0:
ax1.plot(alpha, C_eqv[:,0], color='k', ls=':', lw=2)
ax1.plot([0, 360], [ Cx, Cx], color='r', ls='--', lw=2)
ax1.plot([0, 360], [-Cx, -Cx], color='r', ls='--', lw=2)
ax1.grid(**grid)
ax1.set_xlabel('Wind direction (deg)')
ax1.set_xlim([0, 360])
ax1.set_xticks(xticks)
ax1.set_ylabel(r'mean $C_x$', fontsize=14)
ax1.set_yticks(yticks)
ax2 = plt.subplot(222)
ax2.plot(alpha, Cm[:,1], color='b', marker='o')
if len(C_eqv) > 0:
ax2.plot(alpha, C_eqv[:,1], color='k', ls=':', lw=2)
ax2.plot([0, 360], [ Cy, Cy], color='r', ls='--', lw=2)
ax2.plot([0, 360], [-Cy, -Cy], color='r', ls='--', lw=2)
ax2.grid(**grid)
ax2.set_xlabel('Wind direction (deg)')
ax2.set_xlim([0, 360])
ax2.set_xticks(xticks)
ax2.set_ylabel(r'mean $C_y$', fontsize=14)
ax2.set_yticks(yticks)
ax3 = plt.subplot(223)
ax3.plot(alpha, Cm[:,2], color='b', marker='o')
if len(C_eqv) > 0:
ax3.plot(alpha, C_eqv[:,2], color='k', ls=':', lw=2)
ax3.plot([0, 360], [ Ct, Ct], color='r', ls='--', lw=2)
ax3.plot([0, 360], [-Ct, -Ct], color='r', ls='--', lw=2)
ax3.grid(**grid)
ax3.set_xlabel('Wind direction (deg)')
ax3.set_xlim([0, 360])
ax3.set_xticks(xticks)
ax3.set_ylabel(r'mean $C_t$', fontsize=14)
ax3.set_yticks(tticks)
if len(C_eqv) > 0:
ax1.legend(('Mean', 'HFPI', 'NBR'), **leg3)
ax2.legend(('Mean', 'HFPI', 'NBR'), **leg3)
ax3.legend(('Mean', 'HFPI', 'NBR'), **leg3)
else:
ax1.legend(('Mean', 'NBR'), **leg2)
ax2.legend(('Mean', 'NBR'), **leg2)
ax3.legend(('Mean', 'NBR'), **leg2)
img = plt.imread('Sections/' + stpref + '_plan.png')
ax4 = plt.subplot(224)
ax4.imshow(img)
plt.gca().axison = False
tst = prefix + ': Mean coefficients for'
tst = tst + r' m = {0:0>2} years, $\zeta$ = {1}'
tst = tst.format(m, zts)
fst = prefix + '_{0:0>3}Y_{1}_AeroCoeffs.png'
fst = fst.format(m, zts)
plt.suptitle(tst, fontsize=18)
plt.savefig( fst, format='png', dpi=300, bbox_inches='tight')
'''
# 6.2 Plot rms coefficients
plt.figure(figsize=(14, 8))
yticks = [ 0.00, 0.05, 0.10, 0.15]
tticks = [ 0.00, 0.01, 0.02, 0.03]
ax1 = plt.subplot(221)
ax1.plot(alpha, Cs[:,0], color='b', marker='o')
ax1.grid(**grid)
ax1.set_xlabel('Wind direction (deg)')
ax1.set_xlim([0, 360])
ax1.set_xticks(xticks)
ax1.set_ylabel(r'rms $C_x$', fontsize=14)
ax1.set_yticks(yticks)
ax2 = plt.subplot(222)
ax2.plot(alpha, Cs[:,1], color='b', marker='o')
ax2.grid(**grid)
ax2.set_xlabel('Wind direction (deg)')
ax2.set_xlim([0, 360])
ax2.set_xticks(xticks)
ax2.set_ylabel(r'rms $C_y$', fontsize=14)
ax2.set_yticks(yticks)
ax3 = plt.subplot(223)
ax3.plot(alpha, Cs[:,2], color='b', marker='o')
ax3.grid(**grid)
ax3.set_xlabel('Wind direction (deg)')
ax3.set_xlim([0, 360])
ax3.set_xticks(xticks)
ax3.set_ylabel(r'rms $C_t$', fontsize=14)
ax3.set_yticks(tticks)
img = plt.imread('Sections/' + stpref + '_plan.png')
ax4 = plt.subplot(224)
ax4.imshow(img)
plt.gca().axison = False
tst = prefix + r': RMS coefficients for '
tst = tst + r' m = {0:0>2} years, $\zeta$ = {1}'
tst = tst.format(m, zts)
fst = prefix + '_{0:0>3}Y_{1}_RMSCoeffs.png'
fst = fst.format(m, zts)
plt.suptitle(tst, fontsize=18)
plt.savefig( fst, format='png', dpi=300, bbox_inches='tight')
'''
return alpha, Cm, Cs
#==============================================================================
def mload_3dof(wgraph, master, Cp, XT, QT, Td, plot=False):
graph, wnd = wgraph
dirname, prefix, m, zts = graph
# 1. Calculate modal loads for all modes
CAx = master[['A', 'cx']].product(axis=1)
CAy = master[['A', 'cy']].product(axis=1)
CAt = XT[:,0]*CAy - XT[:,1]*CAx
n = Cp.shape[0]
fs = n/Td
N3 = QT.shape[0]
Pt = np.empty((N3,n))
fPk = np.zeros(3)
Pt[0::3,:] = -Cp.multiply(CAx, axis=1).values.T
Pt[1::3,:] = -Cp.multiply(CAy, axis=1).values.T
Pt[2::3,:] = -Cp.multiply(CAt, axis=1).values.T
Pk = np.dot(QT.T,Pt)
mPk = np.mean(Pk, axis=1)
sPk = np.std (Pk, axis=1)
if plot:
t = np.linspace(0,Td,n)
plt.figure(figsize=(15, 8))
# 2. Search for vortex shedding frequency
for k in range(3):
Pk0 = (Pk[k,1:] - mPk[k])/sPk[k] # normalize process
f, SPk = URP.periodogram(Pk0, fs) # get spectrum
SPk = URP.mov_average(SPk, 21) # smooth spectrum
'''
i0 = np.argmin(np.abs(f - 0.15))
f0 = np.log(f[i0])
S0 = np.log(SPk[i0])
i1 = np.argmin(np.abs(f - 0.80))
f1 = np.log(f[i1])
S1 = np.log(SPk[i1])
SS = S0 + (S1 - S0)*(np.log(f) - f0)/(f1 - f0)
bump = np.log(SPk) - SS
ik = np.argmax(bump)
fPk[k] = f[ik]
if (fPk[k] < 0.15) | (bump[ik] < 1.6): fPk[k] = 0.
'''
# 3. Plot all modal loads and spectra if required
if plot:
plt.subplot(3,2,2*k+1)
plt.plot(t,Pk[k,:])
plt.grid(True)
plt.ylabel('Mode {0}'.format(k+1))
if k == 0:
plt.axis([0,Td,-3500,3500])
plt.text(0.75*Td,2800,
r'$\mu_k =$ {0:6.1f}'.format(mPk[k]), fontsize=14)
if k == 1:
plt.axis([0,Td,-1500,1500])
plt.text(0.75*Td,1200,
r'$\mu_k =$ {0:6.1f}'.format(mPk[k]), fontsize=14)
if k == 2:
plt.axis([0,Td, -1000, 1000])
                plt.xlabel('Time (s)')
plt.text(0.75*Td, 800,
r'$\mu_k =$ {0:6.1f}'.format(mPk[k]), fontsize=14)
plt.subplot(3,2,2*k+2)
plt.loglog(f,SPk,'b')#, f,np.exp(SS),'r:')
plt.grid(True)
plt.axis([1e-3,1,0.001,100])
plt.text(0.2,20,
r'$\sigma_k =$ {0:6.1f}'.format(sPk[k]), fontsize=14)
if k == 2:
plt.xlabel('Frequency (Hz)')
if fPk[k] > 0.1:
plt.loglog([fPk[k], fPk[k]], [0.001, 100], color='r', lw=2)
plt.text(fPk[k], 10, '{0:.3f}Hz'.format(fPk[k]))
# 4. Save plot
if plot:
tst = prefix + r': Modal loads for '
tst = tst + r' m = {0:0>2} years, $\alpha$ = {1} deg'
tst = tst.format(m, wnd)
fst = prefix + '_{0:0>3}Y_{1:0>3}D_Mloads.png'
fst = fst.format(m, wnd)
plt.suptitle(tst, fontsize=18)
plt.savefig( fst, format='png', dpi=300, bbox_inches='tight')
return Pk, mPk, sPk, fPk
#=============================================================================
def modal_response(wgraph, fk, zt, Td, Pk, plot=False):
graph, wnd = wgraph
dirname, prefix, m, zts = graph
# 1. Calculate modal response for all modes
n = Pk.shape[1]
fs = n/Td
uk = np.zeros((MMAX,n))
    fuk = np.zeros(MMAX)
from nose import SkipTest
from nose.tools import assert_raises, assert_true, assert_equal
import networkx as nx
from networkx.generators.classic import barbell_graph,cycle_graph,path_graph
class TestConvertNumpy(object):
numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
@classmethod
def setupClass(cls):
global np
global np_assert_equal
try:
import numpy as np
np_assert_equal=np.testing.assert_equal
except ImportError:
raise SkipTest('NumPy not available.')
def __init__(self):
self.G1 = barbell_graph(10, 3)
self.G2 = cycle_graph(10, create_using=nx.DiGraph())
self.G3 = self.create_weighted(nx.Graph())
self.G4 = self.create_weighted(nx.DiGraph())
def create_weighted(self, G):
g = cycle_graph(4)
e = g.edges()
source = [u for u,v in e]
dest = [v for u,v in e]
weight = [s+10 for s in source]
ex = zip(source, dest, weight)
G.add_weighted_edges_from(ex)
return G
def assert_equal(self, G1, G2):
assert_true( sorted(G1.nodes())==sorted(G2.nodes()) )
assert_true( sorted(G1.edges())==sorted(G2.edges()) )
def identity_conversion(self, G, A, create_using):
GG = nx.from_numpy_matrix(A, create_using=create_using)
self.assert_equal(G, GG)
GW = nx.to_networkx_graph(A, create_using=create_using)
self.assert_equal(G, GW)
GI = create_using.__class__(A)
self.assert_equal(G, GI)
def test_shape(self):
"Conversion from non-square array."
A=np.array([[1,2,3],[4,5,6]])
assert_raises(nx.NetworkXError, nx.from_numpy_matrix, A)
def test_identity_graph_matrix(self):
"Conversion from graph to matrix to graph."
A = nx.to_numpy_matrix(self.G1)
self.identity_conversion(self.G1, A, nx.Graph())
def test_identity_graph_array(self):
"Conversion from graph to array to graph."
A = nx.to_numpy_matrix(self.G1)
A = np.asarray(A)
self.identity_conversion(self.G1, A, nx.Graph())
def test_identity_digraph_matrix(self):
"""Conversion from digraph to matrix to digraph."""
A = nx.to_numpy_matrix(self.G2)
self.identity_conversion(self.G2, A, nx.DiGraph())
def test_identity_digraph_array(self):
"""Conversion from digraph to array to digraph."""
A = nx.to_numpy_matrix(self.G2)
A = np.asarray(A)
self.identity_conversion(self.G2, A, nx.DiGraph())
def test_identity_weighted_graph_matrix(self):
"""Conversion from weighted graph to matrix to weighted graph."""
A = nx.to_numpy_matrix(self.G3)
self.identity_conversion(self.G3, A, nx.Graph())
def test_identity_weighted_graph_array(self):
"""Conversion from weighted graph to array to weighted graph."""
A = nx.to_numpy_matrix(self.G3)
A = np.asarray(A)
self.identity_conversion(self.G3, A, nx.Graph())
def test_identity_weighted_digraph_matrix(self):
"""Conversion from weighted digraph to matrix to weighted digraph."""
A = nx.to_numpy_matrix(self.G4)
self.identity_conversion(self.G4, A, nx.DiGraph())
def test_identity_weighted_digraph_array(self):
"""Conversion from weighted digraph to array to weighted digraph."""
A = nx.to_numpy_matrix(self.G4)
        A = np.asarray(A)
        self.identity_conversion(self.G4, A, nx.DiGraph())
# -*- coding: utf-8 -*-
################################################################
# The contents of this file are subject to the BSD 3Clause (New) License
# you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://directory.fsf.org/wiki/License:BSD_3Clause
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
# The Original Code is part of the PyRadi toolkit.
# The Initial Developer of the Original Code is <NAME>,
# Portions created by <NAME> are Copyright (C) 2006-2012
# All Rights Reserved.
# Contributor(s): ______________________________________.
################################################################
"""
This module provides various utility functions for radiometry calculations.
Functions are provided for a maximally flat spectral filter, a simple photon
detector spectral response, effective value calculation, conversion of spectral
domain variables between [um], [cm^-1] and [Hz], conversion of spectral
density quantities between [um], [cm^-1] and [Hz] and spectral convolution.
See the __main__ function for examples of use.
This package was partly developed to provide additional material in support of students
and readers of the book Electro-Optical System Analysis and Design: A Radiometry
Perspective, <NAME>, ISBN 9780819495693, SPIE Monograph Volume
PM236, SPIE Press, 2013. http://spie.org/x648.html?product_id=2021423&origin_id=x646
"""
__version__= "$Revision$"
__author__= 'pyradi team'
__all__= ['buildLogSpace','sfilter', 'responsivity', 'effectiveValue', 'convertSpectralDomain',
'convertSpectralDensity', 'convolve', 'savitzkyGolay1D','abshumidity', 'TFromAbshumidity',
'rangeEquation','_rangeEquationCalc','detectThresholdToNoiseTpFAR',
'detectSignalToNoiseThresholdToNoisePd',
'detectThresholdToNoiseSignalToNoisepD',
'detectProbabilityThresholdToNoiseSignalToNoise',
'detectFARThresholdToNoisepulseWidth', 'upMu',
'cart2polar', 'polar2cart','index_coords','framesFirst','framesLast',
'rect', 'circ','poissonarray','draw_siemens_star','drawCheckerboard',
'makemotionsequence','extractGraph','luminousEfficiency','Spectral',
'Atmo','Sensor','Target','calcMTFwavefrontError',
'polar2cartesian','warpPolarImageToCartesianImage','warpCartesianImageToPolarImage',
'intify_tuple','differcommonfiles','blurryextract','update_progress'
]
import sys
import numpy as np
from scipy import constants
from scipy import interpolate
import matplotlib.pyplot as plt
from matplotlib.patches import Wedge
from matplotlib.collections import PatchCollection
import os
import pkg_resources
from numbers import Number
if sys.version_info[0] > 2:
from io import StringIO
else:
from StringIO import StringIO
#################################################################################
"""
Gathers and presents version information.
Adapted from https://github.com/ahmedsalhin/version_information
This makes it much easier to determine which versions of modules
were installed in the source IPython interpreter's environment.
Produces output in:
* Plaintext (IPython [qt]console)
* HTML (IPython notebook, ``nbconvert --to html``, ``--to slides``)
* JSON (IPython notebook ``.ipynb`` files)
* LaTeX (e.g. ``ipython nbconvert example.ipynb --to LaTeX --post PDF``)
Usage
======
.. sourcecode:: ipython
print(ryutils.VersionInformation('matplotlib,numpy'))
"""
import html
import json
import sys
import time
import locale
import IPython
import platform
try:
import pkg_resources
except ImportError:
pkg_resources = None
timefmt = '%a %b %d %H:%M:%S %Y %Z'
def _date_format_encoding():
return locale.getlocale(locale.LC_TIME)[1] or locale.getpreferredencoding()
class VersionInformation():
def __init__(self,line=''):
self.version_information( line=line)
def version_information(self, line=''):
"""Show information about versions of modules.
Usage:
%version_information [optional comma-separated list of modules]
"""
self.packages = [
("Python", "{version} {arch} [{compiler}]".format(
version=platform.python_version(),
arch=platform.architecture()[0],
compiler=platform.python_compiler())),
("IPython", IPython.__version__),
("OS", platform.platform().replace('-', ' '))
]
modules = line.replace(' ', '').split(",")
for module in modules:
if len(module) > 0:
try:
code = ("import %s; version=str(%s.__version__)" %
(module, module))
ns_g = ns_l = {}
exec(compile(code, "<string>", "exec"), ns_g, ns_l)
self.packages.append((module, ns_l["version"]))
except Exception as e:
try:
if pkg_resources is None:
raise
version = pkg_resources.require(module)[0].version
self.packages.append((module, version))
except Exception as e:
self.packages.append((module, str(e)))
return self
def _repr_json_(self):
obj = {
'Software versions': [
{'module': name, 'version': version} for
(name, version) in self.packages]}
if IPython.version_info[0] >= 3:
return obj
else:
return json.dumps(obj)
@staticmethod
def _htmltable_escape(str_):
CHARS = {
'&': r'\&',
'%': r'\%',
'$': r'\$',
'#': r'\#',
'_': r'\_',
'{': r'\letteropenbrace{}',
'}': r'\letterclosebrace{}',
'~': r'\lettertilde{}',
'^': r'\letterhat{}',
'\\': r'\letterbackslash{}',
'>': r'\textgreater',
'<': r'\textless',
}
return u"".join([CHARS.get(c, c) for c in str_])
def _repr_html_(self):
html_table = "<table>"
html_table += "<tr><th>Software</th><th>Version</th></tr>"
for name, version in self.packages:
_version = self._htmltable_escape(version)
html_table += "<tr><td>%s</td><td>%s</td></tr>" % (name, _version)
try:
html_table += "<tr><td colspan='2'>%s</td></tr>" % time.strftime(timefmt)
except:
html_table += "<tr><td colspan='2'>%s</td></tr>" % \
time.strftime(timefmt).decode(_date_format_encoding())
html_table += "</table>"
return html_table
@staticmethod
def _latex_escape(str_):
CHARS = {
'&': r'\&',
'%': r'\%',
'$': r'\$',
'#': r'\#',
'_': r'\_',
'{': r'\letteropenbrace{}',
'}': r'\letterclosebrace{}',
'~': r'\lettertilde{}',
'^': r'\letterhat{}',
'\\': r'\letterbackslash{}',
'>': r'\textgreater',
'<': r'\textless',
}
return u"".join([CHARS.get(c, c) for c in str_])
def _repr_latex_(self):
latex = r"\begin{tabular}{|l|l|}\hline" + "\n"
latex += r"{\bf Software} & {\bf Version} \\ \hline\hline" + "\n"
for name, version in self.packages:
_version = self._latex_escape(version)
latex += r"%s & %s \\ \hline" % (name, _version) + "\n"
try:
latex += r"\hline \multicolumn{2}{|l|}{%s} \\ \hline" % \
time.strftime(timefmt) + "\n"
except:
latex += r"\hline \multicolumn{2}{|l|}{%s} \\ \hline" % \
time.strftime(timefmt).decode(_date_format_encoding()) + "\n"
latex += r"\end{tabular}" + "\n"
return latex
def _repr_pretty_(self):
text = "Software versions\n"
for name, version in self.packages:
text += "%s %s\n" % (name, version)
try:
text += "%s" % time.strftime(timefmt)
except:
text += "%s" % \
time.strftime(timefmt).decode(_date_format_encoding())
import pprint
pprint.pprint(text)
def __str__(self):
text = 'Software versions\n'
for name, version in self.packages:
text += f"{name}: {version}\n"
try:
text += f"{time.strftime(timefmt)}"
except:
text += f"{time.strftime(timefmt).decode(_date_format_encoding())}"
return text
##############################################################################
##
def buildLogSpace(Vmin,Vmax,nDec,patn=False):
"""Calculate a log space given low, high and number samples per decade
If patn is True, the upper limit is adjusted to obtain a
    repeat numeric pattern in each decade.
Args:
| Vmin (float) lower limit
| Vmax (float) upper limit
| nDec (int) number of points per decade
| patn (bool) repeat pattern in each decade
Returns:
| vector with equal spacing in log
Raises:
| No exception is raised.
"""
decs = int(np.log10(Vmax/Vmin))
if patn:
ful = np.log10(Vmax/Vmin)
upp = np.ceil(nDec *(ful - decs))
num = np.ceil(decs * nDec + upp + 1)
Vmax = 10 ** (np.log10(Vmin) + ((num-1) / nDec))
else:
num = np.ceil(decs * nDec)
return np.logspace(np.log10(Vmin),np.log10(Vmax),num)
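# Example (illustrative): two decades sampled at two points per decade gives
# roughly [1, 4.6, 21.5, 100]. Note that num is computed with np.ceil and is
# therefore a float; recent numpy versions require an integer count for
# np.logspace, so an explicit int() cast may be needed there.
#
#   >>> buildLogSpace(1.0, 100.0, 2)
#   array([  1.        ,   4.64158883,  21.5443469 , 100.        ])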
##############################################################################
##
def update_progress(progress, bar_length=20):
"""Simple text-based progress bar for Jupyter notebooks.
Note that clear_output, and hence this function wipes the entire cell output,
including previous output and widgets.
Usage:
import pyradi.ryutils as ryutils
import time
print('before')
#Replace this with a real computation
number_of_elements = 100
for i in range(number_of_elements):
time.sleep(0.1)
# progress must be a float between 0 and 1
ryutils.update_progress((i+1) / number_of_elements,bar_length=40)
print('after')
source:
https://mikulskibartosz.name/how-to-display-a-progress-bar-in-jupyter-notebook-47bd4c2944bf
https://ipython.org/ipython-doc/3/api/generated/IPython.display.html#IPython.display.clear_output
Wait to clear the output until new output is available to replace it.
"""
from IPython.display import clear_output
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
if progress < 0:
progress = 0
if progress >= 1:
progress = 1
block = int(round(bar_length * progress))
clear_output(wait = True)
text = "Progress: [{0}] {1:.1f}%".format( "#" * block + "-" * (bar_length - block), progress * 100)
print(text)
##############################################################################
##
def solidAngleSquare(width,breadth,height,stype,numsamples):
"""Calculate the solid angle of a rectagular plate from a point on the normal at its centre
The solid angle of a rectangular flat surface, with dimensions $W$ and $D$, as seen from a
reference point centered above the surface, is determined by the integral of the projected
area of a small elemental area $\cos\theta\,dd\,dw$ across the full size of the surface:
$$
\omega_{\rm s}=\int_W\int_D\frac{dw\,dd\,\cos^{n-2}\theta}{R^2}
$$
$$
\omega_{\rm s}=\int_W\int_D\frac{dw\,dd\,\cos^n\theta}{H^2}
$$
$$
\omega_{\rm s}=\int_W\int_D\frac{dw\,dd\,}{H^2}\left(\frac{H}{R}\right)^n
$$
$$\omega_{\rm s}=\int_W\int_D\frac{dw\,dd\,}{H^2}\left(\frac{H}{\sqrt{w^2+d^2+H^2}}\right)^n,
$$
where $H$ is the reference point height above the surface, and $n=3$ for the geometrical solid angle
and $n=4$ for the projected solid angle. The integral is performed along the $W$ and $D$ dimensions
with increments of $dw$ and $dd$. The slant range between the reference point and the elemental area
$dd\times dw$ is $R=H/\cos\theta$.
Args:
| width (float): size along one edge of rectangle
| breadth (float): size along the second edge of rectangle
| height (float): distance along normal to the rect to reference point
| stype (str): type of solid angle can be one of ('g' or 'p') for ('geometric','projected')
| numsamples (int): number of samples along edges
Returns:
| solid angle (float) or None if incorrect type
Raises:
| No exception is raised.
"""
varx = np.linspace(-width/2, width/2, numsamples)
vary = np.linspace(-breadth/2, breadth/2, numsamples)
x, y = np.meshgrid(varx, vary)
if stype[0]=='g':
gv = (1. / ( (x / height) ** 2 + (y / height) ** 2 + 1 ) ) ** (3 / 2)
elif stype[0]=='p':
gv = (1. / ( (x / height) ** 2 + (y / height) ** 2 + 1 ) ) ** (4 / 2)
else:
return None
solidAngle = np.trapz(np.ravel(gv), dx=breadth*width/(numsamples**2))/(height*height)
return solidAngle
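# Sanity check (illustrative): for a plate much larger than the reference
# height, the geometric solid angle should approach the hemispherical limit of
# 2*pi (~6.28 sr) and the projected solid angle should approach pi (~3.14 sr);
# e.g. solidAngleSquare(100, 100, 1.0, 'g', 1001) comes out close to 6.28.
# A sufficiently fine sampling (numsamples) is assumed for the trapezoidal sum
# to converge.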
##############################################################################
##
def intify_tuple(tup):
"""Make tuple entries int type
"""
tup_int = ()
for tup_ent in tup:
tup_int = tup_int + (int(tup_ent),)
return tup_int
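# Example (illustrative): values are truncated towards zero by int():
#
#   >>> intify_tuple((3.0, 4.7, 2))
#   (3, 4, 2)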
##############################################################################
##
def framesFirst(imageSequence):
"""Image sequence with frames along axis=2 (last index), reordered such that
frames are along axis=0 (first index).
Image sequences are stored in three-dimensional arrays, in rows, columns and frames.
Not all libraries share the same sequencing, some store frames along axis=0 and
others store frames along axis=2. This function reorders an image sequence with
frames along axis=2 to an image sequence with frames along axis=0. The function
uses np.transpose(imageSequence, (2,0,1))
Args:
| imageSequence (3-D np.array): image sequence in three-dimensional array, frames along axis=2
Returns:
| ((3-D np.array): reordered three-dimensional array (view or copy)
Raises:
| No exception is raised.
"""
return np.transpose(imageSequence, (2,0,1))
##############################################################################
##
def framesLast(imageSequence):
"""Image sequence with frames along axis=0 (first index), reordered such that
frames are along axis=2 (last index).
Image sequences are stored in three-dimensional arrays, in rows, columns and frames.
Not all libraries share the same sequencing, some store frames along axis=0 and
others store frames along axis=2. This function reorders an image sequence with
frames along axis=0 to an image sequence with frames along axis=2. The function
uses np.transpose(imageSequence, (1,2,0))
Args:
| imageSequence (3-D np.array): image sequence in three-dimensional array, frames along axis=0
Returns:
| ((3-D np.array): reordered three-dimensional array (view or copy)
Raises:
| No exception is raised.
"""
return np.transpose(imageSequence, (1,2,0))
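# Example (illustrative): framesFirst and framesLast are inverse re-orderings;
# only the axis layout changes, not the data:
#
#   >>> seq = np.zeros((100, 64, 64))            # (frames, rows, cols)
#   >>> framesLast(seq).shape
#   (64, 64, 100)
#   >>> framesFirst(framesLast(seq)).shape
#   (100, 64, 64)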
##############################################################################
##
def index_coords(data, origin=None, framesFirst=True):
"""Creates (x,y) zero-based coordinate arrrays for a numpy array indices, relative to some origin.
This function calculates two meshgrid arrays containing the coordinates of the
input array. The origin of the new coordinate system defaults to the
center of the image, unless the user supplies a new origin.
The data format can be data.shape = (rows, cols, frames) or
data.shape = (frames, rows, cols), the format of which is indicated by the
framesFirst parameter.
Args:
| data (np.array): array for which coordinates must be calculated.
| origin ( (x-orig, y-orig) ): data-coordinates of where origin should be
| framesFirst (bool): True if data.shape is (frames, rows, cols), False if
data.shape is (rows, cols, frames)
Returns:
| x (float np.array): x coordinates in array format.
| y (float np.array): y coordinates in array format.
Raises:
| No exception is raised.
original code by <NAME>
https://stackoverflow.com/questions/3798333/image-information-along-a-polar-coordinate-system
"""
if framesFirst:
ny, nx = data.shape[1:3]
else:
ny, nx = data.shape[:2]
if origin is None:
origin_x, origin_y = nx // 2, ny // 2
else:
origin_x, origin_y = origin
x, y = np.meshgrid(np.arange(nx), np.arange(ny))
x -= origin_x
y -= origin_y
return x, y
##############################################################################
##
def cart2polar(x, y):
"""Converts from cartesian to polar coordinates, given (x,y) to (r,theta).
Args:
| x (float np.array): x values in array format.
| y (float np.array): y values in array format.
Returns:
| r (float np.array): radial component for given (x,y).
| theta (float np.array): angular component for given (x,y).
Raises:
| No exception is raised.
original code by <NAME>
https://stackoverflow.com/questions/3798333/image-information-along-a-polar-coordinate-system
"""
r = np.sqrt(x**2 + y**2)
theta = np.arctan2(y, x)
return r, theta
##############################################################################
##
def polar2cart(r, theta):
"""Converts from polar to cartesian coordinates, given (r,theta) to (x,y).
Args:
| r (float np.array): radial values in array format.
| theta (float np.array): angular values in array format.
Returns:
| x (float np.array): x component for given (r, theta).
| y (float np.array): y component for given (r, theta).
Raises:
| No exception is raised.
original code by <NAME>
https://stackoverflow.com/questions/3798333/image-information-along-a-polar-coordinate-system
"""
    x = r * np.cos(theta)
    y = r * np.sin(theta)
    return x, y
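# Quick consistency check (illustrative): cart2polar and polar2cart are
# inverses of each other:
#
#   >>> r, theta = cart2polar(np.array([3.0]), np.array([4.0]))   # r -> 5, theta -> atan2(4, 3)
#   >>> polar2cart(r, theta)       # -> approximately (array([3.]), array([4.]))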
# -*- coding: utf-8 -*-
"""
FDMNES class "classes_fdmnes.py"
functions for generating fdmnes files
By <NAME>, PhD
Diamond
2018
Version 1.6
Last updated: 22/04/20
Version History:
17/04/18 0.9 Program created
04/06/18 1.0 Program completed and tested
11/06/18 1.1 Analysis updated to find density with _sd1 and _sd0
13/07/19 1.2 Small updates for gui functionality
07/08/19 1.3 Changed reflist in FdmnesAnalysis to list of hkl
16/08/19 1.4 Added BavFile to read parts of .bav file
18/03/20 1.5 Corrected density file for new headers
22/04/20 1.6 Added FdmnesCompare and __add__ method for FdmnesAnalysis
@author: DGPorter
"""
import os, re
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # 3D plotting
from . import functions_general as fg
from . import functions_crystallography as fc
__version__ = '1.6'
class Fdmnes:
"""
FDMNES Create files and run program
E.G.
fdm = Fdmnes(xtl)
fdm.setup(comment='Test',
absorber='Co',
edge='K'
azi_ref=[0,0,1],
hkl_reflections=[[1,0,0],[0,1,0],[1,1,0]]
fdm.create_files('New_Calculation')
fdm.write_fdmfile()
output = fdm.run_fdmnes()
###Wait for program completion###
analysis = fdm.analyse()
"""
def __init__(self, xtl):
"""
initialise
:param xtl: object
"""
self.xtl = xtl
# Options
self.exe_path = find_fdmnes()
self.output_name = 'out'
self.output_path = self.generate_output_path()
self.input_name = 'FDMNES_%s.txt' % fg.saveable(self.xtl.name)
self.comment = ''
self.range = '-19 0.1 20'
self.radius = 4.0
self.edge = 'K'
self.absorber = self.xtl.Atoms.atom_type[0]
self.green = True
self.scf = False
self.quadrupole = False
self.azi_ref = [1, 0, 0]
self.correct_azi = True
self.hkl_reflections = [[1, 0, 0]]
def setup(self, exe_path=None, output_path=None, output_name=None, folder_name=None, input_name=None,
comment=None, range=None, radius=None, edge=None, absorber=None, green=None, scf=None,
quadrupole=None, azi_ref=None, correct_azi=None, hkl_reflections=None):
"""
Set FDMNES Parameters
:param exe_path: Location of FDMNES executable, e.g. 'c:\FDMNES\fdmnes_win64.exe'
:param output_path: Specify the output path
:param folder_name: Specify output folder name (replaces output_path)
:param output_name: Name of FDMNES output files
:param input_name: Name of FDMNES input file
:param comment: A comment written in the input file
:param range: str energy range in eV relative to Fermi energy
:param radius: calculation radius
:param edge: absorptin edge, 'K', 'L3', 'L2'
:param absorber: absorbing element, 'Co'
:param green: Green's function (muffin-tin potential)
:param scf: True/False, Self consistent solution
:param quadrupole: False/True, E1E2 terms allowed
:param azi_ref: azimuthal reference, [1,0,0]
:param correct_azi: if True, correct azimuthal reference for real cell (use in hexagonal systems)
:param hkl_reflections: list of hkl reflections [[1,0,0],[0,1,0]]
:return: None
"""
if exe_path is not None:
self.exe_path = exe_path
if output_path is not None:
self.output_path = output_path
if output_name is not None:
self.output_name = output_name
if folder_name is not None:
self.output_path = self.generate_output_path(folder_name)
if input_name is not None:
self.input_name = input_name
if comment is not None:
self.comment = comment
if range is not None:
self.range = range
if radius is not None:
self.radius = radius
if edge is not None:
self.edge = edge
if absorber is not None:
self.absorber = absorber
if green is not None:
self.green = green
if scf is not None:
self.scf = scf
if quadrupole is not None:
self.quadrupole = quadrupole
if azi_ref is not None:
self.azi_ref = azi_ref
if correct_azi is not None:
self.correct_azi = correct_azi
if hkl_reflections is not None:
self.hkl_reflections = np.asarray(hkl_reflections).reshape(-1,3)
self.info()
def info(self):
"""
Print setup info
:return: None
"""
print('FDMNES Options')
print('exe_path : %s' % self.exe_path)
print('output_path : %s' % self.output_path)
print('output_name : %s' % self.output_name)
print('input_name : %s' % self.input_name)
print('comment : %s' % self.comment)
print('range : %s' % self.range)
print('radius : %s' % self.radius)
print('absorber : %s' % self.absorber)
print('edge : %s' % self.edge)
print('green : %s' % self.green)
print('scf : %s' % self.scf)
print('quadrupole : %s' % self.quadrupole)
if self.correct_azi:
print('azi_ref : %s' % self.azimuthal_reference(self.azi_ref))
else:
print('azi_ref : %s' % self.azi_ref)
print('hkl_reflections:')
for ref in self.hkl_reflections:
print(' (%1.0f,%1.0f,%1.0f)' % (ref[0], ref[1], ref[2]))
def azimuthal_reference(self, hkl=[1, 0, 0]):
"""
Generate the azimuthal reference
:param hkl: (1*3) array [h,k,l]
:return: None
"""
UV = self.xtl.Cell.UV()
UVs = self.xtl.Cell.UVstar()
sl_ar = np.dot(np.dot(hkl, UVs), np.linalg.inv(UVs)) # Q*/UV*
fdm_ar = np.dot(np.dot(hkl, UVs), np.linalg.inv(UV)) # Q*/UV
fdm_ar = fdm_ar / np.sqrt(np.sum(fdm_ar ** 2)) # normalise length to 1
return fdm_ar
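    # Illustrative note: the hkl reference is taken to Cartesian space through
    # the reciprocal lattice (hkl . UV*) and then re-expressed on the real-space
    # cell axes before normalisation. For a cubic cell the two frames coincide,
    # so e.g. fdm.azimuthal_reference([1, 1, 0]) is approximately
    # [0.707, 0.707, 0.0] (here fdm is a hypothetical Fdmnes instance built on
    # a cubic crystal, used only for illustration).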
def generate_parameters_string(self):
"""
Create the string of parameters and comments for the input file
:return: str
"""
# Get crystal parameters
UV = self.xtl.Cell.UV()
avUV=self.xtl.Cell.UV()
uvw, type, label, occupancy, uiso, mxmymz = self.xtl.Structure.get()
noat = len(uvw)
# Lattice parameters
a,b,c,alpha,beta,gamma = self.xtl.Cell.lp()
# element types
types,typ_idx = np.unique(type, return_inverse=True)
Z = fc.atom_properties(types,'Z')
absorber_idx = np.where(type == self.absorber)[0]
        nonabsorber_idx = np.where(type != self.absorber)[0]
import os
from ase.visualize import view
from mpl_toolkits.mplot3d import Axes3D # noqa
from scipy.optimize import curve_fit
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.set(
style="ticks",
rc={
"font.family": "Arial",
"font.size": 40,
"axes.linewidth": 2,
"lines.linewidth": 5,
},
font_scale=3.5,
palette=sns.color_palette("Set2")
)
c = ["#007fff", "#ff3616", "#138d75", "#7d3c98", "#fbea6a"] # Blue, Red, Green, Purple, Yellow
import utilities
from Helix import Helix
import matplotlib
matplotlib.use("Qt5Agg")
def cart2pol(x, y):
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return rho, phi
def center_atoms(atoms, center):
x = center[0]
y = center[1]
z = center[2]
# Centering atoms around given atom
for idx, atom in enumerate(atoms):
atoms[idx].position[0] = atom.position[0] - x
atoms[idx].position[1] = atom.position[1] - y
atoms[idx].position[2] = atom.position[2] - z
return atoms
def print_jmol_str(line_values, center):
file = "analyzed/diffp_2me_homo-1"
print("*"*25)
print(f"Writing to {file}")
print("*"*25)
curve_str = f"draw curve1 CURVE curve width 0.3"
for value in line_values:
x = value[0] + center[0]
y = value[1] + center[1]
z = value[2] + center[2]
curve_str += f" {{ {x} {y} {z} }}"
with open(f"{file}/jmol_export.spt", "a") as f:
f.write(curve_str)
print(curve_str)
def remove_outlier(ordered):
# Not elegant, possibly slow, but it works
temp = []
for idx, value in enumerate(ordered[:, 2]):
if idx < len(ordered[:, 2]) - 1:
temp.append(abs(value - ordered[idx + 1, 2]))
std = np.std(temp)
mean = np.mean(temp)
# It lies much further down the z-axis
# than the rest of the points
if not (mean - std) < temp[0] < (mean + std):
return ordered[1:]
# If no outliers is found, return the original array
else:
return ordered
center_bottom_top = np.array([2, 9, 7])
handedness = None
truncation = [None, None]
file = "./8cum_me_homo_homo/homo.cube"
ax = plt.axes(projection='3d')
radius = 1.4
limits = 3
# Check that the analysis hasn't already been done
names = file.split("/")
folder = "/".join(names[-3:-1])
print(f"foldername: {folder}")
if os.path.exists(folder):
print(f"Found existing data files in {folder}")
planes = np.load(folder + "/planes.npy", allow_pickle=True)
atoms, _, _, center = np.load(
folder + "/atom_info.npy", allow_pickle=True
)
xyz_vec = np.load(folder + "/xyz_vec.npy", allow_pickle=True)
else:
atoms, all_info, xyz_vec = utilities.read_cube(file)
# Sort the data after z-value
all_info = all_info[all_info[:, 2].argsort()]
# Center of the molecule is chosen to be Ru
# center = atoms[3].position
center = atoms[center_bottom_top[0]].position
all_info[:, :3] = all_info[:, :3] - center
atoms = center_atoms(atoms, center)
planes = []
plane = []
prev_coord = all_info[0]
for coordinate in tqdm(all_info, desc="Finding planes.."):
if np.equal(coordinate[2], prev_coord[2]):
# we're in the same plane so add the coordinate
plane.append([coordinate[0],
coordinate[1],
coordinate[2],
coordinate[3]])
else:
plane = np.array(plane)
# Drop coordinates with isovalues == 0.0
plane = plane[np.where(plane[:, 3] != 0.0)]
if plane.size != 0:
planes.append(plane)
plane = []
prev_coord = coordinate
planes = np.array(planes)
mean_z = []
ordered = []
all_r = []
bottom_carbon = atoms[center_bottom_top[1]].position
top_carbon = atoms[center_bottom_top[2]].position
print('Cleaning values..')
for idx, plane in enumerate(planes):
if top_carbon[2] > plane[0, 2] > bottom_carbon[2]:
if idx < len(planes) - 1:
# Uncomment to find points with the most positive isovalue
# Rare cases there might be the same maximum at two locations
# That's I just take the first one with [0][0]
maximum = np.amax(plane[:, 3])
max_index = np.where(plane[:, 3] == maximum)[0][0]
next_plane = planes[idx + 1]
next_maximum = np.amax(next_plane[:, 3])
next_index = np.where(next_plane[:, 3] == next_maximum)[0][0]
# Uncomment to find points with the most negative isovalue
# minimum = np.amin(plane[:, 3])
# min_index = np.where(plane[:, 3] == minimum)
# next_plane = planes[idx + 1]
# next_minimum = np.amin(next_plane[:, 3])
# next_index = np.where(next_plane[:, 3] == next_minimum)
current_iso_idx = max_index
next_iso_idx = next_index
# Check if point is within certain radius of the helical axis
if cart2pol(plane[current_iso_idx, 0], plane[current_iso_idx, 1])[0] < radius:
current_x = plane[current_iso_idx, 0].item()
current_y = plane[current_iso_idx, 1].item()
current_z = plane[current_iso_idx, 2].item()
current_iso = plane[current_iso_idx, 3].item()
next_x = next_plane[next_index, 0].item()
next_y = next_plane[next_index, 1].item()
next_z = next_plane[next_index, 2].item()
next_iso = next_plane[next_iso_idx, 3].item()
# Current point is beneath the next point
if (current_x == next_x) & (current_y == next_y):
delta_z = abs(next_z - current_z)
                    # Are they directly on top of each other?
if round(delta_z, 4) <= 2*round(xyz_vec[2], 4):
mean_z.append(current_z)
# They are not directly on top of each other
else:
ax.scatter(
plane[current_iso_idx, 0],
plane[current_iso_idx, 1],
plane[current_iso_idx, 2],
# c='purple',
c=c[0],
)
# To be used as an estimate of
# the radius when fitting the helix
all_r.append(
cart2pol(plane[current_iso_idx, 0], plane[current_iso_idx, 1])[0]
)
mean_z.append(current_z)
ordered.append(
[current_x, current_y, np.mean(mean_z), current_iso]
)
mean_z = []
# TODO: Maybe I'm skipping the last point? Does it even matter?
# else:
# prev_x = current_x
# prev_y = current_y
# prev_z = current_z
# prev_iso = current_iso
# current_x = plane[max_index, 0].item()
# current_y = plane[max_index, 1].item()
# current_z = plane[max_index, 2].item()
# current_iso = plane[max_index, 3].item()
# if cart2pol(current_x, current_y)[0] < radius:
# all_r.append(cart2pol(plane[max_index, 0], plane[max_index, 1])[0])
# if (current_x == prev_x) & (current_y == prev_y):
# delta_z = abs(prev_z - current_z)
# # Are they directly on top of each other?
# if round(delta_z, 4) <= 2*round(z_vec, 4):
# mean_z.append(current_z)
# ordered.append([current_x,
# current_y,
# np.mean(mean_z),
# current_iso])
# # They are not directly on top of each other
# else:
# mean_z.append(current_z)
# ordered.append([current_x,
# current_y,
# np.mean(mean_z),
# current_iso])
# mean_z = []
ordered = np.array(ordered)
# Copyright 2013-2021 The Salish Sea MEOPAR contributors
# and The University of British Columbia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A collection of functions for working with the output of WebTide.
"""
def get_data_from_csv(tidevar, constituent, depth, CFactor, ibmin, ibmax,
Tfilename='Tidal Elevation Constituents T.csv',
Ufilename='Tidal Current Constituents U.csv',
Vfilename='Tidal Current Constituents V.csv', inorth=0):
"""Get the constituent data from the csv file.
"""
import pandas as pd
from math import radians
import numpy
import math
theta = radians(29) # rotation of the grid = 29 degrees
# correction factors
base = constituent
corr = 1 # if not otherwise set
corr_shift = 0 # if not otherwise set
if constituent == "M2":
corr_pha = CFactor['A2 Phase']
corr_amp = CFactor['A2 Amp']
corr = CFactor['A2 Flux']
corr_shift = CFactor['A2 Shift']
elif constituent == "S2":
corr_pha = CFactor['A2 Phase'] + CFactor['S2 Phase']
corr_amp = CFactor['A2 Amp'] * CFactor['S2 Amp']
corr = CFactor['A2 Flux']
corr_shift = CFactor['A2 Shift']
elif constituent == "N2":
corr_pha = CFactor['A2 Phase'] + CFactor['N2 Phase']
corr_amp = CFactor['A2 Amp'] * CFactor['N2 Amp']
corr = CFactor['A2 Flux']
corr_shift = CFactor['A2 Shift']
elif constituent == "K2": # based on S2
base = "S2"
corr_pha = CFactor['A2 Phase'] + CFactor['S2 Phase']
corr_amp = CFactor['A2 Amp'] * CFactor['S2 Amp']
corr = CFactor['A2 Flux']
corr_shift = CFactor['A2 Shift']
elif constituent == "K1":
corr_pha = CFactor['A1 Phase']
corr_amp = CFactor['A1 Amp']
elif constituent == "O1":
corr_pha = CFactor['A1 Phase'] + CFactor['O1 Phase']
corr_amp = CFactor['A1 Amp'] * CFactor['O1 Amp']
elif constituent == "P1": # based on K1
base = "K1"
corr_pha = CFactor['A1 Phase']
corr_amp = CFactor['A1 Amp']
elif constituent == "Q1":
corr_pha = CFactor['A1 Phase'] + CFactor['Q1 Phase']
corr_amp = CFactor['A1 Amp'] * CFactor['Q1 Amp']
# WATER LEVEL ELEVATION
if tidevar == 'T':
webtide = pd.read_csv(Tfilename,
skiprows=2)
webtide = webtide.rename(columns={'Constituent': 'const',
'Longitude': 'lon',
'Latitude': 'lat',
'Amplitude (m)': 'amp',
'Phase (deg GMT)': 'pha'})
# number of points from webtide
nwebtide = int(webtide.shape[0]/8.)
# how long is the boundary?
boundlen = ibmax - ibmin
gap = boundlen - nwebtide - inorth
# along western boundary, etaZ1 and etaZ2
amp_W = numpy.zeros((boundlen, 1))
pha_W = numpy.zeros((boundlen, 1))
# find the boundary
I = numpy.arange(ibmin, ibmax)
# allocate the M2 phase and amplitude from Webtide
# to the boundary cells
# (CHECK: Are these allocated in the right order?)
amp_W[gap:boundlen-inorth, 0] = webtide[webtide.const == (base + ':')].amp * corr_amp
amp_W[0:gap, 0] = amp_W[gap, 0]
amp_W[boundlen-inorth:, 0] = amp_W[boundlen-inorth-1,0]
pha_W[gap:boundlen-inorth, 0] = webtide[webtide.const == (base + ':')].pha + corr_pha
pha_W[0:gap, 0] = pha_W[gap, 0]
pha_W[boundlen-inorth:, 0] = pha_W[boundlen-inorth-1, 0]
if constituent == "K1" or constituent == "M2":
print (constituent, "eta")
if constituent == "P1":
amp_W = amp_W * 0.310
pha_W = pha_W - 3.5
elif constituent == "K2":
amp_W = amp_W * 0.235
pha_W = pha_W - 5.7
# convert the phase and amplitude to cosine and sine format that NEMO likes
Z1 = amp_W * numpy.cos(numpy.radians(pha_W))
Z2 = amp_W * numpy.sin(numpy.radians(pha_W))
#U VELOCITY
if tidevar == 'U':
webtide = pd.read_csv(Ufilename,
skiprows=2)
webtide = webtide.rename(columns={'Constituent': 'const',
'Longitude': 'lon',
'Latitude': 'lat',
'U Amplitude (m)': 'ewamp',
'U Phase (deg GMT)': 'ewpha',
'V Amplitude (m)': 'nsamp',
'V Phase (deg GMT)': 'nspha'})
# number of points from webtide
nwebtide = int(webtide.shape[0]/8.)
# how long is the boundary?
boundlen = ibmax - ibmin
gap = boundlen - nwebtide - inorth
# Convert amplitudes from north/south u/v into grid co-ordinates
# Convert phase from north/south into grid co-ordinates (see docs/tides/tides_data_acquisition for details)
ua_ugrid = numpy.array(webtide[webtide.const == (base + ':')].ewamp) * corr
va_ugrid = numpy.array(webtide[webtide.const == (base + ':')].nsamp) * corr
uphi_ugrid = numpy.radians(numpy.array(webtide[webtide.const == (base + ':')].ewpha))
vphi_ugrid = numpy.radians(numpy.array(webtide[webtide.const == (base + ':')].nspha))
uZ1 = (ua_ugrid * numpy.cos(theta) * numpy.cos(uphi_ugrid) -
va_ugrid * numpy.sin(theta) * numpy.sin(vphi_ugrid))
uZ2 = (ua_ugrid * numpy.cos(theta) * numpy.sin(uphi_ugrid) +
va_ugrid * numpy.sin(theta) * numpy.cos(vphi_ugrid))
# adjustments for phase correction
amp = numpy.sqrt(uZ1[:]**2 + uZ2[:]**2)
pha = []
for i in range(0, len(amp)):
pha.append(math.atan2(uZ2[i], uZ1[i]) + numpy.radians(corr_pha + corr_shift))
if constituent == "P1":
amp = amp * 0.310
pha[:] = [phase - numpy.radians(3.5) for phase in pha]
elif constituent == "K2":
amp = amp * 0.235
pha[:] = [phase - numpy.radians(5.7) for phase in pha]
uZ1 = amp * numpy.cos(pha) * corr_amp
uZ2 = amp * numpy.sin(pha) * corr_amp
# find the boundary
I = numpy.arange(ibmin, ibmax)
#allocate the z1 and z2 I calculated from Webtide to the boundary cells
#along western boundary, etaZ1 and etaZ2 are 0 in masked cells
#(CHECK: Are these allocated in the right order?)
Z1 = numpy.zeros((boundlen,1))
Z2 = numpy.zeros((boundlen,1))
Z1[gap:boundlen-inorth,0] = uZ1
Z2[gap:boundlen-inorth,0] = uZ2
Z1[0:gap,0] = Z1[gap, 0]
Z2[0:gap,0] = Z2[gap, 0]
Z1[boundlen-inorth:, 0] = Z1[boundlen-inorth-1, 0]
Z2[boundlen-inorth:, 0] = Z2[boundlen-inorth-1, 0]
#V VELOCITY
if tidevar == 'V':
webtide = pd.read_csv(Vfilename,\
skiprows = 2)
webtide = webtide.rename(columns={'Constituent': 'const', 'Longitude': 'lon', 'Latitude': 'lat', \
'U Amplitude (m)': 'ewamp', 'U Phase (deg GMT)': 'ewpha',\
'V Amplitude (m)': 'nsamp', 'V Phase (deg GMT)': 'nspha'})
# number of points from webtide
nwebtide = int(webtide.shape[0]/8.)
# how long is the boundary?
boundlen = ibmax - ibmin
gap = boundlen - nwebtide - inorth
#Convert phase from north/south into grid co-ordinates (see docs/tides/tides_data_acquisition for details)
ua_vgrid = numpy.array(webtide[webtide.const==(base+':')].ewamp)*corr
va_vgrid = numpy.array(webtide[webtide.const==(base+':')].nsamp)*corr
uphi_vgrid = numpy.radians(numpy.array(webtide[webtide.const==(base+':')].ewpha))
vphi_vgrid = numpy.radians(numpy.array(webtide[webtide.const==(base+':')].nspha))
vZ1 = -ua_vgrid*numpy.sin(theta)*numpy.cos(uphi_vgrid) - va_vgrid*numpy.cos(theta)*numpy.sin(vphi_vgrid)
        vZ2 = -ua_vgrid*numpy.sin(theta)*numpy.sin(uphi_vgrid) + va_vgrid*numpy.cos(theta)*numpy.cos(vphi_vgrid)
import argparse
import numpy as np
from PIL import Image
import scipy.io
import matplotlib.pyplot as plt
import os
def visualize_semantic_segmentation(label_array, color_map, black_bg=False, save_path=None):
"""
tool for visualizing semantic segmentation for a given label array
:param label_array: [H, W], contains [0-nClasses], 0 for background
:param color_map: array read from 'colorMapC46.mat'
:param black_bg: the background is black if set True
:param save_path: path for saving the image
"""
visual_image = np.zeros((label_array.shape[0], label_array.shape[1], 3), dtype=np.uint8)
if not black_bg:
visual_image.fill(255)
## read all colors
colors_list = []
for i in range(color_map.shape[0]):
colors_list.append(color_map[i][1][0])
    colors_list = np.array(colors_list)
# ======================================================================================================================
# KIV auxiliary functions: based on matlab codes of the authors
# https://github.com/r4hu1-5in9h/KIV
# ======================================================================================================================
import numpy as np
import os
from scipy import optimize
def make_psd(A):
""" for numerical stability, add a small ridge to a symmetric matrix """
# shape check: A should be a square matrix
if A.shape[0] != A.shape[1]:
raise TypeError('input matrix should be a square matrix')
eps = 1e-10
N = A.shape[0]
A_psd = (A + A.T) / 2 + eps * np.eye(N)
return A_psd
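# Example (illustrative): the input is symmetrised and a 1e-10 ridge is added
# to the diagonal so that downstream inversions/Cholesky factorisations stay
# numerically stable:
#
#   >>> A = np.array([[1.0, 2.0], [0.0, 1.0]])   # slightly non-symmetric
#   >>> make_psd(A)                              # -> [[1 + 1e-10, 1], [1, 1 + 1e-10]]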
def data_split(X, Y, Z, frac):
""" splits the data in two parts according to a fraction """
# shape check: if X/Z is a vector => convert into a matrix
if len(X.shape) == 1:
X = X.reshape(len(X), 1)
if len(Z.shape) == 1:
Z = Z.reshape(len(Z), 1)
# splitting
N = len(Y)
n = int(np.round(frac * N))
X1, X2 = X[0:n, :], X[n:N, :]
Z1, Z2 = Z[0:n, :], Z[n:N, :]
Y1, Y2 = Y[0:n], Y[n:N]
# output
df = {'X1': X1, 'X2': X2, 'Z1': Z1, 'Z2': Z2, 'Y1': Y1, 'Y2': Y2}
return df
def med_inter(X):
"""
:param X: input vector
:return: median interpoint distance to use as the bandwidth
"""
n_x = len(X)
A = np.repeat(X.reshape(n_x, 1), n_x, axis=1)
dist = np.abs(A - A.T).reshape(-1)
v = np.median(dist)
return v
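# Example (illustrative): the median is taken over all pairwise distances,
# including the zero self-distances on the diagonal:
#
#   >>> med_inter(np.array([0.0, 1.0, 3.0]))
#   1.0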
def get_Kmat(X, Y, v):
"""
returns the covariance matrix for the noiseless GP with RBF kernel at inputs X and Y
:param X, Y: vectors of dim n_x and n_y
:param v: bandwidth
"""
n_x = len(X)
n_y = len(Y)
K_true = np.empty((n_x, n_y))
# fill in the matrix
for i in range(n_x):
for j in range(n_y):
K_true[i, j] = np.exp(-np.sum((X[i] - Y[j]) ** 2) / (2 * (v ** 2)))
return K_true
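# Example (illustrative): entries follow K[i, j] = exp(-(X[i] - Y[j])**2 / (2 * v**2)):
#
#   >>> get_Kmat(np.array([0.0, 1.0]), np.array([0.0]), v=1.0)
#   array([[1.        ],
#          [0.60653066]])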
def get_Kmat_mult(X, Y, v_vec):
"""
calculates a multivariate RBF kernel as a product of scalar products of each column of X
:param X and Y: matrices
:param v_vec: vector of bandwidths
"""
# shape check: if X/Y is a vector => convert into a matrix
if len(X.shape) == 1:
X = X.reshape(len(X), 1)
if len(Y.shape) == 1:
Y = Y.reshape(len(Y), 1)
# shape check: the number of columns should be the same
if X.shape[1] != Y.shape[1]:
raise TypeError('number of columns of input matrices must coincide')
n_x = X.shape[0]
n_y = Y.shape[0]
d = X.shape[1]
# calculate the kernel
K_true = np.ones((n_x, n_y))
for j in range(d):
K_j = get_Kmat(X[:, j], Y[:, j], v_vec[j])
K_true = np.multiply(K_true, K_j)
return K_true
def get_K(X, Z, Y, X_test):
"""
Precalculates kernel matrices for the 1st and 2nd stages
:param X: endogenous regressors
:param Z: IVs
:param Y: response variable
:param X_test: test sample
:return: data dictionary
"""
# shape check: if X/Z is a vector => convert into a matrix
if len(X.shape) == 1:
X = X.reshape(len(X), 1)
if len(Z.shape) == 1:
Z = Z.reshape(len(Z), 1)
# shape check: if oos_type is point, then X_test is d_x by 1 a vector => into [1, d_x] matrix
if len(X_test.shape) == 1:
X_test = X_test.reshape(1, len(X_test))
# bandwidths
v_x = np.array([med_inter(X[:, j]) for j in range(X.shape[1])])
v_z = np.array([med_inter(Z[:, j]) for j in range(Z.shape[1])])
# split the data
df = data_split(X, Y, Z, frac=0.5)
# calculate kernels
K_XX = get_Kmat_mult(df['X1'], df['X1'], v_x)
K_xx = get_Kmat_mult(df['X2'], df['X2'], v_x)
K_xX = get_Kmat_mult(df['X2'], df['X1'], v_x)
K_Xtest = get_Kmat_mult(df['X1'], X_test, v_x)
K_ZZ = get_Kmat_mult(df['Z1'], df['Z1'], v_z)
K_Zz = get_Kmat_mult(df['Z1'], df['Z2'], v_z)
# output
df_out = {'K_XX': K_XX, 'K_xx': K_xx, 'K_xX': K_xX, 'K_Xtest': K_Xtest,
'K_ZZ': K_ZZ, 'K_Zz': K_Zz, 'Y1': df['Y1'], 'Y2': df['Y2']}
return df_out
def KIV_pred(df, hyp, stage):
"""
:param df: data frame produced by get_K
:param hyp: hyperparameters
:param stage: stage=(2,3) corresponds to stage 2 and testing
:return: predictive mean for KIV
"""
n = len(df['Y1'])
m = len(df['Y2'])
lam = hyp[0]
xi = hyp[1]
brac = make_psd(df['K_ZZ']) + lam * np.eye(n) * n
W = df['K_XX'] @ np.linalg.inv(brac) @ df['K_Zz']
brac2 = make_psd(W @ W.T) + m * xi * make_psd(df['K_XX'])
alpha = np.linalg.inv(brac2) @ W @ df['Y2']
if stage == 2:
K_Xtest = df['K_XX']
elif stage == 3:
K_Xtest = df['K_Xtest']
else:
        raise ValueError('stage should be equal to either 2 or 3')
y_pred = (alpha.T @ K_Xtest).flatten()
return y_pred
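# For reference, the two stages implemented above in closed form
# (n = len(Y1), m = len(Y2)):
#   gamma  = (K_ZZ + n*lam*I)^{-1} K_Zz             # stage 1 ridge regression
#   W      = K_XX @ gamma
#   alpha  = (W @ W.T + m*xi*K_XX)^{-1} @ W @ Y2    # stage 2 ridge regression
#   y_pred = alpha.T @ K(X1, X_test)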
def KIV1_loss(df, lam):
"""
:param df: data frame produced by get_K
:param lam: 1st stage hyperparameter
:return: 1st stage error of KIV
"""
n = len(df['Y1'])
m = len(df['Y2'])
brac = make_psd(df['K_ZZ']) + lam * np.eye(n) * n
gamma = np.linalg.inv(brac) @ df['K_Zz']
loss = np.trace(df['K_xx'] - 2 * df['K_xX'] @ gamma + gamma.T @ df['K_XX'] @ gamma) / m
return loss
def KIV2_loss(df, hyp):
"""
:param df: data frame produced by get_K
:param hyp: hyperparameters
:return: 2nd stage error of KIV
"""
n = len(df['Y1'])
Y1_pred = KIV_pred(df, hyp, 2)
loss = np.sum((df['Y1'] - Y1_pred) ** 2) / n
return loss
def get_KIV(data, X_test):
"""
This function estimates the model using KIV and provides out of sample estimates
:param data: a dictionary, which is a tuple (X, Y, Z)
:param X_test: out of sample data
:return: out of sample estimates
"""
X, Y, Z = data['X'], data['Y'], data['Z']
# 1. calculate kernels
df = get_K(X, Z, Y, X_test)
# 2. initialize hyperparameters for tuning
    lam_0 = np.log(0.05)
import numpy as np
import pypw85
if __name__ == "__main__":
x1 = np.array([-0.5, 0.4, -0.7])
n1 = np.array([0.0, 0.0, 1.0])
a1, c1 = 10, 0.1
x2 = np.array([0.2, -0.3, 0.4])
n2 = np.array([1.0, 0.0, 0.0])
a2, c2 = 0.5, 5.0
r12 = x2 - x1
q1 = np.empty((6,), dtype=np.float64)
pypw85.spheroid(a1, c1, n1, q1)
print(repr(q1))
q2 = np.empty_like(q1)
pypw85.spheroid(a2, c2, n2, q2)
print(repr(q2))
out = np.empty((2,), dtype=np.float64)
pypw85.contact_function(r12, q1, q2, out)
mu2, lambda_ = out
print("μ² = {}".format(mu2))
print("λ = {}".format(lambda_))
Q1 = np.zeros((3, 3), dtype=np.float64)
i, j = np.triu_indices_from(Q1)
Q1[i, j] = q1
Q1[j, i] = q1
print(repr(Q1))
Q2 = np.zeros_like(Q1)
Q2[i, j] = q2
Q2[j, i] = q2
print(repr(Q2))
Q1_inv = np.linalg.inv(Q1)
Q2_inv = np.linalg.inv(Q2)
f1 = lambda x: Q1_inv.dot(x).dot(x)
print(f1((-a1, 0.0, 0.0)))
print(f1((a1, 0.0, 0.0)))
print(f1((0.0, -a1, 0.0)))
print(f1((0.0, a1, 0.0)))
print(f1((0.0, 0.0, -c1)))
print(f1((0.0, 0.0, c1)))
f2 = lambda x: Q2_inv.dot(x).dot(x)
print(f2((c2, 0.0, 0.0)))
print(f2((-c2, 0.0, 0.0)))
print(f2((0.0, a2, 0.0)))
print(f2((0.0, -a2, 0.0)))
print(f2((0.0, 0.0, a2)))
print(f2((0.0, 0.0, -a2)))
Q = (1 - lambda_) * Q1 + lambda_ * Q2
x = np.linalg.solve(Q, r12)
x0a = x1 + (1 - lambda_) * np.dot(Q1, x)
print(repr(x0a))
    x0b = x2 - lambda_ * np.dot(Q2, x)
    print(repr(x0b))
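    # Interpretation (based on the Perram-Wertheim overlap criterion that
    # pypw85 implements): mu^2 < 1 means the two spheroids overlap, mu^2 > 1
    # means they are separated, and mu^2 = 1 means they are externally tangent.
    # x0a and x0b are two expressions for the same contact point and should
    # agree to numerical precision.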
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Convert between various time formats relevant to Chandra.
Chandra.Time provides a simple interface to the C++ time conversion
utility axTime3 (which itself is a wrapper for XTime) written by <NAME>. Chandra.Time also supports some useful additional time formats.
The supported time formats are:
============ ============================================== =======
Format Description System
============ ============================================== =======
secs Seconds since 1998-01-01T00:00:00 (float) tt
numday DDDD:hh:mm:ss.ss... Elapsed days and time utc
relday [+-]<float> Relative number of days from now utc
jd Julian Day utc
mjd Modified Julian Day = JD - 2400000.5 utc
date YYYY:DDD:hh:mm:ss.ss.. utc
caldate YYYYMonDD at hh:mm:ss.ss.. utc
fits YYYY-MM-DDThh:mm:ss.ss.. tt
iso YYYY-MM-DD hh:mm:ss.ss.. utc
unix Unix time (since 1970.0) utc
greta YYYYDDD.hhmmss[sss] utc
year_doy YYYY:DDD utc
mxDateTime mx.DateTime object utc
frac_year YYYY.ffffff = date as a floating point year utc
plotdate Matplotlib plotdate (days since year 0) utc
cxotime CxoTime class object varies
============ ============================================== =======
Each of these formats has an associated time system, which must be one of:
======= ============================
met Mission Elapsed Time
tt Terrestrial Time
tai International Atomic Time
utc Coordinated Universal Time
======= ============================
Usage
-----
The normal usage is to create an object that allows conversion from one time
format to another. Conversion takes place by examining the appropriate
attribute. Unless the time format is specified or it is ambiguous (i.e. secs,
jd, mjd, and unix), the time format is automatically determined. To
specifically select a format use the 'format' option.::
>>> from Chandra.Time import DateTime
>>> t = DateTime('1999-07-23T23:56:00')
>>> print t.date
1999:204:23:54:55.816
>>> t.date
'1999:204:23:54:55.816'
>>> t.secs
49161360.0
>>> t.jd
2451383.496479352
>>> DateTime(t.jd + 1, format='jd').fits
'1999-07-24T23:56:00.056'
>>> DateTime(t.mjd + 1, format='mjd').caldate
'1999Jul24 at 23:54:55.820'
>>> u = DateTime(1125538824.0, format='unix')
>>> u.date
'2005:244:01:40:24.000'
>>> mxd = mx.DateTime.Parser.DateTimeFromString('1999-01-01 12:13:14')
>>> DateTime(mxd).fits
'1999-01-01T12:14:18.184'
>>> DateTime(mxd).date
'1999:001:12:13:14.000'
>>> DateTime(mxd).mxDateTime.strftime('%c')
'Fri Jan 1 12:13:14 1999'
>>> DateTime('2007122.01020340').date
'2007:122:01:02:03.400'
If no input time is supplied when creating the object then the current time is used.::
>>> DateTime().fits
'2009-11-14T18:24:14.504'
For convenience a DateTime object can be initialized from another DateTime object.
>>> t = DateTime()
>>> u = DateTime(t)
Sequences of dates
------------------
The input time can also be an iterable sequence (returns a list) or
a numpy array (returns a numpy array with the same shape)::
>>> import numpy
>>> DateTime([1,'2001:255',3]).date
['1997:365:23:58:57.816', '2001:255:12:00:00.000', '1997:365:23:58:59.816']
>>> DateTime(numpy.array([[1,2],[3,4]])).fits
array([['1998-01-01T00:00:01.000', '1998-01-01T00:00:02.000'],
['1998-01-01T00:00:03.000', '1998-01-01T00:00:04.000']],
dtype='|S23')
Date arithmetic
---------------
DateTime objects support a limited arithmetic with a delta time expressed in days.
One can add a delta time to a DateTime or subtract a delta time from a DateTime.
It is also possible to subtract two DateTiem objects to get a delta time in days.
If the DateTime holds a NumPy array or the delta times are NumPy arrays then the
appropriate broadcasting will be done.
::
>>> d1 = DateTime('2011:200:00:00:00')
>>> d2 = d1 + 4.25
>>> d2.date
'2011:204:06:00:00.000'
>>> d2 - d1
4.25
>>> import numpy as np
>>> d3 = d1 + np.array([1,2,3])
>>> d3.date
array(['2011:201:00:00:00.000', '2011:202:00:00:00.000',
'2011:203:00:00:00.000'],
dtype='|S21')
>>> (d3 + 7).year_doy
array(['2011:208', '2011:209', '2011:210'],
dtype='|S8')
Fast conversion functions
-------------------------
The DateTime class does full validation and format-detection of input
values. In cases where this is not necessary a substantial improvement in
speed (factor of 4 to 12) can be obtained using functions that skip the
validation and format detection. See the documentation for
:func:`~Chandra.Time.date2secs`, :func:`~Chandra.Time.secs2date`, and
:func:`~Chandra.Time.convert_vals`.
::
>>> from Chandra.Time import date2secs, secs2date, convert_vals
>>> date2secs('2001:001:01:01:01')
94698125.18399999
>>> dates = secs2date([0, 1e8, 2e8])
>>> dates
array(['1997:365:23:58:56.816', '2001:062:09:45:35.816', '2004:124:19:32:15.816'],
dtype='|S21')
>>> date2secs(dates)
array([ 0.00000000e+00, 1.00000000e+08, 2.00000000e+08])
>>> convert_vals(dates, 'date', 'mjd')
array([ 50813.9992687 , 51971.40666454, 53128.81407194])
>>> convert_vals(dates, 'date', 'secs')
array([ 0.00000000e+00, 1.00000000e+08, 2.00000000e+08])
Input and output time system
----------------------------
Currently the object-oriented interface does not allow you to adjust the
input or output time system. If you really need to do this, use the package
function convert()::
>>> import Chandra.Time
>>> Chandra.Time.convert(53614.0,
... fmt_in='mjd',
... sys_in='tt',
... fmt_out='caldate',
... sys_out='tai')
'2005Aug31 at 23:59:27.816'
The convert() routine will guess fmt_in and supply a default for sys_in if not
specified. As for DateTime() the input time can be a sequence or numpy array.
Time attributes
---------------
A ``DateTime`` object has additional attributes ``year``, ``mon``, ``day``,
``hour``, ``min``, ``sec``, ``yday``, and ``wday``. These provide the
year, month (1-12), day of month (1-31), hour (0-23), minute (0-59), second (0-60),
day of year (1-366), and day of week (0-6, where 0 is Monday).
These are all referenced to UTC time.
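For example (attribute values shown in the comments are illustrative)::
    >>> t = DateTime('2011:204:06:00:00')
    >>> month = t.mon     # 7 (July), since day-of-year 204 of 2011 is July 23
    >>> weekday = t.wday  # 5 (Saturday, counting from Monday = 0)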
Date when hour, minutes, seconds not provided
---------------------------------------------
A date like ``2020:001`` will be taken as ``2020:001:00:00:00`` since version 4.0.
Before 4.0, ``2020:001`` was ``2020:001:12:00:00``. To get the pre-4.0 behavior
use the following code::
from Chandra.Time import use_noon_day_start
# Set to use 12:00:00 globally from now on.
use_noon_day_start()
.. note::
You should do this globally once in your code at the beginning. There
is no way to revert to using 00:00:00 after calling ``use_noon_day_start``.
This impacts all code using ``DateTime``, not just the calls from your script.
"""
import re
from functools import wraps
import warnings
import time
import six
import numpy as np
# Time for dates specified without HMS. This was changed from '12:00:00' to
# '00:00:00' in version 4.0 of Chandra.Time. Call use_noon_day_start()
# for compatibility with the pre-4.0 behavior.
_DAY_START = '00:00:00'
def use_noon_day_start():
"""Set global default so date with no hours, min, sec uses 12:00:00.
A date like 2020:001 will be taken as 2020:001:00:00:00 since version 4.0.
Before 4.0, 2020:001 was 2020:001:12:00:00. To get the pre-4.0 behavior
use the following code.
NOTE: you should do this globally once in your code at the beginning. There
is no way to revert to using 00:00:00 after calling ``use_noon_day_start``.
::
from Chandra.Time import use_noon_day_start
# Set to use 12:00:00 globally from now on.
use_noon_day_start()
"""
global _DAY_START
_DAY_START = '12:00:00'
def override__dir__(f):
"""
When overriding a __dir__ method on an object, you often want to
include the "standard" members on the object as well. This
decorator takes care of that automatically, and all the wrapped
function needs to do is return a list of the "special" members
that wouldn't be found by the normal Python means.
Example
-------
@override__dir__
def __dir__(self):
return ['special_method1', 'special_method2']
This method is copied from astropy.utils.compat.misc.
"""
# http://bugs.python.org/issue12166
@wraps(f)
def override__dir__wrapper(self):
members = set(object.__dir__(self))
members.update(f(self))
return sorted(members)
return override__dir__wrapper
class TimeAttribute(object):
def __init__(self, attr):
self.attr = attr
def __get__(self, instance, owner):
return instance.time_attributes[self.attr]
class TimeStyle(object):
def __init__(self,
name,
ax3_fmt,
ax3_sys,
match_expr=None,
match_func=lambda x, y: re.match(x, y).group(),
match_err=AttributeError,
postprocess=None,
preprocess=None,
dtype=None,
):
self.name = name
self.match_expr = match_expr
self.match_func = match_func
self.match_err = match_err
self.ax3_fmt = ax3_fmt
self.ax3_sys = ax3_sys
self.postprocess = postprocess
self.preprocess = preprocess
self.dtype = dtype
def match(self, time):
try:
self.time_in = self.match_func(self.match_expr, time)
return True
except self.match_err:
pass
return False
T1998 = 883612736.816 # Seconds from 1970:001:00:00:00 (UTC) to 1998-01-01T00:00:00 (TT)
RE = {'float': r'[+-]?(?:\d+[.]?\d*|[.]\d+)(?:[dDeE][+-]?\d+)?$',
'date': r'^(\d{4}):(\d{3}):(\d{2}):(\d{2}):(\d{2})(\.\d*)?$',
'year_doy': r'^(\d{4}):(\d{3})$',
'caldate': r'^\d{4}\w{3}\d{1,2}\s+at\s+\d{1,2}:\d{1,2}:\d{1,2}(\.\d*)?$',
'greta': r'^(\d{4})(\d{3})\.(\d{2})?(\d{2})?(\d{2})?(\d+)?$',
'fits': r'^\d{4}-\d{1,2}-\d{1,2}T\d{1,2}:\d{1,2}:\d{1,2}(\.\d*)?$',
'year_mon_day': r'^\d{4}-\d{1,2}-\d{1,2}$',
'iso': r'^\d{4}-\d{1,2}-\d{1,2} \d{1,2}:\d{1,2}:\d{1,2}(\.\d*)?$',
}
# Conversions for greta format
def greta_to_date(date_in):
# Force date_in string to have 9 digits of precision to represent
# hhmmssfff (where fff is milliseconds within the second)
date_in = '{:.9f}'.format(float(date_in))
m = re.match(RE['greta'], date_in)
out = '%s:%s:%s:%s:%s' % m.groups()[0:5]
if m.group(6) is not None:
out += '.%s' % m.group(6)
return out
def date_to_greta(date_in):
m = re.match(RE['date'], date_in)
out = '%s%s.%s%s%s' % m.groups()[0:5]
if m.group(6) is not None:
frac = m.group(6).replace('.', '')
out += frac
return out
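# Round-trip example (values taken from the module docstring above):
#   greta_to_date('2007122.01020340') -> '2007:122:01:02:03.400'
#   date_to_greta('2007:122:01:02:03.400') -> '2007122.010203400'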
# Conversions for frac_year format
_year_secs = {} # Start and end secs for a year
def year_start_end_secs(year):
return (DateTime('%04d:001:00:00:00' % year).secs,
DateTime('%04d:001:00:00:00' % (year + 1)).secs)
def frac_year_to_secs(frac_year):
frac_year = float(frac_year)
year = int(frac_year)
s0, s1 = _year_secs.setdefault(year, year_start_end_secs(year))
return repr((frac_year - year) * (s1 - s0) + s0)
def secs_to_frac_year(secs):
year = int(DateTime(secs).date[:4])
s0, s1 = _year_secs.setdefault(year, year_start_end_secs(year))
return (float(secs) - s0) / (s1 - s0) + year
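# Example: frac_year 2001.5 corresponds to the instant halfway (in elapsed
# seconds) between 2001:001:00:00:00 and 2002:001:00:00:00.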
def raise_(r):
raise r
def mx_DateTime_ISO_ParseDateTime(t):
try:
import mx.DateTime
warnings.warn('mxDateTime format will be removed soon, ask TLA for migration options')
return mx.DateTime.ISO.ParseDateTime(t)
except ImportError:
raise ValueError('mxDateTime format is unavailable, ask TLA for migration options')
time_styles = [TimeStyle(name='fits',
match_expr=RE['fits'],
ax3_fmt='f3',
ax3_sys='t',
dtype='S23',
),
TimeStyle(name='year_mon_day',
match_expr=RE['year_mon_day'],
ax3_fmt='f3',
ax3_sys='u',
preprocess=lambda t: t + 'T' + _DAY_START,
postprocess=lambda t: re.sub(r'T\d{2}:\d{2}:\d{2}\.\d+$', '', t),
),
TimeStyle(name='relday',
match_expr=r'^[+-]' + RE['float'] + '$',  # signed number of days relative to the current time
ax3_fmt='s',
ax3_sys='u',
preprocess=lambda x: str(time.time() + float(x) * 86400.0 - T1998),
postprocess=lambda x: (float(x) + T1998 - time.time()) / 86400.0,
),
TimeStyle(name='greta',
match_expr=RE['greta'],
match_func=lambda f, t: ((float(t) < 2099001.000000 or raise_(ValueError))
and re.match(f, t).group()),
match_err=(AttributeError, ValueError),
ax3_fmt='d3',
ax3_sys='u',
preprocess=greta_to_date,
postprocess=date_to_greta,
),
TimeStyle(name='secs',
match_expr='^' + RE['float'] + '$',
ax3_fmt='s',
ax3_sys='m',
postprocess=float,
dtype=np.float64,
),
TimeStyle(name='frac_year',
match_expr='^' + RE['float'] + '$',
ax3_fmt='s',
ax3_sys='m',
preprocess=frac_year_to_secs,
postprocess=secs_to_frac_year,
),
TimeStyle(name='unix',
match_expr='^' + RE['float'] + '$',
ax3_fmt='s',
ax3_sys='u',
preprocess=lambda x: repr(float(x) - T1998),
postprocess=lambda x: float(x) + T1998,
),
TimeStyle(name='iso',
match_expr=RE['iso'],
ax3_fmt='f3',
ax3_sys='u',
preprocess=lambda t: t.replace(' ', 'T'),
postprocess=lambda t: t.replace('T', ' '),
),
TimeStyle(name='mxDateTime',
match_expr=RE['iso'],
ax3_fmt='f3',
ax3_sys='u',
preprocess=lambda t: t.replace(' ', 'T'),
postprocess=mx_DateTime_ISO_ParseDateTime,
),
TimeStyle(name='caldate',
match_expr=RE['caldate'],
ax3_fmt='c3',
ax3_sys='u',
dtype='S25',
),
TimeStyle(name='date',
match_expr=RE['date'],
ax3_fmt='d3',
ax3_sys='u',
dtype='S21',
),
TimeStyle(name='year_doy',
match_expr=RE['year_doy'],
ax3_fmt='d3',
ax3_sys='u',
preprocess=lambda t: t + ':' + _DAY_START,
postprocess=lambda t: re.sub(r':\d{2}:\d{2}:\d{2}\.\d+$', '', t),
),
TimeStyle(name='jd',
match_expr='^' + RE['float'] + '$',
ax3_fmt='j',
ax3_sys='u',
postprocess=float,
dtype=np.float64,
),
TimeStyle(name='mjd',
match_expr='^' + RE['float'] + '$',
ax3_fmt='m',
ax3_sys='u',
postprocess=float,
dtype=np.float64,
),
TimeStyle(name='numday',
# DDDD:hh:mm:ss.ss.
match_expr=r'^\d{1,4}:\d{1,2}:\d{1,2}:\d{1,2}(\.\d*)?$',
ax3_fmt='n3',
ax3_sys='u',
),
TimeStyle(name='plotdate',
match_expr='^' + RE['float'] + '$',
ax3_fmt='j',
ax3_sys='u',
preprocess=lambda x: repr(float(x) + 1721424.5),
postprocess=lambda x: float(x) - 1721424.5,
),
]
time_system = {'met': 'm', # MET Mission Elapsed Time ("m")
'tt': 't', # TT Terrestrial Time ("t")
'tai': 'a', # TAI International Atomic Time ("ta" or "a")
'utc': 'u', # UTC Coordinated Universal Time ("u")
}
# Preloaded methods go here.
class ChandraTimeError(ValueError):
"""Exception class for bad input values to Chandra.Time"""
def _make_array(val):
"""
Take ``val`` and convert it to an array that is at least 1-d, converting
integer input values to float64.
Returns
-------
val, val_ndim: ndarray, int
Array version of ``val`` and the number of dims in original.
"""
val = np.array(val)
val_ndim = val.ndim # remember original ndim
if val.ndim == 0:
val = np.asarray([val])
# Allow only string or float arrays as input (XXX datetime later...)
if val.dtype.kind == 'i':
val = np.asarray(val, dtype=np.float64)
return val, val_ndim
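# For example (illustrative): _make_array(5.0) returns (array([5.]), 0) and
# _make_array([1, 2]) returns (array([1., 2.]), 1).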
def convert_vals(vals, format_in, format_out):
"""
Convert ``vals`` from the input ``format_in`` to the output format
``format_out``. This does **no input validation** and thus runs much faster
than the corresponding DateTime() conversion. Be careful because invalid
inputs can give unpredictable results.
The input ``vals`` can be a single (scalar) value, a Python list or a numpy
array. The output data type is determined by the output format (for example
float64 for 'secs' and a fixed-width string for 'date').
The input and output format should be one of the following DateTime
format names: 'secs', 'date', 'jd', 'mjd', 'fits', 'caldate'.
The function returns the converted time as either a scalar or a numpy
array, depending on the input ``vals``.
:param vals: input values (scalar, list, array)
:param format_in: input format (e.g. 'secs', 'date', 'jd', ..)
:param format_out: output format (e.g. 'secs', 'date', 'jd', ..)
:returns: converted values as either scalar or numpy array
"""
from . import _axTime3 as axTime3
def get_style(fmt):
# Only the styles with a dtype attribute can be converted using this function.
ok_styles = [x for x in time_styles if x.dtype]
for time_style in ok_styles:
if time_style.name == fmt:
return time_style.ax3_sys, time_style.ax3_fmt, time_style.dtype
else:
raise ValueError('Error - specified format {} is not an allowed value {}'
.format(fmt, [x.name for x in ok_styles]))
sys_in, fmt_in, dtype_in = get_style(format_in)
sys_out, fmt_out, dtype_out = get_style(format_out)
vals, ndim = _make_array(vals)
# Allow passing bytes to axTime3 by converting to string here. This is
# actually silly since later in axTime3.convert_time() it gets encoded back
# to ASCII bytes. But since this package is largely deprecated we take the
# performance hit in the interest of simpler code.
if vals.dtype.kind == 'S':
vals = np.char.decode(vals, 'ascii')
# If the input is already string-like then pass straight to convert_time.
# Otherwise convert to string with repr().
if vals.dtype.kind == 'U':
outs = [axTime3.convert_time(val, sys_in, fmt_in, sys_out, fmt_out)
for val in vals.flatten()]
else:
outs = [axTime3.convert_time(repr(val), sys_in, fmt_in, sys_out, fmt_out)
for val in vals.flatten()]
if (six.PY3
and isinstance(dtype_out, six.string_types)
and dtype_out.startswith('S')):
dtype_out = 'U' + dtype_out[1:]
outs = np.array(outs, dtype=dtype_out)
from __future__ import division
from __future__ import print_function
#TODO: there are multiple implementations of functions like _apply_by_file_index. these should be consolidated into one
#common function that is used and called multiple times. In addition, aggregator and transform functions that are used
#across apply_by_file wrappers should be shared (rather than defined multiple times). We could also call_apply_by_file_index
#"groupby" to conform to the pandas style. e.g. bo.groupby(session) returns a generator whose produces are brain objects
#each of one session. we could then use bo.groupby(session).aggregate(xform) to produce a list of objects, where each is
#comprised of the xform applied to the brain object containing one session worth of data from the original object.
import copy
import os
import numpy.matlib as mat
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import imageio
import nibabel as nib
import hypertools as hyp
import shutil
import warnings
from nilearn import plotting as ni_plt
from nilearn import image
from nilearn.input_data import NiftiMasker
from scipy.stats import kurtosis, zscore, pearsonr
from scipy.spatial.distance import pdist
from scipy.spatial.distance import cdist
from scipy.spatial.distance import squareform
from scipy.special import logsumexp
from scipy import linalg
from scipy.ndimage.interpolation import zoom
try:
from itertools import zip_longest
except:
from itertools import izip_longest as zip_longest
def _std(res=None):
"""
Load a Nifti image of the standard MNI 152 brain at the given resolution
Parameters
----------
res : int or float or None
If int or float: (for cubic voxels) or a list or array of 3D voxel dimensions
If None, returns loaded gray matter masked brain
Returns
----------
results : Nifti1Image
Nifti image of the standard brain
"""
from .nifti import Nifti
from .load import load
std_img = load('std')
if res:
return _resample_nii(std_img, res)
else:
return std_img
def _gray(res=None):
"""
Load a Nifti image of the gray matter masked MNI 152 brain at the given resolution
Parameters
----------
res : int or float or None
If int or float: (for cubic voxels) or a list or array of 3D voxel dimensions
If None, returns loaded gray matter masked brain
Returns
----------
results : Nifti1Image
Nifti image of gray masked brain
"""
from .nifti import Nifti
from .load import load
gray_img = load('gray')
threshold = 100
gray_data = gray_img.get_data()
gray_data[np.isnan(gray_data) | (gray_data < threshold)] = 0
if np.iterable(res) or np.isscalar(res):
return _resample_nii(Nifti(gray_data, gray_img.affine), res)
else:
return Nifti(gray_data, gray_img.affine)
def _resample_nii(x, target_res, precision=5):
"""
Resample a Nifti image to have the given voxel dimensions
Parameters
----------
x : Nifti1Image
Input Nifti image (a nibel Nifti1Image object)
target_res : int or float or None
Int or float (for cubic voxels) or a list or array of 3D voxel dimensions
precision : int
Number of decimal places in affine transformation matrix for resulting image (default: 5)
Returns
----------
results : Nifti1Image
Re-scaled Nifti image
"""
from .nifti import Nifti
if np.any(np.isnan(x.get_data())):
img = x.get_data()
img[np.isnan(img)] = 0.0
x = nib.nifti1.Nifti1Image(img, x.affine)
res = x.header.get_zooms()[0:3]
scale = np.divide(res, target_res).ravel()
target_affine = x.affine
target_affine[0:3, 0:3] /= scale
target_affine = np.round(target_affine, decimals=precision)
# correct for 1-voxel shift
target_affine[0:3, 3] -= np.squeeze(np.multiply(np.divide(target_res, 2.0), np.sign(target_affine[0:3, 3])))
target_affine[0:3, 3] += np.squeeze(np.sign(target_affine[0:3, 3]))
if len(scale) < np.ndim(x.get_data()):
assert np.ndim(x.get_data()) == 4, 'Data must be 3D or 4D'
scale = np.append(scale, x.shape[3])
z = zoom(x.get_data(), scale)
try:
z[z < 1e-5] = np.nan
except:
pass
return Nifti(z, target_affine)
def _apply_by_file_index(bo, xform, aggregator):
"""
Session dependent function application and aggregation
Parameters
----------
bo : Brain object
Contains data
xform : function
The function to apply to the data matrix from each filename
aggregator: function
Function for aggregating results across multiple iterations
Returns
----------
results : numpy ndarray
Array of aggregated results
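Example (illustrative; ``bo`` is assumed to be an already-loaded Brain object)::
    xform = lambda b: np.array([b.data.shape[0]])    # samples per session
    aggregator = lambda x, y: np.concatenate([x, y])
    samples_per_session = _apply_by_file_index(bo, xform, aggregator)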
"""
for idx, session in enumerate(bo.sessions.unique()):
session_xform = xform(bo.get_slice(sample_inds=np.where(bo.sessions == session)[0], inplace=False))
if idx == 0:
results = session_xform
else:
results = aggregator(results, session_xform)
return results
def _kurt_vals(bo):
"""
Function that calculates maximum kurtosis values for each channel
Parameters
----------
bo : Brain object
Contains data
Returns
----------
results: 1D ndarray
Maximum kurtosis across sessions for each channel
"""
sessions = bo.sessions.unique()
results = list(map(lambda s: kurtosis(bo.data[(s==bo.sessions).values]), sessions))
return np.max(np.vstack(results), axis=0)
import numpy as np
from itertools import product
from sklearn.model_selection import train_test_split
def get_mask(pairs, shape, row_g2i, col_g2i, sym=True):
'''
Convert a list of pairs into a boolean indicator matrix ``m``, where each
(a, b) in ``pairs`` is marked at m[ai, bj] and m[bi, aj] where possible.
If sym is False, a pair is indicated only once, so either m[ai, bj] or
m[bi, aj] is True, but not both.
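Example (illustrative)::
    # rows = ['g1', 'g2'], cols = ['g2', 'g3']
    # row_g2i = {'g1': 0, 'g2': 1}, col_g2i = {'g2': 0, 'g3': 1}
    # pairs = [('g1', 'g2'), ('g2', 'g3')] with sym=True marks
    # mask[0, 0] (g1 row, g2 col) and mask[1, 1] (g2 row, g3 col).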
'''
mask = np.zeros(shape, dtype=bool)
for i, (a, b) in enumerate(pairs):
inserted=False
if a in row_g2i and b in col_g2i:
mask[row_g2i[a], col_g2i[b]] = True
inserted = True
if not sym and inserted:
assert(inserted)
continue
if a in col_g2i and b in row_g2i:
mask[row_g2i[b], col_g2i[a]] = True
inserted = True
assert(inserted)
return mask
def get_eval_pair_list(pairs, row_g2i, col_g2i, gi_data):
values = gi_data['values']
pairlist_1 = []
pairlist_2 = []
for A, B in pairs:
A_r = row_g2i.get(A)
B_c = col_g2i.get(B)
A_c = col_g2i.get(A)
B_r = row_g2i.get(B)
if (A_r is not None) and \
(B_c is not None) and \
(A_c is not None) and \
(B_r is not None):
v_ab = values[A_r, B_c]
v_ba = values[B_r, A_c]
if not (np.isnan(v_ab) or np.isnan(v_ba)):
pairlist_1.append((A_r, B_c))
pairlist_2.append((B_r, A_c))
else:
pass
elif (A_r is not None) and \
(B_c is not None):
if not np.isnan(values[A_r, B_c]):
pairlist_1.append((A_r, B_c))
pairlist_2.append((A_r, B_c))
else:
pass
elif (A_c is not None) and \
(B_r is not None):
if not np.isnan(values[B_r, A_c]):
pairlist_1.append((B_r, A_c))
pairlist_2.append((B_r, A_c))
else:
pass
else:
continue
pairlist_1 = tuple(zip(*pairlist_1))
pairlist_2 = tuple(zip(*pairlist_2))
return pairlist_1, pairlist_2
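# Note on get_eval_pair_list (illustrative): each returned pairlist is a
# (row_indices, col_indices) tuple, so values[pairlist_1] and
# values[pairlist_2] give the two orientations of every evaluable pair.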
def gi_train_test_split_w_pairlists(gi_data, hf):
'''
Sample train/test set but return lists of indices whose indexed values should be
averaged for evaluation
[(A,B), ...], [(B,A),...]
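Usage sketch (hypothetical variable names)::
    train_X, test_X, (p1, p2) = gi_train_test_split_w_pairlists(gi_data, hf=0.2)
    # average the two orientations of each held-out pair before scoring
    y_true = 0.5 * (test_X[p1] + test_X[p2])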
'''
rows = gi_data['rows']
cols = gi_data['cols']
values = gi_data['values']
col_g2i = dict((n, i) for i, n in enumerate(cols))
row_g2i = dict((n, i) for i, n in enumerate(rows))
rowset = set(rows)
colset = set(cols)
pairs = product(rows, cols)
pairs = set(frozenset((a,b)) for a,b in pairs if a != b)
pairs = [tuple(p) for p in pairs]
train_pairs, test_pairs = train_test_split(pairs, test_size=hf)
test_mask = get_mask(test_pairs, values.shape, row_g2i, col_g2i)
# This implements train/test over *all* possible pairs,
# in expectation is equivalent to CV over observed pairs
value_mask = ~np.isnan(values)
test_mask = np.logical_and(value_mask, test_mask)
train_mask = np.logical_and(value_mask, ~test_mask)
train_X = np.where(train_mask, values, np.nan)
test_X = np.where(test_mask, values, np.nan)
eval_pairs1, eval_pairs2 = get_eval_pair_list(test_pairs, row_g2i, col_g2i, gi_data)
assert(np.all(~np.isnan(test_X[test_mask])))
assert(np.all(~np.isnan(test_X[eval_pairs1[0], eval_pairs1[1]])))
assert(np.all(~np.isnan(test_X[eval_pairs2[0], eval_pairs2[1]])))
return train_X, test_X, (eval_pairs1, eval_pairs2)
def sym_train_test_split(gi_data, hf):
values = gi_data['values']
assert(np.allclose(values, values.T, equal_nan=True))
"""
Functions for aeri retrievals.
"""
import numpy as np
from scipy.optimize import brentq
from act.retrievals.irt import irt_response_function, sum_function_irt
def aeri2irt(
aeri_ds,
wnum_name='wnum',
rad_name='mean_rad',
hatch_name='hatchOpen',
tolerance=0.1,
temp_low=150.0,
temp_high=320.0,
maxiter=200,
):
"""
This function will integrate over the correct wavenumber values to produce
the effective IRT temperature.
As a note from the ARM IRT Instrument Handbook
A positive bias of the sky temperature is exhibited by the downwelling IRT,
compared to the AERI, during clear-sky conditions when the sky temperature
is less than ~180K. The effect depends on the characteristics of the
individual IRT and the internal reference temperature of the IRT. The
greatest difference compared to AERI will occur when the sky is very clear,
dry, and cold and the ambient temperature is relatively hot, maximizing the
difference in temperature between the sky and instrument, and the
calibration of the IRT at the lower limit of 223K was not performed
accurately. This bias is especially apparent at high-latitude sites
(e.g., NSA, OLI, and AWR).
https://www.arm.gov/publications/tech_reports/handbooks/irt_handbook.pdf
Author - <NAME>
Parameters
----------
aeri_ds : Xarray Dataset Object
The Dataset object containing AERI data.
wnum_name : str
The variable name for coordinate dimention of wave number Xarray Dataset.
hatch_name : str or None
The variable name for hatch status. If set to None will not try to set
when hatch is not opent to NaN.
rad_name : str
The variable name for mean radiance in Xarray Dataset.
tolerance : float
The tolerance value to try and match for returned temperature.
temp_low : float
The initial low value to use in the brentq function to invert radiances.
temp_high : float
The initial high value to use in the brentq function to invert radiances.
maxiter : int
The maximum number of iterations to use with the inversion process.
Prevents runaway processes.
Returns
-------
obj : Xarray Dataset Object or None
The aeri_ds Dataset with new DataArray of temperatures added under
variable name 'aeri_irt_equiv_temperature'.
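Example (sketch; assumes ``aeri_ds`` is an already-loaded AERI xarray Dataset)::
    ds = aeri2irt(aeri_ds, tolerance=0.05)
    irt_temps = ds['aeri_irt_equiv_temperature'].values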
"""
# Get data values
rf_wnum, rf = irt_response_function()
wnum = aeri_ds[wnum_name].values
mean_rad = aeri_ds[rad_name].values
# Pull out AERI data for correct wavenumbers and apply response function
index = np.where((wnum >= (rf_wnum[0] - 0.001)) & (wnum <= (rf_wnum[-1] + 0.001)))[0]
if index.size == 0:
raise ValueError('No wavenumbers match for aeri2irt')
wnum = wnum[index]
mean_rad = mean_rad[:, index]
# If the wavenumbers in AERI data are not close enough to response function
# match the wavenumbers and adjust.
atol = 0.001
if not np.all(np.isclose(wnum, rf_wnum, atol=atol)):
index_wnum = []
index_rf = []
for ii in range(wnum.size):
idx = (np.abs(wnum[ii] - rf_wnum)).argmin()
if np.isclose(wnum[ii], rf_wnum[idx], atol=atol):
import torch
from torch.utils.data import Dataset
import numpy as np
from skimage.io import imread
class WindDataset(Dataset):
def __init__(
self,
images,
folder,
transform,
load_n=1,
type_agg='simple',
wind_speed=None):
self.images = images
self.folder = folder
self.wind_speed = wind_speed
self.transform = transform
self.type_agg = type_agg
self.load_n = load_n
def __len__(self):
return(len(self.images))
def get_path_load(self, idx):
image_id = self.images[idx]
image_order = int(image_id.split('_')[-1].split('.')[0])
image_storm = image_id.split('_')[0]
last_prev_idx = max(0, image_order-self.load_n+1)
prev_load = [*range(last_prev_idx, image_order+1)]
prev_load = [last_prev_idx] * (self.load_n-len(prev_load)) + prev_load
prev_load = [self.folder + '/' + image_storm+'_'+str(x).zfill(3)+'.jpg' for x in prev_load]
assert(len(prev_load) == self.load_n)
return prev_load
def __getitem__(self, idx):
"""Will load the mask, get random coordinates around/with the mask,
load the image by coordinates
"""
# get image id and image order
images_to_load = self.get_path_load(idx)
image = [imread(image_path) for image_path in images_to_load]
image = np.stack(image, axis=0) / 255
if self.type_agg == 'minmaxmean':
im_max = image.max(axis=0)
im_mean = image.mean(axis=0)
im_min = image.min(axis=0)
image = np.stack([im_max, im_mean, im_min], axis=0)
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.linalg import sqrtm
from collections import OrderedDict
from GLM.GLM_Model.PoissonVariational import PoissonVariational
from GLM.GLM_Model.PoissonMAP import PoissonMAP
from GLM.GLM_Model import GLM_Model_GP, GLM_Model_MAP, GP_Covariate, MAP_Covariate
from Utils import utils
class Model_Runner:
def __init__(self, params):
self.params = params
self.poisson_model = None
self.variational_model = None
self.map_model = None
self.ml_model = None
self.data_df = None
self.hist_data = None
self.stim_data = None
def initialize_design_matrices_demo(self, data_df):
self.data_df = data_df
self.covariate_data = OrderedDict()
if 'History' not in self.data_df.index:
raise KeyError('"History" needs to be a data field')
else:
self.spike_data = self.data_df.loc['History', 'data']
for covariate in self.data_df.index:
self.covariate_data[covariate] = self.data_df.loc[covariate, 'data']
self.params.num_test_trials = np.floor(self.spike_data.shape[0] * 1e-2 * self.params.percent_test_trials).astype(int)
def create_variational_covariates_demo(self):
self.kernel_prep_dict = {'chol': ['Kuu'], 'inv': ['Kuu']}
self.glm_gp = GLM_Model_GP.GLM_Model_GP(self.params)
self.glm_gp.add_spike_history(self.spike_data)
def add_covariate(self, covariate):
self.glm_gp.add_covariate(covariate)
def train_demo(self):
self.variational_model = PoissonVariational(self.params, self.data_df, self.glm_gp, self.kernel_prep_dict)
self.variational_model.initialize_variational_model()
self.variational_model.train_variational_parameters()
def initialize_design_matrices(self):
self.data_df = pd.read_pickle(self.params.expt_problem_data_path)
self.hist_data = self.data_df.loc['History', 'data']
self.spike_data = self.data_df.loc['History', 'data']
self.stim1_data = self.data_df.loc['Stim1', 'data']
self.stim2_data = self.data_df.loc['Stim2', 'data']
self.stim3_data = self.data_df.loc['Stim3', 'data']
self.hist_data_b = self.data_df.loc['Coupling_b', 'data']
self.hist_data_c = self.data_df.loc['Coupling_c', 'data']
self.params.num_test_trials = np.floor(self.hist_data.shape[0] * 1e-2 * self.params.percent_test_trials).astype(int)
def create_variational_covariates(self):
kernel_prep_dict = {'chol': ['Kuu'], 'inv': ['Kuu']}
# create glm object
glm_gp = GLM_Model_GP.GLM_Model_GP(self.params)
glm_gp.add_spike_history(self.spike_data)
# history filter parameters
hist_etc_params = {'use_exp_mean': True,
'use_basis_form': False}
hist_bounds = {'m': [-np.inf, np.inf],
'r': [-np.inf, np.inf],
'u': [0, 0.25],
'alpha': [100, 100000],
'gamma': [100, 100000],
'sigma': [0.1, 15],
'kernel_epsilon_noise_std': [1e-4, 5],
'gain': [-15, -3],
'tau': [1e-4, 3e-3]
}
hist_time_params = {'filter_offset': 1,
'filter_duration': 110,
'time_plot_min': 1,
'time_plot_max': 115,
'inducing_pt_spacing_init': 2,
'is_hist': True}
hist_gp_params = {'alpha': [750.0, True],
'gamma': [1000.0, True],
'sigma': [np.sqrt(4), True],
'gain': [-5, False],
'tau': [1e-3, False],
'kernel_epsilon_noise_std': [1e-3, False],
'kernel_fn': [utils.decay_kernel_torch, False]}
hist = GP_Covariate.GP_Covariate(self.params, hist_etc_params, self.hist_data,
name='History',
use_bases=False)
hist.add_bounds_params(hist_bounds)
hist.add_gp_params(hist_gp_params)
hist.add_time_init(hist_time_params)
glm_gp.add_covariate(hist)
##########################################
# Stim1
##########################################
cue_etc_params = {'use_exp_mean': False,
'use_basis_form': False}
cue_bounds = {'m': [-np.inf, np.inf],
'r': [-np.inf, np.inf],
'u': [0, 2.0],
'alpha': [50, 5000],
'gamma': [10, 3000],
'sigma': [0.1, 15],
'b': [300e-3, 800e-3],
'kernel_epsilon_noise_std': [1e-4, 1.5]
}
cue_time_params = {'filter_offset': 0,
'filter_duration': 1000,
'time_plot_min': 0,
'time_plot_max': 1100,
'inducing_pt_spacing_init': 15}
cue_gp_params = {'alpha': [100.0, True], 'gamma': [600.0, True], 'sigma': [np.sqrt(4), True],
'b': [500e-3, True],
'kernel_epsilon_noise_std': [1e-3, False],
'kernel_fn': [utils.decay_kernel_torch, False]}
cue = GP_Covariate.GP_Covariate(self.params, cue_etc_params, self.stim1_data, name='Stim1')
cue.add_bounds_params(cue_bounds)
cue.add_gp_params(cue_gp_params)
cue.add_time_init(cue_time_params)
glm_gp.add_covariate(cue)
##########################################
# Stim2
##########################################
cue_etc_params = {'use_exp_mean': False,
'use_basis_form': False}
cue_bounds = {'m': [-np.inf, np.inf],
'r': [-np.inf, np.inf],
'u': [-300e-3, 300e-3],
'alpha': [50, 5000],
'gamma': [10, 3000],
'sigma': [0.1, 15],
'b': [0e-3, 200e-3],
'kernel_epsilon_noise_std': [1e-4, 1.5]
}
cue_time_params = {'filter_offset': -250,
'filter_duration': 500,
'time_plot_min': -300,
'time_plot_max': 300,
'inducing_pt_spacing_init': 15}
cue_gp_params = {'alpha': [100.0, True], 'gamma': [600.0, True], 'sigma': [np.sqrt(4), True],
'b': [100e-3, True],
'kernel_epsilon_noise_std': [1e-3, False],
'kernel_fn': [utils.decay_kernel_torch, False]}
cue = GP_Covariate.GP_Covariate(self.params, cue_etc_params, self.stim2_data, name='Stim2')
cue.add_bounds_params(cue_bounds)
cue.add_gp_params(cue_gp_params)
cue.add_time_init(cue_time_params)
glm_gp.add_covariate(cue)
##########################################
# Stim1
##########################################
cue_etc_params = {'use_exp_mean': False,
'use_basis_form': False}
cue_bounds = {'m': [-np.inf, np.inf],
'r': [-np.inf, np.inf],
'u': [0, 500e-3],
'alpha': [50, 5000],
'gamma': [10, 3000],
'sigma': [0.1, 15],
'b': [100e-3, 500e-3],
'kernel_epsilon_noise_std': [1e-4, 1.5]
}
cue_time_params = {'filter_offset': 0,
'filter_duration': 400,
'time_plot_min': 0,
'time_plot_max': 500,
'inducing_pt_spacing_init': 15}
cue_gp_params = {'alpha': [100.0, True], 'gamma': [600.0, True], 'sigma': [np.sqrt(4), True],
'b': [250e-3, True],
'kernel_epsilon_noise_std': [1e-3, False],
'kernel_fn': [utils.decay_kernel_torch, False]}
cue = GP_Covariate.GP_Covariate(self.params, cue_etc_params, self.stim3_data, name='Stim3')
cue.add_bounds_params(cue_bounds)
cue.add_gp_params(cue_gp_params)
cue.add_time_init(cue_time_params)
glm_gp.add_covariate(cue)
##########################################
# Coupling a
##########################################
cue_etc_params = {'use_exp_mean': False,
'use_basis_form': False}
cue_bounds = {'m': [-np.inf, np.inf],
'r': [-np.inf, np.inf],
'u': [0, 200],
'alpha': [50, 10000],
'gamma': [10, 5000],
'sigma': [0.1, 15],
'b': [5e-3, 100e-3],
'kernel_epsilon_noise_std': [1e-4, 1.5]
}
cue_time_params = {'filter_offset': 1,
'filter_duration': 100,
'time_plot_min': 0,
'time_plot_max': 150,
'inducing_pt_spacing_init': 2}
cue_gp_params = {'alpha': [5000.0, True], 'gamma': [1000.0, True], 'sigma': [np.sqrt(9), True],
'b': [15e-3, True],
'kernel_epsilon_noise_std': [1e-3, False],
'kernel_fn': [utils.decay_kernel_torch, False]}
cue = GP_Covariate.GP_Covariate(self.params, cue_etc_params, self.hist_data_b, name='Coupling_b')
cue.add_bounds_params(cue_bounds)
cue.add_gp_params(cue_gp_params)
cue.add_time_init(cue_time_params)
glm_gp.add_covariate(cue)
##########################################
# Coupling b
##########################################
cue_etc_params = {'use_exp_mean': False,
'use_basis_form': False}
cue_bounds = {'m': [-np.inf, np.inf],
'r': [-np.inf, np.inf],
'u': [0, 200],
'alpha': [50, 10000],
'gamma': [10, 5000],
'sigma': [0.1, 15],
'b': [5e-3, 100e-3],
'kernel_epsilon_noise_std': [1e-4, 1.5]
}
cue_time_params = {'filter_offset': 1,
'filter_duration': 100,
'time_plot_min': 0,
'time_plot_max': 150,
'inducing_pt_spacing_init': 2}
cue_gp_params = {'alpha': [5000.0, True], 'gamma': [1000.0, True], 'sigma': [np.sqrt(9), True],
'b': [30e-3, True],
'kernel_epsilon_noise_std': [1e-3, False],
'kernel_fn': [utils.decay_kernel_torch, False]}
cue = GP_Covariate.GP_Covariate(self.params, cue_etc_params, self.hist_data_c, name='Coupling_c',)
cue.add_bounds_params(cue_bounds)
cue.add_gp_params(cue_gp_params)
cue.add_time_init(cue_time_params)
glm_gp.add_covariate(cue)
self.variational_model = PoissonVariational(self.params, self.data_df, glm_gp, kernel_prep_dict)
self.variational_model.initialize_variational_model()
def create_map_covariates(self):
glm_map = GLM_Model_MAP.GLM_Model_MAP(self.params)
glm_map.add_spike_history(self.spike_data)
###################################
# History
###################################
hist_bounds = {'m': [-np.inf, np.inf],
'r': [0, np.inf]}
hist_bases_params = {'bases_fn': utils.create_nonlinear_raised_cos,
'duration': 150,
'num_bases': 20,
'bin_size': self.params.delta,
'start_point': 0,
'end_point': 60e-3,
'nl_offset': 1e-4,
'offset': 1,
'filter_duration': self.params.duration_hist,
'filter_offset': 1,
'time_plot_min': 1,
'time_plot_max': 100}
# hist = MAP_Covariate.MAP_Covariate(self.params, y, name='History', is_cov=False, is_hist=True)
hist = MAP_Covariate.MAP_Covariate(self.params, self.hist_data, name='History', is_cov=False, is_hist=True)
hist.add_bounds_params(hist_bounds)
hist.add_bases_params(hist_bases_params)
glm_map.add_covariate(hist)
# stimulus 1 parameters
cue_bounds = {'m': [-np.inf, np.inf],
'r': [0, 5]}
cue_bases_params = {'bases_fn': utils.create_nonlinear_raised_cos,
'num_bases': 15,
'duration': 1000, # self.params.duration_cov,
'bin_size': self.params.delta,
'end_point': 600e-3,
'start_point': 0,
'nl_offset': 1.3e-2,
'offset': 0,
'filter_duration': 1500,
'filter_offset': 0,
'time_plot_min': 0,
'time_plot_max': 1500}
cue = MAP_Covariate.MAP_Covariate(self.params, self.stim1_data, name='Stim1', is_cov=True, is_hist=False)
cue.add_bounds_params(cue_bounds)
cue.add_bases_params(cue_bases_params)
glm_map.add_covariate(cue)
######################
# Lick Init
######################
lick_init_bounds = {'m': [-np.inf, np.inf],
'r': [0, 2]}
lick_init_bases_params = {'bases_fn': utils.create_raised_cosine_basis,
'duration': 500,
'num_bases': 20,
'bin_size': self.params.delta,
'start_point': 1,
'end_point': 60e-3,
'nl_offset': 1e-4,
'offset': -250,
'filter_duration': 500,
'filter_offset': -250,
'time_plot_min': -250,
'time_plot_max': 250}
lick_init = MAP_Covariate.MAP_Covariate(self.params, self.stim2_data, name='Stim2', is_cov=True, is_hist=False)
lick_init.add_bounds_params(lick_init_bounds)
lick_init.add_bases_params(lick_init_bases_params)
glm_map.add_covariate(lick_init)
###################
# Lick Train
###################
lick_train_bounds = {'m': [-np.inf, np.inf],
'r': [0, 2]}
lick_train_bases_params = {'bases_fn': utils.create_raised_cosine_basis,
'duration': 500,
'num_bases': 20,
'bin_size': self.params.delta,
'start_point': 1,
'end_point': 60e-3,
'nl_offset': 1e-4,
'offset': 0,
'filter_duration': 500,
'filter_offset': 0,
'time_plot_min': 0,
'time_plot_max': 500}
lick_train = MAP_Covariate.MAP_Covariate(self.params, self.stim3_data, name='Stim3', is_cov=True, is_hist=False)
lick_train.add_bounds_params(lick_train_bounds)
lick_train.add_bases_params(lick_train_bases_params)
glm_map.add_covariate(lick_train)
###################################
# Coupling a
###################################
hist_bounds = {'m': [-np.inf, np.inf],
'r': [0, 2]}
hist_bases_params = {'bases_fn': utils.create_raised_cosine_basis,
'duration': 125,
'num_bases': 20,
'bin_size': self.params.delta,
'start_point': 0,
'end_point': 60e-3,
'nl_offset': 1e-4,
'offset': 1,
'filter_duration': 125,
'filter_offset': 1,
'time_plot_min': 1,
'time_plot_max': 125}
# hist = MAP_Covariate.MAP_Covariate(self.params, y, name='History', is_cov=False, is_hist=True)
hist = MAP_Covariate.MAP_Covariate(self.params, self.hist_data_b, name='Coupling_b', is_cov=False, is_hist=False)
hist.add_bounds_params(hist_bounds)
hist.add_bases_params(hist_bases_params)
glm_map.add_covariate(hist)
###################################
# Coupling b
###################################
hist_bounds = {'m': [-np.inf, np.inf],
'r': [0, 2]}
hist_bases_params = {'bases_fn': utils.create_raised_cosine_basis,
'duration': 125,
'num_bases': 20,
'bin_size': self.params.delta,
'start_point': 0,
'end_point': 60e-3,
'nl_offset': 1e-4,
'offset': 1,
'filter_duration': 125,
'filter_offset': 1,
'time_plot_min': 1,
'time_plot_max': 125}
# hist = MAP_Covariate.MAP_Covariate(self.params, y, name='History', is_cov=False, is_hist=True)
hist = MAP_Covariate.MAP_Covariate(self.params, self.hist_data_c, name='Coupling_c', is_cov=False, is_hist=False)
hist.add_bounds_params(hist_bounds)
hist.add_bases_params(hist_bases_params)
glm_map.add_covariate(hist)
self.map_model = PoissonMAP(self.params, self.data_df, glm_map)
self.map_model.initialize_model()
def create_ml_covariates(self):
glm_map = GLM_Model_MAP.GLM_Model_MAP(self.params)
# stimulus 1 parameters
stim1_bounds = {'m': [-np.inf, np.inf]}
stim1_bases_params = {'bases_fn': utils.create_raised_cosine_basis,
'num_bases': 10,
'duration': self.params.duration_cov,
'bin_size': self.params.delta,
'end_point': 125e-3,
'start_point': 0,
'nl_offset': 2e-3,
'offset': self.params.offset_cov,
'filter_duration': self.params.duration_cov,
'filter_offset': self.params.offset_cov}
stim1 = ML_Covariate.ML_Covariate(self.params, self.stim_data, name='Stimuli_1', is_cov=True, is_hist=False)
stim1.add_bounds_params(stim1_bounds)
stim1.add_bases_params(stim1_bases_params)
glm_map.add_covariate(stim1)
# history filter parameters
hist_bounds = {'m': [-np.inf, np.inf]}
hist_bases_params = {'bases_fn': utils.create_nonlinear_raised_cos,
'duration': 80,
'num_bases': 15,
'bin_size': self.params.delta,
'start_point': 0,
'end_point': 35e-3,
'nl_offset': 1e-4,
'offset': 1,
'filter_duration': self.params.duration_hist,
'filter_offset': self.params.offset_hist}
hist = ML_Covariate.ML_Covariate(self.params, self.hist_data, name='History', is_cov=False, is_hist=True)
hist.add_bounds_params(hist_bounds)
hist.add_bases_params(hist_bases_params)
glm_map.add_covariate(hist)
self.ml_model = PoissonMAP(self.params, self.data_df, glm_map, self.params.run_toy_problem)
self.ml_model.initialize_model()
def train_variational(self):
self.variational_model.train_variational_parameters()
def train_map(self):
self.map_model.train_map_parameters()
def train_ml(self):
self.ml_model.train_ml_parameters()
def _add_training_params(self):
pass
def train_model(self, model='variational'):
trained_params = self.poisson_model.train_variational()
def _get_ml_h_k_mu(self):
optimizer = Optimizer.Optimizer(self.params.gp_ml_opt, self.h.shape[0], b1=self.params.gp_ml_b1,
b2=self.params.gp_ml_b2, step_size=self.params.gp_ml_step_size)
for i in range(self.params.gp_ml_iter):
grad = self.X.T @ self.y - self.params.delta * self.X.T @ np.exp(self.X @ self.h + self.Y @ self.k)
update = optimizer.get_update(grad)
self.h = self.h + update # maximizing maximum likelihood
plt.plot(self.h, label='ml')
plt.plot(self.h_true, label='ground truth')
plt.title('ml estimate')
plt.show()
def plot_updates(self):
fig, axs = plt.subplots(self.h_evolution.shape[0] - 1, figsize=(10,60))
fig.suptitle('GP Filter Evolution', y=0.92)
for dx, (row, series) in enumerate(self.h_evolution.iloc[1:,:].iterrows()):
axs[dx].plot(series['filter'], label='gp', color='k')
axs[dx].plot(self.h_true, label='true', color='r')
axs[dx].fill_between(np.arange(series['filter'].shape[0]), series['filter'] - series['cov'],
series['filter'] + series['cov'], alpha=0.3, color='k')
axs[dx].plot(series['inducing'], np.zeros(series['inducing'].shape[0]), 'o', color='orange', label='inducing points')
axs[dx].legend()
self._set_filter_axs(axs[dx])
axs[dx].set_title(row)
plt.subplots_adjust(hspace=0.3)
fig.savefig('glm_data/gp_filter_evolution.pdf', dpi=300)
plt.show()
def _set_filter_axs(self, axs):
len = self.h_time.shape[0]
axs.set_xticks([i for i in np.arange(len + 1) if i % 50 == 0])
labels = [int(i * self.params.time_div) for i in self.h_time if (i * self.params.time_div) % 50 == 0]
labels.append(int(len / 2))
axs.set_xticklabels(labels)
# def fn_min(half_coeff, x, y, Kinv):
# temp = np.zeros(Kinv.shape[0])
# return -1 * (h.T @ (x.T @ y) - np.sum(np.exp(x @ h)) - 0.5 * h.T @ (Kinv @ h))
#
# def jac(half_coeff, X, y, Kinv):
# return X.T @ np.exp(X@h) - X.T @ y + Kinv @ h
#
# def hess(half_coeff, x, y, Kinv):
# return x.T @ np.diag(np.exp(x@h)) @ x
def callback(h_updated):
print('entered callback')
mult_exp_mat = np.load('mult_exp_mat.npy')
unused_exp = mult_exp_mat @ h_updated
cov = np.load('callback_var.npy')
# h_unused = np.random.multivariate_normal(unused_exp, cov)
h_unused = unused_exp
np.save('h_unused.npy', h_unused)
def fn_min(half_coeff, time, use_dx, unuse_dx, X, x_use, y, Kinv):
delta = 0.001
h = np.zeros(time.shape[0])
h[use_dx] = half_coeff
h[unuse_dx] = np.load('h_unused.npy')
obj = -1 * (h.T @ (X.T @ y) - delta*np.sum(np.exp(X @ h)) - 0.5 * half_coeff @ (Kinv @ half_coeff)) + time.shape[0] * np.log(delta)
print(obj)
return obj
def jac(half_coeff, time, use_dx, unuse_dx, X, x_use, y, Kinv):
delta = 0.001
h = np.zeros(time.shape[0])
h[use_dx] = half_coeff
h[unuse_dx] = np.load('h_unused.npy')
return delta*x_use.T @ np.exp(X@h) - x_use.T @ y + Kinv @ half_coeff
def hess(half_coeff, time, use_dx, unuse_dx, X, x_use, y, Kinv):
delta = 0.001
h = np.zeros(time.shape[0])
h[use_dx] = half_coeff
h[unuse_dx] = np.load('h_unused.npy')
hess = delta*x_use.T @ np.diag(np.exp(X@h)) @ x_use
# if not utils.isPD(hess):
# return utils.nearestPD(hess)
return hess
#
# u = 4
# unused_dx = [i for i in range(self.h_time.shape[0]) if i % u == 0]
# used_dx = [i for i in range(self.h_time.shape[0]) if i % u != 0]
# unused_time = self.h_time[unused_dx]
# hh_time = self.h_time[used_dx]
#
# h = self.h_true + 0.01*np.random.randn(self.h.shape[0])
# hh = h[used_dx]
# xx = self.X[:, used_dx]
# # kk = utils.decay_kernel(self.h_time.reshape(-1, 1),self.h_time.reshape(-1, 1), sigma_h=self.sigma_true,
# # alpha=self.alpha_true, gamma=self.gamma_true)[:,used_dx][used_dx,:]
# kk = ka.RBF(1).__call__(1000*hh_time.reshape(-1,1))
# kk_inv = np.linalg.inv(kk)
#
#
#
#
# # k_used_used = utils.decay_kernel(hh_time.reshape(-1, 1),hh_time.reshape(-1, 1), sigma_h=self.sigma_true,
# # alpha=self.alpha_true, gamma=self.gamma_true)
# k_used_used = ka.RBF(1).__call__(hh_time.reshape(-1,1)*1000, 1000*hh_time.reshape(-1,1))
# k_unused_used = ka.RBF(1).__call__(1000*unused_time.reshape(-1, 1),1000*hh_time.reshape(-1, 1))
# k_used_unused = ka.RBF(1).__call__(1000*hh_time.reshape(-1, 1), 1000*unused_time.reshape(-1, 1))
# k_unused_unused = ka.RBF(1).__call__(1000*unused_time.reshape(-1, 1), 1000*unused_time.reshape(-1, 1))
# k_unused_used = utils.decay_kernel(unused_time.reshape(-1, 1),hh_time.reshape(-1, 1), sigma_h=self.sigma_true,
# alpha=self.alpha_true, gamma=self.gamma_true)
# k_used_unused = utils.decay_kernel(hh_time.reshape(-1, 1),unused_time.reshape(-1, 1), sigma_h=self.sigma_true,
# alpha=self.alpha_true, gamma=self.gamma_true)
# k_unused_unused = utils.decay_kernel(unused_time.reshape(-1, 1),unused_time.reshape(-1, 1), sigma_h=self.sigma_true,
# alpha=self.alpha_true, gamma=self.gamma_true)
# time, use_dx, unuse_dx, X, x_use, y, Kinv
#
#
# u = 3
# r = 10
# h = np.copy(self.h_true) + 0.5*np.random.randn(self.h_true.shape[0])
# unuse_dx = [i for i in range(self.h_time.shape[0]) if i % u == 0]
# use_dx = [i for i in range(self.h_time.shape[0]) if i % u != 0]
# time = self.h_time
# hh_time = time[use_dx]
# unused_time = time[unuse_dx]
#
# K = utils.decay_kernel(hh_time.reshape(-1, 1),hh_time.reshape(-1, 1), sigma_h=self.sigma_true,
# alpha=self.alpha_true, gamma=self.gamma_true, noise_std=self.params.gp_noise_std)
# k_unused_used = utils.decay_kernel(unused_time.reshape(-1, 1),hh_time.reshape(-1, 1), sigma_h=self.sigma_true,
# alpha=self.alpha_true, gamma=self.gamma_true)
# k_used_unused = utils.decay_kernel(hh_time.reshape(-1, 1),unused_time.reshape(-1, 1), sigma_h=self.sigma_true,
# alpha=self.alpha_true, gamma=self.gamma_true)
# k_unused_unused = utils.decay_kernel(unused_time.reshape(-1, 1),unused_time.reshape(-1, 1), sigma_h=self.sigma_true,
# alpha=self.alpha_true, gamma=self.gamma_true, noise_std=self.params.gp_noise_std)
#
#
# X = np.copy(self.X)
# x_use = X[:,use_dx]
# y = np.copy(self.y)
# # y[y>0] = 1
#
# Kinv = scipy.linalg.inv(K)
# h_use = h[use_dx]
#
# mult_exp_mat = k_unused_used @ Kinv
# h_unuse_est = mult_exp_mat @ h_use
# np.save('mult_exp_mat.npy', mult_exp_mat)
#
# cov = k_unused_unused - k_unused_used @ Kinv @ k_unused_used.T
# np.save('callback_var.npy', cov)
# h_unused = np.random.multivariate_normal(mult_exp_mat @ h_use, cov)
# np.save('h_unused.npy', h_unused)
#
# a = scipy.optimize.minimize(fn_min, h_use, args=(time, use_dx, unuse_dx, X, x_use, y, Kinv),
# method='Newton-CG', jac=jac, hess=hess, options={'xtol':1e-4, 'disp':True, 'maxiter':100000},
# callback=callback)
# min_h_use = a.x
# mult_exp_mat = k_unused_used @ Kinv
# h_unuse_est = mult_exp_mat @ min_h_use
# h_use_est = k_used_unused @ (np.linalg.inv(k_unused_unused) @ h_unuse_est)
#
# estimated_h_all = np.zeros(self.h_true.shape[0])
# estimated_h_all[use_dx] = h_use_est
# estimated_h_all[unuse_dx] = h_unuse_est
# plt.plot(estimated_h_all)
# plt.plot(self.h_true)
# plt.show()
# k_used_used = ka.RBF(r).__call__(hh_time.reshape(-1,1)*1000, 1000*hh_time.reshape(-1,1))
# k_unused_used = ka.RBF(r).__call__(1000*unused_time.reshape(-1, 1),1000*hh_time.reshape(-1, 1))
# k_used_unused = ka.RBF(r).__call__(1000*hh_time.reshape(-1, 1), 1000*unused_time.reshape(-1, 1))
# k_unused_unused = ka.RBF(r).__call__(1000*unused_time.reshape(-1, 1), 1000*unused_time.reshape(-1, 1))
#
# a = scipy.optimize.minimize(fn_min, hh, args=(xx, self.y, kk_inv),
# method='Newton-CG', jac=jac, hess=hess, options={'xtol':1e-5, 'disp':True, 'maxiter':100000},
# callback=callback)
# h_used = a.x
# # h_unused = -1*(k_unused_used) @ (utils.nearestPD(np.linalg.inv(k_used_used)) @ h_used)
# h_unused = self.h_true[unused_dx]
# h_all = np.zeros(self.h_time.shape[0])
# h_all[used_dx] = h_used
# h_all[unused_dx] = h_unused
# plt.plot(h_all)
# plt.plot(self.h_true)
# plt.show()
#
#
# plt.plot(h_used)
# plt.plot(self.h_true[used_dx])
# plt.show()
#
# k_unused_unused = utils.decay_kernel(10*unused_time.reshape(-1, 1),10*unused_time.reshape(-1, 1), sigma_h=1000,
# alpha=0.1, gamma=0.45)
#
# sample = np.random.multivariate_normal(np.zeros(k_unused_unused.shape[0]), k_unused_unused)
# plt.plot(sample)
# plt.show()
#
# def obj_fn(alpha, X, y, h, h_time):
# K = utils.decay_kernel(h_time.reshape(-1, 1), h_time.reshape(-1, 1), sigma_h=2, alpha=alpha, gamma=600, noise_std=1)
# Kinv = np.linalg.inv(K)
# W = X.T @ np.diag(np.exp(X @ h)) @ X
# Wsqrt = np.sqrt(1e-3) * sqrtm(X.T @ (np.diag(np.exp(X @ h)) @ X))
# I = np.identity(K.shape[0])
# obj1 = y.T @ X @ h
# obj2 = -1 * np.sum(np.exp(X@h))
# obj3 = -0.5 * h.T @ Kinv @ h
# obj4 = 0 #-0.5 * np.linalg.slogdet(I + Wsqrt @ (K @ Wsqrt))
#
# return -1*(obj1 + obj2 + obj3 + obj4)
#
# def obj_grad(alpha, X, y, h, h_time):
# K = utils.decay_kernel(h_time.reshape(-1, 1), h_time.reshape(-1, 1), sigma_h=2,
# alpha=alpha, gamma=600, noise_std=0.001)
# Kinv = np.linalg.inv(K)
# Wsqrt = np.sqrt(1e-3) * sqrtm(X.T @ (np.diag(np.exp(X @ h)) @ X))
# I = np.identity(K.shape[0])
#
# K_prime = K * -1 * np.log(np.outer(np.exp(h_time ** 2), np.exp(h_time ** 2)))
# term1 = 0.5 * h.T @ (Kinv @ (K_prime @ Kinv)) @ h
# term2 = -0.5 * np.trace(np.linalg.inv(I + Wsqrt @ K @ Wsqrt) @ (Wsqrt @ K_prime @ Wsqrt))
#
# return -1*(term1 + term2)
#
# def callback(alpha):
# print(f'alpha: {alpha}')
#
# a = optimize.minimize(obj_fn, 500, args=(x_design, y_true, h_true, h_time),
# method='BFGS', jac=obj_grad, options={'xtol': 1e-4, 'disp': True, 'maxiter': 500},
# callback=callback)
#
# def test_deriv():
# a = 1
# b = 1
# c = 3.1
# d = 1.3
# e = 2.4
# time = np.array([1,2]).reshape(-1,1)
# time_add_sq = -1 * np.log(np.outer(np.exp(time ** 2), np.exp(time ** 2)))
# A = np.array([[a, 0], [0, b]])
# B = np.array([[c, d], [0, e]])
#
# K = utils.decay_kernel(time, time, sigma_h=2,
# alpha=2, gamma=1)
#
# inside_log = a*e*K[1,1] + a*b + c*e*K[0,0]*K[1,1] + c*b*K[0,0] + d*e*K[1,0]*K[1,1] + b*d*K[1,0]
# deriv = (-a*e*(2**2 + 2**2)*K[1,1] - c*e*K[0,0]*(2**2 + 2**2)*K[1,1] - c*e*K[1,1]*(1**2 + 1**2)*K[0,0] -
# b*c*(1**2 + 1**2)*K[0,0] - d*e*K[1,0]*(2**2 + 2**2)*K[1,1] - d*e*K[1,1]*(1**2 + 2**2)*K[1,0] - d*b*(1**2 + 2**2)*K[1,0])
#
# K_prime = K * time_add_sq
# grad1 = deriv/inside_log
# grad2 = np.trace(np.linalg.inv(A + B@K) @ B@K_prime)
def _log_likelihood_brute( params, *args):
alpha = params[0]
gamma = params[1]
sigma = params[2]
print(alpha)
h = args[0]
time = args[1]
X = args[2]
y = args[3]
delta = self.params.delta
Kinv = self.GP.K_all_inv
obj = -1 * (h.T @ (X.T @ y) - delta * np.sum(np.exp(X @ h)) - 0.5 * h @ (Kinv @ h)) + \
time.shape[0] * np.log(delta)
################################
# WAVE FUNCTION COLLAPSE IN 2D #
################################
# Original WFC implementation by <NAME> @mxgmn on github
# Python implementation by <NAME> @Coac on github
# Blender implementation by <NAME> @benkl on github
import time
import os
import numpy as np
import random
import sys
import bpy
class WaveFunctionCollapse:
# WaveFunctionCollapse encapsulates the wfc algorithm
def __init__(self, grid_size, sample, pattern_size):
self.patterns = Pattern.from_sample(sample, pattern_size)
self.grid = self._create_grid(grid_size)
self.propagator = Propagator(self.patterns)
def run(self):
start_time = time.time()
done = False
border = bpy.context.scene.wfc_vars.wfc_border
# self.propagator.propagate(cell)
if border == True:
# BorderInsert
# print(self.grid.size[2])
# print("we got a cell", self.grid.get_cell(0))
cell = self.grid.get_cell(0)[self.grid.size[1]-1][0]
# self.propagate(cell)
cell = self.grid.get_cell(0)[0][self.grid.size[2]-1]
# self.propagate(cell)
cell = self.grid.get_cell(
0)[self.grid.size[1]-1][self.grid.size[2]-1]
# self.propagate(cell)
cell = self.grid.get_cell(0)[0][0]
self.propagate(cell)
# Border Insert end
while not done:
done = self.step()
print("WFC run took %s seconds" % (time.time() - start_time))
def step(self):
step_time = time.time()
self.grid.print_allowed_pattern_count()
cell = self.observe()
if cell is None:
return True
self.propagate(cell)
print("Step took %s seconds" % (time.time() - step_time))
return False
def get_image(self):
return self.grid.get_image()
def get_patterns(self):
return [pattern.to_image() for pattern in self.patterns]
def observe(self):
if self.grid.check_contradiction():
return None
cell = self.grid.find_lowest_entropy()
if cell is None:
return None
cell.choose_rnd_pattern()
return cell
def propagate(self, cell):
self.propagator.propagate(cell)
def _create_grid(self, grid_size):
num_pattern = len(self.patterns)
return Grid(grid_size, num_pattern)
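# Usage sketch (illustrative; `sample` is assumed to be a 3-D numpy array of tile values):
# wfc = WaveFunctionCollapse(grid_size=(1, 20, 20), sample=sample, pattern_size=(1, 2, 2))
# wfc.run()
# output_image = wfc.get_image()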
class Grid:
# Grid is made of Cells
def __init__(self, size, num_pattern):
self.size = size
self.grid = np.empty(self.size, dtype=object)
# Filling grid with cells
for position in np.ndindex(self.size):
self.grid[position] = Cell(num_pattern, position, self)
# self.grid = np.array([[Cell(num_pattern, (x, y), self) for x in range(self.size)] for y in range(self.size)])
# self.grid = np.array([Cell(num_pattern, (x,), self) for x in range(self.size)])
def find_lowest_entropy(self):
min_entropy = 999999
lowest_entropy_cells = []
for cell in self.grid.flat:
if cell.is_stable():
continue
entropy = cell.entropy()
if entropy == min_entropy:
lowest_entropy_cells.append(cell)
elif entropy < min_entropy:
min_entropy = entropy
lowest_entropy_cells = [cell]
if len(lowest_entropy_cells) == 0:
return None
cell = lowest_entropy_cells[np.random.randint(
len(lowest_entropy_cells))]
return cell
def get_cell(self, index):
# Returns the cell contained in the grid at the provided index
# :param index: (...z, y, x)
# :return: cell
return self.grid[index]
def get_image(self):
# Returns the grid converted from index to back to color
# :return:
image = np.vectorize(lambda c: c.get_value())(self.grid)
image = Pattern.index_to_img(image)
return image
def check_contradiction(self):
for cell in self.grid.flat:
if len(cell.allowed_patterns) == 0:
return True
return False
def print_allowed_pattern_count(self):
grid_allowed_patterns = np.vectorize(
lambda c: len(c.allowed_patterns))(self.grid)
print(grid_allowed_patterns)
class Propagator:
# Propagator that computes and stores the legal patterns relative to another
def __init__(self, patterns):
self.patterns = patterns
self.offsets = [(z, y, x) for x in range(-1, 2)
for y in range(-1, 2) for z in range(-1, 2)]
start_time = time.time()
self.precompute_legal_patterns()
print("Patterns constraints generation took %s seconds" %
(time.time() - start_time))
def precompute_legal_patterns(self):
# pool = Pool(os.cpu_count())
# pool = Pool(1)
patterns_offsets = []
# patterns_var = []
# offsets_var = []
for pattern in self.patterns:
# patterns_var.append(pattern[0][0])
for offset in self.offsets:
patterns_offsets.append((pattern, offset))
# offsets_var.append(pattern[0][1])
# patterns_compatibility = pool.starmap(
# self.legal_patterns, patterns_offsets)
# pool.close()
# pool.join()
patterns_compatibility = []
for i, pattern in enumerate(patterns_offsets):
patterns_compatibility.append(self.legal_patterns(
patterns_offsets[i][0], patterns_offsets[i][1]))
# patterns_compatibility = self.legal_patterns(patterns_var, offsets_var)
for pattern_index, offset, legal_patterns in patterns_compatibility:
self.patterns[pattern_index].set_legal_patterns(
offset, legal_patterns)
def legal_patterns(self, pattern, offset):
legal_patt = []
for candidate_pattern in self.patterns:
if pattern.is_compatible(candidate_pattern, offset):
legal_patt.append(candidate_pattern.index)
pattern.set_legal_patterns(offset, legal_patt)
return pattern.index, offset, legal_patt
@staticmethod
def propagate(cell):
to_update = [neighbour for neighbour, _ in cell.get_neighbors()]
while len(to_update) > 0:
cell = to_update.pop(0)
for neighbour, offset in cell.get_neighbors():
for pattern_index in cell.allowed_patterns:
pattern = Pattern.from_index(pattern_index)
pattern_still_compatible = False
for neighbour_pattern_index in neighbour.allowed_patterns:
neighbour_pattern = Pattern.from_index(
neighbour_pattern_index)
if pattern.is_compatible(neighbour_pattern, offset):
pattern_still_compatible = True
break
if not pattern_still_compatible:
cell.allowed_patterns.remove(pattern_index)
for neigh, _ in cell.get_neighbors():
if neigh not in to_update:
to_update.append(neigh)
class Pattern:
# Pattern is a configuration of tiles from the input image.
index_to_pattern = {}
color_to_index = {}
index_to_color = {}
def __init__(self, data, index):
self.index = index
self.data = np.array(data)
self.legal_patterns_index = {} # offset -> [pattern_index]
def get(self, index=None):
if index is None:
return self.data.item(0)
return self.data[index]
def set_legal_patterns(self, offset, legal_patterns):
self.legal_patterns_index[offset] = legal_patterns
@property
def shape(self):
return self.data.shape
def is_compatible(self, candidate_pattern, offset):
# Check if pattern is compatible with a candidate pattern for a given offset
# :param candidate_pattern:
# :param offset:
# :return: True if compatible
assert (self.shape == candidate_pattern.shape)
# Precomputed compatibility
if offset in self.legal_patterns_index:
return candidate_pattern.index in self.legal_patterns_index[offset]
# Computing compatibility
ok_constraint = True
start = tuple([max(offset[i], 0) for i, _ in enumerate(offset)])
end = tuple([min(self.shape[i] + offset[i], self.shape[i])
for i, _ in enumerate(offset)])
for index in np.ndindex(end): # index = (x, y, z...)
start_constraint = True
for i, d in enumerate(index):
if d < start[i]:
start_constraint = False
break
if not start_constraint:
continue
if candidate_pattern.get(tuple(np.array(index) - np.array(offset))) != self.get(index):
import mrl
import gym
from mrl.replays.core.shared_buffer import SharedMemoryTrajectoryBuffer as Buffer
import numpy as np
import pickle
import os
from mrl.utils.misc import batch_block_diag
class OnlineHERBuffer(mrl.Module):
def __init__(
self,
module_name='replay_buffer'
):
"""
Buffer that does online hindsight relabeling.
Replaces the old combo of ReplayBuffer + HERBuffer.
"""
super().__init__(module_name, required_agent_modules=['env'], locals=locals())
self.size = None
self.goal_space = None
self.buffer = None
self.save_buffer = None
def _setup(self):
self.size = self.config.replay_size
env = self.env
if type(env.observation_space) == gym.spaces.Dict:
observation_space = env.observation_space.spaces["observation"]
self.goal_space = env.observation_space.spaces["desired_goal"]
else:
observation_space = env.observation_space
items = [("state", observation_space.shape),
("action", env.action_space.shape), ("reward", (1,)),
("next_state", observation_space.shape), ("done", (1,))]
if self.goal_space is not None:
items += [("previous_ag", self.goal_space.shape), # for reward shaping
("ag", self.goal_space.shape), # achieved goal
("bg", self.goal_space.shape), # behavioral goal (i.e., intrinsic if curious agent)
("dg", self.goal_space.shape)] # desired goal (even if ignored behaviorally)
self.buffer = Buffer(self.size, items)
self._subbuffers = [[] for _ in range(self.env.num_envs)]
self.n_envs = self.env.num_envs
# HER mode can differ if demo or normal replay buffer
if 'demo' in self.module_name:
self.fut, self.act, self.ach, self.beh = parse_hindsight_mode(self.config.demo_her)
else:
self.fut, self.act, self.ach, self.beh = parse_hindsight_mode(self.config.her)
def _process_experience(self, exp):
if getattr(self, 'logger'):
self.logger.add_tabular('Replay buffer size', len(self.buffer))
done = np.expand_dims(exp.done, 1) # format for replay buffer
reward = np.expand_dims(exp.reward, 1) # format for replay buffer
action = exp.action
if self.goal_space:
state = exp.state['observation']
next_state = exp.next_state['observation']
previous_achieved = exp.state['achieved_goal']
achieved = exp.next_state['achieved_goal']
desired = exp.state['desired_goal']
if hasattr(self, 'ag_curiosity') and self.ag_curiosity.current_goals is not None:
behavioral = self.ag_curiosity.current_goals
# recompute online reward
reward = self.env.compute_reward(achieved, behavioral, {'s':state, 'ns':next_state}).reshape(-1, 1)
else:
behavioral = desired
for i in range(self.n_envs):
self._subbuffers[i].append([
state[i], action[i], reward[i], next_state[i], done[i], previous_achieved[i], achieved[i],
behavioral[i], desired[i]
])
else:
state = exp.state
next_state = exp.next_state
for i in range(self.n_envs):
self._subbuffers[i].append(
[state[i], action[i], reward[i], next_state[i], done[i]])
for i in range(self.n_envs):
if exp.trajectory_over[i]:
trajectory = [np.stack(a) for a in zip(*self._subbuffers[i])]
self.buffer.add_trajectory(*trajectory)
self._subbuffers[i] = []
def sample(self, batch_size, to_torch=True):
if hasattr(self, 'prioritized_replay'):
batch_idxs = self.prioritized_replay(batch_size)
else:
batch_idxs = np.random.randint(self.buffer.size, size=batch_size)
if self.goal_space:
if "demo" in self.module_name:
has_config_her = self.config.get('demo_her')
else:
has_config_her = self.config.get('her')
if has_config_her:
if self.config.env_steps > self.config.future_warm_up:
fut_batch_size, act_batch_size, ach_batch_size, beh_batch_size, real_batch_size = np.random.multinomial(
batch_size, [self.fut, self.act, self.ach, self.beh, 1.])
else:
fut_batch_size, act_batch_size, ach_batch_size, beh_batch_size, real_batch_size = batch_size, 0, 0, 0, 0
fut_idxs, act_idxs, ach_idxs, beh_idxs, real_idxs = np.array_split(batch_idxs,
np.cumsum([fut_batch_size, act_batch_size, ach_batch_size, beh_batch_size]))
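        # Worked illustration (assumed numbers, not from the source): with batch_size=10
        # and relabeling fractions fut=0.5, act=0.1, ach=0.1, beh=0.1 (the remainder goes
        # to the real batch), the multinomial draw might return e.g. [5, 1, 1, 1, 2];
        # np.array_split with np.cumsum([5, 1, 1, 1]) then slices batch_idxs into five
        # index groups of those sizes, one per goal source.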
# Sample the real batch (i.e., goals = behavioral goals)
states, actions, rewards, next_states, dones, previous_ags, ags, goals, _ =\
self.buffer.sample(real_batch_size, batch_idxs=real_idxs)
# Sample the future batch
states_fut, actions_fut, _, next_states_fut, dones_fut, previous_ags_fut, ags_fut, _, _, goals_fut =\
self.buffer.sample_future(fut_batch_size, batch_idxs=fut_idxs)
# Sample the actual batch
states_act, actions_act, _, next_states_act, dones_act, previous_ags_act, ags_act, _, _, goals_act =\
self.buffer.sample_from_goal_buffer('dg', act_batch_size, batch_idxs=act_idxs)
# Sample the achieved batch
states_ach, actions_ach, _, next_states_ach, dones_ach, previous_ags_ach, ags_ach, _, _, goals_ach =\
self.buffer.sample_from_goal_buffer('ag', ach_batch_size, batch_idxs=ach_idxs)
# Sample the behavioral batch
states_beh, actions_beh, _, next_states_beh, dones_beh, previous_ags_beh, ags_beh, _, _, goals_beh =\
self.buffer.sample_from_goal_buffer('bg', beh_batch_size, batch_idxs=beh_idxs)
# Concatenate the five
states = np.concatenate([states, states_fut, states_act, states_ach, states_beh], 0)
actions = np.concatenate([actions, actions_fut, actions_act, actions_ach, actions_beh], 0)
ags = np.concatenate([ags, ags_fut, ags_act, ags_ach, ags_beh], 0)
goals = np.concatenate([goals, goals_fut, goals_act, goals_ach, goals_beh], 0)
next_states = np.concatenate([next_states, next_states_fut, next_states_act, next_states_ach, next_states_beh], 0)
# Recompute reward online
if hasattr(self, 'goal_reward'):
rewards = self.goal_reward(ags, goals, {'s':states, 'ns':next_states}).reshape(-1, 1).astype(np.float32)
else:
rewards = self.env.compute_reward(ags, goals, {'s':states, 'ns':next_states}).reshape(-1, 1).astype(np.float32)
if self.config.get('never_done'):
dones = np.zeros_like(rewards, dtype=np.float32)
elif self.config.get('first_visit_succ'):
dones = np.round(rewards + 1.)
else:
raise ValueError("Never done or first visit succ must be set in goal environments to use HER.")
dones = np.concatenate([dones, dones_fut, dones_act, dones_ach, dones_beh], 0)
if self.config.sparse_reward_shaping:
          previous_ags = np.concatenate([previous_ags, previous_ags_fut, previous_ags_act, previous_ags_ach, previous_ags_beh], 0)
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
from numpy.linalg import inv
import matplotlib.colors as colors
from matplotlib import cm
from matplotlib import rc
from matplotlib import rcParams
__author__ = 'ernesto'
# if use latex or mathtext
rc('text', usetex=True)
rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
####### Parameters #######
# number of samples
N = 400
# parameter a of the state equation
a = 1
# variance of the excitation noise
var_u = 0.0000005
# variance of the observation noise
var_w = 0.1
# mean and variance of f0[-1]
mu_f0_i = 0.2
var_f0_i = 0.05
# Kalman filter parameters
# number of parameters
p = 2
# state transition matrix
A = np.array([[a, 0], [2 * np.pi * a, 1]])
B = np.array([1, 2 * np.pi])
# initial conditions of the Kalman filter
# s[-1|-1]
s_est_i = np.array([[mu_f0_i], [0]])
# M[-1|-1]
C_s_i = 1 * np.eye(p)
### End of parameters ###
ns = np.arange(N)
# generation of the instantaneous frequency
f0d_1 = np.zeros((N,))
N1 = 100
N2 = 300
f01 = 0.1
f02 = 0.3
f0d_1[:N1] = f01
f0d_1[N1:N2] = (f02 - f01) / (N2 - N1) * np.arange(N2 - N1) + f01
f0d_1[N2:] = f02
# f01 = 0.1
# f02 = 0.3
# N1 = 200
# f0d_1[:N1] = f01
# f0d_1[N1:] = f02
# var_u = 0.000001
# generation of the observations
phi = 2 * np.pi * np.cumsum(f0d_1)
y = np.cos(phi)
x = y + np.random.normal(0, np.sqrt(var_w), N)
# variables to store the results
s_ests = np.zeros((p, N))
Ms = np.zeros((p, N))
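# Sketch of the extended Kalman recursion implied by this setup (an illustrative
# assumption, not the original script's loop): state s = [f0, phi]^T evolves with A and
# noise gain B; the observation x[n] = cos(phi[n]) + w[n] is linearised at the prediction.
s_est = s_est_i
M = C_s_i
for n in range(N):
    # prediction step
    s_pred = A @ s_est
    M_pred = A @ M @ A.T + var_u * np.outer(B, B)
    # linearised observation: h(s) = cos(phi), H = dh/ds = [0, -sin(phi)]
    H = np.array([[0., -np.sin(s_pred[1, 0])]])
    K = M_pred @ H.T / (var_w + (H @ M_pred @ H.T).item())
    # correction step
    s_est = s_pred + K * (x[n] - np.cos(s_pred[1, 0]))
    M = (np.eye(p) - K @ H) @ M_pred
    s_ests[:, n] = s_est.ravel()
    Ms[:, n] = np.diag(M)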
import numpy as np
from .sfo import SFO
from time import time
from scipy.optimize import minimize
# NB: binary classification is done with +/- labels
def tm_preprocess(X, colnorms=None):
"""
Preprocessing that seems to make TM more accurate:
normalize each column so training data has length 1 (use same normalization constants for training and test)
normalize each row to have length 1 (so normalization constant differs at test time)
Inputs:
X - feature matrix, rows are instances
colnorms - vector containing the norm of each colum of the training matrix
Outputs:
if colnorms is None (training):
Xnormalized - the normalized training data
colnorms - the vector containing the norm of each column of the training matrix
if colnorms is set (testing):
Xnormalized - the normalized test data
"""
returnargs = 1
if colnorms is None:
# Train
colnorms = np.sqrt(np.sum(X*X, axis=0))
returnargs = 2
Xnormalized = np.copy(X)
Xnormalized[:, colnorms > 0] = Xnormalized[:, colnorms > 0] / colnorms[colnorms > 0]
rownorms = np.sqrt(np.sum(Xnormalized*Xnormalized, axis=1))
Xnormalized = Xnormalized / rownorms[:, np.newaxis]
if returnargs == 1:
return Xnormalized
elif returnargs == 2:
return (Xnormalized, colnorms)
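# Illustrative usage sketch (made-up data, not from the source): fit-time and test-time
# normalisation with tm_preprocess, reusing the training column norms at test time.
def _example_tm_preprocess():
    Xtrain = np.random.rand(100, 5)
    Xtest = np.random.rand(20, 5)
    Xtrain_n, colnorms = tm_preprocess(Xtrain)
    Xtest_n = tm_preprocess(Xtest, colnorms=colnorms)
    return Xtrain_n, Xtest_n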
def tm_predict(w0, X, q, r, type):
"""
Returns predicted values based on a learned tensor machine
Inputs:
w0 - TM factors
X,q,r,type - see the description of tm_fit
Outputs:
z - predictions for each row in X
"""
(n,d) = X.shape
r_vec = np.concatenate(([1], (q-1)*[r]))
b = w0[0]
w = w0[1:]
w = np.reshape(w, (d, len(w)//d))
acc_sum = 0
w_offset = 0
Z = b*np.ones((n,1))
for i in range(q):
for j in range(r_vec[i]):
# the vectors whose outer product form the jth rank-one term in the
# outer product of the coefficients for the degree i+1 term
# d-by-i matrix
W = w[:, w_offset:(w_offset + i + 1)]
XW = X.dot(W) # n-by-(i+1)
prodXW = np.prod(XW, axis=1) # n-by-1
prodXW = prodXW[:, np.newaxis]
Z = Z + prodXW # n-by-1
w_offset = w_offset + i + 1
if type.upper() == 'REGRESSION':
return Z
elif type.upper() == 'BC':
return np.sign(Z)
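# Illustrative sketch (assumed sizes, not from the source): prediction with a randomly
# initialised tensor machine of degree q=3 and rank r=2; the factor-count formula below
# mirrors the column layout that tm_predict walks through.
def _example_tm_predict():
    n, d, q, r = 10, 4, 3, 2
    ncols = 1 + r * sum(i + 1 for i in range(1, q))  # total columns of the factor matrix w
    w0 = np.random.randn(1 + d * ncols)              # [bias, vec(w)]
    X = np.random.rand(n, d)
    return tm_predict(w0, X, q, r, 'regression')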
def tm_f_df(w0, X, y, q, r, type, gamma):
"""
Computes the TM objective value and gradient for scipy's optimization functions
Inputs:
w0 - TM factors
X,y,q,r,type,gamma - see the description of tm_fit
Outputs:
f - function value
df - gradient of TM factors
"""
(n,d) = X.shape
r_vec = np.concatenate(([1], (q-1)*[r]))
b = w0[0]
w = w0[1:]
w = np.reshape(w, (d, len(w)//d))
nw = w.shape[1]
acc_sum = 0
w_offset = 0
Z = b*np.ones((n,1))
bl = np.zeros((n, nw))
for i in range(q):
for j in range(r_vec[i]):
# the vectors whose outer product form the jth rank-one term in the
# outer product of the coefficients for the degree i+1 term
# d-by-i matrix
W = w[:, w_offset:(w_offset + i + 1)]
XW = X.dot(W) # n-by-(i+1)
prodXW = np.prod(XW, axis=1) # n-by-1
prodXW = prodXW[:, np.newaxis] # make it a column vector
bl[:, w_offset:(w_offset+i+1)] = prodXW / XW
Wsquared = W*W
norm_squares = np.sum(Wsquared, axis=0) # 1-by-(i+1)
acc_sum = acc_sum + np.sum(norm_squares)
Z = Z + prodXW # n-by-1
w_offset = w_offset + i + 1
f = 0
diff = np.empty_like(Z)
if type.upper() == 'REGRESSION':
diff = Z - y;
f = np.sum(diff*diff)/n/2
elif type.upper() == 'BC':
eyz = np.exp(-y*Z);
diff = -y*eyz/(1+eyz)
f = np.mean(np.log(1 + eyz))
f = f + gamma*acc_sum/2;
df = np.empty_like(w0)
df[0] = np.mean(diff)
df_w = X.transpose().dot(diff*bl)
df_w = df_w + gamma*w;
df[1:] = np.reshape(df_w, (len(w0)-1,))
return (f, df)
def tm_f_df_sub(w0, indices, X, y, q, r, type, gamma):
"""
Computes the TM objective value and gradient for SFO solver
Inputs:
w0 - TM factors
indices - list of indexes into the training data defining this minibatch
X,y,q,r,type,gamma - see the description of tm_fit
Outputs:
f - function value
df - gradient of TM factors
"""
minibatchX = X[indices, :]
minibatchy = y[indices, :]
    return tm_f_df0(w0, minibatchX, minibatchy, q, r, type, gamma)
def tm_f_df0(w0, X, y, q, r, type, gamma):
"""
Computes the TM objective value and gradient for SFO
Inputs:
w0 - TM factors
X,y,q,r,type,gamma - see the description of tm_fit
Outputs:
f - function value
df - gradient of TM factors
"""
(n,d) = X.shape
gamma = n*gamma
r_vec = np.concatenate(([1], (q-1)*[r]))
b = w0[0]
w = w0[1:]
w = np.reshape(w, (d, len(w)//d))
nw = w.shape[1]
acc_sum = 0
w_offset = 0
Z = b*np.ones((n,1))
    bl = np.empty((n, nw))
# -*- coding: utf-8 -*-
################################################################
# The contents of this file are subject to the BSD 3Clause (New) License
# you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://directory.fsf.org/wiki/License:BSD_3Clause
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
# The Original Code is part of the PyRadi toolkit.
# The Initial Developer of the Original Code is <NAME>,
# Portions created by <NAME> are Copyright (C) 2006-2012
# All Rights Reserved.
# Contributor(s): ______________________________________.
################################################################
"""
This module provides various utility functions for radiometry calculations.
Functions are provided for a maximally flat spectral filter, a simple photon
detector spectral response, effective value calculation, conversion of spectral
domain variables between [um], [cm^-1] and [Hz], conversion of spectral
density quantities between [um], [cm^-1] and [Hz] and spectral convolution.
See the __main__ function for examples of use.
This package was partly developed to provide additional material in support of students
and readers of the book Electro-Optical System Analysis and Design: A Radiometry
Perspective, <NAME>, ISBN 9780819495693, SPIE Monograph Volume
PM236, SPIE Press, 2013. http://spie.org/x648.html?product_id=2021423&origin_id=x646
"""
__version__= "$Revision$"
__author__= 'pyradi team'
__all__= ['buildLogSpace','sfilter', 'responsivity', 'effectiveValue', 'convertSpectralDomain',
'convertSpectralDensity', 'convolve', 'savitzkyGolay1D','abshumidity', 'TFromAbshumidity',
'rangeEquation','_rangeEquationCalc','detectThresholdToNoiseTpFAR',
'detectSignalToNoiseThresholdToNoisePd',
'detectThresholdToNoiseSignalToNoisepD',
'detectProbabilityThresholdToNoiseSignalToNoise',
'detectFARThresholdToNoisepulseWidth', 'upMu',
'cart2polar', 'polar2cart','index_coords','framesFirst','framesLast',
'rect', 'circ','poissonarray','draw_siemens_star','drawCheckerboard',
'makemotionsequence','extractGraph','luminousEfficiency','Spectral',
'Atmo','Sensor','Target','calcMTFwavefrontError',
'polar2cartesian','warpPolarImageToCartesianImage','warpCartesianImageToPolarImage',
'intify_tuple','differcommonfiles','blurryextract','update_progress'
]
import sys
import numpy as np
from scipy import constants
from scipy import interpolate
import matplotlib.pyplot as plt
from matplotlib.patches import Wedge
from matplotlib.collections import PatchCollection
import os
import pkg_resources
from numbers import Number
if sys.version_info[0] > 2:
from io import StringIO
else:
from StringIO import StringIO
#################################################################################
"""
Gathers and presents version information.
Adapted from https://github.com/ahmedsalhin/version_information
This makes it much easier to determine which versions of modules
were installed in the source IPython interpreter's environment.
Produces output in:
* Plaintext (IPython [qt]console)
* HTML (IPython notebook, ``nbconvert --to html``, ``--to slides``)
* JSON (IPython notebook ``.ipynb`` files)
* LaTeX (e.g. ``ipython nbconvert example.ipynb --to LaTeX --post PDF``)
Usage
======
.. sourcecode:: ipython
print(ryutils.VersionInformation('matplotlib,numpy'))
"""
import html
import json
import sys
import time
import locale
import IPython
import platform
try:
import pkg_resources
except ImportError:
pkg_resources = None
timefmt = '%a %b %d %H:%M:%S %Y %Z'
def _date_format_encoding():
return locale.getlocale(locale.LC_TIME)[1] or locale.getpreferredencoding()
class VersionInformation():
def __init__(self,line=''):
self.version_information( line=line)
def version_information(self, line=''):
"""Show information about versions of modules.
Usage:
%version_information [optional comma-separated list of modules]
"""
self.packages = [
("Python", "{version} {arch} [{compiler}]".format(
version=platform.python_version(),
arch=platform.architecture()[0],
compiler=platform.python_compiler())),
("IPython", IPython.__version__),
("OS", platform.platform().replace('-', ' '))
]
modules = line.replace(' ', '').split(",")
for module in modules:
if len(module) > 0:
try:
code = ("import %s; version=str(%s.__version__)" %
(module, module))
ns_g = ns_l = {}
exec(compile(code, "<string>", "exec"), ns_g, ns_l)
self.packages.append((module, ns_l["version"]))
except Exception as e:
try:
if pkg_resources is None:
raise
version = pkg_resources.require(module)[0].version
self.packages.append((module, version))
except Exception as e:
self.packages.append((module, str(e)))
return self
def _repr_json_(self):
obj = {
'Software versions': [
{'module': name, 'version': version} for
(name, version) in self.packages]}
if IPython.version_info[0] >= 3:
return obj
else:
return json.dumps(obj)
    @staticmethod
    def _htmltable_escape(str_):
        # escape special characters for safe inclusion in the HTML table
        return html.escape(str_)
def _repr_html_(self):
html_table = "<table>"
html_table += "<tr><th>Software</th><th>Version</th></tr>"
for name, version in self.packages:
_version = self._htmltable_escape(version)
html_table += "<tr><td>%s</td><td>%s</td></tr>" % (name, _version)
try:
html_table += "<tr><td colspan='2'>%s</td></tr>" % time.strftime(timefmt)
except:
html_table += "<tr><td colspan='2'>%s</td></tr>" % \
time.strftime(timefmt).decode(_date_format_encoding())
html_table += "</table>"
return html_table
@staticmethod
def _latex_escape(str_):
CHARS = {
'&': r'\&',
'%': r'\%',
'$': r'\$',
'#': r'\#',
'_': r'\_',
'{': r'\letteropenbrace{}',
'}': r'\letterclosebrace{}',
'~': r'\lettertilde{}',
'^': r'\letterhat{}',
'\\': r'\letterbackslash{}',
'>': r'\textgreater',
'<': r'\textless',
}
return u"".join([CHARS.get(c, c) for c in str_])
def _repr_latex_(self):
latex = r"\begin{tabular}{|l|l|}\hline" + "\n"
latex += r"{\bf Software} & {\bf Version} \\ \hline\hline" + "\n"
for name, version in self.packages:
_version = self._latex_escape(version)
latex += r"%s & %s \\ \hline" % (name, _version) + "\n"
try:
latex += r"\hline \multicolumn{2}{|l|}{%s} \\ \hline" % \
time.strftime(timefmt) + "\n"
except:
latex += r"\hline \multicolumn{2}{|l|}{%s} \\ \hline" % \
time.strftime(timefmt).decode(_date_format_encoding()) + "\n"
latex += r"\end{tabular}" + "\n"
return latex
def _repr_pretty_(self):
text = "Software versions\n"
for name, version in self.packages:
text += "%s %s\n" % (name, version)
try:
text += "%s" % time.strftime(timefmt)
except:
text += "%s" % \
time.strftime(timefmt).decode(_date_format_encoding())
import pprint
pprint.pprint(text)
def __str__(self):
text = 'Software versions\n'
for name, version in self.packages:
text += f"{name}: {version}\n"
try:
text += f"{time.strftime(timefmt)}"
except:
text += f"{time.strftime(timefmt).decode(_date_format_encoding())}"
return text
##############################################################################
##
def buildLogSpace(Vmin,Vmax,nDec,patn=False):
"""Calculate a log space given low, high and number samples per decade
If patn is True, the upper limit is adjusted to obtain a
repeat numeric pattern in each dcade.
Args:
| Vmin (float) lower limit
| Vmax (float) upper limit
| nDec (int) number of points per decade
| patn (bool) repeat pattern in each decade
Returns:
| vector with equal spacing in log
Raises:
| No exception is raised.
"""
decs = int(np.log10(Vmax/Vmin))
if patn:
ful = np.log10(Vmax/Vmin)
upp = np.ceil(nDec *(ful - decs))
        num = int(np.ceil(decs * nDec + upp + 1))
Vmax = 10 ** (np.log10(Vmin) + ((num-1) / nDec))
else:
        num = int(np.ceil(decs * nDec))
return np.logspace(np.log10(Vmin),np.log10(Vmax),num)
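# Illustrative sketch (assumed values, not from the source): ten samples per decade
# between 1 and 1000, without the repeating-pattern adjustment.
def _example_buildLogSpace():
    return buildLogSpace(1., 1000., 10)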
##############################################################################
##
def update_progress(progress, bar_length=20):
"""Simple text-based progress bar for Jupyter notebooks.
Note that clear_output, and hence this function wipes the entire cell output,
including previous output and widgets.
Usage:
import pyradi.ryutils as ryutils
import time
print('before')
#Replace this with a real computation
number_of_elements = 100
for i in range(number_of_elements):
time.sleep(0.1)
# progress must be a float between 0 and 1
ryutils.update_progress((i+1) / number_of_elements,bar_length=40)
print('after')
source:
https://mikulskibartosz.name/how-to-display-a-progress-bar-in-jupyter-notebook-47bd4c2944bf
https://ipython.org/ipython-doc/3/api/generated/IPython.display.html#IPython.display.clear_output
Wait to clear the output until new output is available to replace it.
"""
from IPython.display import clear_output
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
if progress < 0:
progress = 0
if progress >= 1:
progress = 1
block = int(round(bar_length * progress))
clear_output(wait = True)
text = "Progress: [{0}] {1:.1f}%".format( "#" * block + "-" * (bar_length - block), progress * 100)
print(text)
##############################################################################
##
def solidAngleSquare(width,breadth,height,stype,numsamples):
"""Calculate the solid angle of a rectagular plate from a point on the normal at its centre
The solid angle of a rectangular flat surface, with dimensions $W$ and $D$, as seen from a
reference point centered above the surface, is determined by the integral of the projected
area of a small elemental area $\cos\theta\,dd\,dw$ across the full size of the surface:
$$
\omega_{\rm s}=\int_W\int_D\frac{dw\,dd\,\cos^{n-2}\theta}{R^2}
$$
$$
\omega_{\rm s}=\int_W\int_D\frac{dw\,dd\,\cos^n\theta}{H^2}
$$
$$
\omega_{\rm s}=\int_W\int_D\frac{dw\,dd\,}{H^2}\left(\frac{H}{R}\right)^n
$$
$$\omega_{\rm s}=\int_W\int_D\frac{dw\,dd\,}{H^2}\left(\frac{H}{\sqrt{w^2+d^2+H^2}}\right)^n,
$$
where $H$ is the reference point height above the surface, and $n=3$ for the geometrical solid angle
and $n=4$ for the projected solid angle. The integral is performed along the $W$ and $D$ dimensions
with increments of $dw$ and $dd$. The slant range between the reference point and the elemental area
$dd\times dw$ is $R=H/\cos\theta$.
Args:
| width (float): size along one edge of rectangle
| breadth (float): size along the second edge of rectangle
| height (float): distance along normal to the rect to reference point
| stype (str): type of solid angle can be one of ('g' or 'p') for ('geometric','projected')
| numsamples (int): number of samples along edges
Returns:
| solid angle (float) or None if incorrect type
Raises:
| No exception is raised.
"""
varx = np.linspace(-width/2, width/2, numsamples)
vary = np.linspace(-breadth/2, breadth/2, numsamples)
x, y = np.meshgrid(varx, vary)
if stype[0]=='g':
gv = (1. / ( (x / height) ** 2 + (y / height) ** 2 + 1 ) ) ** (3 / 2)
elif stype[0]=='p':
gv = (1. / ( (x / height) ** 2 + (y / height) ** 2 + 1 ) ) ** (4 / 2)
else:
return None
solidAngle = np.trapz(np.ravel(gv), dx=breadth*width/(numsamples**2))/(height*height)
return solidAngle
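# Illustrative sketch (assumed geometry, not from the source): projected solid angle of a
# 1 m x 1 m plate seen from 10 m above its centre, on a 100 x 100 integration grid.
def _example_solidAngleSquare():
    return solidAngleSquare(1.0, 1.0, 10.0, 'p', 100)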
##############################################################################
##
def intify_tuple(tup):
"""Make tuple entries int type
"""
tup_int = ()
for tup_ent in tup:
tup_int = tup_int + (int(tup_ent),)
return tup_int
##############################################################################
##
def framesFirst(imageSequence):
"""Image sequence with frames along axis=2 (last index), reordered such that
frames are along axis=0 (first index).
Image sequences are stored in three-dimensional arrays, in rows, columns and frames.
Not all libraries share the same sequencing, some store frames along axis=0 and
others store frames along axis=2. This function reorders an image sequence with
frames along axis=2 to an image sequence with frames along axis=0. The function
uses np.transpose(imageSequence, (2,0,1))
Args:
| imageSequence (3-D np.array): image sequence in three-dimensional array, frames along axis=2
Returns:
| ((3-D np.array): reordered three-dimensional array (view or copy)
Raises:
| No exception is raised.
"""
return np.transpose(imageSequence, (2,0,1))
##############################################################################
##
def framesLast(imageSequence):
"""Image sequence with frames along axis=0 (first index), reordered such that
frames are along axis=2 (last index).
Image sequences are stored in three-dimensional arrays, in rows, columns and frames.
Not all libraries share the same sequencing, some store frames along axis=0 and
others store frames along axis=2. This function reorders an image sequence with
frames along axis=0 to an image sequence with frames along axis=2. The function
uses np.transpose(imageSequence, (1,2,0))
Args:
| imageSequence (3-D np.array): image sequence in three-dimensional array, frames along axis=0
Returns:
| ((3-D np.array): reordered three-dimensional array (view or copy)
Raises:
| No exception is raised.
"""
return np.transpose(imageSequence, (1,2,0))
##############################################################################
##
def index_coords(data, origin=None, framesFirst=True):
"""Creates (x,y) zero-based coordinate arrrays for a numpy array indices, relative to some origin.
This function calculates two meshgrid arrays containing the coordinates of the
input array. The origin of the new coordinate system defaults to the
center of the image, unless the user supplies a new origin.
The data format can be data.shape = (rows, cols, frames) or
data.shape = (frames, rows, cols), the format of which is indicated by the
framesFirst parameter.
Args:
| data (np.array): array for which coordinates must be calculated.
| origin ( (x-orig, y-orig) ): data-coordinates of where origin should be
| framesFirst (bool): True if data.shape is (frames, rows, cols), False if
data.shape is (rows, cols, frames)
Returns:
| x (float np.array): x coordinates in array format.
| y (float np.array): y coordinates in array format.
Raises:
| No exception is raised.
original code by <NAME>
https://stackoverflow.com/questions/3798333/image-information-along-a-polar-coordinate-system
"""
if framesFirst:
ny, nx = data.shape[1:3]
else:
ny, nx = data.shape[:2]
if origin is None:
origin_x, origin_y = nx // 2, ny // 2
else:
origin_x, origin_y = origin
x, y = np.meshgrid(np.arange(nx), np.arange(ny))
x -= origin_x
y -= origin_y
return x, y
##############################################################################
##
def cart2polar(x, y):
"""Converts from cartesian to polar coordinates, given (x,y) to (r,theta).
Args:
| x (float np.array): x values in array format.
| y (float np.array): y values in array format.
Returns:
| r (float np.array): radial component for given (x,y).
| theta (float np.array): angular component for given (x,y).
Raises:
| No exception is raised.
original code by <NAME>
https://stackoverflow.com/questions/3798333/image-information-along-a-polar-coordinate-system
"""
r = np.sqrt(x**2 + y**2)
theta = np.arctan2(y, x)
return r, theta
##############################################################################
##
def polar2cart(r, theta):
"""Converts from polar to cartesian coordinates, given (r,theta) to (x,y).
Args:
| r (float np.array): radial values in array format.
| theta (float np.array): angular values in array format.
Returns:
| x (float np.array): x component for given (r, theta).
| y (float np.array): y component for given (r, theta).
Raises:
| No exception is raised.
original code by <NAME>
https://stackoverflow.com/questions/3798333/image-information-along-a-polar-coordinate-system
"""
x = r * np.cos(theta)
y = r * np.sin(theta)
return x, y
##############################################################################
##
def upMu(uprightMu=True, textcomp=False):
"""Returns a LaTeX micron symbol, either an upright version or the normal symbol.
The upright symbol requires that the siunitx LaTeX package be installed on the
computer running the code. This function also changes the Matplotlib rcParams
file.
Args:
| uprightMu (bool): signals upright (True) or regular (False) symbol (optional).
| textcomp (bool): if True use the textcomp package, else use siunitx package (optional).
Returns:
| range (string): LaTeX code for the micro symbol.
Raises:
| No exception is raised.
"""
if sys.version_info[0] < 3:
if uprightMu:
from matplotlib import rc, font_manager
import matplotlib as mpl
rc('text', usetex=True)
# set up the use of external latex, fonts and packages
if not textcomp :
mpl.rcParams['text.latex.preamble'] = [
# r'\usepackage{siunitx}', # i need upright \micro symbols, but you need...
'\\usepackage{siunitx}', # i need upright \micro symbols, but you need...
'\\sisetup{detect-all}', # ...this to force siunitx to actually use your fonts
'\\usepackage{helvet}', # set the normal font here
'\\usepackage{sansmath}', # load up the sansmath so that math -> helvet
'\\sansmath'] # <- tricky! -- gotta actually tell tex to use!
upmu = '\si{\micro}'
else:
mpl.rcParams['text.latex.preamble'] = [
'\\usepackage{textcomp}', # i need upright \micro symbols, but you need...
'\\usepackage{helvet}', # set the normal font here
'\\usepackage{sansmath}', # load up the sansmath so that math -> helvet
'\\sansmath' # <- tricky! -- gotta actually tell tex to use!
]
upmu = '\\textmu{}'
else:
upmu = '$\\mu$'
else:
upmu = '\u00B5'
return upmu
##############################################################################
##
def detectFARThresholdToNoisepulseWidth(ThresholdToNoise, pulseWidth):
""" Solve for the FAR, given the threshold to noise ratio and pulse width, for matched filter.
References:
"Electro-optics handbook," Tech. Rep. EOH-11, RCA, 1974. RCA Technical Series Publication.
<NAME>, Detection Theory: Applications and Digital Signal Processing, CRC Press, 2002
Args:
| ThresholdToNoise (float): the threshold to noise ratio.
| pulseWidth (float): the signal pulse width in [s].
Returns:
| FAR (float): the false alarm rate in [alarms/s]
Raises:
| No exception is raised.
"""
FAR = np.exp(- (ThresholdToNoise ** 2) / 2.) / (2. * pulseWidth * np.sqrt(3))
return FAR
##############################################################################
##
def detectThresholdToNoiseTpFAR(pulseWidth, FAR):
""" Solve for threshold to noise ratio, given pulse width and FAR, for matched filter.
Using the theory of matched filter design, calculate the
threshold to noise ratio, to achieve a required false alarm rate.
References:
"Electro-optics handbook," Tech. Rep. EOH-11, RCA, 1974. RCA Technical Series Publication.
<NAME>, Detection Theory: Applications and Digital Signal Processing, CRC Press, 2002
Args:
| pulseWidth (float): the signal pulse width in [s].
| FAR (float): the false alarm rate in [alarms/s]
Returns:
| range (float): threshold to noise ratio
Raises:
| No exception is raised.
"""
ThresholdToNoise = np.sqrt(-2 * np.log (2 * pulseWidth * np.sqrt(3) * FAR ))
return ThresholdToNoise
##############################################################################
##
def detectSignalToNoiseThresholdToNoisePd(ThresholdToNoise, pD):
""" Solve for the signal to noise ratio, given the threshold to noise ratio and
probability of detection.
Using the theory of matched filter design, calculate the
signal to noise ratio, to achieve a required probability of detection.
References:
"Electro-optics handbook," Tech. Rep. EOH-11, RCA, 1974. RCA Technical Series Publication.
<NAME>, Detection Theory: Applications and Digital Signal Processing, CRC Press, 2002
Args:
| ThresholdToNoise (float): the threshold to noise ratio [-]
| pD (float): the probability of detection [-]
Returns:
| range (float): signal to noise ratio
Raises:
| No exception is raised.
"""
import scipy.special
SignalToNoise = np.sqrt(2) * scipy.special.erfinv(2 * pD -1) + ThresholdToNoise
return SignalToNoise
##############################################################################
##
def detectThresholdToNoiseSignalToNoisepD(SignalToNoise, pD):
""" Solve for the threshold to noise ratio, given the signal to noise ratio and
probability of detection.
References:
"Electro-optics handbook," Tech. Rep. EOH-11, RCA, 1974. RCA Technical Series Publication.
    <NAME>, Detection Theory: Applications and Digital Signal Processing, CRC Press, 2002
Args:
| SignalToNoise (float): the signal to noise ratio [-]
| pD (float): the probability of detection [-]
Returns:
| range (float): signal to noise ratio
Raises:
| No exception is raised.
"""
import scipy.special
ThresholdToNoise = SignalToNoise - np.sqrt(2) * scipy.special.erfinv(2 * pD -1)
return ThresholdToNoise
##############################################################################
##
def detectProbabilityThresholdToNoiseSignalToNoise(ThresholdToNoise, SignalToNoise):
""" Solve for the probability of detection, given the signal to noise ratio and
threshold to noise ratio
References:
"Electro-optics handbook," Tech. Rep. EOH-11, RCA, 1974. RCA Technical Series Publication.
    <NAME>, Detection Theory: Applications and Digital Signal Processing, CRC Press, 2002
Args:
| ThresholdToNoise (float): the threshold to noise ratio [-]
| SignalToNoise (float): the signal to noise ratio [-]
Returns:
| range (float): probability of detection
Raises:
| No exception is raised.
"""
import scipy.special
pD = 0.5 * (scipy.special.erf((SignalToNoise - ThresholdToNoise) / np.sqrt(2)) + 1)
return pD
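# Illustrative sketch (assumed values, not from the source): for a 100 ns pulse and a
# false alarm rate of 0.1 alarms/s, find the threshold-to-noise ratio, then the
# signal-to-noise ratio needed for 99% detection probability, and confirm the Pd.
def _example_detection_chain():
    tnr = detectThresholdToNoiseTpFAR(100e-9, 0.1)
    snr = detectSignalToNoiseThresholdToNoisePd(tnr, 0.99)
    pd = detectProbabilityThresholdToNoiseSignalToNoise(tnr, snr)
    return tnr, snr, pd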
##############################################################################
##
def rangeEquation(Intensity, Irradiance, rangeTab, tauTab, rangeGuess = 1, n = 2):
""" Solve the range equation for arbitrary transmittance vs range.
This function solve for the range :math:`R` in the range equation
.. math::
E = \\frac{I\\tau_a(R)}{R^n}
where :math:`E` is the threshold irradiance in [W/m2],
and :math:`I` is the intensity in [W/sr]. This range equation holds for
the case where the target is smaller than the field of view.
The range :math:`R` must be in [m], and :math:`\\tau_a(R)`
is calculated from a lookup table of atmospheric transmittance vs. range.
The transmittance lookup table can be calculated from the simple Bouguer law,
or it can have any arbitrary shape, provided it decreases with increasing range.
The user supplies the lookup table in the form of an array of range values and
an associated array of transmittance values. The range values need not be on
constant linear range increment.
The parameter :math:`n`
* :math:`n=2` (default value) the general case of a radiating source
smaller than the field of view.
* :math:`n=4` the special case of a laser range finder illuminating a target
smaller than the field of view, viewed against the sky. In this case there
is an :math:`R^2` attenuation from the laser to the source and another
:math:`R^2` attenuation from the source to the receiver, hence
:math:`R^4` overall.
If the range solution is doubtful (e.g. not a trustworthy solution) the
returned value is made negative.
Args:
| Intensity (float or np.array[N,] or [N,1]): in [W/sr].
| Irradiance (float or np.array[N,] or [N,1]): in [W/m2].
| rangeTab (np.array[N,] or [N,1]): range vector for tauTab lookup in [m]
| tauTab (np.array[N,] or [N,1]): transmittance vector for lookup in [m]
| rangeGuess (float): starting value range estimate in [m] (optional)
| n (float): range power (2 or 4) (optional)
Returns:
| range (float or np.array[N,] or [N,1]): Solution to the range equation in [m].
Value is negative if calculated range exceeds the top value in range table,
or if calculated range is too near the lower resolution limit.
Raises:
| No exception is raised.
"""
from scipy.interpolate import interp1d
from scipy.optimize import fsolve
tauTable = interp1d(rangeTab, tauTab, kind = 'linear')
Range = fsolve(_rangeEquationCalc, rangeGuess,
args = (Intensity,Irradiance,tauTable,n,np.max(rangeTab),))
#near the bottom (minimum) range of the table
if(Range < rangeTab[2] ):
Range = - Range
# beyond the top of the range table
if(Range > rangeTab[-1] ):
Range = - Range
return Range
##############################################################################
##
def _rangeEquationCalc(r,i,e,tauTable,n,rMax):
if r > rMax:
return 0
return i * tauTable(r) / (r ** n) - e
##############################################################################
##
def TFromAbshumidity(AH, equationSelect = 1):
"""temperature in [K] between 248 K and 342 K, given atmopsheric absolute humidity [g/m3], assuming 100% RH
This function uses two similar equations, but with different constants.
Args:
| AH (float): absolute humidity in g/m3.
| equationSelect (int): select the equation to be used.
Returns:
| temperature (float): in K
Raises:
| No exception is raised.
"""
T = np.linspace(248., 342., 100 )
absLUT = abshumidity(T, equationSelect = equationSelect)
f = interpolate.interp1d(absLUT, T,bounds_error=True)
return f(AH)
##############################################################################
##
def abshumidity(T, equationSelect = 1):
""" Atmopsheric absolute humidity [g/m3] for temperature in [K] between 248 K and 342 K.
This function provides two similar equations, but with different constants.
Args:
| temperature (np.array[N,] or [N,1]): in [K].
| equationSelect (int): select the equation to be used.
Returns:
| absolute humidity (np.array[N,] or [N,1]): abs humidity in [g/m3]
Raises:
| No exception is raised.
"""
    #there are two options, the first one seems more accurate (relative to test set)
if equationSelect == 1:
#http://www.vaisala.com/Vaisala%20Documents/Application%20notes/Humidity_Conversion_Formulas_B210973EN-D.pdf
return ( 1325.2520998 * 10 **(7.5892*(T - 273.15)/(T -32.44)))/T
else:
#http://www.see.ed.ac.uk/~shs/Climate%20change/Data%20sources/Humidity%20with%20altidude.pdf
return (1324.37872 * 2.718281828459046 **(17.67*(T - 273.16)/(T - 29.66)))/T
##############################################################################
##
def sfilter(spectral,center, width, exponent=6, taupass=1.0, \
taustop=0.0, filtertype = 'bandpass' ):
""" Calculate a symmetrical filter response of shape exp(-x^n)
Given a number of parameters, calculates maximally flat,
symmetrical transmittance. The function parameters controls
the width, pass-band and stop-band transmittance and sharpness
of cutoff. This function is not meant to replace the use of
properly measured filter responses, but rather serves as a
starting point if no other information is available.
This function does not calculate ripple in the pass-band
or cut-off band.
Filter types supported include band pass, high (long) pass and
low (short) pass filters. High pass filters have maximal
transmittance for all spectral values higher than the central
value. Low pass filters have maximal transmittance for all
spectral values lower than the central value.
Args:
| spectral (np.array[N,] or [N,1]): spectral vector in [um] or [cm-1].
| center (float): central value for filter passband
| width (float): proportional to width of filter passband
| exponent (float): even integer, define the sharpness of cutoff.
| If exponent=2 then gaussian
| If exponent=infinity then square
| taupass (float): the transmittance in the pass band (assumed constant)
| taustop (float): peak transmittance in the stop band (assumed constant)
| filtertype (string): filter type, one of 'bandpass', 'lowpass' or 'highpass'
Returns:
| transmittance (np.array[N,] or [N,1]): transmittances at "spectral" intervals.
Raises:
| No exception is raised.
| If an invalid filter type is specified, return None.
| If negative spectral is specified, return None.
"""
maxexp = np.log(sys.float_info.max)/np.log(np.max(2*np.abs(spectral-center)/width))
# minexp = np.log(sys.float_info.min)/np.log(np.min(2*(spectral-center)/width))
exponent = maxexp if exponent > maxexp else exponent
# exponent = minexp if exponent < minexp else exponent
tau = taustop+(taupass-taustop)*np.exp(-(2*np.abs(spectral-center)/width)**exponent)
maxtau=np.max(tau)
if filtertype == 'bandpass':
pass
elif filtertype == 'lowpass':
tau = tau * np.greater(spectral,center) + \
maxtau * np.ones(spectral.shape) * np.less(spectral,center)
elif filtertype == 'highpass':
tau = tau * np.less(spectral,center) + \
maxtau * np.ones(spectral.shape) * np.greater(spectral,center)
else:
return None
return tau
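# Illustrative sketch (assumed values, not from the source): a MWIR-like bandpass
# response centred at 4.5 um with 90% in-band transmittance.
def _example_sfilter():
    wl = np.linspace(3.0, 6.0, 301)
    return sfilter(wl, center=4.5, width=1.0, exponent=12, taupass=0.9, taustop=0.001)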
##############################################################################
##
def responsivity(wavelength,lwavepeak, cuton=1, cutoff=20, scaling=1.0):
""" Calculate a photon detector wavelength spectral responsivity
Given a number of parameters, calculates a shape that is somewhat similar to a photon
detector spectral response, on wavelength scale. The function parameters controls the
cutoff wavelength and shape of the response. This function is not meant to replace the use
of properly measured spectral responses, but rather serves as a starting point if no other
information is available.
Args:
| wavelength (np.array[N,] or [N,1]): vector in [um].
| lwavepeak (float): approximate wavelength at peak response
| cutoff (float): cutoff strength beyond peak, 5 < cutoff < 50
| cuton (float): cuton sharpness below peak, 0.5 < cuton < 5
| scaling (float): scaling factor
Returns:
| responsivity (np.array[N,] or [N,1]): responsivity at wavelength intervals.
Raises:
| No exception is raised.
"""
responsivity=scaling *( ( wavelength / lwavepeak) **cuton - ( wavelength / lwavepeak) **cutoff)
responsivity= responsivity * (responsivity > 0)
return responsivity
################################################################
##
def effectiveValue(spectraldomain, spectralToProcess, spectralBaseline):
"""Normalise a spectral quantity to a scalar, using a weighted mapping by another spectral quantity.
Effectivevalue = integral(spectralToProcess * spectralBaseline) / integral( spectralBaseline)
The data in spectralToProcess and spectralBaseline must both be sampled at the same
domain values as specified in spectraldomain.
The integral is calculated with numpy/scipy trapz trapezoidal integration function.
Args:
    | spectraldomain (np.array[N,] or [N,1]): spectral domain in wavelength, frequency or wavenumber.
| spectralToProcess (np.array[N,] or [N,1]): spectral quantity to be normalised
| spectralBaseline (np.array[N,] or [N,1]): spectral serving as baseline for normalisation
Returns:
| (float): effective value
| Returns None if there is a problem
Raises:
| No exception is raised.
"""
num=np.trapz(spectralToProcess.reshape(-1, 1)*spectralBaseline.reshape(-1, 1),spectraldomain, axis=0)[0]
den=np.trapz(spectralBaseline.reshape(-1, 1),spectraldomain, axis=0)[0]
return num/den
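# Illustrative sketch (assumed data, not from the source): effective filter transmittance
# weighted by a notional Gaussian source spectrum.
def _example_effectiveValue():
    wl = np.linspace(3.0, 6.0, 301)
    tau = sfilter(wl, center=4.5, width=1.0, exponent=12)
    baseline = np.exp(-0.5 * ((wl - 4.0) / 0.8) ** 2)
    return effectiveValue(wl, tau, baseline)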
################################################################
##
def convertSpectralDomain(inspectraldomain, type=''):
"""Convert spectral domains, i.e. between wavelength [um], wavenummber [cm^-1] and frequency [Hz]
In string variable type, the 'from' domain and 'to' domains are indicated each with a single letter:
'f' for temporal frequency, 'l' for wavelength and 'n' for wavenumber
The 'from' domain is the first letter and the 'to' domain the second letter.
Note that the 'to' domain vector is a direct conversion of the 'from' domain
    to the 'to' domain (not interpolated or otherwise sampled).
Args:
| inspectraldomain (np.array[N,] or [N,1]): spectral domain in wavelength, frequency or wavenumber.
| wavelength vector in [um]
| frequency vector in [Hz]
| wavenumber vector in [cm^-1]
| type (string): specify from and to domains:
| 'lf' convert from wavelength to per frequency
| 'ln' convert from wavelength to per wavenumber
| 'fl' convert from frequency to per wavelength
| 'fn' convert from frequency to per wavenumber
| 'nl' convert from wavenumber to per wavelength
| 'nf' convert from wavenumber to per frequency
Returns:
| [N,1]: outspectraldomain
| Returns zero length array if type is illegal, i.e. not one of the expected values
Raises:
| No exception is raised.
"""
#use dictionary to switch between options, lambda fn to calculate, default zero
outspectraldomain = {
'lf': lambda inspectraldomain: constants.c / (inspectraldomain * 1.0e-6),
'ln': lambda inspectraldomain: (1.0e4/inspectraldomain),
'fl': lambda inspectraldomain: constants.c / (inspectraldomain * 1.0e-6),
'fn': lambda inspectraldomain: (inspectraldomain / 100) / constants.c ,
'nl': lambda inspectraldomain: (1.0e4/inspectraldomain),
'nf': lambda inspectraldomain: (inspectraldomain * 100) * constants.c,
}.get(type, lambda inspectraldomain: np.zeros(shape=(0, 0)) )(inspectraldomain)
return outspectraldomain
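# Illustrative sketch (assumed values, not from the source): wavelength [um] to
# wavenumber [cm^-1] and back again.
def _example_convertSpectralDomain():
    wl = np.array([3.0, 4.0, 5.0])
    wn = convertSpectralDomain(wl, type='ln')
    return wn, convertSpectralDomain(wn, type='nl')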
################################################################
##
def convertSpectralDensity(inspectraldomain, inspectralquantity, type=''):
"""Convert spectral density quantities, i.e. between W/(m^2.um), W/(m^2.cm^-1) and W/(m^2.Hz).
In string variable type, the 'from' domain and 'to' domains are indicated each with a
single letter:
    'f' for temporal frequency, 'l' for wavelength and 'n' for wavenumber
The 'from' domain is the first letter and the 'to' domain the second letter.
The return values from this function are always positive, i.e. not mathematically correct,
but positive in the sense of radiance density.
The spectral density quantity input is given as a two vectors: the domain value vector
and the density quantity vector. The output of the function is also two vectors, i.e.
the 'to' domain value vector and the 'to' spectral density. Note that the 'to' domain
vector is a direct conversion of the 'from' domain to the 'to' domain (not interpolated
or otherwise sampled).
Args:
| inspectraldomain (np.array[N,] or [N,1]): spectral domain in wavelength,
frequency or wavenumber.
| inspectralquantity (np.array[N,] or [N,1]): spectral density in same domain
as domain vector above.
| wavelength vector in [um]
| frequency vector in [Hz]
| wavenumber vector in [cm^-1]
| type (string): specify from and to domains:
| 'lf' convert from per wavelength interval density to per frequency interval density
| 'ln' convert from per wavelength interval density to per wavenumber interval density
| 'fl' convert from per frequency interval density to per wavelength interval density
| 'fn' convert from per frequency interval density to per wavenumber interval density
| 'nl' convert from per wavenumber interval density to per wavelength interval density
| 'nf' convert from per wavenumber interval density to per frequency interval density
Returns:
| ([N,1],[N,1]): outspectraldomain and outspectralquantity
| Returns zero length arrays is type is illegal, i.e. not one of the expected values
Raises:
| No exception is raised.
"""
inspectraldomain = inspectraldomain.reshape(-1,)
inspectralquantity = inspectralquantity.reshape(inspectraldomain.shape[0], -1)
outspectralquantity = np.zeros(inspectralquantity.shape)
# the meshgrid idea does not work well here, because we can have very long
# spectral arrays and these become too large for meshgrid -> size **2
# we have to loop this one
spec = inspectraldomain
for col in range(inspectralquantity.shape[1]):
quant = inspectralquantity[:,col]
#use dictionary to switch between options, lambda fn to calculate, default zero
outspectraldomain = {
'lf': lambda spec: constants.c / (spec * 1.0e-6),
'fn': lambda spec: (spec / 100) / constants.c ,
'nl': lambda spec: (1.0e4/spec),
'ln': lambda spec: (1.0e4/spec),
'nf': lambda spec: (spec * 100) * constants.c,
'fl': lambda spec: constants.c / (spec * 1.0e-6),
}.get(type, lambda spec: np.zeros(shape=(0, 0)) )(spec)
outspectralquantity[:, col] = {
'lf': lambda quant: quant / (constants.c *1.0e-6 / ((spec * 1.0e-6)**2)),
'fn': lambda quant: quant * (100 *constants.c),
'nl': lambda quant: quant / (1.0e4 / spec**2) ,
'ln': lambda quant: quant / (1.0e4 / spec**2) ,
'nf': lambda quant: quant / (100 * constants.c),
'fl': lambda quant: quant / (constants.c *1.0e-6 / ((spec * 1.0e-6)**2)),
}.get(type, lambda quant: np.zeros(shape=(0, 0)) )(quant)
return (outspectraldomain,outspectralquantity)
##############################################################################
##
def savitzkyGolay1D(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
Source: http://wiki.scipy.org/Cookbook/SavitzkyGolay
The Savitzky Golay filter is a particular type of low-pass filter,
well adapted for data smoothing. For further information see:
http://www.wire.tu-bs.de/OLDWEB/mameyer/cmr/savgol.pdf
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques.
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to make for each point a least-square fit with a
polynomial of high order over a odd-sized window centered at
the point.
Examples:
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References:
[1] <NAME>, <NAME>, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
[2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
<NAME>, <NAME>, <NAME>, <NAME>
Cambridge University Press ISBN-13: 9780521880688
Args:
| y : array_like, shape (N,) the values of the time history of the signal.
| window_size : int the length of the window. Must be an odd integer number.
| order : int the order of the polynomial used in the filtering.
Must be less then `window_size` - 1.
| deriv: int the order of the derivative to compute (default = 0 means only smoothing)
Returns:
| ys : ndarray, shape (N) the smoothed signal (or it's n-th derivative).
Raises:
| Exception raised for window size errors.
"""
import numpy as np
from math import factorial
try:
        window_size = np.abs(int(window_size))
        order = np.abs(int(order))
except ValueError as msg:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = list(range(order+1))
half_window = (window_size -1) // 2
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
##############################################################################
##
def getFHWM(wl,tau,normaliseMax=False):
"""Given spectral domain and range, determine full-width half-max domain width
Returns the FWHM, and the two 50% wavelengths
"""
# get FWHM https://stackoverflow.com/questions/53445337/implementation-of-a-threshold-detection-function-in-python
if normaliseMax:
tau = tau / np.max(tau)
mask = np.diff(1 * (tau > 0.5) != 0)
wlcr = np.vstack((wl[:-1][mask],wl[1:][mask]))
spcr = np.vstack((tau[:-1][mask],tau[1:][mask]))
lamh = np.zeros((2,))
# interpolate to get 0.5 crossing
for i in [0,1]:
lamh[i] = wlcr[0,i]+(wlcr[1,i]-wlcr[0,i])*(0.5-spcr[0,i])/(spcr[1,i]-spcr[0,i])
fwhm = lamh[1] - lamh[0]
return np.abs(fwhm),lamh[0], lamh[1]
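# Illustrative sketch (assumed data, not from the source): FWHM of the bandpass response
# produced by sfilter above.
def _example_getFHWM():
    wl = np.linspace(3.0, 6.0, 601)
    tau = sfilter(wl, center=4.5, width=1.0, exponent=12)
    return getFHWM(wl, tau, normaliseMax=True)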
##############################################################################
##
def convolve(inspectral, samplingresolution, inwinwidth, outwinwidth, windowtype=np.bartlett):
""" Convolve (non-circular) a spectral variable with a window function,
given the input resolution and input and output window widths.
This function is normally used on wavenumber-domain spectral data. The spectral
data is assumed sampled at samplingresolution wavenumber intervals.
The inwinwidth and outwinwidth window function widths are full width half-max (FWHM)
for the window functions for the inspectral and returned spectral variables, respectively.
The Bartlett function is used as default, but the user can use a different function.
The Bartlett function is a triangular function reaching zero at the ends. Window function
width is correct for Bartlett and only approximate for other window functions.
Spectral convolution is best done in frequency domain ([cm-1] units) because
the filter or emission line shapes have better symmetry in frequency domain than
in wavelength domain.
The input spectral vector must be in spectral density units of cm-1.
Args:
| inspectral (np.array[N,] or [N,1]): spectral variable input vector (e.g., radiance or transmittance).
| samplingresolution (float): wavenumber interval between inspectral samples
| inwinwidth (float): FWHM window width used to obtain the input spectral vector (e.g., spectroradiometer window width)
| outwinwidth (float): FWHM window width of the output spectral vector after convolution
| windowtype (function): name of a numpy/scipy function for the window function
Returns:
| outspectral (np.array[N,]): input vector, filtered to new window width.
| windowfn (np.array[N,]): The window function used.
Raises:
| No exception is raised.
"""
winbins = round(2*(outwinwidth/(inwinwidth*samplingresolution)), 0)
winbins = winbins if winbins%2==1 else winbins+1
windowfn=windowtype(winbins)
#np.convolve is unfriendly towards unicode strings
if sys.version_info[0] > 2:
cmode='same'
else:
cmode='same'.encode('utf-8')
outspectral = np.convolve(windowfn/(samplingresolution*windowfn.sum()),
inspectral.reshape(-1, ),mode=cmode)
return outspectral, windowfn
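# Illustrative sketch (assumed values, not from the source): degrade a 1 cm-1 sampled
# spectrum, measured with a 4 cm-1 instrument window, to an effective 20 cm-1 window.
def _example_convolve():
    wn = np.arange(2000., 3000., 1.)
    spectrum = 1.0 - sfilter(wn, center=2350., width=80., exponent=8)
    smoothed, window = convolve(spectrum, 1., 4., 20.)
    return smoothed, window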
######################################################################################
def circ(x, y, d=1):
""" Generation of a circular aperture.
Args:
| x (np.array[N,M]): x-grid, metres
| y (np.array[N,M]): y-grid, metres
| d (float): diameter in metres.
Returns:
| z (np.array[N,M]): z-grid, 1's inside radius, meters/pixels.
Raises:
| No exception is raised.
Author: Prof. <NAME>, revised/ported by <NAME>
Original source: http://arxiv.org/pdf/1412.4031.pdf
"""
z = None
r = np.sqrt(x ** 2 + y ** 2)
z = np.zeros(r.shape)
z[r < d / 2.] = 1.0
z[r == d / 2.] = 0.5
return z
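# Illustrative sketch (assumed grid, not from the source): a 10 mm circular aperture
# sampled on a 25 mm square grid.
def _example_circ():
    v = np.linspace(-12.5e-3, 12.5e-3, 256)
    x, y = np.meshgrid(v, v)
    return circ(x, y, d=10e-3)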
######################################################################################
def rect(x, y, sx=1, sy=1):
""" Generation of a rectangular aperture.
Args:
| x (np.array[N,M]): x-grid, metres
    | y (np.array[N,M]): y-grid, metres
| sx (float): full size along x.
| sy (float): full size along y.
Returns:
    | z (np.array[N,M]): z-grid, 1 inside the rectangle, 0.5 on the boundary, 0 outside.
Raises:
| No exception is raised.
Author: <NAME>
Original source: http://arxiv.org/pdf/1412.4031.pdf
"""
z = None
if x is not None and y is not None:
z = np.zeros(x.shape)
z[np.logical_and(np.abs(x) < sx/2.,np.abs(y) < sy/2.)] = 1.
z[np.logical_and(np.abs(x) == sx/2., np.abs(y) == sy/2.)] = 0.5
return z
######################################################################################################
def poissonarray(inp, seedval=None, tpoint=1000):
r"""This routine calculates a Poisson random variable for an array of input values
with potentially very high event counts.
At high mean values the Poisson distribution calculation overflows. For
mean values exceeding 1000, the Poisson distribution may be approximated by a
Gaussian distribution.
The function accepts a two-dimensional array and calculate a separate random
value for each element in the array, using the element value as the mean value.
A typical use case is when calculating shot noise for image data.
From http://en.wikipedia.org/wiki/Poisson_distribution#Related_distributions
For sufficiently large values of :math:`\lambda`, (say :math:`\lambda>1000`),
the normal distribution with mean :math:`\lambda` and
variance :math:`\lambda` (standard deviation :math:`\sqrt{\lambda}`)
is an excellent approximation to the Poisson distribution.
If :math:`\lambda` is greater than about 10, then the normal distribution
is a good approximation if an appropriate continuity correction is performed, i.e.,
:math:`P(X \le x)`, where (lower-case) x is a non-negative integer, is replaced by
:math:`P(X\le\,x+0.5)`.
:math:`F_\mathrm{Poisson}(x;\lambda)\approx\,F_\mathrm{normal}(x;\mu=\lambda,\sigma^2=\lambda)`
This function returns values of zero when the input is zero.
Args:
| inp (np.array[N,M]): array with mean value
| seedval (int): seed for random number generator, None means use system time.
| tpoint (int): Threshold when to switch over between Poisson and Normal distributions
Returns:
| outp (np.array[N,M]): Poisson random variable for given mean value
Raises:
| No exception is raised.
Author: <NAME>
"""
#If seed is omitted or None, current system time is used
np.random.seed(seedval)
#this is a bit of a mess:
# - for values smaller than tpoint calculate using standard Poisson distribution
# - for values larger than tpoint but nonzero use normal approximation, add small sdelta to avoid variance==0
# - for values larger than tpoint but zero keep at zero, sdelta added has no effect, just avoids zero divide
sdelta = 1e-10
outp = np.zeros(inp.shape)
outp = (inp<=tpoint) * np.random.poisson(inp * (inp<=tpoint) )\
+ ((inp>tpoint) & (inp!=0)) * np.random.normal(loc=inp, scale=np.sqrt(inp+sdelta))
outp = np.where(inp==0, 0., outp)
return outp
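# Illustrative sketch (assumed image, not from the source): shot noise applied to a
# synthetic irradiance map spanning low and very high mean counts in a single call.
def _example_poissonarray():
    img = np.logspace(0, 6, 64).reshape(8, 8)
    return poissonarray(img, seedval=0)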
######################################################################################################
def draw_siemens_star(outfile, n, dpi):
r"""Siemens star chart generator
by <NAME>, http://cmp.felk.cvut.cz/~wagnelib/utils/star.html
Args:
| outfile (str): output image filename (monochrome only)
| n (int): number of spokes in the output image.
| dpi (int): dpi in output image, determines output image size.
Returns:
| Nothing, creates a monochrome siemens star image
Raises:
| No exception is raised.
Author: <NAME>, adapted by <NAME>
"""
from scipy import misc
# Create figure and add patterns
fig, ax = plt.subplots()
ax.add_collection(gen_siemens_star((0,0), 1., n))
plt.axis('equal')
plt.axis([-1.03, 1.03, -1.03, 1.03])
plt.axis('off')
fig.savefig(outfile, figsize=(900,900), papertype='a0', bbox_inches='tight', dpi=dpi)
#read image back in order to crop to spokes only
imgIn = np.abs(255 - misc.imread(outfile)[:,:,0])
nz0 = np.nonzero(np.sum(imgIn,axis=0))
nz1 = np.nonzero(np.sum(imgIn,axis=1))
imgOut = imgIn[(nz1[0][0]-1) : (nz1[0][-1]+2), (nz0[0][0]-1) : (nz0[0][-1]+2)]
imgOut = np.abs(255 - imgOut)
misc.imsave(outfile, imgOut)
######################################################################################################
def gen_siemens_star(origin, radius, n):
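    """Returns a PatchCollection of n black Wedge patches forming a Siemens star of the given radius centred at origin."""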
centres = np.linspace(0, 360, n+1)[:-1]
step = (((360.0)/n)/4.0)
patches = []
for c in centres:
patches.append(Wedge(origin, radius, c-step, c+step))
return PatchCollection(patches, facecolors='k', edgecolors='none')
######################################################################################################
def drawCheckerboard(rows, cols, numPixInBlock, imageMode, colour1, colour2, imageReturnType='image',datatype=np.uint8):
"""Draw checkerboard with 8-bit pixels
From http://stackoverflow.com/questions/2169478/how-to-make-a-checkerboard-in-numpy
Args:
| rows (int) : number or rows in checkerboard
| cols (int) : number of columns in checkerboard
| numPixInBlock (int) : number of pixels to be used in one block of the checkerboard
| imageMode (string) : PIL image mode [e.g. L (8-bit pixels, black and white), RGB (3x8-bit pixels, true color)]
| colour1 (int or RGB tuple) : colour 1 specified according to the imageMode
| colour2 (int or RGB tuple) : colour 2 specified according to the imageMode
| imageReturnType: 'image' for PIL image, 'nparray' for numpy array
| datatype (numpy data type) : numpy data type for the returned np.array
Returns:
| img : checkerboard numpy array or PIL image (see imageReturnType)
Raises:
| No exception is raised.
Example Usage:
rows = 5
cols = 7
pixInBlock = 4
color1 = 0
color2 = 255
img = drawCheckerboard(rows,cols,pixInBlock,'L',color1,color2,'nparray')
pilImg = Img.fromarray(img, 'L')
pilImg.save('{0}.png'.format('checkerboardL'))
color1 = (0,0,0)
color2 = (255,255,255)
pilImage = drawCheckerboard(rows,cols,pixInBlock,'RGB',color1,color2,'image')
pilImage.save('{0}.png'.format('checkerboardRGB'))
"""
width = numPixInBlock * cols
height = numPixInBlock * rows
coords = np.ogrid[0:height, 0:width]
idx = (coords[0] // numPixInBlock + coords[1] // numPixInBlock) % 2
vals = np.array([colour1, colour2], dtype=datatype)
img = vals[idx]
if (imageReturnType == 'nparray'):
return img
else:
from PIL import Image as Img
pilImage = Img.fromarray(img, imageMode)
return pilImage
######################################################################################################
def extractGraph(filename, xmin, xmax, ymin, ymax, outfile=None,doPlot=False,\
xaxisLog=False, yaxisLog=False, step=None, value=None):
"""Scan an image containing graph lines and produce (x,y,value) data.
    This function processes an image, calculates the location of pixels on a
    graph line, and then scales the (r,c) or (x,y) values of the pixels with
    non-zero values to the graph's data coordinates. The procedure is as follows:
Get a bitmap of the graph (scan or screen capture).
Take care to make the graph x and y axes horizontal/vertical.
The current version of the software does not work with rotated images.
Bitmap edit the graph. Clean the graph to the maximum extent possible,
by removing all the clutter, such that only the line to be scanned is visible.
Crop only the central block that contains the graph box, by deleting
the x and y axes notation and other clutter. The size of the cropped image
must cover the range in x and y values you want to cover in the scan. The
graph image/box must be cut out such that the x and y axes min and max
correspond exactly with the edges of the bitmap.
You must end up with nothing in the image except the line you want
to digitize.
The current version only handles single lines on the graph, but it does
handle vertical and horizontal lines.
The function can also write out a value associated with the (x,y) coordinates
of the graph, as the third column. Normally these would have all the same
value if the line represents an iso value.
The x,y axes can be lin/lin, lin/log, log/lin or log/log, set the flags.
Args:
| filename: name of the image file
| xmin: the value corresponding to the left side (column=0)
| xmax: the value corresponding to the right side (column=max)
| ymin: the value corresponding to the bottom side (row=bottom)
| ymax: the value corresponding to the top side (row=top)
| outfile: write the sampled points to this output file
| doPlot: plot the digitised graph for visual validation
| xaxisLog: x-axis is in log10 scale (min max are log values)
| yaxisLog: y-axis is in log10 scale (min max are log values)
        | step: if not None only output every step'th value
| value: if not None, write this value as the value column
Returns:
| outA: a numpy array with columns (xval, yval, value)
| side effect: a file may be written
| side effect: a graph may be displayed
Raises:
| No exception is raised.
Author: <EMAIL>
"""
from scipy import ndimage
from skimage.morphology import medial_axis
if doPlot:
import pylab
import matplotlib.pyplot as pyplot
#read image file, as grey scale
img = ndimage.imread(filename, True)
# find threshold 50% up the way
halflevel = img.min() + (img.max()-img.min()) /2
# form binary image by thresholding
img = img < halflevel
#find the skeleton one pixel wide
imgskel = medial_axis(img)
#if doPlot:
# pylab.imshow(imgskel)
# pylab.gray()
# pylab.show()
# set up indices arrays to get x and y indices
ind = np.indices(img.shape)
#skeletonise the graph to one pixel only
#then get the y pixel value, using indices
yval = ind[0,...] * imgskel.astype(float)
#if doPlot:
# pylab.imshow(yval>0)
# pylab.gray()
# pylab.show()
# invert y-axis origin from left top to left bottom
yval = yval.shape[0] - np.max(yval, axis=0)
#get indices for only the pixels where we have data
wantedIdx = np.where(np.sum(imgskel, axis = 0) > 0)
# convert to original graph coordinates
cvec = np.arange(0.0,img.shape[1])
xval = xmin + (cvec[wantedIdx] / img.shape[1]) * (xmax - xmin)
xval = xval.reshape(-1,1)
yval = ymin + (yval[wantedIdx] / img.shape[0]) * (ymax - ymin)
yval = yval.reshape(-1,1)
if xaxisLog:
xval = 10** xval
if yaxisLog:
yval = 10 ** yval
#build the result array
outA = np.hstack((xval,yval))
if value is not None:
outA = np.hstack((outA,value*np.ones(yval.shape)))
# process step intervals
if step is not None:
# collect the first value, every step'th value, and last value
outA = | np.vstack((outA[0,:],outA[1:-2:step,:],outA[-1,:])) | numpy.vstack |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import time
import misc.utils as utils
from collections import OrderedDict
from functools import partial
import math
import torch
import torch.nn.functional as F
from torch import multiprocessing as mp
from multiprocessing.managers import BaseManager
import sys
sys.path.append("cider")
from pyciderevalcap.ciderD.ciderD import CiderD
sys.path.append("coco-caption")
from pycocoevalcap.bleu.bleu import Bleu
CiderD_scorer = None
Bleu_scorer = None
#CiderD_scorer = CiderD(df='corpus')
def init_scorer(cached_tokens):
global CiderD_scorer
CiderD_scorer = CiderD_scorer or CiderD(df=cached_tokens)
global Bleu_scorer
Bleu_scorer = Bleu_scorer or Bleu(4)
def array_to_str(arr):
out = ''
for i in range(len(arr)):
out += str(arr[i]) + ' '
if arr[i] == 0:
break
return out.strip()
def get_self_critical_reward(model, fc_feats, att_feats, att_masks, data_gts, gen_result, opt):
batch_size = gen_result.size(0)# batch_size = sample_size * seq_per_img
seq_per_img = batch_size // len(data_gts)
# get greedy decoding baseline
model.eval()
with torch.no_grad():
greedy_res, _ = model(fc_feats, att_feats, att_masks=att_masks, mode='sample')
model.train()
res = OrderedDict()
gen_result = gen_result.data.cpu().numpy()
greedy_res = greedy_res.data.cpu().numpy()
for i in range(batch_size):
res[i] = [array_to_str(gen_result[i])]
for i in range(batch_size):
res[batch_size + i] = [array_to_str(greedy_res[i])]
gts = OrderedDict()
for i in range(len(data_gts)):
gts[i] = [array_to_str(data_gts[i][j]) for j in range(len(data_gts[i]))]
res_ = [{'image_id':i, 'caption': res[i]} for i in range(2 * batch_size)]
res__ = {i: res[i] for i in range(2 * batch_size)}
gts = {i: gts[i % batch_size // seq_per_img] for i in range(2 * batch_size)}
if opt.cider_reward_weight > 0:
_, cider_scores = CiderD_scorer.compute_score(gts, res_)
print('Cider scores:', _)
else:
cider_scores = 0
if opt.bleu_reward_weight > 0:
_, bleu_scores = Bleu_scorer.compute_score(gts, res__)
bleu_scores = np.array(bleu_scores[3])
print('Bleu scores:', _[3])
else:
bleu_scores = 0
scores = opt.cider_reward_weight * cider_scores + opt.bleu_reward_weight * bleu_scores
scores = scores[:batch_size] - scores[batch_size:]
rewards = | np.repeat(scores[:, np.newaxis], gen_result.shape[1], 1) | numpy.repeat |
from __future__ import print_function
import os
import copy
import logging
import matplotlib.pyplot as plt
import numpy as np
import re
import tensorflow as tf
import warnings
# from colabtools import publish
from tensor2tensor.utils import registry
from magenta.models import svg_vae
from magenta.models.svg_vae import svg_utils
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import trainer_lib
tfe = tf.contrib.eager
Modes = tf.estimator.ModeKeys
tf.enable_eager_execution()
logging.getLogger("mlperf_compliance").setLevel(logging.ERROR)
warnings.simplefilter("ignore")
def initialize_model(problem_name, data_dir, hparam_set, hparams, model_name,
ckpt_dir, split=Modes.TRAIN):
"""Returns an initialized model, dataset iterator and hparams."""
tf.reset_default_graph()
# create hparams and get glyphazzn problem definition
hparams = trainer_lib.create_hparams(hparam_set, hparams, data_dir=data_dir,
problem_name=problem_name)
problem = registry.problem(problem_name)
# get model definition
ModelClass = registry.model(model_name)
model = ModelClass(hparams, mode=Modes.PREDICT,
problem_hparams=hparams.problem_hparams)
# create dataset iterator from problem definition
dataset = problem.dataset(Modes.PREDICT, dataset_split=split,
data_dir=data_dir, shuffle_files=False,
hparams=hparams).batch(52*2)
iterator = tfe.Iterator(dataset)
# finalize/initialize model
# creates ops to be initialized
output, extra_losses = model(iterator.next())
model.initialize_from_ckpt(ckpt_dir) # initializes ops
return model, iterator, hparams
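# Hedged usage sketch (added for clarity, not part of the original notebook): one way
# initialize_model is typically invoked. Every string below -- the data/checkpoint paths and
# the problem, hparam-set and model names -- is a placeholder assumption; substitute the
# names registered in your own svg_vae / tensor2tensor setup.
def _example_initialize_model(data_dir='/path/to/glyphazzn_data',
                              ckpt_dir='/path/to/image_vae_checkpoints'):
    model, iterator, hparams = initialize_model(
        problem_name='glyph_azzn_problem',   # placeholder problem name
        data_dir=data_dir,
        hparam_set='image_vae',              # placeholder hparam set
        hparams='',                          # optional comma-separated hparam overrides
        model_name='image_vae',              # placeholder model name
        ckpt_dir=ckpt_dir,
        split=Modes.PREDICT)
    return model, iterator, hparams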
def get_bottleneck(features, model):
"""Retrieve latent encoding for given input pixel image in features dict."""
features = features.copy()
# the presence of a 'bottleneck' feature with 0 dimensions indicates that the
# model should return the bottleneck from the input image
features['bottleneck'] = tf.zeros((2, 32))
print("get_bottleneck shape", features['bottleneck'].get_shape())
print("Entering model Entering model Entering model Entering model Entering model")
return model(features)[0]
def infer_from_bottleneck(features, bottleneck, model, out='cmd'):
"""Returns a sample from a decoder, conditioned on the given a latent."""
features = features.copy()
# set bottleneck which we're decoding from
features['bottleneck'] = bottleneck
# reset inputs/targets. This guarantees that the decoder is only being
# conditioned on the given bottleneck.
batch_size = tf.shape(bottleneck)[:1].numpy().tolist()
features['inputs'] = tf.zeros(
batch_size + tf.shape(features['inputs'])[1:].numpy().tolist())
features['targets'] = tf.zeros(
batch_size + tf.shape(features['targets'])[1:].numpy().tolist())
features['targets_psr'] = tf.zeros(
batch_size + tf.shape(features['targets_psr'])[1:].numpy().tolist())
if out == 'cmd':
# using the SVG Decoder
return model.infer(features, decode_length=0)
# using the Image Decoder (from the Image VAE)
return model(features)
def infer_from_source(features, model, out='cmd'):
"""Returns a sample from a decoder, conditioned on the given a latent."""
features = features.copy()
if out == 'cmd':
# using the SVG Decoder
return model.infer(features, decode_length=0)
# using the Image Decoder (from the Image VAE)
return model(features)
def merge_features(features_list):
new_features = {}
for k in features_list[0].keys():
all_vs = [features[k] for features in features_list]
new_features[k] = tf.concat(all_vs, axis=0)
return new_features
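# Hedged sketch (added for illustration, not part of the original notebook): the round trip
# the helpers above support -- encode the pixel glyphs in a features batch to a latent with
# get_bottleneck, then decode SVG commands from that latent with infer_from_bottleneck.
# `features` is assumed to be one batch drawn from the iterator returned by initialize_model.
def _example_encode_decode(model, features):
    bottleneck = get_bottleneck(features, model)    # latent codes for the input glyphs
    decoded = infer_from_bottleneck(features, bottleneck, model, out='cmd')
    return decoded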
start = ("""<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www."""
"""w3.org/1999/xlink" width="256px" height="256px" style="-ms-trans"""
"""form: rotate(360deg); -webkit-transform: rotate(360deg); transfo"""
"""rm: rotate(360deg);" preserveAspectRatio="xMidYMid meet" viewBox"""
"""="0 0 24 24"><path d=\"""")
end = """\" fill="currentColor"/></svg>"""
COMMAND_RX = re.compile("([MmLlHhVvCcSsQqTtAaZz])")
FLOAT_RX = re.compile("[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?")
def svg_html_to_path_string(svg):
return svg.replace(start, '').replace(end, '')
def _tokenize(pathdef):
"""Returns each svg token from path list."""
  # e.g.: 'm0.1-.5c0,6' -> 'm', '0.1', '-.5', 'c', '0', '6'
for x in COMMAND_RX.split(pathdef):
if x != '' and x in 'MmLlHhVvCcSsQqTtAaZz':
yield x
for token in FLOAT_RX.findall(x):
yield token
def path_string_to_tokenized_commands(path):
"""Tokenizes the given path string.
E.g.:
Given M 0.5 0.5 l 0.25 0.25 z
Returns [['M', '0.5', '0.5'], ['l', '0.25', '0.25'], ['z']]
"""
new_path = []
current_cmd = []
for token in _tokenize(path):
if len(current_cmd) > 0:
if token in 'MmLlHhVvCcSsQqTtAaZz':
# cmd ended, convert to vector and add to new_path
new_path.append(current_cmd)
current_cmd = [token]
else:
# add arg to command
current_cmd.append(token)
else:
# add to start new cmd
current_cmd.append(token)
if current_cmd:
# process command still unprocessed
new_path.append(current_cmd)
return new_path
def separate_substructures(tokenized_commands):
"""Returns a list of SVG substructures."""
# every moveTo command starts a new substructure
# an SVG substructure is a subpath that closes on itself
  # such as the outer and the inner edge of the character `o`
substructures = []
curr = []
for cmd in tokenized_commands:
if cmd[0] in 'mM' and len(curr) > 0:
substructures.append(curr)
curr = []
curr.append(cmd)
if len(curr) > 0:
substructures.append(curr)
return substructures
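# Illustrative sketch (added for clarity, not part of the original code): tokenising a
# made-up two-contour path and splitting it into substructures with the helpers above.
def _example_substructures():
    path = 'M 0 0 l 1 0 l 0 1 z M 5 5 l 1 0 z'
    cmds = path_string_to_tokenized_commands(path)
    # cmds == [['M','0','0'], ['l','1','0'], ['l','0','1'], ['z'],
    #          ['M','5','5'], ['l','1','0'], ['z']]
    subs = separate_substructures(cmds)
    # subs holds two substructures, one per 'M' moveTo command
    return subs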
def postprocess(svg, dist_thresh=2., skip=False):
path = svg_html_to_path_string(svg)
svg_template = svg.replace(path, '{}')
tokenized_commands = path_string_to_tokenized_commands(path)
def dist(a, b): return np.sqrt((float(a[0]) - float(b[0]))**2 +
(float(a[1]) - float(b[1]))**2)
def are_close_together(a, b, t): return dist(a, b) < t
# first, go through each start/end point and merge if they're close enough
# together (that is, make end point the same as the start point).
# TODO: there are better ways of doing this, in a way that propagates error
# back (so if total error is 0.2, go through all N commands in this
# substructure and fix each by 0.2/N (unless they have 0 vertical change))
substructures = separate_substructures(tokenized_commands)
previous_substructure_endpoint = (0., 0.,)
for substructure in substructures:
# first, if the last substructure's endpoint was updated, we must update
# the start point of this one to reflect the opposite update
substructure[0][-2] = str(float(substructure[0][-2]) -
previous_substructure_endpoint[0])
substructure[0][-1] = str(float(substructure[0][-1]) -
previous_substructure_endpoint[1])
start = list(map(float, substructure[0][-2:]))
curr_pos = (0., 0.)
for cmd in substructure:
curr_pos, _ = svg_utils._update_curr_pos(curr_pos, cmd, (0., 0.))
if are_close_together(start, curr_pos, dist_thresh):
new_point = np.array(start)
previous_substructure_endpoint = ((new_point[0] - curr_pos[0]),
(new_point[1] - curr_pos[1]))
substructure[-1][-2] = str(float(substructure[-1][-2]) +
(new_point[0] - curr_pos[0]))
substructure[-1][-1] = str(float(substructure[-1][-1]) +
(new_point[1] - curr_pos[1]))
if substructure[-1][0] in 'cC':
substructure[-1][-4] = str(float(substructure[-1][-4]) +
(new_point[0] - curr_pos[0]))
substructure[-1][-3] = str(float(substructure[-1][-3]) +
(new_point[1] - curr_pos[1]))
if skip:
return svg_template.format(' '.join([' '.join(' '.join(cmd) for cmd in s)
for s in substructures]))
def cosa(x, y): return (x[0] * y[0] + x[1] * y[1]) / (
(np.sqrt(x[0]**2 + x[1]**2) * np.sqrt(y[0]**2 + y[1]**2)))
def rotate(a, x, y): return (x * | np.cos(a) | numpy.cos |
import os.path as osp
import numpy as np
import bcolz
from torchvision import datasets, transforms
from sklearn.datasets import make_classification
from config import SimpleNet,ThreeLayer
def bcolz_save(path, np_array):
c = bcolz.carray(np_array, rootdir=path, mode='w')
c.flush()
print("Saved to " + path)
get_lin = False
if get_lin:
DATA_DIR = "../../data/lin/"
TRN_INPUTS_BCOLZ_PATH = osp.join(DATA_DIR, "trn_inputs.bcolz")
TRN_TARGETS_BCOLZ_PATH = osp.join(DATA_DIR, "trn_targets.bcolz")
TST_INPUTS_BCOLZ_PATH = osp.join(DATA_DIR, "tst_inputs.bcolz")
TST_TARGETS_BCOLZ_PATH = osp.join(DATA_DIR, "tst_targets.bcolz")
dataset_tensor = make_classification(n_samples=12000, n_features=2, n_redundant=0,n_informative=2,random_state=7,n_clusters_per_class=1)
trn_dataset_tensor_x = dataset_tensor[0][:10000]
trn_dataset_tensor_y = dataset_tensor[1][:10000]
tst_dataset_tensor_x = dataset_tensor[0][10000:]
tst_dataset_tensor_y = dataset_tensor[1][10000:]
trn_dataset_tensor = [(trn_dataset_tensor_x[i], trn_dataset_tensor_y[i]) for i in range(len(trn_dataset_tensor_y))]
tst_dataset_tensor = [(tst_dataset_tensor_x[i], tst_dataset_tensor_y[i]) for i in range(len(tst_dataset_tensor_y))]
#tst_dataset_tensor = (tst_dataset_tensor_x, tst_dataset_tensor_y)
trn_inputs_np = np.array([x for x, y in list(trn_dataset_tensor)])
trn_targets_np = np.array([y for x, y in trn_dataset_tensor])
tst_inputs_np = np.array([x for x, y in list(tst_dataset_tensor)])
tst_targets_np = np.array([y for x, y in tst_dataset_tensor])
bcolz_save(TRN_INPUTS_BCOLZ_PATH, trn_inputs_np)
bcolz_save(TRN_TARGETS_BCOLZ_PATH, trn_targets_np)
bcolz_save(TST_INPUTS_BCOLZ_PATH, tst_inputs_np)
bcolz_save(TST_TARGETS_BCOLZ_PATH, tst_targets_np)
simple = True
if simple:
DATA_DIR = "../../data/simple/"
TRN_INPUTS_BCOLZ_PATH = osp.join(DATA_DIR, "trn_inputs.bcolz")
TRN_TARGETS_BCOLZ_PATH = osp.join(DATA_DIR, "trn_targets.bcolz")
TST_INPUTS_BCOLZ_PATH = osp.join(DATA_DIR, "tst_inputs.bcolz")
TST_TARGETS_BCOLZ_PATH = osp.join(DATA_DIR, "tst_targets.bcolz")
dataset_tensor = make_classification(n_samples=12000, n_features=2, n_redundant=0,n_informative=2,random_state=7,n_clusters_per_class=1)
trn_dataset_tensor_x = dataset_tensor[0][:10000]
tst_dataset_tensor_x = dataset_tensor[0][10000:]
net = SimpleNet()
trn_dataset_tensor_y = net(trn_dataset_tensor_x)
tst_dataset_tensor_y = net(tst_dataset_tensor_x)
trn_dataset_tensor = [(trn_dataset_tensor_x[i], trn_dataset_tensor_y[i]) for i in range(len(trn_dataset_tensor_y))]
tst_dataset_tensor = [(tst_dataset_tensor_x[i], tst_dataset_tensor_y[i]) for i in range(len(tst_dataset_tensor_y))]
#tst_dataset_tensor = (tst_dataset_tensor_x, tst_dataset_tensor_y)
trn_inputs_np = np.array([x for x, y in list(trn_dataset_tensor)])
trn_targets_np = np.array([y for x, y in trn_dataset_tensor])
tst_inputs_np = np.array([x for x, y in list(tst_dataset_tensor)])
tst_targets_np = np.array([y for x, y in tst_dataset_tensor])
bcolz_save(TRN_INPUTS_BCOLZ_PATH, trn_inputs_np)
bcolz_save(TRN_TARGETS_BCOLZ_PATH, trn_targets_np)
bcolz_save(TST_INPUTS_BCOLZ_PATH, tst_inputs_np)
bcolz_save(TST_TARGETS_BCOLZ_PATH, tst_targets_np)
threelayer = True
if threelayer:
DATA_DIR = "../../data/threelayer/"
TRN_INPUTS_BCOLZ_PATH = osp.join(DATA_DIR, "trn_inputs.bcolz")
TRN_TARGETS_BCOLZ_PATH = osp.join(DATA_DIR, "trn_targets.bcolz")
TST_INPUTS_BCOLZ_PATH = osp.join(DATA_DIR, "tst_inputs.bcolz")
TST_TARGETS_BCOLZ_PATH = osp.join(DATA_DIR, "tst_targets.bcolz")
dataset_tensor = make_classification(n_samples=12000, n_features=2, n_redundant=0,n_informative=2,random_state=7,n_clusters_per_class=1)
trn_dataset_tensor_x = dataset_tensor[0][:10000]
tst_dataset_tensor_x = dataset_tensor[0][10000:]
net = ThreeLayer()
trn_dataset_tensor_y = net(trn_dataset_tensor_x)
tst_dataset_tensor_y = net(tst_dataset_tensor_x)
trn_dataset_tensor = [(trn_dataset_tensor_x[i], trn_dataset_tensor_y[i]) for i in range(len(trn_dataset_tensor_y))]
tst_dataset_tensor = [(tst_dataset_tensor_x[i], tst_dataset_tensor_y[i]) for i in range(len(tst_dataset_tensor_y))]
#tst_dataset_tensor = (tst_dataset_tensor_x, tst_dataset_tensor_y)
trn_inputs_np = np.array([x for x, y in list(trn_dataset_tensor)])
trn_targets_np = np.array([y for x, y in trn_dataset_tensor])
tst_inputs_np = np.array([x for x, y in list(tst_dataset_tensor)])
tst_targets_np = np.array([y for x, y in tst_dataset_tensor])
bcolz_save(TRN_INPUTS_BCOLZ_PATH, trn_inputs_np)
bcolz_save(TRN_TARGETS_BCOLZ_PATH, trn_targets_np)
    bcolz_save(TST_INPUTS_BCOLZ_PATH, tst_inputs_np)
    bcolz_save(TST_TARGETS_BCOLZ_PATH, tst_targets_np)
get_mnist = False
if get_mnist:
DATA_DIR = "../../data/mnist/"
TRN_INPUTS_BCOLZ_PATH = osp.join(DATA_DIR, "trn_inputs.bcolz")
TRN_TARGETS_BCOLZ_PATH = osp.join(DATA_DIR, "trn_targets.bcolz")
TST_INPUTS_BCOLZ_PATH = osp.join(DATA_DIR, "tst_inputs.bcolz")
TST_TARGETS_BCOLZ_PATH = osp.join(DATA_DIR, "tst_targets.bcolz")
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,),
(0.3081,))])
trn_dataset_tensor = datasets.MNIST(DATA_DIR, train=True, download=True,
transform=transform)
tst_dataset_tensor = datasets.MNIST(DATA_DIR, train=False, download=True,
transform=transform)
trn_inputs_np = np.array([x.numpy() for x, y in list(trn_dataset_tensor)])
trn_targets_np = | np.array([y for x, y in trn_dataset_tensor]) | numpy.array |
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
r = requests.get(check_complete)
bar.update(1)
if r.status_code == requests.codes.ok:
bar.n = 400
bar.last_print_n = 400
bar.refresh()
                elapsed = (i * 3) / 60
                print('\nrequest completed in %f minutes.' % elapsed)
break
else:
time.sleep(3)
elapsed = (i * 3) / 60
return data
def M2M_Files(data, tag=''):
"""
    Use a regex tag combined with the results of the M2M data request to collect the list of
    NetCDF data files from the THREDDS catalog created for the request.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
    :return: list of NetCDF files in the THREDDS catalog that match the regex tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
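# Hedged end-to-end sketch (added for illustration, not part of the original script): how these
# helpers are typically chained together with the M2M_URLs builder defined below. The
# platform/instrument choice, date strings and regex tag are arbitrary examples, and M2M_URLs
# is assumed to return the uframe dataset name together with the populated var_list.
def _example_m2m_workflow(start_date='2019-01-01T00:00:00.000Z',
                          end_date='2019-01-31T23:59:59.999Z'):
    uframe_dataset_name, var_list = M2M_URLs('CE02SHSM', 'BUOY', 'METBK1', 'Telemetered')
    data = M2M_Call(uframe_dataset_name, start_date, end_date)   # request NetCDF files via M2M
    files = M2M_Files(data, '.*METBK.*\\.nc$')                   # list matching files on THREDDS
    variables, time_converted = M2M_Data(files, var_list)        # pull values into numpy arrays
    return variables, time_converted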
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
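# Illustrative sketch (added for clarity): structtype grows on demand -- indexing one element
# past the current length appends a fresh var(), mimicking Matlab-style struct arrays.
def _example_structtype():
    s = structtype()
    s[0].name = 'time'
    s[0].units = 'seconds since 1900-01-01'
    s[1].name = 'sea_surface_temperature'   # index == len(s) creates a new element
    return len(s)                           # returns 2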
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
        var_list[9].units = 'W/m2'
        var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
        var_list[12].units = 'W/m2'
        var_list[13].units = 'W/m2'
        var_list[14].units = 'W/m2'
        var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
        var_list[9].units = 'W/m2'
        var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
        var_list[12].units = 'W/m2'
        var_list[13].units = 'W/m2'
        var_list[14].units = 'W/m2'
        var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
        var_list[9].units = 'W/m2'
        var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
        var_list[12].units = 'W/m2'
        var_list[13].units = 'W/m2'
        var_list[14].units = 'W/m2'
        var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
        var_list[9].units = 'W/m2'
        var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
        var_list[12].units = 'W/m2'
        var_list[13].units = 'W/m2'
        var_list[14].units = 'W/m2'
        var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
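# ZPLSC: bio-acoustic sonar (zooplankton); the streams below expose only the time stamp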
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
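# WAVSS: surface wave spectra, bulk wave statistics reported from the buoy-mounted wave sensor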
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
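# VELPT: single-point velocity meter (point currents plus attitude, temperature and pressure)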
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
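# PCO2W: partial pressure of CO2 in seawater (thermistor temperature and pCO2)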
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
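# PHSEN: seawater pH sensor (thermistor temperature and pH)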
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
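# SPKIR: spectral irradiance (downwelling irradiance vector across the instrument's wavelength channels)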
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
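# PRESF: seafloor pressure (tide measurements: absolute pressure and seawater temperature)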
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
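# CTDBP: pumped CTD on the moorings (temperature, salinity, density, pressure, conductivity)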
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
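# VEL3D: 3-D single-point turbulent velocity meter on the seafloor (MFN) packages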
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
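# VEL3DK: K-series VEL3D on the CE09OSPM wire-following profiler (velocities, attitude, CTD pressure)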
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
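#CTDPF
# CTDPF: profiler-mounted CTD on the CE09OSPM wire-following profiler (temperature, salinity, density, pressure, conductivity)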
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
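# PCO2A: air-sea pCO2 (surface seawater and atmospheric pCO2 plus the derived CO2 flux)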
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
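# PARAD: photosynthetically available radiation on the wire-following profiler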
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
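# OPTAA: spectral absorption and attenuation meter; the streams below expose only the time stamp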
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
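# NUTNR = nitrate sensor (SUNA); each stream reports raw and salinity-corrected
# nitrate concentrations in umol/L.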
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
#MOPAK
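# MOPAK = 3-axis motion pack on the surface buoy; only the time coordinate is mapped
# for these accelerometer streams.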
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
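# METBK = bulk meteorology package. Each surface mooring exposes the same 19 variables
# (SST, conductivity, winds, radiation, fluxes, humidity, etc.), so the blocks below
# differ only in the platform portion of the dataset path.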
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
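# FLORT = 3-wavelength fluorometer; streams report chlorophyll-a, CDOM, volume
# scattering, and derived optical backscatter.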
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
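# FDCHP = direct covariance flux package; only the time coordinate is mapped here.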
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
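# DOSTA = dissolved oxygen optode. NSIF streams carry the full optode record
# (estimated concentration, optode temperature, TC oxygen); MFN streams map only
# dissolved_oxygen and dosta_ln_optode_oxygen.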
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
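# ADCP = acoustic Doppler current profiler; earth-coordinate velocity streams with
# bin depths, attitude (heading/pitch/roll), and east/north/up seawater velocities.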
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
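# WAVSS = surface wave spectra instrument; the statistics streams report bulk wave
# parameters (significant height/period, mean/peak periods, direction, and spread).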
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
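# VELPT = single-point velocity meter (Nortek Aquadopp); streams report east/north/up
# velocities plus attitude, temperature, and pressure in raw deci/centi units.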
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
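# note: the DCL-parsed stream below is kept commented out; the instrument_recovered
# stream is used for this platform instead.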
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
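# PCO2W = seawater pCO2 sensor (SAMI-pCO2); streams report the thermistor temperature
# and seawater pCO2 in uatm.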
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
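# Seawater pH (PHSEN) recovered-host streams below map time plus the
# phsen thermistor temperature (degC) and the seawater pH (unitless).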
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
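# Spectral downwelling irradiance (SPKIR) recovered-host streams below map
# time plus the downwelling irradiance vector (uW cm-2 nm-1).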
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
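# Seafloor pressure (PRESF) recovered-host tide-measurement streams below map
# time plus absolute seafloor pressure (dbar) and seawater temperature (degC).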
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
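# Moored CTD (CTDBP) recovered-host streams below map time plus temperature
# (degC), practical salinity, density (kg/m3), pressure (dbar) and
# conductivity (S/m).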
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
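# Single-point velocity (VEL3D) recovered-host streams below map time plus the
# eastward/northward/upward turbulent velocity components (m/s) and the
# seawater pressure reported by the instrument.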
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A
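# Air-sea pCO2 (PCO2A) recovered-host streams below map time plus the seawater
# and atmospheric partial pressure of CO2 (uatm) and the air-sea CO2 flux
# (mol m-2 s-1).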
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA
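# Optical absorption/attenuation (OPTAA) recovered-host streams below map the
# time stamp only; no science variables are requested for these datasets here.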
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
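# Nitrate (NUTNR, SUNA) recovered-host streams below map time plus nitrate
# concentration and salinity-corrected nitrate (umol/L).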
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
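#CTDBP
# Recovered-instrument (RecoveredInst) CTDBP streams below map the same CTD
# variables as above, but under the ctdbp_* instrument-recovered parameter
# names.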
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
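#CTDPF
# Wire-following profiler CTD on CE09OSPM (RecoveredWFP) maps the ctdpf_ckl_*
# temperature, salinity, density, pressure and conductivity variables.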
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
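#ADCP
# Recovered-instrument ADCP streams below map time, bin depths (meters),
# heading/pitch/roll (deci-degrees) and the earth-referenced eastward,
# northward and upward seawater velocities (m/s).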
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
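#ZPLSC
# ZPLSC (bio-acoustic sonar, assumed) echogram streams below map the time
# stamp only.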
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
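#VELPT
# Recovered-instrument VELPT streams below map time, the three velocity
# components (m/s), heading/roll/pitch (deci-degrees), temperature (0.01degC)
# and pressure (0.001dbar).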
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
# CSPP Data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = | np.array([]) | numpy.array |
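# Illustrative sketch (an assumption, not code from the rows above): each elif
# branch above maps a (platform, node, instrument_class, method) combination to
# a uFrame stream path plus parallel variable name/unit lists. The same lookup
# can be expressed as a dictionary; the two entries below are copied verbatim
# from branches above purely as an example.
STREAM_CATALOG = {
    ('CE01ISSM', 'MFN', 'PRESF', 'RecoveredInst'):
        'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered',
    ('CE02SHBP', 'BEP', 'PHSEN', 'Streamed'):
        'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record',
}

def lookup_stream(platform, node, instrument_class, method):
    """Return the uFrame dataset path for a known combination, else None."""
    return STREAM_CATALOG.get((platform, node, instrument_class, method))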
#!/usr/bin/env python3
import random
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
from IPython import embed
class Robot:
def __init__(self, length=20.0):
"""
Creates robot and initializes location/orientation to 0, 0, 0.
"""
self.x = 0.0
self.y = 0.0
self.orientation = 0.0
self.length = length
self.steering_noise = 0.0
self.distance_noise = 0.0
self.steering_drift = 0.0
def set(self, x, y, orientation):
"""
Sets a robot coordinate.
"""
self.x = x
self.y = y
self.orientation = orientation % (2.0 * np.pi)
def set_noise(self, steering_noise, distance_noise):
"""
Sets the noise parameters.
"""
# makes it possible to change the noise parameters
# this is often useful in particle filters
self.steering_noise = steering_noise
self.distance_noise = distance_noise
def set_steering_drift(self, drift):
"""
Sets the systematic steering drift parameter.
"""
self.steering_drift = drift
def move(self, steering, distance, tolerance=0.001, max_steering_angle=np.pi / 4.0):
"""
steering = front wheel steering angle, limited by max_steering_angle
distance = total distance driven, must be non-negative
"""
if steering > max_steering_angle:
steering = max_steering_angle
if steering < -max_steering_angle:
steering = -max_steering_angle
if distance < 0.0:
distance = 0.0
# apply noise
steering2 = random.gauss(steering, self.steering_noise)
distance2 = random.gauss(distance, self.distance_noise)
# apply steering drift
steering2 += self.steering_drift
# Execute motion
turn = np.tan(steering2) * distance2 / self.length
if abs(turn) < tolerance:
# approximate by straight line motion
self.x += distance2 * np.cos(self.orientation)
self.y += distance2 * | np.sin(self.orientation) | numpy.sin |
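# Illustrative sketch (an assumption: the `move` method above is cut off, and
# this is a common bicycle-model formulation, not necessarily the source's own
# continuation). It shows the full update that turn = tan(steering) * d / L
# implies, including the turning branch the prompt truncates.
import numpy as np

def bicycle_step(x, y, orientation, length, steering, distance, tolerance=0.001):
    """Advance a bicycle-model pose (x, y, heading) by one motion command."""
    turn = np.tan(steering) * distance / length
    if abs(turn) < tolerance:
        # nearly straight: integrate along the current heading
        x += distance * np.cos(orientation)
        y += distance * np.sin(orientation)
        orientation = (orientation + turn) % (2.0 * np.pi)
    else:
        # follow the arc around the instantaneous turning center
        radius = distance / turn
        cx = x - np.sin(orientation) * radius
        cy = y + np.cos(orientation) * radius
        orientation = (orientation + turn) % (2.0 * np.pi)
        x = cx + np.sin(orientation) * radius
        y = cy - np.cos(orientation) * radius
    return x, y, orientation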
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Trajectory model based on the Karlgaard equations of motion
Created on Mon Nov 28 15:50:50 2016
@author: tr1010 (<NAME>)
"""
import numpy as np
import atmospheres as atmo
import aerodynamics as aero
#from pprint import pprint
def traj_uvw(x, t, earth, mass, areas, normals, centroids, I, scLS, aero_params):
"""
traj_uvw calculates the time derivative of the system state vector. See
the documentation for a full description of the state vector and the
trajectory equations which are being solved, as well as the different frames
of reference used.
Inputs:
x: state vector. Contains:
r, phi, theta, u, v, w, e[0], e[1], e[2], e[3], angvel[0],
angvel[1], angvel[2] = x
t: time variable (necessary for scipy.odeint)
earth: python tuple of earth parameters: mu, RE, J2, omega
mass: mass of spacecraft
areas: n-element array of the spacecraft surface areas
normals: 3xn element array of the outward-pointing unit normal vectors
centroids: n-element array of the centroids of the spacecraft surfaces
I: 3x3 spacecraft inertia tensor
scLS: length-scale associated with spacecraft. By default, it is the
longest of the spacecraft's three dimensions
aero_params: python tuple describing a number of parameters for the
aerodynamics solver in the following order:
KnFM, KnCont, a1, a2, SigN, SigT = aero_params
Outputs:
dxdt: Time derivative of the state vector
"""
# Unpack
e = np.zeros(4)
angvel = np.zeros(3)  # completion: np.zeros(3); api: numpy.zeros
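# Hedged sketch (assumption): one way the 13-element state vector described in the
# docstring could be unpacked into the quaternion and angular-velocity arrays
# initialised above; the helper name unpack_state is hypothetical.
import numpy as np

def unpack_state(x):
    """Split the 13-element state vector from the docstring; illustration only."""
    r, phi, theta, u, v, w = x[:6]
    e = np.asarray(x[6:10])        # attitude quaternion
    angvel = np.asarray(x[10:13])  # body-frame angular velocity
    return r, phi, theta, u, v, w, e, angvel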
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""POS and NER Taggers.
Part of speech taggers (POS) classify words into one of 17 syntactic categories.
Named entity recognition extractors (NER) detect three types of entities: {Person, Location, Organization}.
"""
import numpy as np
from six.moves import range
from ..decorators import memoize
from ..load import load_embeddings, load_ner_model, load_pos_model, load_unified_pos_model
NER_ID_TAG = {0: u'O', 1: u'I-PER', 2: u'I-LOC', 3: u'I-ORG'}
POS_TAG_ID = {u'ADJ': 0, u'ADP': 1, u'ADV': 2, u'AUX': 3, u'CONJ': 4,
u'DET': 5, u'INTJ': 6, u'NOUN': 7, u'NUM': 8, u'PART': 9,
u'PRON': 10, u'PROPN': 11, u'PUNCT': 12, u'SCONJ': 13,
u'SYM': 14, u'VERB': 15, u'X': 16}
POS_ID_TAG = {v:k for k,v in POS_TAG_ID.items()}
class TaggerBase(object):
"""Tagger base class that defines the interface. """
PAD = u'<PAD>'
START = u'<S>'
END = u'</S>'
UNK = u'<UNK>'
def __init__(self, lang='en'):
"""
Args:
lang: language code to decide which chunker to use.
"""
self.lang = lang
self.predictor = self._load_network()
self.ID_TAG = {}
self.add_bias = True
self.context = 2
self.transfer = lambda _:_
@staticmethod
def ngrams(sequence, n, transfer=None):
ngrams_ = []
part1 = (n-1) * [TaggerBase.PAD] + [TaggerBase.START]
if transfer is not None:
part2 = [transfer(_) for _ in sequence]
else:
part2 = sequence
part3 = [TaggerBase.END] + (n-1) * [TaggerBase.PAD]
seq = part1 + part2 + part3
for i in range(n, n+len(sequence)):
yield seq[i-n: i+n+1]
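# Illustrative example (not from the source): with context n=2 and the sentence
# ["I", "love", "Paris"], ngrams() pads the sequence and yields one window of
# 2*n + 1 tokens centred on each word:
#   ['<PAD>', '<S>', 'I', 'love', 'Paris']    # centred on "I"
#   ['<S>', 'I', 'love', 'Paris', '</S>']     # centred on "love"
#   ['I', 'love', 'Paris', '</S>', '<PAD>']   # centred on "Paris"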
def _load_network(self):
raise NotImplementedError()
def annotate(self, sent):
"""Annotate a squence of words with entity tags.
Args:
sent: sequence of strings/words.
"""
preds = []
words = []
for word, fv in self.sent2examples(sent):
probs = self.predictor(fv)
tags = probs.argsort()
tag = self.ID_TAG[tags[-1]]
words.append(word)
preds.append(tag)
# fix_chunks(preds)
annotations = zip(words, preds)
return annotations
def sent2examples(self, sent):
""" Convert ngrams into feature vectors."""
# TODO(rmyeid): use expanders.
words = [w if w in self.embeddings else TaggerBase.UNK for w in sent]
ngrams = TaggerBase.ngrams(words, self.context, self.transfer)
fvs = []
for word, ngram in zip(sent, ngrams):
fv = np.array([self.embeddings.get(w, self.embeddings.zero_vector()) for w in ngram]).flatten()
if self.add_bias:
fv = np.hstack((fv, np.array(1)))
yield word, fv
class NEChunker(TaggerBase):
"""Named entity extractor."""
def __init__(self, lang='en'):
"""
Args:
lang: language code to decide which chunker to use.
"""
super(NEChunker, self).__init__(lang=lang)
self.ID_TAG = NER_ID_TAG
def _load_network(self):
""" Building the predictor out of the model."""
self.embeddings = load_embeddings(self.lang, type='cw', normalize=True)
self.model = load_ner_model(lang=self.lang, version=2)
first_layer, second_layer = self.model
def predict_proba(input_):
hidden = np.tanh(np.dot(first_layer, input_))  # completion: np.dot(first_layer, input_); api: numpy.dot
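# Hedged sketch (assumption): a complete two-layer predictor of the kind the closure
# above appears to build: a tanh hidden layer followed by a linear output layer and a
# softmax over the NER tag scores. The real polyglot model may differ in detail, and
# the function name two_layer_predict_proba is hypothetical.
import numpy as np

def two_layer_predict_proba(input_, first_layer, second_layer):
    """Return a probability distribution over tags; illustration only."""
    hidden = np.tanh(np.dot(first_layer, input_))
    scores = np.dot(second_layer, hidden)
    exp_scores = np.exp(scores - scores.max())  # numerically stable softmax
    return exp_scores / exp_scores.sum()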