prompt (string, 15 to 655k chars) | completion (string, 3 to 32.4k chars) | api (string, 8 to 52 chars)
---|---|---
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import itertools
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.core as core
class TestQrAPI(unittest.TestCase):
def test_dygraph(self):
paddle.disable_static()
def run_qr_dygraph(shape, mode, dtype):
if dtype == "float32":
np_dtype = np.float32
elif dtype == "float64":
np_dtype = np.float64
a = np.random.rand(*shape).astype(np_dtype)
m = a.shape[-2]
n = a.shape[-1]
min_mn = min(m, n)
if mode == "reduced" or mode == "r":
k = min_mn
else:
k = m
np_q_shape = list(a.shape[:-2])
np_q_shape.extend([m, k])
np_r_shape = list(a.shape[:-2])
np_r_shape.extend([k, n])
np_q =
|
np.zeros(np_q_shape)
|
numpy.zeros
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""Provide the CoM linear and angular momentum task.
The centroidal momentum task tries to minimize the difference between the desired and current centroidal moment
given by:
.. math:: ||A_G \dot{q} - h_{G,d}||^2
where :math:`A_G \in \mathbb{R}^{6 \times N}` is the centroidal momentum matrix (CMM, see below for description),
:math:`\dot{q}` are the joint velocities being optimized, and :math:`h_{G,d}` is the desired centroidal momentum.
This is equivalent to the QP objective function :math:`||Ax - b||^2`, by setting :math:`A=A_G`, :math:`x=\dot{q}`,
and :math:`b = h_{G,d}`.
The centroidal momentum (which is the sum of all body spatial momenta computed wrt the CoM) is given by:
.. math:: h_G = A_G \dot{q}
where :math:`h_G = [k_G^\top, l_G^\top]^\top \in \mathbb{R}^6` is the centroidal momentum (the subscript :math:`G`
denotes the CoM) with :math:`k_G \in \mathbb{R}^3` being the angular momentum and :math:`l_G \in \mathbb{R}^3` the
linear part, :math:`\dot{q}` are the joint velocities, and :math:`A_G \in \mathbb{R}^{6 \times N}` (with :math:`N`
is the number of DoFs) is the centroidal momentum matrix (CMM).
"The CMM is computed from the joint space inertia matrix :math:`H(q)`, given by:
.. math:: A_G = ^1X_G^\top S_1 H(q) = ^1X_G^\top H_1(q)
where :math:`^1X_G^\top \in \mathbb{R}^{6 \times 6}` is the spatial transformation matrix that transfers spatial
momentum from the floating base (Body 1) to the CoM (G), :math:`H(q)` is the full joint space inertia matrix,
:math:`H_1 = S_1 H` is the floating base (Body 1) inertia matrix selected using the selector matrix
:math:`S_1 = [1_{6 \times 6}, 0_{6 \times (N-6)}]`.
The spatial transformation matrix is given by:
.. math::
^1X_G^\top = \left[ \begin{array}{cc}
^GR_1 & ^GR_1 S(^1p_G)^\top \\
0 & ^GR_1
\end{array} \right]
where :math:`^GR_1` is the rotation matrix of :math:`G` wrt the floating base (Body 1),
:math:`^1p_G = ^1R_0 (^0p_G - ^0p_1)` is the position vector from the floating base (Body 1) origin to the CoM
expressed in the floating base (Body 1) frame, :math:`S(\cdot)` provides the skew symmetric cross product matrix
such that :math:`S(p)v = p \times v`. Note that the orientation of Frame :math:`G` (CoM) could be selected to be
parallel to the ground inertial (i.e. world) frame, in which case the rotation matrix :math:`^GR_1 = ^0R_1`." [2]
The implementation of this class is inspired by [1, 2] (where [1] is licensed under the LGPLv2).
References:
- [1] "OpenSoT: A whole-body control library for the compliant humanoid robot COMAN", Rocchi et al., 2015
- [2] "Motion Planning and Control of Dynamic Humanoid Locomotion" (PhD thesis), Xin, 2018
"""
import numpy as np
from pyrobolearn.priorities.tasks import JointVelocityTask
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, PyRoboLearn"
__credits__ = ["<NAME> (insight)", "<NAME> (Python + doc)"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class CentroidalMomentumTask(JointVelocityTask):
r"""Centroidal Momentum Task
The centroidal momentum task tries to minimize the difference between the desired and current centroidal moment
given by:
.. math:: ||A_G \dot{q} - h_{G,d}||^2
where :math:`A_G \in \mathbb{R}^{6 \times N}` is the centroidal momentum matrix (CMM, see below for description),
:math:`\dot{q}` are the joint velocities being optimized, and :math:`h_{G,d}` is the desired centroidal momentum.
This is equivalent to the QP objective function :math:`||Ax - b||^2`, by setting :math:`A=A_G`, :math:`x=\dot{q}`,
and :math:`b = h_{G,d}`.
The centroidal momentum (which is the sum of all body spatial momenta computed wrt the CoM) is given by:
.. math:: h_G = A_G \dot{q}
where :math:`h_G = [k_G^\top, l_G^\top]^\top \in \mathbb{R}^6` is the centroidal momentum (the subscript :math:`G`
denotes the CoM) with :math:`k_G \in \mathbb{R}^3` being the angular momentum and :math:`l_G \in \mathbb{R}^3` the
linear part, :math:`\dot{q}` are the joint velocities, and :math:`A_G \in \mathbb{R}^{6 \times N}` (with :math:`N`
is the number of DoFs) is the centroidal momentum matrix (CMM).
"The CMM is computed from the joint space inertia matrix :math:`H(q)`, given by:
.. math:: A_G = ^1X_G^\top S_1 H(q) = ^1X_G^\top H_1(q)
where :math:`^1X_G^\top \in \mathbb{R}^{6 \times 6}` is the spatial transformation matrix that transfers spatial
momentum from the floating base (Body 1) to the CoM (G), :math:`H(q)` is the full joint space inertia matrix,
:math:`H_1 = S_1 H` is the floating base (Body 1) inertia matrix selected using the selector matrix
:math:`S_1 = [1_{6 \times 6}, 0_{6 \times (N-6)}]`.
The spatial transformation matrix is given by:
.. math::
^1X_G^\top = \left[ \begin{array}{cc}
^GR_1 & ^GR_1 S(^1p_G)^\top \\
0 & ^GR_1
\end{array} \right]
where :math:`^GR_1` is the rotation matrix of :math:`G` wrt the floating base (Body 1),
:math:`^1p_G = ^1R_0 (^0p_G - ^0p_1)` is the position vector from the floating base (Body 1) origin to the CoM
expressed in the floating base (Body 1) frame, :math:`S(\cdot)` provides the skew symmetric cross product matrix
such that :math:`S(p)v = p \times v`. Note that the orientation of Frame :math:`G` (CoM) could be selected to be
parallel to the ground inertial (i.e. world) frame, in which case the rotation matrix :math:`^GR_1 = ^0R_1`." [3]
The implementation of this class is inspired by [3, 4] (where [4] is licensed under the LGPLv2).
References:
- [1] "Centroidal momentum matrix of a humanoid robot: structure and properties", Orin et al., 2008
- [2] "Centroidal dynamics of a humanoid robot", Orin et al., 2013
- [3] "Motion Planning and Control of Dynamic Humanoid Locomotion" (PhD thesis), Xin, 2018
- [4] "OpenSoT: A whole-body control library for the compliant humanoid robot COMAN", Rocchi et al., 2015
"""
def __init__(self, model, desired_angular_momentum=None, desired_linear_momentum=None, weight=1., constraints=[]):
"""
Initialize the task.
Args:
model (ModelInterface): model interface.
desired_angular_momentum (np.array[float[3]], None): desired centroidal angular momentum. If None, it
will not be considered. However, if the next parameter :attr:`desired_linear_momentum` is also None,
then this argument will be set to zero.
desired_linear_momentum (np.array[float[3]], None): desired centroidal linear momentum. If None, it
will not be considered. However, if the previous parameter :attr:`desired_angular_momentum` was also set
to None, then this argument will be set to zero.
weight (float, np.array[float[6,6]], np.array[float[3,3]]): weight scalar or matrix associated to the task.
constraints (list[Constraint]): list of constraints associated with the task.
"""
super(CentroidalMomentumTask, self).__init__(model=model, weight=weight, constraints=constraints)
# define desired reference
self.desired_angular_momentum = desired_angular_momentum
self.desired_linear_momentum = desired_linear_momentum
# first update
self.update()
##############
# Properties #
##############
@property
def desired_angular_momentum(self):
"""Get the desired centroidal angular momentum."""
return self._des_k
@desired_angular_momentum.setter
def desired_angular_momentum(self, k_d):
"""Set the desired centroidal angular momentum."""
if k_d is not None:
if not isinstance(k_d, (np.ndarray, list, tuple)):
raise TypeError("Expecting the given desired centroidal angular momentum to be an instance of "
"np.array, instead got: {}".format(type(k_d)))
k_d = np.asarray(k_d)
if len(k_d) != 3:
raise ValueError("Expecting the length of the given desired angular centroidal momentum to be of "
"length 3, instead got: {}".format(len(k_d)))
self._des_k = k_d
@property
def desired_linear_momentum(self):
"""Get the desired centroidal linear momentum."""
return self._des_l
@desired_linear_momentum.setter
def desired_linear_momentum(self, l_d):
"""Set the desired centroidal linear momentum."""
if l_d is not None:
if not isinstance(l_d, (np.ndarray, list, tuple)):
raise TypeError("Expecting the given desired centroidal linear momentum to be an instance of "
"np.array, instead got: {}".format(type(l_d)))
l_d =
|
np.asarray(l_d)
|
numpy.asarray
|
""" Classes and functions for fitting tensors without free water
contamination """
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
import scipy.optimize as opt
from dipy.reconst.base import ReconstModel
from dipy.reconst.dti import (TensorFit, design_matrix, decompose_tensor,
_decompose_tensor_nan, from_lower_triangular,
lower_triangular)
from dipy.reconst.dki import _positive_evals
from dipy.reconst.vec_val_sum import vec_val_vect
from dipy.core.ndindex import ndindex
from dipy.reconst.multi_voxel import multi_voxel_fit
def fwdti_prediction(params, gtab, S0=1, Diso=3.0e-3):
r""" Signal prediction given the free water DTI model parameters.
Parameters
----------
params : (..., 13) ndarray
Model parameters. The last dimension should have the 12 tensor
parameters (3 eigenvalues, followed by the 3 corresponding
eigenvectors) and the volume fraction of the free water compartment.
gtab : a GradientTable class instance
The gradient table for this prediction
S0 : float or ndarray
The non diffusion-weighted signal in every voxel, or across all
voxels. Default: 1
Diso : float, optional
Value of the free water isotropic diffusion. Default is set to 3e-3
$mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
units of diffusion.
Returns
--------
S : (..., N) ndarray
Simulated signal based on the free water DTI model
Notes
-----
The predicted signal is given by:
$S(\theta, b) = S_0 * [(1-f) * e^{-b ADC} + f * e^{-b D_{iso}}]$, where
$ADC = \theta Q \theta^T$, $\theta$ is a unit vector pointing at any
direction on the sphere for which a signal is to be predicted, $b$ is the b
value provided in the GradientTable input for that direction, $Q$ is the
quadratic form of the tensor determined by the input parameters, $f$ is the
free water diffusion compartment, $D_{iso}$ is the free water diffusivity
which is equal to $3 * 10^{-3} mm^{2}s^{-1}$ [1]_.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., <NAME>., 2014.
Optimization of a free water elimination two-compartmental model
for diffusion tensor imaging. NeuroImage 103, 323-333.
doi: 10.1016/j.neuroimage.2014.09.053
"""
evals = params[..., :3]
evecs = params[..., 3:-1].reshape(params.shape[:-1] + (3, 3))
f = params[..., 12]
qform = vec_val_vect(evecs, evals)
lower_dt = lower_triangular(qform, S0)
lower_diso = lower_dt.copy()
lower_diso[..., 0] = lower_diso[..., 2] = lower_diso[..., 5] = Diso
lower_diso[..., 1] = lower_diso[..., 3] = lower_diso[..., 4] = 0
D = design_matrix(gtab)
pred_sig = np.zeros(f.shape + (gtab.bvals.shape[0],))
mask = _positive_evals(evals[..., 0], evals[..., 1], evals[..., 2])
index = ndindex(f.shape)
for v in index:
if mask[v]:
pred_sig[v] = (1 - f[v]) * np.exp(np.dot(lower_dt[v], D.T)) + \
f[v] * np.exp(np.dot(lower_diso[v], D.T))
return pred_sig
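# --- Hedged usage sketch (illustration only, assumes a working dipy install) --
# Builds a single-voxel parameter vector in the (3 eigenvalues, 9 eigenvector
# components, free water fraction f) layout described above and predicts the
# signal with fwdti_prediction. The b-values/b-vectors below are toy values,
# not taken from the reference paper.
def _fwdti_prediction_usage_sketch():
    from dipy.core.gradients import gradient_table
    bvals = np.array([0., 1000., 1000., 1000.])
    bvecs = np.array([[0., 0., 0.],
                      [1., 0., 0.],
                      [0., 1., 0.],
                      [0., 0., 1.]])
    gtab = gradient_table(bvals, bvecs)
    evals = np.array([1.6e-3, 0.4e-3, 0.4e-3])   # tissue tensor eigenvalues
    evecs = np.eye(3).ravel()                    # eigenvectors stored row-wise
    f = 0.3                                      # free water volume fraction
    params = np.concatenate((evals, evecs, [f]))[None, :]   # shape (1, 13)
    return fwdti_prediction(params, gtab, S0=100.0)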
class FreeWaterTensorModel(ReconstModel):
""" Class for the Free Water Elimination Diffusion Tensor Model """
def __init__(self, gtab, fit_method="NLS", *args, **kwargs):
""" Free Water Diffusion Tensor Model [1]_.
Parameters
----------
gtab : GradientTable class instance
fit_method : str or callable
str can be one of the following:
'WLS' for weighted linear least square fit according to [1]_
:func:`fwdti.wls_iter`
'NLS' for non-linear least square fit according to [1]_
:func:`fwdti.nls_iter`
callable has to have the signature:
fit_method(design_matrix, data, *args, **kwargs)
args, kwargs : arguments and key-word arguments passed to the
fit_method. See fwdti.wls_iter, fwdti.nls_iter for
details
References
----------
.. [1] <NAME>., <NAME>., <NAME>., <NAME>., 2014.
Optimization of a free water elimination two-compartmental model
for diffusion tensor imaging. NeuroImage 103, 323-333.
doi: 10.1016/j.neuroimage.2014.09.053
"""
ReconstModel.__init__(self, gtab)
if not callable(fit_method):
try:
fit_method = common_fit_methods[fit_method]
except KeyError:
e_s = '"' + str(fit_method) + '" is not a known fit '
e_s += 'method, the fit method should either be a '
e_s += 'function or one of the common fit methods'
raise ValueError(e_s)
self.fit_method = fit_method
self.design_matrix = design_matrix(self.gtab)
self.args = args
self.kwargs = kwargs
# Check that at least three unique b-values (i.e. two non-zero shells) are given
bmag = int(np.log10(self.gtab.bvals.max()))
b = self.gtab.bvals.copy() / (10 ** (bmag-1)) # normalize b units
b = b.round()
uniqueb = np.unique(b)
if len(uniqueb) < 3:
mes = "fwdti fit requires data for at least 2 non zero b-values"
raise ValueError(mes)
@multi_voxel_fit
def fit(self, data, mask=None):
""" Fit method of the free water elimination DTI model class
Parameters
----------
data : array
The measured signal from one voxel.
mask : array
A boolean array used to mark the coordinates in the data that
should be analyzed. It must have the shape data.shape[:-1]
"""
S0 = np.mean(data[self.gtab.b0s_mask])
fwdti_params = self.fit_method(self.design_matrix, data, S0,
*self.args, **self.kwargs)
return FreeWaterTensorFit(self, fwdti_params)
def predict(self, fwdti_params, S0=1):
""" Predict a signal for this TensorModel class instance given
parameters.
Parameters
----------
fwdti_params : (..., 13) ndarray
The last dimension should have 13 parameters: the 12 tensor
parameters (3 eigenvalues, followed by the 3 corresponding
eigenvectors) and the free water volume fraction.
S0 : float or ndarray
The non diffusion-weighted signal in every voxel, or across all
voxels. Default: 1
Returns
--------
S : (..., N) ndarray
Simulated signal based on the free water DTI model
"""
return fwdti_prediction(fwdti_params, self.gtab, S0=S0)
class FreeWaterTensorFit(TensorFit):
""" Class for fitting the Free Water Tensor Model """
def __init__(self, model, model_params):
""" Initialize a FreeWaterTensorFit class instance.
Since the free water tensor model is an extension of DTI, class
instance is defined as subclass of the TensorFit from dti.py
Parameters
----------
model : FreeWaterTensorModel Class instance
Class instance containing the free water tensor model for the fit
model_params : ndarray (x, y, z, 13) or (n, 13)
All parameters estimated from the free water tensor model.
Parameters are ordered as follows:
1) Three diffusion tensor's eigenvalues
2) Three lines of the eigenvector matrix each containing the
first, second and third coordinates of the eigenvector
3) The volume fraction of the free water compartment
"""
TensorFit.__init__(self, model, model_params)
@property
def f(self):
""" Returns the free water diffusion volume fraction f """
return self.model_params[..., 12]
def predict(self, gtab, S0=1):
r""" Given a free water tensor model fit, predict the signal on the
vertices of a gradient table
Parameters
----------
gtab : a GradientTable class instance
The gradient table for this prediction
S0 : float array
The mean non-diffusion weighted signal in each voxel. Default: 1 in
all voxels.
Returns
--------
S : (..., N) ndarray
Simulated signal based on the free water DTI model
"""
return fwdti_prediction(self.model_params, gtab, S0=S0)
def wls_iter(design_matrix, sig, S0, Diso=3e-3, mdreg=2.7e-3,
min_signal=1.0e-6, piterations=3):
""" Applies weighted linear least squares fit of the water free elimination
model to single voxel signals.
Parameters
----------
design_matrix : array (g, 7)
Design matrix holding the covariants used to solve for the regression
coefficients.
sig : array (g, )
Diffusion-weighted signal for a single voxel data.
S0 : float
Non diffusion weighted signal (i.e. signal for b-value=0).
Diso : float, optional
Value of the free water isotropic diffusion. Default is set to 3e-3
$mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
units of diffusion.
mdreg : float, optional
DTI's mean diffusivity regularization threshold. If standard DTI
diffusion tensor's mean diffusivity is almost near the free water
diffusion value, the diffusion signal is assumed to be only free water
diffusion (i.e. volume fraction will be set to 1 and tissue's diffusion
parameters are set to zero). Default md_reg is 2.7e-3 $mm^{2}.s^{-1}$
(corresponding to 90% of the free water diffusion value).
min_signal : float
The minimum signal value. Needs to be a strictly positive
number. Default: 1.0e-6.
piterations : int, optional
Number of iterations used to refine the precision of f. Default is set
to 3 corresponding to a precision of 0.01.
Returns
-------
All parameters estimated from the free water tensor model.
Parameters are ordered as follows:
1) Three diffusion tensor's eigenvalues
2) Three lines of the eigenvector matrix each containing the
first, second and third coordinates of the eigenvector
3) The volume fraction of the free water compartment
"""
W = design_matrix
# DTI ordinary linear least square solution
log_s = np.log(np.maximum(sig, min_signal))
# Define weights
S2 = np.diag(sig**2)
# DTI weighted linear least square solution
WTS2 = np.dot(W.T, S2)
inv_WT_S2_W = np.linalg.pinv(np.dot(WTS2, W))
invWTS2W_WTS2 = np.dot(inv_WT_S2_W, WTS2)
params = np.dot(invWTS2W_WTS2, log_s)
md = (params[0] + params[2] + params[5]) / 3
# Process voxel if it has significant signal from tissue
if md < mdreg and np.mean(sig) > min_signal and S0 > min_signal:
# General free-water signal contribution
fwsig = np.exp(np.dot(design_matrix,
np.array([Diso, 0, Diso, 0, 0, Diso, 0])))
df = 1 # initialize precision
flow = 0 # lower f evaluated
fhig = 1 # higher f evaluated
ns = 9 # initial number of samples per iteration
for p in range(piterations):
df = df * 0.1
fs = np.linspace(flow+df, fhig-df, num=ns) # sampling f
SFW = np.array([fwsig, ]*ns) # repeat contributions for all values
FS, SI = np.meshgrid(fs, sig)
SA = SI - FS*S0*SFW.T
# SA < 0 means that the signal component from the free water
# compartment is larger than the total signal. These cases occur
# for inappropriately large volume fractions (given the current S0
# value estimated). To overcome this issue, non-positive SA values
# are replaced by the data's minimum positive signal.
SA[SA <= 0] = min_signal
y = np.log(SA / (1-FS))
all_new_params = np.dot(invWTS2W_WTS2, y)
# Select params for lower F2
SIpred = (1-FS)*np.exp(np.dot(W, all_new_params)) + FS*S0*SFW.T
F2 = np.sum(np.square(SI - SIpred), axis=0)
Mind = np.argmin(F2)
params = all_new_params[:, Mind]
f = fs[Mind] # Updated f
flow = f - df # refining precision
fhig = f + df
ns = 19
evals, evecs = decompose_tensor(from_lower_triangular(params))
fw_params = np.concatenate((evals, evecs[0], evecs[1], evecs[2],
np.array([f])), axis=0)
else:
fw_params = np.zeros(13)
if md > mdreg:
fw_params[12] = 1.0
return fw_params
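# --- Hedged sketch of the f-refinement used in wls_iter above -----------------
# Each pass shrinks the sampling step around the current best f by a factor of
# 10, so `piterations` passes give roughly 10**-piterations precision in f. The
# quadratic error below is a stand-in for the WLS fit error F2, not the model.
def _f_grid_refinement_sketch(f_true=0.37, piterations=3):
    df, flow, fhig, ns = 1.0, 0.0, 1.0, 9
    f = 0.5
    for _ in range(piterations):
        df *= 0.1
        fs = np.linspace(flow + df, fhig - df, num=ns)   # candidate f values
        errors = (fs - f_true) ** 2                      # stand-in for F2
        f = fs[np.argmin(errors)]                        # keep the best f
        flow, fhig, ns = f - df, f + df, 19              # narrow the bracket
    return f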
def wls_fit_tensor(gtab, data, Diso=3e-3, mask=None, min_signal=1.0e-6,
piterations=3, mdreg=2.7e-3):
r""" Computes weighted least squares (WLS) fit to calculate self-diffusion
tensor using a linear regression model [1]_.
Parameters
----------
gtab : a GradientTable class instance
The gradient table containing diffusion acquisition parameters.
data : ndarray ([X, Y, Z, ...], g)
Data or response variables holding the data. Note that the last
dimension should contain the data. It makes no copies of data.
Diso : float, optional
Value of the free water isotropic diffusion. Default is set to 3e-3
$mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
units of diffusion.
mask : array, optional
A boolean array used to mark the coordinates in the data that should
be analyzed. It must have the shape data.shape[:-1]
min_signal : float
The minimum signal value. Needs to be a strictly positive
number. Default: 1.0e-6.
piterations : int, optional
Number of iterations used to refine the precision of f. Default is set
to 3 corresponding to a precision of 0.01.
mdreg : float, optional
DTI's mean diffusivity regularization threshold. If standard DTI
diffusion tensor's mean diffusivity is almost near the free water
diffusion value, the diffusion signal is assumed to be only free water
diffusion (i.e. volume fraction will be set to 1 and tissue's diffusion
parameters are set to zero). Default md_reg is 2.7e-3 $mm^{2}.s^{-1}$
(corresponding to 90% of the free water diffusion value).
Returns
-------
fw_params : ndarray (x, y, z, 13)
Matrix containing in the last dimension the free water model parameters
in the following order:
1) Three diffusion tensor's eigenvalues
2) Three lines of the eigenvector matrix each containing the
first, second and third coordinates of the eigenvector
3) The volume fraction of the free water compartment.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., <NAME>., 2014.
Optimization of a free water elimination two-compartmental model
for diffusion tensor imaging. NeuroImage 103, 323-333.
doi: 10.1016/j.neuroimage.2014.09.053
"""
fw_params = np.zeros(data.shape[:-1] + (13,))
W = design_matrix(gtab)
# Prepare mask
if mask is None:
mask = np.ones(data.shape[:-1], dtype=bool)
else:
if mask.shape != data.shape[:-1]:
raise ValueError("Mask is not the same shape as data.")
mask = np.array(mask, dtype=bool, copy=False)
# Prepare S0
S0 = np.mean(data[:, :, :, gtab.b0s_mask], axis=-1)
index = ndindex(mask.shape)
for v in index:
if mask[v]:
params = wls_iter(W, data[v], S0[v], min_signal=min_signal,
Diso=Diso, piterations=piterations, mdreg=mdreg)
fw_params[v] = params
return fw_params
def _nls_err_func(tensor_elements, design_matrix, data, Diso=3e-3,
weighting=None, sigma=None, cholesky=False,
f_transform=False):
""" Error function for the non-linear least-squares fit of the tensor water
elimination model.
Parameters
----------
tensor_elements : array (8, )
The six independent elements of the diffusion tensor followed by
-log(S0) and the volume fraction f of the water elimination
compartment. Note that if cholesky is set to true, tensor elements are
assumed to be written as Cholesky's decomposition elements. If
f_transform is true, volume fraction f has to be converted to
ft = arcsin(2*f - 1) + pi/2
design_matrix : array
The design matrix
data : array
The voxel signal in all gradient directions
Diso : float, optional
Value of the free water isotropic diffusion. Default is set to 3e-3
$mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
units of diffusion.
weighting : str (optional).
Whether to use the Geman McClure weighting criterion (see [1]_
for details)
sigma : float or float array (optional)
If 'sigma' weighting is used, we will weight the error function
according to the background noise estimated either in aggregate over
all directions (when a float is provided), or to an estimate of the
noise in each diffusion-weighting direction (if an array is
provided). If 'gmm', the Geman-Mclure M-estimator is used for
weighting.
cholesky : bool, optional
If true, the diffusion tensor elements were decomposed using cholesky
decomposition. See fwdti.nls_fit_tensor
Default: False
f_transform : bool, optional
If true, the water volume fraction was converted to
ft = arcsin(2*f - 1) + pi/2, ensuring f estimates between 0 and 1.
See fwdti.nls_fit_tensor
Default: False
"""
tensor = np.copy(tensor_elements)
if cholesky:
tensor[:6] = cholesky_to_lower_triangular(tensor[:6])
if f_transform:
f = 0.5 * (1 + np.sin(tensor[7] - np.pi/2))
else:
f = tensor[7]
# This is the predicted signal given the params:
y = (1-f) * np.exp(np.dot(design_matrix, tensor[:7])) + \
f * np.exp(np.dot(design_matrix,
np.array([Diso, 0, Diso, 0, 0, Diso, tensor[6]])))
# Compute the residuals
residuals = data - y
# If we don't want to weight the residuals, we are basically done:
if weighting is None:
# And we return the SSE:
return residuals
se = residuals ** 2
# If the user provided a sigma (e.g 1.5267 * std(background_noise), as
# suggested by Chang et al.) we will use it:
if weighting == 'sigma':
if sigma is None:
e_s = "Must provide sigma value as input to use this weighting"
e_s += " method"
raise ValueError(e_s)
w = 1/(sigma**2)
elif weighting == 'gmm':
# We use the Geman McClure M-estimator to compute the weights on the
# residuals:
C = 1.4826 * np.median(np.abs(residuals - np.median(residuals)))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
w = 1/(se + C**2)
# The weights are normalized to the mean weight (see p. 1089):
w = w/np.mean(w)
# Return the weighted residuals:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return np.sqrt(w * se)
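# --- Hedged sketch of the f_transform used by _nls_err_func above -------------
# ft = arcsin(2*f - 1) + pi/2 maps f in [0, 1] to an unconstrained variable and
# f = 0.5 * (1 + sin(ft - pi/2)) inverts it, which is how the error function
# keeps the water fraction inside [0, 1] during the optimization.
def _f_transform_roundtrip_sketch(f=0.25):
    ft = np.arcsin(2 * f - 1) + np.pi / 2
    f_back = 0.5 * (1 + np.sin(ft - np.pi / 2))   # recovers f up to rounding
    return ft, f_back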
def _nls_jacobian_func(tensor_elements, design_matrix, data, Diso=3e-3,
weighting=None, sigma=None, cholesky=False,
f_transform=False):
"""The Jacobian is the first derivative of the least squares error
function.
Parameters
----------
tensor_elements : array (8, )
The six independent elements of the diffusion tensor followed by
-log(S0) and the volume fraction f of the water elimination
compartment. Note that if f_transform is true, volume fraction f is
converted to ft = arcsin(2*f - 1) + pi/2
design_matrix : array
The design matrix
Diso : float, optional
Value of the free water isotropic diffusion. Default is set to 3e-3
$mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
units of diffusion.
f_transform : bool, optional
If true, the water volume fraction was converted to
ft = arcsin(2*f - 1) + pi/2, ensuring f estimates between 0 and 1.
See fwdti.nls_fit_tensor
Default: False
"""
tensor = np.copy(tensor_elements)
if f_transform:
f = 0.5 * (1 + np.sin(tensor[7] - np.pi/2))
else:
f = tensor[7]
t = np.exp(np.dot(design_matrix, tensor[:7]))
s = np.exp(np.dot(design_matrix,
np.array([Diso, 0, Diso, 0, 0, Diso, tensor[6]])))
T = (f-1.0) * t[:, None] * design_matrix
S = np.zeros(design_matrix.shape)
S[:, 6] = f * s
if f_transform:
df = (t-s) * (0.5*np.cos(tensor[7]-np.pi/2))
else:
df = (t-s)
return
|
np.concatenate((T - S, df[:, None]), axis=1)
|
numpy.concatenate
|
import numpy as np
import scipy.sparse
from scipy.sparse import coo_matrix
from numpy import sin, cos, tan
import sys
import slepc4py
slepc4py.init(sys.argv)
from petsc4py import PETSc
from slepc4py import SLEPc
opts = PETSc.Options()
import pickle as pkl
class Model():
def __init__(self, model_variables, model_parameters, physical_constants):
self.model_variables = model_variables
self.model_parameters = model_parameters
self.physical_constants = physical_constants
for key in model_parameters:
exec('self.'+str(key)+' = model_parameters[\''+str(key)+'\']')
for key in physical_constants:
exec('self.'+str(key)+' = physical_constants[\''+str(key)+'\']')
self.calculate_nondimensional_parameters()
self.set_up_grid(self.R, self.h)
def set_up_grid(self, R, h):
"""
Creates the r and theta coordinate vectors
inputs:
R: radius of outer core in m
h: layer thickness in m
outputs: None
"""
self.R = R
self.h = h
self.Size_var = self.Nk*self.Nl
self.SizeM = len(self.model_variables)*self.Size_var
self.rmin = (R-h)/self.r_star
self.rmax = R/self.r_star
self.dr = (self.rmax-self.rmin)/(self.Nk)
ones = np.ones((self.Nk,self.Nl))
self.r = (ones.T*np.linspace(self.rmin+self.dr/2., self.rmax-self.dr/2.,num=self.Nk)).T # r value at center of each cell
self.rp = (ones.T*np.linspace(self.rmin+self.dr, self.rmax, num=self.Nk)).T # r value at plus border (top) of cell
self.rm = (ones.T*np.linspace(self.rmin, self.rmax-self.dr, num=self.Nk)).T # r value at minus border (bottom) of cell
self.dth = np.pi/(self.Nl)
self.th = ones*np.linspace(self.dth/2., np.pi-self.dth/2., num=self.Nl) # theta value at center of cell
self.thp = ones*np.linspace(self.dth, np.pi, num=self.Nl) # theta value at plus border (top) of cell
self.thm = ones*np.linspace(0,np.pi-self.dth, num=self.Nl)
return None
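# Worked example (hypothetical numbers): with rmin = 0.9, rmax = 1.0 and
# Nk = 5, dr = 0.02 and the cell-centre radii are 0.91, 0.93, 0.95, 0.97,
# 0.99; rp and rm hold the corresponding upper and lower cell faces.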
def calculate_nondimensional_parameters(self):
'''
Calculates the non-dimensional parameters in model from the physical
constants.
'''
self.t_star = 1/self.Omega # seconds
self.r_star = self.R # meters
self.P_star = self.rho*self.r_star**2/self.t_star**2
self.B_star = (self.eta*self.mu_0*self.rho/self.t_star)**0.5
self.u_star = self.r_star/self.t_star
self.E = self.nu*self.t_star/self.r_star**2
self.Pm = self.nu/self.eta
return None
def set_Br(self, BrT):
''' Sets the background radial (Br) magnetic field in Tesla
BrT = Br values for each cell in Tesla'''
if isinstance(BrT, (float, int)):
self.BrT = np.ones((self.Nk, self.Nl))*BrT
self.Br = self.BrT/self.B_star
elif isinstance(BrT, np.ndarray) and BrT.shape == (self.Nk, self.Nl):
self.BrT = BrT
self.Br = self.BrT/self.B_star
else:
raise TypeError("BrT must either be an int, float, or np.ndarray of correct size")
def set_Bth(self, BthT):
''' Sets the background theta (Bth) magnetic field in Tesla
BthT = Bth values for each cell in Tesla'''
if isinstance(BthT, (float, int)):
self.BthT = np.ones((self.Nk, self.Nl))*BthT
self.Bth = self.BthT/self.B_star
elif isinstance(BthT, np.ndarray) and BthT.shape == (self.Nk, self.Nl) :
self.BthT = BthT
self.Bth = self.BthT/self.B_star
else:
raise TypeError("BthT must either be an int, float, or np.ndarray of correct size")
def set_Bph(self, BphT):
''' Sets the background phi magnetic field in Tesla
BphT = Bph values for each cell in Tesla'''
if isinstance(BphT, (float, int)):
self.BphT = np.ones((self.Nk, self.Nl))*BphT
self.Bph = self.BphT/self.B_star
elif isinstance(BphT, np.ndarray) and BphT.shape ==(self.Nk, self.Nl):
self.BphT = BphT
self.Bph = self.BphT/self.B_star
else:
raise TypeError("BphT must either be an int, float, or np.ndarray of correct size")
def set_Br_dipole(self, Bd, const=0):
''' Sets the background magnetic field to a dipole field with
Bd = dipole constant in Tesla '''
self.Bd = Bd
self.BrT = 2*cos(self.th)*Bd + const
self.Br = self.BrT/self.B_star
self.set_Bth(0.0)
self.set_Bph(0.0)
return None
def set_B_dipole(self, Bd, const=0):
''' Sets the background magnetic field to a dipole field with
Bd = dipole constant in Tesla '''
self.Bd = Bd
self.BrT = 2*cos(self.th)*Bd + const
self.Br = self.BrT/self.B_star
self.BthT = sin(self.th)*Bd + const
self.Bth = self.BthT/self.B_star
self.set_Bph(0.0)
return None
def set_B_abs_dipole(self, Bd, const=0):
''' Sets the background magnetic Br and Bth field to the absolute value of a
dipole field with Bd = dipole constant in Tesla '''
self.Bd = Bd
self.BrT = 2*abs(cos(self.th))*Bd + const
self.Br = self.BrT/self.B_star
self.BthT = abs(sin(self.th))*Bd + const
self.Bth = self.BthT/self.B_star
self.set_Bph(0.0)
return None
def set_B_dipole_absrsymth(self, Bd, const=0):
''' Sets the background magnetic Br and Bth field to the absolute value of a
dipole field with Bd = dipole constant in Tesla '''
self.Bd = Bd
self.BrT = 2*abs(cos(self.th))*Bd + const
self.Br = self.BrT/self.B_star
self.BthT = sin(self.th)*Bd + const
self.Bth = self.BthT/self.B_star
self.set_Bph(0.0)
return None
def set_Br_abs_dipole(self, Bd, const=0, noise=None, N=10000):
''' Sets the background Br magnetic field to the absolute value of a
dipole with Bd = dipole constant in Tesla.
Optionally, the dipole can be offset by a constant (const) or numerical noise can be added (noise) '''
if noise:
from scipy.special import erf
def folded_mean(mu, s):
return s*(2/np.pi)**0.5*np.exp(-mu**2/(2*s**2)) - mu*erf(-mu/(2*s**2)**0.5)
self.Bd = Bd
Bdip = 2*Bd*np.abs(np.cos(self.th))
Bdip_noise = np.zeros_like(Bdip)
for (i,B) in enumerate(Bdip):
Bdip_noise[i] = folded_mean(Bdip[i], noise)
self.BrT = np.ones((self.Nk, self.Nl))*Bdip_noise
self.Br = self.BrT/self.B_star
else:
self.Bd = Bd
self.BrT = 2*abs(cos(self.th))*Bd + const
self.Br = self.BrT/self.B_star
self.set_Bth(0.0)
self.set_Bph(0.0)
return None
def set_Br_sinfunc(self, Bmin, Bmax, sin_exp=2.5):
self.BrT = ((1-sin(self.th)**sin_exp)*(Bmax-Bmin)+Bmin)
self.Br = self.BrT/self.B_star
self.set_Bth(0.0)
self.set_Bph(0.0)
return None
def set_B_by_type(self, B_type, Bd=0.0, Br=0.0, Bth=0.0, Bph=0.0, const=0.0, Bmin=0.0, Bmax=0.0, sin_exp=2.5, noise=0.0):
''' Sets the background magnetic field to the given type.
B_type choices (matching the strings checked below):
* dipole : Br, Bth dipole; specify scalar dipole constant Bd (T)
* absDipole : absolute value of dipole in Br and Bth; specify scalar Bd (T)
* dipoleBr : Br dipole, Bth=0; specify scalar dipole constant Bd (T)
* absDipoleBr : absolute value of dipole in Br; specify scalar Bd (T), optional noise
* constantBr : constant Br, Bth=0; specify scalar Br (T)
* set : specify array Br, Bth, and Bph values in (T)
* dipoleAbsRSymTh : absolute value of dipole in Br, symmetric dipole in Bth; specify scalar Bd (T)
* sinfuncBr : Br = (1-sin(th)**sin_exp)*(Bmax-Bmin)+Bmin; specify Bmin, Bmax, sin_exp
'''
if B_type == 'dipole':
self.set_B_dipole(Bd, const=const)
elif B_type == 'dipoleBr':
self.set_Br_dipole(Bd, const=const)
elif B_type == 'constantBr':
self.set_Br(Br*np.ones((self.Nk, self.Nl)))
self.set_Bth(0.0)
self.set_Bph(0.0)
elif B_type == 'set':
self.set_Br(Br)
self.set_Bth(Bth)
self.set_Bph(Bph)
elif B_type == 'absDipoleBr':
self.set_Br_abs_dipole(Bd, const=const, noise=noise)
elif B_type == 'absDipole':
self.set_B_abs_dipole(Bd, const=const)
elif B_type == 'dipoleAbsRSymTh':
self.set_B_dipole_absrsymth(Bd, const=const)
elif B_type == 'sinfuncBr':
self.set_Br_sinfunc(Bmin, Bmax, sin_exp=sin_exp)
else:
raise ValueError('B_type not valid')
def set_CC_skin_depth(self, period):
''' sets the magnetic skin depth for conducting core BC
inputs:
period = period of oscillation in years
returns:
delta_C = skin depth in (m)
'''
self.delta_C = np.sqrt(2*self.eta/(2*np.pi/(period*365.25*24*3600)))
self.physical_constants['delta_C'] = self.delta_C
return self.delta_C
def set_Uphi(self, Uphi):
'''Sets the background velocity field in m/s'''
if isinstance(Uphi, (float, int)):
self.Uphi = np.ones((self.Nk, self.Nl))*Uphi
elif isinstance(Uphi, np.ndarray):
self.Uphi = Uphi
else:
raise TypeError("The value passed for Uphi must be either an int, float, or np.ndarray")
self.U0 = self.Uphi*self.r_star/self.t_star
return None
def set_buoyancy(self, drho_dr):
'''Sets the buoyancy structure of the layer'''
self.omega_g = np.sqrt(-self.g/self.rho*drho_dr)
self.N = self.omega_g**2*self.t_star**2
def set_buoy_by_type(self, buoy_type, buoy_ratio):
self.omega_g0 = buoy_ratio*self.Omega
if buoy_type == 'constant':
self.omega_g = np.ones((self.Nk, self.Nl))*self.omega_g0
elif buoy_type == 'linear':
self.omega_g = (np.ones((self.Nk, self.Nl)).T*np.linspace(0, self.omega_g0, self.Nk)).T
self.N = self.omega_g**2*self.t_star**2
def get_index(self, k, l, var):
'''
Takes coordinates for a point, gives back index in matrix.
inputs:
k: k grid value from 0 to K-1
l: l grid value from 0 to L-1
var: variable name in model_variables
outputs:
index of location in matrix
'''
Nk = self.Nk
Nl = self.Nl
SizeM = self.SizeM
Size_var = self.Size_var
if (var not in self.model_variables):
raise RuntimeError('variable not in model_variables')
elif not (l >= 0 and l <= Nl-1):
raise RuntimeError('l index out of bounds')
elif not (k >= 0 and k <= Nk-1):
raise RuntimeError('k index out of bounds')
return Size_var*self.model_variables.index(var) + k + l*Nk
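# Worked example (hypothetical sizes): with model_variables = ['ur', 'uth'],
# Nk = 3, Nl = 2 (so Size_var = 6), get_index(k=1, l=1, var='uth') returns
# 6*1 + 1 + 1*3 = 10, i.e. the second variable block plus the column-major offset.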
def get_variable(self, vector, var):
'''
Takes a flat vector and a variable name, returns the variable in a
np.matrix
inputs:
vector: flat vector array with len == SizeM
var: str of variable name in model
outputs:
variable in np.array
'''
Nk = self.Nk
Nl = self.Nl
if (var not in self.model_variables):
raise RuntimeError('variable not in model_variables')
elif len(vector) != self.SizeM:
raise RuntimeError('vector given is not correct length in this \
model')
else:
var_start = self.get_index(0, 0, var)
var_end = self.get_index(Nk-1, Nl-1, var)+1
variable = np.array(np.reshape(vector[var_start:var_end], (Nk, Nl), 'F'))
return variable
def create_vector(self, variables):
'''
Takes a set of variables and creates a vector out of
them.
inputs:
variables: list of (Nk x Nl) matrices or vectors for each model
variable
outputs:
vector of size (SizeM x 1)
'''
Nk = self.Nk
Nl = self.Nl
vector = np.array([1])
# Check Inputs:
if len(variables) != len(self.model_variables):
raise RuntimeError('Incorrect number of variable vectors passed')
for var in variables:
vector = np.vstack((vector, np.reshape(var, (Nk*Nl, 1))))
return np.array(vector[1:])
def add_gov_equation(self, name, variable):
setattr(self, name, GovEquation(self, variable))
def setup_SLEPc(self, nev=10, Target=None, Which='TARGET_MAGNITUDE'):
self.EPS = SLEPc.EPS().create()
self.EPS.setDimensions(nev, PETSc.DECIDE)
self.EPS.setOperators(self.A_SLEPc, self.M_SLEPc)
self.EPS.setProblemType(SLEPc.EPS.ProblemType.PGNHEP)
self.EPS.setTarget(Target)
self.EPS.setWhichEigenpairs(eval('self.EPS.Which.'+Which))
self.EPS.setFromOptions()
self.ST = self.EPS.getST()
self.ST.setType(SLEPc.ST.Type.SINVERT)
return self.EPS
def solve_SLEPc(self, Target=None):
self.EPS.solve()
conv = self.EPS.getConverged()
vs, ws = PETSc.Mat.getVecs(self.A_SLEPc)
vals = []
vecs = []
for ind in range(conv):
vals.append(self.EPS.getEigenpair(ind, ws))
vecs.append(ws.getArray())
return vals, vecs
def save_mat_PETSc(self, filename, mat, type='Binary'):
''' Saves a Matrix in PETSc format '''
if type == 'Binary':
viewer = PETSc.Viewer().createBinary(filename, 'w')
elif type == 'ASCII':
viewer = PETSc.Viewer().createASCII(filename, 'w')
viewer(mat)
def load_mat_PETSc(self, filename, type='Binary'):
''' Loads and returns a Matrix stored in PETSc format '''
if type == 'Binary':
viewer = PETSc.Viewer().createBinary(filename, 'r')
elif type == 'ASCII':
viewer = PETSc.Viewer().createASCII(filename, 'r')
return PETSc.Mat().load(viewer)
def save_vec_PETSc(self, filename, vec, type='Binary'):
''' Saves a vector in PETSc format '''
if type == 'Binary':
viewer = PETSc.Viewer().createBinary(filename, 'w')
elif type == 'ASCII':
viewer = PETSc.Viewer().createASCII(filename, 'w')
viewer(vec)
def load_vec_PETSc(self, filename, type='Binary'):
''' Loads and returns a vector stored in PETSc format '''
if type == 'Binary':
viewer = PETSc.Viewer().createBinary(filename, 'r')
elif type == 'ASCII':
viewer = PETSc.Viewer().createASCII(filename, 'r')
return PETSc.Vec().load(viewer)
def save_model(self, filename):
''' Saves the model structure without the computed A and M matrices'''
try:
self.A
except:
pass
else:
A = self.A
del self.A
try:
self.M
except:
pass
else:
M = self.M
del self.M
pkl.dump(self, open(filename, 'wb'))
try:
A
except:
pass
else:
self.A = A
try:
M
except:
pass
else:
self.M = M
def make_d2Mat(self):
self.d2_rows = []
self.d2_cols = []
self.d2_vals = []
for var in self.model_variables:
self.add_gov_equation('d2_'+var, var)
exec('self.d2_'+var+'.add_d2_bd0(\''+var+'\','+str(self.m)+')')
exec('self.d2_rows += self.d2_'+var+'.rows')
exec('self.d2_cols += self.d2_'+var+'.cols')
exec('self.d2_vals += self.d2_'+var+'.vals')
self.d2Mat = coo_matrix((self.d2_vals, (self.d2_rows, self.d2_cols)),
shape=(self.SizeM, self.SizeM))
return self.d2Mat
def make_dthMat(self):
self.dth_rows = []
self.dth_cols = []
self.dth_vals = []
for var in self.model_variables:
self.add_gov_equation('dth_'+var, var)
exec('self.dth_'+var+'.add_dth(\''+var+'\','+str(self.m)+')')
exec('self.dth_rows += self.dth_'+var+'.rows')
exec('self.dth_cols += self.dth_'+var+'.cols')
exec('self.dth_vals += self.dth_'+var+'.vals')
self.dthMat = coo_matrix((self.dth_vals, (self.dth_rows, self.dth_cols)),
shape=(self.SizeM, self.SizeM))
return self.dthMat
def make_dphMat(self):
self.dph_rows = []
self.dph_cols = []
self.dph_vals = []
for var in self.model_variables:
self.add_gov_equation('dph_'+var, var)
exec('self.dph_'+var+'.add_dph(\''+var+'\','+str(self.m)+')')
exec('self.dph_rows += self.dph_'+var+'.rows')
exec('self.dph_cols += self.dph_'+var+'.cols')
exec('self.dph_vals += self.dph_'+var+'.vals')
self.dphMat = coo_matrix((self.dph_vals, (self.dph_rows, self.dph_cols)),
shape=(self.SizeM, self.SizeM))
return self.dphMat
def make_Bobs(self):
BrobsT = 2*np.ones((self.Nk, self.Nl))*cos(self.th)
self.Brobs = BrobsT/self.B_star
gradBrobsT = -2*np.ones((self.Nk, self.Nl))*sin(self.th)/self.R
self.gradBrobs = gradBrobsT/self.B_star*self.r_star
self.add_gov_equation('Bobs', self.model_variables[0])
self.Bobs.add_term('uth', self.gradBrobs)
self.Bobs.add_dth('uth', C= self.Brobs)
self.Bobs.add_dph('uph', C= self.Brobs)
self.BobsMat = coo_matrix((self.Bobs.vals, (self.Bobs.rows, self.Bobs.cols)),
shape=(self.SizeM, self.SizeM))
return self.BobsMat
def make_operators(self):
"""
:return:
"""
dr = self.dr
r = self.r
rp = self.rp
rm = self.rm
dth = self.dth
th = self.th
thm = self.thm
thp = self.thp
Nk = self.Nk
Nl = self.Nl
m = self.m
delta_C = self.delta_C/self.r_star
E = self.E
Pm = self.Pm
# ddr
self.ddr_kp1 = rp**2/(2*r**2*dr)
self.ddr_km1 = -rm**2/(2*r**2*dr)
self.ddr = 1/r
self.ddr_kp1_b0 = np.array(self.ddr_kp1)
self.ddr_km1_b0 = np.array(self.ddr_km1)
self.ddr_b0 = np.array(self.ddr)
self.ddr_kp1_b0[-1,:] = np.zeros(Nl)
self.ddr_b0[-1,:] = -rm[-1,:]**2/(2*r[-1,:]**2*dr)
self.ddr_km1_b0[0,:] = np.zeros(Nl)
self.ddr_b0[0,:] = rp[0,:]**2/(2*r[0,:]**2*dr)
self.ddr_kp1_bd0 = np.array(self.ddr_kp1)
self.ddr_km1_bd0 = np.array(self.ddr_km1)
self.ddr_bd0 = np.array(self.ddr)
self.ddr_kp1_bd0[-1,:] = np.zeros(Nl)
self.ddr_bd0[-1,:] = (2*rp[-1,:]**2 -rm[-1,:]**2)/(2*r[-1,:]**2*dr)
self.ddr_km1_bd0[0,:] = np.zeros(Nl)
self.ddr_bd0[0,:] = (rp[0,:]**2 - 2*rm[0,:]**2)/(2*r[0,:]**2*dr)
# ddr for Conducting core boundary conditions
self.ddr_kp1_ccb0 = np.array(self.ddr_kp1_b0)
self.ddr_kp1_ccb0[0,:] = rp[0,:]**2/(r[0,:]**2*2*dr)
self.ddr_km1_ccb0 = np.array(self.ddr_km1_b0)
self.ddr_km1_ccb0[0,:] = np.zeros(Nl)
self.ddr_ccb0 = np.array(self.ddr_b0)
self.ddr_ccb0[0,:] = rp[0,:]**2/(r[0,:]**2*2*dr)
self.ddr_u_ccb0 = -rm[0,:]**2/(r[0,:]**2*dr)
# ddth
self.ddth_lp1 = sin(thp)/(2*r*sin(th)*dth)
self.ddth_lm1 = -sin(thm)/(2*r*sin(th)*dth)
self.ddth = (sin(thp)-sin(thm))/(2*r*sin(th)*dth)
# ddph
self.ddph = 1j*m/(r*sin(th))
# drP
self.drP_kp1 = rp**2/(2*dr*r**2)
self.drP_km1 = -rm**2/(2*dr*r**2)
self.drP_lp1 = -sin(thp)/(4*r*sin(th))
self.drP_lm1 = -sin(thm)/(4*r*sin(th))
self.drP = -(sin(thp)+sin(thm))/(4*r*sin(th))
self.drP_kp1[-1,:] = np.zeros(Nl)
self.drP[-1,:] = rp[-1,:]**2/(2*dr*r[-1,:]**2) \
- (sin(thp[-1,:]) + sin(thm[-1,:]))/(4*r[-1,:]*sin(th[-1,:]))
self.drP_km1[0,:] = np.zeros(Nl)
self.drP[0,:] = -rm[0,:]**2/(2*dr*r[0,:]**2) \
- (
|
sin(thp[0,:])
|
numpy.sin
|
import sys
import time
from math import *
import matplotlib.pyplot as plt
import numpy as np
from numba import cuda, float64
import bvp_problem
import collocation_coefficients
import gauss_coefficients
import matrix_factorization_cuda
import matrix_operation_cuda
from BVPDAEReadWriteData import bvpdae_write_data
from bvp_problem import _abvp_f, _abvp_g, _abvp_r, _abvp_Df, _abvp_Dg, _abvp_Dr
import pathlib
import solve_babd_system
import mesh_strategies
# TODO: adaptive size
TPB_N = 16 # threads per block in time dimension, must be bigger than (m_max - m_min + 1)
N_shared = TPB_N + 1
TPB_m = 16 # threads per block in collocation dimension
TPB = 32 # threads per block for 1d kernel
m_collocation = 0
global_m_min = 0
global_m_max = 0
global_m_range = 0
global_m_sum = 0
global_size_y = 0
global_size_z = 0
global_size_p = 0
global_y_shared_size = 0
residual_type = 1
scale_by_time = True
scale_by_initial = False
residual_compute_type = "nodal"
save_result = False
def collocation_solver_parallel(m_init=3, mesh_strategy="adaptive"):
global global_size_y, global_size_z, global_size_p, \
m_collocation, TPB_m, global_y_shared_size, \
global_m_min, global_m_max, global_m_range, global_m_sum
# construct the bvp-dae problem
# obtain the initial input
bvp_dae = bvp_problem.BvpDae()
size_y = bvp_dae.size_y
global_size_y = size_y
size_z = bvp_dae.size_z
global_size_z = size_z
size_p = bvp_dae.size_p
global_size_p = size_p
size_inequality = bvp_dae.size_inequality
size_sv_inequality = bvp_dae.size_sv_inequality
output_file = bvp_dae.output_file
example_name = output_file.split('.')[0]
t_span0 = bvp_dae.T0
N = t_span0.shape[0]
y0 = bvp_dae.Y0
z0 = bvp_dae.Z0
p0 = bvp_dae.P0
# copy the data
para = np.copy(p0)
t_span = np.copy(t_span0)
# parameters for numerical solvers
tol = bvp_dae.tolerance
max_iter = bvp_dae.maximum_newton_iterations
max_iter = 500
max_mesh = bvp_dae.maximum_mesh_refinements
max_nodes = bvp_dae.maximum_nodes
max_nodes = 4000
min_nodes = 3
max_linesearch = 20
alpha = 0.1 # continuation parameter
if size_inequality > 0 or size_sv_inequality > 0:
alpha_m = 1e-6
else:
alpha_m = 0.1
beta = 0.9 # scale factor
# specify collocation coefficients
m_min = 3
m_max = 9
global_m_min = m_min
if residual_compute_type == "nodal":
# extra when computing the nodal residual
# with extra collocation point in each interval
global_m_max = m_max + 1
global_m_range = m_max - m_min + 2
global_y_shared_size = TPB_N * (m_max + 1)
else:
global_m_max = m_max
global_m_range = m_max - m_min + 1
global_y_shared_size = TPB_N * m_max
# m_init = 5 # number of collocation points
# m_init2 = 4
m_collocation = m_max
# minimum number of power of 2 as the TPB in m direction
pos = ceil(log(m_max, 2))
TPB_m = max(int(pow(2, pos)), 2) # at least two threads in y direction
# parameters for mesh
thres_remove = 1
thres_add = 1
rho = 2 * thres_add
m_d = 2
m_i = 2
decay_rate_thres = 0.25
M = 8 # number of blocks used to solve the BABD system in parallel
success_flag = 1
max_residual = 1 / tol
# benchmark data
initial_input_time, initial_input_count, residual_time, residual_count, \
jacobian_time, jacobian_count, reduce_jacobian_time, reduce_jacobian_count, \
recover_babd_time, recover_babd_count, segment_residual_time, segment_residual_count = benchmark_data_init()
solver_start_time = time.time()
# initial setup for the mesh of collocation points
m_N, m_accumulate = collocation_points_init(m_init, m_min, m_max, N)
# m_N, m_accumulate = collocation_points_init_2(m_init, m_init2, m_min, m_max, N)
# m_max + 1 as computing the residual needs m + 1 collocation points
m_sum, a_m, b_m, c_m = collocation_coefficients_init(m_min, m_max + 1)
mesh_before = np.zeros(N - 1)
global_m_sum = m_sum[-1]
start_time_initial_input = time.time()
# form the initial input of the solver
y, y_dot, z_tilde = form_initial_input_parallel(
size_y, size_z, size_p, m_min, m_max, m_N, m_accumulate, c_m, N, y0, z0, para)
y_tilde = np.zeros((m_accumulate[-1], size_y))
initial_input_time += (time.time() - start_time_initial_input)
initial_input_count += 1
for alpha_iter in range(max_iter):
print("Continuation iteration: {}, solving alpha = {}".format(alpha_iter, alpha))
mesh_it = 0
iter_time = 0
for iter_time in range(max_iter):
start_time_residual = time.time()
# compute the residual
norm_f_q, y_tilde, f_a, f_b, r_bc = compute_f_q_parallel(
size_y, size_z, size_p, m_min, m_max, m_N, m_accumulate, m_sum, N, a_m, b_m,
t_span, y, y_dot, z_tilde, para, alpha)
print("\tnorm: {0:.8f}".format(norm_f_q))
residual_time += (time.time() - start_time_residual)
residual_count += 1
if norm_f_q < tol:
print('\talpha = {}, solution is found. Number of nodes: {}'.format(alpha, N))
break
start_time_jacobian = time.time()
# compute each necessary element in the Jacobian matrix
J, V, D, W, B_0, B_n, V_n = construct_jacobian_parallel(
size_y, size_z, size_p, m_min, m_max, N,
m_N, m_accumulate, m_sum, a_m, b_m,
t_span, y, y_tilde, z_tilde, para, alpha)
jacobian_time += (time.time() - start_time_jacobian)
jacobian_count += 1
start_time_reduce_jacobian = time.time()
# compute each necessary element in the reduced BABD system
A, C, H, b = reduce_jacobian_parallel(
size_y, size_z, size_p, m_max, N,
m_N, m_accumulate,
W, D, J, V, f_a, f_b)
reduce_jacobian_time += (time.time() - start_time_reduce_jacobian)
reduce_jacobian_count += 1
# solve the BABD system
# perform the partition factorization on the Jacobian matrix with qr decomposition
index, R, E, J_reduced, G, d, A_tilde, C_tilde, H_tilde, b_tilde = \
solve_babd_system.partition_factorization_parallel(size_y, size_p, M, N, A, C, H, b)
# construct the partitioned Jacobian system
sol = solve_babd_system.construct_babd_mshoot(
size_y, 0, size_p, M, A_tilde, C_tilde, H_tilde, b_tilde, B_0, B_n, V_n, -r_bc)
# perform the qr decomposition to transfer the system
solve_babd_system.qr_decomposition(size_y, size_p, M + 1, sol)
# perform the backward substitution to obtain the solution to the linear system of Newton's method
solve_babd_system.backward_substitution(M + 1, sol)
# obtain the solution from the reduced BABD system
delta_s_r, delta_para = solve_babd_system.recover_babd_solution(M, size_y, 0, size_p, sol)
# get the solution to the BABD system
delta_y = solve_babd_system.partition_backward_substitution_parallel(
size_y, size_p, M, N, index, delta_s_r, delta_para, R, G, E, J_reduced, d)
start_time_recover_babd = time.time()
# recover delta_k from the reduced BABD system
delta_k, delta_y_dot, delta_z_tilde = recover_delta_k_parallel(
size_y, size_z, size_p, m_max, N, m_N, m_accumulate, delta_y, delta_para, f_a, J, V, W)
recover_babd_time += (time.time() - start_time_recover_babd)
recover_babd_count += 1
# line search
alpha0 = 1
line_search = 0
for line_search in range(max_linesearch):
y_new = y + alpha0 * delta_y
y_dot_new = y_dot + alpha0 * delta_y_dot
z_tilde_new = z_tilde + alpha0 * delta_z_tilde
para_new = para + alpha0 * delta_para
start_time_residual = time.time()
norm_f_q_new, _, _, _, _ = compute_f_q_parallel(
size_y, size_z, size_p, m_min, m_max, m_N, m_accumulate, m_sum, N, a_m, b_m,
t_span, y_new, y_dot_new, z_tilde_new, para_new, alpha)
residual_time += (time.time() - start_time_residual)
residual_count += 1
if norm_f_q_new < norm_f_q:
y = y_new
y_dot = y_dot_new
z_tilde = z_tilde_new
para = para_new
break
alpha0 /= 2
if line_search >= (max_linesearch - 1):
print("\tLine search fails.")
start_time_segment_residual = time.time()
if residual_compute_type == "nodal":
residual, residual_collocation, max_residual, max_y_error, max_z_error = \
compute_residual_nodal(size_y, size_z, size_p, m_min, m_max, N, m_N, m_accumulate,
c_m, t_span, y, y_dot, z_tilde, para, alpha, tol, m_sum, a_m, b_m, M)
else:
residual, residual_collocation, max_residual = \
compute_segment_residual_collocation_parallel(
size_y, size_z, size_p, m_min, m_max, N, m_N, m_accumulate,
c_m, t_span, y, y_dot, z_tilde, para, alpha, tol)
print('\tresidual error = {}, number of nodes = {}.'.format(max_residual, N))
z = recover_solution_parallel(size_z, m_max, N, m_N, m_accumulate, z_tilde)
if mesh_strategy == "adaptive":
N, t_span, y, z, m_N, m_accumulate = \
mesh_strategies.adaptive_remesh(
size_y, size_z, size_p, m_min, m_max, m_init, N, alpha,
m_N, m_accumulate, t_span, y, z, para, y_tilde, y_dot, z_tilde,
residual, residual_collocation,
thres_remove, thres_add, tol)
else:
N, t_span, y, z = \
mesh_strategies.normal_remesh_add_only(
size_y, size_z, N, t_span, y, z, residual, thres_remove, thres_add)
# update the collocation points distribution
m_N, m_accumulate = collocation_points_init(m_init, m_min, m_max, N)
# m_N, m_accumulate = collocation_points_init_2(m_init, m_init2, m_min, m_max, N)
segment_residual_time += (time.time() - start_time_segment_residual)
segment_residual_count += 1
mesh_it += 1
start_time_initial_input = time.time()
y, y_dot, z_tilde = form_initial_input_parallel(
size_y, size_z, size_p, m_min, m_max, m_N, m_accumulate, c_m, N, y, z, para)
initial_input_time += (time.time() - start_time_initial_input)
initial_input_count += 1
print("\tRemeshed the problem. Number of nodes = {}".format(N))
if mesh_sanity_check(N, mesh_it, max_mesh, max_nodes, min_nodes):
print("\talpha = {}, number of nodes is beyond limit after remesh!".format(
alpha))
success_flag = 0
break
# check whether the iteration exceeds the maximum number
if alpha_iter >= (max_iter - 1) or iter_time >= (max_iter - 1) \
or N > max_nodes or mesh_it > max_mesh or N < min_nodes:
print("\talpha = {}, reach the maximum iteration numbers and the problem does not converge!".format(alpha))
success_flag = 0
break
start_time_segment_residual = time.time()
if residual_compute_type == "nodal":
residual, residual_collocation, max_residual, max_y_error, max_z_error = \
compute_residual_nodal(size_y, size_z, size_p, m_min, m_max, N, m_N, m_accumulate,
c_m, t_span, y, y_dot, z_tilde, para, alpha, tol, m_sum, a_m, b_m, M)
else:
residual, residual_collocation, max_residual = \
compute_segment_residual_collocation_parallel(
size_y, size_z, size_p, m_min, m_max, N, m_N, m_accumulate,
c_m, t_span, y, y_dot, z_tilde, para, alpha, tol)
segment_residual_time += (time.time() - start_time_segment_residual)
segment_residual_count += 1
print('\tresidual error = {}, number of nodes = {}.'.format(max_residual, N))
if max_residual > 1:
z = recover_solution_parallel(size_z, m_max, N, m_N, m_accumulate, z_tilde)
if mesh_strategy == "adaptive":
N, t_span, y, z, m_N, m_accumulate = \
mesh_strategies.adaptive_remesh(
size_y, size_z, size_p, m_min, m_max, m_init, N, alpha,
m_N, m_accumulate, t_span, y, z, para, y_tilde, y_dot, z_tilde,
residual, residual_collocation,
thres_remove, thres_add, tol)
else:
N, t_span, y, z = \
mesh_strategies.normal_remesh_add_only(
size_y, size_z, N, t_span, y, z, residual, thres_remove, thres_add)
# update the collocation points distribution
m_N, m_accumulate = collocation_points_init(m_init, m_min, m_max, N)
# m_N, m_accumulate = collocation_points_init_2(m_init, m_init2, m_min, m_max, N)
mesh_it += 1
start_time_initial_input = time.time()
y, y_dot, z_tilde = form_initial_input_parallel(
size_y, size_z, size_p, m_min, m_max, m_N, m_accumulate, c_m, N, y, z, para)
initial_input_time += (time.time() - start_time_initial_input)
initial_input_count += 1
print("\tRemeshed the problem. Number of nodes = {}".format(N))
if mesh_sanity_check(N, mesh_it, max_mesh, max_nodes, min_nodes):
print("\talpha = {}, number of nodes is beyond limit after remesh!".format(
alpha))
success_flag = 0
break
else:
print("\talpha = {}, solution is found with residual error = {}. Number of nodes = {}".format(
alpha, max_residual, N))
if alpha <= alpha_m:
print("Final solution is found, alpha = {}. Number of nodes: {}".format(alpha, N))
break
alpha *= beta
total_time = time.time() - solver_start_time
print("Maximum residual: {}".format(max_residual))
print("Elapsed time: {}".format(total_time))
# recover the final solution
z = recover_solution_parallel(size_z, m_max, N, m_N, m_accumulate, z_tilde)
# write benchmark result
benchmark_dir = "./benchmark_performance/"
# create the directory
pathlib.Path(benchmark_dir).mkdir(0o755, parents=True, exist_ok=True)
benchmark_file = benchmark_dir + example_name + "_parallel_benchmark_M_{}.data".format(M)
write_benchmark_result(benchmark_file,
initial_input_time, initial_input_count,
residual_time, residual_count,
jacobian_time, jacobian_count,
reduce_jacobian_time, reduce_jacobian_count,
recover_babd_time, recover_babd_count,
segment_residual_time, segment_residual_count,
total_time)
# if alpha <= alpha_m and success_flag:
if alpha <= alpha_m:
if N >= max_nodes:
N_saved = 101
# if the number of nodes is too many, just take part of them to save
index_saved = np.linspace(0, N - 1, num=N_saved, dtype=int)
# write solution to the output file
error = bvpdae_write_data(
output_file, N_saved, size_y, size_z, size_p,
t_span[index_saved], y[index_saved, :], z[index_saved, :], para)
else:
# directly write solution to the output file
error = bvpdae_write_data(output_file, N, size_y, size_z, size_p, t_span, y, z, para)
if error != 0:
print('Write file failed.')
# record the solved example
with open("test_results_mesh_strategy_{}_residual_{}_m_init_{}.txt".format(
mesh_strategy, residual_compute_type, m_init), 'a') as f:
if alpha <= alpha_m and success_flag:
f.write("{} solved successfully. alpha = {}. Elapsed time: {}(s). "
"Total number of time nodes: {}. Total number of collocation points: {}.\n".format(
example_name, alpha, total_time, N, m_accumulate[-1]))
print("Problem solved successfully.")
else:
f.write("{} solved unsuccessfully. alpha = {}. Elapsed time: {}(s). "
"Total number of time nodes: {}. Total number of collocation points: {}.\n".format(
example_name, alpha, total_time, N, m_accumulate[-1]))
print("Problem solved unsuccessfully.")
# plot the result
plot_result(size_y, size_z, t_span, y, z)
return
def collocation_points_init(m_init, m_min, m_max, N):
"""
Initial collocation points set up for adaptive collocation methods.
:param m_init: initial global number of collocation points used
:param m_min: minimum number of collocation points allowed
:param m_max: maximum number of collocation points allowed
:param N: number of time nodes
:return:
m_N: number of collocation points in each interval,
size: (N - 1,)
m_accumulate: accumulated number of collocation points prior to each interval;
range(m_accumulate[i], m_accumulate[i + 1]) corresponds to
the collocation points in interval i;
m_accumulate[-1] holds the total number of collocation points in the mesh
size: (N, )
"""
# sanity check
if m_min > m_max:
        raise ValueError("Minimum number of collocation points given is bigger than the maximum given!")
m_N = m_init * np.ones(N - 1, dtype=int) # total of N - 1 intervals
m_accumulate = np.zeros(N, dtype=int) # accumulated collocation points used
for i in range(N):
m_accumulate[i] = i * m_init
return m_N, m_accumulate
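# Illustrative example (not executed; values assumed for demonstration): with
# m_init = 3 and N = 5, the uniform initialization above yields
#   m_N          = [3, 3, 3, 3]        # 4 intervals, 3 collocation points each
#   m_accumulate = [0, 3, 6, 9, 12]    # interval i owns points m_accumulate[i] .. m_accumulate[i + 1] - 1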
def lobatto_weights_init(m_min, m_max):
# sanity check
if m_min > m_max:
        raise ValueError("Minimum number of collocation points given is bigger than the maximum given!")
w_m = np.zeros((m_max - m_min + 1, m_max)) # coefficients w for each m
for m in range(m_min, m_max + 1):
lobatto_coef = collocation_coefficients.lobatto(m)
w = lobatto_coef.w
for j in range(m):
w_m[m - m_min, j] = w[j]
return w_m
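# Usage sketch (not executed): the Lobatto weights for m collocation points are
# read back as w_m[m - m_min, 0: m]; trailing entries of each row remain zero.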
def collocation_points_init_2(m_init, m_init2, m_min, m_max, N):
"""
    Initial collocation points set up with a split distribution: the first half of
    the mesh uses m_init collocation points per interval and the second half uses m_init2.
    :param m_init: number of collocation points used in each interval of the first half of the mesh
    :param m_init2: number of collocation points used in each interval of the second half of the mesh
:param m_min: minimum number of collocation points allowed
:param m_max: maximum number of collocation points allowed
:param N: number of time nodes
:return:
m_N: number of collocation points in each interval,
size: (N - 1,)
m_accumulate: accumulated number of collocation points prior to each interval;
range(m_accumulate[i], m_accumulate[i + 1]) corresponds to
the collocation points in interval i;
m_accumulate[-1] holds the total number of collocation points in the mesh
size: (N, )
"""
# sanity check
if m_min > m_max:
        raise ValueError("Minimum number of collocation points given is bigger than the maximum given!")
m_N = np.ones(N - 1, dtype=int) # total of N - 1 intervals
m_accumulate = np.zeros(N, dtype=int) # accumulated collocation points used
for i in range(N - 1):
if i < N // 2:
m_N[i] = m_init
m_accumulate[i + 1] = m_accumulate[i] + m_init
else:
m_N[i] = m_init2
m_accumulate[i + 1] = m_accumulate[i] + m_init2
return m_N, m_accumulate
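# Illustrative example (not executed; values assumed for demonstration): with
# m_init = 3, m_init2 = 5, and N = 5, the split initialization above yields
#   m_N          = [3, 3, 5, 5]        # first half of the mesh uses m_init, second half m_init2
#   m_accumulate = [0, 3, 6, 11, 16]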
def collocation_coefficients_init(m_min, m_max):
"""
Generate the coefficients for all the collocation points.
:param m_min: minimum number of collocation points allowed
:param m_max: maximum number of collocation points allowed
:return:
m_sum: accumulated stages for each collocation point;
range(m_sum[m - m_min], m_sum[m - m_min + 1]) corresponds to the stages of collocation point m
size: (m_max - m_min + 2, )
        a_m: coefficients a in row dominant order;
            a_m[m_sum[m - m_min]: m_sum[m - m_min + 1], :] corresponds to the coefficients of collocation point m
            shape: (m_sum[-1], m_max)
        b_m: coefficients b in row dominant order;
            b_m[m - m_min, :] corresponds to the coefficients of collocation point m
            shape: (m_max - m_min + 1, m_max)
        c_m: coefficients c in row dominant order;
            c_m[m - m_min, :] corresponds to the coefficients of collocation point m
            shape: (m_max - m_min + 1, m_max)
"""
# sanity check
if m_min > m_max:
        raise ValueError("Minimum number of collocation points given is bigger than the maximum given!")
m_sum = np.zeros(m_max - m_min + 2, dtype=int)
for m in range(m_min, m_max + 1):
m_sum[m - m_min + 1] = m_sum[m - m_min] + m
    a_m = np.zeros((m_sum[-1], m_max))  # coefficients a for each m
b_m = np.zeros((m_max - m_min + 1, m_max)) # coefficients b for each m
c_m = np.zeros((m_max - m_min + 1, m_max)) # coefficients c for each m
for m in range(m_min, m_max + 1):
rk = collocation_coefficients.lobatto(m)
a = rk.A
b = rk.b
c = rk.c
for j in range(m):
for k in range(m):
a_m[m_sum[m - m_min] + j, k] = a[j, k]
b_m[m - m_min, j] = b[j]
c_m[m - m_min, j] = c[j]
return m_sum, a_m, b_m, c_m
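# Usage sketch (not executed): recovering the Lobatto tableau for a particular
# number of collocation points m from the stacked coefficient arrays returned above.
#   m_sum, a_m, b_m, c_m = collocation_coefficients_init(m_min, m_max)
#   a = a_m[m_sum[m - m_min]: m_sum[m - m_min + 1], 0: m]   # m x m matrix A
#   b = b_m[m - m_min, 0: m]                                # length-m vector b
#   c = c_m[m - m_min, 0: m]                                # length-m vector c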
def gauss_coefficients_init(m_min, m_max):
"""
Generate the coefficients for all the collocation points.
:param m_min: minimum number of collocation points allowed
:param m_max: maximum number of collocation points allowed
:return:
        tau_m: time coefficients of the gauss points used, in row dominant order;
            tau_m[m - m_min, :] corresponds to the coefficients of (m + 1) gauss points
            shape: (m_max - m_min + 1, m_max + 1)
        w_m: weight coefficients of the gauss points used, in row dominant order;
            w_m[m - m_min, :] corresponds to the coefficients of (m + 1) gauss points
            shape: (m_max - m_min + 1, m_max + 1)
"""
# sanity check
if m_min > m_max:
        raise ValueError("Minimum number of collocation points given is bigger than the maximum given!")
    tau_m = np.zeros((m_max - m_min + 1, m_max + 1))  # gauss time points for each m
    w_m = np.zeros((m_max - m_min + 1, m_max + 1))  # gauss quadrature weights for each m
for m in range(m_min, m_max + 1):
gauss_coef = gauss_coefficients.gauss(m + 1)
tau = gauss_coef.t
w = gauss_coef.w
for j in range(m + 1):
tau_m[m - m_min, j] = tau[j]
w_m[m - m_min, j] = w[j]
return tau_m, w_m
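# Quadrature sketch (not executed; f, t_i, and delta_t are hypothetical placeholders,
# and the abscissae tau are assumed to be normalized to [0, 1]): approximating the
# integral of f over [t_i, t_{i+1}] with the (m + 1)-point Gauss rule stored above.
#   tau = tau_m[m - m_min, 0: m + 1]
#   w = w_m[m - m_min, 0: m + 1]
#   integral = delta_t * sum(w[j] * f(t_i + tau[j] * delta_t) for j in range(m + 1))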
# start implementations for forming initial input
def form_initial_input_parallel(size_y, size_z, size_p, m_min, m_max, m_N, m_accumulate, c_m, N, y0, z0, p0):
"""
Form the initial input for the collocation algorithm. The inputs for the solver
are usually just ODE variables, DAE variables, and parameter variables. However,
the inputs to the collocation solver should be ODE variables at each time node, the
derivatives of the ODE variables and the value of DAE variables at each collocation
point.
:param size_y: number of ODE variables of the BVP-DAE problem
:param size_z: number of DAE variables of the BVP-DAE problem
:param size_p: number of parameter variables of the BVP-DAE problem
:param m_min: minimum number of collocation points allowed
:param m_max: maximum number of collocation points allowed
:param m_N: number of collocation points in each time interval
:param m_accumulate: accumulated number of collocation points in each time interval
:param c_m: coefficients of the collocation points used;
from m_min to m_max in row dominant order;
size: (m_max - m_min + 1) * m_max, with zeros in empty spaces
:param N: number of time nodes
    :param y0: values of the ODE variables in matrix form
        dimension: N x size_y, where each row corresponds to the values at one time node
    :param z0: values of the DAE variables in matrix form
        dimension: N x size_z, where each row corresponds to the values at one time node
    :param p0: values of the parameter variables in vector form
        dimension: size_p
:return:
y0: values of the ODE variables in matrix form
dimension: N x size_y, where each row corresponds the values at each time node
        y_dot: values of the derivatives of ODE variables in row dominant matrix form
            dimension: m_accumulate[-1] x size_y, where each row corresponds to the values at one collocation point.
            The index for the jth collocation point from the ith time node is m_accumulate[i] + j.
        z_tilde: values of the DAE variables in row dominant matrix form
            dimension: m_accumulate[-1] x size_z, where each row corresponds to the values at one collocation point.
            The index for the jth collocation point from the ith time node is m_accumulate[i] + j.
"""
# warp dimension for CUDA kernel
grid_dims_1d = ((N - 1) + TPB_N - 1) // TPB_N
block_dims_1d = TPB_N
# transfer memory from CPU to GPU
d_m_N = cuda.to_device(m_N)
d_m_accumulate = cuda.to_device(m_accumulate)
d_c_m = cuda.to_device(c_m)
d_y0 = cuda.to_device(y0)
d_z0 = cuda.to_device(z0)
d_p0 = cuda.to_device(p0)
# create holder for temporary variables
d_y_temp = cuda.device_array((m_accumulate[-1], size_y), dtype=np.float64)
# create holder for output variables
d_y_dot = cuda.device_array((m_accumulate[-1], size_y), dtype=np.float64)
d_z_tilde = cuda.device_array((m_accumulate[-1], size_z), dtype=np.float64)
form_initial_input_kernel[grid_dims_1d, block_dims_1d](
size_y, size_z, size_p, m_min, m_max, N, d_m_N, d_m_accumulate, d_c_m,
d_y0, d_z0, d_p0, d_y_temp, d_y_dot, d_z_tilde)
# transfer the memory back to CPU
y_dot = d_y_dot.copy_to_host()
z_tilde = d_z_tilde.copy_to_host()
return y0, y_dot, z_tilde
@cuda.jit
def form_initial_input_kernel(
size_y, size_z, size_p, m_min, m_max, N, d_m_N, d_m_accumulate, d_c_m,
d_y0, d_z0, d_p0, d_y_temp, d_y_dot, d_z_tilde):
"""
    Kernel function for forming the initial input to the collocation solver.
    Each thread handles one time interval: it linearly interpolates the ODE and
    DAE variables at the collocation points of that interval and evaluates the
    ODE right-hand side there.
    :param size_y: number of ODE variables
    :param size_z: number of DAE variables
    :param size_p: number of parameter variables
    :param m_min: minimum number of collocation points allowed
    :param m_max: maximum number of collocation points allowed
    :param N: number of time nodes
    :param d_m_N: device array holding the number of collocation points of each interval
    :param d_m_accumulate: device array holding the accumulated number of collocation points prior to each interval
    :param d_c_m: device array holding the collocation abscissae c for each allowed m
    :param d_y0: device array holding the ODE variable values at the time nodes
    :param d_z0: device array holding the DAE variable values at the time nodes
    :param d_p0: device array holding the parameter variable values
    :param d_y_temp: device array used as a holder for the interpolated ODE values at the collocation points
    :param d_y_dot: device array holding the output ODE derivatives at the collocation points
    :param d_z_tilde: device array holding the output DAE values at the collocation points
:return:
"""
# Define an array in the shared memory
# The size and type of the arrays must be known at compile time
shared_d_c_m = cuda.shared.array(shape=(global_m_range, global_m_max), dtype=float64)
shared_d_y0 = cuda.shared.array(shape=(N_shared, global_size_y), dtype=float64)
shared_d_z0 = cuda.shared.array(shape=(N_shared, global_size_z), dtype=float64)
shared_d_p0 = cuda.shared.array(shape=(global_size_p, ), dtype=float64)
# cuda thread index
i = cuda.grid(1)
tx = cuda.threadIdx.x # thread index in x direction
if i >= (N - 1):
return
# only need 1 dimensional memory load here
for j in range(size_y):
shared_d_y0[tx + 1, j] = d_y0[i + 1, j]
for j in range(size_z):
shared_d_z0[tx + 1, j] = d_z0[i + 1, j]
for j in range(size_p):
shared_d_p0[j] = d_p0[j]
if tx == 0:
        # let the first thread load the coefficients
        # the reason is that the number of threads in the block may be less than (m_max - m_min + 1),
        # so we cannot let each thread load its corresponding row, which would not be scalable
for j in range(m_max - m_min + 1):
# load coefficients c if the thread index is in range
for k in range(m_min + j):
shared_d_c_m[j, k] = d_c_m[j, k]
# load the additional column in shared memory using the first thread
for j in range(size_y):
shared_d_y0[0, j] = d_y0[i, j]
for j in range(size_z):
shared_d_z0[0, j] = d_z0[i, j]
cuda.syncthreads() # finish the loading here
m = d_m_N[i]
for j in range(d_m_accumulate[i], d_m_accumulate[i + 1]):
for k in range(size_y):
d_y_temp[j, k] = (1 - shared_d_c_m[m - m_min, j - d_m_accumulate[i]]) * shared_d_y0[tx, k] + \
shared_d_c_m[m - m_min, j - d_m_accumulate[i]] * shared_d_y0[tx + 1, k]
for k in range(size_z):
d_z_tilde[j, k] = (1 - shared_d_c_m[m - m_min, j - d_m_accumulate[i]]) * shared_d_z0[tx, k] + \
shared_d_c_m[m - m_min, j - d_m_accumulate[i]] * shared_d_z0[tx + 1, k]
_abvp_f(d_y_temp[j, 0: size_y], d_z_tilde[j, 0: size_z], shared_d_p0[0: size_p],
d_y_dot[j, 0: size_y])
return
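# Serial reference sketch of the interpolation step performed by
# form_initial_input_kernel above. It mirrors the per-interval linear
# interpolation but deliberately omits the _abvp_f evaluation of the ODE
# right-hand side; it is illustrative only and not called by the solver.
def form_initial_input_interpolation_sketch(size_y, size_z, m_min, m_N, m_accumulate, c_m, N, y0, z0):
    y_collocation = np.zeros((m_accumulate[-1], size_y))
    z_tilde = np.zeros((m_accumulate[-1], size_z))
    for i in range(N - 1):
        m = m_N[i]
        for j in range(m_accumulate[i], m_accumulate[i + 1]):
            # relative position of the jth collocation point inside interval i
            c = c_m[m - m_min, j - m_accumulate[i]]
            y_collocation[j, :] = (1.0 - c) * y0[i, :] + c * y0[i + 1, :]
            z_tilde[j, :] = (1.0 - c) * z0[i, :] + c * z0[i + 1, :]
    return y_collocation, z_tilde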
# finish implementations for forming initial input
# start implementation for computing the residual of the system
def compute_f_q_parallel(size_y, size_z, size_p, m_min, m_max, m_N, m_accumulate, m_sum, N,
a_m, b_m, t_span, y, y_dot, z_tilde, p, alpha):
"""
Compute the residual of the BVP-DAE using collocation method.
:param size_y: number of ode variables of the problem
:param size_z: number of algebraic variables
:param size_p: number of parameter variables
:param m_min: minimum number of collocation points allowed
:param m_max: maximum number of collocation points allowed
:param m_N: number of collocation points in each interval
    :param m_accumulate: accumulated collocation points in each interval
:param m_sum: accumulated collocation sums in collocation coefficients
:param N: number of time nodes of the problem
    :param a_m: stacked coefficients a of the Lobatto method in the feasible range [m_min, m_max]
                in row dominant order
                shape: (m_sum[-1], m_max)
    :param b_m: stacked coefficients b of the Lobatto method in the feasible range [m_min, m_max]
                in row dominant order
                shape: (m_max - m_min + 1, m_max)
:param t_span: time span of the mesh
:param y: values of the ODE variables in matrix form
dimension: N x size_y, where each row corresponds the values at each time node
    :param y_dot: values of the derivatives of ODE variables in row dominant matrix form
        shape: (m_accumulate[-1], size_y), where each row corresponds to the values at one collocation point.
        The index for the jth collocation point from the ith time node is m_accumulate[i] + j.
    :param z_tilde: values of the DAE variables in row dominant matrix form
        dimension: (m_accumulate[-1], size_z), where each row corresponds to the values at one collocation point.
        The index for the jth collocation point from the ith time node is m_accumulate[i] + j.
:param p: values of the parameter variables in vector form
shape: (size_p, )
:param alpha: continuation parameter of the Newton method
:return:
norm_f_q : infinite norm of the residual
        y_tilde: values of ODE variables y at each collocation point in row dominant matrix form
            shape: (m_accumulate[-1], size_y), where each row corresponds to the values at one collocation point
f_a : matrix of the residual f_a for each time node in row dominant matrix form
shape: ((N - 1), m_max * (size_y + size_z)), where each row corresponds the values at each time node
f_b : matrix of the residual f_b for each time node in row dominant matrix form
shape: (N - 1, size_y), where each row corresponds the values at each time node
r_bc : boundary conditions of the system in vector form
shape: (size_y + size_p, )
"""
# calculate the y values on collocation points
# combine those two kernels into one maybe?
# grid dimension of the warp of CUDA
grid_dims_1d = ((N - 1) + TPB_N - 1) // TPB_N
block_dims_1d = TPB_N
# transfer memory from CPU to GPU
d_m_N = cuda.to_device(m_N)
d_m_accumulate = cuda.to_device(m_accumulate)
d_m_sum = cuda.to_device(m_sum)
d_a_m = cuda.to_device(a_m)
d_b_m = cuda.to_device(b_m)
d_t_span = cuda.to_device(t_span)
d_y = cuda.to_device(y)
d_y_dot = cuda.to_device(y_dot)
# container to hold temporary variables
d_sum_j = cuda.device_array((m_accumulate[-1], size_y), dtype=np.float64)
# container to hold output variables y_tilde
d_y_tilde = cuda.device_array((m_accumulate[-1], size_y), dtype=np.float64)
# calculate the y variables at collocation points with the kernel function
collocation_update_kernel[grid_dims_1d, block_dims_1d](
size_y, m_min, m_max, N, d_m_N, d_m_accumulate, d_m_sum, d_a_m, d_t_span, d_y, d_y_dot, d_sum_j, d_y_tilde)
# load the memory back from GPU to CPU
y_tilde = d_y_tilde.copy_to_host()
# transfer memory from CPU to GPU
d_z_tilde = cuda.to_device(z_tilde)
d_p = cuda.to_device(p)
# container to hold temporary variables
d_sum_i = cuda.device_array((N - 1, size_y), dtype=np.float64)
# container to hold derivatives
d_r_h = cuda.device_array((m_accumulate[-1], size_y), dtype=np.float64)
d_r_g = cuda.device_array((m_accumulate[-1], size_z), dtype=np.float64)
# container to hold residuals, be careful about the dimensions
d_f_a = cuda.device_array((N - 1, m_max * (size_y + size_z)), dtype=np.float64)
d_f_b = cuda.device_array((N - 1, size_y), dtype=np.float64)
# calculate the f_a and f_b at each time node with the kernel function
compute_f_q_kernel1[grid_dims_1d, block_dims_1d](size_y, size_z, m_max, N, d_m_N, d_m_accumulate,
d_y_dot, d_y_tilde, d_z_tilde, d_p, alpha, d_r_h, d_r_g, d_f_a)
compute_f_q_kernel2[grid_dims_1d, block_dims_1d](
size_y, m_min, m_max, N, d_m_N, d_m_accumulate, d_b_m, d_t_span, d_y, d_y_dot, d_sum_i, d_f_b)
# load the memory back from GPU to CPU
f_a = d_f_a.copy_to_host()
f_b = d_f_b.copy_to_host()
# calculate the boundary conditions
r_bc = np.zeros((size_y + size_p), dtype=np.float64)
# this boundary function is currently on CPU
_abvp_r(y[0, 0: size_y], y[N - 1, 0: size_y], p, r_bc)
# return the norm of the residual directly,
# no need to form the residual as the infinity norm is used here
norm_f_a = cuda_infinity_norm(d_f_a.reshape((N - 1) * m_max * (size_y + size_z), order='C'))
norm_f_b = cuda_infinity_norm(d_f_b.reshape((N - 1) * size_y, order='C'))
norm_r = np.linalg.norm(r_bc, np.inf)
norm_f_q = max(norm_f_a, norm_f_b, norm_r)
return norm_f_q, y_tilde, f_a, f_b, r_bc
'''
Kernel function to compute each part of the residual of the system.
d_f_a: (N - 1) x m_max * (size_y + size_z)
d_f_b: (N - 1) x size_y
d_r_h: m_accumulate[-1] x size_y
d_r_g: m_accumulate[-1] x size_z
'''
@cuda.jit
def compute_f_q_kernel1(size_y, size_z, m_max, N, d_m_N, d_m_accumulate,
d_y_dot, d_y_tilde, d_z_tilde, d_p, alpha, d_r_h, d_r_g, d_f_a):
i = cuda.grid(1)
if i < (N - 1):
m = d_m_N[i]
# zero initialize the f_a
for j in range(m_max * (size_y + size_z)):
d_f_a[i, j] = 0.0
for j in range(m):
_abvp_f(d_y_tilde[d_m_accumulate[i] + j, 0: size_y], d_z_tilde[d_m_accumulate[i] + j, 0: size_z], d_p,
d_r_h[d_m_accumulate[i] + j, 0: size_y])
_abvp_g(
d_y_tilde[d_m_accumulate[i] + j, 0: size_y], d_z_tilde[d_m_accumulate[i] + j, 0: size_z], d_p, alpha,
d_r_g[d_m_accumulate[i] + j, 0: size_z])
# calculate the residual $h - y_dot$ on each collocation point
for k in range(size_y):
d_r_h[d_m_accumulate[i] + j, k] -= d_y_dot[d_m_accumulate[i] + j, k]
# copy the result to f_a of the collocation point to the corresponding position
start_index_y = j * (size_y + size_z)
start_index_z = start_index_y + size_y
# copy the residual of h and g to the corresponding positions
for k in range(size_y):
d_f_a[i, start_index_y + k] = d_r_h[d_m_accumulate[i] + j, k]
for k in range(size_z):
d_f_a[i, start_index_z + k] = d_r_g[d_m_accumulate[i] + j, k]
return
@cuda.jit
def compute_f_q_kernel2(size_y, m_min, m_max, N, d_m_N, d_m_accumulate, d_b_m, d_t_span, d_y, d_y_dot, d_sum_i, d_f_b):
shared_d_b_m = cuda.shared.array(shape=(global_m_range, global_m_max), dtype=float64)
shared_d_y = cuda.shared.array(shape=(N_shared, global_size_y), dtype=float64)
# cuda thread index
i = cuda.grid(1)
tx = cuda.threadIdx.x # thread index in x direction
if i >= (N - 1):
return
if tx == 0:
# load coefficients b using the first thread in x direction
for m in range(m_max - m_min + 1):
for j in range(m + m_min):
shared_d_b_m[m, j] = d_b_m[m, j]
# only need 1 dimensional memory load here
for j in range(size_y):
shared_d_y[tx + 1, j] = d_y[i + 1, j]
if tx == 0:
# load the additional column in shared memory using the first thread
for j in range(size_y):
shared_d_y[0, j] = d_y[i, j]
cuda.syncthreads() # finish the loading here
m = d_m_N[i]
# initialize d_sum_i as zeros
for k in range(size_y):
d_sum_i[i, k] = 0
for j in range(m):
for k in range(size_y):
d_sum_i[i, k] += shared_d_b_m[m - m_min, j] * d_y_dot[d_m_accumulate[i] + j, k]
delta_t_i = d_t_span[i + 1] - d_t_span[i]
for k in range(size_y):
d_f_b[i, k] = shared_d_y[tx + 1, k] - shared_d_y[tx, k] - delta_t_i * d_sum_i[i, k]
return
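# Serial summary of compute_f_q_kernel2 above: the continuity residual of
# interval i with m = m_N[i] collocation points is
#   f_b[i, :] = y[i + 1, :] - y[i, :] - delta_t_i * sum_j b[j] * y_dot[m_accumulate[i] + j, :]
# where b are the Lobatto quadrature weights for m collocation points.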
'''
Kernel method for computing the values of y variables on each collocation point.
'''
@cuda.jit
def collocation_update_kernel(size_y, m_min, m_max, N, d_m_N, d_m_accumulate, d_m_sum,
d_a_m, d_t_span, d_y, d_y_dot, d_sum_j, d_y_tilde):
# Define an array in the shared memory
# The size and type of the arrays must be known at compile time
shared_d_a = cuda.shared.array(shape=(global_m_sum, global_m_max), dtype=float64)
shared_d_y = cuda.shared.array(shape=(TPB_N, global_size_y), dtype=float64)
shared_d_y_dot = cuda.shared.array(shape=(global_y_shared_size, global_size_y), dtype=float64)
# cuda thread index
i = cuda.grid(1)
tx = cuda.threadIdx.x # thread index in x direction
bx = cuda.blockIdx.x # block index in x direction
if i >= (N - 1):
return
m = d_m_N[i]
if tx == 0:
# load coefficients a to shared memory using the first thread in x direction
for m_index in range(m_max - m_min + 1):
# load 2d coefficients a if the thread index is in range
for j in range(d_m_sum[m_index], d_m_sum[m_index + 1]):
for k in range(m_min + m_index):
shared_d_a[j, k] = d_a_m[j, k]
# load d_y to shared memory
for l in range(size_y):
# load d_y to shared memory using the first thread in y direction
shared_d_y[tx, l] = d_y[i, l]
# load d_y_dot to shared memory
for j in range(d_m_accumulate[i], d_m_accumulate[i + 1]):
for l in range(size_y):
# load d_y_dot to shared memory
# each thread loads the all corresponding collocation points
shared_d_y_dot[j - d_m_accumulate[bx * TPB_N], l] = d_y_dot[j, l]
cuda.syncthreads()
delta_t_i = d_t_span[i + 1] - d_t_span[i]
m_start = d_m_sum[m - m_min]
for j in range(m):
# loop j for each collocation point t_ij
# zero the initial value
for l in range(size_y):
d_sum_j[d_m_accumulate[i] + j, l] = 0
# loop k to perform the integral on all collocation points
for k in range(m):
# loop l to loop over all the y variables
for l in range(size_y):
d_sum_j[d_m_accumulate[i] + j, l] += \
shared_d_a[m_start + j, k] * shared_d_y_dot[d_m_accumulate[i] + k - d_m_accumulate[bx * TPB_N], l]
# loop l to loop over all the y variables to update the result
for l in range(size_y):
d_y_tilde[d_m_accumulate[i] + j, l] = shared_d_y[tx, l] + delta_t_i * d_sum_j[d_m_accumulate[i] + j, l]
return
# finish the implementation of computing the residual
# start the implementation of constructing the Jacobian matrix
def construct_jacobian_parallel(size_y, size_z, size_p, m_min, m_max, N, m_N, m_accumulate, m_sum,
a_m, b_m, t_span, y, y_tilde, z_tilde, p, alpha):
"""
Compute each small matrix elements in the Jacobian of the system.
:param size_y: number of ode variables of the problem
:param size_z: number of algebraic variables
:param size_p: number of parameter variables
:param m_min: minimum number of collocation points allowed
:param m_max: maximum number of collocation points allowed
:param N: number of time nodes of the problem
:param m_N: number of collocation points in each interval
:param m_accumulate: accumulated collocation points in each interval
:param m_sum: accumulated collocation sums in collocation coefficients
    :param a_m: stacked coefficients a of the Lobatto method in the feasible range [m_min, m_max]
                in row dominant order
                shape: (m_sum[-1], m_max)
    :param b_m: stacked coefficients b of the Lobatto method in the feasible range [m_min, m_max]
                in row dominant order
                shape: (m_max - m_min + 1, m_max)
:param t_span: time span of the mesh
:param y: values of the ODE variables in matrix form
    :param y_tilde: values of ODE variables y at each collocation point in row dominant matrix form
        shape: (m_accumulate[-1], size_y), where each row corresponds to the values at one collocation point
    :param z_tilde: values of the DAE variables in row dominant matrix form
        dimension: (m_accumulate[-1], size_z), where each row corresponds to the values at one collocation point.
        The index for the jth collocation point from the ith time node is m_accumulate[i] + j.
:param p: values of the parameter variables in vector form
shape: (size_p, )
:param alpha: continuation parameter of the Newton method
:return:
J: the J matrix element in the Jacobian matrix in row dominant matrix form
shape: (m_accumulate[-1] * (size_y + size_z), size_y)
each m * (size_y + size_z) x size_y corresponds to a matrix block at a time node
V: the V matrix element in the Jacobian matrix in row dominant matrix form
shape: (m_accumulate[-1] * (size_y + size_z), size_p)
each m * (size_y + size_z) x size_p corresponds to a matrix block at a time node
D: the D matrix element in the Jacobian matrix in row dominant matrix form
shape: ((N - 1) * size_y, m_max * (size_y + size_z))
each size_y x m * (size_y + size_z) corresponds to a matrix block at a time node
        W: the W matrix element in the Jacobian matrix in row dominant matrix form
shape: (m_accumulate[-1] * (size_y + size_z), m_max * (size_y + size_z))
each m * (size_y + size_z) x m * (size_y + size_z) corresponds to a matrix block at a time node
        B_0: derivatives of boundary conditions w.r.t. ODE variables at initial time
            dimension: (size_y + size_p) x size_y
        B_n: derivatives of boundary conditions w.r.t. ODE variables at final time
            dimension: (size_y + size_p) x size_y
        V_n: derivatives of boundary conditions w.r.t. parameter variables
            dimension: (size_y + size_p) x size_p
"""
# grid dimension of the kernel of CUDA
grid_dims_1d = ((N - 1) + TPB_N - 1) // TPB_N
block_dims_1d = TPB_N
# transfer memory from CPU to GPU
d_m_N = cuda.to_device(m_N)
d_m_accumulate = cuda.to_device(m_accumulate)
d_m_sum = cuda.to_device(m_sum)
d_a_m = cuda.to_device(a_m)
d_b_m = cuda.to_device(b_m)
d_t_span = cuda.to_device(t_span)
d_p = cuda.to_device(p)
d_y_tilde = cuda.to_device(y_tilde)
d_z_tilde = cuda.to_device(z_tilde)
# container to hold the derivatives
'''
large row dominated matrix
start_row_index : i * m * size_y + j * size_y
end_row_index : start_index + size_y
d_d_h[start_row_index : end_row_index, :] can access the derivatives of the ODE
equations at the jth collocation node of the ith time span
d_d_g[start_row_index : end_row_index, :] can access the derivatives of the DAE
equations at the jth collocation node of the ith time span
'''
# no zero initialize here, initialize it in the kernel
d_d_h = cuda.device_array((size_y * m_accumulate[-1], (size_y + size_z + size_p)), dtype=np.float64)
d_d_g = cuda.device_array((size_z * m_accumulate[-1], (size_y + size_z + size_p)), dtype=np.float64)
'''
large row dominant matrix
start_index : i * m * (size_y + size_z + size_p) + j * (size_y + size_z)
end_index : start_index + (size_y + size_z + size_p)
d_J[start_index : end_index, 0 : size_y] can access the elements associated with
the jth collocation node of the ith time span
d_V[start_index : end_index, 0 : size_p] can access the elements associated with
the jth collocation node of the ith time span
'''
# holder for the output variables
d_J = cuda.device_array(((size_y + size_z) * m_accumulate[-1], size_y), dtype=np.float64)
d_V = cuda.device_array(((size_y + size_z) * m_accumulate[-1], size_p), dtype=np.float64)
d_W = cuda.device_array(((size_y + size_z) * m_accumulate[-1], m_max * (size_y + size_z)), dtype=np.float64)
# no zero initialization, initialize it in the kernel
d_D = cuda.device_array(((N - 1) * size_y, m_max * (size_y + size_z)), dtype=np.float64)
# construct the J, V, W, and D matrix on CUDA kernel
construct_jacobian_kernel[grid_dims_1d, block_dims_1d](size_y, size_z, size_p, m_min, N,
d_m_N, d_m_accumulate, d_m_sum, d_a_m, d_b_m,
d_t_span, d_y_tilde, d_z_tilde, d_p, alpha,
d_d_h, d_d_g, d_J, d_V, d_D, d_W)
# compute the derivative of the boundary conditions
d_r = np.zeros(((size_y + size_p), (size_y + size_y + size_p)), dtype=np.float64)
y_i = y[0, :] # y values at the initial time
y_f = y[N - 1, :] # y values at the final time
_abvp_Dr(y_i, y_f, p, d_r)
B_0 = d_r[0: size_y + size_p, 0: size_y] # B_1 in the paper
B_n = d_r[0: size_y + size_p, size_y: size_y + size_y] # B_N in the paper
V_n = d_r[0: size_y + size_p, size_y + size_y: size_y + size_y + size_p] # V_N in the paper
return d_J.copy_to_host(), d_V.copy_to_host(), d_D.copy_to_host(), d_W.copy_to_host(), B_0, B_n, V_n
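# Indexing sketch (not executed): the Jacobian blocks of time interval i with
# m = m_N[i] collocation points can be sliced out of the stacked arrays returned
# above as
#   r0 = m_accumulate[i] * (size_y + size_z)
#   W_i = W[r0: r0 + m * (size_y + size_z), 0: m * (size_y + size_z)]
#   J_i = J[r0: r0 + m * (size_y + size_z), 0: size_y]
#   V_i = V[r0: r0 + m * (size_y + size_z), 0: size_p]
#   D_i = D[i * size_y: (i + 1) * size_y, 0: m * (size_y + size_z)]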
@cuda.jit()
def construct_jacobian_kernel(size_y, size_z, size_p, m_min, N,
d_m_N, d_m_accumulate, d_m_sum, d_a_m, d_b_m,
d_t_span, d_y_tilde, d_z_tilde, d_p, alpha,
d_d_h, d_d_g, d_J, d_V, d_D, d_W):
"""
Kernel function for computing each element J, V, D, W in the Jacobian matrix
:param size_y:
:param size_z:
:param size_p:
:param m_min:
:param N:
:param d_m_N:
:param d_m_accumulate:
:param d_m_sum:
:param d_a_m:
:param d_b_m:
:param d_t_span:
:param d_y_tilde:
:param d_z_tilde:
:param d_p:
:param alpha:
:param d_d_h:
:param d_d_g:
:param d_J:
:param d_V:
:param d_D:
:param d_W:
:return:
    d_d_h : m_accumulate[-1] * size_y x (size_y + size_z + size_p)
    d_d_g : m_accumulate[-1] * size_z x (size_y + size_z + size_p)
    d_J : m_accumulate[-1] * (size_y + size_z) x size_y
    d_V : m_accumulate[-1] * (size_y + size_z) x size_p
    d_D : (N - 1) * size_y x m_max * (size_y + size_z)
    d_W : m_accumulate[-1] * (size_y + size_z) x m_max * (size_y + size_z)
"""
i = cuda.grid(1)
if i >= (N - 1):
return
m = d_m_N[i]
m_start = d_m_sum[m - m_min] # start index in a coefficients
delta_t_i = d_t_span[i + 1] - d_t_span[i]
for j in range(m):
# the block index for each derivative of d_h
start_row_index_d_h = d_m_accumulate[i] * size_y + j * size_y
end_row_index_d_h = start_row_index_d_h + size_y
# zero initialize the derivative matrix
for row in range(start_row_index_d_h, end_row_index_d_h):
for col in range(0, size_y + size_z + size_p):
d_d_h[row, col] = 0
# compute the derivatives
_abvp_Df(d_y_tilde[d_m_accumulate[i] + j, 0: size_y], d_z_tilde[d_m_accumulate[i] + j, 0: size_z], d_p,
d_d_h[start_row_index_d_h: end_row_index_d_h, 0: size_y + size_z + size_p])
# the block index for each derivative of d_g
start_row_index_d_g = d_m_accumulate[i] * size_z + j * size_z
end_row_index_d_g = start_row_index_d_g + size_z
# zero initialize the derivative matrix
for row in range(start_row_index_d_g, end_row_index_d_g):
for col in range(0, size_y + size_z + size_p):
d_d_g[row, col] = 0
# compute the derivatives
_abvp_Dg(d_y_tilde[d_m_accumulate[i] + j, 0: size_y], d_z_tilde[d_m_accumulate[i] + j, 0: size_z], d_p, alpha,
d_d_g[start_row_index_d_g: end_row_index_d_g, 0: size_y + size_z + size_p])
'''
indexing for each derivatives
        h_y = d_d_h[start_row_index_d_h: end_row_index_d_h, 0: size_y]
        h_z = d_d_h[start_row_index_d_h: end_row_index_d_h, size_y: size_y + size_z]
        h_p = d_d_h[start_row_index_d_h: end_row_index_d_h, size_y + size_z: size_y + size_z + size_p]
g_y = d_d_g[start_row_index_d_g: end_row_index_d_g, 0: size_y]
g_z = d_d_g[start_row_index_d_g: end_row_index_d_g, size_y: size_y + size_z]
g_p = d_d_g[start_row_index_d_g: end_row_index_d_g, size_y + size_z: size_y + size_z + size_p]
'''
# construct the J and V matrix
start_index_JV_h = d_m_accumulate[i] * (size_y + size_z) + j * (size_y + size_z)
start_index_JV_g = start_index_JV_h + size_y
for row in range(size_y):
for col in range(size_y):
d_J[start_index_JV_h + row, col] = d_d_h[start_row_index_d_h + row, col]
for col in range(size_p):
d_V[start_index_JV_h + row, col] = d_d_h[start_row_index_d_h + row, size_y + size_z + col]
for row in range(size_z):
for col in range(size_y):
d_J[start_index_JV_g + row, col] = d_d_g[start_row_index_d_g + row, col]
for col in range(size_p):
d_V[start_index_JV_g + row, col] = d_d_g[start_row_index_d_g + row, size_y + size_z + col]
# construct the D matrix
start_row_index_D = i * size_y
start_col_index_D = j * (size_y + size_z)
for row in range(size_y):
for col in range(size_y + size_z):
if row == col:
d_D[start_row_index_D + row, start_col_index_D + col] = delta_t_i * d_b_m[m - m_min, j]
else:
d_D[start_row_index_D + row, start_col_index_D + col] = 0.0
# construct the W matrix
# j associates the corresponding row block
# start_row_index_W = i * m * (size_y + size_z) + j * (size_y + size_z)
# loop through the m column blocks
# each column block is size (size_y + size_z) x (size_y + size_z)
for k in range(m):
# start row index for the top block in W matrix
start_row_index_W_top = d_m_accumulate[i] * (size_y + size_z) + j * (size_y + size_z)
# start row index for the bottom block in W matrix
start_row_index_W_bot = start_row_index_W_top + size_y
# start column index for the left block in W matrix
start_col_index_W_left = k * (size_y + size_z)
# start column index for the right block in W matrix
start_col_index_W_right = start_col_index_W_left + size_y
# for the diagonal block
if k == j:
# top left block: -I + delta_t_i * a[j, k] * h_y
for ii in range(size_y):
for jj in range(size_y):
# diagonal element
if ii == jj:
d_W[start_row_index_W_top + ii, start_col_index_W_left + jj] = \
-1.0 + delta_t_i * d_a_m[m_start + j, k] * d_d_h[start_row_index_d_h + ii, jj]
else:
d_W[start_row_index_W_top + ii, start_col_index_W_left + jj] = \
delta_t_i * d_a_m[m_start + j, k] * d_d_h[start_row_index_d_h + ii, jj]
# top right block: h_z
for ii in range(size_y):
for jj in range(size_z):
d_W[start_row_index_W_top + ii, start_col_index_W_right + jj] = \
d_d_h[start_row_index_d_h + ii, size_y + jj]
# bottom left block: delta_t_i * a[j, k] * g_y
for ii in range(size_z):
for jj in range(size_y):
d_W[start_row_index_W_bot + ii, start_col_index_W_left + jj] = \
delta_t_i * d_a_m[m_start + j, k] * d_d_g[start_row_index_d_g + ii, jj]
# bottom right block: g_z
for ii in range(size_z):
for jj in range(size_z):
d_W[start_row_index_W_bot + ii, start_col_index_W_right + jj] = \
d_d_g[start_row_index_d_g + ii, size_y + jj]
else:
# top left block: delta_t_i * a[j, k] * h_y
for ii in range(size_y):
for jj in range(size_y):
d_W[start_row_index_W_top + ii, start_col_index_W_left + jj] = \
delta_t_i * d_a_m[m_start + j, k] * d_d_h[start_row_index_d_h + ii, jj]
# top right block: 0s
for ii in range(size_y):
for jj in range(size_z):
d_W[start_row_index_W_top + ii, start_col_index_W_right + jj] = 0
# bottom left block: delta_t_i * a[j, k] * g_y
for ii in range(size_z):
for jj in range(size_y):
d_W[start_row_index_W_bot + ii, start_col_index_W_left + jj] = \
delta_t_i * d_a_m[m_start + j, k] * d_d_g[start_row_index_d_g + ii, jj]
# bottom right block: 0s
for ii in range(size_z):
for jj in range(size_z):
d_W[start_row_index_W_bot + ii, start_col_index_W_right + jj] = 0
return
def reduce_jacobian_parallel(size_y, size_z, size_p, m_max, N, m_N, m_accumulate, W, D, J, V, f_a, f_b):
"""
Construct the reduced BABD system with a self-implemented LU factorization solver.
:param size_y: number of ode variables of the problem
:param size_z: number of algebraic variables
:param size_p: number of parameter variables
:param m_max: maximum number of collocation points allowed
:param N: number of time nodes of the problem
:param m_N: number of collocation points in each interval
:param m_accumulate: accumulated collocation points in each interval
    :param W: the W matrix element in the Jacobian matrix in row dominant matrix form
shape: (m_accumulate[-1] * (size_y + size_z), m_max * (size_y + size_z))
each m * (size_y + size_z) x m * (size_y + size_z) corresponds to a matrix block at a time node
    :param D: the D matrix element in the Jacobian matrix in row dominant matrix form
shape: ((N - 1) * size_y, m_max * (size_y + size_z))
each size_y x m * (size_y + size_z) corresponds to a matrix block at a time node
:param J: the J matrix element in the Jacobian matrix in row dominant matrix form
shape: (m_accumulate[-1] * (size_y + size_z), size_y)
each m * (size_y + size_z) x size_y corresponds to a matrix block at a time node
:param V: the V matrix element in the Jacobian matrix in row dominant matrix form
shape: (m_accumulate[-1] * (size_y + size_z), size_p)
each m * (size_y + size_z) x size_p corresponds to a matrix block at a time node
:param f_a: matrix of the residual f_a for each time node in row dominant matrix form
shape: ((N - 1), m_max * (size_y + size_z)), where each row corresponds the values at each time node
:param f_b: matrix of the residual f_b for each time node in row dominant matrix form
shape: (N - 1, size_y), where each row corresponds the values at each time node
:return:
A : the A matrix element in the reduced Jacobian matrix in a row dominant matrix form
shape: ((N - 1) * size_y, size_y)
each size_y x size_y corresponds to a matrix block at a time node
C : the C matrix element in the reduced Jacobian matrix in a row dominant matrix form
shape: ((N - 1) * size_y, size_y)
each size_y x size_y corresponds to a matrix block at a time node
H : the H matrix element in the reduced Jacobian matrix in a row dominant matrix form
shape: ((N - 1) * size_y, size_p)
each size_y x size_p corresponds to a matrix block at a time node
b : the b vector element of the residual in the reduced BABD system in a row dominant matrix form
shape: (N - 1, size_y)
each row vector with size size_y corresponds to a vector block at a time node
"""
# grid dimension of the kernel of CUDA
grid_dims_1d = ((N - 1) + TPB_N - 1) // TPB_N
block_dims_1d = TPB_N
# transfer memory from CPU to GPU
d_m_N = cuda.to_device(m_N)
d_m_accumulate = cuda.to_device(m_accumulate)
d_W = cuda.to_device(W)
d_D = cuda.to_device(D)
d_J = cuda.to_device(J)
d_V = cuda.to_device(V)
d_f_a = cuda.to_device(f_a)
d_f_b = cuda.to_device(f_b)
# holder for output variables
d_A = cuda.device_array(((N - 1) * size_y, size_y), dtype=np.float64)
d_C = cuda.device_array(((N - 1) * size_y, size_y), dtype=np.float64)
d_H = cuda.device_array(((N - 1) * size_y, size_p), dtype=np.float64)
d_b = cuda.device_array((N - 1, size_y), dtype=np.float64)
# holder for the intermediate variables in lu decomposition
d_P = cuda.device_array((m_accumulate[-1] * (size_y + size_z), m_max * (size_y + size_z)), dtype=np.float64)
d_L = cuda.device_array((m_accumulate[-1] * (size_y + size_z), m_max * (size_y + size_z)), dtype=np.float64)
d_U = cuda.device_array((m_accumulate[-1] * (size_y + size_z), m_max * (size_y + size_z)), dtype=np.float64)
d_cpy = cuda.device_array((m_accumulate[-1] * (size_y + size_z), m_max * (size_y + size_z)), dtype=np.float64)
d_c_J = cuda.device_array((m_accumulate[-1] * (size_y + size_z), size_y), dtype=np.float64)
d_y_J = cuda.device_array((m_accumulate[-1] * (size_y + size_z), size_y), dtype=np.float64)
d_x_J = cuda.device_array((m_accumulate[-1] * (size_y + size_z), size_y), dtype=np.float64)
d_D_W_J = cuda.device_array(((N - 1) * size_y, size_y), dtype=np.float64)
d_c_V = cuda.device_array((m_accumulate[-1] * (size_y + size_z), size_p), dtype=np.float64)
d_y_V = cuda.device_array((m_accumulate[-1] * (size_y + size_z), size_p), dtype=np.float64)
d_x_V = cuda.device_array((m_accumulate[-1] * (size_y + size_z), size_p), dtype=np.float64)
d_c_f_a = cuda.device_array((N - 1, m_max * (size_y + size_z)), dtype=np.float64)
d_y_f_a = cuda.device_array((N - 1, m_max * (size_y + size_z)), dtype=np.float64)
d_x_f_a = cuda.device_array((N - 1, m_max * (size_y + size_z)), dtype=np.float64)
d_D_W_f_a = cuda.device_array((N - 1, size_y), dtype=np.float64)
# machine precision
eps = sys.float_info.epsilon
# perform the jacobian reduction in parallel with the GPU kernel
reduce_jacobian_parallel_kernel[grid_dims_1d, block_dims_1d](
eps, size_y, size_z, size_p, N,
d_m_N, d_m_accumulate,
d_W, d_D, d_J, d_V, d_f_a, d_f_b,
d_P, d_L, d_U, d_cpy, d_c_J, d_y_J, d_x_J, d_D_W_J,
d_c_V, d_y_V, d_x_V, d_c_f_a, d_y_f_a, d_x_f_a, d_D_W_f_a,
d_A, d_C, d_H, d_b)
return d_A.copy_to_host(), d_C.copy_to_host(), d_H.copy_to_host(), d_b.copy_to_host()
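# Serial reference sketch (not called by the solver) of the per-interval
# condensation performed by reduce_jacobian_parallel_kernel below, using
# numpy.linalg.solve in place of the custom LU routines. W_i, D_i, J_i, V_i,
# f_a_i and f_b_i denote the dense blocks of one time interval as documented
# above; the function name and argument names are illustrative assumptions.
def reduce_jacobian_interval_sketch(size_y, W_i, D_i, J_i, V_i, f_a_i, f_b_i):
    x_j = np.linalg.solve(W_i, J_i)                     # W_i^(-1) * J_i
    a_i = -np.eye(size_y) + D_i @ x_j                   # A = -I + D * W^(-1) * J
    h_i = D_i @ np.linalg.solve(W_i, V_i)               # H = D * W^(-1) * V
    b_i = -f_b_i - D_i @ np.linalg.solve(W_i, f_a_i)    # b = -f_b - D * W^(-1) * f_a
    c_i = np.eye(size_y)                                # C = I
    return a_i, c_i, h_i, b_i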
@cuda.jit()
def reduce_jacobian_parallel_kernel(eps, size_y, size_z, size_p, N,
d_m_N, d_m_accumulate,
d_W, d_D, d_J, d_V, d_f_a, d_f_b,
d_P, d_L, d_U, d_cpy, d_c_J, d_y_J, d_x_J, d_D_W_J,
d_c_V, d_y_V, d_x_V, d_c_f_a, d_y_f_a, d_x_f_a, d_D_W_f_a,
d_A, d_C, d_H, d_b):
"""
Kernel function for computing each element A, C, H, b in the reduced Jacobian matrix.
:param eps:
:param size_y:
:param size_z:
:param size_p:
:param N:
:param d_m_N:
:param d_m_accumulate:
:param d_W:
:param d_D:
:param d_J:
:param d_V:
:param d_f_a:
:param d_f_b:
:param d_P:
:param d_L:
:param d_U:
:param d_cpy:
:param d_c_J:
:param d_y_J:
:param d_x_J:
:param d_D_W_J:
:param d_c_V:
:param d_y_V:
:param d_x_V:
:param d_c_f_a:
:param d_y_f_a:
:param d_x_f_a:
:param d_D_W_f_a:
:param d_A:
:param d_C:
:param d_H:
:param d_b:
:return:
"""
i = cuda.grid(1)
if i < (N - 1):
m = d_m_N[i]
# start row index of the W element
start_row_index_W = d_m_accumulate[i] * (size_y + size_z)
# end row index of the W element
end_row_index_W = start_row_index_W + m * (size_y + size_z)
# start row index of the D element
start_row_index_D = i * size_y
# end row index of the D element
end_row_index_D = start_row_index_D + size_y
# start row index of the J element
start_row_index_J = d_m_accumulate[i] * (size_y + size_z)
# end row index of the J element
end_row_index_J = start_row_index_J + m * (size_y + size_z)
# start row index of the V element
start_row_index_V = d_m_accumulate[i] * (size_y + size_z)
# end row index of the V element
end_row_index_V = start_row_index_V + m * (size_y + size_z)
# start row index for A, C, and H
start_row_index_ACH = i * size_y
# end row index for A, C, and H
end_row_index_ACH = start_row_index_ACH + size_y
# perform LU decomposition of matrix W and save the results
# P * W = L * U
matrix_factorization_cuda.lu(
d_W[start_row_index_W: end_row_index_W, 0: m * (size_y + size_z)],
d_cpy[start_row_index_W: end_row_index_W, 0: m * (size_y + size_z)],
d_P[start_row_index_W: end_row_index_W, 0: m * (size_y + size_z)],
d_L[start_row_index_W: end_row_index_W, 0: m * (size_y + size_z)],
d_U[start_row_index_W: end_row_index_W, 0: m * (size_y + size_z)],
eps)
# A = -I + D * W^(-1) * J
# compute W^(-1) * J = X first
# J = W * X => P * J = P * W * X = L * U * X => L^{-1} * P * J = U * X => U^{-1} * L^{-1} * P * J = X
# compute P * J first, the result of the product is saved in d_c_J
matrix_operation_cuda.mat_mul(
d_P[start_row_index_W: end_row_index_W, 0: m * (size_y + size_z)],
d_J[start_row_index_J: end_row_index_J, 0: size_y],
d_c_J[start_row_index_J: end_row_index_J, 0: size_y])
# first, forward solve the linear system L * (U * X) = (P * J), and the result is saved in d_y_J
matrix_factorization_cuda.forward_solve_mat(
d_L[start_row_index_W: end_row_index_W, 0: m * (size_y + size_z)],
d_c_J[start_row_index_J: end_row_index_J, 0: size_y],
d_y_J[start_row_index_J: end_row_index_J, 0: size_y],
eps)
# then, backward solve the linear system U * X = Y, and the result is saved in d_x_J
# X = W^(-1) * J
matrix_factorization_cuda.backward_solve_mat(
d_U[start_row_index_W: end_row_index_W, 0: m * (size_y + size_z)],
d_y_J[start_row_index_J: end_row_index_J, 0: size_y],
d_x_J[start_row_index_J: end_row_index_J, 0: size_y],
eps)
# perform D * X
matrix_operation_cuda.mat_mul(
d_D[start_row_index_D: end_row_index_D, 0: m * (size_y + size_z)],
d_x_J[start_row_index_J: end_row_index_J, 0: size_y],
d_D_W_J[start_row_index_D: end_row_index_D, 0: size_y])
# final step, A = -I + D * X
# nested for loops, row-wise first, column-wise next
for j in range(size_y):
for k in range(size_y):
if j == k:
d_A[start_row_index_ACH + j, k] = -1.0 + d_D_W_J[start_row_index_D + j, k]
else:
d_A[start_row_index_ACH + j, k] = d_D_W_J[start_row_index_D + j, k]
# H = D * W^(-1) * V
# compute W^(-1) * V = X first
# compute P * V first, the result of the product is saved in d_c_V
matrix_operation_cuda.mat_mul(
d_P[start_row_index_W: end_row_index_W, 0: m * (size_y + size_z)],
d_V[start_row_index_V: end_row_index_V, 0: size_p],
d_c_V[start_row_index_V: end_row_index_V, 0: size_p])
# first, forward solve the linear system L * (U * X) = (P * V), and the result is saved in d_y_V
matrix_factorization_cuda.forward_solve_mat(
d_L[start_row_index_W: end_row_index_W, 0: m * (size_y + size_z)],
d_c_V[start_row_index_V: end_row_index_V, 0: size_p],
d_y_V[start_row_index_V: end_row_index_V, 0: size_p],
eps)
# then, backward solve the linear system U * X = Y, and the result is saved in d_x_V
# X = W^(-1) * V
matrix_factorization_cuda.backward_solve_mat(
d_U[start_row_index_W: end_row_index_W, 0: m * (size_y + size_z)],
d_y_V[start_row_index_V: end_row_index_V, 0: size_p],
d_x_V[start_row_index_V: end_row_index_V, 0: size_p],
eps)
# final step, perform D * X, then we get the results H
matrix_operation_cuda.mat_mul(
d_D[start_row_index_D: end_row_index_D, 0: m * (size_y + size_z)],
d_x_V[start_row_index_V: end_row_index_V, 0: size_p],
d_H[start_row_index_ACH: end_row_index_ACH, 0: size_p])
# C = I
# nested for loops, row-wise first, column-wise next
for j in range(size_y):
for k in range(size_y):
if j == k:
d_C[start_row_index_ACH + j, k] = 1.0
else:
d_C[start_row_index_ACH + j, k] = 0.0
# b = -f_b - D * W^(-1) * f_a
# compute W^(-1) * f_a = X first
# compute P * f_a first, the result of the product is saved in d_c_f_a
matrix_operation_cuda.mat_vec_mul(
d_P[start_row_index_W: end_row_index_W, 0: m * (size_y + size_z)],
d_f_a[i, 0: m * (size_y + size_z)],
d_c_f_a[i, 0: m * (size_y + size_z)])
# first, forward solve the linear system L * (U * X) = (P * f_a), and the result is saved in d_y_f_a
matrix_factorization_cuda.forward_solve_vec(
d_L[start_row_index_W: end_row_index_W, 0: m * (size_y + size_z)],
d_c_f_a[i, 0: m * (size_y + size_z)],
d_y_f_a[i, 0: m * (size_y + size_z)],
eps)
# then, backward solve the linear system U * X = Y, and the result is saved in d_x_f_a
# X = W^(-1) * f_a
matrix_factorization_cuda.backward_solve_vec(
d_U[start_row_index_W: end_row_index_W, 0: m * (size_y + size_z)],
d_y_f_a[i, 0: m * (size_y + size_z)],
d_x_f_a[i, 0: m * (size_y + size_z)],
eps)
# perform D * X
matrix_operation_cuda.mat_vec_mul(
d_D[start_row_index_D: end_row_index_D, 0: m * (size_y + size_z)],
d_x_f_a[i, 0: m * (size_y + size_z)],
d_D_W_f_a[i, 0: size_y])
# final step, b = -f_b - D * W^(-1) * f_a
for j in range(size_y):
d_b[i, j] = -d_f_b[i, j] - d_D_W_f_a[i, j]
return
# finish the implementation of constructing the Jacobian matrix
# start the implementation of the parallel recovering the delta_k
def recover_delta_k_parallel(size_y, size_z, size_p, m_max, N, m_N, m_accumulate, delta_y, delta_p, f_a, J, V, W):
"""
Recover the delta_k of the search direction from the reduced BABD system.
:param size_y: number of ode variables of the problem
:param size_z: number of algebraic variables
:param size_p: number of parameter variables
:param m_max: maximum number of collocation points allowed
:param N: number of time nodes of the problem
:param m_N: number of collocation points in each interval
:param m_accumulate: accumulated collocation points in each interval
:param delta_y: search direction of the ode variables obtained from Newton's method
shape: (N, size_y)
:param delta_p: search direction of the parameter variables obtained from Newton's method
shape: (size_p, )
:param f_a: matrix of the residual f_a for each time node in row dominant matrix form
shape: ((N - 1), m_max * (size_y + size_z)), where each row corresponds the values at each time node
:param J: the J matrix element in the Jacobian matrix in row dominant matrix form
shape: (m_accumulate[-1] * (size_y + size_z), size_y)
each m * (size_y + size_z) x size_y corresponds to a matrix block at a time node
    :param V: the V matrix element in the Jacobian matrix in row dominant matrix form
shape: (m_accumulate[-1] * (size_y + size_z), size_p)
each m * (size_y + size_z) x size_p corresponds to a matrix block at a time node
    :param W: the W matrix element in the Jacobian matrix in row dominant matrix form
shape: (m_accumulate[-1] * (size_y + size_z), m_max * (size_y + size_z))
each m * (size_y + size_z) x m * (size_y + size_z) corresponds to a matrix block at a time node
:return:
delta_k: solution of the search direction of y_dot and z variables of the system recovered from the reduced BABD
system
shape: ((N - 1), m_max * (size_y + size_z))
each size (size_y + size_z) vector corresponds to the search direction at each time node
delta_y_dot: solution of the search direction of y_dot from corresponding position at delta_k
shape: (m_accumulate[-1], size_y)
each size size_y row vector corresponds to the search direction at the corresponding collocation
            point. The index for the jth collocation point from the ith time node is m_accumulate[i] + j
delta_z_tilde: solution of the search direction of z_tilde from corresponding position at delta_k
shape: (m_accumulate[-1], size_z)
each size size_z row vector corresponds to the search direction at the corresponding collocation
            point. The index for the jth collocation point from the ith time node is m_accumulate[i] + j
"""
# grid dimension of the kernel of CUDA
grid_dims_1d = ((N - 1) + TPB_N - 1) // TPB_N
block_dims_1d = TPB_N
# transfer memory from CPU to GPU
d_m_N = cuda.to_device(m_N)
d_m_accumulate = cuda.to_device(m_accumulate)
d_delta_y = cuda.to_device(delta_y)
d_delta_p = cuda.to_device(delta_p)
d_f_a = cuda.to_device(f_a)
d_J = cuda.to_device(J)
d_V = cuda.to_device(V)
d_W = cuda.to_device(W)
# holder for output variable
d_delta_k = cuda.device_array((N - 1, m_max * (size_y + size_z)), dtype=np.float64)
d_delta_y_dot = cuda.device_array((m_accumulate[-1], size_y), dtype=np.float64)
d_delta_z_tilde = cuda.device_array((m_accumulate[-1], size_z), dtype=np.float64)
# holder for intermediate matrix vector multiplication variables
d_J_delta_y = cuda.device_array((N - 1, m_max * (size_y + size_z)), dtype=np.float64)
d_V_delta_p = cuda.device_array((N - 1, m_max * (size_y + size_z)), dtype=np.float64)
# holder for the right hand side of the linear system to solve in BABD system
d_vec = cuda.device_array((N - 1, m_max * (size_y + size_z)), dtype=np.float64)
# holder for the intermediate variables in lu decomposition
d_P = cuda.device_array((m_accumulate[-1] * (size_y + size_z), m_max * (size_y + size_z)), dtype=np.float64)
d_L = cuda.device_array((m_accumulate[-1] * (size_y + size_z), m_max * (size_y + size_z)), dtype=np.float64)
d_U = cuda.device_array((m_accumulate[-1] * (size_y + size_z), m_max * (size_y + size_z)), dtype=np.float64)
d_cpy = cuda.device_array((m_accumulate[-1] * (size_y + size_z), m_max * (size_y + size_z)), dtype=np.float64)
d_c = cuda.device_array((N - 1, m_max * (size_y + size_z)), dtype=np.float64)
d_y = cuda.device_array((N - 1, m_max * (size_y + size_z)), dtype=np.float64)
# machine precision
eps = sys.float_info.epsilon
recover_delta_k_kernel[grid_dims_1d, block_dims_1d](
eps, size_y, size_z, size_p, N, d_m_N, d_m_accumulate,
d_delta_y, d_delta_p, d_f_a, d_J, d_V, d_W,
d_J_delta_y, d_V_delta_p, d_vec, d_P, d_L, d_U, d_cpy, d_c, d_y,
d_delta_k, d_delta_y_dot, d_delta_z_tilde)
return d_delta_k.copy_to_host(), d_delta_y_dot.copy_to_host(), d_delta_z_tilde.copy_to_host()
@cuda.jit()
def recover_delta_k_kernel(eps, size_y, size_z, size_p, N, d_m_N, d_m_accumulate,
d_delta_y, d_delta_p, d_f_a, d_J, d_V, d_W,
d_J_delta_y, d_V_delta_p, d_vec, d_P, d_L, d_U, d_cpy, d_c, d_y,
d_delta_k, d_delta_y_dot, d_delta_z_tilde):
"""
Kernel function of recovering delta_k from the reduced BABD system.
:param eps:
:param size_y:
:param size_z:
:param size_p:
:param N:
:param d_m_N:
:param d_m_accumulate:
:param d_delta_y:
:param d_delta_p:
:param d_f_a:
:param d_J:
:param d_V:
:param d_W:
:param d_J_delta_y:
:param d_V_delta_p:
:param d_vec:
:param d_P:
:param d_L:
:param d_U:
:param d_cpy:
:param d_c:
:param d_y:
:param d_delta_k:
:param d_delta_y_dot:
:param d_delta_z_tilde:
:return:
"""
i = cuda.grid(1)
if i < (N - 1):
m = d_m_N[i]
# start row index of the J element
start_row_index_JVW = d_m_accumulate[i] * (size_y + size_z)
# end row index of the J element
end_row_index_JVW = start_row_index_JVW + m * (size_y + size_z)
matrix_operation_cuda.mat_vec_mul(
d_J[start_row_index_JVW: end_row_index_JVW, 0: size_y],
d_delta_y[i, 0: size_y],
d_J_delta_y[i, 0: m * (size_y + size_z)])
matrix_operation_cuda.mat_vec_mul(
d_V[start_row_index_JVW: end_row_index_JVW, 0: size_p],
d_delta_p,
d_V_delta_p[i, 0: m * (size_y + size_z)])
for j in range(m * (size_y + size_z)):
d_vec[i, j] = -d_f_a[i, j] - d_J_delta_y[i, j] - d_V_delta_p[i, j]
matrix_factorization_cuda.lu_solve_vec(
d_W[start_row_index_JVW: end_row_index_JVW, 0: m * (size_y + size_z)],
d_cpy[start_row_index_JVW: end_row_index_JVW, 0: m * (size_y + size_z)],
d_P[start_row_index_JVW: end_row_index_JVW, 0: m * (size_y + size_z)],
d_L[start_row_index_JVW: end_row_index_JVW, 0: m * (size_y + size_z)],
d_U[start_row_index_JVW: end_row_index_JVW, 0: m * (size_y + size_z)],
d_vec[i, 0: m * (size_y + size_z)],
d_c[i, 0: m * (size_y + size_z)],
d_y[i, 0: m * (size_y + size_z)],
d_delta_k[i, 0: m * (size_y + size_z)],
eps)
for j in range(m):
start_index_y_collocation = j * (size_y + size_z)
start_index_z_collocation = start_index_y_collocation + size_y
for k in range(size_y):
d_delta_y_dot[d_m_accumulate[i] + j, k] = d_delta_k[i, start_index_y_collocation + k]
for k in range(size_z):
d_delta_z_tilde[d_m_accumulate[i] + j, k] = d_delta_k[i, start_index_z_collocation + k]
return
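# Serial reference sketch (not called by the solver) of what recover_delta_k_kernel
# above computes for a single interval, using numpy.linalg.solve instead of the
# custom LU solver. W_i, J_i, V_i and f_a_i are the dense blocks of interval i and
# m is its number of collocation points; all names here are illustrative assumptions.
def recover_delta_k_interval_sketch(size_y, size_z, m, W_i, J_i, V_i, f_a_i, delta_y_i, delta_p):
    # right-hand side of W_i * delta_k_i = -f_a_i - J_i * delta_y_i - V_i * delta_p
    rhs = -f_a_i - J_i @ delta_y_i - V_i @ delta_p
    delta_k_i = np.linalg.solve(W_i, rhs)
    # each (size_y + size_z) slice of delta_k_i holds delta_y_dot first, then delta_z_tilde
    delta_k_mat = delta_k_i.reshape(m, size_y + size_z)
    delta_y_dot_i = delta_k_mat[:, 0: size_y]
    delta_z_tilde_i = delta_k_mat[:, size_y: size_y + size_z]
    return delta_k_i, delta_y_dot_i, delta_z_tilde_i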
# start the implementation of computing segment residual on each time node
'''
Input:
size_y: number of ODE variables.
m: number of collocation points used
t: time during the time span
L: collocation weights vector
dimension: m
y_dot: value of the derivative of the ODE variables in time span [t_j, t_(j + 1)]
dimension: m x size_y
Output:
y_dot_ret: returned value of the derivative of the ODE variables at time t
dimension: size_y
'''
@cuda.jit(device=True)
def get_y_dot(size_y, m, t, L, y_dot, y_dot_ret):
# ydot = sum_{k=1,m} L_k(t) * ydot_j[k]
# zero initialization
if L.shape[0] != m:
print("Input L size is wrong! Supposed to be", m, "instead of", L.shape[0], "columns!")
raise Exception("Input L size is wrong!")
if y_dot.shape[0] != m:
print("Input y_dot size is wrong! Supposed to be", m, "instead of", y_dot.shape[0], "rows!")
raise Exception("Input y_dot size is wrong!")
if y_dot.shape[1] != size_y:
print("Input y_dot size is wrong! Supposed to be", size_y, "instead of", y_dot.shape[1], "columns!")
raise Exception("Input y_dot size is wrong!")
if y_dot_ret.shape[0] != size_y:
print("Input y_dot_ret size is wrong! Supposed to be", size_y, "instead of", y_dot_ret.shape[0], "columns!")
raise Exception("Input y_dot_ret size is wrong!")
for i in range(size_y):
y_dot_ret[i] = 0
# compute the collocation weights at time t
collocation_coefficients.compute_L(m, t, L)
# perform collocation integration
for i in range(m):
for j in range(size_y):
y_dot_ret[j] += L[i] * y_dot[i, j]
return
'''
Input:
size_z: number of DAE variables.
m: number of collocation points used
t: time during the time span
L: collocation weights vector
dimension: m
z_tilde: value of the DAE variables in time span [t_j, t_(j + 1)]
dimension: m x size_z
Output:
    z: returned value of the DAE variables at time t
dimension: size_z
'''
@cuda.jit(device=True)
def get_z(size_z, m, t, L, z_tilde, z):
# z = sum_{k=1,m} L_k(t) * z_j[k]
if L.shape[0] != m:
print("Input L size is wrong! Supposed to be", m, "instead of", L.shape[0], "columns!")
raise Exception("Input L size is wrong!")
if z_tilde.shape[0] != m:
print("Input z_tilde size is wrong! Supposed to be", m, "instead of", z_tilde.shape[0], "rows!")
raise Exception("Input z_tilde size is wrong!")
if z_tilde.shape[1] != size_z:
print("Input z_tilde size is wrong! Supposed to be", size_z, "instead of", z_tilde.shape[1], "columns!")
raise Exception("Input z_tilde size is wrong!")
if z.shape[0] != size_z:
print("Input z size is wrong! Supposed to be", size_z, "instead of", z.shape[0], "columns!")
raise Exception("Input z size is wrong!")
# zero initialization
for i in range(size_z):
z[i] = 0
# compute the collocation weights at time t
collocation_coefficients.compute_L(m, t, L)
for i in range(m):
for j in range(size_z):
z[j] += L[i] * z_tilde[i, j]
return
'''
Input:
size_y: number of ODE variables.
m: number of collocation points used
t: time during the time span
delta_t: time interval of the time span
I: collocation weights vector
dimension: m
    y: value of the ODE variables at the time node t_j
        dimension: size_y
    y_dot: value of the derivative of the ODE variables in time span [t_j, t_(j + 1)]
        dimension: m x size_y
Output:
y_ret: returned value of the ODE variables at time t
dimension: size_y
'''
@cuda.jit(device=True)
def get_y(size_y, m, t, delta_t, I, y, y_dot, y_ret):
    # y = y_j + delta_t * sum_{k=1,m} I_k(t) * ydot_j[k]
if I.shape[0] != m:
print("Input I size is wrong! Supposed to be", m, "instead of", I.shape[0], "columns!")
raise Exception("Input I size is wrong!")
if y_dot.shape[0] != m:
print("Input y_dot size is wrong! Supposed to be", m, "instead of", y_dot.shape[0], "rows!")
raise Exception("Input y dot size is wrong!")
if y_dot.shape[1] != size_y:
print("Input y_dot size is wrong! Supposed to be", size_y, "instead of", y_dot.shape[1], "columns!")
raise Exception("Input y dot size is wrong!")
if y_ret.shape[0] != size_y:
print("Input y_ret size is wrong! Supposed to be", size_y, "instead of", y_ret.shape[0], "columns!")
raise Exception("Input y_ret size is wrong!")
# copy the y values
for i in range(size_y):
y_ret[i] = y[i]
# compute the collocation weights at time t
collocation_coefficients.compute_I(m, t, I)
for i in range(m):
for j in range(size_y):
y_ret[j] += delta_t * I[i] * y_dot[i, j]
return
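# Reconstruction summary of the three device helpers above: within interval
# [t_j, t_(j + 1)] with m collocation points and normalized time t in [0, 1],
#   y(t)     = y_j + delta_t * sum_{k=1,m} I_k(t) * y_dot_j[k]    (get_y)
#   y_dot(t) =              sum_{k=1,m} L_k(t) * y_dot_j[k]       (get_y_dot)
#   z(t)     =              sum_{k=1,m} L_k(t) * z_j[k]           (get_z)
# These helpers are used below when the collocation solution is evaluated at the
# Gauss quadrature points during the residual estimation.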
def compute_segment_residual_parallel(size_y, size_z, size_p, m_min, m_max, N, m_N, m_accumulate,
tau_m, w_m, t_span, y, y_dot, z_tilde, p, alpha, tol):
"""
Compute the segment residual using Gaussian quadrature rule on Gauss points.
:param size_y: number of ode variables of the problem
:param size_z: number of algebraic variables
:param size_p: number of parameter variables
:param m_min: minimum number of collocation points allowed
:param m_max: maximum number of collocation points allowed
:param N: number of time nodes of the problem
:param m_N: number of collocation points in each interval
:param m_accumulate: accumulated collocation points in each interval
:param tau_m: time coefficients of different gauss points used in row dominant order;
        tau_m[m - m_min, :] corresponds to the coefficients of (m + 1) gauss points
        shape: (m_max - m_min + 1, m_max + 1)
    :param w_m: weight coefficients of different gauss points used in row dominant order;
        w_m[m - m_min, :] corresponds to the coefficients of (m + 1) gauss points
shape: (m_max - m_min + 1, m_max + 1)
:param t_span: time span of the mesh
:param y: values of the ODE variables in matrix form
:param y_dot: values of the derivatives of ODE variables in row dominant matrix form
shape: (m_accumulate[-1], size_y), where each row corresponds to the values at each time node.
The index for the jth collocation point from the ith time node is m_accumulate[i] + j.
:param z_tilde: values of the DAE variables in row dominant matrix form
dimension: (m_accumulate[-1], size_z), where each row corresponds to the values at each time node.
The index for the jth collocation point from the ith time node is m_accumulate[i] + j.
:param p: values of the parameter variables in vector form
shape: (size_p, )
:param alpha: continuation parameter
:param tol: numerical tolerance
:return:
residual: residual error evaluated for each time interval
shape: (N, )
max_residual: maximum residual error
"""
# grid dimension of the kernel of CUDA
grid_dims_1d = ((N - 1) + TPB_N - 1) // TPB_N
block_dims_1d = TPB_N
# transfer memory from CPU to GPU
d_m_N = cuda.to_device(m_N)
d_m_accumulate = cuda.to_device(m_accumulate)
d_tau_m = cuda.to_device(tau_m)
d_w_m = cuda.to_device(w_m)
d_t_span = cuda.to_device(t_span)
d_y = cuda.to_device(y)
d_y_dot = cuda.to_device(y_dot)
d_z_tilde = cuda.to_device(z_tilde)
d_p = cuda.to_device(p)
# holder for the output variables
# remember to zero-initialize these in the kernel
d_residual = cuda.device_array(N, dtype=np.float64)
# holder for the intermediate variables
d_h_res = cuda.device_array((N - 1, size_y), dtype=np.float64)
d_g_res = cuda.device_array((N - 1, size_z), dtype=np.float64)
# d_r = cuda.device_array(size_y + size_p, dtype=np.float64)
d_L = cuda.device_array((N - 1, m_max), dtype=np.float64)
d_I = cuda.device_array((N - 1, m_max), dtype=np.float64)
d_y_temp = cuda.device_array((N - 1, size_y), dtype=np.float64)
d_z_temp = cuda.device_array((N - 1, size_z), dtype=np.float64)
d_y_dot_temp = cuda.device_array((N - 1, size_y), dtype=np.float64)
# need reduction here maybe?
d_rho_h = cuda.device_array(N - 1, dtype=np.float64)
d_rho_g = cuda.device_array(N - 1, dtype=np.float64)
compute_segment_residual_kernel[grid_dims_1d, block_dims_1d](
size_y, size_z, m_min, N, alpha, tol,
d_m_N, d_m_accumulate, d_tau_m, d_w_m, d_t_span,
d_y, d_y_dot, d_z_tilde, d_y_temp, d_y_dot_temp, d_z_temp,
d_p, d_L, d_I, d_h_res, d_g_res, d_rho_h, d_rho_g,
residual_type, scale_by_time, scale_by_initial,
d_residual)
# copy the memory back to CPU
rho_h = d_rho_h.copy_to_host()
rho_g = d_rho_g.copy_to_host()
residual = d_residual.copy_to_host()
max_rho_r = 0
# compute the residual at the boundary
if (size_y + size_p) > 0:
r = np.zeros((size_y + size_p), dtype=np.float64)
_abvp_r(y[0, 0: size_y], y[N - 1, 0: size_y], p, r)
max_rho_r = np.linalg.norm(r, np.inf)
residual[N - 1] = max_rho_r / tol
max_rho_h = np.amax(rho_h)
max_rho_g = np.amax(rho_g)
max_residual = np.amax(residual)
if residual_type == 2:
print('\tres: |h|: {}, |g|: {}, |r|: {}'.format(sqrt(max_rho_h) / tol, sqrt(max_rho_g) / tol, max_rho_r / tol))
else:
print('\tres: |h|: {}, |g|: {}, |r|: {}'.format(max_rho_h / tol, max_rho_g / tol, max_rho_r / tol))
return residual, max_residual
'''
Kernel function for computing segment residual.
'''
@cuda.jit()
def compute_segment_residual_kernel(size_y, size_z, m_min, N, alpha, tol,
d_m_N, d_m_accumulate, d_tau_m, d_w_m, d_t_span,
d_y, d_y_dot, d_z_tilde, d_y_temp, d_y_dot_temp, d_z_temp,
d_p, d_L, d_I, d_h_res, d_g_res, d_rho_h, d_rho_g,
residual_type, scale_by_time, scale_by_initial,
d_residual):
j = cuda.grid(1) # cuda thread index
if j < (N - 1):
m = d_m_N[j] # number of collocation points of the interval j
delta_t_j = d_t_span[j + 1] - d_t_span[j]
d_rho_h[j] = 0
d_rho_g[j] = 0
for i in range(m + 1):
# compute y_dot at gaussian points
get_y_dot(
size_y, m, d_tau_m[m - m_min, i], d_L[j, 0: m],
d_y_dot[d_m_accumulate[j]: d_m_accumulate[j + 1], 0: size_y],
d_y_dot_temp[j, 0: size_y])
# compute y at gaussian points
get_y(
size_y, m, d_tau_m[m - m_min, i], delta_t_j, d_I[j, 0: m], d_y[j, 0: size_y],
d_y_dot[d_m_accumulate[j]: d_m_accumulate[j + 1], 0: size_y],
d_y_temp[j, 0: size_y])
# compute z at gaussian points
get_z(
size_z, m, d_tau_m[m - m_min, i], d_L[j, 0: m],
d_z_tilde[d_m_accumulate[j]: d_m_accumulate[j + 1], 0: size_z],
d_z_temp[j, 0: size_z])
# compute h
_abvp_f(d_y_temp[j, 0: size_y], d_z_temp[j, 0: size_z], d_p, d_h_res[j, 0: size_y])
# h(y,z,p) - ydot
for k in range(size_y):
d_h_res[j, k] -= d_y_dot_temp[j, k]
# d_rho_h[j] += np.dot(h_res, h_res) * w[i]
for k in range(size_y):
if residual_type == 2:
d_rho_h[j] += d_w_m[m - m_min, i] * d_h_res[j, k] * d_h_res[j, k]
elif residual_type == 1:
d_rho_h[j] += d_w_m[m - m_min, i] * abs(d_h_res[j, k])
elif residual_type == 0:
d_rho_h[j] = max(d_rho_h[j], d_w_m[m - m_min, i] * abs(d_h_res[j, k]))
else:
print("\tNorm type invalid!")
if scale_by_time:
d_rho_h[j] *= delta_t_j
# d_rho_h[j] += delta_t_j * d_w[i] * d_h_res[j, k] * d_h_res[j, k]
if size_z > 0:
_abvp_g(d_y_temp[j, 0: size_y], d_z_temp[j, 0: size_z], d_p, alpha, d_g_res[j, 0: size_z])
# rho_g += np.dot(g_res, g_res) * w[i]
for k in range(size_z):
if residual_type == 2:
d_rho_g[j] += d_w_m[m - m_min, i] * d_g_res[j, k] * d_g_res[j, k]
elif residual_type == 1:
d_rho_g[j] += d_w_m[m - m_min, i] * abs(d_g_res[j, k])
elif residual_type == 0:
d_rho_g[j] = max(d_rho_g[j], d_w_m[m - m_min, i] * abs(d_g_res[j, k]))
else:
print("\tNorm type invalid!")
if scale_by_time:
d_rho_g[j] *= delta_t_j
# d_rho_g[j] += delta_t_j * d_w[i] * d_g_res[j, k] * d_g_res[j, k]
if residual_type == 2:
d_residual[j] = sqrt(d_rho_h[j] + d_rho_g[j]) / tol
elif residual_type == 1:
d_residual[j] = (abs(d_rho_h[j]) + abs(d_rho_g[j])) / tol
elif residual_type == 0:
d_residual[j] = max(d_rho_h[j], d_rho_g[j]) / tol
return
def compute_segment_residual_collocation_parallel(size_y, size_z, size_p, m_min, m_max, N, m_N, m_accumulate,
c_m, t_span, y, y_dot, z_tilde, p, alpha, tol):
"""
Compute the segment residual on m + 1 collocation points on relative scale.
:param size_y: number of ode variables of the problem
:param size_z: number of algebraic variables
:param size_p: number of parameter variables
:param m_min: minimum number of collocation points allowed
:param m_max: maximum number of collocation points allowed
:param N: number of time nodes of the problem
:param m_N: number of collocation points in each interval
:param m_accumulate: accumulated collocation points in each interval
:param c_m: collocation time coefficients
:param t_span: time span of the mesh
:param y: values of the ODE variables in matrix form
:param y_dot: values of the derivatives of ODE variables in row dominant matrix form
shape: (m_accumulate[-1], size_y), where each row corresponds to the values at each time node.
The index for the jth collocation point from the ith time node is m_accumulate[i] + j.
:param z_tilde: values of the DAE variables in row dominant matrix form
dimension: (m_accumulate[-1], size_z), where each row corresponds to the values at each time node.
The index for the jth collocation point from the ith time node is m_accumulate[i] + j.
:param p: values of the parameter variables in vector form
shape: (size_p, )
:param alpha: continuation parameter
:param tol: numerical tolerance
:return:
residual: residual error evaluated for each time interval
shape: (N, )
residual_collocation: residual error on each collocation point
max_residual: maximum residual error
"""
# grid dimension of the kernel of CUDA
grid_dims_1d = ((N - 1) + TPB_N - 1) // TPB_N
block_dims_1d = TPB_N
# transfer memory from CPU to GPU
d_m_N = cuda.to_device(m_N)
d_m_accumulate = cuda.to_device(m_accumulate)
d_c_m = cuda.to_device(c_m)
d_t_span = cuda.to_device(t_span)
d_y = cuda.to_device(y)
d_y_dot = cuda.to_device(y_dot)
d_z_tilde = cuda.to_device(z_tilde)
d_p = cuda.to_device(p)
# holder for the output variables
# remember to zero-initialize these in the kernel
d_residual = cuda.device_array(N, dtype=np.float64)
d_residual_collocation = cuda.device_array((N, m_max + 1), dtype=np.float64)
# holder for the intermediate variables
d_h_res = cuda.device_array((N - 1, size_y), dtype=np.float64)
d_g_res = cuda.device_array((N - 1, size_z), dtype=np.float64)
# d_r = cuda.device_array(size_y + size_p, dtype=np.float64)
d_L = cuda.device_array((N - 1, m_max), dtype=np.float64)
d_I = cuda.device_array((N - 1, m_max), dtype=np.float64)
d_y_temp = cuda.device_array((N - 1, size_y), dtype=np.float64)
d_z_temp = cuda.device_array((N - 1, size_z), dtype=np.float64)
d_y_dot_temp = cuda.device_array((N - 1, size_y), dtype=np.float64)
# need reduction here maybe?
d_rho_h = cuda.device_array(N - 1, dtype=np.float64)
d_rho_g = cuda.device_array(N - 1, dtype=np.float64)
compute_segment_residual_collocation_kernel[grid_dims_1d, block_dims_1d](
size_y, size_z, m_min, m_max, N, alpha, tol,
d_m_N, d_m_accumulate, d_c_m, d_t_span,
d_y, d_y_dot, d_z_tilde, d_y_temp, d_y_dot_temp, d_z_temp,
d_p, d_L, d_I, d_h_res, d_g_res, d_rho_h, d_rho_g,
d_residual, d_residual_collocation)
# copy the memory back to CPU
residual = d_residual.copy_to_host()
residual_collocation = d_residual_collocation.copy_to_host()
max_rho_r = 0
# compute the residual at the boundary
if (size_y + size_p) > 0:
r = np.zeros((size_y + size_p), dtype=np.float64)
_abvp_r(y[0, 0: size_y], y[N - 1, 0: size_y], p, r)
max_rho_r = np.linalg.norm(r, np.inf) / tol
residual[N - 1] = max_rho_r
max_rho_h = cuda_infinity_norm(d_rho_h) / tol
max_rho_g = cuda_infinity_norm(d_rho_g) / tol
max_residual = np.amax(residual)
print('\tres: |h|: {}, |g|: {}, |r|: {}'.format(max_rho_h, max_rho_g, max_rho_r))
return residual, residual_collocation, max_residual
'''
Kernel function for computing segment residual.
'''
@cuda.jit()
def compute_segment_residual_collocation_kernel(size_y, size_z, m_min, m_max, N, alpha, tol,
d_m_N, d_m_accumulate, d_c_m, d_t_span,
d_y, d_y_dot, d_z_tilde, d_y_temp, d_y_dot_temp, d_z_temp,
d_p, d_L, d_I, d_h_res, d_g_res, d_rho_h, d_rho_g,
d_residual, d_residual_collocation):
j = cuda.grid(1) # cuda thread index
if j < (N - 1):
m = d_m_N[j] # number of collocation points in the jth interval
delta_t_j = d_t_span[j + 1] - d_t_span[j]
d_rho_h[j] = 0.0
d_rho_g[j] = 0.0
d_residual[j] = 0.0
for k in range(m_max + 1):
d_residual_collocation[j, k] = 0.0
# compute the residual at (m + 1) collocation points
for i in range(m + 1):
# compute y_dot at gaussian points
get_y_dot(size_y, m, d_c_m[m + 1 - m_min, i], d_L[j, 0: m],
d_y_dot[d_m_accumulate[j]: d_m_accumulate[j + 1], 0: size_y],
d_y_dot_temp[j, 0: size_y])
# compute y at gaussian points
get_y(size_y, m, d_c_m[m + 1 - m_min, i], delta_t_j, d_I[j, 0: m], d_y[j, 0: size_y],
d_y_dot[d_m_accumulate[j]: d_m_accumulate[j + 1], 0: size_y],
d_y_temp[j, 0: size_y])
# compute z at gaussian points
get_z(size_z, m, d_c_m[m + 1 - m_min, i], d_L[j, 0: m],
d_z_tilde[d_m_accumulate[j]: d_m_accumulate[j + 1], 0: size_z],
d_z_temp[j, 0: size_z])
# compute h
_abvp_f(d_y_temp[j, 0: size_y], d_z_temp[j, 0: size_z], d_p, d_h_res[j, 0: size_y])
# h(y,z,p) - ydot
for k in range(size_y):
# E[j, k] = y_dot_tilde[j, k] - y_dot[j, k]
d_h_res[j, k] -= d_y_dot_temp[j, k]
# e^{h, j}_{i, k} = abs(E[j, k]) / (1 + y_dot[i, k])
# e^h_j = max e^j_{i, k}
residual_scaled = abs(d_h_res[j, k]) / (1 + abs(d_y_dot_temp[j, k]))
d_rho_h[j] = max(d_rho_h[j], residual_scaled)
d_residual_collocation[j, i] = max(d_residual_collocation[j, i], residual_scaled)
if size_z > 0:
_abvp_g(d_y_temp[j, 0: size_y], d_z_temp[j, 0: size_z], d_p, alpha, d_g_res[j, 0: size_z])
for k in range(size_z):
# e^{g, j}_{i, k} = abs(E[j, k]) / (1 + z[i, k])
# e^g_j = max e^j_{i, k}
residual_scaled = abs(d_g_res[j, k]) / (1 + abs(d_z_temp[j, k]))
d_rho_g[j] = max(d_rho_g[j], residual_scaled)
d_residual_collocation[j, i] = max(d_residual_collocation[j, i], residual_scaled)
# d_residual[j] = max(d_rho_h[j], d_rho_g[j]) / tol
d_residual[j] = max(d_rho_h[j], d_rho_g[j]) / tol
return
# finish the implementation of computing segment residual on each time node
# start the implementation of recovering solution
def recover_solution_parallel(size_z, m_max, N, m_N, m_accumulate, z_tilde):
"""
Recover the solution to the BVP-DAE problem.
:param size_z: number of algebraic variables
:param m_max: maximum number of collocation points allowed
:param N: number of time nodes of the problem
:param m_N: number of collocation points in each interval
:param m_accumulate: accumulated collocation points in each interval
:param z_tilde: values of the DAE variables in row dominant matrix form
dimension: (m_accumulate[-1], size_z), where each row corresponds to the values at each time node.
The index for the jth collocation point from the ith time node is m_accumulate[i] + j.
:return:
z: values of algebraic variables at each time node in row dominant matrix form
shape: (N, size_z)
"""
# grid dimension of the kernel of CUDA
grid_dims_1d = (N + TPB_N - 1) // TPB_N
block_dims_1d = TPB_N
# transfer memory from CPU to GPU
d_m_N = cuda.to_device(m_N)
d_m_accumulate = cuda.to_device(m_accumulate)
d_z_tilde = cuda.to_device(z_tilde)
# holder for output variables
d_z = cuda.device_array((N, size_z), dtype=np.float64)
# holder for intermediate variables
d_L = cuda.device_array((N, m_max), dtype=np.float64)
# execute the kernel function
recover_solution_kernel[grid_dims_1d, block_dims_1d](size_z, N, d_m_N, d_m_accumulate, d_z_tilde, d_L, d_z)
# return the output
return d_z.copy_to_host()
@cuda.jit()
def recover_solution_kernel(size_z, N, d_m_N, d_m_accumulate, d_z_tilde, d_L, d_z):
i = cuda.grid(1) # cuda thread index
if i < (N - 1):
m = d_m_N[i]
t = 0
collocation_coefficients.compute_L(m, t, d_L[i, 0: m])
# zero initialization
for k in range(size_z):
d_z[i, k] = 0
# loop through all the collocation points
for j in range(m):
# loop through all the Z variables
for k in range(size_z):
d_z[i, k] += d_L[i, j] * d_z_tilde[d_m_accumulate[i] + j, k]
# for the last time node
if i == (N - 1):
m = d_m_N[i - 1] # number of collocation points of the last interval
t = 1
collocation_coefficients.compute_L(m, t, d_L[i, 0: m])
# zero initialization
for k in range(size_z):
d_z[i, k] = 0
# loop through all the collocation points
for j in range(m):
# loop through all the Z variables
for k in range(size_z):
d_z[i, k] += d_L[i, j] * d_z_tilde[d_m_accumulate[i - 1] + j, k]
return
# finish the implementation of recovering solution
# start the implementation of remesh
def remesh(size_y, size_z, N, tspan, y0, z0, residual):
"""
Remesh the problem
:param size_y: number of y variables
:param size_z: number of z variables
:param N: number of time nodes in the current mesh
:param tspan: time span of the current mesh
:param y0: values of the differential variables in matrix form
:param z0: values of the algebraic variables in matrix form
:param residual: residual error evaluated for each time interval
:return:
N_New : number of time nodes of the new mesh.
tspan_New : new time span of the problem.
y0_New : values of the differential variables in the new mesh in matrix form
z0_New : values of the algebraic variables in the new mesh in matrix form
"""
N_Temp = 0
tspan_Temp = []
y0_Temp = []
z0_Temp = []
residual_Temp = []
# Deleting Nodes
i = 0
# Record the number of the deleted nodes
k_D = 0
thresholdDel = 1e-2
while i < N - 4:
res_i = residual[i]
if res_i <= thresholdDel:
res_i_Plus1 = residual[i + 1]
res_i_Plus2 = residual[i + 2]
res_i_Plus3 = residual[i + 3]
res_i_Plus4 = residual[i + 4]
if res_i_Plus1 <= thresholdDel and res_i_Plus2 <= thresholdDel and res_i_Plus3 <= thresholdDel and \
res_i_Plus4 <= thresholdDel:
# append the 1st, 3rd, and 5th node
# 1st node
tspan_Temp.append(tspan[i])
y0_Temp.append(y0[i, :])
z0_Temp.append(z0[i, :])
residual_Temp.append(residual[i])
# 3rd node
tspan_Temp.append(tspan[i + 2])
y0_Temp.append(y0[i + 2, :])
z0_Temp.append(z0[i + 2, :])
residual_Temp.append(residual[i + 2])
# 5th node
tspan_Temp.append(tspan[i + 4])
y0_Temp.append(y0[i + 4, :])
z0_Temp.append(z0[i + 4, :])
residual_Temp.append(residual[i + 4])
# delete 2 nodes
k_D += 2
# add 3 nodes to the total number
N_Temp += 3
# ignore those five nodes
i += 5
else:
# directly add the node
tspan_Temp.append(tspan[i])
y0_Temp.append(y0[i, :])
z0_Temp.append(z0[i, :])
residual_Temp.append(residual[i])
N_Temp += 1
i += 1
else:
# directly add the node
tspan_Temp.append(tspan[i])
y0_Temp.append(y0[i, :])
z0_Temp.append(z0[i, :])
residual_Temp.append(residual[i])
N_Temp += 1
i += 1
'''
if the previous loop stops at a node with index i >= (N - 4), the last
few remaining nodes are added manually; if the last few nodes have already been processed,
the index i should equal N, and nothing needs to be done
'''
if i < N:
'''
add the last few nodes starting from i to N - 1, which
is a total of (N - i) nodes
'''
for j in range(N - i):
# append the (i + j)th node
tspan_Temp.append(tspan[i + j])
y0_Temp.append(y0[i + j, :])
z0_Temp.append(z0[i + j, :])
residual_Temp.append(residual[i + j])
N_Temp += 1
# convert from list to numpy arrays for the convenience of indexing
tspan_Temp = np.array(tspan_Temp)
y0_Temp = np.array(y0_Temp)
z0_Temp = np.array(z0_Temp)
residual_Temp = np.array(residual_Temp)
# lists to hold the outputs
N_New = 0
tspan_New = []
y0_New = []
z0_New = []
residual_New = []
# Adding Nodes
i = 0
# Record the number of the added nodes
k_A = 0
while i < N_Temp - 1:
res_i = residual_Temp[i]
if res_i > 1:
if res_i > 10:
# add three uniformly spaced nodes
# add the time point of new nodes
delta_t = (tspan_Temp[i + 1] - tspan_Temp[i]) / 4
t_i = tspan_Temp[i]
t_i_Plus1 = t_i + delta_t
t_i_Plus2 = t_i + 2 * delta_t
t_i_Plus3 = t_i + 3 * delta_t
tspan_New.append(t_i)
tspan_New.append(t_i_Plus1)
tspan_New.append(t_i_Plus2)
tspan_New.append(t_i_Plus3)
# add the residuals of the new nodes
delta_res = (residual_Temp[i + 1] - residual_Temp[i]) / 4
res_i_Plus1 = res_i + delta_res
res_i_Plus2 = res_i + 2 * delta_res
res_i_Plus3 = res_i + 3 * delta_res
residual_New.append(res_i)
residual_New.append(res_i_Plus1)
residual_New.append(res_i_Plus2)
residual_New.append(res_i_Plus3)
# add the ys of the new nodes
y0_i = y0_Temp[i, :]
y0_i_Next = y0_Temp[i + 1, :]
delta_y0 = (y0_i_Next - y0_i) / 4
y0_i_Plus1 = y0_i + delta_y0
y0_i_Plus2 = y0_i + 2 * delta_y0
y0_i_Plus3 = y0_i + 3 * delta_y0
y0_New.append(y0_i)
y0_New.append(y0_i_Plus1)
y0_New.append(y0_i_Plus2)
y0_New.append(y0_i_Plus3)
# add the zs of the new nodes
z0_i = z0_Temp[i, :]
z0_i_Next = z0_Temp[i + 1, :]
delta_z0 = (z0_i_Next - z0_i) / 4
z0_i_Plus1 = z0_i + delta_z0
z0_i_Plus2 = z0_i + 2 * delta_z0
z0_i_Plus3 = z0_i + 3 * delta_z0
z0_New.append(z0_i)
z0_New.append(z0_i_Plus1)
z0_New.append(z0_i_Plus2)
z0_New.append(z0_i_Plus3)
# update the index
# 1 original node + 3 newly added nodes
N_New += 4
k_A += 3
i += 1
else:
# add one node to the middle
# add the time point of the new node
delta_t = (tspan_Temp[i + 1] - tspan_Temp[i]) / 2
t_i = tspan_Temp[i]
t_i_Plus1 = t_i + delta_t
tspan_New.append(t_i)
tspan_New.append(t_i_Plus1)
# add the residual of the new node
delta_res = (residual_Temp[i + 1] - residual_Temp[i]) / 2
res_i_Plus1 = res_i + delta_res
residual_New.append(res_i)
residual_New.append(res_i_Plus1)
# add the y of the new node
y0_i = y0_Temp[i, :]
y0_i_Next = y0_Temp[i + 1, :]
delta_y0 = (y0_i_Next - y0_i) / 2
y0_i_Plus1 = y0_i + delta_y0
y0_New.append(y0_i)
y0_New.append(y0_i_Plus1)
# add the z of the new node
z0_i = z0_Temp[i, :]
z0_i_Next = z0_Temp[i + 1, :]
delta_z0 = (z0_i_Next - z0_i) / 2
z0_i_Plus1 = z0_i + delta_z0
z0_New.append(z0_i)
z0_New.append(z0_i_Plus1)
# update the index
# 1 original node + 1 newly added node
N_New += 2
k_A += 1
i += 1
else:
# add the current node only
# add the time node of the current node
t_i = tspan_Temp[i]
tspan_New.append(t_i)
# add the residual of the current node
residual_New.append(res_i)
# add the y of the current node
y0_i = y0_Temp[i, :]
y0_New.append(y0_i)
# add the z of the current node
z0_i = z0_Temp[i, :]
z0_New.append(z0_i)
# update the index
# 1 original node only
N_New += 1
i += 1
# add the final node
tspan_New.append(tspan_Temp[N_Temp - 1])
y0_New.append(y0_Temp[N_Temp - 1, :])
z0_New.append(z0_Temp[N_Temp - 1, :])
residual_New.append(residual_Temp[N_Temp - 1])
N_New += 1
# convert from list to numpy arrays for the convenience of indexing
tspan_New = np.array(tspan_New)
y0_New = np.array(y0_New)
z0_New = np.array(z0_New)
print("\tDelete nodes: {}; Add nodes: {}; Number of nodes after mesh: {}".format(k_D, k_A, N_New))
# return the output
return N_New, tspan_New, y0_New, z0_New
def hp_remesh(size_y, size_z, m_min, m_max, m_init, N,
m_N, m_accumulate, c_m, tspan, y0, z0,
residual, residual_collocation,
thres_remove, thres_add, rho, m_d, m_i, tol):
"""
Use hp mesh to remesh the problem
:param size_y: number of y variables
:param size_z: number of z variables.
:param m_min: minimum number of collocation points allowed
:param m_max: maximum number of collocation points allowed
:param m_init: initial number of collocation points used
:param N: number of time nodes in the current mesh
:param m_N: number of collocation points in each interval in the current mesh
:param m_accumulate: accumulated collocation points at each time node in the current mesh
:param c_m: coefficients c of lobatto collocation points
:param tspan: time span of the current mesh
:param y0: values of the differential variables in the current mesh
:param z0: values of the algebraic variables in the current mesh
:param residual: residual error evaluated for each time interval
:param residual_collocation: residual error on collocation points evaluated for each time interval
:param thres_remove: threshold to remove nodes from the mesh
:param thres_add: threshold to add nodes to the mesh
:param rho: threshold for uniform error
:param m_d: number of collocation points decreased in each interval
:param m_i: number of collocation points increased in each interval
:param tol: numerical tolerances
:return:
N_new: number of time nodes in the new mesh
tspan_new: time span of the new mesh
y0_new: values of the differential variables in the new mesh
z0_new: values of the algebraic variables in the new mesh
m_N_new: number of collocation points in each interval in the new mesh
m_accumulate_new: accumulated collocation points at each time node in the new mesh
"""
N_temp = 0
tspan_temp = []
y0_temp = []
z0_temp = []
residual_temp = []
residual_collocation_temp = []
m_N_temp = []
# remove Nodes
i = 0
# record the number of removed nodes
n_r = 0
m_r = 0
while i < N - 5:
res_i = residual[i]
if res_i <= thres_remove:
res_i_Plus1 = residual[i + 1]
res_i_Plus2 = residual[i + 2]
res_i_Plus3 = residual[i + 3]
res_i_Plus4 = residual[i + 4]
if res_i_Plus1 <= thres_remove and res_i_Plus2 <= thres_remove and res_i_Plus3 <= thres_remove and \
res_i_Plus4 <= thres_remove:
# append the 1st, 3rd, and 5th node
# check the 2nd and the 4th node:
# if they already use m_min collocation points then remove them, otherwise decrease the number of collocation points by m_d
# 1st node
tspan_temp.append(tspan[i])
y0_temp.append(y0[i, :])
z0_temp.append(z0[i, :])
residual_temp.append(residual[i])
residual_collocation_temp.append(residual_collocation[i, :])
m_N_temp.append(m_N[i])
N_temp += 1
# 2nd node
if m_N[i + 1] > m_min:
tspan_temp.append(tspan[i + 1])
y0_temp.append(y0[i + 1, 0: size_y])
z0_temp.append(z0[i + 1, 0: size_z])
residual_temp.append(residual[i + 1])
residual_collocation_temp.append(residual_collocation[i + 1, :])
m_add = max(m_N[i + 1] - m_d, m_min)
m_N_temp.append(m_add)
m_r += (m_N[i + 1] - m_add)
N_temp += 1
else:
n_r += 1
m_r += (m_N[i + 1])
# 3rd node
tspan_temp.append(tspan[i + 2])
y0_temp.append(y0[i + 2, 0: size_y])
z0_temp.append(z0[i + 2, 0: size_z])
residual_temp.append(residual[i + 2])
residual_collocation_temp.append(residual_collocation[i + 2, :])
m_N_temp.append(m_N[i + 2])
N_temp += 1
# 4th node
if m_N[i + 3] > m_min:
tspan_temp.append(tspan[i + 3])
y0_temp.append(y0[i + 3, 0: size_y])
z0_temp.append(z0[i + 3, 0: size_z])
residual_temp.append(residual[i + 3])
residual_collocation_temp.append(residual_collocation[i + 3, :])
m_add = max(m_N[i + 3] - m_d, m_min)
m_N_temp.append(m_add)
m_r += m_N[i + 3] - m_add
N_temp += 1
else:
n_r += 1
m_r += (m_N[i + 3])
# 5th node
tspan_temp.append(tspan[i + 4])
y0_temp.append(y0[i + 4, 0: size_y])
z0_temp.append(z0[i + 4, 0: size_z])
residual_temp.append(residual[i + 4])
residual_collocation_temp.append(residual_collocation[i + 4, :])
m_N_temp.append(m_N[i + 4])
N_temp += 1
# ignore those five nodes
i += 5
else:
# directly add the node
tspan_temp.append(tspan[i])
y0_temp.append(y0[i, 0: size_y])
z0_temp.append(z0[i, 0: size_z])
residual_temp.append(residual[i])
residual_collocation_temp.append(residual_collocation[i, :])
m_N_temp.append(m_N[i])
N_temp += 1
i += 1
else:
# directly add the node
tspan_temp.append(tspan[i])
y0_temp.append(y0[i, 0: size_y])
z0_temp.append(z0[i, 0: size_z])
residual_temp.append(residual[i])
residual_collocation_temp.append(residual_collocation[i, :])
m_N_temp.append(m_N[i])
N_temp += 1
i += 1
'''
if the previous loop stops at a node with index i >= (N - 5), the last
few remaining nodes are added directly; if the last few nodes have already been processed,
the index i should equal N, and nothing needs to be done
'''
if i < N:
'''
add the last few nodes starting from i to N - 1, which
is a total of (N - i) nodes
'''
for j in range(N - i):
# append the (i + j)th node
tspan_temp.append(tspan[i + j])
y0_temp.append(y0[i + j, 0: size_y])
z0_temp.append(z0[i + j, 0: size_z])
residual_temp.append(residual[i + j])
N_temp += 1
if (i + j) != (N - 1):
# no collocation residual for the last node
residual_collocation_temp.append(residual_collocation[i + j, :])
m_N_temp.append(m_N[i + j])
# convert from list to numpy arrays for the convenience of indexing
tspan_temp = np.array(tspan_temp)
y0_temp = np.array(y0_temp)
z0_temp = np.array(z0_temp)
residual_temp = np.array(residual_temp)
residual_collocation_temp = np.array(residual_collocation_temp)
m_N_temp = np.array(m_N_temp)
# lists to hold the outputs
N_new = 0
tspan_new = []
y0_new = []
z0_new = []
m_N_new = []
# Adding Nodes
i = 0
# Record the number of the added nodes
n_a = 0
m_a = 0
while i < N_temp - 1:
res_i = residual_temp[i]
m = m_N_temp[i]
if res_i > thres_add:
m_add = 0
# detect it is uniform type or non-uniform
if res_i > rho:
# non-uniform type
index_add = [] # index to add time node at the point
for j in range(1, m):
# start from index 1 as the initial node will be added automatically
# loop through the inner collocation points except the boundary points
if (residual_collocation_temp[i, j] / tol) > rho:
if j == 1 and residual_collocation_temp[i, j] >= residual_collocation_temp[i, j + 1]:
index_add.append(j)
elif j == (m - 1) and residual_collocation_temp[i, j] >= residual_collocation_temp[i, j - 1]:
index_add.append(j)
elif residual_collocation_temp[i, j] >= residual_collocation_temp[i, j + 1] and \
residual_collocation_temp[i, j] >= residual_collocation_temp[i, j - 1]:
index_add.append(j)
# add the initial time node first
delta_t = tspan_temp[i + 1] - tspan_temp[i]
t_init = tspan_temp[i]
tspan_new.append(t_init)
y0_init = y0_temp[i, 0: size_y]
y0_next = y0_temp[i + 1, 0: size_y]
y0_new.append(y0_init)
z0_init = z0_temp[i, 0: size_z]
z0_next = z0_temp[i + 1, 0: size_z]
z0_new.append(z0_init)
# for divided intervals, starts with m_init for all the intervals
m_N_new.append(m_init)
m_add += m_init
N_new += 1
for index in index_add:
c = c_m[m + 1 - m_min, index]
t_cur = t_init + c * delta_t
tspan_new.append(t_cur)
y0_cur = (1 - c) * y0_init + c * y0_next
y0_new.append(y0_cur)
z0_cur = (1 - c) * z0_init + c * z0_next
z0_new.append(z0_cur)
m_N_new.append(m_init)
m_add += m_init
N_new += 1
n_a += 1
m_a += (m_add - m)
else:
# uniform type
delta_t = tspan_temp[i + 1] - tspan_temp[i]
t_init = tspan_temp[i]
y0_init = y0_temp[i, 0: size_y]
y0_next = y0_temp[i + 1, 0: size_y]
z0_init = z0_temp[i, 0: size_z]
z0_next = z0_temp[i + 1, 0: size_z]
# increase the number of collocation points
# if exceeds the maximum allowed, divide the interval into two
if (m + m_i) > m_max:
# add the initial time node first
# add the current node with current number of collocation points
tspan_new.append(t_init)
y0_new.append(y0_init)
z0_new.append(z0_init)
m_N_new.append(m)
N_new += 1
# add the middle time node with current number of collocation points
t_mid = t_init + 0.5 * delta_t
y0_mid = (y0_init + y0_next) / 2
z0_mid = (z0_init + z0_next) / 2
tspan_new.append(t_mid)
y0_new.append(y0_mid)
z0_new.append(z0_mid)
m_N_new.append(m)
N_new += 1
m_a += m
else:
tspan_new.append(t_init)
y0_new.append(y0_init)
z0_new.append(z0_init)
m_N_new.append(m + m_i)
N_new += 1
m_a += m_i
else:
# directly add the node
t_init = tspan_temp[i]
tspan_new.append(t_init)
y0_init = y0_temp[i, 0: size_y]
y0_new.append(y0_init)
z0_init = z0_temp[i, 0: size_z]
z0_new.append(z0_init)
m_N_new.append(m)
N_new += 1
i += 1 # update the loop index
# add the final node
tspan_new.append(tspan_temp[-1])
y0_new.append(y0_temp[-1, :])
z0_new.append(z0_temp[-1, :])
N_new += 1
# convert from list to numpy arrays for the convenience of indexing
tspan_new = np.array(tspan_new)
y0_new = np.array(y0_new)
z0_new = np.array(z0_new)
m_N_new = np.array(m_N_new)
# generate the new accumulate collocation array
m_accumulate_new = np.zeros(N_new, dtype=int) # accumulated collocation points used
for i in range(1, N_new):
m_accumulate_new[i] = m_accumulate_new[i - 1] + m_N_new[i - 1]
print("\tRemove time nodes: {}; Add time nodes: {}; "
"Number of nodes before mesh: {}, after mesh: {}".format(n_r, n_a, N, N_new))
print("\tRemove collocation points: {}; Add collocation points: {};\n"
"\tPrevious total number of collocation points: {}, new total number of collocation points: {}.".format(
m_r, m_a, m_accumulate[-1], m_accumulate_new[-1]))
# return the output
return N_new, tspan_new, y0_new, z0_new, m_N_new, m_accumulate_new
def hp_remesh2(size_y, size_z, m_min, m_max, m_init, N,
m_N, m_accumulate, c_m, tspan, y0, z0,
residual, residual_collocation,
thres_remove, thres_add, m_d, m_i, rho, tol):
"""
Use hp mesh to remesh the problem
:param size_y: number of y variables
:param size_z: number of z variables.
:param m_min: minimum number of collocation points allowed
:param m_max: maximum number of collocation points allowed
:param m_init: initial number of collocation points used
:param N: number of time nodes in the current mesh
:param m_N: number of collocation points in each interval in the current mesh
:param m_accumulate: accumulated collocation points at each time node in the current mesh
:param c_m: coefficients c of lobatto collocation points
:param tspan: time span of the current mesh
:param y0: values of the differential variables in the current mesh
:param z0: values of the algebraic variables in the current mesh
:param residual: residual error evaluated for each time interval
:param residual_collocation: residual error on collocation points evaluated for each time interval
:param thres_remove: threshold to remove nodes from the mesh
:param thres_add: threshold to add nodes to the mesh
:param m_d: number of collocation points decreased in each interval
:param m_i: number of collocation points increased in each interval
:param rho: threshold for uniform error
:param tol: numerical tolerances
:return:
N_new: number of time nodes in the new mesh
tspan_new: time span of the new mesh
y0_new: values of the differential variables in the new mesh
z0_new: values of the algebraic variables in the new mesh
m_N_new: number of collocation points in each interval in the new mesh
m_accumulate_new: accumulated collocation points at each time node in the new mesh
"""
N_temp = 0
tspan_temp = []
y0_temp = []
z0_temp = []
residual_temp = []
residual_collocation_temp = []
m_N_temp = []
# remove Nodes
i = 0
# record the number of removed nodes
n_r = 0
# add the initial node first
tspan_temp.append(tspan[0])
y0_temp.append(y0[0, :])
z0_temp.append(z0[0, :])
N_temp += 1
# leave out the last two intervals first
while i < N - 2:
# check the two consecutive intervals
if residual[i] <= thres_remove and residual[i + 1] <= thres_remove:
# check the number of collocation points
if m_N[i] == m_min and m_N[i + 1] == m_min:
# if the collocation points in both intervals are the minimum allowed,
# delete the time node between the two intervals
n_r += 1
# append the second time node only
tspan_temp.append(tspan[i + 2])
y0_temp.append(y0[i + 2, 0: size_y])
z0_temp.append(z0[i + 2, 0: size_z])
residual_temp.append(residual[i + 1])
residual_collocation_temp.append(residual_collocation[i + 1, :])
m_N_temp.append(m_min)
N_temp += 1
else:
# or adjust the collocation points in the intervals
# adjust the number of collocation points used
# the first time node
tspan_temp.append(tspan[i + 1])
y0_temp.append(y0[i + 1, 0: size_y])
z0_temp.append(z0[i + 1, 0: size_z])
residual_temp.append(residual[i])
residual_collocation_temp.append(residual_collocation[i, :])
m_N_temp.append(max(m_N[i] - m_d, m_min))
N_temp += 1
# the second time node
tspan_temp.append(tspan[i + 2])
y0_temp.append(y0[i + 2, 0: size_y])
z0_temp.append(z0[i + 2, 0: size_z])
residual_temp.append(residual[i + 1])
residual_collocation_temp.append(residual_collocation[i + 1, :])
m_N_temp.append(max(m_N[i + 1] - m_d, m_min))
N_temp += 1
i += 2
else:
# append the time node directly
tspan_temp.append(tspan[i + 1])
y0_temp.append(y0[i + 1, :])
z0_temp.append(z0[i + 1, :])
residual_temp.append(residual[i])
residual_collocation_temp.append(residual_collocation[i, :])
m_N_temp.append(m_N[i])
N_temp += 1
i += 1
# check whether the last node is added
if i < N - 1:
for j in range((N - 1) - i):
# append the time node directly
tspan_temp.append(tspan[i + j + 1])
y0_temp.append(y0[i + j + 1, :])
z0_temp.append(z0[i + j + 1, :])
residual_temp.append(residual[i + j])
residual_collocation_temp.append(residual_collocation[i + j, :])
if residual[i + j] <= thres_remove:
m_N_temp.append(max(m_N[i + j] - m_d, m_min))
else:
m_N_temp.append(m_N[i + j])
N_temp += 1
# convert from list to numpy arrays for the convenience of indexing
tspan_temp = np.array(tspan_temp)
y0_temp = np.array(y0_temp)
z0_temp = np.array(z0_temp)
residual_temp = np.array(residual_temp)
residual_collocation_temp = np.array(residual_collocation_temp)
m_N_temp = np.array(m_N_temp)
# lists to hold the outputs
N_new = 0
tspan_new = []
y0_new = []
z0_new = []
m_N_new = []
# Adding Nodes
i = 0
# Record the number of the added nodes
n_a = 0
while i < N_temp - 1:
res_i = residual_temp[i]
m = m_N_temp[i]
if res_i > thres_add:
# detect it is uniform type or non-uniform
if res_i > rho:
# non-uniform type
index_add = [] # index to add time node at the point
for j in range(1, m):
# start from index 1 as the initial node will be added automatically
# loop through the inner collocation points except the boundary points
if (residual_collocation_temp[i, j] / tol) > rho:
if j == 1 and residual_collocation_temp[i, j] >= residual_collocation_temp[i, j + 1]:
index_add.append(j)
elif j == (m - 1) and residual_collocation_temp[i, j] >= residual_collocation_temp[i, j - 1]:
index_add.append(j)
elif residual_collocation_temp[i, j] >= residual_collocation_temp[i, j + 1] and \
residual_collocation_temp[i, j] >= residual_collocation_temp[i, j - 1]:
index_add.append(j)
# add the initial time node first
delta_t = tspan_temp[i + 1] - tspan_temp[i]
t_init = tspan_temp[i]
tspan_new.append(t_init)
y0_init = y0_temp[i, 0: size_y]
y0_next = y0_temp[i + 1, 0: size_y]
y0_new.append(y0_init)
z0_init = z0_temp[i, 0: size_z]
z0_next = z0_temp[i + 1, 0: size_z]
z0_new.append(z0_init)
# for divided intervals, starts with m_init for all the intervals
m_N_new.append(m_init)
N_new += 1
for index in index_add:
c = c_m[m + 1 - m_min, index]
t_cur = t_init + c * delta_t
tspan_new.append(t_cur)
y0_cur = (1 - c) * y0_init + c * y0_next
y0_new.append(y0_cur)
z0_cur = (1 - c) * z0_init + c * z0_next
z0_new.append(z0_cur)
m_N_new.append(m_init)
N_new += 1
n_a += 1
else:
# uniform type
delta_t = tspan_temp[i + 1] - tspan_temp[i]
t_init = tspan_temp[i]
y0_init = y0_temp[i, 0: size_y]
y0_next = y0_temp[i + 1, 0: size_y]
z0_init = z0_temp[i, 0: size_z]
z0_next = z0_temp[i + 1, 0: size_z]
# increase the number of collocation points
# if exceeds the maximum allowed, divide the interval into two
if (m + m_i) > m_max:
# add the initial time node first
# add the current node with current number of collocation points
tspan_new.append(t_init)
y0_new.append(y0_init)
z0_new.append(z0_init)
m_N_new.append(m)
N_new += 1
# add the middle time node with current number of collocation points
t_mid = t_init + 0.5 * delta_t
y0_mid = (y0_init + y0_next) / 2
z0_mid = (z0_init + z0_next) / 2
tspan_new.append(t_mid)
y0_new.append(y0_mid)
z0_new.append(z0_mid)
m_N_new.append(m)
N_new += 1
n_a += 1
else:
tspan_new.append(t_init)
y0_new.append(y0_init)
z0_new.append(z0_init)
m_N_new.append(m + m_i)
N_new += 1
else:
# directly add the node
t_init = tspan_temp[i]
tspan_new.append(t_init)
y0_init = y0_temp[i, 0: size_y]
y0_new.append(y0_init)
z0_init = z0_temp[i, 0: size_z]
z0_new.append(z0_init)
m_N_new.append(m)
N_new += 1
i += 1 # update the loop index
# add the final node
tspan_new.append(tspan_temp[-1])
y0_new.append(y0_temp[-1, :])
z0_new.append(z0_temp[-1, :])
N_new += 1
# convert from list to numpy arrays for the convenience of indexing
tspan_new = np.array(tspan_new)
y0_new = np.array(y0_new)
z0_new = np.array(z0_new)
m_N_new = np.array(m_N_new)
# generate the new accumulate collocation array
m_accumulate_new =
|
np.zeros(N_new, dtype=int)
|
numpy.zeros
|
import numpy as np
import opt_einsum
from acqdp.tensor_network import ContractionTask, defaultOrderResolver
class Compiler:
"""Compile a :class:`ContractionScheme` indicating contraction scheme and sliced edges into a
:class:`ContractionTask`, a hardware-aware format that can be readily used for tensor network contraction.
Typically, a :class:`ContractionScheme` found by one of the :class:`OrderFinder` only aims to minimize the theoretical
floating number operations and / or largest size of intermediate tensors. Although those are typically accurate indicators
of the overall contracion cost, they fail to accomodate inefficiencies from elsewhere. The compiler takes care of
machine-related inefficiencies by slightly modifying the order.
:ivar do_patch: If set to false, the patching and reorganization of the contraction order below will be skipped.
:ivar patch_size: Whenever two adjacent branches both have size less than the patch size, the branches are merged
together. This is to avoid inefficiency called by skewed tensor shapes in tensor multiplication, with a slight
sacrifice in floating number operations.
:ivar reorg_thres: Threshold for contraction order reorganization. The order is seperated into small tensor
multiplications and large tensor multiplications. All pairwise tensor multiplications involving tensors with size
less than `reorg_thres` will be put forward whenever possible.
"""
def __init__(self,
reorg_thres=23,
patch_size=5,
do_patch=False,
**kwargs):
self.reorg_thres = reorg_thres
self.patch_size = patch_size
self.do_patch = do_patch
def compile(self, tn, scheme, **kwargs):
"""
:param tn: The :class:`TensorNetwork` the input contraction scheme is based on.
:type tn: :class:`TensorNetwork`
:param scheme: The contraction scheme from which the task is generated.
:type scheme: :class:`ContractionScheme`
:returns: :class:`ContractionTask` -- The compiled contraction task ready for execution.
"""
tn = tn._expand_and_delta()
res = self._generate_template(tn, scheme=scheme, **kwargs)
res.set_data({
node: tn.network.nodes[(0, node)]['tensor'].contract()
for node in tn.nodes_by_name
})
res.update_fix(tn)
return res
def _generate_template(self, tn, scheme, test=0, **kwargs):
inputs = {node[1]: [None] for node in tn.nodes}
data_ref = {(0, node): inputs[node] for node in inputs}
order, split = scheme.order, scheme.slices
split = [(1, i) for i in split]
tn_copy = tn.copy().expand(recursive=True)
tn_copy.open_edges += [i[1] for i in split]
tn_copy.fix()
for edge_name in list(tn_copy.edges_by_name):
if (len(tn_copy.network[(1, edge_name)])
== 0) and edge_name not in tn_copy.open_edges:
tn_copy.network.remove_node((1, edge_name))
init_order, final_order = self._order_patch(tn_copy.copy(), order, split,
**kwargs)
k = self._generate_template_fix(tn, [], data_ref)
if isinstance(k, ContractionTask):
return k
lst, fix_dict = k
for item in init_order:
if item[1] == '#':
break
stn_name, stn = tn_copy.encapsulate(item[0], stn_name=item[1])
lst += self._generate_template_basic(stn, data_ref, (0, stn_name))
cnt = len(lst)
fix_lst, fix_dict_new = self._generate_template_fix(
tn_copy, split, data_ref)
lst += fix_lst
fix_dict.update(fix_dict_new)
tn_copy.open_edges = [
i for i in tn_copy.open_edges if (1, i) not in split
]
for i in split:
tn_copy.fix_edge(i[1])
tn_copy.fix()
lst += self._generate_template_basic(tn_copy, data_ref, '#')
_, _, shapes = tn_copy.subscripts()
if len(final_order) > 0:
path = []
lst_nodes = list(n[1] for n in tn_copy.nodes)
for o in final_order:
path.append(
(lst_nodes.index(o[0][0]), lst_nodes.index(o[0][1])))
lst_nodes.remove(o[0][0])
lst_nodes.remove(o[0][1])
lst_nodes.append(o[1])
expr = opt_einsum.contract_expression(lst[-1][3]['subscripts'],
*shapes,
optimize=path)
lst[-1][3]['expr'] = expr
res = ContractionTask(commands=lst,
fix_dict=fix_dict,
inputs=inputs,
output=data_ref['#'],
shape=tn.shape,
open_edges=tn.open_edges,
sub_outputs=tn_copy.open_edges,
cnt=cnt)
return res
def _generate_template_fix(self, tn, split, data_ref):
lst = []
fix_dict = self._generate_template_edge_fix(tn, split)
if not isinstance(fix_dict, dict):
return fix_dict
for node in tn.nodes:
fix_idx = []
fix_to = []
for i, edge in enumerate(tn.network.nodes[node]['edges']):
if (1, edge) in fix_dict:
fix_idx.append(i)
fix_to.append(fix_dict[(1, edge)][1])
if len(fix_idx) > 0:
new_res = [None]
lst.append(('f', data_ref[node], new_res, {'fix_idx': fix_idx, 'fix_to': fix_to}))
data_ref[node] = new_res
res_dict = {(k, tuple(fix_dict[k][0])): fix_dict[k][1] for k in fix_dict}
return lst, res_dict
def _generate_template_edge_fix(self, tn, split):
fix_dict = {}
for edge in tn.edges:
if edge in split:
tn.network.nodes[edge]['fix_to'] = list(range(tn.network.nodes[edge]['dim']))
fix_dict[edge] = (list(range(tn.network.nodes[edge]['dim'])), [0])
elif 'fix_to' in tn.network.nodes[edge]:
ll = len(tn.network.nodes[edge]['fix_to'])
if ll == 0:
return ContractionTask(output=[(0,
|
np.zeros(tn.shape)
|
numpy.zeros
|
from numpy import cos, sin, array, eye
class FKImplementations:
# compare with analytic solution from the book
# "Robotics: modelling, planning and control"
@staticmethod
def fk_PlanarArm(q, links):
a1, a2, a3 = links[0].dh.a, links[1].dh.a, links[2].dh.a
c123 = cos(q[0] + q[1] + q[2])
s123 = sin(q[0] + q[1] + q[2])
T = eye(4)
T[0, 0] = c123
T[0, 1] = -s123
T[1, 0] = s123
T[1, 1] = c123
T[0, 3] = a1 * cos(q[0]) + a2 * cos(q[0] + q[1]) + a3 * c123
T[1, 3] = a1 * sin(q[0]) + a2 * sin(q[0] + q[1]) + a3 * s123
return T
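# Quick sanity check: with q = [0, 0, 0] the rotation block reduces to the identity and the
# end-effector position is (a1 + a2 + a3, 0, 0), i.e. the fully stretched planar arm.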
@staticmethod
def fk_SphericalArm(q, links):
d2, d3 = links[1].dh.d, q[2]
c1 = cos(q[0])
s1 = sin(q[0])
c2 = cos(q[1])
s2 = sin(q[1])
T = eye(4)
T[0, 0:3] = array([c1 * c2, -s1, c1 * s2])
T[0, 3] = c1 * s2 * d3 - s1 * d2
T[1, 0:3] = array([s1 * c2, c1, s1 * s2])
T[1, 3] = s1 * s2 * d3 + c1 * d2
T[2, 0:3] = array([-s2, 0, c2])
T[2, 3] = c2 * d3
return T
@staticmethod
def fk_AnthropomorphicArm(q, links):
a2, a3 = links[1].dh.a, links[2].dh.a
c1 = cos(q[0])
s1 = sin(q[0])
c2 = cos(q[1])
s2 = sin(q[1])
c23 = cos(q[1] + q[2])
s23 = sin(q[1] + q[2])
T = eye(4)
T[0, 0:3] = array([c1 * c23, -c1 * s23, s1])
T[0, 3] = c1 * (a2 * c2 + a3 * c23)
T[1, 0:3] = array([s1 * c23, -s1 * s23, -c1])
T[1, 3] = s1 * (a2 * c2 + a3 * c23)
T[2, 0:3] = array([s23, c23, 0])
T[2, 3] = a2 * s2 + a3 * s23
return T
@staticmethod
def fk_Arm2(q, links):
a1, a2, a3 = links[0].dh.a, links[1].dh.a, links[2].dh.a
c1 = cos(q[0])
s1 = sin(q[0])
c2 = cos(q[1])
s2 = sin(q[1])
c23 = cos(q[1] + q[2])
s23 = sin(q[1] + q[2])
T = eye(4)
T[0, 0:3] = array([c1 * c23, s1, c1 * s23])
T[1, 0:3] = array([s1 * c23, -c1, s1 * s23])
T[2, 0:3] = array([s23, 0, -c23])
T[0, 3] = c1 * (a1 + a2 * c2 + a3 * c23)
T[1, 3] = s1 * (a1 + a2 * c2 + a3 * c23)
T[2, 3] = a2 * s2 + a3 * s23
return T
@staticmethod
def fk_kuka(q):
q1, q2, q3, q4, q5, q6 = q
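# arm geometry constants (presumably DH link lengths / offsets in metres for this 6-DOF KUKA model)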
a1 = 0.18
a2 = 0.6
d4 = 0.62
d6 = 0.115
T = array(
[
[
(
(sin(q1) * sin(q4) + cos(q1) * cos(q4) * cos(q2 + q3)) * cos(q5)
- sin(q5) * sin(q2 + q3) * cos(q1)
)
* cos(q6)
+ (sin(q1) * cos(q4) - sin(q4) * cos(q1) * cos(q2 + q3)) * sin(q6),
-(
(sin(q1) * sin(q4) + cos(q1) * cos(q4) * cos(q2 + q3)) * cos(q5)
- sin(q5) * sin(q2 + q3) * cos(q1)
)
* sin(q6)
+ (sin(q1) * cos(q4) - sin(q4) * cos(q1) * cos(q2 + q3)) * cos(q6),
(sin(q1) * sin(q4) + cos(q1) * cos(q4) * cos(q2 + q3)) * sin(q5)
+ sin(q2 + q3) * cos(q1) * cos(q5),
a1 * cos(q1)
+ a2 * cos(q1) * cos(q2)
+ d4 * sin(q2 + q3) * cos(q1)
+ d6
* (
(sin(q1) * sin(q4) + cos(q1) * cos(q4) * cos(q2 + q3)) * sin(q5)
+ sin(q2 + q3) * cos(q1) * cos(q5)
),
],
[
(
(sin(q1) * cos(q4) * cos(q2 + q3) - sin(q4) * cos(q1)) * cos(q5)
- sin(q1) * sin(q5) * sin(q2 + q3)
)
* cos(q6)
- (sin(q1) * sin(q4) * cos(q2 + q3) + cos(q1) * cos(q4)) * sin(q6),
-(
(sin(q1) * cos(q4) * cos(q2 + q3) - sin(q4) * cos(q1)) * cos(q5)
- sin(q1) * sin(q5) * sin(q2 + q3)
)
* sin(q6)
- (sin(q1) * sin(q4) * cos(q2 + q3) +
|
cos(q1)
|
numpy.cos
|
#!/usr/bin/env python
"""
Python function to estimate derivatives from noisy data based on
<NAME>'s Total Variation Regularized Numerical
Differentiation (TVDiff) algorithm.
Example:
>>> u = TVRegDiff(data, iter, alph, u0, scale, ep, dx,
... plotflag, diagflag)
<NAME> (<EMAIL>), Apr. 10, 2011
Please cite <NAME>, "Numerical differentiation of noisy,
nonsmooth data," ISRN Applied Mathematics, Vol. 2011, Article ID 164564,
2011.
Copyright notice:
Copyright 2010. Los Alamos National Security, LLC. This material
was produced under U.S. Government contract DE-AC52-06NA25396 for
Los Alamos National Laboratory, which is operated by Los Alamos
National Security, LLC, for the U.S. Department of Energy. The
Government is granted for, itself and others acting on its
behalf, a paid-up, nonexclusive, irrevocable worldwide license in
this material to reproduce, prepare derivative works, and perform
publicly and display publicly. Beginning five (5) years after
(March 31, 2011) permission to assert copyright was obtained,
subject to additional five-year worldwide renewals, the
Government is granted for itself and others acting on its behalf
a paid-up, nonexclusive, irrevocable worldwide license in this
material to reproduce, prepare derivative works, distribute
copies to the public, perform publicly and display publicly, and
to permit others to do so. NEITHER THE UNITED STATES NOR THE
UNITED STATES DEPARTMENT OF ENERGY, NOR LOS ALAMOS NATIONAL
SECURITY, LLC, NOR ANY OF THEIR EMPLOYEES, MAKES ANY WARRANTY,
EXPRESS OR IMPLIED, OR ASSUMES ANY LEGAL LIABILITY OR
RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR USEFULNESS OF
ANY INFORMATION, APPARATUS, PRODUCT, OR PROCESS DISCLOSED, OR
REPRESENTS THAT ITS USE WOULD NOT INFRINGE PRIVATELY OWNED
RIGHTS.
BSD License notice:
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above
copyright notice, this list of conditions and the following
disclaimer.
Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials
provided with the distribution.
Neither the name of Los Alamos National Security nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
#########################################################
# #
# Python translation by <NAME> #
# Rutherford Appleton Laboratory, STFC, UK (2014) #
# <EMAIL> #
# #
#########################################################
"""
import sys
import logging
import numpy as np
import scipy as sp
from scipy import sparse
from scipy.sparse import linalg as splin
_has_matplotlib = True
try:
import matplotlib.pyplot as plt
except ImportError:
_has_matplotlib = False
logging.warning("Matplotlib is not installed - plotting "
"functionality disabled")
def log_iteration(ii, s0, u, g):
relative_change = np.linalg.norm(s0) / np.linalg.norm(u)
g_norm = np.linalg.norm(g)
logging.info('iteration {0:4d}: relative change = {1:.3e}, '
'gradient norm = {2:.3e}\n'.format(ii,
relative_change,
g_norm))
def TVRegDiff(data, itern, alph, u0=None, scale='small', ep=1e-6, dx=None,
plotflag=_has_matplotlib, diagflag=True, precondflag=True,
diffkernel='abs', cgtol=1e-4, cgmaxit=100):
"""
Estimate derivatives from noisy data based using the Total
Variation Regularized Numerical Differentiation (TVDiff)
algorithm.
Parameters
----------
data : ndarray
One-dimensional array containing series data to be
differentiated.
itern : int
Number of iterations to run the main loop. A stopping
condition based on the norm of the gradient vector g
below would be an easy modification. No default value.
alph : float
Regularization parameter. This is the main parameter
to fiddle with. Start by varying by orders of
magnitude until reasonable results are obtained. A
value to the nearest power of 10 is usually adequate.
No default value. Higher values increase
regularization strength and improve conditioning.
u0 : ndarray, optional
Initialization of the iteration. Default value is the
naive derivative (without scaling), of appropriate
length (this being different for the two methods).
Although the solution is theoretically independent of
the initialization, a poor choice can exacerbate
conditioning issues when the linear system is solved.
scale : {'large' or 'small' (case insensitive)}, str, optional
Default is 'small'. 'small' has somewhat better boundary
behavior, but becomes unwieldy for data larger than
1000 entries or so. 'large' has simpler numerics but
is more efficient for large-scale problems. 'large' is
more readily modified for higher-order derivatives,
since the implicit differentiation matrix is square.
ep : float, optional
Parameter for avoiding division by zero. Default value
is 1e-6. Results should not be very sensitive to the
value. Larger values improve conditioning and
therefore speed, while smaller values give more
accurate results with sharper jumps.
dx : float, optional
Grid spacing, used in the definition of the derivative
operators. Default is the reciprocal of the data size.
plotflag : bool, optional
Flag whether to display plot at each iteration.
Default is True. Useful, but adds significant
running time.
diagflag : bool, optional
Flag whether to display diagnostics at each
iteration. Default is True. Useful for diagnosing
preconditioning problems. When tolerance is not met,
an early iterate being best is more worrying than a
large relative residual.
precondflag: bool, optional
Flag whether to use a preconditioner for conjugate gradient solution.
Default is True. While in principle it should speed things up,
sometimes the preconditioner can cause convergence problems instead,
and should be turned off. Note that this mostly makes sense for 'small'
scale problems; for 'large' ones, the improved preconditioner is one
of the main features of the algorithms and turning it off defeats the
point.
diffkernel: str, optional
Kernel to use in the integral to smooth the derivative. By default it's
the absolute value, |u'| (value: "abs"). However, it can be changed to
being the square, (u')^2 (value: "sq"). The latter produces smoother
derivatives, whereas the absolute values tends to make them more blocky.
Default is abs.
cgtol: float, optional
Tolerance to use in conjugate gradient optimisation. Default is 1e-4.
cgmaxit: int, optional
Maximum number of iterations to use in conjugate gradient optimisation.
Default is 100
Returns
-------
u : ndarray
Estimate of the regularized derivative of data. Due to
different grid assumptions, length(u) = length(data) + 1
if scale = 'small', otherwise length(u) = length(data).
"""
# Make sure we have a column vector
data = np.array(data)
assert len(data.shape) == 1, "data is not one-dimensional"
# Get the data size.
n = len(data)
# Default checking. (u0 is done separately within each method.)
if dx is None:
dx = 1.0 / n
# Different methods for small- and large-scale problems.
if (scale.lower() == 'small'):
# Differentiation operator
d0 = -np.ones(n)/dx
du =
|
np.ones(n-1)
|
numpy.ones
|
from unittest import TestCase
from unittest.mock import Mock
import copulas
import numpy as np
import pytest
from rdt.transformers.numerical import GaussianCopulaTransformer, NumericalTransformer
class TestNumericalTransformer(TestCase):
def test___init__super_attrs(self):
"""super() arguments are properly passed and set as attributes."""
nt = NumericalTransformer(dtype='int', nan='mode', null_column=False)
assert nt.dtype == 'int'
assert nt.nan == 'mode'
assert nt.null_column is False
def test_fit(self):
"""Test fit nan mean with numpy.array"""
# Setup
data = np.array([1.5, None, 2.5])
# Run
transformer = NumericalTransformer(dtype=np.float, nan='nan')
transformer.fit(data)
# Asserts
expect_fill_value = 'nan'
expect_dtype = np.float
assert transformer.null_transformer.fill_value == expect_fill_value
assert transformer._dtype == expect_dtype
def test__learn_rounding_digits_more_than_15_decimals(data):
"""Test the _learn_rounding_digits method with more than 15 decimals.
If the data has more than 15 decimals, None should be returned.
Input:
- An array that contains floats with more than 15 decimals.
Output:
- None
"""
data = np.random.random(size=10).round(20)
output = NumericalTransformer._learn_rounding_digits(data)
assert output is None
def test__learn_rounding_digits_less_than_15_decimals(data):
"""Test the _learn_rounding_digits method with less than 15 decimals.
If the data has less than 15 decimals, the maximum number of decimals
should be returned.
Input:
- An array that contains floats with a maximum of 3 decimals and a NaN.
Output:
- 3
"""
data = np.array([10, 0., 0.1, 0.12, 0.123, np.nan])
output = NumericalTransformer._learn_rounding_digits(data)
assert output == 3
def test__learn_rounding_digits_negative_decimals_float(data):
"""Test the _learn_rounding_digits method with floats multiples of powers of 10.
If the data has all multiples of 10, 100, or any other higher power of 10,
the output is the negative number of decimals representing the corresponding
power of 10.
Input:
- An array that contains floats that are multiples of powers of 10, 100 and 1000
and a NaN.
Output:
- -1
"""
data = np.array([1230., 12300., 123000., np.nan])
output = NumericalTransformer._learn_rounding_digits(data)
assert output == -1
def test__learn_rounding_digits_negative_decimals_integer(data):
"""Test the _learn_rounding_digits method with integers multiples of powers of 10.
If the data has all multiples of 10, 100, or any other higher power of 10,
the output is the negative number of decimals representing the corresponding
power of 10.
Input:
- An array that contains integers that are multiples of powers of 10, 100 and 1000
and a NaN.
Output:
- -1
"""
data = np.array([1230, 12300, 123000, np.nan])
output = NumericalTransformer._learn_rounding_digits(data)
assert output == -1
def test_fit_rounding_none(self):
"""Test fit rounding parameter with ``None``
If the rounding parameter is set to ``None``, the ``fit`` method
should not set its ``rounding`` or ``_rounding_digits`` instance
variables.
Input:
- An array with floats rounded to one decimal and a None value
Side Effect:
- ``rounding`` and ``_rounding_digits`` continue to be ``None``
"""
# Setup
data = np.array([1.5, None, 2.5])
# Run
transformer = NumericalTransformer(dtype=np.float, nan='nan', rounding=None)
transformer.fit(data)
# Asserts
assert transformer.rounding is None
assert transformer._rounding_digits is None
def test_fit_rounding_int(self):
"""Test fit rounding parameter with int
If the rounding parameter is set to an int, the ``fit`` method
should set its ``rounding`` and ``_rounding_digits`` instance
variables to that value.
Input:
- An array with floats rounded to one decimal and a None value
Side Effect:
- ``rounding`` and ``_rounding_digits`` are the provided int
"""
# Setup
data = np.array([1.5, None, 2.5])
expected_digits = 3
# Run
transformer = NumericalTransformer(dtype=np.float, nan='nan', rounding=expected_digits)
transformer.fit(data)
# Asserts
assert transformer.rounding == expected_digits
assert transformer._rounding_digits == expected_digits
def test_fit_rounding_auto(self):
"""Test fit rounding parameter with ``'auto'``
If the ``rounding`` parameter is set to ``'auto'``,
``fit`` should learn the ``_rounding_digits`` to be the max
number of decimal places seen in the data.
Input:
- Array of floats with up to 4 decimals
Side Effect:
- ``_rounding_digits`` is set to 4
"""
# Setup
data = np.array([1, 2.1, 3.12, 4.123, 5.1234, 6.123, 7.12, 8.1, 9])
# Run
transformer = NumericalTransformer(dtype=np.float, nan='nan', rounding='auto')
transformer.fit(data)
# Asserts
assert transformer._rounding_digits == 4
def test_fit_rounding_auto_large_numbers(self):
"""Test fit rounding parameter with ``'auto'``
If the ``rounding`` parameter is set to ``'auto'``
and the data is very large, ``fit`` should learn
``_rounding_digits`` to be the biggest number of 0s
to round to that keeps the data the same.
Input:
- Array of data with numbers between 10^10 and 10^20
Side Effect:
- ``_rounding_digits`` is set to the minimum exponent seen in the data
"""
# Setup
exponents = [np.random.randint(10, 20) for i in range(10)]
big_numbers = [10**exponents[i] for i in range(10)]
data = np.array(big_numbers)
# Run
transformer = NumericalTransformer(dtype=np.float, nan='nan', rounding='auto')
transformer.fit(data)
# Asserts
assert transformer._rounding_digits == -min(exponents)
def test_fit_rounding_auto_max_decimals(self):
"""Test fit rounding parameter with ``'auto'``
If the ``rounding`` parameter is set to ``'auto'``,
``fit`` should learn the ``_rounding_digits`` to be the max
number of decimal places seen in the data. The max
amount of decimals that floats can be accurately compared
with is 15. If the input data has values with more than
14 decimals, we will not be able to accurately learn the
number of decimal places required, so we do not round.
Input:
- Array with a value that has 15 decimals
Side Effect:
- ``_rounding_digits`` is set to ``None``
"""
# Setup
data = np.array([0.000000000000001])
# Run
transformer = NumericalTransformer(dtype=np.float, nan='nan', rounding='auto')
transformer.fit(data)
# Asserts
assert transformer._rounding_digits is None
def test_fit_rounding_auto_max_inf(self):
"""Test fit rounding parameter with ``'auto'``
If the ``rounding`` parameter is set to ``'auto'``,
and the data contains infinite values, ``fit`` should
learn the ``_rounding_digits`` to be the min
number of decimal places seen in the data with
the infinite values filtered out.
Input:
- Array with ``np.inf`` as a value
Side Effect:
- ``_rounding_digits`` is set to max seen in rest of data
"""
# Setup
data = np.array([15000, 4000, 60000, np.inf])
# Run
transformer = NumericalTransformer(dtype=np.float, nan='nan', rounding='auto')
transformer.fit(data)
# Asserts
assert transformer._rounding_digits == -3
def test_fit_rounding_auto_max_zero(self):
"""Test fit rounding parameter with ``'auto'``
If the ``rounding`` parameter is set to ``'auto'``,
and the max in the data is 0, ``fit`` should
learn the ``_rounding_digits`` to be 0.
Input:
- Array with 0 as max value
Side Effect:
- ``_rounding_digits`` is set to 0
"""
# Setup
data = np.array([0, 0, 0])
# Run
transformer = NumericalTransformer(dtype=np.float, nan='nan', rounding='auto')
transformer.fit(data)
# Asserts
assert transformer._rounding_digits == 0
def test_fit_rounding_auto_max_negative(self):
"""Test fit rounding parameter with ``'auto'``
If the ``rounding`` parameter is set to ``'auto'``,
and the max in the data is negative, the ``fit`` method
should learn ``_rounding_digits`` to be the min number
of digits seen in those negative values.
Input:
- Array with negative max value
Side Effect:
- ``_rounding_digits`` is set to min number of digits in array
"""
# Setup
data = np.array([-500, -220, -10])
# Run
transformer = NumericalTransformer(dtype=np.float, nan='nan', rounding='auto')
transformer.fit(data)
# Asserts
assert transformer._rounding_digits == -1
def test_fit_min_max_none(self):
"""Test fit min and max parameters with ``None``
If the min and max parameters are set to ``None``,
the ``fit`` method should not set its ``min`` or ``max``
instance variables.
Input:
- Array of floats and null values
Side Effect:
- ``_min_value`` and ``_max_value`` stay ``None``
"""
# Setup
data = np.array([1.5, None, 2.5])
# Run
transformer = NumericalTransformer(dtype=np.float, nan='nan',
min_value=None, max_value=None)
transformer.fit(data)
# Asserts
assert transformer._min_value is None
assert transformer._max_value is None
def test_fit_min_max_int(self):
"""Test fit min and max parameters with int values
If the min and max parameters are set to an int,
the ``fit`` method should not change them.
Input:
- Array of floats and null values
Side Effect:
- ``_min_value`` and ``_max_value`` remain unchanged
"""
# Setup
data = np.array([1.5, None, 2.5])
# Run
transformer = NumericalTransformer(dtype=np.float, nan='nan',
min_value=1, max_value=10)
transformer.fit(data)
# Asserts
assert transformer._min_value == 1
assert transformer._max_value == 10
def test_fit_min_max_auto(self):
"""Test fit min and max parameters with ``'auto'``
If the min or max parameters are set to ``'auto'``
the ``fit`` method should learn them from the
fitted data.
Input:
- Array of floats and null values
Side Effect:
- ``_min_value`` and ``_max_value`` are learned
"""
# Setup
data = np.array([-100, -5000, 0, None, 100, 4000])
# Run
transformer = NumericalTransformer(dtype=np.float, nan='nan',
min_value='auto', max_value='auto')
transformer.fit(data)
# Asserts
assert transformer._min_value == -5000
assert transformer._max_value == 4000
def test_reverse_transform_rounding_none(self):
"""Test ``reverse_transform`` when ``rounding`` is ``None``
The data should not be rounded at all.
Input:
- Random array of floats between 0 and 1
Output:
- Input array
"""
# Setup
data = np.random.random(10)
# Run
transformer = NumericalTransformer(dtype=np.float, nan=None)
transformer._rounding_digits = None
result = transformer.reverse_transform(data)
# Assert
np.testing.assert_array_equal(result, data)
def test_reverse_transform_rounding_none_integer(self):
"""Test ``reverse_transform`` when ``rounding`` is ``None`` and the dtype is integer.
The data should be rounded to 0 decimals and returned as integer values.
Input:
- Array of multiple float values with decimals.
Output:
- Input array rounded and converted to integers.
"""
# Setup
data = np.array([0., 1.2, 3.45, 6.789])
# Run
transformer = NumericalTransformer(dtype=np.int64, nan=None)
transformer._rounding_digits = None
transformer._dtype = np.int64
result = transformer.reverse_transform(data)
# Assert
expected = np.array([0, 1, 3, 7])
np.testing.assert_array_equal(result, expected)
def test_reverse_transform_rounding_none_with_nulls(self):
"""Test ``reverse_transform`` when ``rounding`` is ``None`` and there are nulls.
The data should not be rounded at all.
Input:
- 2d Array of multiple float values with decimals and a column setting at least 1 null.
Output:
- First column of the input array as entered, replacing the indicated value with a NaN.
"""
# Setup
data = np.array([
[0., 0.],
[1.2, 0.],
[3.45, 1.],
[6.789, 0.],
])
# Run
transformer = NumericalTransformer()
null_transformer = Mock()
null_transformer.reverse_transform.return_value = np.array([0., 1.2, np.nan, 6.789])
transformer.null_transformer = null_transformer
transformer._rounding_digits = None
transformer._dtype = np.float
result = transformer.reverse_transform(data)
# Assert
expected = np.array([0., 1.2, np.nan, 6.789])
np.testing.assert_array_equal(result, expected)
def test_reverse_transform_rounding_none_with_nulls_dtype_int(self):
"""Test ``reverse_transform`` when rounding is None, dtype is int and there are nulls.
The data should be rounded to 0 decimals and returned as float values with
nulls in the right place.
Input:
- 2d Array of multiple float values with decimals and a column setting at least 1 null.
Output:
- First column of the input array rounded, replacing the indicated value with a NaN,
and kept as float values.
"""
# Setup
data = np.array([
[0., 0.],
[1.2, 0.],
[3.45, 1.],
[6.789, 0.],
])
# Run
transformer = NumericalTransformer()
null_transformer = Mock()
null_transformer.reverse_transform.return_value = np.array([0., 1.2, np.nan, 6.789])
transformer.null_transformer = null_transformer
transformer._rounding_digits = None
transformer._dtype = np.int
result = transformer.reverse_transform(data)
# Assert
expected = np.array([0., 1., np.nan, 7.])
np.testing.assert_array_equal(result, expected)
def test_reverse_transform_rounding_positive_rounding(self):
"""Test ``reverse_transform`` when ``rounding`` is a positive int
The data should round to the maximum number of decimal places
set in the ``_rounding_digits`` value.
Input:
- Array with decimals
Output:
- Same array rounded to the provided number of decimal places
"""
# Setup
data = np.array([1.1111, 2.2222, 3.3333, 4.44444, 5.555555])
# Run
transformer = NumericalTransformer(dtype=np.float, nan=None)
transformer._rounding_digits = 2
result = transformer.reverse_transform(data)
# Assert
expected_data = np.array([1.11, 2.22, 3.33, 4.44, 5.56])
np.testing.assert_array_equal(result, expected_data)
def test_reverse_transform_rounding_negative_rounding_int(self):
"""Test ``reverse_transform`` when ``rounding`` is a negative int
The data should round to the number set in the ``_rounding_digits``
attribute and remain ints.
Input:
- Array with floats above 100
Output:
- Same array rounded to the provided number of 0s
- Array should be of type int
"""
# Setup
data = np.array([2000.0, 120.0, 3100.0, 40100.0])
# Run
transformer = NumericalTransformer(dtype=np.int, nan=None)
transformer._dtype = np.int
transformer._rounding_digits = -3
result = transformer.reverse_transform(data)
# Assert
expected_data =
|
np.array([2000, 0, 3000, 40000])
|
numpy.array
|
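The expected array in the completion above follows from rounding to -3 digits. A small hedged illustration (plain NumPy, not the rdt API) of the rounding behaviour the test relies on:

import numpy as np

# np.round with a negative `decimals` argument rounds to the left of the decimal
# point, which is what a negative _rounding_digits value exploits.
data = np.array([2000.0, 120.0, 3100.0, 40100.0])
print(np.round(data, -3).astype(np.int64))  # -> [ 2000     0  3000 40000]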
from .fdc import FDC
from .classify import CLF
import numpy as np
import pickle
from scipy.cluster.hierarchy import dendrogram as scipydendroed
from scipy.cluster.hierarchy import to_tree
from .hierarchy import compute_linkage_matrix
import copy
import pandas as pd
from collections import OrderedDict as OD
class TREENODE:
def __init__(self, id_ = -1, parent = None, child = None, scale = -1):
if child is None:
self.child = [] # has to be list of TreeNode
else:
self.child = child
self.scale = scale
self.parent = parent
self.id_ = id_
def __repr__(self):
return ("Node: [%s] @ s = %.3f" % (self.id_,self.scale))
def is_leaf(self):
return len(self.child) == 0
def get_child(self, id_ = None):
if id_ is None:
return self.child
else:
for c in self.child:
if c.get_id() == id_:
return c
def get_scale(self):
return self.scale
def get_id(self):
return self.id_
def add_child(self, treenode):
self.child.append(treenode)
def get_rev_child(self):
child = self.child[:]
child.reverse()
return child
class TREE:
""" Contains all the hierachy and information concerning the clustering
"""
def __init__(self, root = None, shallow_copy = None, ignore_root = True):
self.root = root
self.node_dict = None
self.mergers = None
self.robust_node = None
self.new_cluster_label = None
self.robust_terminal_node = None #list of the terminal robust nodes
self.robust_clf_node = None # full information about classification is recorded here, keys of dict are the classifying nodes
self.all_clf_node = None # calculated when checking all nodes !
self.all_robust_node = None # list of all nodes in the robust tree (classifying nodes and leaf nodes)
self.cluster_to_node_id = None # dictionary mapping cluster labels (displayed on plot) with node id
self.new_idx_centers = None
self.tree_constructed = False
self.ignore_root = ignore_root
def build_tree(self, model):
"""Given hierachy, builds a tree of the clusterings. The nodes are class objects define in the class TreeNode
Parameters
---------
model : object from the FDC class
contains the fitted hierarchy produced via the coarse_graining() method
Returns
---------
tuple = (root, node_dict, mergers)
root : TreeNode class object
root of the tree
node_dict : dictionary of TreeNode objects.
Objects are stored by their merger ID
mergers :
list of nodes being merged with the corresponding scale of merging
"""
if self.tree_constructed is True:
return
mergers = find_mergers(model.hierarchy , model.noise_range) # OK this might be a problem ... need to check this.
mergers.reverse()
m = mergers[0]
self.node_dict = OD()
self.root = TREENODE(id_ = m[1], scale = m[2])
self.node_dict[self.root.get_id()] = self.root
for m in mergers:
for mc in m[0]:
c_node = TREENODE(id_ = mc, parent = self.node_dict[m[1]], child = [], scale = -1)
self.node_dict[m[1]].add_child(c_node)
self.node_dict[c_node.get_id()] = c_node
self.node_dict[m[1]].scale = m[2]
self.mergers = mergers
self.tree_constructed = True
def node_items(self): # breadth-first ordering
""" Returns a list of the nodes using a breadth-first search
"""
stack = [self.root]
list_nodes = []
while stack:
current_node = stack[0]
list_nodes.append(current_node)
for c in current_node.child:
stack.append(c)
stack = stack[1:]
return list_nodes
def identify_robust_merge(self, model, X, n_average = 10, score_threshold = 0.5):
"""Starting from the root, goes down the tree and evaluates which clustering node are robust
It returns a list of the nodes for which their corresponding clusters are well defined according to
a logistic regression and a score_threshold given by the user
Will write information in the following objects :
self.robust_terminal_node (list) #
self.robust_clf_node (dict) # full info
"""
self.build_tree(model) # Extracts all the information from model and outputs a tree
root, node_dict, mergers = self.root, self.node_dict, self.mergers
print("[tree.py] : Printing two top-most layers")
print("[tree.py] : Root :", root)
print("[tree.py] : Root's childs :", root.get_child())
if self.all_clf_node is not None: # meaning, the nodes have already been checked for classification
print("Need to update this part, ")
assert False
else: # focus on this loop .........
self.compute_robust_node(model, X, n_average, score_threshold)
# Listing all nodes in the robust tree ...
all_robust_node = set([])
for k, _ in self.robust_clf_node.items():
all_robust_node.add(k)
current_node = node_dict[k]
for c in current_node.child:
all_robust_node.add(c.get_id())
self.all_robust_node = list(all_robust_node)
def compute_propagated_error(self, node_id):
path = []
p = self.node_dict[node_id]
while p.get_id() != self.root.get_id():
tmp = p
p = p.parent
path.append((p.get_id(),tmp.get_id()))
full_path_prob = [self.probability_tree[p] for p in path]
return compute_prob(full_path_prob)
#print(path)
#full_path = [self.probability_tree[p] for p in path]
#print(full_path)
#print('total prob:',compute_prob(full_path))
def compute_propagated_robust_node(self, score_threshold):
""" Based on the classifying information obtained from compute_robust_node,
finds subset of the tree that has a -> total error <- better than the score_threshold !
"""
self.compute_probability_tree() # builds a dictionary of the classification scores on every branches.
self.robust_clf_propag_error = OD()
self.robust_clf_propag_node = OD()
terminal_node = []
for node_id in self.robust_clf_node.keys():
#print("checking node ", node_id,'\t',self.node_dict[node_id])
p_error = self.compute_propagated_error(node_id)
self.robust_clf_propag_error[node_id] = p_error
if p_error+1e-6 > score_threshold:
self.robust_clf_propag_node[node_id] = self.robust_clf_node[node_id]
for n in self.robust_clf_propag_node.keys():
for n_c in self.node_dict[n].child:
if n_c.get_id() not in self.robust_clf_propag_node.keys():
terminal_node.append(n_c.get_id())
self.robust_terminal_propag_node = terminal_node
def compute_robust_node(self, model, X, n_average, score_threshold):
""" Start from the root, computes the classification score at every branch in the tree
and stops if classication score is below a certain threshold.
Results are stored in:
self.robust_clf_node : dictionary of node id to classification information (weights, biases, scores, etc.)
self.robust_terminal_node : list of terminal nodes id, whose parents are robust classifiers.
"""
self.robust_terminal_node = [] #list of the terminal robust nodes
self.robust_clf_node = OD() # dictionary of the nodes where a partition is made (non-leaf nodes)
# add root first
clf = classify_root(self.root, model, X, n_average = n_average)
score = clf.cv_score
if self.ignore_root is True:
print("[tree.py] : root is ignored, # %i \t score = %.4f"%(self.root.get_id(),score))
self.robust_clf_node[self.root.get_id()] = clf
else:
if score+1e-6 > score_threshold: # --- search stops if the node is not statistically significant (threshold)
print("[tree.py] : {0:<15s}{1:<10d}{2:<10s}{3:<.4f}".format("root is node #",self.root.get_id(),"score =",score))
self.robust_clf_node[self.root.get_id()] = clf
else:
print("[tree.py] : root is not robust # %i \t score = %.4f"%(self.root.get_id(),score))
for current_node in self.node_items()[1:]:
if current_node.parent.get_id() in self.robust_clf_node.keys():
if not current_node.is_leaf():
clf = classify_root(current_node, model, X, n_average = n_average)
score = clf.cv_score
if score+1e-6 > score_threshold: # --- search stops if the node is not statistically significant (threshold)
print("[tree.py] : {0:<15s}{1:<10d}{2:<10s}{3:<.4f}".format("robust node #",current_node.get_id(),"score =",score))
self.robust_clf_node[current_node.get_id()] = clf
else:
print("[tree.py] : {0:<15s}{1:<10d}{2:<10s}{3:<.4f}".format("reject node #",current_node.get_id(),"score =",score))
self.robust_terminal_node.append(current_node.get_id())
else: # implies it's parent was robust, and is a leaf node
self.robust_terminal_node.append(current_node.get_id())
def compute_probability_tree(self):
""" Compute the probability of correct classification for every branch of the tree.
The info is stored in a dictionary self.probability_tree which maps tuples to probabilities.
Tuples are the parent node id and the child node id.
"""
self.probability_tree = OD()
for node_id, clf in self.robust_clf_node.items():
current_node = self.node_dict[node_id]
for i, c in enumerate(current_node.child):
self.probability_tree[(node_id, c.get_id())] = clf.cv_score # could also give score per class ........
#classify_results['mean_score_cluster'][i]
def find_robust_labelling(self, model, X, n_average = 10, score_threshold = 0.5):
""" Finds the merges that are statistically significant (i.e. greater than the score_threshold)
and relabels the data accordingly
Trick here: first use a low threshold (will compute the tree down to its lowest components)
Then one can just iterate quickly over score threshold ...
Parameters
------
model : fdc object
Contains the coarse graining information
X : array, shape = (n_sample, n_marker)
Contains the data in the original space
n_average : int
Number of folds in the cross validation
score_threshold : float
Classification score threshold
Returns
---------
self : TREE() object
"""
if score_threshold > 1.0 or score_threshold < 0.0:
assert False, "Can't choose a threshold above 1.0 or below 0.0 !"
if self.robust_terminal_node is None:
self.identify_robust_merge(model, X, n_average = n_average, score_threshold = score_threshold)
root = self.root
node_dict = self.node_dict
mergers = self.mergers
robust_terminal_node = self.robust_terminal_node # this is a list
self.compute_propagated_robust_node(score_threshold)
robust_terminal_node = self.robust_terminal_propag_node
#print('quite: ',len(robust_terminal_node))
#print('not quite: ',len(self.robust_terminal_node))
#exit()
###################
###################
# RELABELLING DATA !
###################
###################
cluster_n = len(robust_terminal_node)
n_sample = len(model.X)
y_robust = -1*np.ones(n_sample,dtype=np.int)
y_original = model.hierarchy[0]['cluster_labels']
cluster_to_node_id = OD()
# here all terminal nodes are given a label, in the same order they are stored.
y_node = classification_labels([node_dict[i] for i in robust_terminal_node], model)
assert np.count_nonzero(y_node == -1) == 0, "Wrong labelling or ROOT is not robust ... !"
for i, node_id in enumerate(robust_terminal_node):
pos = (y_node == i)
y_robust[pos] = i
cluster_to_node_id[i] = node_id
if len(robust_terminal_node) == 0:
y_robust *= 0 # only one coloring
new_idx_centers = []
all_idx = np.arange(0, model.X.shape[0], dtype=int)
for i in range(cluster_n):
pos_i = (y_robust == i)
max_rho = np.argmax(model.rho[y_robust == i])
idx_i = all_idx[pos_i][max_rho]
new_idx_centers.append(idx_i)
self.new_cluster_label = y_robust
self.new_idx_centers = np.array(new_idx_centers,dtype=int)
self.cluster_to_node_id = cluster_to_node_id
self.node_to_cluster_id = OD({v: k for k, v in self.cluster_to_node_id.items()})
print("\n\n\n")
print("[tree.py] : -----------> VALIDATION SCORING INFORMATION < -----------------")
print("[tree.py] : ", "{0:<15s}{1:<15s}{2:<15s}{3:<15s}".format("Terminal node","Parent node", "Displayed node","Progated probability"))
for n in robust_terminal_node:
p_id = self.node_dict[n].parent.get_id()
print("[tree.py] : ", "{0:<15d}{1:<15d}{2:<15d}{3:<15.4f}".format(n,p_id,self.node_to_cluster_id[n],self.robust_clf_propag_error[p_id]))
return self
def check_all_merge(self, model, X, n_average = 10):
""" Goes over all classification nodes and evaluates classification scores """
self.build_tree(model)
self.all_clf_node = OD()
for merger in self.mergers : # don't need to go through the whole hierarchy, since we're checking everything
node_id = merger[1]
clf = classify_root(self.node_dict[node_id], model, X, n_average = n_average)
print("[tree.py] : ", node_id, "accuracy : %.3f"%clf.cv_score, "sample_size : %i"%clf._n_sample, sep='\t')
self.all_clf_node[node_id] = clf
return self
def predict(self, X):
""" Uses the root classifiers to perform a hierarchical classification of the nodes !
need to do recursive classification ...
"""
#uprint(self.robust_clf_node)
terminal_nodes = set(self.robust_terminal_propag_node)
node_to_cluster = self.node_to_cluster_id
y_pred = np.empty(len(X), dtype=int)  # predicted cluster label for each sample
for i, x in enumerate(X):
current_clf_node = self.root # recursively go down the tree, starting from root
current_id = current_clf_node.get_id()
while True:
if current_clf_node.get_id() in terminal_nodes:
y_pred[i] = node_to_cluster[current_id] # reached the leaf node
break
else:
y_branch = self.robust_clf_node[current_id].predict([x])[0]
child_list = current_clf_node.child
current_clf_node = child_list[y_branch] # go down one layer
return y_pred
''' def classify_point(self, x, W, b):
""" Given weight matrix and intercept (bias), classifies point x
w.r.t to a linear classifier """
n_class = len(b)
if n_class == 1: # binary classification
f = (np.dot(x,W[0]) + b)[0]
if f > 0.:
return 1
else:
return 0
else:
score_per_class = []
for i, w in enumerate(W):
score_per_class.append(np.dot(x,w)+b[i])
#print(score_per_class)
return np.argmax(score_per_class) '''
def save(self, name=None):
""" Saves current model to specified path 'name' """
if name is None:
name = self.make_file_name()
fopen = open(name,'wb')
pickle.dump(self,fopen)
fopen.close()
def load(self, name=None):
if name is None:
name = self.make_file_name()
self.__dict__.update(pickle.load(open(name,'rb')).__dict__)
return self
def make_file_name(self):
t_name = "clf_tree.pkl"
return t_name
def write_result_mathematica(self, model, marker) : # graph should be a dict of list
"""
-> Saves results in .txt files, which are easily read with a Mathematica
script for ez plotting ...
"""
if self.robust_clf_node is None :
assert False, "Model not yet fitted !"
self.gate_dict = self.find_full_gate(model)
my_graph = OD()
my_graph_score = OD()
for e,v in self.robust_clf_node.items():
my_graph[e] = []
my_graph_score[e] = v['mean_score']
for c in self.node_dict[e].child:
my_graph[e].append(c.get_id())
self.graph = my_graph
self.graph_score = my_graph_score
self.write_graph_mathematica()
self.write_graph_score_mathematica()
self.write_gate_mathematica(self.gate_dict, marker)
self.write_cluster_label_mathematica()
def write_graph_mathematica(self, out_file = "graph.txt"):
""" Writes graph in mathematica readable format """
f = open(out_file,'w')
my_string_list = []
for node_id, node_childs in self.graph.items(): # v is a list
for child in node_childs :
my_string_list.append("%i -> %i"%(node_id, child))
f.write(",".join(my_string_list))
f.close()
def write_graph_score_mathematica(self, out_file = "graph_score.txt"):
""" Writes scores of classification for every division node """
f = open(out_file, 'w')
string_list = []
for k, v in self.graph_score.items():
string_list.append('%i -> % .5f'%(k,v))
f.write(','.join(string_list))
f.close()
def write_gate_mathematica(self, gate_dict, marker, out_file = "gate.txt"):
""" Writes most important gates for discriminating data in a classification """
f = open(out_file, 'w')
string_list = []
for k, g in gate_dict.items():
string_list.append("{%i -> %i, \"%s\"}"%(k[0],k[1],str_gate(marker[g[0][0]],g[1][0])))
f.write("{")
f.write(','.join(string_list))
f.write("}")
f.close()
def write_cluster_label_mathematica(self, out_file = "n_to_c.txt"): # cton is a dictionary of clusters to node id
""" Node id to cluster labels """
f = open(out_file, 'w')
string_list = []
for k, v in self.cluster_to_node_id.items():
string_list.append("{%i -> %i}"%(v,k))
f.write("<|")
f.write(','.join(string_list))
f.write("|>")
f.close()
def print_mapping(self):
print("Mapping of terminal nodes to plotted labels:")
[print(k, " -> ", v) for k,v in OD(self.node_to_cluster_id).items()]
def find_gate(self, node_id):
""" Return the most important gates, sorted by amplitude. One set of gate per category (class)
"""
import copy
clf_info = self.robust_clf_node[node_id]
n_class = len(self.node_dict[node_id].child)
weights = clf_info['coeff'] # weights should be sorted by amplitude for now, those are the most important for the scoring function
gate_array = []
gate_weights = []
for i, w in enumerate(weights):
argsort_w = np.argsort(np.abs(w))[::-1] # ordering (largest to smallest) -> need also to get the signs
sign = np.sign(w[argsort_w])
gate_array.append([argsort_w, sign])
gate_weights.append(w)
if n_class == 2: # for binary classification the first class (0) has a negative score ... for all other cases the classes have positive scores
gate_array.append(copy.deepcopy(gate_array[-1]))
gate_array[0][1] *= -1
gate_weights.append(copy.deepcopy(w))
gate_weights[0] *= -1
gate_weights = np.array(gate_weights)
return gate_array, gate_weights
def print_clf_weight(self, markers=None, file='weight.txt'):
weight_summary = OD()
for node_id, info in self.robust_clf_node.items():
node = self.node_dict[node_id]
_, gate_weights = self.find_gate(node_id)
for i,c in enumerate(node.child):
weight_summary[(node_id, c.get_id())] = gate_weights[i]
fout = open(file, 'w')
fout.write('n1\tn2\t')
n_feature = len(gate_weights[0])
if markers is not None:
for m in markers:
fout.write(m+'\t')
else:
for i in range(n_feature):
fout.write(str(i)+'\t')
fout.write('\n')
for k, v in weight_summary.items():
k0 = k[0]
k1 = k[1]
if k[0] in self.node_to_cluster_id.keys():
k0 = self.node_to_cluster_id[k[0]]
if k[1] in self.node_to_cluster_id.keys():
k1 = self.node_to_cluster_id[k[1]]
fout.write('%i\t%i\t'%(k0,k1))
for w in v:
fout.write('%.3f\t'%w)
fout.write('\n')
def find_full_gate(self, model):
""" Determines the most relevant gates which specify each partition
"""
gate_dict = OD() # (tuple to gate ... (clf_node, child_node) -> gate (ordered in magnitude))
for node_id, info in self.robust_clf_node.items():
childs = self.node_dict[node_id].child
gates, _ = self.find_gate(node_id)
for c, g in zip(childs, gates):
gate_dict[(node_id, c.get_id())] = g # storing gate info
return gate_dict
def describe_clusters(self, X_standard, cluster_label = None, marker = None, perc = 0.05):
""" Checks the composition of each clusters in terms of outliers (define by top and bottom perc)
Parameters
--------------
X_standard : array, shape = (n_sample, n_marker)
Data array with raw marker expression
cluster_label : optional, array, shape = n_sample
Cluster labels for each data point. If none, just uses the labels inferred by the Tree
marker : optional, list of str, len(list) = n_marker
Marker labels. If not specified will use marker_0, marker_1, etc.
perc : optional, float
The percentage of most and least expressed data points for a marker that you consider outliers
Return
-------------
df_pos, df_neg : tuple of pandas.DataFrame
dataframes with row index as markers and columns as cluster labels. An additional row also
indicates the size of each cluster as a fraction of the total sample.
"""
if cluster_label is None:
cluster_label = self.new_cluster_label
label_to_idx = OD() # cluster label to data index
unique_label = np.unique(cluster_label)
n_sample, n_marker = X_standard.shape
if marker is None:
marker = ['marker_%i'%i for i in range(n_marker)]
assert n_sample == len(X_standard)
n_perc = int(round(perc*n_sample))
for ul in unique_label:
label_to_idx[ul] = np.where(cluster_label == ul)[0]
idx_top = []
idx_bot = []
for m in range(n_marker):
asort = np.argsort(X_standard[:,m])
idx_bot.append(asort[:n_perc]) # bottom most expressed markers
idx_top.append(asort[-n_perc:]) # top most expressed markers
cluster_positive_composition = OD()
cluster_negative_composition = OD()
for label, idx in label_to_idx.items():
# count percentage of saturated markers in a given cluster ...
# compare that to randomly distributed (size_of_cluster/n_sample)*n_perc
cluster_positive_composition[label] = []
cluster_negative_composition[label] = []
for m in range(n_marker):
ratio_pos = len(set(idx_top[m]).intersection(set(idx)))/len(idx_top[m])
ratio_neg = len(set(idx_bot[m]).intersection(set(idx)))/len(idx_bot[m])
cluster_positive_composition[label].append(ratio_pos)
cluster_negative_composition[label].append(ratio_neg)
df_pos = pd.DataFrame(cluster_positive_composition, index = marker)
df_neg = pd.DataFrame(cluster_negative_composition, index = marker)
cluster_ratio_size = np.array([len(label_to_idx[ul])/n_sample for ul in unique_label])
df_cluster_ratio_size = pd.DataFrame(cluster_ratio_size.reshape(1,-1), index = ['Cluster_ratio'], columns = label_to_idx.keys())
# data frame, shape = (n_marker + 1, n_cluster) with index labels [cluster_ratio, marker_1, marker_2 ...]
df_pos_new = df_cluster_ratio_size.append(df_pos)
df_neg_new = df_cluster_ratio_size.append(df_neg)
return df_pos_new, df_neg_new
##############################################
###############################################
def classify_root(root, model, X, n_average = 10, C=1.0):
""" Trains a classifier on the childs of "root" and returns a classifier for these types.
Important attributes are:
self.scaler_list -> [mu, std]
self.cv_score -> mean cv score
self.mean_train_score -> mean train score
self.clf_list -> list of sklearn classifiers (for taking majority vote)
Returns
---------
CLF object (from classify.py). Object has similar syntax to sklearn's classifier syntax
"""
y = classification_labels(root.get_child(), model)
if len(
|
np.unique(y)
|
numpy.unique
|
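The tree code above calls a `compute_prob` helper that is defined elsewhere in the module and not shown. Assuming the propagated error is simply the product of the per-branch classification scores along the path from the root, a minimal sketch could look like this (an assumption, not the package's actual implementation):

import numpy as np

def compute_prob(path_scores):
    # Treat each branch score as an independent probability of classifying
    # correctly, so the propagated probability is their product.
    return float(np.prod(path_scores))

# Example: three branches with cross-validation scores 0.95, 0.90 and 0.85.
print(compute_prob([0.95, 0.90, 0.85]))  # ~0.727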
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import matplotlib.gridspec as gridspec
import seaborn
import datetime
flno = [2,3,4,6,7,8]
#colors = ['black','red','orange','lime','black','green','blue','purple']
colors = ["k","#045275","#0C7BDC","#7CCBA2","k","#FED976","#F0746E","#7C1D6F"]
maxlag = [0,0,5,10,10,20]
#c = plt.rcParams['axes.prop_cycle'].by_key()['color']
#c = ["#FF1F58","#009ADE","#FFC61E","blue","green"]
c = ["#009ADE","#FF1F58","k","green","orange"]
def make_means(dat,pt):
chiA, chiB = np.zeros((len(pt),3)), np.zeros((len(pt),3))
flaA, flaB = np.zeros((len(pt),3)), np.zeros((len(pt),3))
fishA, fishB = np.zeros((len(pt),3)), np.zeros((len(pt),3))
chiA[:,0], flaA[:,0], fishA[:,0] = pt, pt, pt
chiB[:,0], flaB[:,0], fishB[:,0] = pt, pt, pt
# add cloudy flag
dat['CLOUDY'] = ((dat['NICE'] > 0) | (dat['MASBR'] >= 1.2)).astype(int)
for i,f in enumerate(flno):
for lag in np.arange(1,maxlag[i]):
dat.loc[(dat['FLIGHT'] == f),'CLOUDY'] = np.maximum(dat.loc[(dat['FLIGHT'] == f),'CLOUDY'],
dat[(dat['FLIGHT'] == f)].shift(periods=lag, fill_value=0.0)['CLOUDY'])
# add ascent/descent flag
dz = (dat['ALT'] - dat.shift(periods=1)['ALT'])*1e3
dt = dat['TIME'] - dat.shift(periods=1)['TIME']
vert = np.abs(dz / dt)
vert_avg = vert.rolling(window=20).mean()
dat['ASCENT_FLAG'] = ((vert_avg > 10) | (dat['ALT'] < 12)).astype(int)
# add chiwis flag
dat['CELL_FLAG'] = ((dat['PRES_CELL'] < 30.0) | (dat['PRES_CELL'] > 45.0) | (dat['FLAG'] == 1)).astype(int)
#dat['CELL_FLAG'] = ((dat['PRES_CELL'] < 20.0) | (dat['PRES_CELL'] > 45.0) | (dat['FLAG'] == 1)).astype(int)
#dat['OOR'] = ((dat['PRES_CELL'] < 20.0) | (dat['PRES_CELL'] > 30.0) | (dat['FLAG'] == 1)).astype(int)
# FL7 dive flag
dat['F7_DIVE'] = ((dat['FLIGHT'] == 7) & (dat['TIME'] > 19.9e3) & (dat['TIME'] < 20.2e3)).astype('int')
for i,pti in enumerate(pt):
datA = dat[(dat['ASCENT_FLAG'] == 0) & (dat['PT'] >= pti-2.0) & (dat['PT'] < pti+2.0) & (dat['FLIGHT'] < 5)]
dat_chiwisA = datA[(datA['CELL_FLAG'] == 0)]
#dat_chiwisA_oor = datA[(datA['OOR'] == 0)]
dat_flashA = datA[(datA['F7_DIVE'] == 0)]
dat_clr_fishA = datA[(datA['CLOUDY'] == 0)]
chiA[i,1], chiA[i,2] = np.mean(dat_chiwisA['H2O']), np.std(dat_chiwisA['H2O'])
#chiA_oor[i,1], chiA_oor[i,2] = np.mean(dat_chiwisA_oor['H2O']), np.std(dat_chiwisA_oor['H2O'])
flaA[i,1], flaA[i,2] = np.mean(dat_flashA['FLH2O']), np.std(dat_flashA['FLH2O'])
fishA[i,1], fishA[i,2] = np.mean(dat_clr_fishA['FIH2O']), np.std(dat_clr_fishA['FIH2O'])
datB = dat[(dat['ASCENT_FLAG'] == 0) & (dat['PT'] >= pti-2.0) & (dat['PT'] < pti+2.0) & (dat['FLIGHT'] > 5)]
dat_chiwisB = datB[(datB['CELL_FLAG'] == 0)]
#dat_chiwisB_oor = datB[(datB['OOR'] == 0)]
dat_flashB = datB[(datB['F7_DIVE'] == 0)]
dat_clr_fishB = datB[(datB['CLOUDY'] == 0)]
chiB[i,1], chiB[i,2] = np.mean(dat_chiwisB['H2O']), np.std(dat_chiwisB['H2O'])
flaB[i,1], flaB[i,2] = np.mean(dat_flashB['FLH2O']), np.std(dat_flashB['FLH2O'])
fishB[i,1], fishB[i,2] = np.mean(dat_clr_fishB['FIH2O']), np.std(dat_clr_fishB['FIH2O'])
return chiA, chiB, flaA, flaB, fishA, fishB
def plot_profs(ax, chi, fla, fish, mls, byesno=False, b=False):
ax.plot(chi[:,1], chi[:,0], color=c[2], label="ChiWIS")
ax.fill_betweenx(chi[:,0], chi[:,1]-chi[:,2], chi[:,1]+chi[:,2], color=c[2], alpha=0.2)
ax.plot(fla[:,1], fla[:,0], color=c[0], label="FLASH")
ax.fill_betweenx(fla[:,0], fla[:,1]-fla[:,2], fla[:,1]+fla[:,2], color=c[0], alpha=0.2)
if byesno != False:
ax.plot(b[:,1], b[:,0], color=c[3], label="balloon CFH")
ax.fill_betweenx(b[:,0], b[:,1]-b[:,2], b[:,1]+b[:,2], color=c[3], alpha=0.2)
ax.plot(mls[:,1], mls[:,0], color=c[4], label="MLS satellite")
ax.fill_betweenx(mls[:,0], mls[:,1]-mls[:,2], mls[:,1]+mls[:,2], color=c[4], alpha=0.2)
ax.plot([1,100],[382,382],"k--")
ax.plot([1,100],[405,405],"k:")
ax.set_ylim([370,480])
ax.set_xlim([2.5,14])
axin = ax.inset_axes([8,420,6,60], transform=ax.transData)
axin.set_xscale("log", nonposx='clip')
axin.plot(chi[:,1], chi[:,0], color=c[2])
axin.plot(fla[:,1], fla[:,0], color=c[0])
if byesno!= False:
axin.plot(b[:,1], b[:,0], color=c[3])
axin.plot(mls[:,1], mls[:,0], color=c[4])
axin.plot([1,300],[382,382],"k--")
axin.plot([1,300],[405,405],"k:")
axin.set_ylim([360,480])
ytk = [360,380,400,420,440,460]
axin.set_yticklabels(list(map(str,ytk)))
axin.set_xlim([2.5,100])
xtk = [4,6,10,20,50]
axin.set_xticks(xtk)
axin.set_xticklabels(list(map(str,xtk)))
axin.grid(linestyle=':')
return ax
def mean_profile_compare(dat):
pt = np.arange(362,502,4)
chiA, chiB, flaA, flaB, fishA, fishB = make_means(dat,pt)
balloon = np.loadtxt("Data/balloon_avg_prof.csv",delimiter=',',skiprows=1)
mlsA = np.loadtxt("Data/mls_perA_prof.csv",delimiter=',',skiprows=1)
mlsB =
|
np.loadtxt("Data/mls_perB_prof.csv",delimiter=',',skiprows=1)
|
numpy.loadtxt
|
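The balloon and MLS profiles above are read as (level, mean, std) columns with `np.loadtxt`. A self-contained hedged example of that loading pattern (the inline CSV is illustrative, not one of the source's data files):

import io
import numpy as np

# Assumed layout of the profile CSVs: one header row, then "pt,mean,std" columns.
csv_text = "pt,h2o_mean,h2o_std\n370,4.1,0.3\n390,4.8,0.4\n410,5.2,0.5\n"
prof = np.loadtxt(io.StringIO(csv_text), delimiter=",", skiprows=1)
pt, mean, std = prof[:, 0], prof[:, 1], prof[:, 2]
print(pt, mean, std)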
import logging as lg
import os
import numpy as np
import sklearn.model_selection as skms
import torch
from PIL import ImageFile, Image
from torch.utils.data import Dataset, Sampler
from torchvision import transforms
from utils.multiset import MultiSet, Multi_Set_Binned
ImageFile.LOAD_TRUNCATED_IMAGES = True
# dataset for preembedding images
class ImageDataSet(Dataset):
def __init__(self, root='train', transform=transforms.ToTensor()):
self.root = root
self.transform = transform
self.paths = [f.path for f in os.scandir(root) if f.is_file()]
names = [f.name for f in os.scandir(root) if f.is_file()]
self.ids = [f.split('.')[0] for f in names]
self.ids.sort()
lg.debug("dataset input_side initialized")
def __len__(self):
# Here, we need to return the number of samples in this dataset.
return len(self.paths)
def __getitem__(self, index):
img_name = os.path.join(self.root, index + '.jpg')
image = Image.open(img_name)
image = image.resize((224, 224))
if self.transform is not None:
image = self.transform(image)
return image, index
# Sampler used in preembedding
class IdSampler(Sampler):
def __init__(self, data_source):
super().__init__(data_source)
self.data_source = data_source
def __iter__(self):
return iter(self.data_source.ids)
def __len__(self):
return len(self.data_source)
def save_partition(n_partition, accstr, accnum, output):
np.save(output + 'array_' + str(n_partition) + '_ids.npy', accstr)
np.save(output + 'array_' + str(n_partition) + '_vals.npy', accnum)
lg.debug('saved: ' + output + 'array_' + str(n_partition) + '.npy')
# Sampler for multiset
class MultiSampler(torch.utils.data.Sampler):
data_source: MultiSet
def __init__(self, data_source: MultiSet):
super().__init__(data_source)
self.data_source = data_source
self.inds = data_source.resp_inds
self.l = len(self.inds)
lg.info("sampler inititiated with %s samples", len(self))
def __iter__(self):
return iter(np.random.permutation(self.inds))
def __len__(self):
return self.l
# sampler for multiset that supports a test/val split
class MultiSplitSampler(MultiSampler):
def __init__(self, data_source, train=True):
super().__init__(data_source)
inds = self.data_source.training_idx if train else self.data_source.test_idx
self.inds =
|
np.intersect1d(self.inds, inds)
|
numpy.intersect1d
|
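`training_idx` and `test_idx` come from the MultiSet object and are not shown in the snippet. A hedged sketch of how such a split could be produced and then intersected with the responder indices, using the `sklearn.model_selection` module already imported as `skms` (the concrete index values are illustrative):

import numpy as np
import sklearn.model_selection as skms

all_idx = np.arange(100)
resp_inds = np.arange(0, 100, 2)  # hypothetical: only even ids have responses

training_idx, test_idx = skms.train_test_split(all_idx, test_size=0.2, random_state=0)

# Keep only labelled samples that fall inside the chosen split, as MultiSplitSampler does.
print(len(np.intersect1d(resp_inds, training_idx)))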
import numpy as np
def viterbi(pi, a, b, obs):
'''
pi = (1 x nStates)
a = (nStates x nStates)
b = (nStates x m)
obs = (1 x k)
path = (1 x k)
delta = (nStates x T)
phi = (nStates x T)
'''
nStates =
|
np.shape(b)
|
numpy.shape
|
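The viterbi prompt above stops right after the shape bookkeeping. A hedged, self-contained sketch of the recursion such a function typically continues with (a standard textbook Viterbi, not necessarily the source's exact code):

import numpy as np

def viterbi(pi, a, b, obs):
    # pi: (nStates,) initial probabilities, a: (nStates, nStates) transitions,
    # b: (nStates, m) emission probabilities, obs: (T,) observation indices.
    nStates = np.shape(b)[0]
    T = np.shape(obs)[0]
    delta = np.zeros((nStates, T))            # best path probability per state
    phi = np.zeros((nStates, T), dtype=int)   # back-pointers
    delta[:, 0] = pi * b[:, obs[0]]
    for t in range(1, T):
        for s in range(nStates):
            trans = delta[:, t - 1] * a[:, s]
            phi[s, t] = np.argmax(trans)
            delta[s, t] = np.max(trans) * b[s, obs[t]]
    # Backtrace the most likely state sequence.
    path = np.zeros(T, dtype=int)
    path[-1] = np.argmax(delta[:, -1])
    for t in range(T - 2, -1, -1):
        path[t] = phi[path[t + 1], t + 1]
    return path, delta, phi

# Tiny usage example with 2 states and 2 observation symbols.
pi = np.array([0.6, 0.4])
a = np.array([[0.7, 0.3], [0.4, 0.6]])
b = np.array([[0.9, 0.1], [0.2, 0.8]])
print(viterbi(pi, a, b, np.array([0, 1, 0]))[0])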
import numpy as np
np.random.seed(42)
import gc
import csv
import os
import warnings
import random
import glob
import scipy
import librosa
import scipy
from bird import utils
from bird import data_augmentation as da
from bird import signal_processing as sp
def load_test_data_birdclef(directory, target_size, input_data_mode):
if not os.path.isdir(directory):
raise ValueError("data filepath is invalid")
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
nb_classes = len(classes)
class_indices = dict(zip(classes, range(nb_classes)))
index_to_species = dict(zip(range(nb_classes), classes))
X_test = []
Y_test = []
training_files = []
for subdir in classes:
subpath = os.path.join(directory, subdir)
# load sound data
class_segments = glob.glob(os.path.join(subpath, "*.wav"))
# print(subdir+": ", len(class_segments))
print("group segments ... ")
samples = group_segments(class_segments)
for sample in samples:
training_files.append(sample)
data = load_segments(sample, target_size, input_data_mode)
X_test.append(data)
y = np.zeros(nb_classes)
y[class_indices[subdir]] = 1.0
Y_test.append(y)
return np.asarray(X_test),
|
np.asarray(Y_test)
|
numpy.asarray
|
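The loader above builds one one-hot target per sample from `class_indices`. A minimal hedged sketch of that labelling step in isolation (the class names are illustrative):

import numpy as np

classes = ["species_a", "species_b", "species_c"]
class_indices = dict(zip(classes, range(len(classes))))

def one_hot(subdir, nb_classes=len(classes)):
    # Same pattern as the loader: a zero vector with a 1.0 at the class index.
    y = np.zeros(nb_classes)
    y[class_indices[subdir]] = 1.0
    return y

print(one_hot("species_b"))  # -> [0. 1. 0.]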
#!/usr/bin/env python
import cv2
import numpy as np
import matplotlib.pyplot as plt
import time
from imutils import *
import rospy
from sklearn.cluster import KMeans
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from core_msgs.msg import CenPoint, ParkPoints
from geometry_msgs.msg import Vector3
import copy
'''
MAIN LOGIC FOR LANE DETECTION.
INPUT: Top-down stitched image from 3 lane cameras
OUTPUT: 1) Lane information in core_msgs/CenPoint Format,
2) Free and occupied space in transformed coordinates for the path planner, in sensor_msgs/Image mono8 image format
3) (Only when rosparam park_mode is 1) Information regarding the parking spot in core_msgs/ParkPoints format.
'''
Z_DEBUG = False
# define values boundaries for color
lower_yellow = np.array([15,40,150],np.uint8)
upper_yellow = np.array([40,255,255],np.uint8)
# DEBUGGING! GREEN (Used when working at Idea Factory)
if Z_DEBUG:
lower_white_hsv = np.array([50, 40, 0], np.uint8)
upper_white_hsv = np.array([100, 255, 255], np.uint8)
else:
lower_white_hsv = np.array([0,0,170], np.uint8)
upper_white_hsv = np.array([255,50,255], np.uint8)
lower_white_rgb = np.array([190,190,190], np.uint8)
upper_white_rgb = np.array([255,255,255], np.uint8)
lower_blue = np.array([90,50, 120])
upper_blue = np.array([140,255,255])
lower_red1 = np.array([0, 100, 100], np.uint8)
upper_red1 = np.array([10, 255,255], np.uint8)
lower_red2 = np.array([160, 100,100], np.uint8)
upper_red2 = np.array([179, 255,255], np.uint8)
# Define some buffers for filtering
coeff_buffer = []
outlier_count = 0
x_waypoint_buffer = []
y_waypoint_buffer = []
# Define a simple logic that selects a confidence (which directly affects vehicle velocity) depending on how
# certain we are of our lane estimation
def findConfidence(below_200, below_100, nolane, linearfit, small_data, previous_coeff):
if (not below_200):
if (previous_coeff):
conf1 = 5
else:
conf1 = 10
else:
conf1 = 3
if below_100:
if below_200:
conf2 = 3
else:
conf2 = 6
else:
conf2 = 10
if nolane:
conf3 = 3
else:
conf3 = 10
if linearfit:
conf4 = 6
else:
conf4 = 10
if small_data:
conf5 = 3
else:
conf5 = 10
confidence = np.array([conf1, conf2, conf3, conf4, conf5])
confidence_val = np.min(confidence)
return confidence_val
# Simple method that calculates R-squared value of data
def calculate_rsquared(x, y, f):
yhat = f(x)
if (len(y) != 0):
ybar =
|
np.sum(y)
|
numpy.sum
|
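`calculate_rsquared` is cut off mid-computation. Assuming the usual coefficient-of-determination formula, a hedged completion of that helper could look like the sketch below (not necessarily the author's exact handling of edge cases):

import numpy as np

def calculate_rsquared(x, y, f):
    # f is a fitted polynomial (e.g. np.poly1d); classic R^2 = 1 - SS_res / SS_tot.
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    if len(y) == 0:
        return 0.0
    yhat = f(x)
    ybar = np.sum(y) / len(y)
    ss_res = np.sum((y - yhat) ** 2)
    ss_tot = np.sum((y - ybar) ** 2)
    return 1.0 - ss_res / ss_tot if ss_tot > 0 else 1.0

# Example with a quadratic fit over noisy points.
x = np.linspace(0, 10, 50)
y = 2 * x ** 2 + np.random.normal(0, 5, 50)
print(calculate_rsquared(x, y, np.poly1d(np.polyfit(x, y, 2))))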
import math
import string
from multiprocessing import Process, Manager
import matplotlib.image as im
import numpy as np
from matplotlib import pyplot as plt
from numpy import ndarray
class borderDetector:
def __init__(self, imgPath: string, sigmas: ndarray) -> None:
"""
'borderDetector' is a class that applies the Laplacian of Gaussian (and Gaussian) filter to a
given image with different sigma values. The image is preprocessed (if needed) in order to
transform it into a grey-scale image. It builds and convolves the image with a different filter
for each sigma value and, in the end, shows the convolved (filtered) image and the
corresponding kernel (filter) for each sigma.
In order to speed up the execution of this script, it is worth using a multiprocessing approach,
so that the different convolve-tasks can be run in parallel.
Each convolve-task is made by a different process (may increase the memory usage)
:param imgPath: Image's path
:param sigmas: array of different sigma values
"""
self.__path = imgPath
self.__sigmas = sigmas
self.__source = None
def detect(self) -> None:
"""
Apply Laplacian of Gaussian
:return:
"""
# Retrieve the number of sigma values (perform convolution for each value)
length = len(self.__sigmas)
# empty array
if length == 0:
return
# Retrieve a matrix-base image
image = im.imread(self.__path)
self.__source = np.copy(image)
# We expect a grey scale image, but a tensor (R,G,B matrices) is also OK
if image.ndim != 2 and image.ndim != 3:
return
# If the image is a rgb based, we have to transform in grey scale image
if image.ndim == 3:
image = self.__rgbToGrey(image=image)
# Dictionary where each process put the results (filtered image and kernel) inside
results = Manager().dict()
worker = np.empty(shape=len(self.__sigmas), dtype=Process)
for i in range(0, length):
# For each Sigma value, start the convolution
worker[i] = Process(target=self.detector, args=(results, i, image, self.__sigmas[i]))
worker[i].start()
for i in range(0, length):
worker[i].join()
# 2 row , length columns
fig, axs = plt.subplots(2, length)
for i in range(0, length):
axs[0, i].set_title('Convolved image S: ' + str(self.__sigmas[i]))
axs[0, i].imshow(results[i][0], cmap="gray")
axs[1, i].set_title('Kernel')
axs[1, i].imshow(results[i][1])
plt.show()
return
@staticmethod
def __buildKernel(dim: int, sigma: float, krnl) -> ndarray:
"""
Build the filter (in this case Laplacian of Gaussian) given kernel, dimensions and sigma
:param dim: dimensions of filter
:param sigma: sigma value used into LoG formula
:param krnl: Kernel function
:return: matrix that represent the filter
"""
large = dim // 2 # "radius" of filter
# Define an array between (-large to large) with size "dim"
linSp = np.linspace(-large, large, dim)
# Create a matrix a square base (like a classical cartesian plan but in 2 dimension)
X, Y = np.meshgrid(linSp, linSp)
# Apply the Laplacian of Gaussian's formula
return krnl(x=X, y=Y, sigma=sigma)
@staticmethod
def __loG(x: float, y: float, sigma: float) -> ndarray:
# Laplacian of Gaussian 's formula
s2 = np.power(sigma, 2)
sub1 = -(np.power(x, 2) + np.power(y, 2)) / (2 * s2)
return -(1 / (math.pi * np.power(s2, 2))) * (1 + sub1) * np.exp(sub1)
def detector(self, imgDict: dict, index: int, image: ndarray, sigma: float) -> None:
"""
Given an image and sigma value apply the convolution and put the result in the dictionary
:param imgDict: dictionary in which to put the result
:param index: key of the dictionary under which to put the result
:param image: image source where apply the convolution
:param sigma: sigma value (used to build the filter)
:return: is a function used in another process, no result returned
"""
# Define the dimension of filter given a sigma value
dim = 2 * int(4 * sigma + 0.5) + 1
# Build the kernel (LoG)
kernel = self.__buildKernel(dim=dim, sigma=sigma, krnl=self.__loG)
# Convolve the image with the kernel, then perform a sort of rescaling:
# we transform the float values into unsigned ints (8 bits)
result = self.__convolution(image=image, kernel=kernel)
imgDict[index] = [result.astype(np.uint8),kernel]
return
@staticmethod
def __gaussian(x, y, sigma):
s2 = 2 *
|
np.power(sigma, 2)
|
numpy.power
|
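`__gaussian` stops right after computing 2*sigma^2. A hedged sketch of the full 2-D Gaussian kernel formula the method presumably evaluates (a standard normalised Gaussian; the author's exact normalisation may differ):

import math
import numpy as np

def gaussian(x, y, sigma):
    # Standard 2-D Gaussian: exp(-(x^2 + y^2) / (2*sigma^2)) / (2*pi*sigma^2).
    s2 = 2 * np.power(sigma, 2)
    return np.exp(-(np.power(x, 2) + np.power(y, 2)) / s2) / (math.pi * s2)

# Example: evaluate the kernel on a small 5x5 grid.
lin = np.linspace(-2, 2, 5)
X, Y = np.meshgrid(lin, lin)
print(gaussian(X, Y, sigma=1.0).round(3))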
#!/usr/bin/env python
# ~*~ coding: utf-8 ~*~
"""Plot results from inversion.
Only a single run. Currently set up for plots from
run_inversion_osse
"""
from __future__ import print_function, division
import datetime
import glob
import sys
import os
import numpy as np
import scipy.stats
import scipy.special
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import cartopy.crs as ccrs
import cartopy.feature as cfeat
import iris
import dask.array as da
import xarray.plot
print(datetime.datetime.now(), "Imports")
sys.stdout.flush()
YEAR = 2010
MONTH = 7
NOISE_FUNCTION = "exp"
NOISE_LENGTH = 200
NOISE_TIME_FUN = "exp"
NOISE_TIME_LEN = 21
INV_FUNCTION = "exp"
INV_LENGTH = 1000
INV_TIME_FUN = "exp"
INV_TIME_LEN = 7
FLUX_INTERVAL = 6
FLUX_RESOLUTION = 27
def write_console_message(msg):
"""Write a message to stdout and flush output streams.
Parameters
----------
msg: str
"""
sys.stderr.flush()
print(datetime.datetime.now(), msg, flush=True)
def long_description(df, ci_width=0.95):
"""Print longer description of df.
Parameters
----------
df: pd.DataFrame
ci_width: float
Width of confidence intervals.
Must be between 0 and 1.
Returns
-------
pd.DataFrame
"""
df_stats = df.describe()
df_stats_loc = df_stats.loc
# Robust measures of scale
df_stats_loc["IQR", :] = df_stats_loc["75%", :] - df_stats_loc["25%", :]
df_stats_loc["mean abs. dev.", :] = df.mad()
deviation_from_median = df - df_stats_loc["50%", :]
df_stats_loc["med. abs. dev.", :] = deviation_from_median.abs().median()
# Higher-order moments
df_stats_loc["Fisher skewness", :] = df.skew()
df_stats_loc["Y-K skewness", :] = (
(df_stats_loc["75%", :] + df_stats_loc["25%", :] -
2 * df_stats_loc["50%", :]) /
(df_stats_loc["75%", :] - df_stats_loc["25%", :])
)
df_stats_loc["Fisher kurtosis", :] = df.kurt()
# Confidence intervals
for col_name in df:
# I'm already dropping NAs for the rest of these.
mean, var, std = scipy.stats.bayes_mvs(
df[col_name].dropna(),
alpha=ci_width
)
# Record mean
df_stats_loc["Mean point est", col_name] = mean[0]
df_stats_loc[
"Mean {width:2d}%CI low".format(width=round(ci_width * 100)),
col_name
] = mean[1][0]
df_stats_loc[
"Mean {width:2d}%CI high".format(width=round(ci_width * 100)),
col_name
] = mean[1][1]
# Record var
df_stats_loc["Var. point est", col_name] = var[0]
df_stats_loc[
"Var. {width:2d}%CI low".format(width=round(ci_width * 100)),
col_name
] = var[1][0]
df_stats_loc[
"Var. {width:2d}%CI high".format(width=round(ci_width * 100)),
col_name
] = var[1][1]
# Record Std Dev
df_stats_loc["std point est", col_name] = std[0]
df_stats_loc[
"std {width:2d}%CI low".format(width=round(ci_width * 100)),
col_name
] = std[1][0]
df_stats_loc[
"std {width:2d}%CI high".format(width=round(ci_width * 100)),
col_name
] = std[1][1]
return df_stats
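# A minimal hedged usage sketch of long_description on illustrative data
# (random numbers, not the inversion output handled later in this script):
# _example_df = pd.DataFrame(np.random.randn(200, 2),
#                            columns=["prior_error", "posterior_error"])
# print(long_description(_example_df, ci_width=0.95))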
PRIOR_PATH = (
"../data_files/"
"{year:04d}-{month:02d}_osse_bio_priors_{interval:d}h_{res:d}km_"
"noise_{fun:s}{len:d}km_{time_fun:s}{time_len:d}d_exp3h.nc"
).format(
year=YEAR, month=MONTH, interval=FLUX_INTERVAL, res=FLUX_RESOLUTION,
fun=NOISE_FUNCTION, len=NOISE_LENGTH, time_fun=NOISE_TIME_FUN,
time_len=NOISE_TIME_LEN)
# 2010-07_monthly_inversion_06h_027km_noiseexp100kmexp14d_icovexp100kmexp14d_output.nc4
FRAT_POSTERIOR_PATH = (
"{year:04d}-{month:02d}_monthly_inversion_{interval:02d}h_{res:03d}km_"
"noise{noisefun:s}{noiselen:d}km{noise_time_fun:s}{noise_time_len:d}d"
"_icov{invfun:s}{invlen:d}km{inv_time_fun:s}{inv_time_len:d}d"
"_output.nc4"
).format(year=YEAR, month=MONTH, interval=FLUX_INTERVAL, res=FLUX_RESOLUTION,
noisefun=NOISE_FUNCTION, noiselen=NOISE_LENGTH,
noise_time_fun=NOISE_TIME_FUN, noise_time_len=NOISE_TIME_LEN,
invfun=INV_FUNCTION, invlen=INV_LENGTH,
inv_time_fun=INV_TIME_FUN, inv_time_len=INV_TIME_LEN)
IDEN_POSTERIOR_PATH = (
"{year:04d}-{month:02d}_monthly_inversion_{interval:02d}h_{res:03d}km_"
"noise{noisefun:s}{noiselen:d}km{noise_time_fun:s}{noise_time_len:d}d"
"_icov{invfun:s}{invlen:d}km{inv_time_fun:s}{inv_time_len:d}d"
"_output.nc4"
).format(year=YEAR, month=MONTH, interval=FLUX_INTERVAL, res=FLUX_RESOLUTION,
noisefun=NOISE_FUNCTION, noiselen=NOISE_LENGTH,
noise_time_fun=NOISE_TIME_FUN, noise_time_len=NOISE_TIME_LEN,
invfun=NOISE_FUNCTION, invlen=NOISE_LENGTH,
inv_time_fun=NOISE_TIME_FUN, inv_time_len=NOISE_TIME_LEN)
WRF_CRS = ccrs.LambertConformal(
standard_parallels=(30, 60), central_latitude=40,
central_longitude=-96, false_easting=0, false_northing=0,
globe=ccrs.Globe(semimajor_axis=6370e3, semiminor_axis=6360e3,
ellipse=None))
LPDM_PROJ = ccrs.LambertConformal(
central_longitude=-96, central_latitude=40, standard_parallels=[30, 60],
false_easting=3347998.5116325677, false_northing=2470499.376688077,
globe=ccrs.Globe(semimajor_axis=6370e3, semiminor_axis=6370e3,
ellipse=None))
BIG_LAKES = cfeat.NaturalEarthFeature(
"physical", "lakes", "110m",
edgecolor="gray", facecolor="none", linewidth=.5)
STATES = cfeat.NaturalEarthFeature(
"cultural", "admin_1_states_provinces_lines", "110m",
edgecolor="gray", facecolor="none", linewidth=.5)
TRACER_NAMES = [
"diurnal_bio",
"fossil",
"ocean",
"biomass_burn",
"biofuel",
"ship",
"posterior_bio",
"boundaries",
"prior_bio",
"",
]
# WEST_BOUNDARY_LPDM = 2.7e6
# WEST_BOUNDARY_WRF = WRF_CRS.transform_point(
# WEST_BOUNDARY_LPDM, 0, LPDM_PROJ)[0]
# Estimates for West Virginia, roughly
WEST_BOUNDARY_WRF = 1.13e6
EAST_BOUNDARY_WRF = 1.52e6
SOUTH_BOUNDARY_WRF = -1.76e5
NORTH_BOUNDARY_WRF = 2.02e5
LPDM_BOUNDS = LPDM_PROJ.transform_points(
WRF_CRS,
np.array([WEST_BOUNDARY_WRF, EAST_BOUNDARY_WRF]),
np.array([SOUTH_BOUNDARY_WRF, NORTH_BOUNDARY_WRF]),
)
print(datetime.datetime.now(), "Constants")
sys.stdout.flush()
def plot_realizations(data_array):
"""Plot a `Dataarray` with realizations."""
time_dim = [dim for dim in data_array.dims if "time" in dim][0]
x_dim = [dim for dim in data_array.dims if "_x" in dim][0]
y_dim = [dim for dim in data_array.dims if "_y" in dim][0]
xarray.plot.pcolormesh(
data_array.isel(**{time_dim: slice(3, None, 96),
"realization": slice(3)}),
x_dim, y_dim,
col=time_dim, row="realization", cmap="RdBu_r", center=0,
aspect=1.35, size=1.8,
subplot_kws=dict(projection=WRF_CRS),
)
post_fig = plt.gcf()
axes = post_fig.axes
try:
axes[-1].set_ylabel(
"{long_name:s} (${units:s}$)".format(**data_array.attrs))
except KeyError:
pass
xlim = data_array[x_dim][[0, -1]]
ylim = data_array[y_dim][[0, -1]]
for ax in axes[:-1]:
ax.coastlines()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.add_feature(cfeat.BORDERS)
return post_fig
def plot_fluxes(data_array):
"""Plot a single flux realization."""
time_dim = [dim for dim in data_array.dims if "time" in dim][0]
x_dim = [dim for dim in data_array.dims if "_x" in dim][0]
y_dim = [dim for dim in data_array.dims if "_y" in dim][0]
xarray.plot.pcolormesh(
data_array.isel(**{time_dim: slice(3, None, 32)}),
x_dim, y_dim,
col=time_dim, col_wrap=3, cmap="RdBu_r", center=0,
aspect=1.35, size=1.8,
subplot_kws=dict(projection=WRF_CRS),
)
post_fig = plt.gcf()
axes = post_fig.axes
try:
axes[-1].set_ylabel(
"{long_name:s} (${units:s}$)".format(**data_array.attrs))
except KeyError:
pass
xlim = data_array[x_dim][[0, -1]]
ylim = data_array[y_dim][[0, -1]]
for ax in axes[:-1]:
ax.coastlines()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.add_feature(cfeat.BORDERS)
post_fig.subplots_adjust(top=.9, bottom=.0256, left=.0216,
right=.82, hspace=.2, wspace=.08)
return post_fig
print(datetime.datetime.now(), "Functions")
sys.stdout.flush()
PRIOR_CUBES = iris.load(PRIOR_PATH)
PRIOR_DS = xarray.open_dataset(
PRIOR_PATH, chunks=dict(realization=1,
flux_time=8 * 7)
).set_coords(
"lambert_conformal_conic"
) # .rename(
# dict(south_north="dim_y", west_east="dim_x"))
PRIOR_CUBES.sort(key=lambda cube: cube.name())
PRIOR_CUBES.sort(key=lambda cube: len(cube.name()))
for name, var in PRIOR_DS.data_vars.items():
num_end = name.rindex("_")
if num_end < 5:
num_end = None
tracer_num = int(name[5:num_end])
long_name = TRACER_NAMES[tracer_num - 1]
if name.endswith("_noisy"):
long_name += "_with_added_noise"
var.attrs["long_name"] = long_name
try:
var.attrs["units"] = "\N{MICRO SIGN}" + var.attrs["units"]
var *= 1e6
except KeyError:
print(name)
pass
NOISE_STD_DS = xarray.open_dataset(
"../data_files/{year:04d}_MsTMIP_flux_std.nc4".format(
year=YEAR, month=MONTH),
chunks=dict(Time=21 * 8)
)[["E_TRA1"]].sel(
Time=slice("2010-06-16", "2010-07-31")
).mean("Time", keep_attrs=True)
NOISE_STD_DS["E_TRA1"].attrs["units"] = "umol/m^2/s"
for name, var in NOISE_STD_DS.data_vars.items():
if name.startswith("E_TRA"):
tracer_num = int(name[5:])
elif name.startswith("tracer_"):
tracer_num = int(name[7:])
else:
continue
long_name = TRACER_NAMES[tracer_num - 1]
long_name += "_rms"
var.attrs["long_name"] = long_name
if var.attrs["units"] == "mol km^-2 hr^-1":
var /= 3.6e3
var.attrs["units"] = "\N{MICRO SIGN}mol/m^2/s"
# NOISE_STD_DS.coords["south_north"] = PRIOR_DS.coords["dim_y"].data
# NOISE_STD_DS.coords["west_east"] = PRIOR_DS.coords["dim_x"].data
NOISE_STD_DS["E_TRA7"] = NOISE_STD_DS["E_TRA1"]
############################################################
# Load fraternal-twin dataset
FRAT_INVERSION_DS = xarray.open_dataset(
FRAT_POSTERIOR_PATH,
chunks=dict(realization=1, flux_time=8 * 14)
)
FRAT_INVERSION_DS = FRAT_INVERSION_DS.set_index(
observation=["observation_time", "site"])
FRAT_PSEUDO_OBS_DS = FRAT_INVERSION_DS["pseudo_observations"]
FRAT_POSTERIOR_DS = FRAT_INVERSION_DS[["posterior", "prior", "truth"]].rename(
dict(dim_x="projection_x_coordinate", dim_y="projection_y_coordinate",
flux_time="time"))
FRAT_POSTERIOR_DS.coords["projection_x_coordinate"].attrs.update(
dict(units="m", standard_name="projection_x_coordinate", axis="X"))
FRAT_POSTERIOR_DS.coords["projection_y_coordinate"].attrs.update(
dict(units="m", standard_name="projection_y_coordinate", axis="Y"))
FRAT_POSTERIOR_DS.coords["time"] = FRAT_POSTERIOR_DS.indexes["time"].round("S")
for var in FRAT_POSTERIOR_DS.data_vars.values():
var *= 1e6
var.attrs["units"] = "\N{MICRO SIGN}" + var.attrs["units"]
del var
###############################################################################
# Load identical-twin dataset
IDEN_INVERSION_DS = xarray.open_dataset(
IDEN_POSTERIOR_PATH,
chunks=dict(realization=1, flux_time=8 * 14)
)
IDEN_INVERSION_DS = IDEN_INVERSION_DS.set_index(
observation=["observation_time", "site"]
)
IDEN_PSEUDO_OBS_DS = IDEN_INVERSION_DS["pseudo_observations"]
IDEN_POSTERIOR_DS = IDEN_INVERSION_DS[["posterior", "prior", "truth"]].rename(
dict(dim_x="projection_x_coordinate", dim_y="projection_y_coordinate",
flux_time="time"))
IDEN_POSTERIOR_DS.coords["projection_x_coordinate"].attrs.update(
dict(units="m", standard_name="projection_x_coordinate", axis="X"))
IDEN_POSTERIOR_DS.coords["projection_y_coordinate"].attrs.update(
dict(units="m", standard_name="projection_y_coordinate", axis="Y"))
IDEN_POSTERIOR_DS.coords["time"] = FRAT_POSTERIOR_DS.indexes["time"].round("S")
for var in IDEN_POSTERIOR_DS.data_vars.values():
var *= 1e6
var.attrs["units"] = "\N{MICRO SIGN}" + var.attrs["units"]
del var
NOISE_STD_DS.coords["south_north"] = (
FRAT_POSTERIOR_DS.coords["projection_y_coordinate"].data
)
NOISE_STD_DS.coords["west_east"] = (
FRAT_POSTERIOR_DS.coords["projection_x_coordinate"].data
)
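# Naming note for the covariance files below: the fraternal-twin files use the
# separate INV_* parameters for the "icov" (inversion/prior covariance) part of
# the file name, while the identical-twin files reuse the NOISE_* parameters,
# i.e. the assumed prior covariance matches the covariance used to generate the
# noise.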
FRAT_COVARIANCE_DS = xarray.open_dataset(
"{year:04d}-{month:02d}_monthly_inversion_{interval:02d}h_{res:03d}km_"
"noise{noisefun:s}{noiselen:d}km{noise_time_fun:s}{noise_time_len:d}d_"
"icov{invfun:s}{invlen:d}km{inv_time_fun:s}{inv_time_len:d}d_"
"162km_7D_covariance_output.nc4".format(
year=YEAR, month=MONTH, interval=FLUX_INTERVAL, res=FLUX_RESOLUTION,
noisefun=NOISE_FUNCTION, noiselen=NOISE_LENGTH,
noise_time_fun=NOISE_TIME_FUN, noise_time_len=NOISE_TIME_LEN,
invfun=INV_FUNCTION, invlen=INV_LENGTH,
inv_time_fun=INV_TIME_FUN, inv_time_len=INV_TIME_LEN
),
chunks=dict(reduced_flux_time_adjoint=3, reduced_dim_y_adjoint=3,
reduced_dim_x_adjoint=3),
)
IDEN_COVARIANCE_DS = xarray.open_dataset(
"{year:04d}-{month:02d}_monthly_inversion_{interval:02d}h_{res:03d}km_"
"noise{noisefun:s}{noiselen:d}km{noise_time_fun:s}{noise_time_len:d}d_"
"icov{invfun:s}{invlen:d}km{inv_time_fun:s}{inv_time_len:d}d_"
"162km_7D_covariance_output.nc4".format(
year=YEAR, month=MONTH, interval=FLUX_INTERVAL, res=FLUX_RESOLUTION,
noisefun=NOISE_FUNCTION, noiselen=NOISE_LENGTH,
noise_time_fun=NOISE_TIME_FUN, noise_time_len=NOISE_TIME_LEN,
invfun=NOISE_FUNCTION, invlen=NOISE_LENGTH,
inv_time_fun=NOISE_TIME_FUN, inv_time_len=NOISE_TIME_LEN
),
chunks=dict(reduced_flux_time_adjoint=3, reduced_dim_y_adjoint=3,
reduced_dim_x_adjoint=3),
)
############################################################
# Read in lower-resolution posterior covariance datasets
LOWER_RES_FRAT_COVARIANCE_DS = xarray.open_dataset(
"{year:04d}-{month:02d}_monthly_inversion_{interval:02d}h_{res:03d}km_"
"noise{noisefun:s}{noiselen:d}km{noise_time_fun:s}{noise_time_len:d}d_"
"icov{invfun:s}{invlen:d}km{inv_time_fun:s}{inv_time_len:d}d_"
"216km_7D_covariance_output.nc4".format(
year=YEAR, month=MONTH, interval=FLUX_INTERVAL, res=FLUX_RESOLUTION,
noisefun=NOISE_FUNCTION, noiselen=NOISE_LENGTH,
noise_time_fun=NOISE_TIME_FUN, noise_time_len=NOISE_TIME_LEN,
invfun=INV_FUNCTION, invlen=INV_LENGTH,
inv_time_fun=INV_TIME_FUN, inv_time_len=INV_TIME_LEN
),
chunks=dict(reduced_flux_time_adjoint=3, reduced_dim_y_adjoint=3,
reduced_dim_x_adjoint=3),
)
LOWER_RES_IDEN_COVARIANCE_DS = xarray.open_dataset(
"{year:04d}-{month:02d}_monthly_inversion_{interval:02d}h_{res:03d}km_"
"noise{noisefun:s}{noiselen:d}km{noise_time_fun:s}{noise_time_len:d}d_"
"icov{invfun:s}{invlen:d}km{inv_time_fun:s}{inv_time_len:d}d_"
"216km_7D_covariance_output.nc4".format(
year=YEAR, month=MONTH, interval=FLUX_INTERVAL, res=FLUX_RESOLUTION,
noisefun=NOISE_FUNCTION, noiselen=NOISE_LENGTH,
noise_time_fun=NOISE_TIME_FUN, noise_time_len=NOISE_TIME_LEN,
invfun=NOISE_FUNCTION, invlen=NOISE_LENGTH,
inv_time_fun=NOISE_TIME_FUN, inv_time_len=NOISE_TIME_LEN
),
chunks=dict(reduced_flux_time_adjoint=3, reduced_dim_y_adjoint=3,
reduced_dim_x_adjoint=3),
)
LOWEST_RES_FRAT_COVARIANCE_DS = xarray.open_dataset(
"{year:04d}-{month:02d}_monthly_inversion_{interval:02d}h_{res:03d}km_"
"noise{noisefun:s}{noiselen:d}km{noise_time_fun:s}{noise_time_len:d}d_"
"icov{invfun:s}{invlen:d}km{inv_time_fun:s}{inv_time_len:d}d_"
"432km_7D_covariance_output.nc4".format(
year=YEAR, month=MONTH, interval=FLUX_INTERVAL, res=FLUX_RESOLUTION,
noisefun=NOISE_FUNCTION, noiselen=NOISE_LENGTH,
noise_time_fun=NOISE_TIME_FUN, noise_time_len=NOISE_TIME_LEN,
invfun=INV_FUNCTION, invlen=INV_LENGTH,
inv_time_fun=INV_TIME_FUN, inv_time_len=INV_TIME_LEN
),
chunks=dict(reduced_flux_time_adjoint=3, reduced_dim_y_adjoint=3,
reduced_dim_x_adjoint=3),
)
LOWEST_RES_IDEN_COVARIANCE_DS = xarray.open_dataset(
"{year:04d}-{month:02d}_monthly_inversion_{interval:02d}h_{res:03d}km_"
"noise{noisefun:s}{noiselen:d}km{noise_time_fun:s}{noise_time_len:d}d_"
"icov{invfun:s}{invlen:d}km{inv_time_fun:s}{inv_time_len:d}d_"
"432km_7D_covariance_output.nc4".format(
year=YEAR, month=MONTH, interval=FLUX_INTERVAL, res=FLUX_RESOLUTION,
noisefun=NOISE_FUNCTION, noiselen=NOISE_LENGTH,
noise_time_fun=NOISE_TIME_FUN, noise_time_len=NOISE_TIME_LEN,
invfun=NOISE_FUNCTION, invlen=NOISE_LENGTH,
inv_time_fun=NOISE_TIME_FUN, inv_time_len=NOISE_TIME_LEN
),
chunks=dict(reduced_flux_time_adjoint=3, reduced_dim_y_adjoint=3,
reduced_dim_x_adjoint=3),
)
HIGHER_RES_FRAT_COVARIANCE_DS = xarray.open_dataset(
"{year:04d}-{month:02d}_monthly_inversion_{interval:02d}h_{res:03d}km_"
"noise{noisefun:s}{noiselen:d}km{noise_time_fun:s}{noise_time_len:d}d_"
"icov{invfun:s}{invlen:d}km{inv_time_fun:s}{inv_time_len:d}d_"
"108km_7D_covariance_output.nc4".format(
year=YEAR, month=MONTH, interval=FLUX_INTERVAL, res=FLUX_RESOLUTION,
noisefun=NOISE_FUNCTION, noiselen=NOISE_LENGTH,
noise_time_fun=NOISE_TIME_FUN, noise_time_len=NOISE_TIME_LEN,
invfun=INV_FUNCTION, invlen=INV_LENGTH,
inv_time_fun=INV_TIME_FUN, inv_time_len=INV_TIME_LEN
),
chunks=dict(reduced_flux_time_adjoint=3, reduced_dim_y_adjoint=3,
reduced_dim_x_adjoint=3),
)
HIGHER_RES_IDEN_COVARIANCE_DS = xarray.open_dataset(
"{year:04d}-{month:02d}_monthly_inversion_{interval:02d}h_{res:03d}km_"
"noise{noisefun:s}{noiselen:d}km{noise_time_fun:s}{noise_time_len:d}d_"
"icov{invfun:s}{invlen:d}km{inv_time_fun:s}{inv_time_len:d}d_"
"108km_7D_covariance_output.nc4".format(
year=YEAR, month=MONTH, interval=FLUX_INTERVAL, res=FLUX_RESOLUTION,
noisefun=NOISE_FUNCTION, noiselen=NOISE_LENGTH,
noise_time_fun=NOISE_TIME_FUN, noise_time_len=NOISE_TIME_LEN,
invfun=NOISE_FUNCTION, invlen=NOISE_LENGTH,
inv_time_fun=NOISE_TIME_FUN, inv_time_len=NOISE_TIME_LEN
),
chunks=dict(reduced_flux_time_adjoint=3, reduced_dim_y_adjoint=3,
reduced_dim_x_adjoint=3),
)
############################################################
# Now the experiments with temporal resolution
HIGHEST_TEMP_RES_FRAT_COVARIANCE_DS = xarray.open_dataset(
"{year:04d}-{month:02d}_monthly_inversion_{interval:02d}h_{res:03d}km_"
"noise{noisefun:s}{noiselen:d}km{noise_time_fun:s}{noise_time_len:d}d_"
"icov{invfun:s}{invlen:d}km{inv_time_fun:s}{inv_time_len:d}d_"
"162km_2D_covariance_output.nc4".format(
year=YEAR, month=MONTH, interval=FLUX_INTERVAL, res=FLUX_RESOLUTION,
noisefun=NOISE_FUNCTION, noiselen=NOISE_LENGTH,
noise_time_fun=NOISE_TIME_FUN, noise_time_len=NOISE_TIME_LEN,
invfun=INV_FUNCTION, invlen=INV_LENGTH,
inv_time_fun=INV_TIME_FUN, inv_time_len=INV_TIME_LEN
),
chunks=dict(reduced_flux_time_adjoint=3, reduced_dim_y_adjoint=3,
reduced_dim_x_adjoint=3),
)
# HIGHEST_TEMP_RES_IDEN_COVARIANCE_DS = xarray.open_dataset(
# "{year:04d}-{month:02d}_monthly_inversion_{interval:02d}h_{res:03d}km_"
# "noise{noisefun:s}{noiselen:d}km{noise_time_fun:s}{noise_time_len:d}d_"
# "icov{invfun:s}{invlen:d}km{inv_time_fun:s}{inv_time_len:d}d_"
# "162km_2D_covariance_output.nc4".format(
# year=YEAR, month=MONTH, interval=FLUX_INTERVAL, res=FLUX_RESOLUTION,
# noisefun=NOISE_FUNCTION, noiselen=NOISE_LENGTH,
# noise_time_fun=NOISE_TIME_FUN, noise_time_len=NOISE_TIME_LEN,
# invfun=NOISE_FUNCTION, invlen=NOISE_LENGTH,
# inv_time_fun=NOISE_TIME_FUN, inv_time_len=NOISE_TIME_LEN
# ),
# chunks=dict(reduced_flux_time_adjoint=3, reduced_dim_y_adjoint=3,
# reduced_dim_x_adjoint=3),
# )
############################################################
# Read in the influence functions
INFLUENCE_PATHS = ["/mc1s2/s4/dfw5129/data/LPDM_2010_fpbounds/"
"ACT-America_trial5/2010/01/GROUP1",
"/mc1s2/s4/dfw5129/data/LPDM_2010_fpbounds/"
"candidacy_more_towers/2010/01/GROUP1"]
COLLAPSED_INFLUENCE_DS = xarray.open_dataset(
"../data_files/LPDM_2010_01_31day_027km_molar_footprints.nc4",
).set_coords(
["observation_time", "time_before_observation",
"lpdm_configuration", "wrf_configuration"])
FULL_INFLUENCE_DS = xarray.open_mfdataset(
[name
for path in INFLUENCE_PATHS
for name in glob.iglob(os.path.join(
path,
("LPDM_2010_01*{flux_interval:02d}hrly_{res:03d}km_"
"molar_footprints.nc4").format(
flux_interval=FLUX_INTERVAL, res=FLUX_RESOLUTION)))],
concat_dim="site",
chunks=dict(time_before_observation=4 * 7, observation_time=24),
).set_coords(["lpdm_configuration", "wrf_configuration"])
print(datetime.datetime.now(), "Files", flush=True)
sys.stderr.flush()
write_console_message("Getting influence")
INFLUENCE_TEMPORAL_ONLY = FULL_INFLUENCE_DS.H.sum(
["dim_x", "dim_y"]
).mean("site")
ALIGNED_TEMPORAL_INFLUENCES = xarray.concat(
[here_infl.set_index(
time_before_observation="flux_time"
).rename(
dict(time_before_observation="flux_time")
)
for here_infl in INFLUENCE_TEMPORAL_ONLY],
"observation_time"
)
OBSERVATIONAL_CONSTRAINT = ALIGNED_TEMPORAL_INFLUENCES.sum("observation_time")
OBSERVATIONAL_CONSTRAINT.coords["flux_time"] = (
OBSERVATIONAL_CONSTRAINT.coords["flux_time"] +
(np.array("2010-07-01T00:00:00", dtype="M8[ns]") -
np.array("2010-01-01T00:00:00", dtype="M8[ns]"))
)
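# The footprint files cover January 2010; shift the flux_time coordinate
# forward by the 2010-01-01 -> 2010-07-01 offset so the constraint lines up
# with the July OSSE period plotted below.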
OBSERVATIONAL_CONSTRAINT.load()
write_console_message("Got influence of fluxes on observations")
############################################################
# Plot standard deviations
fig, axes = plt.subplots(
1, 1, figsize=(5.5, 3.3), subplot_kw=dict(projection=WRF_CRS))
(2. * 5. * NOISE_STD_DS.E_TRA7).plot.pcolormesh(robust=True)
axes = fig.axes
axes[0].coastlines()
axes[1].set_ylabel("standard deviation of noise (µmol/m$^2$/s)")
fig.suptitle("Standard deviation of added noise")
axes[0].set_xlim(FRAT_POSTERIOR_DS.coords["projection_x_coordinate"][[0, -1]])
axes[0].set_ylim(FRAT_POSTERIOR_DS.coords["projection_y_coordinate"][[0, -1]])
axes[0].set_title("")
axes[0].add_feature(cfeat.BORDERS)
axes[0].add_feature(STATES)
axes[0].add_feature(BIG_LAKES)
fig.savefig("{year:04d}-{month:02d}_noise_standard_deviation.png".format(
year=YEAR, month=MONTH))
plt.close(fig)
write_console_message("Made std plot")
############################################################
# Plot pseudo-observations
fig, axes = plt.subplots(len(FRAT_PSEUDO_OBS_DS.site),
sharex=True,
figsize=(8, 1.5 * len(FRAT_PSEUDO_OBS_DS.site)))
write_console_message("Made figure")
fig.autofmt_xdate()
pseudo_obs = FRAT_PSEUDO_OBS_DS
for i, site in enumerate(set(FRAT_PSEUDO_OBS_DS.site.values)):
site_obs = pseudo_obs.sel(site=site).transpose(
"observation_time", "realization"
)
axes[i].plot(site_obs.observation_time.values, site_obs.values)
axes[i].text(0.01, 0.98, site, transform=axes[i].transAxes,
horizontalalignment="left", verticalalignment="top")
fig.suptitle("Pseudo-observations used in inversion")
# fig.savefig("{year:04d}-{month:02d}_pseudo_obs_afternoon.png".format(
# year=YEAR, month=MONTH))
fig.savefig(
"{year:04d}-{month:02d}_noise_{noise_fun:s}{noise_len:03d}km_"
"{noise_time_fun:s}{noise_time_len:02d}d_pseudo_obs_afternoon.pdf"
.format(
year=YEAR, month=MONTH, noise_fun=NOISE_FUNCTION,
noise_len=NOISE_LENGTH, noise_time_fun=NOISE_TIME_FUN,
noise_time_len=NOISE_TIME_LEN))
plt.close(fig)
write_console_message("Made pseudo-obs plot")
############################################################
# Plot "truth", prior, and posterior side-by-side
for_plotting = xarray.concat((FRAT_POSTERIOR_DS.prior.isel(realization=0),
IDEN_POSTERIOR_DS.posterior.isel(realization=0),
FRAT_POSTERIOR_DS.posterior.isel(realization=0)),
dim="type")
del for_plotting.coords["realization"]
# e_tra7_for_plot = PRIOR_DS["E_TRA7"].rename(
# dict(dim_x="projection_x_coordinate", dim_y="projection_y_coordinate",
# flux_time="time"))
for_plotting = xarray.concat((FRAT_POSTERIOR_DS.truth, for_plotting),
dim="type")
for_plotting.coords["type"] = ['"Truth"', "Prior", "Posterior\nIdentical-Twin",
"Posterior\nFraternal-Twin"]
for_plotting = for_plotting.persist()
xlim = for_plotting.coords["projection_x_coordinate"][[0, -1]]
ylim = for_plotting.coords["projection_y_coordinate"][[0, -1]]
plots = for_plotting.isel(time=slice(55, None, 40)).plot.pcolormesh(
"projection_x_coordinate", "projection_y_coordinate",
col="type", row="time", subplot_kws=dict(projection=WRF_CRS),
aspect=1.3, size=1.8,
center=0, vmin=-40, vmax=40,
cmap="RdBu_r", levels=None)
for ax in plots.axes.flat:
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.coastlines()
plots.cbar.ax.set_ylabel("CO$_2$ Flux (\N{MICRO SIGN}mol/m$^2$/s)")
plots.axes[0, 0].set_title('"Truth"')
plots.axes[0, 1].set_title("Prior")
plots.axes[0, 2].set_title("Posterior\nIdentical-Twin")
plots.axes[0, 3].set_title("Posterior\nFraternal-Twin")
plots.fig.savefig(
"{year:04d}-{month:02d}_noise_{noise_fun:s}{noise_len:03d}km_"
"{noise_time_fun:s}{noise_time_len:02d}d_inv_{inv_fun:s}{inv_len:03d}km_"
"{inv_time_fun:s}{inv_time_len:02d}d_osse_realization.png".format(
year=YEAR, month=MONTH, noise_fun=NOISE_FUNCTION,
noise_len=NOISE_LENGTH, noise_time_fun=NOISE_TIME_FUN,
noise_time_len=NOISE_TIME_LEN, inv_fun=INV_FUNCTION,
inv_len=INV_LENGTH, inv_time_fun=INV_TIME_FUN,
inv_time_len=INV_TIME_LEN),
dpi=400)
plt.close(plots.fig)
write_console_message("Done realization plot")
############################################################
# Plot tower locations
fig, ax = plt.subplots(
1, 1, subplot_kw=dict(projection=WRF_CRS), figsize=(4, 3))
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.coastlines()
ax.add_feature(BIG_LAKES)
ax.add_feature(cfeat.BORDERS)
ax.add_feature(STATES)
ax.scatter(pseudo_obs.tower_lon, pseudo_obs.tower_lat,
transform=WRF_CRS.as_geodetic())
fig.suptitle("WRF domain and tower locations")
fig.savefig("tower_locations.pdf")
plt.close(fig)
write_console_message("Done tower loc plot")
############################################################
# Plot differences
differences = (for_plotting.isel(type=slice(1, None)) -
for_plotting.isel(type=0))
differences = differences.persist()
plots = differences.isel(time=slice(68 - 1, None, 40)).plot.pcolormesh(
"projection_x_coordinate", "projection_y_coordinate",
col="type", row="time", subplot_kws=dict(projection=WRF_CRS),
aspect=1.3, size=1.8,
center=0, vmin=-10, vmax=10,
cmap="RdBu_r", levels=None)
for ax in plots.axes.flat:
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.coastlines()
plots.cbar.ax.set_ylabel("CO$_2$ Flux (\N{MICRO SIGN}mol/m$^2$/s)")
plots.axes[0, 0].set_title("Prior $-$ \"Truth\"")
plots.axes[0, 1].set_title("Posterior $-$ \"Truth\"\nIdentical-Twin")
plots.axes[0, 2].set_title("Posterior $-$ \"Truth\"\nFraternal-Twin")
plots.fig.savefig(
"{year:04d}-{month:02d}_noise_{noise_fun:s}{noise_len:03d}km_"
"{noise_time_fun:s}{noise_time_len:02d}d_inv_{inv_fun:s}{inv_len:03d}km_"
"{inv_time_fun:s}{inv_time_len:02d}d_osse_errors.png".format(
year=YEAR, month=MONTH, noise_fun=NOISE_FUNCTION,
noise_len=NOISE_LENGTH, noise_time_fun=NOISE_TIME_FUN,
noise_time_len=NOISE_TIME_LEN, inv_fun=INV_FUNCTION,
inv_len=INV_LENGTH, inv_time_fun=INV_TIME_FUN,
inv_time_len=INV_TIME_LEN))
plt.close(plots.fig)
write_console_message("Done error plot")
############################################################
# Plot "truth", prior, and increments
for_incr_plotting = xarray.concat(
[for_plotting.isel(type=slice(None, 2)),
for_plotting.isel(type=slice(2, None)) -
for_plotting.sel(type="Prior")],
dim="type"
)
for_incr_plotting.coords["type"] = ['"Truth"', "Prior",
"Posterior - Prior: Identical-Twin",
"Posterior - Prior: Fraternal-Twin"]
for_incr_plotting = for_incr_plotting.persist()
xlim = for_incr_plotting.coords["projection_x_coordinate"][[0, -1]]
ylim = for_incr_plotting.coords["projection_y_coordinate"][[0, -1]]
plots = for_incr_plotting.isel(time=slice(55, None, 40)).plot.pcolormesh(
"projection_x_coordinate", "projection_y_coordinate",
col="type", row="time", subplot_kws=dict(projection=WRF_CRS),
aspect=1.3, size=1.8,
center=0, vmin=-40, vmax=40,
cmap="RdBu_r", levels=None)
for ax in plots.axes.flat:
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.coastlines()
plots.cbar.ax.set_ylabel("CO$_2$ Flux (\N{MICRO SIGN}mol/m$^2$/s)")
plots.axes[0, 0].set_title('"Truth"')
plots.axes[0, 1].set_title("Prior")
plots.axes[0, 2].set_title("Posterior - Prior\nIdentical-Twin")
plots.axes[0, 3].set_title("Posterior - Prior\nFraternal-Twin")
plots.fig.savefig(
"{year:04d}-{month:02d}_noise_{noise_fun:s}{noise_len:03d}km_"
"{noise_time_fun:s}{noise_time_len:02d}d_inv_{inv_fun:s}{inv_len:03d}km_"
"{inv_time_fun:s}{inv_time_len:02d}d_osse_realization_increment.png"
.format(
year=YEAR, month=MONTH, noise_fun=NOISE_FUNCTION,
noise_len=NOISE_LENGTH, noise_time_fun=NOISE_TIME_FUN,
noise_time_len=NOISE_TIME_LEN, inv_fun=INV_FUNCTION,
inv_len=INV_LENGTH, inv_time_fun=INV_TIME_FUN,
inv_time_len=INV_TIME_LEN),
dpi=400)
plt.close(plots.fig)
write_console_message("Done realization increment plot")
############################################################
# Plot gain over time
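# Pointwise gain: 1 - |posterior error| / |prior error|, so 1 means the error
# was removed entirely, 0 means no improvement over the prior, and negative
# values mean the posterior is worse than the prior at that grid point.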
gain = 1 - (
da.fabs(differences.sel(
type=[name for name in differences.coords["type"].values
if "Posterior" in name])) /
da.fabs(differences.sel(type="Prior"))
)
gain.attrs["long_name"] = "inversion_gain"
gain.attrs["units"] = "1"
# gain.load()
plots = gain.isel(time=slice(68 - 1, None, 40)).plot.pcolormesh(
"projection_x_coordinate", "projection_y_coordinate",
row="time", col="type", subplot_kws=dict(projection=WRF_CRS),
aspect=1.3, size=1.8,
vmin=0, vmax=1, cmap="viridis", levels=None)
for ax in plots.axes.flat:
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.coastlines()
plots.fig.savefig(
"{year:04d}-{month:02d}_noise_{noise_fun:s}{noise_len:03d}km_"
"{noise_time_fun:s}{noise_time_len:02d}d_inv_{inv_fun:s}{inv_len:03d}km_"
"{inv_time_fun:s}{inv_time_len:02d}d_realization_pointwise_gain.png"
.format(
year=YEAR, month=MONTH, noise_fun=NOISE_FUNCTION,
noise_len=NOISE_LENGTH, noise_time_fun=NOISE_TIME_FUN,
noise_time_len=NOISE_TIME_LEN, inv_fun=INV_FUNCTION,
inv_len=INV_LENGTH, inv_time_fun=INV_TIME_FUN,
inv_time_len=INV_TIME_LEN))
plt.close(plots.fig)
write_console_message("Done pointwise gain")
############################################################
# Find and plot gains for all realizations
all_differences = xarray.concat(
(FRAT_POSTERIOR_DS.prior - for_plotting.sel(type='"Truth"'),
IDEN_POSTERIOR_DS.posterior - for_plotting.sel(type='"Truth"'),
FRAT_POSTERIOR_DS.posterior - for_plotting.sel(type='"Truth"')),
dim="type")
all_differences.coords["type"] = ["prior_error", "iden_posterior_error",
"frat_posterior_error"]
print(datetime.datetime.now(), "Getting January means east of line")
sys.stdout.flush()
time_mean_error = all_differences.mean("time")
time_mean_error.load()
all_mean_error = time_mean_error.mean(
("projection_x_coordinate", "projection_y_coordinate"))
all_mean_error.load()
small_mean_error = time_mean_error.sel(
projection_x_coordinate=slice(WEST_BOUNDARY_WRF, EAST_BOUNDARY_WRF),
projection_y_coordinate=slice(SOUTH_BOUNDARY_WRF, NORTH_BOUNDARY_WRF)
).mean(
("projection_x_coordinate", "projection_y_coordinate"))
small_mean_error.load()
print(datetime.datetime.now(), "Have means")
sys.stdout.flush()
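# Total gain for each realization, computed from the domain- and time-averaged
# errors: 1 - |mean posterior error| / |mean prior error|.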
total_gain = 1 - (
da.fabs(all_mean_error.sel(type=[
"iden_posterior_error", "frat_posterior_error"
])) /
da.fabs(all_mean_error.sel(type="prior_error"))
)
total_gain.load()
fig = plt.figure()
total_gain.plot.hist(range=(-2, 1), bins=15)
fig.suptitle("Total gain")
plt.xlabel("Gain in total flux")
fig.savefig(
"{year:04d}-{month:02d}_noise_{noise_fun:s}{noise_len:03d}km_"
"{noise_time_fun:s}{noise_time_len:02d}d_inv_{inv_fun:s}{inv_len:03d}km_"
"{inv_time_fun:s}{inv_time_len:02d}d_total_gain_hist.pdf".format(
year=YEAR, month=MONTH, noise_fun=NOISE_FUNCTION,
noise_len=NOISE_LENGTH, noise_time_fun=NOISE_TIME_FUN,
noise_time_len=NOISE_TIME_LEN, inv_fun=INV_FUNCTION,
inv_len=INV_LENGTH, inv_time_fun=INV_TIME_FUN,
inv_time_len=INV_TIME_LEN))
plt.close(fig)
write_console_message("Done large gain plot")
############################################################
# Describe the distribution
with open(
"{year:04d}-{month:02d}_noise_{noise_fun:s}{noise_len:03d}km_"
"{noise_time_fun:s}{noise_time_len:02d}d_inv_{inv_fun:s}{inv_len:03d}km_"
"{inv_time_fun:s}{inv_time_len:02d}d_gain_dist.txt".format(
year=YEAR, month=MONTH, noise_fun=NOISE_FUNCTION,
noise_len=NOISE_LENGTH, noise_time_fun=NOISE_TIME_FUN,
noise_time_len=NOISE_TIME_LEN, inv_fun=INV_FUNCTION,
inv_len=INV_LENGTH, inv_time_fun=INV_TIME_FUN,
inv_time_len=INV_TIME_LEN), "w") as result_file:
print(datetime.datetime.now(), "Total gain", file=result_file)
print(scipy.stats.describe(total_gain), file=result_file)
print(total_gain.quantile((0, .2, .25, .4, .5, .6, .75, .8, 1)),
file=result_file)
write_console_message("Done gain statistics files")
############################################################
# Plot timeseries of mean flux
spatial_avg_differences = all_differences.mean(
["projection_x_coordinate", "projection_y_coordinate"])
fig = plt.figure(figsize=(5, 3.4))
fig.autofmt_xdate()
plt.subplots_adjust(left=.18)
ax = plt.gca()
spatial_avg_differences.sel(
type="prior_error", realization=0).plot.line(
"-", label="Prior")
spatial_avg_differences.sel(
type="iden_posterior_error", realization=0).plot.line(
"--", label="Posterior: Iden.")
spatial_avg_differences.sel(
type="frat_posterior_error", realization=0).plot.line(
"-.", label="Posterior: Frat.")
ax.set_xlim(mpl.dates.datestr2num(
["2010-06-18T00:00:00Z", "2010-08-01T00:00:00Z"]))
# for xval, color in (zip(
# mpl.dates.datestr2num(
# ["2010-07-01T00:00:00Z", "2010-07-03T00:00:00Z",
# "2010-07-17T00:00:00Z"]),
# ["red", "gray", "red"])):
# ax.axvline(xval, color=color)
ax.axhline(0, color="black", linewidth=.75)
plt.legend()
ax.set_ylabel("Average flux error over whole domain\n"
"(\N{MICRO SIGN}mol/m\N{SUPERSCRIPT TWO}/s)")
ax.set_xlabel("")
plt.title("Spatial average flux error")
fig.savefig(
"{year:04d}-{month:02d}_noise_{noise_fun:s}{noise_len:03d}km_"
"{noise_time_fun:s}{noise_time_len:02d}d_inv_{inv_fun:s}{inv_len:03d}km_"
"{inv_time_fun:s}{inv_time_len:02d}d_realization_spatial_"
"avg_timeseries.pdf".format(
year=YEAR, month=MONTH, noise_fun=NOISE_FUNCTION,
noise_len=NOISE_LENGTH, noise_time_fun=NOISE_TIME_FUN,
noise_time_len=NOISE_TIME_LEN, inv_fun=INV_FUNCTION,
inv_len=INV_LENGTH, inv_time_fun=INV_TIME_FUN,
inv_time_len=INV_TIME_LEN))
plt.close(fig)
write_console_message("Done timeseries")
small_spatial_avg_differences = all_differences.sel(
projection_x_coordinate=slice(WEST_BOUNDARY_WRF, EAST_BOUNDARY_WRF),
projection_y_coordinate=slice(SOUTH_BOUNDARY_WRF, NORTH_BOUNDARY_WRF)
).mean(["projection_x_coordinate", "projection_y_coordinate"])
fig = plt.figure(figsize=(5, 3.4))
fig.autofmt_xdate()
plt.subplots_adjust(left=.18)
ax = plt.gca()
small_spatial_avg_differences.sel(
type="prior_error", realization=0).plot.line(
"-", label="Prior")
small_spatial_avg_differences.sel(
    type="iden_posterior_error", realization=0).plot.line(
        "--", label="Posterior: Iden.")
small_spatial_avg_differences.sel(
    type="frat_posterior_error", realization=0).plot.line(
        "-.", label="Posterior: Frat.")
ax.set_xlim(mpl.dates.datestr2num(
["2010-06-18T00:00:00Z", "2010-08-01T00:00:00Z"]))
# for xval, color in (zip(
# mpl.dates.datestr2num(
# ["2010-07-01T00:00:00Z", "2010-07-03T00:00:00Z",
# "2010-07-17T00:00:00Z"]),
# ["red", "gray", "red"])):
# ax.axvline(xval, color=color)
ax.axhline(0, color="black", linewidth=.75)
plt.legend()
ax.set_ylabel("Average flux error over West Virginia\n"
"(\N{MICRO SIGN}mol/m\N{SUPERSCRIPT TWO}/s)")
ax.set_xlabel("")
plt.title("Spatial average flux error")
fig.savefig(
"{year:04d}-{month:02d}_noise_{noise_fun:s}{noise_len:03d}km_"
"{noise_time_fun:s}{noise_time_len:02d}d_inv_{inv_fun:s}{inv_len:03d}km_"
"{inv_time_fun:s}{inv_time_len:02d}d_realization_small_spatial_"
"avg_timeseries.pdf".format(
year=YEAR, month=MONTH, noise_fun=NOISE_FUNCTION,
noise_len=NOISE_LENGTH, noise_time_fun=NOISE_TIME_FUN,
noise_time_len=NOISE_TIME_LEN, inv_fun=INV_FUNCTION,
inv_len=INV_LENGTH, inv_time_fun=INV_TIME_FUN,
inv_time_len=INV_TIME_LEN))
plt.close(fig)
write_console_message("Done small timeseries")
############################################################
# Plot timeseries of increment
spatial_avg_increment = (
spatial_avg_differences.sel(type=["iden_posterior_error",
"frat_posterior_error"]) -
spatial_avg_differences.sel(type="prior_error")
)
fig = plt.figure(figsize=(5, 3.4))
fig.autofmt_xdate()
plt.subplots_adjust(left=.18)
ax = plt.gca()
ax.set_xlim(mpl.dates.datestr2num(
["2010-06-18T00:00:00Z", "2010-08-01T00:00:00Z"]))
spatial_avg_increment.isel(realization=0).plot.line('-', hue="type")
# for xval, color in (zip(
# mpl.dates.datestr2num(
# ["2010-07-01T00:00:00Z", "2010-07-03T00:00:00Z",
# "2010-07-17T00:00:00Z"]),
# ["red", "gray", "red"])):
# ax.axvline(xval, color=color)
ax.axhline(0, color="black", linewidth=.75)
ax.set_ylabel("Average increment over whole domain\n"
"(\N{MICRO SIGN}mol/m\N{SUPERSCRIPT TWO}/s)")
ax.set_xlabel("")
plt.title("Spatial average increment")
fig.savefig(
"{year:04d}-{month:02d}_noise_{noise_fun:s}{noise_len:03d}km_"
"{noise_time_fun:s}{noise_time_len:02d}d_inv_{inv_fun:s}{inv_len:03d}km_"
"{inv_time_fun:s}{inv_time_len:02d}d_realization_spatial_"
"avg_increment_timeseries.pdf".format(
year=YEAR, month=MONTH, noise_fun=NOISE_FUNCTION,
noise_len=NOISE_LENGTH, noise_time_fun=NOISE_TIME_FUN,
noise_time_len=NOISE_TIME_LEN, inv_fun=INV_FUNCTION,
inv_len=INV_LENGTH, inv_time_fun=INV_TIME_FUN,
inv_time_len=INV_TIME_LEN))
plt.close(fig)
small_spatial_avg_increment = (
small_spatial_avg_differences.sel(type=["iden_posterior_error",
"frat_posterior_error"]) -
small_spatial_avg_differences.sel(type="prior_error")
)
fig = plt.figure(figsize=(5, 3.4))
fig.autofmt_xdate()
plt.subplots_adjust(left=.18)
ax = plt.gca()
try:
ax.set_xlim(mpl.dates.datestr2num(
["2010-06-18T00:00:00Z", "2010-08-01T00:00:00Z"]))
small_spatial_avg_increment.isel(realization=0).plot.line('-', hue="type")
except ValueError:
ax.set_xlim(mpl.dates.datestr2num(
["2010-06-18T00:00:00Z", "2010-08-01T00:00:00Z"]))
small_spatial_avg_increment.isel(realization=0).plot.line('-', hue="type")
# for xval, color in (zip(
# mpl.dates.datestr2num(
# ["2010-07-01T00:00:00Z", "2010-07-03T00:00:00Z",
# "2010-07-17T00:00:00Z"]),
# ["red", "gray", "red"])):
# ax.axvline(xval, color=color)
ax.axhline(0, color="black", linewidth=.75)
ax.set_ylabel("Average increment over West Virginia\n"
"(\N{MICRO SIGN}mol/m\N{SUPERSCRIPT TWO}/s)")
ax.set_xlabel("")
plt.title("Spatial average increment")
fig.savefig(
"{year:04d}-{month:02d}_noise_{noise_fun:s}{noise_len:03d}km_"
"{noise_time_fun:s}{noise_time_len:02d}d_inv_{inv_fun:s}{inv_len:03d}km_"
"{inv_time_fun:s}{inv_time_len:02d}d_realization_small_spatial_"
"avg_increment_timeseries.pdf".format(
year=YEAR, month=MONTH, noise_fun=NOISE_FUNCTION,
noise_len=NOISE_LENGTH, noise_time_fun=NOISE_TIME_FUN,
noise_time_len=NOISE_TIME_LEN, inv_fun=INV_FUNCTION,
inv_len=INV_LENGTH, inv_time_fun=INV_TIME_FUN,
inv_time_len=INV_TIME_LEN))
plt.close(fig)
write_console_message("Done increment plots")
spatial_avg_differences.load()
spatial_avg_increment.load()
############################################################
# Make big combined plot
fig, axes = plt.subplots(3, 1, figsize=(7.5, 6.5), sharex=True)
fig.subplots_adjust(hspace=.27, bottom=.12, top=.85)
ax = axes[0]
prior_line = spatial_avg_differences.sel(
type="prior_error", realization=0).plot.line(
"-", label="Prior", ax=ax)
id_post_line = spatial_avg_differences.sel(
type="iden_posterior_error", realization=0).plot.line(
"--", label="Posterior: Identical", ax=ax)
fr_post_line = spatial_avg_differences.sel(
type="frat_posterior_error", realization=0).plot.line(
"-.", label="Posterior: Fraternal", ax=ax)
ax.set_xlim(mpl.dates.datestr2num(
["2010-06-18T00:00:00Z", "2010-08-01T00:00:00Z"]))
ax.axhline(0, color="black", linewidth=.75)
ax.set_ylim(-5, 5)
fig.legend([prior_line[0], id_post_line[0], fr_post_line[0]],
["Prior", "Posterior: Identical", "Posterior: Fraternal"],
(.2, .9),
ncol=3)
ax.set_ylabel("Average flux error\n(\N{MICRO SIGN}mol/m\N{SUPERSCRIPT TWO}/s)")
ax.set_xlabel("")
ax.set_title("Spatial average flux error")
ax = axes[1]
spatial_avg_increment.isel(
realization=0
).sel(
type="iden_posterior_error"
).plot.line('--', color="tab:orange", ax=ax)
spatial_avg_increment.isel(
realization=0
).sel(
type="frat_posterior_error"
).plot.line('-.', color="tab:green", ax=ax)
ax.axhline(0, color="black", linewidth=.75)
ax.set_ylabel("Average increment\n(\N{MICRO SIGN}mol/m\N{SUPERSCRIPT TWO}/s)")
ax.set_xlabel("")
ax.set_title("Spatial average increment")
ax = axes[2]
OBSERVATIONAL_CONSTRAINT.plot.line("-", ax=ax)
ax.axhline(0, color="black", linewidth=.75)
ax.set_ylabel("Influence\n"
"(ppm/(\N{MICRO SIGN}mol/m\N{SUPERSCRIPT TWO}/s))")
ax.set_xlabel("OSSE Flux Time")
ax.set_title("Influence Of Fluxes On Observations")
ax.set_ylim(0, 5e7)
for ax in axes:
ax.axvline(mpl.dates.datestr2num("2010-07-01T00:00:00Z"),
color="black", linewidth=.75)
fig.suptitle("Average fluxes, increment, and constraint over whole domain")
fig.savefig(
"{year:04d}-{month:02d}_noise_{noise_fun:s}{noise_len:03d}km_"
"{noise_time_fun:s}{noise_time_len:02d}d_inv_{inv_fun:s}{inv_len:03d}km_"
"{inv_time_fun:s}{inv_time_len:02d}d_realization_spatial_"
"avg_combined_timeseries.pdf".format(
year=YEAR, month=MONTH, noise_fun=NOISE_FUNCTION,
noise_len=NOISE_LENGTH, noise_time_fun=NOISE_TIME_FUN,
noise_time_len=NOISE_TIME_LEN, inv_fun=INV_FUNCTION,
inv_len=INV_LENGTH, inv_time_fun=INV_TIME_FUN,
inv_time_len=INV_TIME_LEN))
plt.close(fig)
write_console_message("Done combined timeseries plot")
############################################################
# Calculate prior and posterior variances
write_console_message("Calculating variances for identical-twin OSSE")
iden_prior_theoretical_variance = (
IDEN_COVARIANCE_DS["reduced_prior_covariance"].mean() * 1e12
).values
write_console_message("Done prior, starting posterior")
iden_posterior_theoretical_variance = (
IDEN_COVARIANCE_DS["reduced_posterior_covariance"].mean() * 1e12
).values
write_console_message("Done posterior w/ agg, starting w/o")
iden_posterior_theoretical_variance_no_agg = (
IDEN_COVARIANCE_DS["reduced_posterior_covariance_no_aggregation"].mean() *
1e12
).values
write_console_message("Calculating variances for fraternal-twin OSSE")
frat_prior_theoretical_variance = (
FRAT_COVARIANCE_DS["reduced_prior_covariance"].mean() * 1e12
).values
write_console_message("Done prior, starting posterior")
frat_posterior_theoretical_variance = (
FRAT_COVARIANCE_DS["reduced_posterior_covariance"].mean() * 1e12
).values
write_console_message("Done posterior w/ agg, starting w/o")
frat_posterior_theoretical_variance_no_agg = (
FRAT_COVARIANCE_DS["reduced_posterior_covariance_no_aggregation"].mean() *
1e12
).values
write_console_message("Done highest resolution, starting lower resolution")
LOWER_RES_REDUCED_IDEN_COVARIANCE_DS = (
LOWER_RES_IDEN_COVARIANCE_DS.mean() * 1e12
).persist()
lower_res_iden_prior_theoretical_variance = (
LOWER_RES_REDUCED_IDEN_COVARIANCE_DS[
"reduced_prior_covariance"
].values
)
lower_res_iden_posterior_theoretical_variance = (
LOWER_RES_REDUCED_IDEN_COVARIANCE_DS[
"reduced_posterior_covariance"
].values
)
lower_res_iden_posterior_theoretical_variance_no_agg = (
LOWER_RES_REDUCED_IDEN_COVARIANCE_DS[
"reduced_posterior_covariance_no_aggregation"
].values
)
LOWER_RES_REDUCED_FRAT_COVARIANCE_DS = (
LOWER_RES_FRAT_COVARIANCE_DS.mean() * 1e12
).persist()
lower_res_frat_prior_theoretical_variance = (
LOWER_RES_REDUCED_FRAT_COVARIANCE_DS[
"reduced_prior_covariance"
].values
)
lower_res_frat_posterior_theoretical_variance = (
LOWER_RES_REDUCED_FRAT_COVARIANCE_DS[
"reduced_posterior_covariance"
].values
)
lower_res_frat_posterior_theoretical_variance_no_agg = (
LOWER_RES_REDUCED_FRAT_COVARIANCE_DS[
"reduced_posterior_covariance_no_aggregation"
].values
)
write_console_message("Done lower resolution, starting lowest_resolution")
LOWEST_RES_REDUCED_IDEN_COVARIANCE_DS = (
LOWEST_RES_IDEN_COVARIANCE_DS.mean() * 1e12
).persist()
lowest_res_iden_prior_theoretical_variance = (
LOWEST_RES_REDUCED_IDEN_COVARIANCE_DS[
"reduced_prior_covariance"
].values
)
lowest_res_iden_posterior_theoretical_variance = (
LOWEST_RES_REDUCED_IDEN_COVARIANCE_DS[
"reduced_posterior_covariance"
].values
)
lowest_res_iden_posterior_theoretical_variance_no_agg = (
LOWEST_RES_REDUCED_IDEN_COVARIANCE_DS[
"reduced_posterior_covariance_no_aggregation"
].values
)
LOWEST_RES_REDUCED_FRAT_COVARIANCE_DS = (
LOWEST_RES_FRAT_COVARIANCE_DS.mean() * 1e12
).persist()
lowest_res_frat_prior_theoretical_variance = (
LOWEST_RES_REDUCED_FRAT_COVARIANCE_DS[
"reduced_prior_covariance"
].values
)
lowest_res_frat_posterior_theoretical_variance = (
LOWEST_RES_REDUCED_FRAT_COVARIANCE_DS[
"reduced_posterior_covariance"
].values
)
lowest_res_frat_posterior_theoretical_variance_no_agg = (
LOWEST_RES_REDUCED_FRAT_COVARIANCE_DS[
"reduced_posterior_covariance_no_aggregation"
].values
)
write_console_message("Done lowest resolution, starting higher resolution")
HIGHER_RES_REDUCED_IDEN_COVARIANCE_DS = (
HIGHER_RES_IDEN_COVARIANCE_DS.mean() * 1e12
).persist()
higher_res_iden_prior_theoretical_variance = (
HIGHER_RES_REDUCED_IDEN_COVARIANCE_DS[
"reduced_prior_covariance"
].values
)
higher_res_iden_posterior_theoretical_variance = (
HIGHER_RES_REDUCED_IDEN_COVARIANCE_DS[
"reduced_posterior_covariance"
].values
)
higher_res_iden_posterior_theoretical_variance_no_agg = (
HIGHER_RES_REDUCED_IDEN_COVARIANCE_DS[
"reduced_posterior_covariance_no_aggregation"
].values
)
HIGHER_RES_REDUCED_FRAT_COVARIANCE_DS = (
HIGHER_RES_FRAT_COVARIANCE_DS.mean() * 1e12
).persist()
higher_res_frat_prior_theoretical_variance = (
HIGHER_RES_REDUCED_FRAT_COVARIANCE_DS[
"reduced_prior_covariance"
].values
)
higher_res_frat_posterior_theoretical_variance = (
HIGHER_RES_REDUCED_FRAT_COVARIANCE_DS[
"reduced_posterior_covariance"
].values
)
higher_res_frat_posterior_theoretical_variance_no_agg = (
HIGHER_RES_REDUCED_FRAT_COVARIANCE_DS[
"reduced_posterior_covariance_no_aggregation"
].values
)
# Temporal variance section
HIGHEST_TEMP_RES_REDUCED_FRAT_COVARIANCE_DS = (
HIGHEST_TEMP_RES_FRAT_COVARIANCE_DS.mean() * 1e12
).persist()
highest_temp_res_frat_prior_theoretical_variance = (
HIGHEST_TEMP_RES_REDUCED_FRAT_COVARIANCE_DS[
"reduced_prior_covariance"
].values
)
highest_temp_res_frat_posterior_theoretical_variance = (
HIGHEST_TEMP_RES_REDUCED_FRAT_COVARIANCE_DS[
"reduced_posterior_covariance"
].values
)
highest_temp_res_frat_posterior_theoretical_variance_no_agg = (
HIGHEST_TEMP_RES_REDUCED_FRAT_COVARIANCE_DS[
"reduced_posterior_covariance_no_aggregation"
].values
)
write_console_message("Done calculating variances")
############################################################
# Plot histogram of average flux errors
mean_error_df = all_mean_error.to_dataframe(
name="error"
).loc[:, "error"].unstack(0).loc[
:, ["prior_error", "iden_posterior_error", "frat_posterior_error"]
]
error_range = da.asarray(
[all_mean_error.min(), all_mean_error.max()]
).compute()
fig, ax = plt.subplots(1, 1)
mean_error_df.plot.hist(ax=ax, alpha=.5, xlim=(-1.8, 1.8))
ax.set_ylabel("Count")
ax.set_xlabel(
"Error in mean estimate (\N{MICRO SIGN}mol/m\N{SUPERSCRIPT TWO}/s)"
)
ax.legend(["Prior means", "Posterior means: Identical",
"Posterior means: Fraternal"])
mean_error_df["prior_error"].plot.box(
ax=ax, vert=False, positions=(31,), color="blue", widths=1.5)
mean_error_df["iden_posterior_error"].plot.box(
ax=ax, vert=False, positions=(33,), color="orange", widths=1.5)
mean_error_df["frat_posterior_error"].plot.box(
ax=ax, vert=False, positions=(35,), color="tab:green", widths=1.5)
ax.set_ylim(0, 37)
ax.set_yticks(np.arange(0, 31, 5))
ax.set_yticklabels(np.arange(0, 31, 5))
fig.savefig(
"{year:04d}-{month:02d}_noise_{noise_fun:s}{noise_len:03d}km_"
"{noise_time_fun:s}{noise_time_len:02d}d_inv_{inv_fun:s}{inv_len:03d}km_"
"{inv_time_fun:s}{inv_time_len:02d}d_flux_error_hist.pdf".format(
year=YEAR, month=MONTH, noise_fun=NOISE_FUNCTION,
noise_len=NOISE_LENGTH, noise_time_fun=NOISE_TIME_FUN,
noise_time_len=NOISE_TIME_LEN, inv_fun=INV_FUNCTION,
inv_len=INV_LENGTH, inv_time_fun=INV_TIME_FUN,
inv_time_len=INV_TIME_LEN))
plt.close(fig)
############################################################
# Write file of statistics for flux errors
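# ZSTAR_90 is the 0.95 quantile of the standard normal (about 1.645), i.e. the
# critical value for a two-sided 90% confidence interval.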
ZSTAR_90 = scipy.stats.norm.ppf(.95)
with open(
"{year:04d}-{month:02d}_noise_{noise_fun:s}{noise_len:03d}km_"
"{noise_time_fun:s}{noise_time_len:02d}d_inv_{inv_fun:s}{inv_len:03d}km_"
"{inv_time_fun:s}{inv_time_len:02d}d_flux_error_stats.txt".format(
year=YEAR, month=MONTH, noise_fun=NOISE_FUNCTION,
noise_len=NOISE_LENGTH, noise_time_fun=NOISE_TIME_FUN,
noise_time_len=NOISE_TIME_LEN, inv_fun=INV_FUNCTION,
inv_len=INV_LENGTH, inv_time_fun=INV_TIME_FUN,
inv_time_len=INV_TIME_LEN), "w") as out_file:
print("Theoretical/analytic/deterministic standard deviations: "
"Identical-twin OSSE", file=out_file)
print(np.sqrt([iden_prior_theoretical_variance,
iden_posterior_theoretical_variance,
iden_posterior_theoretical_variance_no_agg]),
file=out_file)
print("Theoretical/analytic/deterministic standard deviations: "
"Fraternal-twin OSSE", file=out_file)
print(np.sqrt([frat_prior_theoretical_variance,
frat_posterior_theoretical_variance,
frat_posterior_theoretical_variance_no_agg]),
file=out_file)
print("Lower-resolution Theoretical/analytic/deterministic "
"standard deviations: Identical-twin OSSE", file=out_file)
print(np.sqrt([lower_res_iden_prior_theoretical_variance,
lower_res_iden_posterior_theoretical_variance,
lower_res_iden_posterior_theoretical_variance_no_agg]),
file=out_file)
print("Lower-resolution Theoretical/analytic/deterministic "
"standard deviations: Fraternal-twin OSSE", file=out_file)
print(np.sqrt([lower_res_frat_prior_theoretical_variance,
lower_res_frat_posterior_theoretical_variance,
lower_res_frat_posterior_theoretical_variance_no_agg]),
file=out_file)
print("Lowest-resolution Theoretical/analytic/deterministic "
"standard deviations: Identical-twin OSSE", file=out_file)
print(np.sqrt([lowest_res_iden_prior_theoretical_variance,
lowest_res_iden_posterior_theoretical_variance,
lowest_res_iden_posterior_theoretical_variance_no_agg]),
file=out_file)
print("Lowest-resolution Theoretical/analytic/deterministic "
"standard deviations: Fraternal-twin OSSE", file=out_file)
print(np.sqrt([lowest_res_frat_prior_theoretical_variance,
lowest_res_frat_posterior_theoretical_variance,
lowest_res_frat_posterior_theoretical_variance_no_agg]),
file=out_file)
print("Higher-resolution Theoretical/analytic/deterministic "
"standard deviations: Identical-twin OSSE", file=out_file)
print(np.sqrt([higher_res_iden_prior_theoretical_variance,
higher_res_iden_posterior_theoretical_variance,
higher_res_iden_posterior_theoretical_variance_no_agg]),
file=out_file)
print("Highest-temporal-resolution Theoretical/analytic/deterministic "
"standard deviations: Fraternal-twin OSSE", file=out_file)
print(
np.sqrt([
highest_temp_res_frat_prior_theoretical_variance,
highest_temp_res_frat_posterior_theoretical_variance,
highest_temp_res_frat_posterior_theoretical_variance_no_agg,
]),
file=out_file
)
theoretical_errors = pd.DataFrame(
np.sqrt([
[iden_prior_theoretical_variance,
iden_posterior_theoretical_variance,
iden_posterior_theoretical_variance_no_agg,
frat_prior_theoretical_variance,
frat_posterior_theoretical_variance,
frat_posterior_theoretical_variance_no_agg],
[lower_res_iden_prior_theoretical_variance,
lower_res_iden_posterior_theoretical_variance,
lower_res_iden_posterior_theoretical_variance_no_agg,
lower_res_frat_prior_theoretical_variance,
lower_res_frat_posterior_theoretical_variance,
lower_res_frat_posterior_theoretical_variance_no_agg],
[lowest_res_iden_prior_theoretical_variance,
lowest_res_iden_posterior_theoretical_variance,
lowest_res_iden_posterior_theoretical_variance_no_agg,
lowest_res_frat_prior_theoretical_variance,
lowest_res_frat_posterior_theoretical_variance,
lowest_res_frat_posterior_theoretical_variance_no_agg],
[higher_res_iden_prior_theoretical_variance,
higher_res_iden_posterior_theoretical_variance,
higher_res_iden_posterior_theoretical_variance_no_agg,
higher_res_frat_prior_theoretical_variance,
higher_res_frat_posterior_theoretical_variance,
higher_res_frat_posterior_theoretical_variance_no_agg],
]),
columns=pd.MultiIndex.from_product(
[["Identical-twin OSSE", "Fraternal-twin OSSE"],
["Prior", "Posterior (attempt agg.)", "Posterior (no agg.)"]],
names=["Experiment", "Source"]
),
index=[162, 216, 432, 108],
)
print(
"Theoretical/analytic/deterministic "
"error standard deviation estimates\n"
"Averaged over all of space and time within domain\n"
"Both experiments, all resolutions\n",
theoretical_errors, file=out_file
)
theoretical_errors.to_csv(
"iden_frat_osse_deterministic_domain_avg_std_vs_res.csv"
)
print("Description of errors", file=out_file)
SUBSET_SIZES = (5, 10, 20, 40, 80)
std_cis = []
for n_realizations in SUBSET_SIZES:
print(
"Number of realizations considered:", n_realizations, file=out_file
)
ldesc = long_description(mean_error_df.iloc[:n_realizations, :])
print(ldesc, file=out_file)
std_cis.append(
ldesc.loc[["std point est", "std 95%CI low", "std 95%CI high"], :]
)
std_cis = pd.concat(std_cis, keys=SUBSET_SIZES, names=["N", "CI part"])
print("Standard deviation confidence intervals:\n",
std_cis, file=out_file)
std_cis.to_csv(
"iden_frat_osse_monte_carlo_domain_avg_std_vs_ensemble_size.csv"
)
print("Coverage for 90% confidence interval, identical prior:",
file=out_file)
number_in_prior_ci = (
np.abs(mean_error_df["prior_error"] /
np.sqrt(iden_prior_theoretical_variance))
< ZSTAR_90
).sum()
print(number_in_prior_ci / mean_error_df.shape[0], file=out_file)
print("Coverage for 90% confidence interval, identical posterior:",
file=out_file)
number_in_posterior_ci = (
np.abs(mean_error_df["iden_posterior_error"] /
np.sqrt(iden_posterior_theoretical_variance))
< ZSTAR_90
).sum()
print(number_in_posterior_ci / mean_error_df.shape[0], file=out_file)
print("Coverage for 90% confidence interval, posterior (no agg.):",
file=out_file)
number_in_posterior_ci_no_agg = (
np.abs(mean_error_df["iden_posterior_error"] /
np.sqrt(iden_posterior_theoretical_variance_no_agg))
< ZSTAR_90
).sum()
print(number_in_posterior_ci_no_agg / mean_error_df.shape[0],
file=out_file)
print("P-values are one of:", file=out_file)
dist = scipy.stats.binom(mean_error_df.shape[0], .9)
print(
dist.cdf([number_in_prior_ci, number_in_posterior_ci,
number_in_posterior_ci_no_agg]) * 2,
file=out_file
)
print(
dist.sf([number_in_prior_ci, number_in_posterior_ci,
number_in_posterior_ci_no_agg]) * 2,
file=out_file
)
print("Coverage for 90% confidence interval, fraternal prior:",
file=out_file)
number_in_prior_ci = (
np.abs(mean_error_df["prior_error"] /
np.sqrt(frat_prior_theoretical_variance))
< ZSTAR_90
).sum()
print(number_in_prior_ci / mean_error_df.shape[0], file=out_file)
print("Coverage for 90% confidence interval, fraternal posterior:",
file=out_file)
number_in_posterior_ci = (
np.abs(mean_error_df["frat_posterior_error"] /
np.sqrt(frat_posterior_theoretical_variance))
< ZSTAR_90
).sum()
print(number_in_posterior_ci / mean_error_df.shape[0], file=out_file)
print("Coverage for 90% confidence interval, posterior (no agg.):",
file=out_file)
number_in_posterior_ci_no_agg = (
np.abs(mean_error_df["frat_posterior_error"] /
np.sqrt(frat_posterior_theoretical_variance_no_agg))
< ZSTAR_90
).sum()
print(number_in_posterior_ci_no_agg / mean_error_df.shape[0],
file=out_file)
print("P-values are one of:", file=out_file)
dist = scipy.stats.binom(mean_error_df.shape[0], .9)
print(
dist.cdf([number_in_prior_ci, number_in_posterior_ci,
number_in_posterior_ci_no_agg]) * 2,
file=out_file
)
print(
dist.sf([number_in_prior_ci, number_in_posterior_ci,
number_in_posterior_ci_no_agg]) * 2,
file=out_file
)
print("K-S tests for distributions (identical-twin):", file=out_file)
print(
"Identical-twin Prior: ",
scipy.stats.kstest(
mean_error_df["prior_error"],
scipy.stats.norm(
scale=np.sqrt(iden_prior_theoretical_variance)).cdf
),
file=out_file
)
print(
"Identical-twin Posterior:",
scipy.stats.kstest(
mean_error_df["iden_posterior_error"],
scipy.stats.norm(
scale=np.sqrt(iden_posterior_theoretical_variance)).cdf
),
file=out_file
)
print(
"Identical-twin Posterior (no agg.):",
scipy.stats.kstest(
mean_error_df["iden_posterior_error"],
scipy.stats.norm(
scale=np.sqrt(iden_posterior_theoretical_variance_no_agg)).cdf
),
file=out_file
)
print("K-S tests for distributions (Fraternal-twin):", file=out_file)
print(
"Fraternal-twin Prior: ",
scipy.stats.kstest(
mean_error_df["prior_error"],
scipy.stats.norm(
scale=np.sqrt(frat_prior_theoretical_variance)).cdf
),
file=out_file
)
print(
"Fraternal-twin Posterior:",
scipy.stats.kstest(
mean_error_df["frat_posterior_error"],
scipy.stats.norm(
scale=np.sqrt(frat_posterior_theoretical_variance)).cdf
),
file=out_file
)
print(
"Fraternal-twin Posterior (no agg.):",
scipy.stats.kstest(
mean_error_df["frat_posterior_error"],
scipy.stats.norm(
scale=np.sqrt(frat_posterior_theoretical_variance_no_agg)).cdf
),
file=out_file
)
print("\N{GREEK SMALL LETTER CHI}\N{SUPERSCRIPT TWO} test for variance",
file=out_file)
degrees_freedom = ldesc.loc["count", :] - 1
degrees_freedom = np.array([degrees_freedom[0],
degrees_freedom[1],
degrees_freedom[1]])
std = ldesc.loc["std", :]
std = np.array([[std[0], std[1], std[1]],
[std[0], std[2], std[2]]])
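    # Chi-squared test statistic for a sample variance against a hypothesized
    # variance: (n - 1) * s^2 / sigma0^2, compared with a chi2(n - 1)
    # distribution.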
statistic = (
degrees_freedom * std ** 2 /
np.asarray([[iden_prior_theoretical_variance,
iden_posterior_theoretical_variance,
iden_posterior_theoretical_variance_no_agg],
[frat_prior_theoretical_variance,
frat_posterior_theoretical_variance,
frat_posterior_theoretical_variance_no_agg]])
)
print("First line is identical-twin, second line is fraternal-twin",
file=out_file)
print("Statistics are\n", statistic, file=out_file, sep="")
print("One-sided test for sample variance "
"larger than theoretical variance:\n",
scipy.stats.chi2.sf(statistic, df=degrees_freedom),
file=out_file, sep="")
print("\nPearson product-moment correlations\n",
mean_error_df.corr(),
"\nSpearman rank correlations\n",
mean_error_df.corr("spearman"),
"\nKendall Tau correlation\n",
mean_error_df.corr("kendall"),
file=out_file)
N_FLUXES = 200
sample_fluxes = np.linspace(-3, 3, N_FLUXES)
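# Gaussian densities on a flux grid: the *_mean_flux_density curves are the
# theoretical error distributions, while the *_flux_density_estimate arrays
# average one Gaussian per realization (a kernel-density-style estimate with
# the theoretical standard deviation as the bandwidth).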
iden_prior_mean_flux_density = (
np.exp(-0.5 * sample_fluxes ** 2 / iden_prior_theoretical_variance) /
np.sqrt(2 * np.pi * iden_prior_theoretical_variance)
)
iden_posterior_mean_flux_density = (
np.exp(-0.5 * sample_fluxes ** 2 / iden_posterior_theoretical_variance) /
np.sqrt(2 * np.pi * iden_posterior_theoretical_variance)
)
iden_prior_flux_densities = (
np.exp(
-0.5 *
(sample_fluxes[:, np.newaxis] -
mean_error_df["prior_error"].values[np.newaxis, :]) ** 2 /
iden_prior_theoretical_variance
) /
np.sqrt(2 * np.pi * iden_prior_theoretical_variance)
)
iden_prior_flux_density_estimate = iden_prior_flux_densities.mean(axis=1)
iden_posterior_flux_densities = (
np.exp(
-0.5 *
(sample_fluxes[:, np.newaxis] -
mean_error_df["iden_posterior_error"].values[np.newaxis, :]) ** 2 /
iden_posterior_theoretical_variance
) /
np.sqrt(2 * np.pi * iden_posterior_theoretical_variance)
)
iden_posterior_flux_density_estimate = iden_posterior_flux_densities.mean(
axis=1)
frat_prior_mean_flux_density = (
np.exp(-0.5 * sample_fluxes ** 2 / frat_prior_theoretical_variance) /
    np.sqrt(2 * np.pi * frat_prior_theoretical_variance)
)
#!/usr/local/sci/bin/python
# PYTHON2.7
#
# Author: <NAME>
# Created: 23 April 2016
# Last update: 23 April 2016
# Location: /data/local/hadkw/HADCRUH2/MARINE/EUSTACEMDS/ANALYSIS_PLOTS/
# GitHub: https://github.com/Kate-Willett/HadISDH_Marine_Build/
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# This code reads in lists of annual summaries for PT counts (before and after QC) and QC and day/night flags
# and makes overview plots
#
# -----------------------
# LIST OF MODULES
# -----------------------
# inbuilt:
# import datetime as dt
# import copy
## Following two lines should be uncommented if using with SPICE or screen
## import matplotlib
## matplotlib.use('Agg')
# import matplotlib.pyplot as plt
# import numpy as np
# from matplotlib.dates import date2num,num2date
# import sys, os
# import sys, getopt
# from scipy.optimize import curve_fit,fsolve,leastsq
# from scipy import pi,sqrt,exp
# from scipy.special import erf
# import scipy.stats
# from math import sqrt,pi
# import struct
# import pdb # pdb.set_trace() or c
#
# Kates:
# from LinearTrends import MedianPairwise - fits linear trend using Median Pairwise
# import MDS_basic_KATE as MDStool
#
# -----------------------
# DATA
# -----------------------
# /data/local/hadkw/HADCRUH2/MARINE/LISTS/
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# check the desired bits are uncommented/commented (filepaths etc)
#
# python2.7 PlotObsCount_APR2016.py
#
# This runs the code, outputs the plots
#
# -----------------------
# OUTPUT
# -----------------------
# some plots:
# /data/local/hadkw/HADCRUH2/MARINE/IMAGES/SummaryPTall_ERAclimNBC_APR2016.png
# /data/local/hadkw/HADCRUH2/MARINE/IMAGES/SummaryPTallDAY_ERAclimNBC_APR2016.png
# /data/local/hadkw/HADCRUH2/MARINE/IMAGES/SummaryPTallNIGHT_ERAclimNBC_APR2016.png
# /data/local/hadkw/HADCRUH2/MARINE/IMAGES/SummaryPTgood_ERAclimNBC_APR2016.png
# /data/local/hadkw/HADCRUH2/MARINE/IMAGES/SummaryPTgoodDAY_ERAclimNBC_APR2016.png
# /data/local/hadkw/HADCRUH2/MARINE/IMAGES/SummaryPTgoodNIGHT_ERAclimNBC_APR2016.png
# /data/local/hadkw/HADCRUH2/MARINE/IMAGES/SummaryQCDAY_ERAclimNBC_APR2016.png
# /data/local/hadkw/HADCRUH2/MARINE/IMAGES/SummaryQCNIGHT_ERAclimNBC_APR2016.png
#
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 1 (21 April 2016)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
#
#************************************************************************
# START
#************************************************************************
import datetime as dt
import copy
# Following two lines should be uncommented if using with SPICE or screen
## import matplotlib
## matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.dates import date2num,num2date
import sys, os
import sys, getopt
from scipy.optimize import curve_fit,fsolve,leastsq
from scipy import pi,sqrt,exp
from scipy.special import erf
import scipy.stats
from math import sqrt,pi
import struct
import pdb # pdb.set_trace() or c
#*************************************************************************
# READDATA
#*************************************************************************
def ReadData(FileName,typee,delimee):
    ''' Use numpy genfromtxt to read in all rows from a complex fixed-width file.
    The format must be specified explicitly because it is complex.
    Outputs an array of tuples that need to be subscripted by their names (defaults f0...f8). '''
return np.genfromtxt(FileName, dtype=typee,delimiter=delimee,encoding = 'latin-1') #comments=False) # ReadData
#************************************************************************
# Main
#************************************************************************
ICOADSV = 'I300'
THRESHV = '55'
ITV = 'OBSclim2NBC' # ERAclimNBC, OBSclim1NBC, OBSclim2NBC
NOWMON = 'JAN'
NOWYEAR = '2019'
StartYear = 1973
EndYear = 2018
NYears = (EndYear - StartYear)+1
INDIR = '/data/users/hadkw/WORKING_HADISDH/MARINE/LISTS/'
INFILPT = 'PTTypeMetaDataStats_'+ITV+'_'+ICOADSV+'_'+THRESHV+'_'+NOWMON+NOWYEAR+'.txt'
INFILPTD = 'PTTypeMetaDataStatsDAY_'+ITV+'_'+ICOADSV+'_'+THRESHV+'_'+NOWMON+NOWYEAR+'.txt'
INFILPTN = 'PTTypeMetaDataStatsNIGHT_'+ITV+'_'+ICOADSV+'_'+THRESHV+'_'+NOWMON+NOWYEAR+'.txt'
INFILPTG = 'PTTypeGOODMetaDataStats_'+ITV+'_'+ICOADSV+'_'+THRESHV+'_'+NOWMON+NOWYEAR+'.txt'
INFILPTGD = 'PTTypeGOODMetaDataStatsDAY_'+ITV+'_'+ICOADSV+'_'+THRESHV+'_'+NOWMON+NOWYEAR+'.txt'
INFILPTGN = 'PTTypeGOODMetaDataStatsNIGHT_'+ITV+'_'+ICOADSV+'_'+THRESHV+'_'+NOWMON+NOWYEAR+'.txt'
INFILQC = 'QCMetaDataStats_'+ITV+'_'+ICOADSV+'_'+THRESHV+'_'+NOWMON+NOWYEAR+'.txt'
INFILQCD = 'QCMetaDataStatsDAY_'+ITV+'_'+ICOADSV+'_'+THRESHV+'_'+NOWMON+NOWYEAR+'.txt'
INFILQCN = 'QCMetaDataStatsNIGHT_'+ITV+'_'+ICOADSV+'_'+THRESHV+'_'+NOWMON+NOWYEAR+'.txt'
OUTDIR = '/data/users/hadkw/WORKING_HADISDH/MARINE/IMAGES/'
OutPltPTG = 'SummaryPT_good_'+ITV+'_'+ICOADSV+'_'+THRESHV+'_'+NOWMON+NOWYEAR
OutPltPTGD = 'SummaryPT_DAYgood_'+ITV+'_'+ICOADSV+'_'+THRESHV+'_'+NOWMON+NOWYEAR
OutPltPTGN = 'SummaryPT_NIGHTgood_'+ITV+'_'+ICOADSV+'_'+THRESHV+'_'+NOWMON+NOWYEAR
OutPltPT = 'SummaryPT_'+ITV+'_'+ICOADSV+'_'+THRESHV+'_'+NOWMON+NOWYEAR
OutPltPTD = 'SummaryPT_DAY_'+ITV+'_'+ICOADSV+'_'+THRESHV+'_'+NOWMON+NOWYEAR
OutPltPTN = 'SummaryPT_NIGHT_'+ITV+'_'+ICOADSV+'_'+THRESHV+'_'+NOWMON+NOWYEAR
OutPltQC = 'SummaryQC_'+ITV+'_'+ICOADSV+'_'+THRESHV+'_'+NOWMON+NOWYEAR
OutPltQCD = 'SummaryQC_DAY_'+ITV+'_'+ICOADSV+'_'+THRESHV+'_'+NOWMON+NOWYEAR
OutPltQCN = 'SummaryQC_NIGHT_'+ITV+'_'+ICOADSV+'_'+THRESHV+'_'+NOWMON+NOWYEAR
# Read in Instrument file and populate lists
#typee = ("int","|S17","int","|S37","float","|S16","float","|S16","float","|S16","float","|S16","float","|S16","float",
# "|S16","float","|S16","float","|S16","float","|S17","float","|S17","float","|S2")
typee = ("int","|U17","int","|U37","float","|U16","float","|U16","float","|U16","float","|U16","float","|U16","float",
"|U16","float","|U16","float","|U16","float","|U17","float","|U17","float","|U2")
delimee = (4,17,9,37,5,16,5,16,5,16,5,16,5,16,5,
16,5,16,5,16,5,17,5,17,5,2)
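# typee gives the dtype of each fixed-width column and delimee gives the column
# widths in characters; np.genfromtxt pairs them up, so both tuples must stay
# the same length and in the same order.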
# create empty arrays for instrument type data bundles
Yr = []
nobs = [] # we're looking at all obs, not just those with 'good' data
PT0s = []
PT1s = []
PT2s = []
PT3s = []
PT4s = []
PT5s = []
PT6s = []
PT8s = []
PT9s = []
PT10s = []
PT15s = []
RawData = ReadData(INDIR+INFILPT,typee, delimee)
Yr = np.array(RawData['f0'][0:NYears])
nobs = np.array(RawData['f2'][0:NYears])
PT0s = np.array(RawData['f4'][0:NYears])
PT1s = np.array(RawData['f6'][0:NYears])
PT2s = np.array(RawData['f8'][0:NYears])
PT3s = np.array(RawData['f10'][0:NYears])
PT4s = np.array(RawData['f12'][0:NYears])
PT5s = np.array(RawData['f14'][0:NYears])
PT6s = np.array(RawData['f16'][0:NYears])
PT8s = np.array(RawData['f18'][0:NYears])
PT9s = np.array(RawData['f20'][0:NYears])
PT10s = np.array(RawData['f22'][0:NYears])
PT15s = np.array(RawData['f24'][0:NYears])
# Find the years with non-zero counts for each PT type so that empty categories are not plotted
gPT0s = np.where(PT0s > 0.)[0]
gPT1s = np.where(PT1s > 0.)[0]
gPT2s = np.where(PT2s > 0.)[0]
gPT3s = np.where(PT3s > 0.)[0]
gPT4s = np.where(PT4s > 0.)[0]
gPT5s = np.where(PT5s > 0.)[0]
gPT6s = np.where(PT6s > 0.)[0]
gPT8s = np.where(PT8s > 0.)[0]
gPT9s = np.where(PT9s > 0.)[0]
gPT10s = np.where(PT10s > 0.)[0]
gPT15s = np.where(PT15s > 0.)[0]
# Make plot of instrument types for EOT and EOH over time
gap= 0.04
#PT: 0=US Navy/unknown - usually ship, 1=merchant ship/foreign military, 2=ocean station vessel off station (or unknown loc),
#    3=ocean station vessel on station, 4=lightship, 5=ship, 6=moored buoy, 7=drifting buoy, 8=ice buoy, 9=ice station,
#    10=oceanographic station, 11=MBT (bathythermograph), 12=XBT (bathythermograph),
#    13=Coastal-Marine Automated Network (C-MAN), 14=other coastal/island station, 15=fixed ocean platform, 16=tide gauge, 17=hi res CTD, 18=profiling float, 19=undulating oceanographic recorder, 20=autonomous pinniped bathythermograph (seal?), 21=glider
plt.clf()
fig, ax1 = plt.subplots()
ax1.tick_params(axis='y',direction='in',right=True)
ax1.plot(Yr,nobs,c='black',linestyle='solid',linewidth=2)
i=0
ax1.annotate("TOTAL",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='black')
latest = np.zeros(NYears)
oldlatest=copy.copy(latest)
latest = latest + PT0s
if (len(gPT0s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='hotpink',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='hotpink',edgecolor='none')
i=1
ax1.annotate("0 = US Navy/Unknown - usually ship",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='hotpink')
oldlatest=copy.copy(latest)
latest = latest + PT1s
if (len(gPT1s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='deeppink',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='deeppink',edgecolor='none')
i=2
ax1.annotate("1 = merchant ship/foreign military",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='deeppink')
oldlatest=copy.copy(latest)
latest = latest + PT2s
if (len(gPT2s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='red',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='red',edgecolor='none')
i=3
ax1.annotate("2 = ocean vessel off station (or unknown location)",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='red')
#pdb.set_trace()
oldlatest=copy.copy(latest)
latest = latest + PT3s
if (len(gPT3s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='darkorange',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='darkorange',edgecolor='none')
#pdb.set_trace()
i=4
ax1.annotate("3 = ocean vessel on stations",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='darkorange')
oldlatest=copy.copy(latest)
latest = latest + PT4s
if (len(gPT4s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='gold',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='gold',edgecolor='none')
i=5
ax1.annotate("4 = lightship",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='gold')
oldlatest=copy.copy(latest)
latest = latest + PT5s
if (len(gPT5s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='grey',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='grey',edgecolor='none')
i=6
ax1.annotate("5 = ship",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='grey')
oldlatest=copy.copy(latest)
latest = latest + PT6s
if (len(gPT6s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='limegreen',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='limegreen',edgecolor='none')
i=7
ax1.annotate("6 = moored buoy",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='limegreen')
oldlatest=copy.copy(latest)
latest = latest + PT8s
if (len(gPT8s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='olivedrab',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='olivedrab',edgecolor='none')
i=8
ax1.annotate("8 = ice buoy",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='olivedrab')
#oldlatest=copy.copy(latest)
#latest = latest + PT9s
#if (len(gPT9s) > 0):
# #ax1.plot(Yr,(latest/100.)*nobs,c='blue',linestyle='solid',linewidth=1)
# ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='blue')
#i=9
#ax1.annotate("9 = ice station",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='blue')
#oldlatest=copy.copy(latest)
#latest = latest + PT10s
#if (len(gPT10s) > 0):
# #ax1.plot(Yr,(latest/100.)*nobs,c='indigo',linestyle='solid',linewidth=1)
# ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='indigo')
#i=10
#ax1.annotate("10 = oceanographic stations",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='indigo')
#oldlatest=copy.copy(latest)
#latest = latest + PT15s
#if (len(gPT15s) > 0):
# #ax1.plot(Yr,(latest/100.)*nobs,c='violet',linestyle='solid',linewidth=1)
# ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='violet')
#i=11
#ax1.annotate("15 = fixed ocean platform",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='violet')
ax1.set_xlabel('Year')
ax1.set_ylabel('No. of Obs (PT Type proportional)', color='black')
ax1.set_ylim(0,5000000)
ax1.set_xlim(StartYear-1,EndYear+1)
plt.tight_layout()
plt.savefig(OUTDIR+OutPltPT+".eps")
plt.savefig(OUTDIR+OutPltPT+".png")
#pdb.set_trace()
# create empty arrays for instrument type data bundles
Yr = []
nobs = [] # we're looking at all obs, not just those with 'good' data
PT0s = []
PT1s = []
PT2s = []
PT3s = []
PT4s = []
PT5s = []
PT6s = []
PT8s = []
PT9s = []
PT10s = []
PT15s = []
RawData = ReadData(INDIR+INFILPTD,typee, delimee)
Yr = np.array(RawData['f0'][0:NYears])
nobs = np.array(RawData['f2'][0:NYears])
PT0s = np.array(RawData['f4'][0:NYears])
PT1s = np.array(RawData['f6'][0:NYears])
PT2s = np.array(RawData['f8'][0:NYears])
PT3s = np.array(RawData['f10'][0:NYears])
PT4s = np.array(RawData['f12'][0:NYears])
PT5s = np.array(RawData['f14'][0:NYears])
PT6s = np.array(RawData['f16'][0:NYears])
PT8s = np.array(RawData['f18'][0:NYears])
PT9s = np.array(RawData['f20'][0:NYears])
PT10s = np.array(RawData['f22'][0:NYears])
PT15s = np.array(RawData['f24'][0:NYears])
    # Find indices of non-zero (positive) percentages so that absent platform types are not plotted
gPT0s = np.where(PT0s > 0.)[0]
gPT1s = np.where(PT1s > 0.)[0]
gPT2s = np.where(PT2s > 0.)[0]
gPT3s = np.where(PT3s > 0.)[0]
gPT4s = np.where(PT4s > 0.)[0]
gPT5s = np.where(PT5s > 0.)[0]
gPT6s = np.where(PT6s > 0.)[0]
gPT8s = np.where(PT8s > 0.)[0]
gPT9s = np.where(PT9s > 0.)[0]
gPT10s = np.where(PT10s > 0.)[0]
gPT15s = np.where(PT15s > 0.)[0]
# Make plot of instrument types for EOT and EOH over time
gap= 0.04
    #PT: 0=US Navy/unknown - usually ship, 1=merchant ship/foreign military, 2=ocean station vessel off station (or unknown loc),
    #    3=ocean station vessel on station, 4=lightship, 5=ship, 6=moored buoy, 7=drifting buoy, 8=ice buoy, 9=ice station,
    #    10=oceanographic station, 11=MBT (bathythermograph), 12=XBT (bathythermograph),
    #    13=Coastal-Marine Automated Network (C-MAN), 14=other coastal/island station, 15=fixed ocean platform, 16=tide gauge, 17=hi res CTD, 18=profiling float, 19=undulating oceanographic recorder, 20=autonomous pinniped bathythermograph (seal?), 21=glider
plt.clf()
fig, ax1 = plt.subplots()
ax1.tick_params(axis='y',direction='in',right=True)
ax1.plot(Yr,nobs,c='black',linestyle='solid',linewidth=2)
i=0
ax1.annotate("TOTAL DAY",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='black')
latest = np.zeros(NYears)
oldlatest=copy.copy(latest)
latest = latest + PT0s
if (len(gPT0s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='hotpink',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='hotpink',edgecolor='none')
i=1
ax1.annotate("0 = US Navy/Unknown - usually ship",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='hotpink')
oldlatest=copy.copy(latest)
latest = latest + PT1s
if (len(gPT1s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='deeppink',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='deeppink',edgecolor='none')
i=2
ax1.annotate("1 = merchant ship/foreign military",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='deeppink')
oldlatest=copy.copy(latest)
latest = latest + PT2s
if (len(gPT2s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='red',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='red',edgecolor='none')
i=3
ax1.annotate("2 = ocean vessel off station (or unknown location)",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='red')
#pdb.set_trace()
oldlatest=copy.copy(latest)
latest = latest + PT3s
if (len(gPT3s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='darkorange',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='darkorange',edgecolor='none')
#pdb.set_trace()
i=4
ax1.annotate("3 = ocean vessel on stations",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='darkorange')
oldlatest=copy.copy(latest)
latest = latest + PT4s
if (len(gPT4s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='gold',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='gold',edgecolor='none')
i=5
ax1.annotate("4 = lightship",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='gold')
oldlatest=copy.copy(latest)
latest = latest + PT5s
if (len(gPT5s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='grey',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='grey',edgecolor='none')
i=6
ax1.annotate("5 = ship",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='grey')
oldlatest=copy.copy(latest)
latest = latest + PT6s
if (len(gPT6s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='limegreen',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='limegreen',edgecolor='none')
i=7
ax1.annotate("6 = moored buoy",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='limegreen')
oldlatest=copy.copy(latest)
latest = latest + PT8s
if (len(gPT8s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='olivedrab',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='olivedrab',edgecolor='none')
i=8
ax1.annotate("8 = ice buoy",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='olivedrab')
#oldlatest=copy.copy(latest)
#latest = latest + PT9s
#if (len(gPT9s) > 0):
# #ax1.plot(Yr,(latest/100.)*nobs,c='blue',linestyle='solid',linewidth=1)
# ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='blue')
#i=9
#ax1.annotate("9 = ice station",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='blue')
#oldlatest=copy.copy(latest)
#latest = latest + PT10s
#if (len(gPT10s) > 0):
# #ax1.plot(Yr,(latest/100.)*nobs,c='indigo',linestyle='solid',linewidth=1)
# ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='indigo')
#i=10
#ax1.annotate("10 = oceanographic stations",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='indigo')
#oldlatest=copy.copy(latest)
#latest = latest + PT15s
#if (len(gPT15s) > 0):
# #ax1.plot(Yr,(latest/100.)*nobs,c='violet',linestyle='solid',linewidth=1)
# ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='violet')
#i=11
#ax1.annotate("15 = fixed ocean platform",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='violet')
ax1.set_xlabel('Year')
ax1.set_ylabel('No. of Obs (PT Type proportional)', color='black')
ax1.set_ylim(0,5000000)
ax1.set_xlim(StartYear-1,EndYear+1)
plt.tight_layout()
plt.savefig(OUTDIR+OutPltPTD+".eps")
plt.savefig(OUTDIR+OutPltPTD+".png")
# create empty arrays for instrument type data bundles
Yr = []
nobs = [] # we're looking at all obs, not just those with 'good' data
PT0s = []
PT1s = []
PT2s = []
PT3s = []
PT4s = []
PT5s = []
PT6s = []
PT8s = []
PT9s = []
PT10s = []
PT15s = []
RawData = ReadData(INDIR+INFILPTN,typee, delimee)
Yr = np.array(RawData['f0'][0:NYears])
nobs = np.array(RawData['f2'][0:NYears])
PT0s = np.array(RawData['f4'][0:NYears])
PT1s = np.array(RawData['f6'][0:NYears])
PT2s = np.array(RawData['f8'][0:NYears])
PT3s = np.array(RawData['f10'][0:NYears])
PT4s = np.array(RawData['f12'][0:NYears])
PT5s = np.array(RawData['f14'][0:NYears])
PT6s = np.array(RawData['f16'][0:NYears])
PT8s = np.array(RawData['f18'][0:NYears])
PT9s = np.array(RawData['f20'][0:NYears])
PT10s = np.array(RawData['f22'][0:NYears])
PT15s = np.array(RawData['f24'][0:NYears])
    # Find indices of non-zero (positive) percentages so that absent platform types are not plotted
gPT0s = np.where(PT0s > 0.)[0]
gPT1s = np.where(PT1s > 0.)[0]
gPT2s = np.where(PT2s > 0.)[0]
gPT3s = np.where(PT3s > 0.)[0]
gPT4s = np.where(PT4s > 0.)[0]
gPT5s = np.where(PT5s > 0.)[0]
gPT6s = np.where(PT6s > 0.)[0]
gPT8s = np.where(PT8s > 0.)[0]
gPT9s = np.where(PT9s > 0.)[0]
gPT10s = np.where(PT10s > 0.)[0]
gPT15s = np.where(PT15s > 0.)[0]
# Make plot of instrument types for EOT and EOH over time
gap= 0.04
    #PT: 0=US Navy/unknown - usually ship, 1=merchant ship/foreign military, 2=ocean station vessel off station (or unknown loc),
    #    3=ocean station vessel on station, 4=lightship, 5=ship, 6=moored buoy, 7=drifting buoy, 8=ice buoy, 9=ice station,
    #    10=oceanographic station, 11=MBT (bathythermograph), 12=XBT (bathythermograph),
    #    13=Coastal-Marine Automated Network (C-MAN), 14=other coastal/island station, 15=fixed ocean platform, 16=tide gauge, 17=hi res CTD, 18=profiling float, 19=undulating oceanographic recorder, 20=autonomous pinniped bathythermograph (seal?), 21=glider
plt.clf()
fig, ax1 = plt.subplots()
ax1.tick_params(axis='y',direction='in',right=True)
ax1.plot(Yr,nobs,c='black',linestyle='solid',linewidth=2)
i=0
ax1.annotate("TOTAL NIGHT",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='black')
latest = np.zeros(NYears)
oldlatest=copy.copy(latest)
latest = latest + PT0s
if (len(gPT0s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='hotpink',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='hotpink',edgecolor='none')
i=1
ax1.annotate("0 = US Navy/Unknown - usually ship",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='hotpink')
oldlatest=copy.copy(latest)
latest = latest + PT1s
if (len(gPT1s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='deeppink',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='deeppink',edgecolor='none')
i=2
ax1.annotate("1 = merchant ship/foreign military",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='deeppink')
oldlatest=copy.copy(latest)
latest = latest + PT2s
if (len(gPT2s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='red',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='red',edgecolor='none')
i=3
ax1.annotate("2 = ocean vessel off station (or unknown location)",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='red')
#pdb.set_trace()
oldlatest=copy.copy(latest)
latest = latest + PT3s
if (len(gPT3s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='darkorange',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='darkorange',edgecolor='none')
#pdb.set_trace()
i=4
ax1.annotate("3 = ocean vessel on stations",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='darkorange')
oldlatest=copy.copy(latest)
latest = latest + PT4s
if (len(gPT4s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='gold',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='gold',edgecolor='none')
i=5
ax1.annotate("4 = lightship",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='gold')
oldlatest=copy.copy(latest)
latest = latest + PT5s
if (len(gPT5s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='grey',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='grey',edgecolor='none')
i=6
ax1.annotate("5 = ship",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='grey')
oldlatest=copy.copy(latest)
latest = latest + PT6s
if (len(gPT6s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='limegreen',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='limegreen',edgecolor='none')
i=7
ax1.annotate("6 = moored buoy",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='limegreen')
oldlatest=copy.copy(latest)
latest = latest + PT8s
if (len(gPT8s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='olivedrab',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='olivedrab',edgecolor='none')
i=8
ax1.annotate("8 = ice buoy",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='olivedrab')
#oldlatest=copy.copy(latest)
#latest = latest + PT9s
#if (len(gPT9s) > 0):
# #ax1.plot(Yr,(latest/100.)*nobs,c='blue',linestyle='solid',linewidth=1)
# ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='blue')
#i=9
#ax1.annotate("9 = ice station",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='blue')
#oldlatest=copy.copy(latest)
#latest = latest + PT10s
#if (len(gPT10s) > 0):
# #ax1.plot(Yr,(latest/100.)*nobs,c='indigo',linestyle='solid',linewidth=1)
# ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='indigo')
#i=10
#ax1.annotate("10 = oceanographic stations",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='indigo')
#oldlatest=copy.copy(latest)
#latest = latest + PT15s
#if (len(gPT15s) > 0):
# #ax1.plot(Yr,(latest/100.)*nobs,c='violet',linestyle='solid',linewidth=1)
# ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='violet')
#i=11
#ax1.annotate("15 = fixed ocean platform",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='violet')
ax1.set_xlabel('Year')
ax1.set_ylabel('No. of Obs (PT Type proportional)', color='black')
ax1.set_ylim(0,5000000)
ax1.set_xlim(StartYear-1,EndYear+1)
plt.tight_layout()
plt.savefig(OUTDIR+OutPltPTN+".eps")
plt.savefig(OUTDIR+OutPltPTN+".png")
# create empty arrays for instrument type data bundles
Yr = []
nobs = [] # we're looking at all obs, not just those with 'good' data
PT0s = []
PT1s = []
PT2s = []
PT3s = []
PT4s = []
PT5s = []
PT6s = []
PT8s = []
PT9s = []
PT10s = []
PT15s = []
RawData = ReadData(INDIR+INFILPTG,typee, delimee)
Yr = np.array(RawData['f0'][0:NYears])
nobs = np.array(RawData['f2'][0:NYears])
PT0s = np.array(RawData['f4'][0:NYears])
PT1s = np.array(RawData['f6'][0:NYears])
PT2s = np.array(RawData['f8'][0:NYears])
PT3s = np.array(RawData['f10'][0:NYears])
PT4s = np.array(RawData['f12'][0:NYears])
PT5s = np.array(RawData['f14'][0:NYears])
PT6s = np.array(RawData['f16'][0:NYears])
PT8s = np.array(RawData['f18'][0:NYears])
PT9s = np.array(RawData['f20'][0:NYears])
PT10s = np.array(RawData['f22'][0:NYears])
PT15s = np.array(RawData['f24'][0:NYears])
    # Find indices of non-zero (positive) percentages so that absent platform types are not plotted
gPT0s = np.where(PT0s > 0.)[0]
gPT1s = np.where(PT1s > 0.)[0]
gPT2s = np.where(PT2s > 0.)[0]
gPT3s = np.where(PT3s > 0.)[0]
gPT4s = np.where(PT4s > 0.)[0]
gPT5s = np.where(PT5s > 0.)[0]
gPT6s = np.where(PT6s > 0.)[0]
gPT8s = np.where(PT8s > 0.)[0]
gPT9s = np.where(PT9s > 0.)[0]
gPT10s = np.where(PT10s > 0.)[0]
gPT15s = np.where(PT15s > 0.)[0]
# Make plot of instrument types for EOT and EOH over time
gap= 0.04
    #PT: 0=US Navy/unknown - usually ship, 1=merchant ship/foreign military, 2=ocean station vessel off station (or unknown loc),
    #    3=ocean station vessel on station, 4=lightship, 5=ship, 6=moored buoy, 7=drifting buoy, 8=ice buoy, 9=ice station,
    #    10=oceanographic station, 11=MBT (bathythermograph), 12=XBT (bathythermograph),
    #    13=Coastal-Marine Automated Network (C-MAN), 14=other coastal/island station, 15=fixed ocean platform, 16=tide gauge, 17=hi res CTD, 18=profiling float, 19=undulating oceanographic recorder, 20=autonomous pinniped bathythermograph (seal?), 21=glider
plt.clf()
fig, ax1 = plt.subplots()
ax1.tick_params(axis='y',direction='in',right=True)
ax1.plot(Yr,nobs,c='black',linestyle='solid',linewidth=2)
i=0
ax1.annotate("TOTAL GOOD",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='black')
latest = np.zeros(NYears)
oldlatest=copy.copy(latest)
latest = latest + PT0s
if (len(gPT0s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='hotpink',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='hotpink',edgecolor='none')
i=1
ax1.annotate("0 = US Navy/Unknown - usually ship",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='hotpink')
oldlatest=copy.copy(latest)
latest = latest + PT1s
if (len(gPT1s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='deeppink',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='deeppink',edgecolor='none')
i=2
ax1.annotate("1 = merchant ship/foreign military",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='deeppink')
oldlatest=copy.copy(latest)
latest = latest + PT2s
if (len(gPT2s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='red',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='red',edgecolor='none')
i=3
ax1.annotate("2 = ocean vessel off station (or unknown location)",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='red')
#pdb.set_trace()
oldlatest=copy.copy(latest)
latest = latest + PT3s
if (len(gPT3s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='darkorange',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='darkorange',edgecolor='none')
#pdb.set_trace()
i=4
ax1.annotate("3 = ocean vessel on stations",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='darkorange')
oldlatest=copy.copy(latest)
latest = latest + PT4s
if (len(gPT4s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='gold',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='gold',edgecolor='none')
i=5
ax1.annotate("4 = lightship",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='gold')
oldlatest=copy.copy(latest)
latest = latest + PT5s
if (len(gPT5s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='grey',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='grey',edgecolor='none')
i=6
ax1.annotate("5 = ship",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='grey')
oldlatest=copy.copy(latest)
latest = latest + PT6s
if (len(gPT6s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='limegreen',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='limegreen',edgecolor='none')
i=7
ax1.annotate("6 = moored buoy",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='limegreen')
oldlatest=copy.copy(latest)
latest = latest + PT8s
if (len(gPT8s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='olivedrab',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='olivedrab',edgecolor='none')
i=8
ax1.annotate("8 = ice buoy",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='olivedrab')
#oldlatest=copy.copy(latest)
#latest = latest + PT9s
#if (len(gPT9s) > 0):
# #ax1.plot(Yr,(latest/100.)*nobs,c='blue',linestyle='solid',linewidth=1)
# ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='blue')
#i=9
#ax1.annotate("9 = ice station",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='blue')
#oldlatest=copy.copy(latest)
#latest = latest + PT10s
#if (len(gPT10s) > 0):
# #ax1.plot(Yr,(latest/100.)*nobs,c='indigo',linestyle='solid',linewidth=1)
# ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='indigo')
#i=10
#ax1.annotate("10 = oceanographic stations",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='indigo')
#oldlatest=copy.copy(latest)
#latest = latest + PT15s
#if (len(gPT15s) > 0):
# #ax1.plot(Yr,(latest/100.)*nobs,c='violet',linestyle='solid',linewidth=1)
# ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='violet')
#i=11
#ax1.annotate("15 = fixed ocean platform",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='violet')
ax1.set_xlabel('Year')
ax1.set_ylabel('No. of Obs (PT Type proportional)', color='black')
ax1.set_ylim(0,5000000)
ax1.set_xlim(StartYear-1,EndYear+1)
plt.tight_layout()
plt.savefig(OUTDIR+OutPltPTG+".eps")
plt.savefig(OUTDIR+OutPltPTG+".png")
# create empty arrays for instrument type data bundles
Yr = []
nobs = [] # we're looking at all obs, not just those with 'good' data
PT0s = []
PT1s = []
PT2s = []
PT3s = []
PT4s = []
PT5s = []
PT6s = []
PT8s = []
PT9s = []
PT10s = []
PT15s = []
RawData = ReadData(INDIR+INFILPTGD,typee, delimee)
Yr = np.array(RawData['f0'][0:NYears])
nobs = np.array(RawData['f2'][0:NYears])
PT0s = np.array(RawData['f4'][0:NYears])
PT1s = np.array(RawData['f6'][0:NYears])
PT2s = np.array(RawData['f8'][0:NYears])
PT3s = np.array(RawData['f10'][0:NYears])
PT4s = np.array(RawData['f12'][0:NYears])
PT5s = np.array(RawData['f14'][0:NYears])
PT6s = np.array(RawData['f16'][0:NYears])
PT8s = np.array(RawData['f18'][0:NYears])
PT9s = np.array(RawData['f20'][0:NYears])
PT10s = np.array(RawData['f22'][0:NYears])
PT15s = np.array(RawData['f24'][0:NYears])
    # Find indices of non-zero (positive) percentages so that absent platform types are not plotted
gPT0s = np.where(PT0s > 0.)[0]
gPT1s = np.where(PT1s > 0.)[0]
gPT2s = np.where(PT2s > 0.)[0]
gPT3s = np.where(PT3s > 0.)[0]
gPT4s = np.where(PT4s > 0.)[0]
gPT5s = np.where(PT5s > 0.)[0]
gPT6s = np.where(PT6s > 0.)[0]
gPT8s = np.where(PT8s > 0.)[0]
gPT9s = np.where(PT9s > 0.)[0]
gPT10s = np.where(PT10s > 0.)[0]
gPT15s = np.where(PT15s > 0.)[0]
# Make plot of instrument types for EOT and EOH over time
gap= 0.04
    #PT: 0=US Navy/unknown - usually ship, 1=merchant ship/foreign military, 2=ocean station vessel off station (or unknown loc),
    #    3=ocean station vessel on station, 4=lightship, 5=ship, 6=moored buoy, 7=drifting buoy, 8=ice buoy, 9=ice station,
    #    10=oceanographic station, 11=MBT (bathythermograph), 12=XBT (bathythermograph),
    #    13=Coastal-Marine Automated Network (C-MAN), 14=other coastal/island station, 15=fixed ocean platform, 16=tide gauge, 17=hi res CTD, 18=profiling float, 19=undulating oceanographic recorder, 20=autonomous pinniped bathythermograph (seal?), 21=glider
plt.clf()
fig, ax1 = plt.subplots()
ax1.tick_params(axis='y',direction='in',right=True)
ax1.plot(Yr,nobs,c='black',linestyle='solid',linewidth=2)
i=0
ax1.annotate("TOTAL GOOD DAY",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='black')
latest = np.zeros(NYears)
oldlatest=copy.copy(latest)
latest = latest + PT0s
if (len(gPT0s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='hotpink',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='hotpink',edgecolor='none')
i=1
ax1.annotate("0 = US Navy/Unknown - usually ship",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='hotpink')
oldlatest=copy.copy(latest)
latest = latest + PT1s
if (len(gPT1s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='deeppink',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='deeppink',edgecolor='none')
i=2
ax1.annotate("1 = merchant ship/foreign military",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='deeppink')
oldlatest=copy.copy(latest)
latest = latest + PT2s
if (len(gPT2s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='red',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='red',edgecolor='none')
i=3
ax1.annotate("2 = ocean vessel off station (or unknown location)",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='red')
#pdb.set_trace()
oldlatest=copy.copy(latest)
latest = latest + PT3s
if (len(gPT3s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='darkorange',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='darkorange',edgecolor='none')
#pdb.set_trace()
i=4
ax1.annotate("3 = ocean vessel on stations",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='darkorange')
oldlatest=copy.copy(latest)
latest = latest + PT4s
if (len(gPT4s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='gold',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='gold',edgecolor='none')
i=5
ax1.annotate("4 = lightship",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='gold')
oldlatest=copy.copy(latest)
latest = latest + PT5s
if (len(gPT5s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='grey',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='grey',edgecolor='none')
i=6
ax1.annotate("5 = ship",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='grey')
oldlatest=copy.copy(latest)
latest = latest + PT6s
if (len(gPT6s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='limegreen',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='limegreen',edgecolor='none')
i=7
ax1.annotate("6 = moored buoy",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='limegreen')
oldlatest=copy.copy(latest)
latest = latest + PT8s
if (len(gPT8s) > 0):
#ax1.plot(Yr,(latest/100.)*nobs,c='olivedrab',linestyle='solid',linewidth=1)
ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='olivedrab',edgecolor='none')
i=8
ax1.annotate("8 = ice buoy",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='olivedrab')
#oldlatest=copy.copy(latest)
#latest = latest + PT9s
#if (len(gPT9s) > 0):
# #ax1.plot(Yr,(latest/100.)*nobs,c='blue',linestyle='solid',linewidth=1)
# ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='blue')
#i=9
#ax1.annotate("9 = ice station",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='blue')
#oldlatest=copy.copy(latest)
#latest = latest + PT10s
#if (len(gPT10s) > 0):
# #ax1.plot(Yr,(latest/100.)*nobs,c='indigo',linestyle='solid',linewidth=1)
# ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='indigo')
#i=10
#ax1.annotate("10 = oceanographic stations",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='indigo')
#oldlatest=copy.copy(latest)
#latest = latest + PT15s
#if (len(gPT15s) > 0):
# #ax1.plot(Yr,(latest/100.)*nobs,c='violet',linestyle='solid',linewidth=1)
# ax1.fill_between(Yr,(oldlatest/100.)*nobs,(latest/100.)*nobs,facecolor='violet')
#i=11
#ax1.annotate("15 = fixed ocean platform",xy=(0.05,0.94-(i*gap)),xycoords='axes fraction',size=12,color='violet')
ax1.set_xlabel('Year')
ax1.set_ylabel('No. of Obs (PT Type proportional)', color='black')
ax1.set_ylim(0,5000000)
ax1.set_xlim(StartYear-1,EndYear+1)
plt.tight_layout()
plt.savefig(OUTDIR+OutPltPTGD+".eps")
plt.savefig(OUTDIR+OutPltPTGD+".png")
# create empty arrays for instrument type data bundles
Yr = []
nobs = [] # we're looking at all obs, not just those with 'good' data
PT0s = []
PT1s = []
PT2s = []
PT3s = []
PT4s = []
PT5s = []
PT6s = []
PT8s = []
PT9s = []
PT10s = []
PT15s = []
RawData = ReadData(INDIR+INFILPTGN,typee, delimee)
Yr = np.array(RawData['f0'][0:NYears])
nobs = np.array(RawData['f2'][0:NYears])
PT0s = np.array(RawData['f4'][0:NYears])
PT1s = np.array(RawData['f6'][0:NYears])
PT2s = np.array(RawData['f8'][0:NYears])
PT3s = np.array(RawData['f10'][0:NYears])
PT4s = np.array(RawData['f12'][0:NYears])
PT5s = np.array(RawData['f14'][0:NYears])
PT6s =
|
np.array(RawData['f16'][0:NYears])
|
numpy.array
|
"""
special column names:
mle -- pivot at unpenalized MLE
truth -- pivot at true parameter
pvalue -- tests of H0 for each variable
count -- how many runs (including last one) until success
active -- was variable truly active
naive_pvalue -- p-value computed without conditioning on selection
cover -- did the selective interval cover the true parameter
naive_cover -- did the naive interval cover the true parameter
"""
from __future__ import division
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import probplot, uniform
import statsmodels.api as sm
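# Minimal sketch (illustrative values only) of a results frame carrying the special columns
# described in the module docstring; real frames are produced by collect_multiple_runs below.
# example_results = pd.DataFrame({
#     'truth': [0.42, 0.07, 0.88],     # pivot at the true parameter
#     'pvalue': [0.51, 0.01, 0.73],    # test of H0 for each variable
#     'active': [False, True, False],  # was the variable truly active
#     'count': [1, 1, 1],              # runs until success
# })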
def collect_multiple_runs(test_fn, columns, nrun, summary_fn, *args, **kwargs):
"""
Assumes a wait_for_return_value test...
"""
dfs = []
for i in range(nrun):
print(i)
count, result = test_fn(*args, **kwargs)
#print(result)
#print(len(np.atleast_1d(result[0])))
if hasattr(result, "__len__"):
df_i = pd.DataFrame(index=np.arange(len(np.atleast_1d(result[0]))),
columns=columns + ['count', 'run'])
else:
df_i = pd.DataFrame(index=np.arange(1),
columns=columns + ['count', 'run'])
df_i.loc[:,'count'] = count
df_i.loc[:,'run'] = i
for col, v in zip(columns, result):
df_i.loc[:,col] = np.atleast_1d(v)
df_i['func'] = [str(test_fn)] * len(df_i)
dfs.append(df_i)
if summary_fn is not None:
summary_fn(pd.concat(dfs))
return pd.concat(dfs)
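# Hedged usage sketch -- `my_test` below is hypothetical, not part of this module. The test
# function must return a (count, result) pair, with `result` ordered to match `columns`:
#
#   def my_test(**kwargs):
#       ...  # run one selective-inference simulation
#       return count, (truth_pivots, pvalues, covered)
#
#   df = collect_multiple_runs(my_test, ['truth', 'pvalue', 'cover'],
#                              nrun=50, summary_fn=None)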
def pvalue_plot(multiple_results, screening=False, fig=None, label = '$H_0$', colors=['b','r']):
"""
Extract pvalues and group by
null and alternative.
"""
P0 = multiple_results['pvalue'][~multiple_results['active_var']]
P0 = P0[~pd.isnull(P0)]
PA = multiple_results['pvalue'][multiple_results['active_var']]
PA = PA[~pd.isnull(PA)]
if fig is None:
fig = plt.figure()
ax = fig.gca()
fig.suptitle('Null and alternative p-values')
grid = np.linspace(0, 1, 51)
if len(P0) > 0:
ecdf0 = sm.distributions.ECDF(P0)
F0 = ecdf0(grid)
ax.plot(grid, F0, '--o', c=colors[0], lw=2, label=label)
if len(PA) > 0:
ecdfA = sm.distributions.ECDF(PA)
FA = ecdfA(grid)
ax.plot(grid, FA, '--o', c=colors[1], lw=2, label=r'$H_A$')
ax.plot([0, 1], [0, 1], 'k-', lw=1)
ax.set_xlabel("observed p-value", fontsize=18)
ax.set_ylabel("empirical CDF", fontsize=18)
ax.legend(loc='lower right', fontsize=18)
if screening:
screen = 1. / np.mean(multiple_results.loc[multiple_results.index == 0,'count'])
ax.set_title('Screening: %0.2f' % screen)
return fig
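# Example use (assuming `df` carries 'pvalue' and 'active_var' columns):
#   fig = pvalue_plot(df, screening=True)
#   fig.savefig('pvalues.png')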
def naive_pvalue_plot(multiple_results, screening=False, fig=None, colors=['r', 'g']):
"""
Extract naive pvalues and group by
null and alternative.
"""
P0 = multiple_results['naive_pvalues'][~multiple_results['active_var']]
P0 = P0[~pd.isnull(P0)]
PA = multiple_results['naive_pvalues'][multiple_results['active_var']]
PA = PA[~pd.isnull(PA)]
if fig is None:
fig = plt.figure()
ax = fig.gca()
fig.suptitle('Null and alternative p-values')
grid = np.linspace(0, 1, 51)
if len(P0) > 0:
ecdf0 = sm.distributions.ECDF(P0)
F0 = ecdf0(grid)
ax.plot(grid, F0, '--o', c=colors[0], lw=2, label=r'Naive p-values')
if len(PA) > 0:
ecdfA = sm.distributions.ECDF(PA)
FA = ecdfA(grid)
ax.plot(grid, FA, '--o', c=colors[1], lw=2, label=r'$H_A$ naive')
ax.plot([0, 1], [0, 1], 'k-', lw=2)
ax.set_xlabel("Observed p-pvalue", fontsize=18)
ax.set_ylabel("Empirical CDF", fontsize=18)
ax.legend(loc='lower right', fontsize=18)
if screening:
screen = 1. / np.mean(multiple_results.loc[multiple_results.index == 0,'count'])
ax.set_title('Screening: %0.2f' % screen)
return fig
def split_pvalue_plot(multiple_results, screening=False, fig=None):
"""
Compare pvalues where we have a split_pvalue
"""
have_split = ~pd.isnull(multiple_results['split_pvalue'])
multiple_results = multiple_results.loc[have_split]
P0_s = multiple_results['split_pvalue'][~multiple_results['active']]
PA_s = multiple_results['split_pvalue'][multiple_results['active']]
# presumes we also have a pvalue
P0 = multiple_results['pvalue'][~multiple_results['active']]
PA = multiple_results['pvalue'][multiple_results['active']]
if fig is None:
fig = plt.figure()
ax = fig.gca()
fig.suptitle('Null and alternative p-values')
grid = np.linspace(0, 1, 51)
if len(P0) > 0:
ecdf0 = sm.distributions.ECDF(P0)
F0 = ecdf0(grid)
ax.plot(grid, F0, '--o', c='r', lw=2, label=r'$H_0$')
if len(PA) > 0:
ecdfA = sm.distributions.ECDF(PA)
FA = ecdfA(grid)
ax.plot(grid, FA, '--o', c='g', lw=2, label=r'$H_A$')
if len(P0_s) > 0:
ecdf0 = sm.distributions.ECDF(P0_s)
F0 = ecdf0(grid)
ax.plot(grid, F0, '-+', c='r', lw=2, label=r'$H_0$ split')
if len(PA) > 0:
ecdfA = sm.distributions.ECDF(PA_s)
FA = ecdfA(grid)
ax.plot(grid, FA, '-+', c='g', lw=2, label=r'$H_A$ split')
ax.plot([0, 1], [0, 1], 'k-', lw=2)
ax.legend(loc='lower right')
if screening:
screen = 1. / np.mean(multiple_results.loc[multiple_results.index == 0,'count'])
ax.set_title('Screening: %0.2f' % screen)
def pivot_plot_simple(multiple_results, coverage=True, color='b', label=None, fig=None):
"""
Extract pivots at truth and mle.
"""
if fig is None:
fig, _ = plt.subplots(nrows=1, ncols=2)
plot_pivots, _ = fig.axes
plot_pivots.set_title("CLT Pivots")
else:
_, plot_pivots = fig.axes
plot_pivots.set_title("Bootstrap Pivots")
if 'pivot' in multiple_results.columns:
ecdf = sm.distributions.ECDF(multiple_results['pivot'])
elif 'truth' in multiple_results.columns:
ecdf = sm.distributions.ECDF(multiple_results['truth'])
G = np.linspace(0, 1)
F_pivot = ecdf(G)
#print(color)
plot_pivots.plot(G, F_pivot, '-o', c=color, lw=2, label=label)
plot_pivots.plot([0, 1], [0, 1], 'k-', lw=2)
plot_pivots.set_xlim([0, 1])
plot_pivots.set_ylim([0, 1])
return fig
def pivot_plot_2in1(multiple_results, coverage=True, color='b', label=None, fig=None):
"""
Extract pivots at truth and mle.
"""
if fig is None:
fig = plt.figure()
ax = fig.gca()
fig.suptitle('Plugin CLT and bootstrap pivots')
if 'pivot' in multiple_results.columns:
ecdf = sm.distributions.ECDF(multiple_results['pivot'])
elif 'truth' in multiple_results.columns:
ecdf = sm.distributions.ECDF(multiple_results['truth'])
elif 'pvalue' in multiple_results.columns:
ecdf = sm.distributions.ECDF(multiple_results['pvalue'])
G = np.linspace(0, 1)
F_pivot = ecdf(G)
#print(color)
ax.plot(G, F_pivot, '-o', c=color, lw=2, label=label)
ax.plot([0, 1], [0, 1], 'k-', lw=2)
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ax.legend(loc='lower right')
return fig
def pivot_plot_plus_naive(multiple_results, coverage=True, color='b', label=None, fig=None):
"""
Extract pivots at truth and mle.
"""
if fig is None:
fig = plt.figure()
ax = fig.gca()
fig.suptitle('Lee et al. and naive p-values')
if 'pivot' in multiple_results.columns:
ecdf = sm.distributions.ECDF(multiple_results['pivot'])
elif 'truth' in multiple_results.columns:
ecdf = sm.distributions.ECDF(multiple_results['truth'])
elif 'pvalue' in multiple_results.columns:
ecdf = sm.distributions.ECDF(multiple_results['pvalue'])
G = np.linspace(0, 1)
F_pivot = ecdf(G)
#print(color)
ax.plot(G, F_pivot, '-o', c=color, lw=2, label="Lee et al. p-values")
ax.plot([0, 1], [0, 1], 'k-', lw=2)
if 'naive_pvalues' in multiple_results.columns:
ecdf_naive = sm.distributions.ECDF(multiple_results['naive_pvalues'])
F_naive = ecdf_naive(G)
ax.plot(G, F_naive, '-o', c='r', lw=2, label="Naive p-values")
ax.plot([0, 1], [0, 1], 'k-', lw=2)
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ax.set_xlabel("Observed value", fontsize=18)
ax.set_ylabel("Empirical CDF", fontsize=18)
ax.legend(loc='lower right', fontsize=18)
return fig
def pivot_plot(multiple_results, coverage=True, color='b', label=None, fig=None):
"""
Extract pivots at truth and mle.
"""
if fig is None:
fig, _ = plt.subplots(nrows=1, ncols=2)
plot_pvalues_mle, plot_pvalues_truth = fig.axes
ecdf_mle = sm.distributions.ECDF(multiple_results['mle'])
G = np.linspace(0, 1)
F_MLE = ecdf_mle(G)
print(color)
plot_pvalues_mle.plot(G, F_MLE, '-o', c=color, lw=2, label=label)
plot_pvalues_mle.plot([0, 1], [0, 1], 'k-', lw=2)
plot_pvalues_mle.set_title("Pivots at the unpenalized MLE")
plot_pvalues_mle.set_xlim([0, 1])
plot_pvalues_mle.set_ylim([0, 1])
plot_pvalues_mle.legend(loc='lower right')
ecdf_truth = sm.distributions.ECDF(multiple_results['truth'])
F_true = ecdf_truth(G)
plot_pvalues_truth.plot(G, F_true, '-o', c=color, lw=2, label=label)
plot_pvalues_truth.plot([0, 1], [0, 1], 'k-', lw=2)
plot_pvalues_truth.set_title("Pivots at the truth (by tilting)")
plot_pvalues_truth.set_xlim([0, 1])
plot_pvalues_truth.set_ylim([0, 1])
plot_pvalues_truth.legend(loc='lower right')
if coverage:
if 'naive_cover' in multiple_results.columns:
fig.suptitle('Coverage: %0.2f, Naive: %0.2f' % (np.mean(multiple_results['cover']),
np.mean(multiple_results['naive_cover'])))
else:
fig.suptitle('Coverage: %0.2f' % np.mean(multiple_results['cover']))
return fig
def boot_clt_plot(multiple_results, coverage=True, label=None, fig=None, active=True, inactive=True):
"""
Extract pivots at truth and mle.
"""
test = np.zeros_like(multiple_results['active'])
if active:
test += multiple_results['active']
if inactive:
test += ~multiple_results['active']
multiple_results = multiple_results[test]
print(test.sum(), test.shape)
if fig is None:
fig = plt.figure()
ax = fig.gca()
ecdf_clt = sm.distributions.ECDF(multiple_results['pivots_clt'])
G = np.linspace(0, 1)
F_MLE = ecdf_clt(G)
ax.plot(G, F_MLE, '-o', c='b', lw=2, label='CLT')
ax.plot([0, 1], [0, 1], 'k-', lw=2)
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ecdf_boot = sm.distributions.ECDF(multiple_results['pivots_boot'])
F_true = ecdf_boot(G)
ax.plot(G, F_true, '-o', c='g', lw=2, label='Bootstrap')
ax.plot([0, 1], [0, 1], 'k-', lw=2)
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ax.legend(loc='lower right')
#plot_pvalues_boot.legend(loc='lower right')
if coverage:
if 'covered_split' in multiple_results.columns:
fig.suptitle('CLT Coverage: %0.2f, Boot: %0.2f, Naive: %0.2f, Split: %0.2f' % (np.mean(multiple_results['covered_clt']),
np.mean(multiple_results['covered_boot']), np.mean(multiple_results['covered_naive']),
np.mean(multiple_results['covered_split'])))
else:
fig.suptitle('CLT Coverage: %0.2f, Boot: %0.2f, Naive: %0.2f' % (np.mean(multiple_results['covered_clt']),
np.mean(multiple_results['covered_boot']),
np.mean(multiple_results['covered_naive'])))
return fig
def compute_pivots(multiple_results):
if 'truth' in multiple_results.columns:
pivots = multiple_results['truth']
return {'pivot (mean, SD, type I):': (np.mean(pivots), np.std(pivots), np.mean(pivots < 0.05))}
if 'pvalue' in multiple_results.columns:
pivots = multiple_results['pvalue']
return {'selective pvalues (mean, SD, type I):': (np.mean(pivots), np.std(pivots), np.mean(pivots < 0.05))}
return {}
def compute_naive_pivots(multiple_results):
if 'naive_pvalues' in multiple_results.columns:
pivots = multiple_results['naive_pvalues']
return {'naive pvalues (mean, SD, type I):': (np.mean(pivots), np.std(pivots), np.mean(pivots < 0.05))}
return {}
def boot_clt_pivots(multiple_results):
pivot_summary = {}
if 'pivots_clt' in multiple_results.columns:
pivots_clt = multiple_results['pivots_clt']
pivot_summary['pivots_clt'] = {'CLT pivots (mean, SD, type I):': (np.mean(pivots_clt), np.std(pivots_clt), np.mean(pivots_clt < 0.05))}
if 'pivots_boot' in multiple_results.columns:
pivots_boot = multiple_results['pivots_boot']
pivot_summary['pivots_boot'] = {'Bootstrap pivots (mean, SD, type I):': (np.mean(pivots_boot), np.std(pivots_boot), np.mean(pivots_boot < 0.05))}
if 'pivot' in multiple_results.columns:
pivots = multiple_results['pivot']
pivot_summary['pivots'] = {'pivots (mean, SD, type I):': (np.mean(pivots), np.std(pivots), np.mean(pivots < 0.05))}
if 'naive_pvalues' in multiple_results.columns:
naive_pvalues = multiple_results['naive_pvalues']
pivot_summary['naive_pvalues'] = {'pivots (mean, SD, type I):': (np.mean(naive_pvalues), np.std(naive_pvalues), np.mean(naive_pvalues < 0.05))}
return pivot_summary
def compute_coverage(multiple_results):
result = {}
if 'naive_cover' in multiple_results.columns:
result['naive coverage'] = np.mean(multiple_results['naive_cover'])
if 'cover' in multiple_results.columns:
result['selective coverage'] = np.mean(multiple_results['cover'])
return result
def boot_clt_coverage(multiple_results): #
result = {}
if 'covered_naive' in multiple_results.columns:
result['naive coverage'] = np.mean(multiple_results['covered_naive'])
if 'covered_boot' in multiple_results.columns:
result['boot coverage'] = np.mean(multiple_results['covered_boot'])
if 'covered_clt' in multiple_results.columns:
result['clt coverage'] = np.mean(multiple_results['covered_clt'])
if 'covered_split' in multiple_results.columns:
result['split coverage'] = np.mean(multiple_results['covered_split'])
return result
def compute_lengths(multiple_results):
result = {}
if 'ci_length_clt' in multiple_results.columns:
result['ci_length_clt'] = np.mean(multiple_results['ci_length_clt'])
if 'ci_length_boot' in multiple_results.columns:
result['ci_length_boot'] = np.mean(multiple_results['ci_length_boot'])
if 'ci_length_split' in multiple_results.columns:
result['ci_length_split'] =
|
np.mean(multiple_results['ci_length_split'])
|
numpy.mean
|
# -- coding: utf8 --
r"""
EOS object for SAFT-:math:`\gamma`-Mie
Equations referenced in this code are from <NAME> et al J. Chem. Phys. 140 054107 2014
"""
import numpy as np
import logging
import despasito.equations_of_state.eos_toolbox as tb
from despasito.equations_of_state import constants
import despasito.equations_of_state.saft.saft_toolbox as stb
from despasito.equations_of_state.saft import Aassoc
from .compiled_modules.ext_gamma_mie_python import prefactor, calc_Iij
logger = logging.getLogger(__name__)
try:
import cython
flag_cython = True
except ModuleNotFoundError:
flag_cython = False
logger.warning("Cython package is unavailable, using Numba")
def _import_supporting_functions(method_stat=None):
""" Import appropriate functions for compilation mode
"""
    if method_stat is None or method_stat.fortran or method_stat.python:
import despasito.equations_of_state.saft.compiled_modules.ext_gamma_mie_python as cm
elif method_stat.cython and flag_cython:
try:
import despasito.equations_of_state.saft.compiled_modules.ext_gamma_mie_cython as cm
except Exception:
raise ImportError("Cython package is available but module: despasito.equations_of_state.saft.compiled_modules.ext_gamma_mie_cython, has not been compiled.")
elif method_stat.numba or not flag_cython:
import despasito.equations_of_state.saft.compiled_modules.ext_gamma_mie_numba as cm
else:
raise ValueError("Unknown instructions for importing supportive functions of SAFT")
return cm
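# Illustrative note (not a definitive API): `method_stat` is expected to expose boolean flags
# such as .python, .fortran, .cython and .numba; passing None falls back to the pure-python
# backend, e.g.
#   cm = _import_supporting_functions(None)   # -> ext_gamma_mie_python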
class SaftType:
r"""
Object of SAFT-𝛾-Mie
Parameters
----------
beads : list[str]
List of unique bead names used among components
molecular_composition : numpy.ndarray
        :math:`\nu_{i,k}`. Array containing the number of components by the number of bead types. Defines the number of each type of group in each component.
bead_library : dict
A dictionary where bead names are the keys to access EOS self interaction parameters:
- epsilon: :math:`\epsilon_{k,k}/k_B`, Energy well depth scaled by Boltzmann constant
- sigma: :math:`\sigma_{k,k}`, Size parameter [nm]
- mass: Bead mass [kg/mol]
- lambdar: :math:`\lambda^{r}_{k,k}`, Exponent of repulsive term between groups of type k
- lambdaa: :math:`\lambda^{a}_{k,k}`, Exponent of attractive term between groups of type k
- Sk: Optional, default=1, Shape factor, reflects the proportion with which a given segment contributes to the total free energy
- Vks: Optional, default=1, Number of segments in this molecular group
cross_library : dict, Optional, default={}
Optional library of bead cross interaction parameters. As many or as few of the desired parameters may be defined for whichever group combinations are desired.
- epsilon: :math:`\epsilon_{k,k}/k_B`, Energy well depth scaled by Boltzmann constant
- sigma: :math:`\sigma_{k,k}`, Size parameter [nm]
- mass: Bead mass [kg/mol]
- lambdar: :math:`\lambda^{r}_{k,k}`, Exponent of repulsive term between groups of type k
- lambdaa: :math:`\lambda^{a}_{k,k}`, Exponent of attractive term between groups of type k
num_rings : list
Number of rings in each molecule. This will impact the chain contribution to the Helmholtz energy.
Attributes
----------
beads : list[str]
List of unique bead names used among components
bead_library : dict
A dictionary where bead names are the keys to access EOS self interaction parameters. See entry in **Parameters** section.
cross_library : dict, Optional, default={}
Optional library of bead cross interaction parameters. As many or as few of the desired parameters may be defined for whichever group combinations are desired. Any interaction parameters that aren't provided are computed with the appropriate ``combining_rules``. See entry in **Parameters** section.
Aideal_method : str
"Abroglie" the default functional form of the ideal gas contribution of the Helmholtz energy
residual_helmholtz_contributions : list[str]
List of methods from the specified ``saft_source`` representing contributions to the Helmholtz energy that are functions of density, temperature, and composition. For this variant, [``Amonomer``, ``Achain``]
parameter_types : list[str]
This list of parameter names, "epsilon", "lambdar", "lambdaa", "sigma", and/or "Sk" as well as parameters for the main saft class.
parameter_bound_extreme : dict
With each parameter name as an entry representing a list with the minimum and maximum feasible parameter value.
- epsilon: [100.,1000.]
- lambdar: [6.0,100.]
- lambdaa: [3.0,100.]
- sigma: [0.1,10.0]
- Sk: [0.1,1.0]
combining_rules : dict
Contains functional form and additional information for calculating cross interaction parameters that are not found in `cross_library`. Function must be one of those contained in :mod:`~despasito.equations_of_state.combining_rule_types`. The default values are:
- sigma: {"function": "mean"}
- lambdar: {"function": "mie_exponent"}
- lambdar: {"function": "mie_exponent"}
- epsilon: {"function": "volumetric_geometric_mean", "weighting_parameters": ["sigma"]}
eos_dict : dict
Dictionary of parameters and specific settings
        - molecular_composition (numpy.ndarray) - :math:`\nu_{i,k}`. Array containing the number of components by the number of bead types. Defines the number of each type of group in each component.
- num_rings (list) - Number of rings in each molecule. This will impact the chain contribution to the Helmholtz energy.
- Sk (numpy.ndarray) - Shape factor, reflects the proportion which a given segment contributes to the total free energy. Length of ``beads`` array.
- Vks (numpy.ndarray) - Number of segments in this molecular group. Length of ``beads`` array.
- Ckl (numpy.ndarray) - Matrix of Mie potential prefactors between beads (l,k)
- epsilonkl (numpy.ndarray) - Matrix of Mie potential well depths for groups (k,l)
- sigmakl (numpy.ndarray) - Matrix of bead diameters (k,l)
- lambdarkl (numpy.ndarray) - Matrix of repulsive Mie exponent for groups (k,l)
- lambdaakl (numpy.ndarray) - Matrix of attractive Mie exponent for groups (k,l)
- dkl (numpy.ndarray) - Matrix of hard sphere equivalent for each bead and interaction between them (l,k)
- x0kl (numpy.ndarray) - Matrix of sigmakl/dkl, sigmakl is the Mie radius for groups (k,l)
- Cmol2seg (float) - Conversion factor from from molecular number density, :math:`\rho`, to segment (i.e. group) number density, :math:`\rho_S`.
- xskl (numpy.ndarray) - Matrix of mole fractions of bead (i.e. segment or group) k multiplied by that of bead l
- alphakl (np.array) - (Ngroup,Ngroup) "A dimensionless form of the integrated vdW energy of the Mie potential" eq. 33
- epsilonii_avg (numpy.ndarray) - Matrix of molecule averaged well depths (i.j)
- sigmaii_avg (numpy.ndarray) - Matrix of molecule averaged Mie diameter (i.j)
- lambdaaii_avg (numpy.ndarray) - Matrix of molecule averaged Mie potential attractive exponents (i.j)
        - lambdarii_avg (numpy.ndarray) - Matrix of molecule averaged Mie potential repulsive exponents (i.j)
- dii_eff (numpy.ndarray) - Matrix of mole averaged hard sphere equivalent for each bead and interaction between them (i.j)
- x0ii (numpy.ndarray) - Matrix of sigmaii_avg/dii_eff, sigmaii_avg is the average molecular Mie radius and dii_eff the average molecular hard sphere diameter
ncomp : int
Number of components in the system
nbeads : int
Number of beads in system that are shared among components
xi : numpy.ndarray
Mole fraction of each molecule in mixture. Default initialization is np.nan
T : float
Temperature value is initially defined as NaN for a placeholder until temperature dependent attributes are initialized by using a method of this class.
"""
def __init__(self, **kwargs):
if "method_stat" in kwargs:
self.method_stat = kwargs["method_stat"]
del kwargs["method_stat"]
else:
self.method_stat = None
self._cm = _import_supporting_functions(self.method_stat)
self.Aideal_method = "Abroglie"
self.parameter_types = ["epsilon", "sigma", "lambdar", "lambdaa", "Sk"]
self._parameter_defaults = {
"epsilon": None,
"lambdar": None,
"lambdaa": None,
"sigma": None,
"Sk": 1.0,
"Vks": 1.0,
}
self.parameter_bound_extreme = {
"epsilon": [100.0, 1000.0],
"sigma": [0.1, 1.0],
"lambdar": [6.0, 100.0],
"lambdaa": [3.0, 100.0],
"Sk": [0.1, 1.0],
}
self.residual_helmholtz_contributions = ["Amonomer", "Achain"]
self.combining_rules = {
"sigma": {"function": "mean"},
"lambdar": {"function": "mie_exponent"},
"lambdaa": {"function": "mie_exponent"},
"epsilon": {
"function": "volumetric_geometric_mean",
"weighting_parameters": ["sigma"],
},
}
self._mixing_temp_dependence = None
if not hasattr(self, "eos_dict"):
self.eos_dict = {}
needed_attributes = ["molecular_composition", "beads", "bead_library"]
for key in needed_attributes:
if key not in kwargs:
raise ValueError(
"The one of the following inputs is missing: {}".format(
", ".join(tmp)
)
)
elif key == "molecular_composition":
self.eos_dict[key] = kwargs[key]
elif not hasattr(self, key):
setattr(self, key, kwargs[key])
self.bead_library = tb.check_bead_parameters(
self.bead_library, self._parameter_defaults
)
if "cross_library" not in kwargs:
self.cross_library = {}
else:
self.cross_library = kwargs["cross_library"]
if "Vks" not in self.eos_dict:
self.eos_dict["Vks"] = tb.extract_property(
"Vks", self.bead_library, self.beads, default=1.0
)
if "Sk" not in self.eos_dict:
self.eos_dict["Sk"] = tb.extract_property(
"Sk", self.bead_library, self.beads, default=1.0
)
# Initialize temperature attribute
if not hasattr(self, "T"):
self.T = np.nan
if not hasattr(self, "xi"):
self.xi = np.nan
if not hasattr(self, "nbeads") or not hasattr(self, "ncomp"):
self.ncomp, self.nbeads = np.shape(self.eos_dict["molecular_composition"])
# Initiate cross interaction terms
output = tb.cross_interaction_from_dict(
self.beads,
self.bead_library,
self.combining_rules,
cross_library=self.cross_library,
)
self.eos_dict["sigmakl"] = output["sigma"]
self.eos_dict["epsilonkl"] = output["epsilon"]
self.eos_dict["lambdaakl"] = output["lambdaa"]
self.eos_dict["lambdarkl"] = output["lambdar"]
# compute alphakl eq. 33
self.eos_dict["Ckl"] = prefactor(
self.eos_dict["lambdarkl"], self.eos_dict["lambdaakl"]
)
self.eos_dict["alphakl"] = self.eos_dict["Ckl"] * (
(1.0 / (self.eos_dict["lambdaakl"] - 3.0))
- (1.0 / (self.eos_dict["lambdarkl"] - 3.0))
)
# Initiate average interaction terms
self.calc_component_averaged_properties()
if "num_rings" in kwargs:
self.eos_dict["num_rings"] = kwargs["num_rings"]
logger.info(
"Accepted component ring structure: {}".format(kwargs["num_rings"])
)
else:
self.eos_dict["num_rings"] = np.zeros(
len(self.eos_dict["molecular_composition"])
)
def calc_component_averaged_properties(self):
r"""
Calculate component averaged properties specific to SAFT-𝛾-Mie for the chain term.
Attributes
----------
output : dict
Dictionary of outputs, the following possibilities are calculated if all relevant beads have those properties.
- epsilonii_avg (numpy.ndarray) - Matrix of molecule averaged well depths
- sigmaii_avg (numpy.ndarray) - Matrix of molecule averaged Mie diameter
- lambdaaii_avg (numpy.ndarray) - Matrix of molecule averaged Mie potential attractive exponents
            - lambdarii_avg (numpy.ndarray) - Matrix of molecule averaged Mie potential repulsive exponents
"""
zki = np.zeros((self.ncomp, self.nbeads), float)
zkinorm = np.zeros(self.ncomp, float)
output = {}
output["epsilonii_avg"] = np.zeros(self.ncomp, float)
output["sigmaii_avg"] = np.zeros(self.ncomp, float)
output["lambdarii_avg"] = np.zeros(self.ncomp, float)
output["lambdaaii_avg"] = np.zeros(self.ncomp, float)
# compute zki
for i in range(self.ncomp):
for k in range(self.nbeads):
zki[i, k] = (
self.eos_dict["molecular_composition"][i, k]
* self.eos_dict["Vks"][k]
* self.eos_dict["Sk"][k]
)
zkinorm[i] += zki[i, k]
for i in range(self.ncomp):
for k in range(self.nbeads):
zki[i, k] = zki[i, k] / zkinorm[i]
for i in range(self.ncomp):
for k in range(self.nbeads):
for l in range(self.nbeads):
output["sigmaii_avg"][i] += (
zki[i, k] * zki[i, l] * self.eos_dict["sigmakl"][k, l] ** 3
)
output["epsilonii_avg"][i] += (
zki[i, k] * zki[i, l] * self.eos_dict["epsilonkl"][k, l]
)
output["lambdarii_avg"][i] += (
zki[i, k] * zki[i, l] * self.eos_dict["lambdarkl"][k, l]
)
output["lambdaaii_avg"][i] += (
zki[i, k] * zki[i, l] * self.eos_dict["lambdaakl"][k, l]
)
output["sigmaii_avg"][i] = output["sigmaii_avg"][i] ** (1 / 3.0)
self.eos_dict.update(output)
def Ahard_sphere(self, rho, T, xi):
r"""
        Outputs the hard-sphere contribution to the Helmholtz energy, :math:`A^{HS}/Nk_{B}T`.
Parameters
----------
rho : numpy.ndarray
Number density of system [:math:`mol/m^3`]
T : float
Temperature of the system [K]
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Returns
-------
Ahard_sphere : numpy.ndarray
Helmholtz energy of monomers for each density given.
"""
rho = self._check_density(rho)
self._check_temperature_dependent_parameters(T)
self._check_composition_dependent_parameters(xi)
eta = np.zeros((np.size(rho), 4))
for m in range(4):
eta[:, m] = (
rho
* constants.molecule_per_nm3
* self.eos_dict["Cmol2seg"]
* (
np.sum(
np.sqrt(np.diag(self.eos_dict["xskl"]))
* (np.diag(self.eos_dict["dkl"]) ** m)
)
* (np.pi / 6.0)
)
)
tmp = 6.0 / (np.pi * rho * constants.molecule_per_nm3)
if self.ncomp == 1:
tmp1 = 0
else:
tmp1 = np.log1p(-eta[:, 3]) * (
eta[:, 2] ** 3 / (eta[:, 3] ** 2) - eta[:, 0]
)
tmp2 = 3.0 * eta[:, 2] / (1 - eta[:, 3]) * eta[:, 1]
tmp3 = eta[:, 2] ** 3 / (eta[:, 3] * ((1.0 - eta[:, 3]) ** 2))
AHS = tmp * (tmp1 + tmp2 + tmp3)
return AHS
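    # Illustrative sketch (hypothetical numbers, not the original EOS workflow): for a
    # single segment type the four moments computed above reduce to
    # eta_m = (pi/6) * rho_s * d**m, where rho_s is the segment density in 1/nm^3 and
    # eta_3 is the usual packing fraction.
    @staticmethod
    def _example_eta_moments():
        import numpy as np
        rho_s = 2.5   # segment number density [1/nm^3] (hypothetical)
        d = 0.4       # hard-sphere diameter [nm] (hypothetical)
        return np.array([(np.pi / 6.0) * rho_s * d ** m for m in range(4)])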
def Afirst_order(self, rho, T, xi, zetax=None):
r"""
Outputs :math:`A^{1st order}/Nk_{B}T`. This is the first order term in the high-temperature perturbation expansion
Parameters
----------
rho : numpy.ndarray
Number density of system [:math:`mol/m^3`]
T : float
Temperature of the system [K]
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
zetax : numpy.ndarray, Optional, default=None
Matrix of hypothetical packing fraction based on hard sphere diameter for groups (k,l)
Returns
-------
Afirst_order : numpy.ndarray
            First-order perturbation contribution to the Helmholtz energy for each density given.
"""
rho = self._check_density(rho)
self._check_temperature_dependent_parameters(T)
self._check_composition_dependent_parameters(xi)
if zetax is None:
zetax = stb.calc_zetax(
rho,
self.eos_dict["Cmol2seg"],
self.eos_dict["xskl"],
self.eos_dict["dkl"],
)
# compute components of eq. 19
a1kl = self._cm.calc_a1ii(
rho,
self.eos_dict["Cmol2seg"],
self.eos_dict["dkl"],
self.eos_dict["lambdaakl"],
self.eos_dict["lambdarkl"],
self.eos_dict["x0kl"],
self.eos_dict["epsilonkl"],
zetax,
)
# eq. 18
a1 = np.einsum("ijk,jk->i", a1kl, self.eos_dict["xskl"])
A1 = (self.eos_dict["Cmol2seg"] / T) * a1 # Units of K
return A1
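    # Illustrative sketch: the einsum in eq. 18 contracts the group indices (k, l) of
    # a1kl (n_densities x n_beads x n_beads) against the segment fractions xskl, leaving
    # one value per density. Random arrays are used here purely for demonstration.
    @staticmethod
    def _example_group_contraction():
        import numpy as np
        a1kl = np.random.rand(5, 3, 3)   # hypothetical per-density group matrix
        xskl = np.random.rand(3, 3)      # hypothetical segment-fraction matrix
        a1 = np.einsum("ijk,jk->i", a1kl, xskl)
        assert np.allclose(a1, (a1kl * xskl).sum(axis=(1, 2)))
        return a1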
def Asecond_order(self, rho, T, xi, zetaxstar=None, zetax=None, KHS=None):
r"""
Outputs :math:`A^{2nd order}/Nk_{B}T`. This is the second order term in the high-temperature perturbation expansion
Parameters
----------
rho : numpy.ndarray
Number density of system [:math:`mol/m^3`]
T : float
Temperature of the system [K]
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
zetaxstar : numpy.ndarray, Optional, default=None
Matrix of hypothetical packing fraction based on sigma for groups (k,l)
zetax : numpy.ndarray, Optional, default=None
Matrix of hypothetical packing fraction based on hard sphere diameter for groups (k,l)
KHS : numpy.ndarray, Optional, default=None
(length of densities) isothermal compressibility of system with packing fraction zetax
Returns
-------
Asecond_order : numpy.ndarray
            Second-order perturbation contribution to the Helmholtz energy for each density given.
"""
rho = self._check_density(rho)
self._check_temperature_dependent_parameters(T)
self._check_composition_dependent_parameters(xi)
if zetax is None:
zetax = stb.calc_zetax(
rho,
self.eos_dict["Cmol2seg"],
self.eos_dict["xskl"],
self.eos_dict["dkl"],
)
if zetaxstar is None:
zetaxstar = stb.calc_zetaxstar(
rho,
self.eos_dict["Cmol2seg"],
self.eos_dict["xskl"],
self.eos_dict["sigmakl"],
)
if KHS is None:
KHS = stb.calc_KHS(zetax)
## compute a2kl, eq. 30 #####
# compute f1, f2, and f3 for eq. 32
fmlist123 = self.calc_fm(self.eos_dict["alphakl"], np.array([1, 2, 3]))
chikl = (
np.einsum("i,jk", zetaxstar, fmlist123[0])
+ np.einsum("i,jk", zetaxstar ** 5, fmlist123[1])
+ np.einsum("i,jk", zetaxstar ** 8, fmlist123[2])
)
a1s_2la = self._cm.calc_a1s(
rho,
self.eos_dict["Cmol2seg"],
2.0 * self.eos_dict["lambdaakl"],
zetax,
self.eos_dict["epsilonkl"],
self.eos_dict["dkl"],
)
a1s_2lr = self._cm.calc_a1s(
rho,
self.eos_dict["Cmol2seg"],
2.0 * self.eos_dict["lambdarkl"],
zetax,
self.eos_dict["epsilonkl"],
self.eos_dict["dkl"],
)
a1s_lalr = self._cm.calc_a1s(
rho,
self.eos_dict["Cmol2seg"],
self.eos_dict["lambdaakl"] + self.eos_dict["lambdarkl"],
zetax,
self.eos_dict["epsilonkl"],
self.eos_dict["dkl"],
)
B_2la = self._cm.calc_Bkl(
rho,
2.0 * self.eos_dict["lambdaakl"],
self.eos_dict["Cmol2seg"],
self.eos_dict["dkl"],
self.eos_dict["epsilonkl"],
self.eos_dict["x0kl"],
zetax,
)
B_2lr = self._cm.calc_Bkl(
rho,
2.0 * self.eos_dict["lambdarkl"],
self.eos_dict["Cmol2seg"],
self.eos_dict["dkl"],
self.eos_dict["epsilonkl"],
self.eos_dict["x0kl"],
zetax,
)
B_lalr = self._cm.calc_Bkl(
rho,
self.eos_dict["lambdaakl"] + self.eos_dict["lambdarkl"],
self.eos_dict["Cmol2seg"],
self.eos_dict["dkl"],
self.eos_dict["epsilonkl"],
self.eos_dict["x0kl"],
zetax,
)
a2kl = (
(self.eos_dict["x0kl"] ** (2.0 * self.eos_dict["lambdaakl"]))
* (a1s_2la + B_2la)
/ constants.molecule_per_nm3
- (
(
2.0
* self.eos_dict["x0kl"]
** (self.eos_dict["lambdaakl"] + self.eos_dict["lambdarkl"])
)
* (a1s_lalr + B_lalr)
/ constants.molecule_per_nm3
)
+ (
(self.eos_dict["x0kl"] ** (2.0 * self.eos_dict["lambdarkl"]))
* (a1s_2lr + B_2lr)
/ constants.molecule_per_nm3
)
)
a2kl *= (
(1.0 + chikl) * self.eos_dict["epsilonkl"] * (self.eos_dict["Ckl"] ** 2)
) # *(KHS/2.0)
a2kl = np.einsum("i,ijk->ijk", KHS / 2.0, a2kl)
# eq. 29
a2 = np.einsum("ijk,jk->i", a2kl, self.eos_dict["xskl"])
A2 = (self.eos_dict["Cmol2seg"] / (T ** 2)) * a2
return A2
def Athird_order(self, rho, T, xi, zetaxstar=None):
r"""
Outputs :math:`A^{3rd order}/Nk_{B}T`. This is the third order term in the high-temperature perturbation expansion
Parameters
----------
rho : numpy.ndarray
Number density of system [:math:`mol/m^3`]
T : float
Temperature of the system [K]
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
zetaxstar : numpy.ndarray, Optional, default=None
Matrix of hypothetical packing fraction based on sigma for groups (k,l)
Returns
-------
Athird_order : numpy.ndarray
            Third-order perturbation contribution to the Helmholtz energy for each density given.
"""
rho = self._check_density(rho)
self._check_temperature_dependent_parameters(T)
self._check_composition_dependent_parameters(xi)
if zetaxstar is None:
zetaxstar = stb.calc_zetaxstar(
rho,
self.eos_dict["Cmol2seg"],
self.eos_dict["xskl"],
self.eos_dict["sigmakl"],
)
# compute a3kl
fmlist456 = self.calc_fm(self.eos_dict["alphakl"], np.array([4, 5, 6]))
a3kl = np.einsum(
"i,jk", zetaxstar, -(self.eos_dict["epsilonkl"] ** 3) * fmlist456[0]
) * np.exp(
np.einsum("i,jk", zetaxstar, fmlist456[1])
+ np.einsum("i,jk", zetaxstar ** 2, fmlist456[2])
) # a3kl=-(epsilonkl**3)*fmlist456[0]*zetaxstar*np.exp((fmlist456[1]*zetaxstar)+(fmlist456[2]*(zetaxstar**2)))
# eq. 37
a3 = np.einsum("ijk,jk->i", a3kl, self.eos_dict["xskl"])
A3 = (self.eos_dict["Cmol2seg"] / (T ** 3)) * a3
return A3
def Amonomer(self, rho, T, xi):
r"""
Outputs the monomer contribution of the Helmholtz energy, :math:`A^{mono.}/Nk_{B}T`.
This term is composed of: :math:`A^{HS}/Nk_{B}T + A^{1st order}/Nk_{B}T + A^{2nd order}/Nk_{B}T` + :math:`A^{3rd order}/Nk_{B}T`
Parameters
----------
rho : numpy.ndarray
Number density of system [:math:`mol/m^3`]
T : float
Temperature of the system [K]
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Returns
-------
Amonomer : numpy.ndarray
Helmholtz energy of monomers for each density given.
"""
if np.all(rho >= self.density_max(xi, T, maxpack=1.0)):
raise ValueError(
"Density values should not all be greater than {}, or calc_Amono will fail in log calculation.".format(
self.density_max(xi, T)
)
)
rho = self._check_density(rho)
self._check_temperature_dependent_parameters(T)
self._check_composition_dependent_parameters(xi)
zetax = stb.calc_zetax(
rho, self.eos_dict["Cmol2seg"], self.eos_dict["xskl"], self.eos_dict["dkl"]
)
zetaxstar = stb.calc_zetaxstar(
rho,
self.eos_dict["Cmol2seg"],
self.eos_dict["xskl"],
self.eos_dict["sigmakl"],
)
Amonomer = (
self.Ahard_sphere(rho, T, xi)
+ self.Afirst_order(rho, T, xi, zetax=zetax)
+ self.Asecond_order(rho, T, xi, zetax=zetax, zetaxstar=zetaxstar)
+ self.Athird_order(rho, T, xi, zetaxstar=zetaxstar)
)
return Amonomer
def gdHS(self, rho, T, xi, zetax=None):
r"""
The zeroth order expansion term in calculating the radial distribution function of a Mie fluid.
This is also known as the hard sphere radial distribution function.
Parameters
----------
rho : numpy.ndarray
Number density of system [:math:`mol/m^3`]
T : float
Temperature of the system [K]
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
zetax : numpy.ndarray, Optional, default=None
Matrix of hypothetical packing fraction based on hard sphere diameter for groups (k,l)
Returns
-------
gdHS : numpy.ndarray
Hard sphere radial distribution function
"""
rho = self._check_density(rho)
self._check_temperature_dependent_parameters(T)
self._check_composition_dependent_parameters(xi)
if zetax is None:
zetax = stb.calc_zetax(
rho,
self.eos_dict["Cmol2seg"],
self.eos_dict["xskl"],
self.eos_dict["dkl"],
)
km = np.zeros((np.size(rho), 4))
gdHS = np.zeros((np.size(rho), np.size(xi)))
km[:, 0] = -np.log(1.0 - zetax) + (
42.0 * zetax - 39.0 * zetax ** 2 + 9.0 * zetax ** 3 - 2.0 * zetax ** 4
) / (6.0 * (1.0 - zetax) ** 3)
km[:, 1] = (zetax ** 4 + 6.0 * zetax ** 2 - 12.0 * zetax) / (
2.0 * (1.0 - zetax) ** 3
)
km[:, 2] = -3.0 * zetax ** 2 / (8.0 * (1.0 - zetax) ** 2)
km[:, 3] = (-zetax ** 4 + 3.0 * zetax ** 2 + 3.0 * zetax) / (
6.0 * (1.0 - zetax) ** 3
)
for i in range(self.ncomp):
gdHS[:, i] = np.exp(
km[:, 0]
+ km[:, 1] * self.eos_dict["x0ii"][i]
+ km[:, 2] * self.eos_dict["x0ii"][i] ** 2
+ km[:, 3] * self.eos_dict["x0ii"][i] ** 3
)
return gdHS
def g1(self, rho, T, xi, zetax=None):
r"""
Calculate the first order expansion term in calculating the radial distribution function of a Mie fluid
Parameters
----------
rho : numpy.ndarray
Number density of system [:math:`mol/m^3`]
T : float
Temperature of the system [K]
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
zetax : numpy.ndarray, Optional, default=None
Matrix of hypothetical packing fraction based on hard sphere diameter for groups (k,l)
Returns
-------
g1 : numpy.ndarray
First order expansion term in calculating the radial distribution function of a Mie fluid
"""
rho = self._check_density(rho)
self._check_temperature_dependent_parameters(T)
self._check_composition_dependent_parameters(xi)
if zetax is None:
zetax = stb.calc_zetax(
rho,
self.eos_dict["Cmol2seg"],
self.eos_dict["xskl"],
self.eos_dict["dkl"],
)
da1iidrhos = self._cm.calc_da1iidrhos(
rho,
self.eos_dict["Cmol2seg"],
self.eos_dict["dii_eff"],
self.eos_dict["lambdaaii_avg"],
self.eos_dict["lambdarii_avg"],
self.eos_dict["x0ii"],
self.eos_dict["epsilonii_avg"],
zetax,
)
a1sii_lambdaaii_avg = self._cm.calc_a1s_eff(
rho,
self.eos_dict["Cmol2seg"],
self.eos_dict["lambdaaii_avg"],
zetax,
self.eos_dict["epsilonii_avg"],
self.eos_dict["dii_eff"],
)
a1sii_lambdarii_avg = self._cm.calc_a1s_eff(
rho,
self.eos_dict["Cmol2seg"],
self.eos_dict["lambdarii_avg"],
zetax,
self.eos_dict["epsilonii_avg"],
self.eos_dict["dii_eff"],
)
Bii_lambdaaii_avg = self._cm.calc_Bkl_eff(
rho,
self.eos_dict["lambdaaii_avg"],
self.eos_dict["Cmol2seg"],
self.eos_dict["dii_eff"],
self.eos_dict["epsilonii_avg"],
self.eos_dict["x0ii"],
zetax,
)
Bii_lambdarii_avg = self._cm.calc_Bkl_eff(
rho,
self.eos_dict["lambdarii_avg"],
self.eos_dict["Cmol2seg"],
self.eos_dict["dii_eff"],
self.eos_dict["epsilonii_avg"],
self.eos_dict["x0ii"],
zetax,
)
Cii = prefactor(self.eos_dict["lambdarii_avg"], self.eos_dict["lambdaaii_avg"])
tmp1 = 1.0 / (
2.0
* np.pi
* self.eos_dict["epsilonii_avg"]
* self.eos_dict["dii_eff"] ** 3
* constants.molecule_per_nm3 ** 2
)
tmp11 = 3.0 * da1iidrhos
tmp21 = (
Cii
* self.eos_dict["lambdaaii_avg"]
* (self.eos_dict["x0ii"] ** self.eos_dict["lambdaaii_avg"])
)
tmp22 = np.einsum(
"ij,i->ij",
(a1sii_lambdaaii_avg + Bii_lambdaaii_avg),
1.0 / (rho * self.eos_dict["Cmol2seg"]),
)
tmp31 = (
Cii
* self.eos_dict["lambdarii_avg"]
* (self.eos_dict["x0ii"] ** self.eos_dict["lambdarii_avg"])
)
tmp32 = np.einsum(
"ij,i->ij",
(a1sii_lambdarii_avg + Bii_lambdarii_avg),
1.0 / (rho * self.eos_dict["Cmol2seg"]),
)
g1 = tmp1 * (tmp11 - tmp21 * tmp22 + tmp31 * tmp32)
return g1
def g2(self, rho, T, xi, zetax=None):
r"""
Calculate the second order expansion term in calculating the radial distribution function of a Mie fluid
Parameters
----------
rho : numpy.ndarray
Number density of system [:math:`mol/m^3`]
T : float
Temperature of the system [K]
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
zetax : numpy.ndarray, Optional, default=None
Matrix of hypothetical packing fraction based on hard sphere diameter for groups (k,l)
Returns
-------
g2 : numpy.ndarray
Second order expansion term in calculating the radial distribution function of a Mie fluid
"""
rho = self._check_density(rho)
self._check_temperature_dependent_parameters(T)
self._check_composition_dependent_parameters(xi)
if zetax is None:
zetax = stb.calc_zetax(
rho,
self.eos_dict["Cmol2seg"],
self.eos_dict["xskl"],
self.eos_dict["dkl"],
)
zetaxstar = stb.calc_zetaxstar(
rho,
self.eos_dict["Cmol2seg"],
self.eos_dict["xskl"],
self.eos_dict["sigmakl"],
)
KHS = stb.calc_KHS(zetax)
Cii = prefactor(self.eos_dict["lambdarii_avg"], self.eos_dict["lambdaaii_avg"])
phi7 = np.array([10.0, 10.0, 0.57, -6.7, -8.0])
alphaii = Cii * (
(1.0 / (self.eos_dict["lambdaaii_avg"] - 3.0))
- (1.0 / (self.eos_dict["lambdarii_avg"] - 3.0))
)
theta = np.exp(self.eos_dict["epsilonii_avg"] / T) - 1.0
gammacii = np.zeros((np.size(rho), np.size(xi)))
for i in range(self.ncomp):
gammacii[:, i] = (
phi7[0]
* (-
|
np.tanh(phi7[1] * (phi7[2] - alphaii[i]))
|
numpy.tanh
|
from torch.utils.data import Dataset
import numpy as np
import torch
from sklearn.preprocessing import PowerTransformer
class LightDataset(Dataset):
def __init__(self, row_id_to_idx, col_id_to_idx, propagation_scores, directed_pairs_list,
sources, terminals, normalization_method, samples_normalization_constants,
degree_feature_normalization_constants=None,
pairs_source_type=None, id_to_degree=None, train=True):
self.row_id_to_idx = row_id_to_idx
self.col_id_to_idx = col_id_to_idx
self.col_idx_to_id = {xx: x for x, xx in self.col_id_to_idx.items()}
self.propagation_scores = propagation_scores
self.source_indexes = self.get_experiment_indexes(sources)
self.terminal_indexes = self.get_experiment_indexes(terminals)
self.pairs_indexes = [(self.col_id_to_idx[pair[0]], self.col_id_to_idx[pair[1]]) for pair in directed_pairs_list]
self.longest_source = np.max([len(source) for source in sources.values()])
self.longest_terminal = np.max([len(terminal) for terminal in terminals.values()])
normalizer = self.get_normalization_method(normalization_method)
self.normalizer = normalizer(samples_normalization_constants)
self.pairs_source_type = pairs_source_type
self.idx_to_degree = {self.col_id_to_idx[id]: id_to_degree[id] for id in self.col_id_to_idx.keys()}
self.degree_normalizer = self.get_degree_normalizar(degree_feature_normalization_constants)
self.idx_to_degree = {self.col_id_to_idx[id]: self.degree_normalizer(id_to_degree[id]) for id in self.col_id_to_idx.keys()}
self.propagation_scores = self.normalizer(self.propagation_scores)
self.train = train
def __len__(self):
return len(self.pairs_indexes) * 2
def __getitem__(self, idx):
if type(idx) == torch.Tensor:
idx = idx.item()
neg_flag = idx >= len(self.pairs_indexes)
idx = np.mod(idx, len(self.pairs_indexes))
label = 0 if neg_flag else 1
pair = self.pairs_indexes[idx]
if neg_flag:
pair = (pair[1], pair[0])
from_degree = self.idx_to_degree[pair[0]]
to_degree = self.idx_to_degree[pair[1]]
pair_source_type = self.pairs_source_type[idx] if self.pairs_source_type is not None else None
source_sample = np.zeros((len(self.source_indexes), self.longest_source, 2))
terminal_sample = np.zeros((len(self.source_indexes), self.longest_terminal, 2))
for exp_idx in range(len(self.source_indexes)):
source_sample[exp_idx, :len(self.source_indexes[exp_idx]), :] =\
self.propagation_scores[:, pair][self.source_indexes[exp_idx], :]
terminal_sample[exp_idx, :len(self.terminal_indexes[exp_idx]), :] =\
self.propagation_scores[:, pair][self.terminal_indexes[exp_idx], :]
return source_sample, terminal_sample, label, pair, pair_source_type,
|
np.array([from_degree, to_degree])
|
numpy.array
|
"""
##################################################################################################
# Copyright Info : Copyright (c) <NAME> @ Hikvision Research Institute. All rights reserved.
# Filename : test_utils.py
# Abstract : function utils used in inference time
# Current Version: 1.0.0
# Date : 2021-06-15
##################################################################################################
"""
import os
import json
import glob
import collections
import numpy as np
from sklearn.cluster import KMeans
from scipy.optimize import linear_sum_assignment
import Polygon as plg
import Levenshtein
def polygon_from_points(points):
"""make a Polygon object to use with the Polygon2 class from a list of 8 points: x1,y1,x2,y2,x3,y3,x4,y4
Args:
points (list): coordinate for box with 8 points:x1,y1,x2,y2,x3,y3,x4,y4
Returns:
Polygon: Polygon object
"""
res_boxes = np.empty([1, 8], dtype='int32')
res_boxes[0, 0] = int(points[0])
res_boxes[0, 4] = int(points[1])
res_boxes[0, 1] = int(points[2])
res_boxes[0, 5] = int(points[3])
res_boxes[0, 2] = int(points[4])
res_boxes[0, 6] = int(points[5])
res_boxes[0, 3] = int(points[6])
res_boxes[0, 7] = int(points[7])
point_mat = res_boxes[0].reshape([2, 4]).T
return plg.Polygon(point_mat)
def get_union(p_d, p_g):
"""Get two polygons' union area
Args:
p_d (Polygon): Polygon object
p_g (Polygon): Polygon object
Returns:
        float: the union area between p_d and p_g
"""
area_a = p_d.area()
area_b = p_g.area()
return area_a + area_b - get_intersection(p_d, p_g)
def get_intersection(p_d, p_g):
"""Get two polygons' intersection area
Args:
p_d (Polygon): Polygon object
p_g (Polygon): Polygon object
Returns:
        float: the intersection area between p_d and p_g
"""
p_inter = p_d & p_g
if len(p_inter) == 0:
return 0
return p_inter.area()
def get_intersection_over_union(p_d, p_g):
"""Get two polygons' IOU
Args:
p_d (Polygon): Polygon object
p_g (Polygon): Polygon object
Returns:
float: the IOU area between p_d and p_g
"""
try:
return get_intersection(p_d, p_g) / get_union(p_d, p_g)
except ZeroDivisionError:
return 0
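# Illustrative usage sketch (not part of the original file): IOU of two overlapping
# axis-aligned boxes built with the helpers above. Assumes the Polygon package imported
# as `plg` at the top of this module is installed.
def _example_polygon_iou():
    box_a = [0, 0, 10, 0, 10, 10, 0, 10]   # x1,y1,...,x4,y4 (hypothetical)
    box_b = [5, 0, 15, 0, 15, 10, 5, 10]
    p_a = polygon_from_points(box_a)
    p_b = polygon_from_points(box_b)
    # the squares overlap over half of each, so the expected IOU is 50 / 150 = 1/3
    return get_intersection_over_union(p_a, p_b)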
def hungary(task_matrix):
"""Use Hungary algorithm to calculate the maximum match matrix
Args:
task_matrix (numpy array): two-dimensional matrix
Returns:
list(int): the row indices of task_matrix
Returns:
list(int): the matched col indices of task_matrix
"""
row_ind, col_ind = linear_sum_assignment(task_matrix, maximize=True)
return row_ind, col_ind
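# Illustrative usage sketch: match two detections to three ground-truth boxes by
# maximizing a made-up IOU score matrix with the wrapper above.
def _example_hungary():
    score_matrix = np.array([[0.9, 0.1, 0.0],
                             [0.2, 0.0, 0.8]])
    row_ind, col_ind = hungary(score_matrix)
    # expected pairing: row 0 -> col 0, row 1 -> col 2
    return list(zip(row_ind, col_ind))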
def edit_dist_iou(first, second):
"""Calculate the edit distance iou between two words
Args:
first(str): the compared word 1
second(str): the compared word 2
Returns:
float: the edit distance iou
"""
edit_dist = Levenshtein.distance(first, second)
inter = max(len(first), len(second)) - edit_dist
union = len(first) + len(second) - inter
    return float(inter) / float(union)
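# Illustrative usage sketch (assumes the python-Levenshtein package imported above is
# installed): for "street" vs "streat" the edit distance is 1, the overlap is
# max(6, 6) - 1 = 5 and the union is 6 + 6 - 5 = 7, so the edit-distance IOU is 5/7.
def _example_edit_dist_iou():
    return edit_dist_iou("street", "streat")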
def instance_to_list(img_info, key):
"""extract each bbox from one img detection result to construct a list
Args:
img_info(dict): one img detection result
key(str): img filename
Returns:
list(dict): the list of all detection bboxes from one img
"""
reslist = list()
bboxes = img_info['content_ann']['bboxes']
texts = img_info['content_ann'].get('texts', [None]*len(bboxes))
labels = img_info['content_ann'].get('labels', [None]*len(bboxes))
scores = img_info['content_ann'].get('scores', [None] * len(bboxes))
track_id = img_info['content_ann'].get('trackID', [None] * len(bboxes))
for i, bbox in enumerate(bboxes):
if len(bbox) != 8: # Filter out defective boxes and polygonal boxes
continue
reslist.append({
'filename': key,
'ann': {
'text': texts[i],
'bbox': bbox,
'label': labels[i],
'score': scores[i],
'trackID': track_id[i]
}
})
return reslist
def cal_cossim(array_a, array_b):
"""Calculate the cosine similarity between feature a and b
Args:
array_a(numpy array): the 2-dimensional feature a
array_b(numpy array): the 2-dimensional feature b
Returns:
float: the cosine similarity between [-1, 1]
"""
assert array_a.shape == array_b.shape, 'array_a and array_b should be same shape'
a_norm = np.linalg.norm(array_a, axis=1, keepdims=True)
b_norm = np.linalg.norm(array_b, axis=1, keepdims=True)
sim = np.dot(array_a, array_b.T) / (a_norm * b_norm)
return sim
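# Illustrative usage sketch: the cosine similarity of a feature with itself is 1 and
# with its negation is -1. Inputs are 2-dimensional (n_samples x n_features) as required.
def _example_cal_cossim():
    feat = np.array([[1.0, 2.0, 3.0]])
    return cal_cossim(feat, feat), cal_cossim(feat, -feat)   # -> [[1.0]], [[-1.0]]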
def gen_train_score(glimpses, img_info, refer, output):
""" Generate gt trainset quality score according to the KMeans algorithm, which we only cluster the quality
of "HIGH" or "MODERATE" samples, and choose the center as the template, then we calculate the samples' cosine
similarity with template which belong to the same track
Args:
glimpses (Tensor): feature extract from attention cell
img_info (dict): imgs information
refer (str): the train set json file
output (str): the output json file
Returns:
"""
assert len(glimpses) == len(img_info), 'glimpse num {} != img_info num {}'.format(len(glimpses), len(img_info))
# Track dict: key: track seq id, value: [[], [], []] for glimpse index, quality label, care label
track_dict = dict()
# Iter all samples, to get all track info
for i, instance in enumerate(img_info):
track_id = instance['img_info']['ann']['trackID']
care = instance['img_info']['ann']['care']
quality = instance['img_info']['ann']['quality']
if track_id not in track_dict.keys():
# index, quality, care
track_dict[track_id] = [[], [], []]
track_dict[track_id][0].append(i)
track_dict[track_id][1].append(quality)
track_dict[track_id][2].append(care)
for key, value in track_dict.items():
print("processing track id: ", key)
indices = value[0]
quality = value[1]
care = value[2]
# save index for high quality
high_indices = []
# save index for moderate quality
mid_indices = []
for idx, item in enumerate(indices):
if quality[idx] == 'HIGH' and care[idx] == 1:
high_indices.append(item)
if quality[idx] == 'MODERATE' and care[idx] == 1:
mid_indices.append(item)
        # KMeans: first choose high-quality samples; if none exist, fall back to moderate samples and ignore low quality
if len(high_indices) == 0:
high_indices = mid_indices
if len(high_indices) == 0:
continue
# choose glimpse to cluster
tmp_glimpse = []
for index in high_indices:
tmp_glimpse.append(glimpses[index])
tmp_glimpse = np.stack(tmp_glimpse)
clf = KMeans(n_clusters=1)
clf.fit(tmp_glimpse)
center = clf.cluster_centers_
for idx in indices:
sim = cal_cossim(
|
np.expand_dims(glimpses[idx], 0)
|
numpy.expand_dims
|
import unittest
import numpy as np
import pandas as pd
import scipy.stats as st
from ..analysis import GroupStatistics, GroupStatisticsStacked
from ..analysis.exc import MinimumSizeError, NoDataError
from ..data import Vector
class TestGroupStatistics(unittest.TestCase):
def test_0001_group_statistics_no_name(self):
"""Test the Group Statistic class with generated group names"""
np.random.seed(987654321)
x_input_array = st.norm.rvs(2, 1, size=100)
y_input_array = st.norm.rvs(2, 3, size=45)
z_input_array = st.norm.rvs(8, 1, size=18)
output = """
Overall Statistics
------------------
Number of Groups = 3
Total = 163
Grand Mean = 4.1568
Pooled Std Dev = 2.0798
Grand Median = 2.2217
Group Statistics
----------------
n Mean Std Dev Min Median Max Group
--------------------------------------------------------------------------------------------------
100 2.0083 1.0641 -0.4718 2.0761 4.2466 1
45 2.3678 3.5551 -4.8034 2.2217 11.4199 2
18 8.0944 1.1855 6.0553 7.9712 10.5272 3 """
res = GroupStatistics(x_input_array, y_input_array, z_input_array, display=False)
self.assertTrue(res)
self.assertEqual(str(res), output)
self.assertEqual(res.total, 163)
self.assertEqual(res.k, 3)
self.assertAlmostEqual(res.pooled, 2.0798, 4)
self.assertAlmostEqual(res.pooled_std, 2.0798, 4)
self.assertAlmostEqual(res.gmean, 4.1568, 4)
self.assertAlmostEqual(res.grand_mean, 4.1568, 4)
self.assertAlmostEqual(res.gmedian, 2.2217, 4)
self.assertAlmostEqual(res.grand_median, 2.2217, 4)
def test_0002_group_statistics_group_names(self):
"""Test the Group Statistic class with group names specified in a list"""
np.random.seed(987654321)
x_input_array = st.norm.rvs(2, 1, size=100)
y_input_array = st.norm.rvs(2, 3, size=45)
z_input_array = st.norm.rvs(8, 1, size=18)
names = ("one", "two", "three")
output = """
Overall Statistics
------------------
Number of Groups = 3
Total = 163
Grand Mean = 4.1568
Pooled Std Dev = 2.0798
Grand Median = 2.2217
Group Statistics
----------------
n Mean Std Dev Min Median Max Group
--------------------------------------------------------------------------------------------------
100 2.0083 1.0641 -0.4718 2.0761 4.2466 one
18 8.0944 1.1855 6.0553 7.9712 10.5272 three
45 2.3678 3.5551 -4.8034 2.2217 11.4199 two """
res = GroupStatistics(x_input_array, y_input_array, z_input_array, groups=names, display=False)
self.assertTrue(res)
self.assertEqual(str(res), output)
def test_0003_group_statistics_dict(self):
"""Test the Group Statistic class with data passed as a dict"""
np.random.seed(987654321)
x_input_array = st.norm.rvs(2, 1, size=100)
y_input_array = st.norm.rvs(2, 3, size=45)
z_input_array = st.norm.rvs(8, 1, size=18)
data = {"one": x_input_array, "two": y_input_array, "three": z_input_array}
output = """
Overall Statistics
------------------
Number of Groups = 3
Total = 163
Grand Mean = 4.1568
Pooled Std Dev = 2.0798
Grand Median = 2.2217
Group Statistics
----------------
n Mean Std Dev Min Median Max Group
--------------------------------------------------------------------------------------------------
100 2.0083 1.0641 -0.4718 2.0761 4.2466 one
18 8.0944 1.1855 6.0553 7.9712 10.5272 three
45 2.3678 3.5551 -4.8034 2.2217 11.4199 two """
res = GroupStatistics(data, display=False)
self.assertTrue(res)
self.assertEqual(str(res), output)
self.assertEqual(res.total, 163)
self.assertEqual(res.k, 3)
self.assertAlmostEqual(res.pooled, 2.0798, 4)
self.assertAlmostEqual(res.pooled_std, 2.0798, 4)
self.assertAlmostEqual(res.gmean, 4.1568, 4)
self.assertAlmostEqual(res.grand_mean, 4.1568, 4)
self.assertAlmostEqual(res.gmedian, 2.2217, 4)
self.assertAlmostEqual(res.grand_median, 2.2217, 4)
def test_0004_group_statistics_dict_just_above_min_size(self):
"""Test the Group Statistic class with data passed as a dict just above min size"""
np.random.seed(987654321)
x_input_array = st.norm.rvs(2, 1, size=2)
y_input_array = st.norm.rvs(2, 3, size=2)
z_input_array = st.norm.rvs(8, 1, size=2)
data = {"one": x_input_array, "two": y_input_array, "three": z_input_array}
output = """
Overall Statistics
------------------
Number of Groups = 3
Total = 6
Grand Mean = 4.4847
Pooled Std Dev = 4.0150
Grand Median = 3.1189
Group Statistics
----------------
n Mean Std Dev Min Median Max Group
--------------------------------------------------------------------------------------------------
2 2.8003 2.0453 1.3541 2.8003 4.2466 one
2 7.5349 0.7523 7.0029 7.5349 8.0668 three
2 3.1189 6.6038 -1.5507 3.1189 7.7885 two """
res = GroupStatistics(data, display=False)
self.assertTrue(res)
self.assertEqual(str(res), output)
self.assertEqual(res.total, 6)
self.assertEqual(res.k, 3)
self.assertAlmostEqual(res.pooled, 4.0150, 4)
self.assertAlmostEqual(res.pooled_std, 4.0150, 4)
self.assertAlmostEqual(res.gmean, 4.4847, 4)
self.assertAlmostEqual(res.grand_mean, 4.4847, 4)
self.assertAlmostEqual(res.gmedian, 3.1189, 4)
self.assertAlmostEqual(res.grand_median, 3.1189, 4)
def test_0005_group_statistics_dict_at_min_size(self):
"""Test the Group Statistic class with data passed as a dict at min size"""
np.random.seed(987654321)
x_input_array = st.norm.rvs(2, 1, size=1)
y_input_array = st.norm.rvs(2, 3, size=1)
z_input_array = st.norm.rvs(8, 1, size=1)
data = {"one": x_input_array, "two": y_input_array, "three": z_input_array}
self.assertRaises(MinimumSizeError, lambda: GroupStatistics(data, display=False))
def test_0006_group_statistics_dict_single_empty_vector(self):
"""Test the Group Statistic class with data passed as a dict and a single missing vector"""
np.random.seed(987654321)
x_input_array = st.norm.rvs(2, 1, size=10)
y_input_array = ["this", "is", "a", "string"]
z_input_array = st.norm.rvs(8, 1, size=10)
data = {"one": x_input_array, "two": y_input_array, "three": z_input_array}
output = """
Overall Statistics
------------------
Number of Groups = 2
Total = 20
Grand Mean = 5.1489
Pooled Std Dev = 1.2409
Grand Median = 5.1744
Group Statistics
----------------
n Mean Std Dev Min Median Max Group
--------------------------------------------------------------------------------------------------
10 2.3511 1.3732 0.6591 2.3882 4.2466 one
10 7.9466 1.0927 6.3630 7.9607 9.7260 three """
res = GroupStatistics(data, display=False)
self.assertTrue(res)
self.assertEqual(str(res), output)
self.assertEqual(res.total, 20)
self.assertEqual(res.k, 2)
self.assertAlmostEqual(res.pooled, 1.2409, 4)
self.assertAlmostEqual(res.pooled_std, 1.2409, 4)
self.assertAlmostEqual(res.gmean, 5.1489, 4)
self.assertAlmostEqual(res.grand_mean, 5.1489, 4)
self.assertAlmostEqual(res.gmedian, 5.1744, 4)
self.assertAlmostEqual(res.grand_median, 5.1744, 4)
def test_0007_group_statistics_single_group(self):
"""Test the Group Statistic class with a single group"""
np.random.seed(987654321)
x_input_array = st.norm.rvs(2, 1, size=10)
output = """
Group Statistics
----------------
n Mean Std Dev Min Median Max Group
--------------------------------------------------------------------------------------------------
10 2.3511 1.3732 0.6591 2.3882 4.2466 1 """
res = GroupStatistics(x_input_array, display=False)
self.assertTrue(res)
self.assertEqual(str(res), output)
self.assertEqual(res.total, 10)
self.assertEqual(res.k, 1)
self.assertIsNone(res.pooled)
self.assertIsNone(res.pooled_std)
self.assertIsNone(res.gmean)
self.assertIsNone(res.grand_mean)
self.assertIsNone(res.gmedian)
self.assertIsNone(res.grand_median)
def test_0008_group_statistics_dict_empty(self):
"""Test the Group Statistic class with data passed as empty"""
|
np.random.seed(987654321)
|
numpy.random.seed
|
from flask import Flask, request, jsonify
from flask_cors import CORS
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import base64
app = Flask(__name__)
CORS(app)
@app.route('/graph', methods=['POST'])
def graph():
content = request.json
slope = content["slope"]
y_intercept = content["yIntercept"]
x = get_x(100)
y = get_y(x, slope, y_intercept)
create_fig(x, y)
img = {"image": serialize_fig()}
return jsonify(img)
def get_x(range: int):
return
|
np.arange(range)
|
numpy.arange
|
#!/usr/bin/env python
#
# Tests the basic methods of the adaptive covariance MCMC routine.
#
# This file is part of PINTS.
# Copyright (c) 2017-2019, University of Oxford.
# For licensing information, see the LICENSE file distributed with the PINTS
# software package.
#
import pints
import pints.toy as toy
import unittest
import numpy as np
from shared import StreamCapture
debug = False
class TestAdaptiveCovarianceMCMC(unittest.TestCase):
"""
Tests the basic methods of the adaptive covariance MCMC routine.
"""
@classmethod
def setUpClass(cls):
""" Set up problem for tests. """
# Create toy model
cls.model = toy.LogisticModel()
cls.real_parameters = [0.015, 500]
cls.times = np.linspace(0, 1000, 1000)
cls.values = cls.model.simulate(cls.real_parameters, cls.times)
# Add noise
cls.noise = 10
cls.values += np.random.normal(0, cls.noise, cls.values.shape)
cls.real_parameters.append(cls.noise)
cls.real_parameters =
|
np.array(cls.real_parameters)
|
numpy.array
|
#Copyright (c) Facebook, Inc. and its affiliates.
#This source code is licensed under the MIT license found in the
#LICENSE file in the root directory of this source tree.
import numpy as np
from settings import config
import matplotlib.pyplot as plt
from copy import deepcopy
import copy
import matplotlib.cbook as cbook
import _pickle as cPickle
if config.simulation_method == "power_knobs":
from specs.database_input_powerKnobs import *
elif config.simulation_method == "performance":
from specs.database_input import *
else:
raise NameError("Simulation method unavailable")
# ------------------------------
# Functionality:
# plot moves stats
# ------------------------------
def move_profile_plot(move_lists_):
    move_lists = [move_ for move_ in move_lists_ if move_.get_metric() != "cost"]  # cost moves are filtered out for now
move_on_metric_freq = {}
for metric in config.budgetted_metrics:
move_on_metric_freq[metric] = [0]
for move in move_lists:
metric = move.get_metric()
move_on_metric_freq[metric] = [move_on_metric_freq[metric][0] + 1]
labels = ['Metric']
x = np.arange(len(labels)) # the label locations
width = 0.2 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(x - 1 * (width), move_on_metric_freq["latency"], width, label='perf moves', color="orange")
rects2 = ax.bar(x, move_on_metric_freq["power"], width, label='power moves', color="mediumpurple")
rects3 = ax.bar(x + 1 * width, move_on_metric_freq["area"], width, label='area moves', color="brown")
ax.set_ylabel('frequency', fontsize=15)
ax.set_title('Move frequency', fontsize=15)
# ax.set_ylabel('Sim time (s)', fontsize=25)
# ax.set_title('Sim time across system comoplexity.', fontsize=24)
ax.set_xticks(x)
ax.set_xticklabels(labels, fontsize=4)
ax.legend(prop={'size': 15})
fig.savefig(os.path.join(config.latest_visualization,"move_freq_breakdown.pdf"))
# ------------------------------
# Functionality:
# visualize the sequence of moves made
# Variables:
#       des_trail_list: design trail, i.e., the list of designs made in chronological order
#       move_profile: list of moves made
#       des_per_iteration: number of designs tried per iteration (in each iteration, we can be looking at a host of designs
#                          depending on the depth and breadth of the search at that point)
# ------------------------------
def des_trail_plot(des_trail_list, move_profile, des_per_iteration):
metric_bounds = {}
for metric in config.budgetted_metrics:
metric_bounds[metric] = (+10000, -10000)
metric_ref_des_dict = {}
metric_trans_des_dict = {}
for metric in config.budgetted_metrics:
metric_ref_des_dict[metric] = []
metric_trans_des_dict[metric] = []
# contains all the results
res_list = []
for ref_des, transformed_des in des_trail_list:
ref_des_metrics = []
transformed_des_metrics = []
# get the metrics
for metric in config.budgetted_metrics:
#ref_des_metric_value = 100*(1 - ref_des.get_dp_stats().get_system_complex_metric(metric)/ref_des.database.get_budget(metric, "glass"))
if isinstance(ref_des.get_dp_stats().get_system_complex_metric(metric), dict): # must be latency
system_complex_metric = max(list(ref_des.get_dp_stats().get_system_complex_metric(metric).values()))
system_complex_budget = max(list(ref_des.database.get_budget(metric, "glass").values()))
ref_des_metric_value = 100 * (1 - system_complex_metric/system_complex_budget)
                trans_des_metric_value = 100 * (1 - system_complex_metric/system_complex_budget) - ref_des_metric_value  # subtract so the second entry stores the change (magnitude) relative to the reference design
else:
ref_des_metric_value = 100*(1 - ref_des.get_dp_stats().get_system_complex_metric(metric)/ref_des.database.get_budget(metric, "glass"))
trans_des_metric_value = \
100*(1 - transformed_des.get_dp_stats().get_system_complex_metric(metric)/
ref_des.database.get_budget(metric, "glass")) - ref_des_metric_value # need to subtract since the second one needs to be magnitude
ref_des_metrics.append(ref_des_metric_value)
transformed_des_metrics.append(trans_des_metric_value)
metric_bounds[metric] = (min(metric_bounds[metric][0], ref_des_metric_value, trans_des_metric_value),
max(metric_bounds[metric][1], ref_des_metric_value, trans_des_metric_value))
metric_ref_des_dict[metric].append(ref_des_metric_value)
metric_trans_des_dict[metric].append((ref_des_metric_value + trans_des_metric_value))
#res_list.append(copy.deepcopy(ref_des_metrics + transformed_des_metrics))
res_list.append(cPickle.loads(cPickle.dumps(ref_des_metrics + transformed_des_metrics, -1)))
# soa = np.array([[0, 0, 0, 1, 3, 1], [1,1,1, 3,3,3]])
soa = np.array(res_list)
des_per_iteration.append(len(res_list))
des_iteration_unflattened = [(des_per_iteration[itr+1]-des_per_iteration[itr])*[itr+1] for itr,el in enumerate(des_per_iteration[:-1])]
des_iteration = [j for sub in des_iteration_unflattened for j in sub]
X, Y, Z, U, V, W = zip(*soa)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
c_ = list(range(0, len(X)))
for id in range(0, len(c_)):
c_[id] = str((1 - c_[id]/(len(X)))/2)
#c_ = (list(range(0, len(X)))* float(1)/len(X))
ax.quiver(X, Y, Z, U, V, W, arrow_length_ratio=.04, color=c_)
ax.scatter(X[-1], Y[-1], Z[-1], c="red")
ax.scatter(0, 0, 0, c="green")
#ax.quiver(X, Y, Z, U, V, W)
ax.set_xlim([-1 + metric_bounds[config.budgetted_metrics[0]][0], 1 + max(metric_bounds[config.budgetted_metrics[0]][1], 1)])
ax.set_ylim([-1 + metric_bounds[config.budgetted_metrics[1]][0], 1 + max(1.1*metric_bounds[config.budgetted_metrics[1]][1], 1)])
ax.set_zlim([-1 + metric_bounds[config.budgetted_metrics[2]][0], 1 + max(1.1*metric_bounds[config.budgetted_metrics[2]][1], 1)])
#ax.set_xlim([0*metric_bounds[config.budgetted_metrics[0]][0], 5*metric_bounds[config.budgetted_metrics[0]][1]])
#ax.set_ylim([0*metric_bounds[config.budgetted_metrics[1]][0], 5*metric_bounds[config.budgetted_metrics[1]][1]])
#ax.set_zlim([0*metric_bounds[config.budgetted_metrics[2]][0], 5*metric_bounds[config.budgetted_metrics[2]][1]])
ax.set_title("normalized distance to budget")
ax.set_xlabel(config.budgetted_metrics[0])
ax.set_ylabel(config.budgetted_metrics[1])
ax.set_zlabel(config.budgetted_metrics[2])
fig.savefig(os.path.join(config.latest_visualization,"DSE_trail.pdf"))
des_iteration_move_markers = {}
des_iteration_move_markers["latency"] = []
des_iteration_move_markers["power"] = []
des_iteration_move_markers["area"] = []
des_iteration_move_markers["energy"] = []
for move in move_profile:
for metric in metric_ref_des_dict.keys() :
if move.get_metric() == metric:
des_iteration_move_markers[metric].append(1)
else:
des_iteration_move_markers[metric].append(2)
if metric == "cost":
print("ok")
    # progression per metric
for metric in metric_ref_des_dict.keys():
fig, ax = plt.subplots()
ax.set_title("normalize distance to budget VS iteration")
#blah = des_iteration[des_iteration_move_markers[metric]==1]
#blah3 = metric_ref_des_dict[metric]
#blah2 = blah3[des_iteration_move_markers[metric]==1]
#ax.scatter(des_iteration, metric_ref_des_dict[metric][des_iteration_move_markers[metric]==1], color="red", label="orig des", marker="*")
#ax.scatter(des_iteration[des_iteration_move_markers[metric]==2], metric_ref_des_dict[metric][des_iteration_move_markers[metric] == 2], color="red", label="orig des", marker=".")
#ax.scatter(des_iteration[des_iteration_move_markers[metric] == 1], metric_trans_des_dict[metric][des_iteration_move_markers[metric]==1], color ="green", label="trans des", alpha=.05, marker="*")
#ax.scatter(des_iteration[des_iteration_move_markers[metric] == 2], metric_trans_des_dict[metric][des_iteration_move_markers[metric]==2], color ="green", label="trans des", alpha=.05, marker=".")
blah_ = [np.array(des_iteration_move_markers[metric])==1]
blah = np.array(des_iteration)[np.array(des_iteration_move_markers[metric])==1]
blah2 = np.array(metric_ref_des_dict[metric])[np.array(des_iteration_move_markers[metric])==1]
ax.scatter(np.array(des_iteration)[np.array(des_iteration_move_markers[metric])==1],
|
np.array(metric_ref_des_dict[metric])
|
numpy.array
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 12 14:45:32 2017
subscript 1 = R-band (has more entries)
subscript 2 = g-band
@author: stephaniekwan
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset
from matplotlib import gridspec
from matplotlib.ticker import AutoMinorLocator
from astropy.table import Table
# Use LaTeX font
plt.rc({'weight' : 'normal',
'size' : 15})
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
#plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
plt.rc('text', usetex = True)
table1 = np.genfromtxt('../optical/PTF_fulltable_phot1.txt', comments = '|',
skip_header = 43, skip_footer = 2)
table2 = np.genfromtxt('../optical/PTF_fulltable_phot2.txt', comments = '|',
skip_header = 43, skip_footer = 2)
flux1, flux2 = table1[:, 7], table2[:, 7]
sigflux1, sigflux2 = table1[:, 8], table2[:, 8]
mjd1, mjd2 = table1[:, 15], table2[:, 15]
snr1, snr2 = table1[:, 9], table2[:, 9]
zp1, zp2 = table1[:, 5], table2[:, 5]
snt = 3
snu = 5
flux_ref1, sigflux_ref1 = 2771.08, 205.304 #from bottom of table
flux_ref2, sigflux_ref2 = 587.622, 46.0016
mag1, mag1sig, mag1date = np.array([]), np.array([]), np.array([])
upperlim1, upperlim1date = np.array([]), np.array([])
for i in range(len(flux1)):
# Section 9: define new sigflux DC
if (sigflux1[i] > sigflux_ref1):
sigflux_DC = np.sqrt(sigflux1[i] ** 2 - sigflux_ref1 ** 2)
else:
sigflux_DC = np.sqrt(sigflux1[i] ** 2 + sigflux_ref1 ** 2)
# Section 9: redefine SNR
newSnr = (flux1[i] + flux_ref1) / sigflux_DC
if (newSnr > snt): # we have a confident detection
mag1 = np.append(mag1,
zp1[i] - 2.5 * np.log10(flux1[i] + flux_ref1))
mag1sig = np.append(mag1sig,
1.0875 * sigflux1[i] / (flux1[i] + flux_ref1))
mag1date = np.append(mag1date, mjd1[i])
else:
# compute upper flux limit and plot as arrow or triangle
upperlim1 = np.append(upperlim1,
zp1[i] - 2.5 * np.log10(snu * sigflux1[i]))
upperlim1date = np.append(upperlim1date, mjd1[i])
toosmall = []
for i in range(0, len(mag1)):
if mag1[i] < 10.0:
toosmall.append(i)
for i in toosmall[::-1]:
mag1 = np.delete(mag1, i)
mag1date = np.delete(mag1date, i)
mag1sig = np.delete(mag1sig, i)
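# Illustrative sketch of the prescription used above (flux, sigflux and zp are made-up
# numbers; the reference constants are the R-band values defined earlier in this script):
# detections above the SNR threshold become magnitudes via zp - 2.5*log10(flux + flux_ref),
# otherwise an upper limit zp - 2.5*log10(snu * sigflux) is reported.
def _example_mag_or_limit(flux=1500.0, sigflux=100.0, zp=27.0):
    if sigflux > sigflux_ref1:
        sigflux_dc = np.sqrt(sigflux ** 2 - sigflux_ref1 ** 2)
    else:
        sigflux_dc = np.sqrt(sigflux ** 2 + sigflux_ref1 ** 2)
    new_snr = (flux + flux_ref1) / sigflux_dc
    if new_snr > snt:
        return 'mag', zp - 2.5 * np.log10(flux + flux_ref1)
    return 'upper limit', zp - 2.5 * np.log10(snu * sigflux)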
gs = gridspec.GridSpec(2, 2, width_ratios = [5, 1])
ax1 = plt.subplot(gs[0])
ax1 = plt.gca()
ax1.text(-0.07, 1.1, '\\textbf{(a)}', transform=ax1.transAxes,
fontsize = 15, fontweight = 'bold', va = 'top', ha = 'right')
plt.scatter(mag1date, mag1, marker = 'o', s = 2, color = 'black', zorder = 3)
#plt.scatter(upperlim1date, upperlim1, color = 'grey', marker = 'v',
# facecolors = 'grey', s = 15, zorder = 4)
for i in range(0, len(upperlim1)):
ax1.arrow(upperlim1date[i], upperlim1[i],
0.0, 0.3, head_width = 20, head_length = 0.15,
fc = 'grey', ec = 'grey', linestyle = '-')
plt.errorbar(mag1date, mag1, yerr = mag1sig, linestyle = 'None',
color = 'grey', linewidth = 1, zorder = 2)
plt.axhline(y = np.median(mag1), color = 'k', ls = ':')
xlowerlim1, xupperlim1, ylowerlim1, yupperlim1 = 56400, 57800, 17.0, 20.0
ax1.set_xlim([xlowerlim1, xupperlim1])
ax1.set_ylim([ylowerlim1, yupperlim1])
ax1.invert_yaxis()
plt.xlabel('Date (MJD)', fontsize = 14)
plt.ylabel('R Magnitude', fontsize = 14)
minorLocator = AutoMinorLocator()
minorLocator2= AutoMinorLocator()
ax1.xaxis.set_minor_locator(minorLocator)
ax1.yaxis.set_minor_locator(minorLocator2)
# Shaded area to denote uncertainty of median (average of mag1sig)
ax1.add_patch(
patches.Rectangle(
(xlowerlim1, np.median(mag1) - 5 * np.average(mag1sig)), # (x, y)
xupperlim1 - xlowerlim1, # width
10 * np.average(mag1sig), # height
0.0, # angle
facecolor = 'lightgrey',
edgecolor = 'none',
zorder = 1
))
# Add inset
i1, i2 = 4, 12
x1, x2 = mag1date[i1], mag1date[i2]
axins = plt.subplot(gs[1])
axins.set_xlim(x1 + 7, x2 + 10)
axins.set_xticks(np.arange(56480, 56514, 10))
axins.set_ylim(18.35, 18.85)
plt.scatter(mag1date[i1:i2], mag1[i1:i2], color = 'black', marker = 'o',
s = 4, zorder = 3)
plt.errorbar(mag1date[i1:i2], mag1[i1:i2], yerr = mag1sig[i1:i2],
linestyle = 'None', color = 'black', zorder = 2)
plt.axhline(y = np.median(mag1), color = 'k', ls = ':')
axins.invert_yaxis()
minorLocator3 = AutoMinorLocator()
minorLocator4 = AutoMinorLocator()
axins.xaxis.set_minor_locator(minorLocator3)
axins.yaxis.set_minor_locator(minorLocator4)
# Inset: Shaded area to denote uncertainty of median (average of mag1sig)
axins.add_patch(
patches.Rectangle(
(x1 - 10, np.median(mag1) - 5 * np.average(mag1sig)), # (x, y)
xupperlim1 - xlowerlim1, # width
10 * np.average(mag1sig), # height
0.0, # angle
facecolor = 'lightgrey',
edgecolor = 'none',
zorder = 1
))
###########################
## g-band data
###########################
mag2, mag2sig, mag2date = np.array([]), np.array([]), np.array([])
upperlim2, upperlim2date = np.array([]), np.array([])
for i in range(len(flux2)):
# Section 9: define new sigflux DC
if (sigflux2[i] > sigflux_ref2):
sigflux_DC = np.sqrt(sigflux2[i] ** 2 - sigflux_ref2 ** 2)
else:
sigflux_DC = np.sqrt(sigflux2[i] ** 2 + sigflux_ref2 ** 2)
# Section 9: redefine SNR
newSnr = (flux2[i] + flux_ref2) / sigflux_DC
if (newSnr > snt): # we have a confident detection
mag2 = np.append(mag2,
zp2[i] - 2.5 * np.log10(flux2[i] + flux_ref2))
mag2sig = np.append(mag2sig,
1.0875 * sigflux2[i] / (flux2[i] + flux_ref2))
mag2date = np.append(mag2date, mjd2[i])
else:
# compute upper flux limit and plot as arrow or triangle
upperlim2 = np.append(upperlim2,
zp2[i] - 2.5 * np.log10(snu * sigflux2[i]))
upperlim2date =
|
np.append(upperlim2date, mjd1[i])
|
numpy.append
|
#!/usr/bin/env python3
"""CIS 693 - Histogram Processing Homework.
Author: <NAME>
Date: May 22, 2020
Description:
List of Functions:
1. Histogram Processing
"""
import cv2
import numpy as np
from matplotlib import pyplot as plt
def convert_to_grayscale(image):
"""Convert RGB Image to grayscale using RGB weights with dot product.
:param image: Original Image
:type: numpy.ndarray
:return: Grayscale Image
:rtype: numpy.ndarray
"""
rgb_weights = [0.2989, 0.5870, 0.1140]
new_image = np.dot(image[..., :3], rgb_weights)
new_image = new_image.astype(np.uint8)
return new_image
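# Illustrative usage sketch (synthetic data, no image file required): convert a random
# RGB array to grayscale and build the normalized histogram p(r_k) = n_k / (M*N) with
# np.unique; the probabilities sum to 1.
def _example_grayscale_histogram():
    rgb = np.random.randint(0, 256, size=(4, 6, 3), dtype=np.uint8)
    gray = convert_to_grayscale(rgb)
    dim_m, dim_n = gray.shape
    values, counts = np.unique(gray, return_counts=True)
    pdf = counts / (dim_m * dim_n)
    assert np.isclose(pdf.sum(), 1.0)
    return values, pdf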
def process_histogram(image, show_plot=True):
"""Generate a normalized histogram of a given grayscale image.
    p(r_k) = n_k/(M*N) for a given MxN image
    :param image: Original Image
    :type: numpy.ndarray
:param show_plot: True will show Matplotlib plot
:type: bool
:return: hist, bins, pdf
"""
dim_M, dim_N = image.shape
image_max_pixel_size = np.iinfo(image.dtype).max
image_bins, counts = np.unique(image, return_counts=True)
hist = np.zeros(image_max_pixel_size, dtype=np.uint8)
pdf =
|
np.zeros(image_max_pixel_size, dtype=np.double)
|
numpy.zeros
|
import cv2
import imutils
import numpy as np
import pytesseract
img = cv2.imread(r'd:\444.jpg', cv2.IMREAD_COLOR)
#img = cv2.resize(img, (600, 400))
img = cv2.resize(img, (300, 200))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.bilateralFilter(gray, 13, 15, 15)
edged = cv2.Canny(gray, 30, 200)
contours = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(contours)
contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]
screenCnt = None
for c in contours:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.018 * peri, True)
if len(approx) == 4:
screenCnt = approx
break
if screenCnt is None:
detected = 0
print("No contour detected")
else:
detected = 1
if detected == 1:
cv2.drawContours(img, [screenCnt], -1, (0, 0, 255), 3)
mask = np.zeros(gray.shape, np.uint8)
new_image = cv2.drawContours(mask, [screenCnt], 0, 255, -1, )
new_image = cv2.bitwise_and(img, img, mask=mask)
(x, y) = np.where(mask == 255)
(topx, topy) = (np.min(x), np.min(y))
(bottomx, bottomy) = (np.max(x),
|
np.max(y)
|
numpy.max
|
from tools.nc_reader import nc_reader
from tools.mpl_beautify import *
from tools.mpl_style import *
from tools.units import *
import matplotlib.colors as cls
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
import os
import pandas as pd
import seaborn as sns
def _plot_parcels(ax, ncreader, step, coloring, vmin, vmax, draw_cbar=True, **kwargs):
# 19 Feb 2021
# https://stackoverflow.com/questions/43009724/how-can-i-convert-numbers-to-a-color-scale-in-matplotlib
norm = cls.Normalize(vmin=vmin, vmax=vmax)
cmap = kwargs.pop('cmap', plt.cm.viridis_r)
origin = ncreader.get_box_origin()
extent = ncreader.get_box_extent()
ncells = ncreader.get_box_ncells()
dx = extent / ncells
timestamp = kwargs.pop('timestamp', True)
nparcels = kwargs.pop('nparcels', True)
timestamp_xy = kwargs.pop('timestamp_xy', (0.75, 1.05))
timestamp_fmt = kwargs.pop('timestamp_fmt', "%.3f")
nparcels_xy = kwargs.pop('nparcels_xy', (0.01, 1.05))
no_xlabel = kwargs.pop("no_xlabel", False)
no_ylabel = kwargs.pop("no_ylabel", False)
# instantiating the figure object
fkwargs = {k: v for k, v in kwargs.items() if v is not None}
left = fkwargs.get("xmin", origin[0])
right = fkwargs.get("xmax", origin[0] + extent[0])
bottom = fkwargs.get("ymin", origin[1])
top = fkwargs.get("ymax", origin[1] + extent[1])
x_pos = ncreader.get_dataset(step=step, name="x_position")
z_pos = ncreader.get_dataset(step=step, name="z_position")
ind = np.argwhere((x_pos >= left - dx[0]) & (x_pos <= right + dx[0]) &
(z_pos >= bottom - dx[1]) & (z_pos <= top + dx[1]))
ind = ind.squeeze()
pos = None
if coloring == "aspect-ratio":
data = ncreader.get_aspect_ratio(step=step, indices=ind)
elif coloring == "vol-distr":
data = ncreader.get_dataset(step=step, name="volume", indices=ind)
# 5 August 2021
# https://stackoverflow.com/questions/14777066/matplotlib-discrete-colorbar
# https://stackoverflow.com/questions/40601997/setting-discrete-colormap-corresponding-to-specific-data-range-in-matplotlib
cmap = plt.cm.get_cmap("bwr", 2)
bounds = [0, vmin, vmax]
norm = cls.BoundaryNorm(bounds, cmap.N)
else:
data = ncreader.get_dataset(step=step, name=coloring, indices=ind)
ells = ncreader.get_ellipses(step=step, indices=ind)
ax.set_rasterized(True)
ax.add_collection(ells)
ells.set_offset_transform(ax.transData)
ells.set_clip_box(ax.bbox)
ells.set_alpha(1.0)
ells.set_facecolor(cmap(norm(data)))
ax.set_xlim([left, right])
ax.set_ylim([bottom, top])
# 26 May 2021
# https://matplotlib.org/stable/gallery/subplots_axes_and_figures/axis_equal_demo.html
ax.set_aspect("equal", "box")
if timestamp:
add_timestamp(ax, ncreader.get_dataset(step=step, name="t"),
xy=timestamp_xy, fmt=timestamp_fmt)
if nparcels:
add_number_of_parcels(ax, len(data), xy=nparcels_xy)
if draw_cbar:
# 27 May 2021
# https://stackoverflow.com/questions/29516157/set-equal-aspect-in-plot-with-colorbar
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.1)
# fig.add_axes(cax)
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
cbar = plt.colorbar(sm, drawedges=False, ax=ax, cax=cax)
# 19 Feb 2021
# https://stackoverflow.com/questions/15003353/why-does-my-colorbar-have-lines-in-it
cbar.set_alpha(0.75)
cbar.solids.set_edgecolor("face")
cbar.draw_all()
if coloring == "aspect-ratio":
cbar.set_label(r"$1 \leq \lambda \leq \lambda_{max}$")
elif coloring == "vol-distr":
# 5 August 2021
# https://matplotlib.org/stable/gallery/ticks_and_spines/colorbar_tick_labelling_demo.html
cbar.ax.set_yticklabels([r"0", r"$V_{min}$", r"$V_{max}$"])
else:
cbar.set_label(coloring)
if not no_xlabel:
ax.set_xlabel(get_label("$x$", units["position"]))
if not no_ylabel:
ax.set_ylabel(get_label("$y$", units["position"]))
return plt.cm.ScalarMappable(cmap=cmap, norm=norm)
def plot_parcels(
fname, step, figure="save", fmt="png", coloring="aspect-ratio", **kwargs
):
ncreader = nc_reader()
ncreader.open(fname)
if not ncreader.is_parcel_file:
raise IOError("Not a parcel output file.")
nsteps = ncreader.get_num_steps()
if step > nsteps - 1:
raise ValueError("Step number exceeds limit of " + str(nsteps - 1) + ".")
if step < 0:
raise ValueError("Step number cannot be negative.")
if coloring == "aspect-ratio":
vmin = 1.0
vmax = ncreader.get_global_attribute("lambda_max")
elif coloring == "vol-distr":
extent = ncreader.get_box_extent()
ncells = ncreader.get_box_ncells()
vcell = np.prod(extent / ncells)
vmin = vcell / ncreader.get_global_attribute("vmin_fraction")
vmax = vcell / ncreader.get_global_attribute("vmax_fraction")
else:
vmin, vmax = ncreader.get_dataset_min_max(coloring)
plt.figure(num=step)
_plot_parcels(plt.gca(), ncreader, step, coloring, vmin, vmax, **kwargs)
ncreader.close()
if figure == "return":
return plt
elif figure == "save":
plt.savefig(
"parcels_"
+ coloring
+ "_step_"
+ str(step).zfill(len(str(nsteps)))
+ "."
+ fmt,
bbox_inches="tight",
)
else:
plt.tight_layout()
plt.show()
plt.close()
def plot_volume_symmetry_error(fnames, figure="save", fmt="png", **kwargs):
"""
Plot the symmetry error of the gridded volume.
(The gridded symmetry volume is only written in debug mode.)
"""
n = len(fnames)
labels = kwargs.pop("labels", n * [None])
if len(labels) < n:
raise ValueError("Not enough labels provided.")
colors = plt.cm.tab10(np.arange(n).astype(int))
for i, fname in enumerate(fnames):
ncreader = nc_reader()
ncreader.open(fname)
if ncreader.is_parcel_file:
raise IOError("Not a field output file.")
try:
ncreader.get_dataset(0, "max_sym_vol_err")
except:
raise IOError("This plot is only available in debug mode.")
nsteps = ncreader.get_num_steps()
vmax = np.zeros(nsteps)
t = np.zeros(nsteps)
for step in range(nsteps):
vmax[step] = ncreader.get_dataset(step, "max_sym_vol_err")
t[step] = ncreader.get_dataset(step, "t")
ncreader.close()
plt.fill_between(
t,
0,
vmax,
color=colors[i],
label=labels[i],
edgecolor=colors[i],
linewidth=0.75,
)
plt.grid(which="both", linestyle="dashed")
plt.xlabel(get_label("time", units["time"]))
plt.ylabel(r"volume symmetry error")
plt.yscale("log")
# plt.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
if not labels[0] is None:
plt.legend(
loc=legend_dict["loc"],
ncol=legend_dict["ncol"],
bbox_to_anchor=legend_dict["bbox_to_anchor"],
)
plt.tight_layout()
if figure == "return":
return plt
elif figure == "save":
prefix = os.path.splitext(fnames[0])[0] + "_"
if n > 1:
prefix = ""
plt.savefig(prefix + "vol_sym_err." + fmt, bbox_inches="tight")
else:
plt.show()
plt.close()
def plot_rms_volume_error(fnames, figure="save", fmt="png", **kwargs):
"""
Plot the gridded r.m.s. volume error.
"""
n = len(fnames)
labels = kwargs.pop("labels", n * [None])
yscale = kwargs.pop("yscale", "linear")
ylim = kwargs.pop("ylim", (None, None))
no_xlabel = kwargs.pop("no_xlabel", False)
beg = kwargs.pop("begin", None)
end = kwargs.pop("end", None)
tight_layout = kwargs.pop('tight_layout', True)
if len(labels) < n:
raise ValueError("Not enough labels provided.")
colors = plt.cm.tab10(np.arange(n).astype(int))
ncreader = nc_reader()
for i, fname in enumerate(fnames):
ncreader.open(fname)
if ncreader.is_parcel_file:
raise IOError("Not a field output file.")
vrms = ncreader.get_diagnostic("rms_v")
t = ncreader.get_diagnostic("t")
ncreader.close()
plt.plot(
t[beg:end], vrms[beg:end], label=labels[i], linewidth=2, color=colors[i]
)
if not no_xlabel:
plt.xlabel(get_label("time", units["time"]))
plt.ylabel(r"r.m.s. area error")
plt.grid(which="both", linestyle="dashed", zorder=-1)
if not labels[0] is None:
plt.legend(
loc=legend_dict["loc"],
ncol=legend_dict["ncol"],
bbox_to_anchor=legend_dict["bbox_to_anchor"],
)
plt.yscale(yscale)
if yscale == "linear":
plt.ticklabel_format(axis="y", style="scientific", scilimits=(0, 0))
plt.ylim(ylim)
if tight_layout:
plt.tight_layout()
if figure == "return":
return plt
elif figure == "save":
plt.savefig("rms_vol_err." + fmt, bbox_inches="tight")
else:
plt.show()
plt.close()
def plot_max_volume_error(fnames, figure="save", fmt="png", **kwargs):
"""
Plot the gridded absolute volume error (normalised with
cell volume).
"""
n = len(fnames)
labels = kwargs.pop("labels", n * [None])
no_xlabel = kwargs.pop("no_xlabel", False)
beg = kwargs.pop("begin", None)
end = kwargs.pop("end", None)
tight_layout = kwargs.pop('tight_layout', True)
if len(labels) < n:
raise ValueError("Not enough labels provided.")
colors = plt.cm.tab10(np.arange(n).astype(int))
ncreader = nc_reader()
for i, fname in enumerate(fnames):
ncreader.open(fname)
if ncreader.is_parcel_file:
raise IOError("Not a field output file.")
vmax = ncreader.get_diagnostic("max absolute normalised volume error")
t = ncreader.get_diagnostic("t")
ncreader.close()
plt.plot(
t[beg:end], vmax[beg:end], label=labels[i], linewidth=2, color=colors[i]
)
plt.ticklabel_format(axis="y", style="scientific", scilimits=(0, 0))
if not no_xlabel:
plt.xlabel(get_label("time", units["time"]))
plt.ylabel(r"max normalised volume error")
plt.grid(linestyle="dashed", zorder=-1)
if not labels[0] is None:
plt.legend(
loc=legend_dict["loc"],
ncol=legend_dict["ncol"],
bbox_to_anchor=legend_dict["bbox_to_anchor"],
)
if tight_layout:
plt.tight_layout()
if figure == "return":
return plt
elif figure == "save":
plt.savefig("max_normalised_vol_err." + fmt, bbox_inches="tight")
else:
plt.show()
plt.close()
def plot_parcel_profile(fnames, figure="save", fmt="png", **kwargs):
"""
Plot the mean and standard deviation of the parcel aspect ratio.
"""
n = len(fnames)
labels = kwargs.pop("labels", n * [None])
dset = kwargs.pop("dset", "aspect-ratio")
no_xlabel = kwargs.pop("no_xlabel", False)
beg = kwargs.pop("begin", None)
end = kwargs.pop("end", None)
tight_layout = kwargs.pop('tight_layout', True)
colors = plt.cm.tab10(np.arange(n).astype(int))
if len(labels) < n:
raise ValueError("Not enough labels provided.")
ncreader = nc_reader()
lmax = 0
for i, fname in enumerate(fnames):
ncreader.open(fname)
if not ncreader.is_parcel_file:
raise IOError("Not a parcel output file.")
nsteps = ncreader.get_num_steps()
data_mean = np.zeros(nsteps)
data_std = np.zeros(nsteps)
t = np.zeros(nsteps)
for step in range(nsteps):
data = None
if dset == "aspect-ratio":
data = ncreader.get_aspect_ratio(step)
else:
data = ncreader.get_dataset(step, dset)
if dset == "volume":
extent = ncreader.get_box_extent()
ncells = ncreader.get_box_ncells()
vcell = np.prod(extent / ncells)
data /= vcell
data_mean[step] = data.mean()
data_std[step] = data.std()
t[step] = ncreader.get_dataset(step, "t")
if dset == "aspect-ratio":
lmax = max(lmax, ncreader.get_global_attribute("lambda_max"))
ncreader.close()
plt.plot(t[beg:end], data_mean[beg:end], label=labels[i], color=colors[i])
plt.fill_between(
t[beg:end],
data_mean[beg:end] - data_std[beg:end],
data_mean[beg:end] + data_std[beg:end],
alpha=0.5,
color=colors[i],
)
print(fname, data_mean.mean(), data_std.mean())
if not no_xlabel:
plt.xlabel(get_label("time", units["time"]))
plt.grid(linestyle="dashed", zorder=-1)
if dset == "aspect-ratio":
plt.ylabel(r"aspect ratio $\lambda$")
plt.text(t[10], 0.92 * lmax, r"$\lambda\le\lambda_{max} = " + str(lmax) + "$")
plt.axhline(lmax, linestyle="dashed", color="black")
elif dset == "volume":
plt.ylabel(r"parcel volume / $V_{g}$")
# plt.axhline(1.0, linestyle='dashed', color='black',
# label=r'cell volume $V_{g}$')
else:
plt.ylabel(r"parcel " + dset)
if labels[0] is not None:
plt.legend(
loc=legend_dict["loc"],
ncol=legend_dict["ncol"],
bbox_to_anchor=legend_dict["bbox_to_anchor"],
)
if tight_layout:
plt.tight_layout()
if figure == "return":
return plt
elif figure == "save":
prefix = os.path.splitext(fnames[0])[0] + "_"
if n > 1:
prefix = ""
plt.savefig(prefix + "parcel_" + dset + "_profile." + fmt, bbox_inches="tight")
else:
plt.show()
plt.close()
def plot_parcel_stats_profile(fnames, figure="save", fmt="png", **kwargs):
"""
Plot parcel statistics
"""
n = len(fnames)
labels = kwargs.pop("labels", n * [None])
dset = kwargs.pop("dset", "aspect-ratio")
no_xlabel = kwargs.pop("no_xlabel", False)
beg = kwargs.pop("begin", None)
end = kwargs.pop("end", None)
if dset == "aspect-ratio":
dset = "aspect ratio"
colors = plt.cm.tab10(np.arange(n).astype(int))
if len(labels) < n:
raise ValueError("Not enough labels provided.")
ncreader = nc_reader()
lmax = 0
for i, fname in enumerate(fnames):
ncreader.open(fname)
if not ncreader.is_parcel_stats_file:
raise IOError("Not a parcel diagnostic output file.")
nsteps = ncreader.get_num_steps()
data_mean = np.zeros(nsteps)
data_std = np.zeros(nsteps)
t = np.zeros(nsteps)
for step in range(nsteps):
t[step] = ncreader.get_dataset(step, "t")
data_mean[step] = ncreader.get_dataset(step, "avg " + dset)
data_std[step] = ncreader.get_dataset(step, "std " + dset)
if dset == "aspect ratio":
lmax = max(lmax, ncreader.get_global_attribute("lambda_max"))
ncreader.close()
plt.plot(t[beg:end], data_mean[beg:end], label=labels[i], color=colors[i])
plt.fill_between(
t[beg:end],
data_mean[beg:end] - data_std[beg:end],
data_mean[beg:end] + data_std[beg:end],
alpha=0.5,
color=colors[i],
)
if not no_xlabel:
plt.xlabel(get_label("time", units["time"]))
plt.grid(linestyle="dashed", zorder=-1)
if dset == "aspect ratio":
plt.ylabel(r"aspect ratio $\lambda$")
plt.text(t[10], 0.95 * lmax, r"$\lambda\le\lambda_{max} = " + str(lmax) + "$")
plt.axhline(lmax, linestyle="dashed", color="black")
elif dset == "volume":
plt.ylabel(r"parcel volume / $V_{g}$")
# plt.axhline(1.0, linestyle='dashed', color='black',
# label=r'cell volume $V_{g}$')
else:
plt.ylabel(r"parcel " + dset)
if labels[0] is not None:
plt.legend(
loc=legend_dict["loc"],
ncol=legend_dict["ncol"],
bbox_to_anchor=legend_dict["bbox_to_anchor"],
)
plt.tight_layout()
if figure == "return":
return plt
elif figure == "save":
prefix = os.path.splitext(fnames[0])[0] + "_"
if n > 1:
prefix = ""
dset = dset.replace(" ", "_")
plt.savefig(prefix + "parcel_" + dset + "_profile." + fmt, bbox_inches="tight")
else:
plt.show()
plt.close()
def plot_parcel_number(fnames, figure="save", fmt="png", **kwargs):
"""
Plot the number of parcels in simulation.
"""
labels = kwargs.pop("labels", None)
no_xlabel = kwargs.pop("no_xlabel", False)
beg = kwargs.pop("begin", None)
end = kwargs.pop("end", None)
tight_layout = kwargs.pop('tight_layout', True)
if labels is None:
labels = [None] * len(fnames)
for i, fname in enumerate(fnames):
ncreader = nc_reader()
ncreader.open(fname)
if not ncreader.is_parcel_file:
raise IOError("Not a parcel output file.")
nsteps = ncreader.get_num_steps()
nparcels = np.zeros(nsteps)
t = np.zeros(nsteps)
for step in range(nsteps):
nparcels[step] = ncreader.get_num_parcels(step)
t[step] = ncreader.get_dataset(step, "t")
ncreader.close()
plt.plot(t[beg:end], nparcels[beg:end], label=labels[i])
plt.grid(linestyle="dashed", zorder=-1)
if labels[0] is not None:
plt.legend(
loc=legend_dict["loc"],
ncol=min(len(labels), legend_dict["ncol"]),
bbox_to_anchor=legend_dict["bbox_to_anchor"],
)
if not no_xlabel:
plt.xlabel(get_label("time", units["time"]))
plt.ylabel(r"parcel count")
if tight_layout:
plt.tight_layout()
if figure == "return":
return plt
elif figure == "save":
plt.savefig("parcel_number_profile." + fmt, bbox_inches="tight")
else:
plt.show()
plt.close()
def plot_small_parcel_number(fnames, figure="save", fmt="png", **kwargs):
"""
Plot the number of small parcels in simulation.
"""
labels = kwargs.pop("labels", None)
no_xlabel = kwargs.pop("no_xlabel", False)
beg = kwargs.pop("begin", None)
end = kwargs.pop("end", None)
tight_layout = kwargs.pop('tight_layout', True)
if labels is None:
labels = [None] * len(fnames)
for i, fname in enumerate(fnames):
ncreader = nc_reader()
ncreader.open(fname)
if not ncreader.is_parcel_stats_file:
raise IOError("Not a parcel diagnostic output file.")
nsteps = ncreader.get_num_steps()
nparcels = np.zeros(nsteps)
nsmall = np.zeros(nsteps)
t = np.zeros(nsteps)
for step in range(nsteps):
nparcels[step] = ncreader.get_dataset(step, "n_parcels")
nsmall[step] = ncreader.get_dataset(step, "n_small_parcel")
t[step] = ncreader.get_dataset(step, "t")
ncreader.close()
plt.plot(
t[beg:end], nsmall[beg:end] / nparcels[beg:end] * 100.0, label=labels[i]
)
plt.grid(linestyle="dashed", zorder=-1)
if labels[0] is not None:
plt.legend(
loc=legend_dict["loc"],
ncol=min(len(labels), legend_dict["ncol"]),
bbox_to_anchor=legend_dict["bbox_to_anchor"],
)
if not no_xlabel:
plt.xlabel(get_label("time", units["time"]))
plt.ylabel(r"small parcel fraction (\%)")
if tight_layout:
plt.tight_layout()
if figure == "return":
return plt
elif figure == "save":
plt.savefig("parcel_small_number_profile." + fmt, bbox_inches="tight")
else:
plt.show()
plt.close()
def plot_center_of_mass(fnames, figure="save", fmt="png", dset="buoyancy", **kwargs):
tag = None
if dset == "buoyancy":
tag = "b"
tag_name = "b"
elif dset == "vorticity":
tag = "w"
tag_name = r"\zeta"
else:
raise ValueError("Dataset must be 'buoyancy' or 'vorticity'.")
labels = kwargs.pop("labels", None)
variance = kwargs.pop("variance", False)
beg = kwargs.pop("begin", None)
end = kwargs.pop("end", None)
n = len(fnames)
if labels is None:
labels = [None] * n
colors = plt.cm.tab10(np.arange(n).astype(int))
ncreader = nc_reader()
fig1 = plt.figure(num=1)
ax1 = fig1.gca()
fig2 = plt.figure(num=2)
ax2 = fig2.gca()
for i, fname in enumerate(fnames):
ncreader.open(fname)
if not ncreader.is_parcel_stats_file:
raise IOError("Not a parcel diagnostic output file.")
nsteps = ncreader.get_num_steps()
t = np.zeros(nsteps)
xb_bar = np.zeros(nsteps)
zb_bar = np.zeros(nsteps)
x2b_bar = np.zeros(nsteps)
z2b_bar =
|
np.zeros(nsteps)
|
numpy.zeros
|
# This is the code for the experiments performed on the Eitz Sketches dataset for the DeLiGAN model. Minor adjustments
# to the code, as suggested in the comments, can be made to test GAN. Corresponding details about these experiments
# can be found in section 5.5 of the paper, and the results are shown in Fig. 6 and Tables 2 and 3.
import argparse
import cPickle
import time
import numpy as np
import theano as th
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
import lasagne
import lasagne.layers as ll
from lasagne.init import Normal
from lasagne.layers import dnn
import nn
import sys
import plotting
import input_data_gan
# settings
parser = argparse.ArgumentParser()
parser.add_argument('--seed', default=1)
parser.add_argument('--batch_size', default=100)
parser.add_argument('--unlabeled_weight', type=float, default=1.)
parser.add_argument('--learning_rate', type=float, default=0.0001)
parser.add_argument('--data_dir', type=str, default='../datasets/sketches/')
parser.add_argument('--results_dir', type=str, default='../results/sketches/')
parser.add_argument('--count', type=int, default=400)
args = parser.parse_args()
gen_dim = 40
disc_dim = 20
print(args)
# fixed random seeds
rng =
|
np.random.RandomState(args.seed)
|
numpy.random.RandomState
|
import tensorflow as tf
import numpy as np
from typing import Tuple, Callable
from spiking.helpers import spike_function
class IntegratorNeuronCell(tf.keras.layers.Layer):
"""
A simple spiking neuron layer that integrates (sums up) the outputs of the previous layer.
"""
def __init__(self, n_in, n_neurons, **kwargs):
"""
Initialization function of the IntegratorNeuronCell.
@param n_in: Number of inputs, i.e. outputs of previous layer.
@param n_neurons: Number of neurons, i.e. outputs of this layer.
@param kwargs: Additional parameters, forwarded to standard Layer init function of tf.
"""
super(IntegratorNeuronCell, self).__init__(**kwargs)
self.n_in = n_in
self.n_neurons = n_neurons
self.w_in = None
self.b_in = None
def build(self, input_shape):
"""
Creates the variables of this layer, i.e. creates and initializes the weights
for all neurons within this layer.
@param input_shape: Not needed for this layer.
@type input_shape:
"""
del input_shape # Unused
w_in = tf.random.normal((self.n_in, self.n_neurons), dtype=self.dtype)
self.w_in = tf.Variable(initial_value=w_in / np.sqrt(self.n_in), trainable=True)
b_in = tf.random.normal((self.n_neurons,), dtype=self.dtype)
# TODO why "/ np.sqrt"? Should not matter as we don't train
self.b_in = tf.Variable(initial_value=b_in / np.sqrt(self.n_in), trainable=True)
@property
def state_size(self) -> Tuple[int, int, int]:
"""
Returns the state size as a tuple (number of neurons, number of neurons, 1): the membrane potential, the output and the time step counter.
@return:
"""
return self.n_neurons, self.n_neurons, 1
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
"""
@param inputs:
@param batch_size:
@param dtype:
@return:
"""
del inputs # Unused
zeros = tf.zeros((batch_size, self.n_neurons), dtype=dtype)
return zeros, zeros, 0.
def call(self, input_at_t, states_at_t):
"""
@param input_at_t:
@param states_at_t:
@return:
"""
old_v, old_z, t = states_at_t
# TODO this is never used, everything but dynthresh uses rate integrator, has to be changed for clarity!
u_t = tf.add(tf.matmul(tf.subtract(t, input_at_t), self.w_in), tf.multiply(self.b_in, t))
new_v = old_v + u_t
new_z = tf.nn.softmax(new_v)
return new_z, (new_v, new_z, t + 1)
class LifNeuronCell(IntegratorNeuronCell):
"""
A LifNeuron that uses time-to-first-spike (ttfs) encoding.
Be aware that this implementation does not resemble the way ttfs normally works.
Normally, ttfs would output one spike only, in this implementation, ttfs outputs spikes at every timestep after
it has spiked for the first time.
The reason for that is the way the neuron potential for ttfs in the Rueckauer 2018 paper is defined. They sum over
all timesteps and multiply the weight with the time since the first spike.
Here, for every timestep, the weight is added to the potential of the previous timestep, hence the continuous
spiking after the first spike.
"""
def __init__(self, n_in: int, n_neurons: int, tau: float = 999999., threshold: float = 0.1,
activation_function: Callable[[tf.Tensor], tuple] = spike_function, **kwargs):
"""
Initializes a (Recurrent)LifNeuronCell.
@param n_in: Number of inputs, i.e. outputs of previous layer.
@param n_neurons: Number of neurons, i.e. outputs of this layer.
@param tau: The time constant tau.
@param threshold: The threshold for the neurons in this layer.
@param activation_function: The activation function for the LIF-Neuron, defaults to a simple spike-function.
@param kwargs: Additional parameters, forwarded to standard Layer init function of tf.
"""
super(LifNeuronCell, self).__init__(n_in, n_neurons, **kwargs)
self.threshold = threshold
self.alpha = tf.exp(-1 / tau)
self.activation_function = activation_function
@property
def state_size(self) -> Tuple[int, int]:
"""
Returns the state size as a tuple (number of neurons, number of neurons): the membrane potential and the output.
@return:
"""
return self.n_neurons, self.n_neurons
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
"""
@param inputs:
@param batch_size:
@param dtype:
@return:
"""
del inputs # Unused
zeros = tf.zeros((batch_size, self.n_neurons), dtype=dtype)
return zeros, zeros
def call(self, input_at_t, states_at_t):
old_v, old_z = states_at_t
# membrane potential that will be added this timestep
# input * weights + bias
i_t = tf.add(tf.matmul(input_at_t, self.w_in), self.b_in)
# add new potential of this timestep to the old potential
new_v = self.alpha * old_v + i_t
# new_z -> output_at_t, tf.maximum makes output stay at 1 when the first spike was emitted
# this is due to the way the Rueckauer TTFS math is translated to TensorFlow (TODO more to come)
new_z = tf.maximum(self.activation_function(new_v / self.threshold), old_z)
return new_z, (new_v, new_z)
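# Minimal usage sketch (an assumption, not part of the original module): the cell is
# written against the Keras RNN-cell contract, so it can be unrolled over time with
# tf.keras.layers.RNN. The layer sizes and input shape below are placeholders.
if __name__ == "__main__":
    _example_cell = LifNeuronCell(n_in=4, n_neurons=2, threshold=0.1)
    _rnn = tf.keras.layers.RNN(_example_cell, return_sequences=True)
    _spikes = tf.random.uniform((1, 10, 4))  # batch of 1, 10 timesteps, 4 inputs
    print(_rnn(_spikes).shape)  # expected: (1, 10, 2)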
class LifNeuronCellConv2D(IntegratorNeuronCell):
"""
A LifNeuron that uses time-to-first-spike (ttfs) encoding.
Be aware that this implementation does not resemble the way ttfs normally works.
Normally, ttfs would output one spike only, in this implementation, ttfs outputs spikes at every timestep after
it has spiked for the first time.
The reason for that is the way the neuron potential for ttfs in the Rueckauer 2018 paper is defined. They sum over
all timesteps and multiply the weight with the time since the first spike.
Here, for every timestep, the weight is added to the potential of the previous timestep, hence the continuous
spiking after the first spike.
This class implements ttfs with convolutions.
"""
def __init__(self, input_shape: int, output_shape: int, tau: float = 999999., threshold: float = 0.1,
activation_function: Callable[[tf.Tensor], tuple] = spike_function,
kernel_size: (int, int) = (3, 3), filters: int = 3, strides: (int, int) = (1, 1),
padding: str = "VALID", data_format: str = "NHWC", dilations: int or list = 1, **kwargs):
"""
Initializes a (Recurrent)LifNeuronCell.
@param input_shape: Shape of the input to this layer, i.e. the output shape of the previous layer.
@param output_shape: Shape of the output of this layer.
@param tau: The time constant tau.
@param threshold: The threshold for the neurons in this layer.
@param activation_function: The activation function for the LIF-Neuron, defaults to a simple spike-function.
@param kwargs: Additional parameters, forwarded to standard Layer init function of tf.
"""
input_shape_np = np.array(input_shape)
output_shape_np = np.array(output_shape)
n_in = np.prod(input_shape_np[input_shape_np != np.array(None)])
n_neurons = np.prod(output_shape_np[output_shape_np != np.array(None)])
super(LifNeuronCellConv2D, self).__init__(n_in, n_neurons, **kwargs)
self.conv_input_shape = input_shape_np
self.conv_output_shape = output_shape_np
self.threshold = threshold
self.alpha = tf.exp(-1 / tau)
self.kernel_size = kernel_size
self.filters = filters
self.strides = strides
self.padding = padding.upper()
self.activation_function = activation_function
self.data_format = "NHWC" if data_format == "channels_last" else "NCHW"
self.dilations = dilations
def build(self, input_shape):
"""
Creates the variables of this layer, i.e. creates and initializes the weights
for all neurons within this layer.
@param input_shape: Not needed for this layer.
@type input_shape:
"""
del input_shape # Unused
# kernel, kernel, in_depth, out_depth
w_in = tf.random.normal((self.kernel_size[0],
self.kernel_size[1],
self.conv_input_shape[3],
self.filters), dtype=self.dtype)
self.w_in = tf.Variable(initial_value=w_in / np.sqrt(self.n_in), trainable=True)
b_in = tf.random.normal((self.filters,), dtype=self.dtype)
self.b_in = tf.Variable(initial_value=b_in /
|
np.sqrt(self.n_in)
|
numpy.sqrt
|
from collections import OrderedDict
from functools import partial
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse as sp
import cPickle as pkl
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.mixture import GaussianMixture
from tqdm import tqdm, trange
# TODO: add support for sparse matrices
class BaseMF:
__metaclass__ = ABCMeta
def __init__(self, n_factors, learning_rate=0.01, reg=0.01,
max_iter=10, init=0.001, bias=True, verbose=False,
*args, **kwargs):
"""
beta: balance of rec over side
"""
self.lr = learning_rate
self.reg = reg # regularization coeff
self.k = n_factors
self.bias = bias
self.init = init
self.max_iter = max_iter
self.costs = []
self.verbose = verbose
# dummy variable
self.R, self.X = None, None
self.U, self.V, self.W = None, None, None
self.b_u, self.b_i, self.b_w = None, None, None
@property
def n_users(self):
""""""
return self.R.shape[0]
@property
def n_items(self):
""""""
return self.R.shape[1]
def _init_params(self, R, X=None):
""""""
if X is not None:
self.X = X
self.W = np.random.rand(X.shape[1], self.k) * self.init
if self.bias:
self.b_w = np.zeros((self.k,))
# currently only supports dense mat ops.
if sp.isspmatrix(R):
# self.R = R.tocsr()
self.R = R.toarray()
else:
if isinstance(R, np.ndarray):
self.R = R
else:
raise ValueError(
'[ERROR] input need to be sparse or dense matrix!')
# factors for outland
self.U = np.zeros((self.R.shape[0], self.k))
self.V = np.zeros((self.R.shape[1], self.k))
if self.bias:
self.b_u = np.zeros((self.R.shape[0],))
self.b_i = np.zeros((self.R.shape[1],))
# check for zero-entry rows/cols and build hash maps for post-processing
self.row_hash = OrderedDict([(ix, j) for j, ix in
enumerate(np.where(self.R.sum(axis=1) > 0)[0])])
self.col_hash = OrderedDict([(ix, j) for j, ix in
enumerate(np.where(self.R.sum(axis=0) > 0)[0])])
# get squashed data
self.R_ = self.R[self.row_hash.keys()][:, self.col_hash.keys()]
if X is not None: self.X_ = self.X[self.col_hash.keys()]
# squashed factors for inland
self.U_ = np.random.rand(self.R_.shape[0], self.k) * self.init
self.V_ =
|
np.random.rand(self.R_.shape[1], self.k)
|
numpy.random.rand
|
#!/usr/bin/env python3
#
# Copyright (c) 2016 MagicStack Inc.
# All rights reserved.
#
# See LICENSE for details.
##
import argparse
import asyncio
from concurrent import futures
import csv
import io
import json
import re
import sys
import time
import numpy as np
import uvloop
import aiopg
import asyncpg
import postgresql
import psycopg2
import psycopg2.extras
def psycopg_connect(args):
conn = psycopg2.connect(user=args.pguser, host=args.pghost,
port=args.pgport)
return conn
def psycopg_execute(conn, query, args):
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
cur.execute(query, args)
return len(cur.fetchall())
def psycopg_copy(conn, query, args):
rows, copy = args[:2]
f = io.StringIO()
writer = csv.writer(f, delimiter='\t')
for row in rows:
writer.writerow(row)
f.seek(0)
cur = conn.cursor()
cur.copy_from(f, copy['table'], columns=copy['columns'])
conn.commit()
return cur.rowcount
def pypostgresql_connect(args):
conn = postgresql.open(user=args.pguser, host=args.pghost,
port=args.pgport)
return conn
def pypostgresql_execute(conn, query, args):
stmt = conn.prepare(query)
return len(list(stmt.rows(*args)))
async def aiopg_connect(args):
conn = await aiopg.connect(user=args.pguser, host=args.pghost,
port=args.pgport)
return conn
async def aiopg_execute(conn, query, args):
cur = await conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
await cur.execute(query, args)
return len(await cur.fetchall())
aiopg_tuples_connect = aiopg_connect
async def aiopg_tuples_execute(conn, query, args):
cur = await conn.cursor()
await cur.execute(query, args)
return len(await cur.fetchall())
async def asyncpg_connect(args):
conn = await asyncpg.connect(user=args.pguser, host=args.pghost,
port=args.pgport)
return conn
async def asyncpg_execute(conn, query, args):
return len(await conn.fetch(query, *args))
async def asyncpg_copy(conn, query, args):
rows, copy = args[:2]
result = await conn.copy_records_to_table(
copy['table'], columns=copy['columns'], records=rows)
cmd, _, count = result.rpartition(' ')
return int(count)
async def worker(executor, eargs, start, duration, timeout):
queries = 0
rows = 0
latency_stats = np.zeros((timeout * 100,))
min_latency = float('inf')
max_latency = 0.0
while time.monotonic() - start < duration:
req_start = time.monotonic()
rows += await executor(*eargs)
req_time = round((time.monotonic() - req_start) * 1000 * 100)
if req_time > max_latency:
max_latency = req_time
if req_time < min_latency:
min_latency = req_time
latency_stats[req_time] += 1
queries += 1
return queries, rows, latency_stats, min_latency, max_latency
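# Small helper sketch (not part of the original benchmark): turn the latency
# histogram returned by worker() into a percentile. Index i of latency_stats
# counts requests whose round-trip took i / 100 milliseconds.
def latency_percentile(latency_stats, percentile):
    cumulative = np.cumsum(latency_stats)
    threshold = cumulative[-1] * percentile / 100.0
    return int(np.searchsorted(cumulative, threshold)) / 100.0  # milliseconds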
def sync_worker(executor, eargs, start, duration, timeout):
queries = 0
rows = 0
latency_stats =
|
np.zeros((timeout * 100,))
|
numpy.zeros
|
##
# This class represents a node within the network
#
import sys
import time
import warnings
from collections import deque
import numpy as np
import tensorflow as tf
from dgp_aepmcm.layers.gp_layer import GPLayer
from dgp_aepmcm.layers.input_layer import InputLayer
from dgp_aepmcm.layers.noise_layer import NoiseLayer
from dgp_aepmcm.layers.output_layer_classification import OutputLayerClassification
from dgp_aepmcm.layers.output_layer_regression import OutputLayerRegression
from .utils import (
ProblemType,
calculate_ETA_str,
extend_dimension_if_1d,
memory_used,
valid_q_initializations,
)
class DGPNetwork:
"""Creates a new Deep GP network using Approximate Expectation propagation and Monte Carlo Methods
Args:
x_train (ndarray): Training points (X)
y_train (ndarray): Training targets (y)
inducing_points (ndarray): If not None, initializations for the inducing points (Z) of the GP nodes
share_z_within_layer (Boolean): If True all the nodes in the same GP layer share
the same inducing points
share_kernel_params_within_layer (Boolean): If True all the nodes in the same GP layer
share the same kernel parameters but still using ARD kernel.
n_samples_training (int): Number of samples to use when training
n_samples_prediction (int): Number of samples to use when predicting
show_debug_info (Boolean): Show Epoch information when training
sacred_exp (): _run variable of sacred experiment information,
see: http://sacred.readthedocs.io/en/latest/collected_information.html
seed (int): Seed to use in random number generation functions
jitter (float): Jitter level to add to the diagonal of Kxx, bigger jitters improve numerical stability
minibatch_size (int): Minibatch size to use when initializing, training and predicting.
Smaller minibatches make the training use less memory.
dtype (type): Type to use for inputs (X) of the network. Either np.float32/np.float64.
float64 will make the network more stable but slower.
"""
def __init__(
self,
x_train,
y_train,
inducing_points=None,
share_z_within_layer=False,
share_kernel_params_within_layer=False,
n_samples_training=20,
n_samples_prediction=100,
show_debug_info=True,
sacred_exp=None,
seed=None,
jitter=1e-5,
minibatch_size=100,
dtype=np.float32,
):
# Sometimes the Tensorflow graph is not deleted when the class is destroyed.
tf.reset_default_graph()
self.seed = seed
self.dtype = dtype
if seed is not None:
print(f"Random seed set: {seed}")
tf.set_random_seed(seed)
np.random.seed(seed)
self.x_train = x_train
self.y_train = y_train
self.inducing_points = inducing_points
self.share_z = share_z_within_layer
self.share_kernel_params = share_kernel_params_within_layer
self.show_debug_info = show_debug_info
# To store sacred experiments data (_run dictionary).
# More info: https://sacred.readthedocs.io/en/latest/collected_information.html
self.sacred_exp = sacred_exp
self.x_train = extend_dimension_if_1d(self.x_train)
self.y_train = extend_dimension_if_1d(self.y_train)
self.n_points = self.x_train.shape[0]
self.problem_dim = self.x_train.shape[1]
# Minibatch size to use in the network and reduce memory usage.
self.minibatch_size = min(self.n_points, minibatch_size)
# Three possible values, regression, bin_classification, multi_classification
self.problem_type = None
self.jitter = jitter
if self.inducing_points is not None:
self.inducing_points = extend_dimension_if_1d(self.inducing_points)
assert (
self.inducing_points.shape[1] == self.x_train.shape[1]
), "The inducing points dimensions must be the same as the X dimensions"
self.inducing_points = self.inducing_points.astype(self.dtype)
self.z_running_tf = self.inducing_points
self.x_tf = tf.placeholder(
self.dtype, name="x_input", shape=[None, self.x_train.shape[1]]
)
# If targets are integer -> classification problem
# If targets are -1 and 1 -> binary classification
# If targets have values from 0, 1, 2,.. n_classes - 1 -> multiclass classification
if np.sum(np.mod(self.y_train, 1)) == 0:
# There are no decimals in y_train, so this is probably a classification problem
self.y_train = self.y_train.astype(np.int32)
if np.issubdtype(self.y_train.dtype, np.integer):
self.n_classes = np.max(self.y_train) + 1
# self.n_classes = len(np.unique(self.y_train)) # This one works even if the classes start at 1
y_type = tf.int32
if self.show_debug_info:
print(
f"Creating DGP network for classification problem with {self.n_classes} classes"
)
if self.n_classes == 2:
self.problem_type = ProblemType.BINARY_CLASSIFICATION
else:
self.problem_type = ProblemType.MULTICLASS_CLASSIFICATION
else:
if self.show_debug_info:
print(f"Creating DGP network for regression problem")
self.problem_type = ProblemType.REGRESSION
y_type = self.dtype
# TODO: merge these two placeholders into one, as in x_tf
self.y_train_tf = tf.placeholder(
y_type, name="y_training", shape=[None, self.y_train.shape[1]]
)
self.y_test_tf = tf.placeholder(
y_type, name="y_testing", shape=[None, self.y_train.shape[1]]
)
self.y_train_mean_tf = None
self.y_train_std_tf = None
self.layers = []
self.initialized = False
self._predict_function = None
self.session_saved = False
self.n_samples_dict = {
"training": n_samples_training, # num samples for training
"prediction": n_samples_prediction, # num samples for prediction
}
# Placeholder for the status of the network.
# 1 -> Training 0 -> Prediction
# Tells the network the right number of samples to use (training or prediction)
# and uses either the cavity (training) or the posterior (prediction) in the GP node
self.network_set_for_training_tf = tf.placeholder(
self.dtype, shape=(), name="network_set_for_training"
)
self.x_running_tf = tf.cast(self.x_train, self.dtype)
self.sess = tf.Session()
self.saver = None
self.objective_energy_function = None
self.trainable_params = None
self.gradient_optimization_step = None
def add_input_layer(self):
"""Adds an input layer to the network.
The input layer is in charge of replicating the x_train of shape (N,D) to shape (S,N,D)
"""
assert not self.layers, "Network should be empty"
with tf.variable_scope("Input_layer"):
new_layer = InputLayer(
self.x_tf,
self.problem_dim,
self.n_samples_dict,
self.network_set_for_training_tf,
)
self._stack_new_layer(new_layer)
def add_noise_layer(self, noise_initial_value=0.01):
"""Adds noise to the variance of the output of the layer
Args:
noise_initial_value (float): Initial value for the noise
"""
assert self.layers, "Network should have an input node"
# TODO: Reduce default noise?
new_layer = NoiseLayer(self.dtype, noise_initial_value)
self._stack_new_layer(new_layer, self.layers[-1])
def add_gp_layer(
self, n_inducing_points, n_nodes=1, q_initializations="random", W=None
):
"""Adds a Gaussian processes layer
Args:
n_inducing_points (int): Number of inducing points (Z)
n_nodes (int): Number of GP nodes of the layer, the number of nodes will be the output dim. of the layer
q_initializations (str): Initializations of the posterior approximation q(u) params. Valid values are:
'random' (default): Mean and covariance initialized from random normal.
'deterministic': Mean initialized to mean of the prior p(u) and cov. to 1e-5 * Kzz (1e-5 * prior cov)
'prior': Mean and cov. initialized to the prior covariance.
W (ndarray): Mean function weights of the GP m(x) = XW, if None, the identity matrix will be used
"""
if q_initializations not in valid_q_initializations():
raise ValueError(
f"initializations should take a value from {valid_q_initializations()}"
)
assert self.layers, "Network should have an input node"
with tf.variable_scope(f"Layer_{len(self.layers)}_GP"):
is_first_layer = len(self.layers) == 1
# The dim of the layer is the number of nodes in the last one
input_dim_layer = self.layers[-1].n_nodes
output_dim_layer = n_nodes
Z = None
if self.inducing_points is not None:
Z = tf.identity(self.z_running_tf)
# set mean function weights of the layer.
# W should have the same dimension [1] as the number of nodes in the layer
if W is None:
W = self._linear_mean_function(input_dim_layer, output_dim_layer)
else:
W = tf.cast(W, self.dtype)
self.x_running_tf = tf.matmul(self.x_running_tf, W)
if self.inducing_points is not None:
self.z_running_tf = self.z_running_tf @ W
assert W.shape == [input_dim_layer, output_dim_layer], (
f"The given mean weights must be of shape [input_d({input_dim_layer}), output_d({output_dim_layer})], "
f"Given: {W.shape}"
)
new_layer = GPLayer(
W=W,
n_inducing_points=n_inducing_points,
n_points=self.n_points,
n_nodes=output_dim_layer,
input_d=input_dim_layer,
first_gp_layer=is_first_layer,
jitter=self.jitter,
share_z=self.share_z,
share_kernel_params=self.share_kernel_params,
q_initializations=q_initializations,
z_initializations=Z,
seed=self.seed,
dtype=self.dtype,
)
self._stack_new_layer(new_layer, self.layers[-1])
def _linear_mean_function(self, input_dim_layer, output_dim_layer):
""" Sets the W for the mean function m(X) = XW
The last GP layer will have m(X) = 0. This method is based on:
Doubly Stochastic Variational Inference for Deep Gaussian Processes https://arxiv.org/abs/1705.08933
Args:
input_dim_layer (int): Input dimension to the layer. (Number of nodes in the last layer)
output_dim_layer (int): Dimension of the layer. (Number of nodes in the layer)
"""
if input_dim_layer == output_dim_layer:
W = tf.eye(output_dim_layer, dtype=self.dtype)
elif output_dim_layer > input_dim_layer:
zeros = tf.zeros(
(input_dim_layer, output_dim_layer - input_dim_layer), dtype=self.dtype
)
W = tf.concat([tf.eye(input_dim_layer, dtype=self.dtype), zeros], 1)
self.x_running_tf = tf.matmul(self.x_running_tf, W)
if self.inducing_points is not None:
self.z_running_tf = self.z_running_tf @ W
elif output_dim_layer < input_dim_layer:
_, _, V = tf.svd(self.x_running_tf)
# Using the first output_dim_layer values of the input X
W = tf.transpose(V[:output_dim_layer, :])
self.x_running_tf = tf.matmul(self.x_running_tf, W)
if self.inducing_points is not None:
self.z_running_tf = self.z_running_tf @ W
return W
def _set_mean_function_last_layer(self):
# Set the mean function of the last GP layer to zero
for layer in reversed(self.layers):
if isinstance(layer, GPLayer):
for node in layer.get_node_list():
node.W = tf.zeros_like(node.W)
return
def _stack_new_layer(self, new_layer, previous_layer=None):
# Previous layer should be None only when adding the input layer
if previous_layer is not None:
new_layer.stack_on_previous_layer(previous_layer)
self.layers.append(new_layer)
def add_output_layer_regression(self):
""" Add an output layer for regression to the network
This means that a Gaussian likelihood is used.
"""
assert self.layers, "Network should have an input node"
self._require_normalized_y()
with tf.variable_scope(f"Layer_{len(self.layers)}_Out"):
new_layer = OutputLayerRegression(
self.y_train_tf,
self.y_test_tf,
self.y_train_mean_tf,
self.y_train_std_tf,
self.n_samples_dict,
self.dtype,
)
new_layer.stack_on_previous_layer(self.layers[-1])
self.layers.append(new_layer)
def add_output_layer_binary_classification(self, use_norm_cdf=False):
"""Adds an output layer for binary classification to the network.
Args:
use_norm_cdf (Boolean): Add bias term (+1) to the variance of f^L (+0 if False).
if use_norm_cdf == True then likelihood p(y | f^L) will be norm.cdf(y_train * f^L)
if use_norm_cdf == False then likelihood p(y | f^L) will be heavyside(y_train * f^L)
"""
self._add_output_layer_classification(use_norm_cdf=use_norm_cdf)
def add_output_layer_multiclass_classification(
self, noise_in_labels=False, noise_in_labels_trainable=True
):
"""Adds an output layer for multiclass classification to the network.
Args:
noise_in_labels (Boolean): If true the likelihood will take into account
that there may be wrong labeled examples.
Using a robust multiclass likelihood (as in GPflow when using Multiclass likelihood).
noise_in_labels_trainable (Boolean): Specifies if the noise in labels is a trainable parameter.
Note: For fair comparison with DGP-VI it should be set to False,
for other tasks it should be set to True as it makes the network more robust.
This parameter is ignored is noise_in_labels=False.
"""
self._add_output_layer_classification(
noise_in_labels=noise_in_labels,
noise_in_labels_trainable=noise_in_labels_trainable,
)
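# Typical assembly sketch (illustrative comments only; the sizes and variables are
# hypothetical, not defined in this file):
# net = DGPNetwork(x_train, y_train, minibatch_size=100)
# net.add_input_layer()
# net.add_gp_layer(n_inducing_points=50, n_nodes=2)
# net.add_gp_layer(n_inducing_points=50, n_nodes=1)
# net.add_output_layer_binary_classification()
# net.train_via_adam(max_epochs=100, learning_rate=1e-3)
# pred, uncert = net.predict(x_test)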
def _add_output_layer_classification(
self, *, use_norm_cdf=False, noise_in_labels=False, noise_in_labels_trainable=True
):
""" Private function. Refer to either:
add_output_layer_binary_classification()
add_output_layer_multiclass_classification()
"""
assert self.layers, "Network should have an input node"
variance_bias = (
tf.constant(1.0, dtype=self.dtype)
if use_norm_cdf
else tf.constant(0.0, dtype=self.dtype)
)
with tf.variable_scope("Layer_{}_Out".format(len(self.layers))):
new_layer = OutputLayerClassification(
self.y_train_tf,
self.y_test_tf,
self.n_samples_dict,
self.n_classes,
variance_bias,
noise_in_labels,
noise_in_labels_trainable,
self.dtype,
)
new_layer.stack_on_previous_layer(self.layers[-1])
self.layers.append(new_layer)
def add_output_layer_regression_multioutput(self, n_outputs):
raise NotImplementedError()
# assert self.layers, "Network should have an input node"
# new_layer = OutputLayerRegressionMultioutput(self.y_train, n_outputs)
# new_layer.stack_on_previous_layer(self.layers[-1])
# self.layers.append(new_layer)
def _require_normalized_y(self):
# This function should be called when the network requires normalized observations
# (regression)
if self.y_train_mean_tf is None:
self.y_train_mean_tf = tf.placeholder(
self.dtype, name="y_train_mean", shape=(1,)
)
if self.y_train_std_tf is None:
self.y_train_std_tf = tf.placeholder(
self.dtype, name="y_train_std", shape=(1,)
)
def _initialize_network(self, learning_rate=1e-3):
assert len(self.layers) > 1
if self.initialized:
return
if self.show_debug_info:
print("Initializing network")
self._set_mean_function_last_layer()
# Do a forward pass through the network to 'connect the graph'
self.objective_energy_function = -self._get_network_energy()
# Params to optimize
self.trainable_params = self.get_params()
self.gradient_optimization_step = tf.train.AdamOptimizer(
learning_rate=learning_rate
).minimize(self.objective_energy_function, var_list=self.trainable_params)
self.sess.run(tf.global_variables_initializer())
# All inits operations remaining
tf_operations = []
ops_returned = None
for layer in self.layers:
with tf.control_dependencies(tf_operations):
ops_returned = layer.initialize_params_layer()
if ops_returned is not None:
tf_operations += ops_returned
# If minibatch size is smaller than N
# Use part of the data to initialize the network and be memory efficient
batch_indexes = np.random.choice(
self.n_points, min(int(self.minibatch_size), self.n_points), replace=False
)
self.sess.run(
tf_operations,
feed_dict={
self.x_tf: self.x_train[batch_indexes],
self.y_train_tf: self.y_train[batch_indexes],
self.network_set_for_training_tf: 1.0,
},
)
self._load_functions_to_graph()
self.initialized = True
def _load_functions_to_graph(self):
"""Load Symbolic tensorflow functions
"""
# Predict function
self._predict_function = self.layers[-1].get_predicted_values()
if self.problem_type == ProblemType.REGRESSION:
# TODO: Implement some of these for classification
# Calculate rmse function
self._rmse_likelihood_function = self.layers[-1].calculate_loglikehood_rmse()
# Sample from predictive dist.
self._sample_from_predictive_function = self.layers[
-1
].sample_from_predictive_distribution()
# Get PDF for point function
self.y_range_tf = tf.placeholder(
self.dtype, name="y_range", shape=[None, self.y_train.shape[1]]
)
self._pdf_function = (
self.layers[-1].get_predictive_distribution_fixed_x(self.y_range_tf),
)
if self.problem_type == ProblemType.BINARY_CLASSIFICATION:
self._log_likelihood_function = self.layers[-1].calculate_log_likelihood()
self._sample_from_last_layer = self.layers[-1].sample_from_latent()
if self.problem_type == ProblemType.MULTICLASS_CLASSIFICATION:
self._log_likelihood_function = self.layers[-1].calculate_log_likelihood()
self._init_saver()
def _get_network_energy(self):
"""Returns the tensorflow operation to calculate the energy of the network
The energy is the approximation to the marginal likelihood of the AEP algorithm
Returns:
Tensor -- Symbolic operation to calculate the energy
"""
energy = 0.0
for layer in self.layers:
layer.forward_pass_computations()
energy += layer.get_layer_contribution_to_energy()
return energy[0, 0]
def get_params(self):
"""Returns all trainable parameters of the network
Returns:
list -- List of Tensor, with all the parameters
"""
assert len(self.layers) > 1
if self.trainable_params is not None:
return self.trainable_params
params = []
for layer in self.layers:
params += layer.get_params()
return params
def train_via_adam(self, max_epochs=1000, learning_rate=1e-3, step_callback=None):
""" Finalizes the graph and trains the DGP AEPMCM network using Adam optimizer.
Args:
max_epochs (int): Maximum number of epochs to train for.
An epoch is a full pass through all the minibatches (whole dataset)
learning_rate (float): Learning rate to use. Default = 1e-3
step_callback (function): If set, function to call every gradient step.
This function should accept at least one parameter, the iteration number.
"""
assert len(self.layers) > 1
if self.show_debug_info:
print("Compiling adam updates")
self._initialize_network(learning_rate)
# self.sess.graph.finalize()
# Main loop of the optimization
n_batches = int(np.ceil(self.n_points / self.minibatch_size))
if self.show_debug_info:
print(
f"Training for {max_epochs} epochs, {max_epochs * n_batches} iterations"
)
sys.stdout.flush()
start = time.time()
# Object that keeps maxlen epoch times, for ETA prediction.
last_epoch_times = deque(maxlen=20)
for j in range(max_epochs):
shuffle = np.random.choice(self.n_points, self.n_points, replace=False)
shuffled_x_train = self.x_train[shuffle, :]
shuffled_y_train = self.y_train[shuffle, :]
avg_energy = 0.0
start_epoch = time.time()
for i in range(n_batches):
start_index = i * self.minibatch_size
end_index = min((i + 1) * self.minibatch_size, self.n_points)
minibatch_x = shuffled_x_train[start_index:end_index, :]
minibatch_y = shuffled_y_train[start_index:end_index, :]
current_energy = self.sess.run(
[self.gradient_optimization_step, self.objective_energy_function],
feed_dict={
self.x_tf: minibatch_x,
self.y_train_tf: minibatch_y,
self.network_set_for_training_tf: 1.0,
},
)[1]
if step_callback is not None:
step_callback(self, j * n_batches + i)
avg_energy += current_energy / (minibatch_x.shape[0] * n_batches)
elapsed_time_epoch = time.time() - start_epoch
last_epoch_times.append(elapsed_time_epoch)
if self.show_debug_info:
eta = calculate_ETA_str(last_epoch_times, j, max_epochs)
print(
"Epoch: {: <4}| Energy: {: <11.6f} | Time: {: >8.4f}s | Memory: {: >2.2f} GB | ETA: {}".format(
j, avg_energy, elapsed_time_epoch, memory_used(), eta
)
)
sys.stdout.flush()
if self.sacred_exp is not None:
self.sacred_exp.log_scalar("train.energy", round(avg_energy, 4))
elapsed_time = time.time() - start
if self.show_debug_info:
print("Total time: {}".format(elapsed_time))
# Log final energy to sacred
if self.sacred_exp is not None:
if self.sacred_exp.info.get("last_train_energies") is None:
self.sacred_exp.info.update(
{"last_train_energies": [round(avg_energy, 4)]}
)
else:
self.sacred_exp.info.get("last_train_energies").append(
round(avg_energy, 4)
)
def predict(self, x_test):
""" Returns predictions for a given x
Args:
x_test (ndarray): K x D matrix with locations for predictions.
With K the number of test points and D the dimension.
D should be the same as the one in the original training data.
"""
x_test = extend_dimension_if_1d(x_test)
assert x_test.shape[1] == self.problem_dim
x_test = x_test.astype(self.dtype)
# Use minibatches to predict
n_batches = int(np.ceil(x_test.shape[0] / self.minibatch_size))
pred, uncert = [], []
current_batch = 0
for x_test_batch in np.array_split(x_test, n_batches):
if self.show_debug_info and n_batches > 1:
current_batch += 1
print(f"Predicting batch {current_batch}/{n_batches}")
pred_batch, uncert_batch = self.sess.run(
self._predict_function,
feed_dict={
self.x_tf: x_test_batch,
self.network_set_for_training_tf: 0.0,
},
)
pred.append(pred_batch)
uncert.append(uncert_batch)
pred_uncert_values = np.concatenate(pred, 0), np.concatenate(uncert, 0)
return pred_uncert_values
def sample_from_predictive_distribution(self, x_locations):
assert x_locations.shape[1] == self.problem_dim
x_locations = x_locations.astype(self.dtype)
samples = self.sess.run(
self._sample_from_predictive_function,
feed_dict={self.x_tf: x_locations, self.network_set_for_training_tf: 0.0},
)
return samples
def get_predictive_distribution_for_x(self, x_value, y_range):
""" Returns the probability of each y value for a fixed x. p(y | x)
It returns the predictive distribution for a fixed x.
Useful to plot the PDF of the predictive distribution
Args:
x_value (ndarray): Single point to which calculate the PDF
y_range (ndarray): All the plausible y values to test. suggested: np.linspace()
"""
assert x_value.shape[1] == self.problem_dim
x_value = x_value.astype(self.dtype)
pdf = self.sess.run(
self._pdf_function,
feed_dict={
self.x_tf: x_value,
self.y_range_tf: y_range,
self.network_set_for_training_tf: 0.0,
},
)
return pdf[0]
def calculate_log_likelihood(
self, x_test, y_test, y_train_mean=None, y_train_std=None
):
if self.problem_type == ProblemType.REGRESSION:
raise NotImplementedError()
elif (
self.problem_type == ProblemType.BINARY_CLASSIFICATION
or self.problem_type == ProblemType.MULTICLASS_CLASSIFICATION
):
n_batches = int(np.ceil(x_test.shape[0] / self.minibatch_size))
lik = []
for X_batch, Y_batch in zip(
np.array_split(x_test, n_batches), np.array_split(y_test, n_batches)
):
l = self.sess.run(
self._log_likelihood_function,
feed_dict={
self.x_tf: X_batch,
self.y_test_tf: Y_batch,
self.network_set_for_training_tf: 0.0,
},
)
lik.append(l)
# (N, 1), still need to calculate the average likelihood over the whole dataset
lik = np.concatenate(lik, 0)
return np.mean(lik)
else:
raise NotImplementedError()
def save_model(self, path_to_save, name):
save_path = self.saver.save(self.sess, f"{path_to_save}/{name}.ckpt")
print(f"Model saved in path: {save_path}")
def restore_model(self, model_path, name):
if not self.initialized:
self._initialize_network()
self.saver.restore(self.sess, f"{model_path}/{name}.ckpt")
def _init_saver(self):
if self.saver is None:
self.saver = tf.train.Saver()
def calculate_loglikehood_rmse(self, x_test, y_test, y_train_mean, y_train_std):
# TODO: As we will normally want log likelihood for classification too
# this function should be separated.
# The calculate_log_likelihood part is valid for all kinds of problems,
# while the RMSE part is valid just for regression.
# We expect unnormalized y_test
if not np.allclose(
|
np.mean(x_test)
|
numpy.mean
|
import os
import pandas as pd
import numpy as np
import random
from human_ISH_config import *
import h5py
import time
from shutil import copyfile
import operator
import matplotlib.pyplot as plt
import math
import json
random.seed(1)
def get_stats(images_info_df):
"""
Uses the images_info_df and calculates some stats.
:param images_info_df: pandas dataframe that has the information of all images
:return: a dictionary containing stats.
"""
stats_dict = {'image_count':None, 'donor_count':None, 'female_donor_count':None, 'male_donor_count':None,
'unique_genes_count': None, 'unique_entrez_id_count' : None}
image_id_list = images_info_df['image_id']
gene_symbol_list = images_info_df['gene_symbol']
entrez_id_list = images_info_df['entrez_id']
experiment_id_list = images_info_df['experiment_id']
specimen_id_list = images_info_df['specimen_id']
donor_id_list = images_info_df['donor_id']
donor_sex_list = images_info_df['donor_sex']
female_donors = images_info_df[images_info_df['donor_sex'] == 'F']
male_donors = images_info_df[images_info_df['donor_sex'] == 'M']
# -----------
# How many donors does this study have? How many are female and how many are male?
donors_count = len(set(images_info_df['donor_id']))
print ("Total number of donors: {}".format(donors_count))
female_donors_count = len(set(female_donors['donor_id']))
print("Number of female donors: {}".format(female_donors_count))
male_donors_count = len(set(male_donors['donor_id']))
print("Number of male donors: {}".format(male_donors_count))
if female_donors_count + male_donors_count != donors_count:
print ("something is not right about the number of female and male donors ...")
# -----------
# How many unique genes does this study include?
gene_count = len(set(gene_symbol_list))
print ("Number of unique genes: {}".format(gene_count))
entrez_id_count = len(set(entrez_id_list))
print("Number of unique entrez IDs: {}".format(entrez_id_count))
if entrez_id_count != gene_count:
print ("something is not right. The number of unique genes should be equal to the number of unique entrez IDs")
# -----------
# How many genes have been tested from each donor.
# How many images do we have from each donor.
group_by_donor = images_info_df.groupby('donor_id')
unique_gene_count_per_donor_list = []
unique_image_count_per_donor_list = []
for key, item in group_by_donor:
this_group_genes = group_by_donor.get_group(key)['gene_symbol']
this_group_images = group_by_donor.get_group(key)['image_id']
unique_gene_count_per_donor_list.append(len(set(this_group_genes)))
unique_image_count_per_donor_list.append(len(set(this_group_images)))
print("Minimum number of unique genes from a donor: {}".format(min(unique_gene_count_per_donor_list)))
print("Maximum number of unique genes from a donor: {}".format(max(unique_gene_count_per_donor_list)))
print("Average number of unique genes from a donor: {}".format(np.mean(unique_gene_count_per_donor_list)))
print("Minimum number of images from a donor: {}".format(min(unique_image_count_per_donor_list)))
print("Maximum number of images from a donor: {}".format(max(unique_image_count_per_donor_list)))
print("Average number of images from a donor: {}".format(
|
np.mean(unique_image_count_per_donor_list)
|
numpy.mean
|
from typing import Tuple, List, Callable, Iterable, Union, Dict
import numpy as np
from savageml.utility import ActivationFunctions, ActivationFunctionsDerivatives
from savageml.utility import LossFunctions, LossFunctionDerivatives
from savageml.models import BaseModel
from savageml.utility import get_sample_from_iterator, batch_iterator, \
batch_np_array
class LayerlessSparseNetModel(BaseModel):
"""A Layerless neural network, with sparsely packed hidden wights
The layerless networks are meant to be able to represent various networks with non-standard shapes.
They can take any network shape that is not cyclical.
The sparse model has 4 sets of weights:
* Input to Output
* Input to Hidden
* Hidden to Hidden, limited to above the diagonal, everything at or below the diagonal must be 0
* Hidden to Output
The equations for the layers are as follows:
* :math:`H = \\sigma ([I \oplus 1 ] * W_{ih} + H * W_{hh})` This needs to be repeated until stable
* :math:`O = \\sigma ([I \oplus 1 ] * W_{io} + H * W_{ho})`
+-------------------+------------------------------------+
| Symbol | Meaning |
+===================+====================================+
| :math:`W_{io}` | Input to output weights |
+-------------------+------------------------------------+
| :math:`W_{ih}` | Input to hidden weights |
+-------------------+------------------------------------+
| :math:`W_{hh}` | Hidden to hidden weights |
+-------------------+------------------------------------+
| :math:`W_{ho}` | Hidden to output weights |
+-------------------+------------------------------------+
| :math:`\\sigma` | The activation function |
+-------------------+------------------------------------+
| :math:`H` | The hidden nodes for the network |
+-------------------+------------------------------------+
| :math:`I` | The input to the network |
+-------------------+------------------------------------+
| :math:`O` | The output of the network |
+-------------------+------------------------------------+
Parameters
----------
input_dimension
The number of input nodes in the network
hidden_dimension
The number of hidden nodes in the network
output_dimension
The number of output nodes in the network
weight_range: Tuple[float, float], optional
The minimum and maximum values for randomly generated weight values
activation_function: Callable, optional
The activation function for the network. Defaults to sigmoid.
Remember to also set the activation derivative if you want the model to learn
activation_derivative: Callable, optional
The derivative of the activation function for the network.
This is used in backpropagation.
Defaults to derivative of a sigmoid.
Remember to also set the activation function if you want the model to learn
loss_function: Callable, optional
The loss function of network, used to compare predictions to expected values.
Defaults to Mean Squared Error.
Remember to also set the loss derivative, or the network will not learn properly.
loss_function_derivative: Callable, optional
The derivative of the loss function of network, used in backpropagation.
Defaults to the derivative of mean squared error.
input_output_weights - np.ndarray, optional
The values of the input to output weights, if no value is supplied, randomly generated weights will be created.
input_output_connections - np.ndarray, optional
The connections of the input to output weights, if no value is supplied, all possible connections will be marked.
input_hidden_weights - np.ndarray, optional
The values of the input to hidden weights, if no value is supplied, randomly generated weights will be created.
input_hidden_connections - np.ndarray, optional
The connections of the input to hidden weights, if no value is supplied, all possible connections will be marked.
hidden_hidden_weights - np.ndarray, optional
The values of the hidden to hidden weights, if no value is supplied, randomly generated weights will be created.
hidden_hidden_connections - np.ndarray, optional
The connections of the hidden to hidden weights, if no value is supplied,
all possible (forward facing) connections will be marked.
hidden_output_weights - np.ndarray, optional
The values of the hidden to output weights, if no value is supplied, randomly generated weights will be created.
hidden_output_connections - np.ndarray, optional
The connections of the hidden to output weights, if no value is supplied, all possible connections will be marked.
"""
output_dimension: int
hidden_dimension: int
bias_dimension: int = 1
input_dimension: int
loss_function: Callable
loss_function_derivative: Callable
activation_function: Callable
activation_derivative: Callable
weight_range: Tuple[float, float]
input_output_connections: np.ndarray
input_output_weights: np.ndarray
input_hidden_connections: np.ndarray
input_hidden_weights: np.ndarray
hidden_hidden_connections: np.ndarray
hidden_hidden_weights: np.ndarray
hidden_output_connections: np.ndarray
hidden_output_weights: np.ndarray
def __init__(self,
input_dimension: int,
hidden_dimension: int,
output_dimension: int,
weight_range: Tuple[float, float] = (-2.0, 2.0),
activation_function: Callable = ActivationFunctions.SIGMOID,
activation_derivative: Callable = ActivationFunctionsDerivatives.SIGMOID_DERIVATIVE,
loss_function=LossFunctions.MSE,
loss_function_derivative=LossFunctionDerivatives.MSE_DERIVATIVE,
input_output_connections: np.array = None,
input_output_weights: np.array = None,
input_hidden_connections: np.array = None,
input_hidden_weights: np.array = None,
hidden_hidden_connections: np.array = None,
hidden_hidden_weights: np.array = None,
hidden_output_connections: np.array = None,
hidden_output_weights: np.array = None,
**kwargs):
"""Constructor Method"""
super().__init__(**kwargs)
self.output_dimension = output_dimension
self.hidden_dimension = hidden_dimension
self.bias_dimension = 1
self.input_dimension = input_dimension
self.loss_function = loss_function
self.loss_function_derivative = loss_function_derivative
self.activation_function = activation_function
self.activation_derivative = activation_derivative
self.weight_range = weight_range
self.input_output_connections: np.ndarray = input_output_connections
self.input_output_weights: np.ndarray = input_output_weights
self.input_hidden_connections: np.ndarray = input_hidden_connections
self.input_hidden_weights: np.ndarray = input_hidden_weights
self.hidden_hidden_connections: np.ndarray = hidden_hidden_connections
self.hidden_hidden_weights: np.ndarray = hidden_hidden_weights
self.hidden_output_connections: np.ndarray = hidden_output_connections
self.hidden_output_weights: np.ndarray = hidden_output_weights
# Working on input output
if self.input_output_weights is None:
shape = (self.bias_dimension + self.input_dimension, self.output_dimension)
weight_array = np.random.random(shape) * (self.weight_range[1] -
self.weight_range[0]) + self.weight_range[0]
self.input_output_weights = weight_array
if self.input_output_connections is None:
self.input_output_connections = np.ones_like(self.input_output_weights)
self.input_output_weights = self.input_output_weights * self.input_output_connections
# Working on input hidden
if self.input_hidden_weights is None:
shape = (self.bias_dimension + self.input_dimension, self.hidden_dimension)
weight_array = np.random.random(shape) * (self.weight_range[1] -
self.weight_range[0]) + self.weight_range[0]
self.input_hidden_weights = weight_array
if self.input_hidden_connections is None:
self.input_hidden_connections =
|
np.ones_like(self.input_hidden_weights)
|
numpy.ones_like
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 12:13:33 2018
@author: <NAME> (<EMAIL> / <EMAIL>)
"""
#Python dependencies
from __future__ import division
import pandas as pd
import numpy as np
from scipy.constants import codata
from pylab import *
from scipy.optimize import curve_fit
import mpmath as mp
from lmfit import minimize, Minimizer, Parameters, Parameter, report_fit
#from scipy.optimize import leastsq
pd.options.mode.chained_assignment = None
#Plotting
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import seaborn as sns
import matplotlib.ticker as mtick
mpl.rc('mathtext', fontset='stixsans', default='regular')
mpl.rcParams.update({'axes.labelsize':22})
mpl.rc('xtick', labelsize=16)
mpl.rc('ytick', labelsize=16)
mpl.rc('legend',fontsize=14)
from scipy.constants import codata
F = codata.physical_constants['Faraday constant'][0]
Rg = codata.physical_constants['molar gas constant'][0]
### Importing PyEIS add-ons
from .PyEIS_Data_extraction import *
from .PyEIS_Lin_KK import *
from .PyEIS_Advanced_tools import *
### Frequency generator
##
#
def freq_gen(f_start, f_stop, pts_decade=7):
'''
Frequency Generator with logspaced frequencies
Inputs
----------
f_start = frequency start [Hz]
f_stop = frequency stop [Hz]
pts_decade = Points/decade, default 7 [-]
Output
----------
[0] = frequency range [Hz]
[1] = Angular frequency range [1/s]
'''
f_decades = np.log10(f_start) - np.log10(f_stop)
f_range = np.logspace(np.log10(f_start), np.log10(f_stop), num=np.around(pts_decade*f_decades).astype(int), endpoint=True)
w_range = 2 * np.pi * f_range
return f_range, w_range
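# Example usage (a sketch; the bounds are arbitrary placeholders): a logarithmically
# spaced sweep from 100 kHz down to 10 mHz with 7 points per decade.
if __name__ == "__main__":
    f_example, w_example = freq_gen(f_start=1e5, f_stop=1e-2, pts_decade=7)
    print(len(f_example), f_example[0], f_example[-1])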
### Simulation Element Functions
##
#
def elem_L(w, L):
'''
Simulation Function: -L-
Returns the impedance of an inductor
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
L = Inductance [ohm * s]
'''
return 1j*w*L
def elem_C(w,C):
'''
Simulation Function: -C-
Inputs
----------
w = Angular frequency [1/s]
C = Capacitance [F]
'''
return 1/(C*(w*1j))
def elem_Q(w,Q,n):
'''
Simulation Function: -Q-
Inputs
----------
w = Angular frequency [1/s]
Q = Constant phase element [s^n/ohm]
n = Constant phase element exponent [-]
'''
return 1/(Q*(w*1j)**n)
### Simulation Circuit Functions
##
#
def cir_RsC(w, Rs, C):
'''
Simulation Function: -Rs-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
C = Capacitance [F]
'''
return Rs + 1/(C*(w*1j))
def cir_RsQ(w, Rs, Q, n):
'''
Simulation Function: -Rs-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
'''
return Rs + 1/(Q*(w*1j)**n)
def cir_RQ(w, R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -RQ-
    Return the impedance of an RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
return (R/(1+R*Q*(w*1j)**n))
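# Example (hypothetical values): an (RQ) element can be defined either by (R, Q, n) or by
# (R, n, fs), since the summit frequency obeys R*Q*(2*np.pi*fs)**n = 1. The two calls below
# are therefore equivalent for R = 100 ohm, n = 0.8 and fs = 1000 Hz.
# Z_rq_a = cir_RQ(w_example, R=100, n=0.8, fs=1000)
# Z_rq_b = cir_RQ(w_example, R=100, Q=1/(100*(2*np.pi*1000)**0.8), n=0.8)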
def cir_RsRQ(w, Rs='none', R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -Rs-RQ-
Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
return Rs + (R/(1+R*Q*(w*1j)**n))
def cir_RC(w, C='none', R='none', fs='none'):
'''
Simulation Function: -RC-
    Returns the impedance of an RC circuit, using the RQ definitions with n=1. See cir_RQ() for details
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
R = Resistance [Ohm]
C = Capacitance [F]
fs = Summit frequency of RC circuit [Hz]
'''
return cir_RQ(w, R=R, Q=C, n=1, fs=fs)
def cir_RsRQRQ(w, Rs, R='none', Q='none', n='none', fs='none', R2='none', Q2='none', n2='none', fs2='none'):
'''
Simulation Function: -Rs-RQ-RQ-
    Return the impedance of an Rs-RQ-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [Ohm]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
R2 = Resistance [Ohm]
Q2 = Constant phase element [s^n/ohm]
n2 = Constant phase element exponent [-]
fs2 = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if R2 == 'none':
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
elif Q2 == 'none':
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
elif n2 == 'none':
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
return Rs + (R/(1+R*Q*(w*1j)**n)) + (R2/(1+R2*Q2*(w*1j)**n2))
def cir_RsRQQ(w, Rs, Q, n, R1='none', Q1='none', n1='none', fs1='none'):
'''
Simulation Function: -Rs-RQ-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
Q1 = Constant phase element in (RQ) circuit [s^n/ohm]
    n1 = Constant phase element exponent in (RQ) circuit [-]
    fs1 = Summit frequency of RQ circuit [Hz]
    Q = Constant phase element of series Q [s^n/ohm]
    n = Constant phase element exponent of series Q [-]
'''
return Rs + cir_RQ(w, R=R1, Q=Q1, n=n1, fs=fs1) + elem_Q(w,Q,n)
def cir_RsRQC(w, Rs, C, R1='none', Q1='none', n1='none', fs1='none'):
'''
Simulation Function: -Rs-RQ-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
Q1 = Constant phase element in (RQ) circuit [s^n/ohm]
    n1 = Constant phase element exponent in (RQ) circuit [-]
    fs1 = Summit frequency of RQ circuit [Hz]
    C = Capacitance of the series C [F]
'''
return Rs + cir_RQ(w, R=R1, Q=Q1, n=n1, fs=fs1) + elem_C(w, C=C)
def cir_RsRCC(w, Rs, R1, C1, C):
'''
Simulation Function: -Rs-RC-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
C1 = Constant phase element in (RQ) circuit [s^n/ohm]
    C = Capacitance of the series C [F]
'''
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_C(w, C=C)
def cir_RsRCQ(w, Rs, R1, C1, Q, n):
'''
Simulation Function: -Rs-RC-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
C1 = Constant phase element in (RQ) circuit [s^n/ohm]
Q = Constant phase element of series Q [s^n/ohm]
    n = Constant phase element exponent of series Q [-]
'''
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_Q(w,Q,n)
def Randles_coeff(w, n_electron, A, E='none', E0='none', D_red='none', D_ox='none', C_red='none', C_ox='none', Rg=Rg, F=F, T=298.15):
'''
Returns the Randles coefficient sigma [ohm/s^1/2].
    Two cases: a) both Ox and Red are present in solution, so C_red and D_red are both defined; b) in the particular case where initially
    only the Ox species is present in solution with bulk concentration C*_ox, the surface concentrations may be calculated as a function
    of the electrode potential following the Nernst equation. In that case C_red and D_red == 'none'
Ref.:
- <NAME>., ISBN: 978-1-4614-8932-0, "Electrochemical Impedance Spectroscopy and its Applications"
- <NAME>., ISBN: 0-471-04372-9, <NAME>. R. (2001) "Electrochemical methods: Fundamentals and applications". New York: Wiley.
<NAME> (<EMAIL> // <EMAIL>)
Inputs
----------
n_electron = number of e- [-]
A = geometrical surface area [cm2]
    D_ox = Diffusion coefficient of oxidized species [cm2/s]
    D_red = Diffusion coefficient of reduced species [cm2/s]
    C_ox = Bulk concentration of oxidized species [mol/cm3]
    C_red = Bulk concentration of reduced species [mol/cm3]
    T = Temperature [K]
    Rg = Gas constant [J/molK]
    F = Faraday's constant [C/mol]
E = Potential [V]
if reduced specie is absent == 'none'
E0 = formal potential [V]
if reduced specie is absent == 'none'
Returns
----------
Randles coefficient [ohm/s^1/2]
'''
if C_red != 'none' and D_red != 'none':
sigma = ((Rg*T) / ((n_electron**2) * A * (F**2) * (2**(1/2)))) * ((1/(D_ox**(1/2) * C_ox)) + (1/(D_red**(1/2) * C_red)))
elif C_red == 'none' and D_red == 'none' and E!='none' and E0!= 'none':
f = F/(Rg*T)
x = (n_electron*f*(E-E0))/2
func_cosh2 = (np.cosh(2*x)+1)/2
sigma = ((4*Rg*T) / ((n_electron**2) * A * (F**2) * C_ox * ((2*D_ox)**(1/2)) )) * func_cosh2
    else:
        raise ValueError('Randles_coeff() requires either (C_red, D_red) or (E, E0) to be given')
Z_Aw = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Z_Aw
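# Example (hypothetical values): Warburg impedance built from the Randles coefficient for a
# one-electron couple with both species present in solution (case a), so E and E0 are not needed.
# Z_Aw_example = Randles_coeff(w_example, n_electron=1, A=1.0, D_red=1e-6, D_ox=1e-6,
#                              C_red=1e-6, C_ox=1e-6)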
def cir_Randles(w, n_electron, D_red, D_ox, C_red, C_ox, Rs, Rct, n, E, A, Q='none', fs='none', E0=0, F=F, Rg=Rg, T=298.15):
'''
Simulation Function: Randles -Rs-(Q-(RW)-)-
    Return the impedance of a Randles circuit with the full complexity of the Warburg constant
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
n_electron = number of e- [-]
A = geometrical surface area [cm2]
    D_ox = Diffusion coefficient of oxidized species [cm2/s]
    D_red = Diffusion coefficient of reduced species [cm2/s]
    C_ox = Concentration of oxidized species [mol/cm3]
    C_red = Concentration of reduced species [mol/cm3]
    T = Temperature [K]
    Rg = Gas constant [J/molK]
    F = Faraday's constant [C/mol]
E = Potential [V]
if reduced specie is absent == 'none'
E0 = Formal potential [V]
if reduced specie is absent == 'none'
Rs = Series resistance [ohm]
Rct = charge-transfer resistance [ohm]
Q = Constant phase element used to model the double-layer capacitance [F]
    n = exponent of the CPE [-]
Returns
----------
The real and imaginary impedance of a Randles circuit [ohm]
'''
Z_Rct = Rct
Z_Q = elem_Q(w,Q,n)
Z_w = Randles_coeff(w, n_electron=n_electron, E=E, E0=E0, D_red=D_red, D_ox=D_ox, C_red=C_red, C_ox=C_ox, A=A, T=T, Rg=Rg, F=F)
return Rs + 1/(1/Z_Q + 1/(Z_Rct+Z_w))
def cir_Randles_simplified(w, Rs, R, n, sigma, Q='none', fs='none'):
'''
Simulation Function: Randles -Rs-(Q-(RW)-)-
Return the impedance of a Randles circuit with a simplified
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> / <EMAIL>)
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
Z_Q = 1/(Q*(w*1j)**n)
Z_R = R
Z_w = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Rs + 1/(1/Z_Q + 1/(Z_R+Z_w))
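# Example (hypothetical values): simplified Randles cell with Rs = 10 ohm, Rct = 100 ohm, an
# ideal double layer (Q = 1e-5, n = 1) and a Warburg coefficient sigma = 50 ohm*s^(-1/2).
# Z_randles_example = cir_Randles_simplified(w_example, Rs=10, R=100, n=1, sigma=50, Q=1e-5)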
# Polymer electrolytes
def cir_C_RC_C(w, Ce, Cb='none', Rb='none', fsb='none'):
'''
Simulation Function: -C-(RC)-C-
    This circuit is often used for modeling blocking electrodes with a polymeric electrolyte, which exhibits an immobile ionic species in bulk that gives a capacitance contribution
to the otherwise resistive electrolyte
Ref:
- <NAME>., and <NAME>. "Polymer Electrolyte Reviews - 1" Elsevier Applied Science Publishers LTD, London, Bruce, P. "Electrical Measurements on Polymer Electrolytes"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Ce = Interfacial capacitance [F]
Rb = Bulk/series resistance [Ohm]
Cb = Bulk capacitance [F]
fsb = summit frequency of bulk (RC) circuit [Hz]
'''
Z_C = elem_C(w,C=Ce)
Z_RC = cir_RC(w, C=Cb, R=Rb, fs=fsb)
return Z_C + Z_RC
def cir_Q_RQ_Q(w, Qe, ne, Qb='none', Rb='none', fsb='none', nb='none'):
'''
Simulation Function: -Q-(RQ)-Q-
Modified cir_C_RC_C() circuits that can be used if electrodes and bulk are not behaving like ideal capacitors
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Qe = Interfacial capacitance modeled with a CPE [F]
ne = Interfacial constant phase element exponent [-]
Rb = Bulk/series resistance [Ohm]
Qb = Bulk capacitance modeled with a CPE [s^n/ohm]
nb = Bulk constant phase element exponent [-]
fsb = summit frequency of bulk (RQ) circuit [Hz]
'''
Z_Q = elem_Q(w,Q=Qe,n=ne)
Z_RQ = cir_RQ(w, Q=Qb, R=Rb, fs=fsb, n=nb)
return Z_Q + Z_RQ
def tanh(x):
'''
    As numpy gives errors when the argument of tanh becomes very large (above ~10^250 the intermediate exponentials overflow), this function is used instead of np.tanh
'''
return (1-np.exp(-2*x))/(1+np.exp(-2*x))
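# Note: the identity tanh(x) = (1 - exp(-2x))/(1 + exp(-2x)) only evaluates exp(-2x), so it
# stays finite for arguments with a large positive real part where exp(x) itself would
# overflow. A quick (hypothetical) sanity check against numpy for a moderate argument:
# assert np.isclose(tanh(1.0 + 1.0j), np.tanh(1.0 + 1.0j))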
def cir_RCRCZD(w, L, D_s, u1, u2, Cb='none', Rb='none', fsb='none', Ce='none', Re='none', fse='none'):
'''
Simulation Function: -RC_b-RC_e-Z_D
    This circuit has been used to study non-blocking electrodes with an ionically conducting electrolyte that contains both a mobile and an immobile ionic species in bulk,
    mixed with an ionically conducting salt. This behavior yields an impedance response that consists of the interfacial impedance -(RC_e)-, the ionically conducting polymer bulk -(RC_b)-,
    and the diffusional impedance from the dissolved salt.
Refs.:
- <NAME>. and <NAME>., Electrochimica Acta, 27, 1671-1675, 1982, "Conductivity, Charge Transfer and Transport number - An AC-Investigation
of the Polymer Electrolyte LiSCN-Poly(ethyleneoxide)"
- <NAME>., and <NAME>. "Polymer Electrolyte Reviews - 1" Elsevier Applied Science Publishers LTD, London
Bruce, P. "Electrical Measurements on Polymer Electrolytes"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
L = Thickness of electrode [cm]
D_s = Diffusion coefficient of dissolved salt [cm2/s]
u1 = Mobility of the ion reacting at the electrode interface
u2 = Mobility of other ion
Re = Interfacial resistance [Ohm]
Ce = Interfacial capacitance [F]
fse = Summit frequency of the interfacial (RC) circuit [Hz]
Rb = Bulk/series resistance [Ohm]
Cb = Bulk capacitance [F]
fsb = Summit frequency of the bulk (RC) circuit [Hz]
'''
Z_RCb = cir_RC(w, C=Cb, R=Rb, fs=fsb)
Z_RCe = cir_RC(w, C=Ce, R=Re, fs=fse)
alpha = ((w*1j*L**2)/D_s)**(1/2)
Z_D = Rb * (u2/u1) * (tanh(x=alpha)/alpha)
return Z_RCb + Z_RCe + Z_D
# Transmission lines
def cir_RsTLsQ(w, Rs, L, Ri, Q='none', n='none'):
'''
Simulation Function: -Rs-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
Q = Interfacial capacitance of non-faradaic interface [F/cm]
n = exponent for the interfacial capacitance [-]
'''
Phi = 1/(Q*(w*1j)**n)
X1 = Ri # ohm/cm
Lam = (Phi/X1)**(1/2) #np.sqrt(Phi/X1)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_TLsQ
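# Example (hypothetical values): simplified blocking-electrode transmission line for a 10 um
# thick porous film with Ri = 100 ohm/cm and a CPE-like interface (Q = 1e-5, n = 0.95).
# Z_tlsq_example = cir_RsTLsQ(w_example, Rs=5, L=10e-4, Ri=100, Q=1e-5, n=0.95)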
def cir_RsRQTLsQ(w, Rs, R1, fs1, n1, L, Ri, Q, n, Q1='none'):
'''
Simulation Function: -Rs-RQ-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance(Q)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = Exponent for RQ circuit [-]
Q1 = Constant phase element of RQ circuit [s^n/ohm]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
Q = Interfacial capacitance of non-faradaic interface [F/cm]
n = Exponent for the interfacial capacitance [-]
Output
-----------
    Impedance of Rs-(RQ)1-TLsQ
'''
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
Phi = 1/(Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLsQ
def cir_RsTLs(w, Rs, L, Ri, R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -Rs-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
R = Interfacial Charge transfer resistance [ohm*cm]
fs = Summit frequency of interfacial RQ circuit [Hz]
n = Exponent for interfacial RQ circuit [-]
Q = Constant phase element of interfacial capacitance [s^n/Ohm]
Output
-----------
Impedance of Rs-TLs(RQ)
'''
Phi = cir_RQ(w, R, Q, n, fs)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_TLs
def cir_RsRQTLs(w, Rs, L, Ri, R1, n1, fs1, R2, n2, fs2, Q1='none', Q2='none'):
'''
Simulation Function: -Rs-RQ-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- Bisquert J. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = Exponent for RQ circuit [-]
Q1 = Constant phase element of RQ circuit [s^n/(ohm * cm)]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
R2 = Interfacial Charge transfer resistance [ohm*cm]
fs2 = Summit frequency of interfacial RQ circuit [Hz]
n2 = Exponent for interfacial RQ circuit [-]
Q2 = Constant phase element of interfacial capacitance [s^n/Ohm]
Output
-----------
Impedance of Rs-(RQ)1-TLs(RQ)2
'''
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
Phi = cir_RQ(w=w, R=R2, Q=Q2, n=n2, fs=fs2)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLs
### Support function
def sinh(x):
'''
    As numpy gives errors when the argument of sinh becomes very large (above ~10^250), this function is used instead of np/mp.sinh()
'''
return (1 - np.exp(-2*x))/(2*np.exp(-x))
def coth(x):
'''
    As numpy gives errors when the argument of coth becomes very large (above ~10^250), this function is used instead of np/mp.coth()
'''
return (1 + np.exp(-2*x))/(1 - np.exp(-2*x))
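# Note: like tanh() above, these helpers only evaluate exp(-x) terms, so coth(x) tends
# smoothly to 1 for arguments with a large positive real part instead of the inf/inf = nan
# that a cosh(x)/sinh(x) evaluation would give, and 1/sinh(x) decays to 0, which is the
# limit needed in the transmission-line expressions below.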
###
def cir_RsTLQ(w, L, Rs, Q, n, Rel, Ri):
'''
Simulation Function: -R-TLQ- (interfacial non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- Bisquert J. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
    n = exponent for interfacial RQ element [-]
    Rel = electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTLQ(w, L, Rs, Q, n, Rel, Ri, R1, n1, fs1, Q1='none'):
'''
Simulation Function: -R-RQ-TLQ- (interfacial non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = exponent for RQ circuit [-]
Q1 = constant phase element of RQ circuit [s^n/(ohm * cm)]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
    n = exponent for interfacial RQ element [-]
    Rel = electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
Z_RQ1 = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL(w, L, Rs, R, fs, n, Rel, Ri, Q='none'):
'''
Simulation Function: -R-TL- (interfacial reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R = Interfacial charge transfer resistance [ohm * cm]
fs = Summit frequency for the interfacial RQ element [Hz]
    n = Exponent for interfacial RQ element [-]
    Q = Constant phase element for the interfacial capacitance [s^n/ohm]
    Rel = Electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = Thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = cir_RQ(w, R=R, Q=Q, n=n, fs=fs)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL(w, L, Rs, R1, fs1, n1, R2, fs2, n2, Rel, Ri, Q1='none', Q2='none'):
'''
Simulation Function: -R-RQ-TL- (interfacial reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = exponent for RQ circuit [-]
Q1 = constant phase element of RQ circuit [s^n/(ohm * cm)]
R2 = interfacial charge transfer resistance [ohm * cm]
fs2 = Summit frequency for the interfacial RQ element [Hz]
    n2 = exponent for interfacial RQ element [-]
    Q2 = Constant phase element for the interfacial capacitance [s^n/ohm]
    Rel = electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
Z_RQ1 = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = cir_RQ(w, R=R2, Q=Q2, n=n2, fs=fs2)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
# Transmission lines with solid-state transport
def cir_RsTL_1Dsolid(w, L, D, radius, Rs, R, Q, n, R_w, n_w, Rel, Ri):
'''
Simulation Function: -R-TL(Q(RW))-
Transmission line w/ full complexity, which both includes Ri and Rel
Warburg element is specific for 1D solid-state diffusion
Refs:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Illig, J., Physically based Impedance Modelling of Lithium-ion Cells, KIT Scientific Publishing (2014)
- Scipioni, et al., ECS Transactions, 69 (18) 71-80 (2015)
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R = particle charge transfer resistance [ohm*cm^2]
    Q = Constant phase element in the modified Randles element of a particle [s^n/ohm]
    n = exponent for internal RQ element in the modified Randles element of a particle [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = ionic resistance of solution in flooded pores of electrode [ohm/cm]
R_w = polarization resistance of finite diffusion Warburg element [ohm]
n_w = exponent for Warburg element [-]
L = thickness of porous electrode [cm]
D = solid-state diffusion coefficient [cm^2/s]
radius = average particle radius [cm]
Output
--------------
Impedance of Rs-TL(Q(RW))
'''
#The impedance of the series resistance
Z_Rs = Rs
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R
Z_Q = elem_Q(w,Q=Q,n=n)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
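# Example (hypothetical values): porous battery-type electrode with 1D solid-state diffusion
# in ~1 um particles (D = 1e-12 cm2/s), a 50 um thick coating and comparable electronic and
# ionic rail resistances. Note that the Warburg term loops over w with mpmath, so long
# frequency vectors make this function comparatively slow.
# Z_tl1d_example = cir_RsTL_1Dsolid(w_example, L=50e-4, D=1e-12, radius=1e-4, Rs=5, R=50,
#                                   Q=1e-4, n=0.9, R_w=20, n_w=0.5, Rel=10, Ri=100)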
def cir_RsRQTL_1Dsolid(w, L, D, radius, Rs, R1, fs1, n1, R2, Q2, n2, R_w, n_w, Rel, Ri, Q1='none'):
'''
Simulation Function: -R-RQ-TL(Q(RW))-
Transmission line w/ full complexity, which both includes Ri and Rel
Warburg element is specific for 1D solid-state diffusion
Refs:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
- Illig, J., Physically based Impedance Modelling of Lithium-ion Cells, KIT Scientific Publishing (2014)
- Scipioni, et al., ECS Transactions, 69 (18) 71-80 (2015)
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = charge transfer resistance of the interfacial RQ element [ohm*cm^2]
    fs1 = max frequency peak of the interfacial RQ element [Hz]
    n1 = exponent for interfacial RQ element [-]
    R2 = particle charge transfer resistance [ohm*cm^2]
    Q2 = Constant phase element in the modified Randles element of a particle [s^n/ohm]
    n2 = exponent for internal RQ element in the modified Randles element of a particle [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = ionic resistance of solution in flooded pores of electrode [ohm/cm]
R_w = polarization resistance of finite diffusion Warburg element [ohm]
n_w = exponent for Warburg element [-]
L = thickness of porous electrode [cm]
D = solid-state diffusion coefficient [cm^2/s]
radius = average particle radius [cm]
Output
------------------
Impedance of R-RQ-TL(Q(RW))
'''
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R2
Z_Q = elem_Q(w,Q=Q2,n=n2)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
#
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ + Z_TL
### Fitting Circuit Functions
##
#
def elem_C_fit(params, w):
'''
Fit Function: -C-
'''
C = params['C']
return 1/(C*(w*1j))
def elem_Q_fit(params, w):
'''
Fit Function: -Q-
Constant Phase Element for Fitting
'''
Q = params['Q']
n = params['n']
return 1/(Q*(w*1j)**n)
def cir_RsC_fit(params, w):
'''
Fit Function: -Rs-C-
'''
Rs = params['Rs']
C = params['C']
return Rs + 1/(C*(w*1j))
def cir_RsQ_fit(params, w):
'''
Fit Function: -Rs-Q-
'''
Rs = params['Rs']
Q = params['Q']
n = params['n']
return Rs + 1/(Q*(w*1j)**n)
def cir_RC_fit(params, w):
'''
Fit Function: -RC-
Returns the impedance of an RC circuit, using RQ definations where n=1
'''
    if str(params.keys())[10:].find("R") == -1: #if R == 'none':
        Q = params['C']
        fs = params['fs']
        R = (1/(Q*(2*np.pi*fs)))
    if str(params.keys())[10:].find("C") == -1: #elif C == 'none':
        R = params['R']
        fs = params['fs']
        Q = (1/(R*(2*np.pi*fs)))
    if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
        R = params['R']
        Q = params['C']
    return cir_RQ(w, R=R, Q=Q, n=1, fs='none')
def cir_RQ_fit(params, w):
'''
Fit Function: -RQ-
Return the impedance of an RQ circuit:
Z(w) = R / (1+ R*Q * (2w)^n)
See Explanation of equations under cir_RQ()
    The str(params.keys())[10:] slice finds the names of the user-defined parameters; if a parameter name is not found (find() == -1), it is treated as 'none' and reconstructed from the others
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
return R/(1+R*Q*(w*1j)**n)
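# Example (hypothetical values): the fit functions read their inputs from an lmfit Parameters
# object; whichever of R/Q/n/fs is omitted is reconstructed from the others through the
# summit-frequency relation, mirroring cir_RQ(). A minimal sketch:
# params_example = Parameters()
# params_example.add('R', value=100, min=0)
# params_example.add('n', value=0.8, min=0, max=1)
# params_example.add('fs', value=1000, min=0)
# Z_rq_fit_example = cir_RQ_fit(params_example, w_example)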
def cir_RsRQ_fit(params, w):
'''
Fit Function: -Rs-RQ-
Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RsRQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
Rs = params['Rs']
return Rs + (R/(1+R*Q*(w*1j)**n))
def cir_RsRQRQ_fit(params, w):
'''
Fit Function: -Rs-RQ-RQ-
    Return the impedance of an Rs-RQ-RQ circuit. See details under cir_RsRQRQ()
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("'R'") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'Q'") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'n'") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("'fs'") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
if str(params.keys())[10:].find("'R2'") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("'Q2'") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("'n2'") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
if str(params.keys())[10:].find("'fs2'") == -1: #elif fs == 'none':
R2 = params['R2']
Q2 = params['Q2']
n2 = params['n2']
Rs = params['Rs']
return Rs + (R/(1+R*Q*(w*1j)**n)) + (R2/(1+R2*Q2*(w*1j)**n2))
def cir_Randles_simplified_Fit(params, w):
'''
Fit Function: Randles simplified -Rs-(Q-(RW)-)-
Return the impedance of a Randles circuit. See more under cir_Randles_simplified()
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> || <EMAIL>)
'''
if str(params.keys())[10:].find("'R'") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'Q'") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'n'") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("'fs'") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
Rs = params['Rs']
sigma = params['sigma']
Z_Q = 1/(Q*(w*1j)**n)
Z_R = R
Z_w = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Rs + 1/(1/Z_Q + 1/(Z_R+Z_w))
def cir_RsRQQ_fit(params, w):
'''
Fit Function: -Rs-RQ-Q-
See cir_RsRQQ() for details
'''
Rs = params['Rs']
Q = params['Q']
n = params['n']
Z_Q = 1/(Q*(w*1j)**n)
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
return Rs + Z_RQ + Z_Q
def cir_RsRQC_fit(params, w):
'''
Fit Function: -Rs-RQ-C-
See cir_RsRQC() for details
'''
Rs = params['Rs']
C = params['C']
Z_C = 1/(C*(w*1j))
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
return Rs + Z_RQ + Z_C
def cir_RsRCC_fit(params, w):
'''
Fit Function: -Rs-RC-C-
See cir_RsRCC() for details
'''
Rs = params['Rs']
R1 = params['R1']
C1 = params['C1']
C = params['C']
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_C(w, C=C)
def cir_RsRCQ_fit(params, w):
'''
Fit Function: -Rs-RC-Q-
See cir_RsRCQ() for details
'''
Rs = params['Rs']
R1 = params['R1']
C1 = params['C1']
Q = params['Q']
n = params['n']
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_Q(w,Q,n)
# Polymer electrolytes
def cir_C_RC_C_fit(params, w):
'''
Fit Function: -C-(RC)-C-
See cir_C_RC_C() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
# Interfacial impedance
Ce = params['Ce']
Z_C = 1/(Ce*(w*1j))
    # Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Cb = params['Cb']
fsb = params['fsb']
Rb = (1/(Cb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("Cb") == -1: #elif Q == 'none':
Rb = params['Rb']
fsb = params['fsb']
Cb = (1/(Rb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
Cb = params['Cb']
Z_RC = (Rb/(1+Rb*Cb*(w*1j)))
return Z_C + Z_RC
def cir_Q_RQ_Q_Fit(params, w):
'''
Fit Function: -Q-(RQ)-Q-
See cir_Q_RQ_Q() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
# Interfacial impedance
Qe = params['Qe']
ne = params['ne']
Z_Q = 1/(Qe*(w*1j)**ne)
# Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Qb = params['Qb']
nb = params['nb']
fsb = params['fsb']
Rb = (1/(Qb*(2*np.pi*fsb)**nb))
if str(params.keys())[10:].find("Qb") == -1: #elif Q == 'none':
Rb = params['Rb']
nb = params['nb']
fsb = params['fsb']
Qb = (1/(Rb*(2*np.pi*fsb)**nb))
if str(params.keys())[10:].find("nb") == -1: #elif n == 'none':
Rb = params['Rb']
Qb = params['Qb']
fsb = params['fsb']
nb = np.log(Qb*Rb)/np.log(1/(2*np.pi*fsb))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
nb = params['nb']
Qb = params['Qb']
Z_RQ = Rb/(1+Rb*Qb*(w*1j)**nb)
return Z_Q + Z_RQ
def cir_RCRCZD_fit(params, w):
'''
Fit Function: -RC_b-RC_e-Z_D
See cir_RCRCZD() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
    # Interfacial impedance
if str(params.keys())[10:].find("Re") == -1: #if R == 'none':
Ce = params['Ce']
fse = params['fse']
Re = (1/(Ce*(2*np.pi*fse)))
    if str(params.keys())[10:].find("Ce") == -1: #elif Ce == 'none':
        Re = params['Re']
        fse = params['fse']
        Ce = (1/(Re*(2*np.pi*fse)))
if str(params.keys())[10:].find("fse") == -1: #elif fs == 'none':
Re = params['Re']
Ce = params['Ce']
Z_RCe = (Re/(1+Re*Ce*(w*1j)))
    # Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Cb = params['Cb']
fsb = params['fsb']
Rb = (1/(Cb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("Cb") == -1: #elif Q == 'none':
Rb = params['Rb']
fsb = params['fsb']
Cb = (1/(Rb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
Cb = params['Cb']
Z_RCb = (Rb/(1+Rb*Cb*(w*1j)))
    # Mass transport impedance
L = params['L']
D_s = params['D_s']
u1 = params['u1']
u2 = params['u2']
alpha = ((w*1j*L**2)/D_s)**(1/2)
Z_D = Rb * (u2/u1) * (tanh(alpha)/alpha)
return Z_RCb + Z_RCe + Z_D
# Transmission lines
def cir_RsTLsQ_fit(params, w):
'''
Fit Function: -Rs-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
See more under cir_RsTLsQ()
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Q = params['Q']
n = params['n']
Phi = 1/(Q*(w*1j)**n)
X1 = Ri # ohm/cm
Lam = (Phi/X1)**(1/2) #np.sqrt(Phi/X1)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
#
# Z_TLsQ = Lam * X1 * coth_mp
Z_TLsQ = Lam * X1 * coth(x)
return Rs + Z_TLsQ
def cir_RsRQTLsQ_Fit(params, w):
'''
Fit Function: -Rs-RQ-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
See more under cir_RsRQTLsQ
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Q = params['Q']
n = params['n']
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
Phi = 1/(Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLsQ
def cir_RsTLs_Fit(params, w):
'''
    Fit Function: -Rs-TLs-
    TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
    See more under cir_RsTLs()
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
Phi = R/(1+R*Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_TLs
def cir_RsRQTLs_Fit(params, w):
'''
Fit Function: -Rs-RQ-TLs-
TLs = Simplified Transmission Line with a faradaic interfacial impedance (RQ)
See more under cir_RsRQTLs()
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
if str(params.keys())[10:].find("R2") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("Q2") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
        Q2 = (1/(R2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("n2") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
if str(params.keys())[10:].find("fs2") == -1: #elif fs == 'none':
R2 = params['R2']
n2 = params['n2']
Q2 = params['Q2']
Phi = (R2/(1+R2*Q2*(w*1j)**n2))
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLs
def cir_RsTLQ_fit(params, w):
'''
Fit Function: -R-TLQ- (interface non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
Q = params['Q']
n = params['n']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTLQ_fit(params, w):
'''
Fit Function: -R-RQ-TLQ- (interface non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
Q = params['Q']
n = params['n']
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL_Fit(params, w):
'''
Fit Function: -R-TLQ- (interface reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
See cir_RsTL() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
Phi = (R/(1+R*Q*(w*1j)**n))
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL_fit(params, w):
'''
Fit Function: -R-RQ-TL- (interface reacting, i.e. non-blocking)
Transmission line w/ full complexity including both includes Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
elif str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
#
# # The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R2") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
elif str(params.keys())[10:].find("Q2") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
        Q2 = (1/(R2*(2*np.pi*fs2)**n2))
elif str(params.keys())[10:].find("n2") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
elif str(params.keys())[10:].find("fs2") == -1: #elif fs == 'none':
R2 = params['R2']
n2 = params['n2']
Q2 = params['Q2']
Phi = (R2/(1+R2*Q2*(w*1j)**n2))
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float((mp.coth(x_mp[i]).imag))*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(((1-mp.exp(-2*x_mp[i]))/(2*mp.exp(-x_mp[i]))).real) + float(((1-mp.exp(-2*x_mp[i]))/(2*mp.exp(-x_mp[i]))).real)*1j)
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float((mp.sinh(x_mp[i]).imag))*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL_1Dsolid_fit(params, w):
'''
Fit Function: -R-TL(Q(RW))-
Transmission line w/ full complexity
See cir_RsTL_1Dsolid() for details
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
radius = params['radius']
D = params['D']
R = params['R']
Q = params['Q']
n = params['n']
R_w = params['R_w']
n_w = params['n_w']
Rel = params['Rel']
Ri = params['Ri']
#The impedance of the series resistance
Z_Rs = Rs
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R
Z_Q = elem_Q(w=w, Q=Q, n=n)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL_1Dsolid_fit(params, w):
'''
Fit Function: -R-RQ-TL(Q(RW))-
Transmission line w/ full complexity, which both includes Ri and Rel. The Warburg element is specific for 1D solid-state diffusion
See cir_RsRQTL_1Dsolid() for details
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
radius = params['radius']
D = params['D']
R2 = params['R2']
Q2 = params['Q2']
n2 = params['n2']
R_w = params['R_w']
n_w = params['n_w']
Rel = params['Rel']
Ri = params['Ri']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
elif str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R2
Z_Q = elem_Q(w,Q=Q2,n=n2)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
#
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
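# Illustrative sketch (not part of the original API): evaluating cir_RsRQTL_1Dsolid_fit()
# with the -(RQ)- arc specified through R1, Q1 and n1 (fs1 left out, see the convention above).
# All numerical values are placeholders.
def _example_cir_RsRQTL_1Dsolid_fit():
    from lmfit import Parameters
    params = Parameters()
    for name, value in [('Rs', 10.0), ('R1', 30.0), ('Q1', 1e-4), ('n1', 0.85),
                        ('L', 1e-4), ('Ri', 50.0), ('Rel', 50.0), ('radius', 5e-6),
                        ('D', 1e-14), ('R2', 100.0), ('Q2', 1e-3), ('n2', 0.9),
                        ('R_w', 200.0), ('n_w', 0.5)]:
        params.add(name, value=value)
    w = 2 * np.pi * np.logspace(5, -2, 50)   # angular frequencies [rad/s]
    return cir_RsRQTL_1Dsolid_fit(params, w)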
### Least-Squares error function
def leastsq_errorfunc(params, w, re, im, circuit, weight_func):
'''
    Sum-of-squares error function for the complex non-linear least-squares (CNLS) fitting procedure. The fitting routine (lmfit's minimize)
    calls this function repeatedly until the total sum of squares is minimized.
    During the minimization the fit is weighted, and currently three different weight options are available:
        - modulus
        - unity
        - proportional
    The modulus weight is generally recommended, as random errors and a bias can exist in the experimental data.
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------
- params: parameters needed for CNLS
    - w: angular frequency
    - re: real impedance
    - im: imaginary impedance
- circuit:
The avaliable circuits are shown below, and this this parameter needs it as a string.
- C
- Q
- R-C
- R-Q
- RC
- RQ
- R-RQ
- R-RQ-RQ
        - R-RQ-Q
        - R-(Q(RW))
        - R-(Q(RM))
        - R-RC-C
        - R-RC-Q
        - R-RQ-C
        - C-RC-C
        - Q-RQ-Q
        - RC-RC-ZD
- R-TLsQ
- R-RQ-TLsQ
- R-TLs
- R-RQ-TLs
- R-TLQ
- R-RQ-TLQ
- R-TL
- R-RQ-TL
- R-TL1Dsolid (reactive interface with 1D solid-state diffusion)
- R-RQ-TL1Dsolid
- weight_func
Weight function
- modulus
- unity
- proportional
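        The weights applied to the squared residuals are: modulus = 1/|Z_fit| (for both parts),
        proportional = 1/re_fit**2 and 1/im_fit**2, and unity = 1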
'''
if circuit == 'C':
re_fit = elem_C_fit(params, w).real
im_fit = -elem_C_fit(params, w).imag
elif circuit == 'Q':
re_fit = elem_Q_fit(params, w).real
im_fit = -elem_Q_fit(params, w).imag
elif circuit == 'R-C':
re_fit = cir_RsC_fit(params, w).real
im_fit = -cir_RsC_fit(params, w).imag
elif circuit == 'R-Q':
re_fit = cir_RsQ_fit(params, w).real
im_fit = -cir_RsQ_fit(params, w).imag
elif circuit == 'RC':
re_fit = cir_RC_fit(params, w).real
im_fit = -cir_RC_fit(params, w).imag
elif circuit == 'RQ':
re_fit = cir_RQ_fit(params, w).real
im_fit = -cir_RQ_fit(params, w).imag
elif circuit == 'R-RQ':
re_fit = cir_RsRQ_fit(params, w).real
im_fit = -cir_RsRQ_fit(params, w).imag
elif circuit == 'R-RQ-RQ':
re_fit = cir_RsRQRQ_fit(params, w).real
im_fit = -cir_RsRQRQ_fit(params, w).imag
elif circuit == 'R-RC-C':
re_fit = cir_RsRCC_fit(params, w).real
im_fit = -cir_RsRCC_fit(params, w).imag
elif circuit == 'R-RC-Q':
re_fit = cir_RsRCQ_fit(params, w).real
im_fit = -cir_RsRCQ_fit(params, w).imag
elif circuit == 'R-RQ-Q':
re_fit = cir_RsRQQ_fit(params, w).real
im_fit = -cir_RsRQQ_fit(params, w).imag
elif circuit == 'R-RQ-C':
re_fit = cir_RsRQC_fit(params, w).real
im_fit = -cir_RsRQC_fit(params, w).imag
elif circuit == 'R-(Q(RW))':
re_fit = cir_Randles_simplified_Fit(params, w).real
im_fit = -cir_Randles_simplified_Fit(params, w).imag
elif circuit == 'R-(Q(RM))':
re_fit = cir_Randles_uelectrode_fit(params, w).real
im_fit = -cir_Randles_uelectrode_fit(params, w).imag
elif circuit == 'C-RC-C':
re_fit = cir_C_RC_C_fit(params, w).real
im_fit = -cir_C_RC_C_fit(params, w).imag
elif circuit == 'Q-RQ-Q':
re_fit = cir_Q_RQ_Q_Fit(params, w).real
im_fit = -cir_Q_RQ_Q_Fit(params, w).imag
elif circuit == 'RC-RC-ZD':
re_fit = cir_RCRCZD_fit(params, w).real
im_fit = -cir_RCRCZD_fit(params, w).imag
elif circuit == 'R-TLsQ':
re_fit = cir_RsTLsQ_fit(params, w).real
im_fit = -cir_RsTLsQ_fit(params, w).imag
elif circuit == 'R-RQ-TLsQ':
re_fit = cir_RsRQTLsQ_Fit(params, w).real
im_fit = -cir_RsRQTLsQ_Fit(params, w).imag
elif circuit == 'R-TLs':
re_fit = cir_RsTLs_Fit(params, w).real
im_fit = -cir_RsTLs_Fit(params, w).imag
elif circuit == 'R-RQ-TLs':
re_fit = cir_RsRQTLs_Fit(params, w).real
im_fit = -cir_RsRQTLs_Fit(params, w).imag
elif circuit == 'R-TLQ':
re_fit = cir_RsTLQ_fit(params, w).real
im_fit = -cir_RsTLQ_fit(params, w).imag
elif circuit == 'R-RQ-TLQ':
re_fit = cir_RsRQTLQ_fit(params, w).real
im_fit = -cir_RsRQTLQ_fit(params, w).imag
elif circuit == 'R-TL':
re_fit = cir_RsTL_Fit(params, w).real
im_fit = -cir_RsTL_Fit(params, w).imag
elif circuit == 'R-RQ-TL':
re_fit = cir_RsRQTL_fit(params, w).real
im_fit = -cir_RsRQTL_fit(params, w).imag
elif circuit == 'R-TL1Dsolid':
re_fit = cir_RsTL_1Dsolid_fit(params, w).real
im_fit = -cir_RsTL_1Dsolid_fit(params, w).imag
elif circuit == 'R-RQ-TL1Dsolid':
re_fit = cir_RsRQTL_1Dsolid_fit(params, w).real
im_fit = -cir_RsRQTL_1Dsolid_fit(params, w).imag
else:
print('Circuit is not defined in leastsq_errorfunc()')
error = [(re-re_fit)**2, (im-im_fit)**2] #sum of squares
    #Different weighting options, see Lasia
if weight_func == 'modulus':
weight = [1/((re_fit**2 + im_fit**2)**(1/2)), 1/((re_fit**2 + im_fit**2)**(1/2))]
elif weight_func == 'proportional':
weight = [1/(re_fit**2), 1/(im_fit**2)]
elif weight_func == 'unity':
unity_1s = []
for k in range(len(re)):
            unity_1s.append(1) #makes an array of 1's, so that the weighting is == 1 * sum of squares.
weight = [unity_1s, unity_1s]
else:
print('weight not defined in leastsq_errorfunc()')
S = np.array(weight) * error #weighted sum of squares
return S
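# Illustrative sketch (not part of the original module): a minimal CNLS fit that hands
# leastsq_errorfunc() to lmfit's minimize(), mirroring how Lin_KK() drives its -(RC)- fits.
# The parameter names must match those expected by the chosen cir_*_fit() function; the
# names and start values below are placeholders for an 'R-RQ' fit with modulus weighting.
def _example_cnls_fit(w, re, im):
    from lmfit import Parameters, minimize
    params = Parameters()
    params.add('Rs', value=20.0, min=0)
    params.add('R', value=100.0, min=0)
    params.add('Q', value=1e-3, min=0)
    params.add('n', value=0.8, min=0, max=1)
    fit = minimize(leastsq_errorfunc, params, method='leastsq',
                   args=(w, re, im, 'R-RQ', 'modulus'))
    return fit.params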
### Fitting Class
class EIS_exp:
'''
This class is used to plot and/or analyze experimental impedance data. The class has three major functions:
- EIS_plot()
- Lin_KK()
- EIS_fit()
- EIS_plot() is used to plot experimental data with or without fit
- Lin_KK() performs a linear Kramers-Kronig analysis of the experimental data set.
- EIS_fit() performs complex non-linear least-squares fitting of the experimental data to an equivalent circuit
<NAME> (<EMAIL> || <EMAIL>)
Inputs
-----------
- path: path of datafile(s) as a string
- data: datafile(s) including extension, e.g. ['EIS_data1', 'EIS_data2']
    - cycle: Specific cycle numbers can be extracted with this parameter. Default is 'off', which includes all cycle numbers.
    To extract specific cycles, insert the cycle numbers in brackets, e.g. if cycle numbers 1, 4, and 6 are wanted: cycle=[1,4,6]
    - mask: ['high frequency', 'low frequency']; if only a high- or low-frequency cut-off is desired, use 'none' for the other, e.g. mask=[10**4,'none']
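    Example (illustrative only; the path, file name, and mask values are placeholders)
    -----------
    ex = EIS_exp(path='/data/', data=['EIS_data1.mpt'], cycle='off', mask=[10**5, 0.1])
    ex.Lin_KK(num_RC='auto', plot='residuals')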
'''
def __init__(self, path, data, cycle='off', mask=['none','none']):
self.df_raw0 = []
self.cycleno = []
for j in range(len(data)):
if data[j].find(".mpt") != -1: #file is a .mpt file
self.df_raw0.append(extract_mpt(path=path, EIS_name=data[j])) #reads all datafiles
elif data[j].find(".DTA") != -1: #file is a .dta file
self.df_raw0.append(extract_dta(path=path, EIS_name=data[j])) #reads all datafiles
elif data[j].find(".z") != -1: #file is a .z file
self.df_raw0.append(extract_solar(path=path, EIS_name=data[j])) #reads all datafiles
else:
print('Data file(s) could not be identified')
self.cycleno.append(self.df_raw0[j].cycle_number)
if np.min(self.cycleno[j]) <= np.max(self.cycleno[j-1]):
if j > 0: #corrects cycle_number except for the first data file
self.df_raw0[j].update({'cycle_number': self.cycleno[j]+np.max(self.cycleno[j-1])}) #corrects cycle number
# else:
# print('__init__ Error (#1)')
        #currently need to append a cycle_number column to Gamry files
# adds individual dataframes into one
        self.df_raw = pd.concat(self.df_raw0, axis=0) #works for any number of data files
        self.df_raw = self.df_raw.assign(w = 2*np.pi*self.df_raw.f) #creates a new column with the angular frequency
#Masking data to each cycle
self.df_pre = []
self.df_limited = []
self.df_limited2 = []
self.df = []
if mask == ['none','none'] and cycle == 'off':
for i in range(len(self.df_raw.cycle_number.unique())): #includes all data
self.df.append(self.df_raw[self.df_raw.cycle_number == self.df_raw.cycle_number.unique()[i]])
elif mask == ['none','none'] and cycle != 'off':
for i in range(len(cycle)):
self.df.append(self.df_raw[self.df_raw.cycle_number == cycle[i]]) #extracting dataframe for each cycle
elif mask[0] != 'none' and mask[1] == 'none' and cycle == 'off':
self.df_pre = self.df_raw.mask(self.df_raw.f > mask[0])
self.df_pre.dropna(how='all', inplace=True)
for i in range(len(self.df_pre.cycle_number.unique())): #Appending data based on cycle number
self.df.append(self.df_pre[self.df_pre.cycle_number == self.df_pre.cycle_number.unique()[i]])
elif mask[0] != 'none' and mask[1] == 'none' and cycle != 'off': # or [i for i, e in enumerate(mask) if e == 'none'] == [0]
self.df_limited = self.df_raw.mask(self.df_raw.f > mask[0])
for i in range(len(cycle)):
self.df.append(self.df_limited[self.df_limited.cycle_number == cycle[i]])
elif mask[0] == 'none' and mask[1] != 'none' and cycle == 'off':
self.df_pre = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_pre.dropna(how='all', inplace=True)
for i in range(len(self.df_raw.cycle_number.unique())): #includes all data
self.df.append(self.df_pre[self.df_pre.cycle_number == self.df_pre.cycle_number.unique()[i]])
elif mask[0] == 'none' and mask[1] != 'none' and cycle != 'off':
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
for i in range(len(cycle)):
self.df.append(self.df_limited[self.df_limited.cycle_number == cycle[i]])
elif mask[0] != 'none' and mask[1] != 'none' and cycle != 'off':
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_limited2 = self.df_limited.mask(self.df_raw.f > mask[0])
for i in range(len(cycle)):
self.df.append(self.df_limited[self.df_limited2.cycle_number == cycle[i]])
elif mask[0] != 'none' and mask[1] != 'none' and cycle == 'off':
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_limited2 = self.df_limited.mask(self.df_raw.f > mask[0])
for i in range(len(self.df_raw.cycle_number.unique())):
self.df.append(self.df_limited[self.df_limited2.cycle_number == self.df_raw.cycle_number.unique()[i]])
else:
print('__init__ error (#2)')
def Lin_KK(self, num_RC='auto', legend='on', plot='residuals', bode='off', nyq_xlim='none', nyq_ylim='none', weight_func='Boukamp', savefig='none'):
'''
Plots the Linear Kramers-Kronig (KK) Validity Test
        The script is based on Boukamp and Schönleber et al.'s papers for fitting the resistances of multiple -(RC)- circuits
        to the data. A data quality analysis can hereby be made on the basis of the relative residuals
        Ref.:
            - Schönleber, M. et al. Electrochimica Acta 131 (2014) 20-27
            - Boukamp, B.A. J. Electrochem. Soc., 142, 6, 1885-1894 (1995)
        The function performs the KK analysis and by default plots the relative residuals in individual subplots.
        Note that weight_func should be left equal to 'Boukamp'.
<NAME> (<EMAIL> || <EMAIL>)
Optional Inputs
-----------------
        - num_RC:
            - 'auto' applies an automatic algorithm developed by Schönleber, M. et al. Electrochimica Acta 131 (2014) 20-27
            that ensures no under- or over-fitting occurs
            - can be hardwired by inserting any number (RC-elements/decade)
        - plot:
            - 'residuals' = plots the relative residuals in subplots corresponding to the cycle numbers picked
            - 'w_data' = plots the relative residuals together with the experimental data, in a Nyquist and Bode plot if desired, see 'bode =' in the description
        - nyq_xlim/nyq_ylim: Change the x/y-axis limits on the Nyquist plot; if not equal to 'none', state a [min,max] value
- legend:
- 'on' = displays cycle number
- 'potential' = displays average potential which the spectra was measured at
- 'off' = off
        - bode: Plots a Bode plot - options:
            - 'on' = re, im vs. log(freq)
            - 'log' = log(re, im) vs. log(freq)
            - 're' = re vs. log(freq)
            - 'log_re' = log(re) vs. log(freq)
            - 'im' = im vs. log(freq)
            - 'log_im' = log(im) vs. log(freq)
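        Example (illustrative call on an EIS_exp instance 'ex'; all keyword values are placeholders)
        -----------------
        ex.Lin_KK(num_RC='auto', legend='on', plot='w_data', bode='log', nyq_xlim='none', nyq_ylim='none', weight_func='Boukamp', savefig='none')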
'''
if num_RC == 'auto':
print('cycle || No. RC-elements || u')
self.decade = []
self.Rparam = []
self.t_const = []
self.Lin_KK_Fit = []
self.R_names = []
self.KK_R0 = []
self.KK_R = []
self.number_RC = []
self.number_RC_sort = []
self.KK_u = []
self.KK_Rgreater = []
self.KK_Rminor = []
M = 2
for i in range(len(self.df)):
self.decade.append(np.log10(np.max(self.df[i].f))-np.log10(np.min(self.df[i].f))) #determine the number of RC circuits based on the number of decades measured and num_RC
self.number_RC.append(M)
self.number_RC_sort.append(M) #needed for self.KK_R
self.Rparam.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[0]) #Creates intial guesses for R's
self.t_const.append(KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC[i]))) #Creates time constants values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit.append(minimize(KK_errorfunc, self.Rparam[i], method='leastsq', args=(self.df[i].w.values, self.df[i].re.values, self.df[i].im.values, self.number_RC[i], weight_func, self.t_const[i]) )) #maxfev=99
self.R_names.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[1]) #creates R names
for j in range(len(self.R_names[i])):
self.KK_R0.append(self.Lin_KK_Fit[i].params.get(self.R_names[i][j]).value)
self.number_RC_sort.insert(0,0) #needed for self.KK_R
for i in range(len(self.df)):
self.KK_R.append(self.KK_R0[int(np.cumsum(self.number_RC_sort)[i]):int(np.cumsum(self.number_RC_sort)[i+1])]) #assigns resistances from each spectra to their respective df
self.KK_Rgreater.append(np.where(np.array(self.KK_R)[i] >= 0, np.array(self.KK_R)[i], 0) )
self.KK_Rminor.append(np.where(np.array(self.KK_R)[i] < 0, np.array(self.KK_R)[i], 0) )
self.KK_u.append(1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i]))))
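            # mu = 1 - |sum(negative R's)|/|sum(non-negative R's)| (Schönleber et al.); e.g. fitted R's of
            # [5, 3, -2] give mu = 1 - 2/8 = 0.75. The loop below adds one -(RC)- element at a time until
            # mu falls strictly inside the 0.75-0.88 window used here, to avoid under- and over-fitting.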
for i in range(len(self.df)):
while self.KK_u[i] <= 0.75 or self.KK_u[i] >= 0.88:
self.number_RC_sort0 = []
self.KK_R_lim = []
self.number_RC[i] = self.number_RC[i] + 1
self.number_RC_sort0.append(self.number_RC)
self.number_RC_sort = np.insert(self.number_RC_sort0, 0,0)
self.Rparam[i] = KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[0] #Creates intial guesses for R's
self.t_const[i] = KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC[i])) #Creates time constants values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit[i] = minimize(KK_errorfunc, self.Rparam[i], method='leastsq', args=(self.df[i].w.values, self.df[i].re.values, self.df[i].im.values, self.number_RC[i], weight_func, self.t_const[i]) ) #maxfev=99
self.R_names[i] = KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[1] #creates R names
self.KK_R0 = np.delete(np.array(self.KK_R0), np.s_[0:len(self.KK_R0)])
self.KK_R0 = []
for q in range(len(self.df)):
for j in range(len(self.R_names[q])):
self.KK_R0.append(self.Lin_KK_Fit[q].params.get(self.R_names[q][j]).value)
self.KK_R_lim = np.cumsum(self.number_RC_sort) #used for KK_R[i]
self.KK_R[i] = self.KK_R0[self.KK_R_lim[i]:self.KK_R_lim[i+1]] #assigns resistances from each spectra to their respective df
self.KK_Rgreater[i] = np.where(np.array(self.KK_R[i]) >= 0, np.array(self.KK_R[i]), 0)
self.KK_Rminor[i] = np.where(np.array(self.KK_R[i]) < 0, np.array(self.KK_R[i]), 0)
self.KK_u[i] = 1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i])))
else:
print('['+str(i+1)+']'+' '+str(self.number_RC[i]),' '+str(np.round(self.KK_u[i],2)))
elif num_RC != 'auto': #hardwired number of RC-elements/decade
print('cycle || u')
self.decade = []
self.number_RC0 = []
self.number_RC = []
self.Rparam = []
self.t_const = []
self.Lin_KK_Fit = []
self.R_names = []
self.KK_R0 = []
self.KK_R = []
for i in range(len(self.df)):
self.decade.append(np.log10(np.max(self.df[i].f))-np.log10(np.min(self.df[i].f))) #determine the number of RC circuits based on the number of decades measured and num_RC
self.number_RC0.append(np.round(num_RC * self.decade[i]))
                self.number_RC.append(np.round(num_RC * self.decade[i])) #Creates the number of -(RC)- circuits
self.Rparam.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC0[i]))[0]) #Creates intial guesses for R's
self.t_const.append(KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC0[i]))) #Creates time constants values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit.append(minimize(KK_errorfunc, self.Rparam[i], method='leastsq', args=(self.df[i].w.values, self.df[i].re.values, self.df[i].im.values, self.number_RC0[i], weight_func, self.t_const[i]) )) #maxfev=99
self.R_names.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC0[i]))[1]) #creates R names
for j in range(len(self.R_names[i])):
self.KK_R0.append(self.Lin_KK_Fit[i].params.get(self.R_names[i][j]).value)
self.number_RC0.insert(0,0)
# print(report_fit(self.Lin_KK_Fit[i])) # prints fitting report
self.KK_circuit_fit = []
self.KK_rr_re = []
self.KK_rr_im = []
self.KK_Rgreater = []
self.KK_Rminor = []
self.KK_u = []
for i in range(len(self.df)):
self.KK_R.append(self.KK_R0[int(np.cumsum(self.number_RC0)[i]):int(np.cumsum(self.number_RC0)[i+1])]) #assigns resistances from each spectra to their respective df
self.KK_Rx = np.array(self.KK_R)
self.KK_Rgreater.append(np.where(self.KK_Rx[i] >= 0, self.KK_Rx[i], 0) )
self.KK_Rminor.append(np.where(self.KK_Rx[i] < 0, self.KK_Rx[i], 0) )
self.KK_u.append(1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i])))) #currently gives incorrect values
print('['+str(i+1)+']'+' '+str(np.round(self.KK_u[i],2)))
else:
print('num_RC incorrectly defined')
self.KK_circuit_fit = []
self.KK_rr_re = []
self.KK_rr_im = []
for i in range(len(self.df)):
            # Dispatch to the matching KK_RC<N> simulation function (N = 2..80); they all share the same signature
            if 2 <= int(self.number_RC[i]) <= 80:
                KK_RC_simulation = globals()['KK_RC' + str(int(self.number_RC[i]))]
                self.KK_circuit_fit.append(KK_RC_simulation(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
            else:
                print('RC simulation circuit not defined')
                print('   Number of RC = ', self.number_RC)
self.KK_rr_re.append(residual_real(re=self.df[i].re, fit_re=self.KK_circuit_fit[i].to_numpy().real, fit_im=-self.KK_circuit_fit[i].to_numpy().imag)) #relative residuals for the real part
self.KK_rr_im.append(residual_imag(im=self.df[i].im, fit_re=self.KK_circuit_fit[i].to_numpy().real, fit_im=-self.KK_circuit_fit[i].to_numpy().imag)) #relative residuals for the imag part
### Plotting Linear_kk results
##
#
### Label functions
self.label_re_1 = []
self.label_im_1 = []
self.label_cycleno = []
if legend == 'on':
for i in range(len(self.df)):
self.label_re_1.append("Z' (#"+str(i+1)+")")
self.label_im_1.append("Z'' (#"+str(i+1)+")")
self.label_cycleno.append('#'+str(i+1))
elif legend == 'potential':
for i in range(len(self.df)):
self.label_re_1.append("Z' ("+str(np.round(np.average(self.df[i].E_avg), 2))+' V)')
self.label_im_1.append("Z'' ("+str(np.round(np.average(self.df[i].E_avg), 2))+' V)')
self.label_cycleno.append(str(np.round(np.average(self.df[i].E_avg), 2))+' V')
if plot == 'w_data':
fig = figure(figsize=(6, 8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.5, bottom=0.1, top=0.95)
ax = fig.add_subplot(311, aspect='equal')
ax1 = fig.add_subplot(312)
ax2 = fig.add_subplot(313)
colors = sns.color_palette("colorblind", n_colors=len(self.df))
colors_real = sns.color_palette("Blues", n_colors=len(self.df)+2)
colors_imag = sns.color_palette("Oranges", n_colors=len(self.df)+2)
### Nyquist Plot
for i in range(len(self.df)):
ax.plot(self.df[i].re, self.df[i].im, marker='o', ms=4, lw=2, color=colors[i], ls='-', alpha=.7, label=self.label_cycleno[i])
### Bode Plot
if bode == 'on':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].re, color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_re_1[i])
ax1.plot(np.log10(self.df[i].f), self.df[i].im, color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_im_1[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("Z', -Z'' [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 're':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].re, color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("Z' [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log_re':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].re), color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(Z') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'im':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].im, color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("-Z'' [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log_im':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].im), color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(-Z'') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].re), color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_re_1[i])
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].im), color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_im_1[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(Z', -Z'') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
### Kramers-Kronig Relative Residuals
for i in range(len(self.df)):
ax2.plot(np.log10(self.df[i].f), self.KK_rr_re[i]*100, color=colors_real[i+1], marker='D', ls='--', ms=6, alpha=.7, label=self.label_re_1[i])
ax2.plot(np.log10(self.df[i].f), self.KK_rr_im[i]*100, color=colors_imag[i+1], marker='s', ls='--', ms=6, alpha=.7, label=self.label_im_1[i])
ax2.set_xlabel("log(f) [Hz]")
ax2.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and write 'KK-Test' on RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if np.min(self.KK_rr_im_min) > np.min(self.KK_rr_re_min):
ax2.set_ylim(np.min(self.KK_rr_re_min)*100*1.5, np.max(np.abs(self.KK_rr_re_min))*100*1.5)
ax2.annotate('Lin-KK', xy=[np.min(np.log10(self.df[0].f)), np.max(self.KK_rr_re_max)*100*.9], color='k', fontweight='bold')
elif np.min(self.KK_rr_im_min) < np.min(self.KK_rr_re_min):
ax2.set_ylim(np.min(self.KK_rr_im_min)*100*1.5, np.max(self.KK_rr_im_max)*100*1.5)
ax2.annotate('Lin-KK', xy=[np.min(np.log10(self.df[0].f)), np.max(self.KK_rr_im_max)*100*.9], color='k', fontweight='bold')
### Figure specifics
if legend == 'on' or legend == 'potential':
ax.legend(loc='best', fontsize=10, frameon=False)
ax.set_xlabel("Z' [$\Omega$]")
ax.set_ylabel("-Z'' [$\Omega$]")
if nyq_xlim != 'none':
ax.set_xlim(nyq_xlim[0], nyq_xlim[1])
if nyq_ylim != 'none':
ax.set_ylim(nyq_ylim[0], nyq_ylim[1])
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### Illustrating residuals only
elif plot == 'residuals':
colors = sns.color_palette("colorblind", n_colors=9)
colors_real = sns.color_palette("Blues", n_colors=9)
colors_imag = sns.color_palette("Oranges", n_colors=9)
### 1 Cycle
if len(self.df) == 1:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax = fig.add_subplot(231)
ax.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax.set_xlabel("log(f) [Hz]")
ax.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]")
if legend == 'on' or legend == 'potential':
ax.legend(loc='best', fontsize=10, frameon=False)
ax.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and write 'KK-Test' on RR subplot
self.KK_rr_im_min = np.min(self.KK_rr_im)
self.KK_rr_im_max = np.max(self.KK_rr_im)
self.KK_rr_re_min = np.min(self.KK_rr_re)
self.KK_rr_re_max = np.max(self.KK_rr_re)
if self.KK_rr_re_max > self.KK_rr_im_max:
self.KK_ymax = self.KK_rr_re_max
else:
self.KK_ymax = self.KK_rr_im_max
if self.KK_rr_re_min < self.KK_rr_im_min:
self.KK_ymin = self.KK_rr_re_min
else:
self.KK_ymin = self.KK_rr_im_min
if np.abs(self.KK_ymin) > self.KK_ymax:
ax.set_ylim(self.KK_ymin*100*1.5, np.abs(self.KK_ymin)*100*1.5)
if legend == 'on':
ax.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin)*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin)*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin) < self.KK_ymax:
ax.set_ylim(np.negative(self.KK_ymax)*100*1.5, np.abs(self.KK_ymax)*100*1.5)
if legend == 'on':
ax.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax*100*1.3], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 2 Cycles
elif len(self.df) == 2:
fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
#cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax2.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.3], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 3 Cycles
elif len(self.df) == 3:
fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax2.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.3], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
                    ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.3], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 4 Cycles
elif len(self.df) == 4:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax2.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
ax3.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
                ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 5 Cycles
elif len(self.df) == 5:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
ax4 = fig.add_subplot(234)
ax5 = fig.add_subplot(235)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
ax4.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 5
ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax5.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax5.legend(loc='best', fontsize=10, frameon=False)
ax5.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
            elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
                ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:
ax5.set_ylim(self.KK_ymin[4]*100*1.5, np.abs(self.KK_ymin[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:
ax5.set_ylim(np.negative(self.KK_ymax[4])*100*1.5, np.abs(self.KK_ymax[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymax[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK, ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), self.KK_ymax[4]*100*1.2], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 6 Cycles
elif len(self.df) == 6:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
ax4 = fig.add_subplot(234)
ax5 = fig.add_subplot(235)
ax6 = fig.add_subplot(236)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_xlabel("log(f) [Hz]")
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 5
ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax5.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax5.legend(loc='best', fontsize=10, frameon=False)
ax5.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 6
ax6.plot(np.log10(self.df[5].f), self.KK_rr_re[5]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax6.plot(np.log10(self.df[5].f), self.KK_rr_im[5]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax6.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax6.legend(loc='best', fontsize=10, frameon=False)
ax6.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
            elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
                ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:
ax5.set_ylim(self.KK_ymin[4]*100*1.5, np.abs(self.KK_ymin[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:
ax5.set_ylim(np.negative(self.KK_ymax[4])*100*1.5, np.abs(self.KK_ymax[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymax[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK, ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), self.KK_ymax[4]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[5]) > self.KK_ymax[5]:
ax6.set_ylim(self.KK_ymin[5]*100*1.5, np.abs(self.KK_ymin[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[5]) < self.KK_ymax[5]:
ax6.set_ylim(np.negative(self.KK_ymax[5])*100*1.5, np.abs(self.KK_ymax[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymax[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK, ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), self.KK_ymax[5]*100*1.2], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 7 Cycles
elif len(self.df) == 7:
fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(331)
ax2 = fig.add_subplot(332)
ax3 = fig.add_subplot(333)
ax4 = fig.add_subplot(334)
ax5 = fig.add_subplot(335)
ax6 = fig.add_subplot(336)
ax7 = fig.add_subplot(337)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 5
ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax5.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax5.legend(loc='best', fontsize=10, frameon=False)
ax5.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 6
ax6.plot(np.log10(self.df[5].f), self.KK_rr_re[5]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax6.plot(np.log10(self.df[5].f), self.KK_rr_im[5]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax6.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax6.legend(loc='best', fontsize=10, frameon=False)
ax6.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 7
ax7.plot(np.log10(self.df[6].f), self.KK_rr_re[6]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax7.plot(np.log10(self.df[6].f), self.KK_rr_im[6]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax7.set_xlabel("log(f) [Hz]")
ax7.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax7.legend(loc='best', fontsize=10, frameon=False)
ax7.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
            elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
                ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:
ax5.set_ylim(self.KK_ymin[4]*100*1.5, np.abs(self.KK_ymin[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:
ax5.set_ylim(np.negative(self.KK_ymax[4])*100*1.5, np.abs(self.KK_ymax[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymax[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK, ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), self.KK_ymax[4]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[5]) > self.KK_ymax[5]:
ax6.set_ylim(self.KK_ymin[5]*100*1.5, np.abs(self.KK_ymin[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[5]) < self.KK_ymax[5]:
ax6.set_ylim(np.negative(self.KK_ymax[5])*100*1.5, np.abs(self.KK_ymax[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymax[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK, ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), self.KK_ymax[5]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[6]) > self.KK_ymax[6]:
ax7.set_ylim(self.KK_ymin[6]*100*1.5, np.abs(self.KK_ymin[6])*100*1.5)
if legend == 'on':
ax7.annotate('Lin-KK, #7', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymin[6])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax7.annotate('Lin-KK ('+str(np.round(np.average(self.df[6].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymin[6])*100*1.2], color='k', fontweight='bold')
elif
completion: np.abs(self.KK_ymin[6])
api: numpy.abs
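# The per-axis ylim/annotate blocks above repeat the same pattern once per cycle. Below is a
# minimal sketch (not part of the original module) of how that logic could be factored into a
# single loop; `axes`, `df`, `KK_ymin` and `KK_ymax` stand in for the attributes used above.
def _label_kk_axes(axes, df, KK_ymin, KK_ymax, legend='on'):
    import numpy as np
    for i, ax in enumerate(axes):
        # symmetric limits around zero, driven by the largest residual (converted to percent)
        lim = max(abs(KK_ymin[i]), abs(KK_ymax[i])) * 100 * 1.5
        ax.set_ylim(-lim, lim)
        if legend == 'on':
            label = 'Lin-KK, #%d' % (i + 1)
        elif legend == 'potential':
            label = 'Lin-KK (' + str(np.round(np.average(df[i].E_avg), 2)) + ' V)'
        else:
            continue
        ax.annotate(label, xy=[np.min(np.log10(df[i].f)), lim / 1.5 * 1.2],
                    color='k', fontweight='bold')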
import pandas as pd
import numpy as np
import time
import os
import argparse
from utils import str2bool
def run_analysis(args, gpu, data, model_name, explanations_to_use, labels_to_use, seed, split_name, model_size):
'''
    compute the simulatability (sim) metric for a model by writing predictions to file (or reusing them if they are already present in the data)
'''
if data == 'QA':
extension = 'csv'
sep = ','
folder = 'data/v1.0'
elif data == 'NLI':
extension = 'tsv'
sep = '\t'
folder = 'data/e-SNLI-data'
save_dir = os.path.join(args.base_dir, 'saved_models')
cache_dir = os.path.join(args.base_dir, 'cached_models')
pretrained_name = args.task_pretrained_name + '-' + model_size
train_file = os.path.join(folder, 'train.%s' % extension)
dev_file = os.path.join(folder, 'dev.%s' % extension)
test_file = os.path.join(folder, 'test.%s' % extension)
write_base = 'preds'
xe_col = '%s_%s_%s_%s_seed%s_XE' % (write_base, data, pretrained_name, model_name, seed)
e_col = '%s_%s_%s_%s_seed%s_E' % (write_base, data, pretrained_name, model_name, seed)
x_col = '%s_%s_%s_%s_seed%s_X' % (write_base, data, pretrained_name, model_name, seed)
train = pd.read_csv(train_file, sep=sep)
dev = pd.read_csv(dev_file, sep=sep)
test = pd.read_csv(test_file, sep=sep)
to_use = dev if split_name == 'dev' else test
script = 'main'
if args.small_data:
small_data_add = '-s -ss 100 '
else:
small_data_add = ''
if xe_col not in to_use.columns or args.overwrite:
print("\nWriting XE predictions...")
os.system(f"python {script}.py --gpu {gpu} --model_name {model_name} --do_explain false --task_pretrained_name {pretrained_name} --multi_explanation false "
f"--data_dir {folder} --condition_on_explanations true --explanations_to_use {explanations_to_use} "
f"--dev_batch_size 20 "
f"--labels_to_use {labels_to_use} --do_train false --do_eval false --write_predictions --preds_suffix XE "
f"--save_dir {save_dir} --cache_dir {cache_dir} --seed {seed} {small_data_add}"
)
if x_col not in to_use.columns or args.overwrite:
print("Writing X predictions...")
os.system(f"python {script}.py --gpu {gpu} --model_name {model_name} --do_explain false --task_pretrained_name {pretrained_name} --multi_explanation false "
f"--data_dir {folder} --condition_on_explanations false "
f"--dev_batch_size 20 "
f"--labels_to_use {labels_to_use} --do_train false --do_eval false --write_predictions --preds_suffix X "
f"--save_dir {save_dir} --cache_dir {cache_dir} --seed {seed} {small_data_add}"
)
if e_col not in to_use.columns or args.overwrite:
print("Writing E predictions...")
os.system(f"python {script}.py --gpu {gpu} --model_name {model_name} --do_explain false --task_pretrained_name {pretrained_name} --multi_explanation false "
f"--data_dir {folder} --condition_on_explanations true --explanations_to_use {explanations_to_use} --explanations_only true "
f"--dev_batch_size 20 "
f"--labels_to_use {labels_to_use} --do_train false --do_eval false --write_predictions --preds_suffix E "
f"--save_dir {save_dir} --cache_dir {cache_dir} --seed {seed} {small_data_add}"
)
train = pd.read_csv(train_file, sep=sep)
dev = pd.read_csv(dev_file, sep=sep)
test = pd.read_csv(test_file, sep=sep)
to_use = dev if split_name == 'dev' else test
_ = compute_sim(args, to_use, labels_to_use, data, pretrained_name, model_name, seed, print_results = True)
if args.bootstrap:
start = time.time()
boot_times = 10000
print(f"Starting bootstrap with {boot_times/1000:.0f}k samples...")
leaking_diff_list = []
nonleaking_diff_list = []
overall_metric_list = []
for b in range(boot_times):
boot_idx = np.random.choice(np.arange(len(to_use)), replace=True, size = len(to_use))
to_use_boot = to_use.iloc[boot_idx,:]
mean, leaking_diff, nonleaking_diff = compute_sim(args, to_use_boot, labels_to_use, data, pretrained_name, model_name, seed, print_results = False)
overall_metric_list.append(mean)
leaking_diff_list.append(leaking_diff)
nonleaking_diff_list.append(nonleaking_diff)
lb, ub = np.quantile(nonleaking_diff_list, (.025, .975))
CI = (ub - lb) / 2
print("\nnonleaking diff: %.2f (+/- %.2f)" % (np.mean(nonleaking_diff_list)*100, 100*CI))
lb, ub = np.quantile(leaking_diff_list, (.025, .975))
CI = (ub - lb) / 2
print("\nleaking diff: %.2f (+/- %.2f)" % (np.mean(leaking_diff_list)*100, 100*CI))
lb, ub = np.quantile(overall_metric_list, (.025, .975))
CI = (ub - lb) / 2
print("\nunweighted mean: %.2f (+/- %.2f)\n" % (np.mean(overall_metric_list)*100, 100*CI))
print("time for bootstrap: %.1f minutes" % ((time.time() - start)/60))
print("--------------------------\n")
def compute_sim(args, to_use, labels_to_use, data, pretrained_name, model_name, seed, print_results = False):
labels = to_use[labels_to_use]
xe_col = '%s_%s_%s_%s_seed%s_XE' % ('preds', data, pretrained_name, model_name, seed)
e_col = '%s_%s_%s_%s_seed%s_E' % ('preds', data, pretrained_name, model_name, seed)
x_col = '%s_%s_%s_%s_seed%s_X' % ('preds', data, pretrained_name, model_name, seed)
xe = to_use[xe_col]
e = to_use[e_col]
x = to_use[x_col]
xe_correct = np.array(1*(labels==xe))
x_correct = np.array(1*(labels==x))
e_correct = np.array(1*(labels==e))
# baseline and leaking proxy variable
baseline_correct = 1*(x_correct)
leaking = 1*(e_correct)
leaked = np.argwhere(leaking.tolist()).reshape(-1)
# get subgroups
nonleaked = np.setdiff1d(np.arange(len(e_correct)), leaked)
xe_correct_leaked = xe_correct[leaked]
e_correct_leaked = e_correct[leaked]
x_correct_leaked = x_correct[leaked]
xe_correct_nonleaked = xe_correct[nonleaked]
e_correct_nonleaked = e_correct[nonleaked]
x_correct_nonleaked = x_correct[nonleaked]
num_leaked = len(leaked)
num_non_leaked = len(xe) - num_leaked
unweighted_mean = np.mean([np.mean(xe_correct[split]) - np.mean(baseline_correct[split]) for split in [leaked,nonleaked]])
nonleaking_diff = np.mean(xe_correct_nonleaked) - np.mean(baseline_correct[nonleaked])
leaking_diff = np.mean(xe_correct_leaked) - np.mean(baseline_correct[leaked])
if print_results:
print("\n------------------------")
print("num (probably) leaked: %d" % num_leaked)
print("y|x,e : %.4f baseline : %.4f y|x,e=null: %.4f" % (np.mean(xe_correct_leaked), np.mean(baseline_correct[leaked]),
completion: np.mean(x_correct_leaked)
api: numpy.mean
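# A small self-contained illustration of the metric computed in compute_sim above: accuracy of
# the explanation-conditioned predictions (xe) is compared with the baseline (x) separately on
# the "leaked" subset (where the explanation alone predicts the label) and the non-leaked
# subset, and the two differences are averaged. The toy arrays below are made up for
# illustration only.
import numpy as np

labels = np.array([0, 1, 1, 0, 1, 0])
xe_pred = np.array([0, 1, 1, 1, 1, 0])   # predictions conditioned on x and e
x_pred = np.array([0, 0, 1, 1, 1, 0])    # predictions from x alone (baseline)
e_pred = np.array([0, 1, 0, 0, 1, 1])    # predictions from e alone (leakage proxy)

xe_correct = 1 * (labels == xe_pred)
x_correct = 1 * (labels == x_pred)
leaked = np.argwhere(1 * (labels == e_pred)).reshape(-1)
nonleaked = np.setdiff1d(np.arange(len(labels)), leaked)
unweighted_mean = np.mean([np.mean(xe_correct[s]) - np.mean(x_correct[s])
                           for s in (leaked, nonleaked)])
print("unweighted mean: %.3f" % unweighted_mean)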
from molsysmt._private_tools._digestion import *
from molsysmt._private_tools.exceptions import *
from molsysmt.lib import geometry as libgeometry
from molsysmt import puw
import numpy as np
def distance(molecular_system, selection="all", groups_of_atoms=None, group_behavior=None, frame_indices="all",
selection_2=None, groups_of_atoms_2=None, group_behavior_2=None, frame_indices_2=None,
pairs=False, crossed_frames=False, pbc=False, parallel=False, output_form='tensor',
output_atom_indices=False, output_frame_indices=False, engine='MolSysMT', syntaxis='MolSysMT'):
# group_behavior in
# ['center_of_mass','geometric_center','minimum_distance','maximum_distance']
# output_form in ['tensor','dict']
    # crossed_frames is for when we want to compute a list of frames 1 against a list of frames 2
    # (all against all); if crossed_frames=False, only the first frame of list 1 is compared with
    # the first frame of list 2, the second with the second, etc.
    # selection groups is there in case distances between centers of mass are wanted; it needs
    # a list of lists compared against another list of lists.
from molsysmt.multitool import convert, select, get, extract
from molsysmt.centers import center_of_mass, geometric_center
molecular_system = digest_molecular_system(molecular_system)
engine = digest_engine(engine)
frame_indices = digest_frame_indices(frame_indices)
frame_indices_2 = digest_frame_indices(frame_indices_2)
if group_behavior=='minimum_distance' or group_behavior_2=='minimum_distance':
if group_behavior=='minimum_distance' and group_behavior_2=='minimum_distance':
raise NotImplementedError(NotImplementedMessage)
#num_groups_1=len(groups_of_atoms)
#num_groups_2=len(groups_of_atoms_2)
#frame_indices = _digest_frame_indices(item, frame_indices)
#num_frames=len(frame_indices)
#dists = np.zeros((num_frames, num_groups_1, num_groups_2),dtype=float)
#for ii in range(num_groups_1):
# group1 = groups_of_atoms_2[ii]
# for jj in range(num_groups_2):
# group2 = groups_of_atoms_2[jj]
# _, min_dist = min_distances(molecular_system=molecular_system, selection=group1,
# frame_indices=frame_indices,
# selection_2=group2,
# pbc=pbc, parallel=parallel, engine=engine)
# dists[:,ii,jj]=min_dist
#del(num_groups1,num_groups2,frame_indices,num_frames,group1,group2)
#return dists
else:
raise NotImplementedError(NotImplementedMessage)
if engine=='MolSysMT':
diff_set = True
same_selection = False
same_groups = False
same_frames = False
if groups_of_atoms is not None:
selection=None
if (selection is not None) and (selection_2 is None):
if (groups_of_atoms_2 is None):
selection_2 = selection
same_selection = True
diff_set = False
if groups_of_atoms is not None:
if (selection_2 is None) and (groups_of_atoms_2 is None):
groups_of_atoms_2=groups_of_atoms
same_groups = True
diff_set = False
if frame_indices_2 is None:
frame_indices_2 = frame_indices
same_frames = True
else:
diff_set = True
if selection is not None:
if group_behavior == 'center_of_mass':
coordinates_1 = center_of_mass(molecular_system, selection=selection, frame_indices=frame_indices)
atom_indices_1 = [0]
elif group_behavior == 'geometric_center':
coordinates_1 = geometric_center(molecular_system, selection=selection, frame_indices=frame_indices)
atom_indices_1 = [0]
else:
atom_indices_1 = select(molecular_system, selection=selection, syntaxis=syntaxis)
coordinates_1 = get(molecular_system, target='atom', indices=atom_indices_1, frame_indices=frame_indices, coordinates=True)
else:
if group_behavior == 'center_of_mass':
coordinates_1 = center_of_mass(molecular_system, groups_of_atoms=groups_of_atoms, frame_indices=frame_indices)
                atom_indices_1 = np.arange(coordinates_1.shape[1])
elif group_behavior == 'geometric_center':
coordinates_1 = geometric_center(molecular_system, groups_of_atoms=groups_of_atoms, frame_indices=frame_indices)
atom_indices_1 = np.arange(coordinates_1.shape[1])
else:
raise ValueError("Value of argument group_behavior not recognized.")
if selection_2 is not None:
if group_behavior_2 == 'center_of_mass':
coordinates_2 = center_of_mass(molecular_system, selection=selection_2, frame_indices=frame_indices_2)
atom_indices_2 = [0]
elif group_behavior_2 == 'geometric_center':
coordinates_2 = geometric_center(molecular_system, selection=selection_2, frame_indices=frame_indices_2)
atom_indices_2 = [0]
else:
atom_indices_2 = select(molecular_system, selection=selection_2, syntaxis=syntaxis)
coordinates_2 = get(molecular_system, target='atom', indices=atom_indices_2, frame_indices=frame_indices_2, coordinates=True)
else:
if same_groups and same_frames:
atom_indices_2 = atom_indices_1
coordinates_2 = coordinates_1
else:
if group_behavior_2 == 'center_of_mass':
coordinates_2 = center_of_mass(molecular_system, groups_of_atoms=groups_of_atoms_2, frame_indices=frame_indices_2)
atom_indices_2 =
completion: np.arange(coordinates_2.shape[1])
api: numpy.arange
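# Minimal numpy sketch of what the 'tensor' output of distance() represents when pbc=False and
# both selections share the same frames: for coordinate arrays of shape (n_frames, n_atoms, 3)
# the result has shape (n_frames, n_atoms_1, n_atoms_2). This is an illustration only, not the
# compiled libgeometry routine used by MolSysMT.
import numpy as np

def pairwise_distance_tensor(coords_1, coords_2):
    # broadcast to (n_frames, n_atoms_1, n_atoms_2, 3) and reduce over the last axis
    diff = coords_1[:, :, np.newaxis, :] - coords_2[:, np.newaxis, :, :]
    return np.sqrt((diff ** 2).sum(axis=-1))

coords_a = np.random.rand(5, 10, 3)   # 5 frames, 10 atoms
coords_b = np.random.rand(5, 4, 3)    # 5 frames, 4 atoms
print(pairwise_distance_tensor(coords_a, coords_b).shape)   # (5, 10, 4)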
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bone histomorphometry and image processing methods.
"""
__author__ = ['<NAME>']
__date_created__ = '2021-11-03'
__date__ = '2022-04-22'
__copyright__ = 'Copyright (c) 2021, JC|MSK'
__docformat__ = 'restructuredtext en'
__license__ = "GPL"
__version__ = "0.1"
__maintainer__ = '<NAME>'
__email__ = "<EMAIL>"
import numpy as np
from scipy import ndimage
from skimage import measure, morphology
import logging
from tqdm import tqdm
import recon_utils as ru
def centerofmass(bwimage):
"""Center Of Mass (COM) of binary image.
Parameters
----------
bwimage: bool
Binary image. Can be 2D and 3D.
Returns
-------
cmassx_array
X-coordinate array of the COM. If input is 3D, an array of the slicewise COMs is returned.
cmassy_array
Y-coordinate array of the COM.
"""
if bwimage.ndim == 3:
# output arrays initialization
cmassx_array = np.zeros([bwimage.shape[0]])
cmassy_array = np.zeros([bwimage.shape[0]])
for slice in range(0, bwimage.shape[0]):
y = np.sum(bwimage[slice,:,:], 1)
cmassy = np.inner(y, np.arange(0, y.size))
cmassy_array[slice] = cmassy / np.sum(y)
x = np.sum(bwimage[slice, :, :], 0)
cmassx = np.inner(x, np.arange(0, x.size))
cmassx_array[slice] = cmassx / np.sum(x)
elif bwimage.ndim == 2:
y =
completion: np.sum(bwimage, 1)
api: numpy.sum
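# Quick check of the weighted-index formula used in centerofmass() on a small 2-D binary image;
# the result should agree with scipy.ndimage.center_of_mass. This example is an illustration
# and not part of the original module.
import numpy as np
from scipy import ndimage

bw = np.zeros((6, 8), dtype=bool)
bw[2:4, 3:7] = True
row_mass = np.sum(bw, 1)                                   # mass per row
col_mass = np.sum(bw, 0)                                   # mass per column
com_row = np.inner(row_mass, np.arange(row_mass.size)) / np.sum(row_mass)
com_col = np.inner(col_mass, np.arange(col_mass.size)) / np.sum(col_mass)
assert np.allclose((com_row, com_col), ndimage.center_of_mass(bw))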
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2021
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Coordinate system tools.
"""
# Imports
import warnings
import itertools
import numpy as np
from scipy.interpolate import griddata, NearestNDInterpolator
from scipy.spatial import transform
def cart2sph(x, y, z):
""" Cartesian to spherical coordinate transform.
See Also
--------
sph2cart, text2grid, grid2text
Parameters
----------
x: float or array_like.
x-component of Cartesian coordinates
y: float or array_like.
y-component of Cartesian coordinates
z: float or array_like.
z-component of Cartesian coordinates
Returns
-------
alpha: float or `numpy.ndarray`
Azimuth angle in radiants. The value of the angle is in the range
[-pi pi].
beta: float or `numpy.ndarray`
Elevation angle in radiants. The value of the angle is in the range
[-pi/2, pi/2].
r: float or `numpy.ndarray`
Radius.
"""
alpha = np.arctan2(y, x)
beta = np.arctan2(z, np.sqrt(x**2 + y**2))
r = np.sqrt(x**2 + y**2 + z**2)
return alpha, beta, r
def sph2cart(alpha, beta, r):
""" Spherical to cartesian coordinate transform.
See Also
--------
cart2sph, text2grid, grid2text
Parameters
----------
alpha: float or array_like
Azimuth angle in radiants.
beta: float or array_like
Elevation angle in radiants.
r: float or array_like
Radius.
Returns
-------
x: float or `numpy.ndarray`
x-component of Cartesian coordinates
y: float or `numpy.ndarray`
y-component of Cartesian coordinates
z: float or `numpy.ndarray`
z-component of Cartesian coordinates
"""
x = r * np.cos(alpha) * np.cos(beta)
y = r * np.sin(alpha) * np.cos(beta)
z = r *
completion: np.sin(beta)
api: numpy.sin
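# Round-trip sanity check for the two transforms above (illustration only; it assumes sph2cart
# returns (x, y, z) as documented).
import numpy as np

x, y, z = 1.0, -2.0, 0.5
alpha, beta, r = cart2sph(x, y, z)
assert np.allclose(sph2cart(alpha, beta, r), (x, y, z))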
import numpy as np
import os
import yaml
from scipy.stats import norm
pdfs_dir = os.path.join(os.path.dirname(__file__), '../../config_files/2dpdfs')
characteristics_dir = os.path.join(os.path.dirname(__file__),
'../../config_files/survey_characteristics')
class StochasticNoise(object):
"""
Returns an array of specified size of randomly selected values of stochastic
seeing and sky-brightness from a list.
"""
def __init__(self, size, pdf):
rand_idx = np.random.randint(len(pdf), size=size)
self.seeing = pdf[rand_idx, 0]
self.sky_brightness = pdf[rand_idx, 1]
def noise_from_yaml(survey, band, pdfs_dir=pdfs_dir,
yaml_dir=characteristics_dir):
    """
    Loads the noise configuration from the survey yaml file and draws one random
    (seeing, sky_brightness) pair from the corresponding 2-D pdf file.
    """
try:
pdf = np.loadtxt("%s/2d%s_%s.txt" % (pdfs_dir, band, survey))
rand_idx = np.random.randint(len(pdf))
seeing = pdf[rand_idx, 0]
sky_brightness = pdf[rand_idx, 1]
yaml_file = '%s/%s_%s.yaml' % (yaml_dir, band, survey)
with open(yaml_file, 'r') as config_file:
survey_noise = yaml.safe_load(config_file)
survey_noise['seeing'] = seeing
survey_noise['sky_brightness'] = sky_brightness
except FileNotFoundError:
print('%s band in survey %s is not supported.' % (band, survey))
print('Please make sure the appropriate config file exists.')
raise
except OSError:
print('%s band in survey %s is not supported.' % (band, survey))
print('Please make sure the appropriate 2d pdf exists.')
raise
return survey_noise
def survey_noise(survey_name, band, directory=pdfs_dir):
"""Specify survey name and band"""
survey_noise = noise_from_yaml(survey_name, band, directory)
return survey_noise
def calculate_background_noise(image):
"""Takes in array of pixel values of an image, fits a gaussian profile to the negative tail of the histogram,
returns a dictionary containing the 'background_noise' parameter containing the standard deviation of the scatter.
Parameters:
file_loc (ndarray): Array of image pixel values
Returns:
float: background noise - standard deviation.
"""
idx = np.ravel(image) < 0
neg_val_array = np.ravel(image)[idx]
pos_val_array = -neg_val_array
combined_array =
completion: np.append(neg_val_array, pos_val_array)
api: numpy.append
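# Sketch of the mirroring idea used in calculate_background_noise(): the negative pixels are
# assumed to be pure background scatter, so reflecting them around zero gives a symmetric sample
# whose fitted Gaussian width estimates the background noise. Synthetic data, illustration only.
import numpy as np
from scipy.stats import norm

rng = np.random.default_rng(0)
image = rng.normal(loc=0.0, scale=3.0, size=(64, 64))   # background-only toy image
neg = np.ravel(image)[np.ravel(image) < 0]
mirrored = np.append(neg, -neg)
_, background_sigma = norm.fit(mirrored)
print(round(background_sigma, 2))                       # ~3.0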
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 6 13:07:14 2019
EV Model:
Creates Class GRID and EV classes
GRID Class:
A general setting for EVs, in has the general simulation parameters:
simulation duration, time step, electricity prices
EV Class:
A single EV with base commands that simulate distances driven per day and charging sessions.
Considers a non-systematic plug in behavior
Default charging mode is uncontrolled (starts as soon as possible)
EV modulated class:
Extension of EV class.
Charging mode is modulated (charging at minimum power during the whole charging session)
EV randstart:
Extension of EV class.
Charging mode is similar to base EV, but start of charging is random during the charging session
EV Dumb reverse:
Extension of EV class
Charging mode
@author: U546416
"""
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
import datetime as dt
import scipy.stats as stats
import cvxopt
import time
cvxopt.solvers.options['show_progress'] = False
bins_dist = np.linspace(0, 100, num=51)
dist_function = np.sin(bins_dist[:-1]/ 100 * np.pi * 2) + 1
dist_function[10:15] = [0, 0, 0 , 0 , 0]
pdfunc = (dist_function/sum(dist_function)).cumsum()
bins_hours = np.linspace(0,24,num=25)
dsnms = ['Mon', 'Tues', 'Wed', 'Thur', 'Fri', 'Sat', 'Sun']
extraevkwargs = ['pmin_charger', 'pop_dur']
def sec_to_time(s):
""" Returns the hours, minutes and seconds of a given time in secs
"""
return (int(s//3600), int((s//60)%60), (s%60))
def random_from_cdf(cdf, bins):
"""Returns a random bin value given a cdf.
cdf has n values, and bins n+1, delimiting initial and final boundaries for each bin
"""
if cdf.max() > 1.0001 or cdf.min() < 0:
raise ValueError('CDF is not a valid cumulative distribution function')
r = np.random.rand(1)
x = int(np.digitize(r, cdf))
return bins[x] + np.random.rand(1) * (bins[x+1] - bins[x])
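# Usage sketch (illustration only): build a cdf from the module-level dist_function/bins_dist
# defined above and draw one distance sample, e.g.
#   example_cdf = (dist_function / dist_function.sum()).cumsum()
#   example_km = random_from_cdf(example_cdf, bins_dist)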
def random_from_2d_pdf(pdf, bins):
""" Returns a 2-d random value from a joint PDF.
    It assumes a square matrix as the joint pdf; the sum of all its values is 1.
"""
val1 = random_from_cdf(pdf.sum(axis=1).cumsum(), bins)
x = np.digitize(val1, bins)
val2 = random_from_cdf(pdf[x-1].cumsum() / pdf[x-1].sum(), bins)
return val1, val2
def discrete_random_data(data_values, values_prob):
""" Returns a random value from a set data_values, according to the probability vector values_prob
"""
return np.random.choice(data_values, p=values_prob)
def set_dist(data_dist):
"""Returns one-way distance given by a cumulative distribution function
The distance is limited to 120km
"""
    # Default lognormal parameters
# Based on O.Borne thesis (ch.3), avg trip +-19km
s=0.736
scale=np.exp(2.75)
loc=0
if type(data_dist) in [int, float]:
return data_dist
if type(data_dist) == dict:
if 's' in data_dist:
# Data as scipy.stats.lognorm params
s = data_dist['s']
loc = data_dist['loc']
scale = data_dist['scale']
if 'cdf' in data_dist:
# data as a cdf, containts cdf and bins values
cdf = data_dist['cdf']
if 'bins' in data_dist:
bins_dist = data_dist['bins']
else:
bins_dist = np.linspace(0, 100, num=51)
return random_from_cdf(cdf, bins_dist)
d = stats.lognorm.rvs(s, loc, scale, 1)
#check that distance is under dmax = 120km, so it can be done with one charge
while d > 120:
d = stats.lognorm.rvs(s, loc, scale, 1)
return d
class Grid:
def __init__(self,
ndays=7, step=30, init_day=0,
name='def', load=0, ss_pmax=0, verbose=True,
buses = []):
"""Instantiates the grid object:
Creates vectors of ev load, conso load
for time horizon ndays = default 7, one week
with steps of 30 min
ev_data is a dict of ev types where each entry has a dict with:
type : 'tou/dumb'
n_ev : # of evs
other : (not needed), dict with other params
**ev_global_params are general params passed to all types of evs
"""
self.verbose = verbose
if verbose:
print('Instantiating Grid {}'.format(name))
self.weekends = [5, 6]
if 60 % step > 0:
            raise ValueError('Steps should be a divisor of 60 minutes \
(ex. 30-15-5min), given value: %d' % step)
# General params
self.step = step # in minutes
self.ndays = ndays
self.periods = int(ndays * 24 * 60 / step)
self.periods_day = int(24 * 60 / step)
self.periods_hour = int(60/step)
self.period_dur = step / 60 #in hours
self.day = 0
self.days = [(i + init_day)%7 for i in range(ndays + 1)]
# times is an array that contains of len=nperiods, where for period i:
# times[i] = [day, hour, day_of_week] ** day_of_week 0 == monday
self.times = [[i, j, (i+init_day)%7]
for i in range(ndays)
for j in np.arange(0,24,self.period_dur)]
#Grid params
self.ss_pmax = ss_pmax #in MW
self.name = name
# Init global vectors
self.init_load_vector()
# TODO: load as dataframe, adjusting interpolation and days to given params
self.buses = []
# Empty arrays for EVs
self.ev_sets = []
self.ev_grid_sets = []
self.evs_sets = {}
self.evs = {}
print('Grid instantiated')
def add_aggregator(self, nameagg, **agg_data):
""" Initiates an Aggregator
"""
if not hasattr(self, 'aggregators'):
self.aggregators = []
self.ev_agg_sets = []
agg = Aggregator(self, nameagg, **agg_data)
if self.verbose:
print('Creating new aggregator')
self.aggregators.append(agg)
return agg
def add_evs(self, nameset, n_evs, ev_type, aggregator=None, charge_schedule=None, **ev_params):
""" Initiates EVs give by the dict ev_data and other ev global params
"""
ev_types = dict(dumb = EV,
mod = EV_Modulated,
randstart = EV_RandStart,
reverse = EV_DumbReverse,
pfc = EV_pfc,
optch = EV_optimcharge)
if not (ev_type in ev_types):
raise ValueError('Invalid EV type "{}" \
Accepted types are: {}'.format(ev_type, [i for i in ev_types.keys()]))
ev_fx = ev_types[ev_type]
        # Check that the nameset doesn't already exist
if nameset in self.ev_sets:
raise ValueError('EV Nameset "{}" already in the grid. Not created.'.format(nameset))
evset = []
# Create evs
# TODO: improve this
# Check if schedule is given:
if not (charge_schedule is None):
# If schedule has 'User' column, create each EV with its own schedule
if 'User' in charge_schedule:
users = charge_schedule.User.unique()
n_evs = len(users)
for i in users:
evset.append(ev_fx(self, name=str(i), boss=aggregator,
charge_schedule=charge_schedule[charge_schedule.User==i].reset_index(drop=True),
**ev_params))
# else, all EVs with same schedule
else:
for i in range(n_evs):
evset.append(ev_fx(self, name=nameset+str(i), boss=aggregator,
charge_schedule=charge_schedule, **ev_params))
else:
for i in range(n_evs):
evset.append(ev_fx(self, name=nameset+str(i), boss=aggregator, **ev_params))
if self.verbose:
print('Created EV set {} containing {} {} EVs'.format(
nameset, n_evs, ev_type))
# Save in local variables
self.ev_sets.append(nameset)
# Check if the evs are assigned to an aggregator
if aggregator == None:
self.ev_grid_sets.append(nameset)
else:
if not (aggregator in self.aggregators):
raise ValueError('Invalid aggregator')
self.ev_agg_sets.append(nameset)
aggregator.evs += evset
aggregator.nevs += n_evs
self.evs_sets[nameset] = evset
for ev in evset:
self.evs[ev.name] = ev
self.init_ev_vectors(nameset)
return self.evs_sets[nameset]
def init_load_vector(self):
""" Creates empty array for global variables"""
self.ev_load = dict(Total = np.zeros(self.periods))
self.ev_potential = dict(Total = np.zeros(self.periods))
self.ev_off_peak_potential = dict(Total = np.zeros(self.periods))
self.ev_up_flex = dict(Total = np.zeros(self.periods))
self.ev_dn_flex = dict(Total = np.zeros(self.periods))
self.ev_mean_flex = dict(Total = np.zeros(self.periods))
self.ev_batt = dict(Total = np.zeros(self.periods))
def init_ev_vectors(self, nameset):
""" Creates empty array for global EV variables per set of EV"""
self.ev_load[nameset] = np.zeros(self.periods)
self.ev_potential[nameset] = np.zeros(self.periods)
self.ev_off_peak_potential[nameset] = np.zeros(self.periods)
self.ev_up_flex[nameset] = np.zeros(self.periods)
self.ev_dn_flex[nameset] = np.zeros(self.periods)
self.ev_mean_flex[nameset] = np.zeros(self.periods)
self.ev_batt[nameset] = np.zeros(self.periods)
def assign_ev_bus(self, evtype, buses, ev_per_bus):
""" Asign a bus for a group of evs (evtype), in a random fashion,
limited to a maximum number of evs per bus
"""
available_buses = [buses[i] for i in range(len(buses)) for j in range(ev_per_bus[i])]
np.random.shuffle(available_buses)
ev = self.evs_sets[evtype]
if len(ev) > len(available_buses):
strg = ('Not sufficient slots in buses for the number of EVs\n'+
'# slots {}, # EVs {}'.format(len(available_buses), len(ev)))
raise ValueError(strg)
for i in range(len(ev)):
ev[i].bus = available_buses[i]
def charging_sessions(self, key=None, stats=True):
# compute hist
nsessions = np.asarray([ev.ch_status.sum() + (ev.extra_energy > 0).sum()
for ev in self.get_evs(key)])/self.ndays * 7
h_bins = np.append(np.arange(0,8,1), 100)
hs = np.histogram(nsessions, h_bins)
if stats:
return hs[0]/sum(hs[0]), np.mean(nsessions), np.median(nsessions)
return hs[0]/sum(hs[0])
def add_freq_data(self, freq, step_f=10, max_dev=0.2, type_f='dev', base_f=50):
""" Computes scaling factor for frequency response (mu).
Input data is frequency array, either on absolute Hz or in deviation from base frequency (50Hz as default)
step_f is the time step of the frequency array (in seconds)
saves mu array, which is the average frequency deviation for each step
"""
if not type_f=='dev':
freq = freq - base_f
# number of frequency measures per simulation period
nsteps_period = int(self.period_dur * 3600 / step_f)
        # if there is not enough frequency data, replicate it ntimes
        ntimes = (nsteps_period * self.periods) / len(freq)
        if ntimes > 1:
            print('Insufficient data, replicating it')
            freq = np.tile(freq, int(np.ceil(ntimes)))
mu = np.zeros(self.periods)
mu_up = np.zeros(self.periods)
mu_dn = np.zeros(self.periods)
dt_up = np.zeros(self.periods)
dt_dn = np.zeros(self.periods)
freq = (freq / max_dev).clip(-1,1)
for i in range(self.periods):
mu[i] = -freq[i*nsteps_period:(i+1)*nsteps_period].mean()
mu_up[i] = -(freq[i*nsteps_period:(i+1)*nsteps_period][freq[i*nsteps_period:(i+1)*nsteps_period]<=0]).mean()
mu_dn[i] = -(freq[i*nsteps_period:(i+1)*nsteps_period][freq[i*nsteps_period:(i+1)*nsteps_period]>0]).mean()
dt_up[i] = (freq[i*nsteps_period:(i+1)*nsteps_period]<=0).mean()
dt_dn[i] = (freq[i*nsteps_period:(i+1)*nsteps_period]>0).mean()
self.mu = mu
self.mu_up = np.nan_to_num(mu_up)
self.mu_dn = np.nan_to_num(mu_dn)
self.dt_up = dt_up
self.dt_dn = dt_dn
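        # Example (illustration only): with step=30 min and step_f=10 s there are
        # 0.5*3600/10 = 180 frequency samples per period; mu[i] is minus the mean of those
        # samples after normalising by max_dev and clipping to [-1, 1], while mu_up / mu_dn
        # average the same deviation split by sign and dt_up / dt_dn are the corresponding
        # fractions of time.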
def add_prices(self, prices, step_p=1):
""" Adds price vector.
Input: Price vector (can be one day, week or the whole duration of the sim)
step_p: Length of each price vector step, in hours
Converts input price vector in step_p hours to
output price vector in grid step
Prices in c€/kWh
"""
# number of simulation steps per input price step
nps = int(step_p/self.period_dur)
# if there is not enough price data, replicate it as many times as needed
ntimes = (self.periods) / (len(prices) * nps)
if ntimes > 1:
print('Insufficient data, replicating it')
prices = np.tile(prices, int(np.ceil(ntimes)))
self.prices = np.repeat(prices, nps)[:self.periods]
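# Usage sketch (illustrative; `grid` stands for an instance of this class): with
# period_dur=0.5 h, passing a 24-element hourly price vector and step_p=1 gives nps=2, so
# each hourly price is repeated twice per hour and tiled over the horizon if it is shorter:
# grid.add_prices(np.ones(24) * 15, step_p=1) # flat tariff of 15 c€/kWh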
def add_base_load(self, load, step_p=1):
""" Adds base load vector.
Input: Base load vector (can be one day, week or the whole duration of the sim)
step_p: Length of each price vector step, in hours
Converts input price vector in step_p hours to
output price vector in grid step
Base load in MW
"""
# number of simulation steps per input price step
nps = int(step_p/self.period_dur)
# if there is not enough base load data, replicate it as many times as needed
ntimes = (self.periods) / (len(load) * nps)
if ntimes > 1:
print('Insufficient data, replicating it')
load = np.tile(load, int(np.ceil(ntimes)))
self.base_load = np.repeat(load, nps)[:self.periods]
def add_evparam_from_dataframe(self, param, df):
""" Add params to EVs from pd.DataFrame.
df.columns are ev.names
"""
for c in df:
if c in self.evs:
setattr(self.evs[c], param, df[c].values)
if param in ['charging_eff', 'batt_size', 'discharging_eff']:
self.evs[c].compute_derived_params(self)
if param in ['tou_ini', 'tou_end', 'tou_we']:
self.evs[c].set_off_peak(self)
def add_evparam_from_dict(self, param, dic):
""" Add params to EVs from dict.
dict.keys are ev.names
"""
for c in dic:
if c in self.evs:
setattr(self.evs[c], param, dic[c])
if param in ['charging_eff', 'batt_size', 'discharging_eff']:
self.evs[c].compute_derived_params(self)
if param in ['tou_ini', 'tou_end', 'tou_we', 'tou_ini_we', 'tou_end_we']:
self.evs[c].set_off_peak(self)
def new_day(self):
""" Iterates over evs to compute new day
"""
for types in self.ev_grid_sets:
for ev in self.evs_sets[types]:
ev.new_day(self)
if hasattr(self, 'aggregators'):
for agg in self.aggregators:
agg.new_day()
def compute_per_bus_data(self):
""" Computes aggregated ev load per bus and ev type
"""
load_ev = {}
for types in self.ev_sets:
for ev in self.evs_sets[types]:
if (types, ev.bus) in load_ev:
load_ev[types, ev.bus] += ev.charging * 1
else:
load_ev[types, ev.bus] = ev.charging * 1
return load_ev
def compute_agg_data(self) :
""" Computes aggregated charging per type of EV and then total for the grid in MW
"""
total = 'Total'
if self.verbose:
print('Grid {}: Computing aggregated data'.format(self.name))
for types in self.ev_sets:
for ev in self.evs_sets[types]:
self.ev_potential[types] += ev.potential / 1000
self.ev_load[types] += ev.charging / 1000
self.ev_off_peak_potential[types] += ev.off_peak_potential / 1000
self.ev_up_flex[types] += ev.up_flex / 1000
self.ev_dn_flex[types] += ev.dn_flex / 1000
self.ev_mean_flex[types] += ev.mean_flex_traj / 1000
self.ev_batt[types] += ev.soc * ev.batt_size / 1000
self.ev_potential[total] = sum([self.ev_potential[types] for types in self.evs_sets])
self.ev_load[total] = sum([self.ev_load[types] for types in self.evs_sets])
self.ev_off_peak_potential[total] = sum([self.ev_off_peak_potential[types] for types in self.evs_sets])
self.ev_up_flex[total] = sum([self.ev_up_flex[types] for types in self.evs_sets])
self.ev_dn_flex[total] = sum([self.ev_dn_flex[types] for types in self.evs_sets])
self.ev_mean_flex[total] = sum([self.ev_mean_flex[types] for types in self.evs_sets])
self.ev_batt[total] = sum([self.ev_batt[types] for types in self.evs_sets])
def do_days(self, agg_data=True):
"""Iterates over days to compute charging
"""
if self.verbose:
t = time.time()
print('Starting simulation, Grid {}'.format(self.name))
k = -1
for d in range(self.ndays):
if self.verbose:
if (d*20)// self.ndays > k:
k = (d*20)// self.ndays
print('\tComputing day {}'.format(self.day))
self.new_day()
self.day += 1
if agg_data:
self.compute_agg_data()
if self.verbose:
print('Finished simulation, Grid {}\nElapsed time {}h {:02d}:{:04.01f}'.format(self.name, *sec_to_time(time.time()-t)))
def set_aspect_plot(self, ax, day_ini=0, days=-1, **plot_params):
""" Set the aspect of the plot to fit in the specified timeframe and adds Week names as ticks
"""
x = [t[0] * 24 + t[1] for t in self.times]
if days == -1:
days = self.ndays
days = min(self.ndays - day_ini, days)
t0 = self.periods_day * day_ini
tf = self.periods_day * (days + day_ini)
daylbl = [dsnms[self.times[i][2]] for i in np.arange(t0, tf, self.periods_day)]
ax.set_xlabel('Time [days]')
if 'title' in plot_params:
ax.set_title(plot_params['title'])
else:
ax.set_title('Load at {}'.format(self.name))
if 'ylim' in plot_params:
ax.set_ylim(top=plot_params['ylim'])
ax.set_ylim(bottom=0)
ax.set_xticks(np.arange(self.ndays+1) * 24)
ax.set_xticklabels(np.tile(daylbl, int(np.ceil((self.ndays+1)/7))), ha='left')
ax.grid(axis='x')
ax.set_xlim(x[t0], x[tf-1])
ax.legend(loc=1)
def plot_ev_load(self, day_ini=0, days=-1, opp=False, **plot_params):
""" Stacked plot of EV charging load
"""
load = np.array([self.ev_load[types] for types in self.ev_sets])
tot = 'Total'
x = [t[0] * 24 + t[1] for t in self.times]
if not 'ax' in plot_params:
f, ax = plt.subplots(1,1)
else:
ax = plot_params['ax']
del plot_params['ax']
ax.stackplot(x, load, labels=self.ev_sets)
if opp:
ax.plot(x, self.ev_potential[tot], label='EV potential')
if not (self.ev_potential[tot] == self.ev_off_peak_potential[tot]).all():
ax.plot(x, self.ev_off_peak_potential[tot], label='EV off-peak potential')
ax.set_ylabel('Power [MW]')
self.set_aspect_plot(ax, day_ini=day_ini, days=days, **plot_params)
return ax
def plot_total_load(self, day_ini=0, days=-1, **plot_params):
""" Stacked plot of EV charging load + base load
"""
tot = 'Total'
x = [t[0] * 24 + t[1] for t in self.times]
if not 'ax' in plot_params:
f, ax = plt.subplots(1,1)
else:
ax = plot_params['ax']
del plot_params['ax']
if not hasattr(self, 'base_load'):
self.base_load = np.zeros(self.periods)
ax.stackplot(x, [self.base_load, self.ev_load[tot]], labels=['Base Load', 'EV Load'])
ax.set_ylabel('Power [MW]')
if self.ss_pmax > 0:
ax.axhline(self.ss_pmax, label='Pmax', linestyle='--', color='red')
self.set_aspect_plot(ax, day_ini=day_ini, days=days, **plot_params)
return ax
def plot_flex_pot(self, day_ini=0, days=-1, trajectory=False, **plot_params):
""" Plot of aggregated flex
"""
tot = 'Total'
x = [t[0] * 24 + t[1] for t in self.times]
if not 'ax' in plot_params:
f, ax = plt.subplots(1,1)
else:
ax = plot_params['ax']
ax.plot(x, self.ev_up_flex[tot], label='Upper storage limit')
ax.plot(x, self.ev_dn_flex[tot], label='Lower storage limit')
if trajectory:
ax.plot(x, self.ev_batt[tot], label='Real trajectory')
else:
ax.plot(x, self.ev_mean_flex[tot], label='Mean flexible trajectory', linestyle='--')
ax.set_ylabel('EV energy storage [MWh]')
self.set_aspect_plot(ax, day_ini=day_ini, days=days, **plot_params)
return ax
def get_global_data(self):
""" Some global info
"""
if not hasattr(self, 'base_load'):
self.base_load = np.zeros(self.periods)
total = 'Total'
total_ev_charge = self.ev_load[total].sum() * self.period_dur #MWh
flex_pot = self.ev_off_peak_potential[total].sum() * self.period_dur
extra_charge = sum(ev.extra_energy.sum()
for ev in self.get_evs()) / 1000
ev_flex_ratio = 1-total_ev_charge / flex_pot
max_ev_load = self.ev_load[total].max()
max_load = (self.ev_load[total] + self.base_load).max()
max_base_load = self.base_load.max()
peak_charge = max_load / self.ss_pmax
h_overload = ((self.ev_load[total] + self.base_load) > self.ss_pmax).sum() * self.period_dur
return dict(Tot_ev_charge_MWh = total_ev_charge,
Extra_charge_MWh = extra_charge,
Base_load_MWh= self.base_load.sum() * self.period_dur,
Flex_ratio = ev_flex_ratio,
Max_ev_load_MW = max_ev_load,
Max_base_load_MW = max_base_load,
Max_load_MW = max_load,
Peak_ss_charge_pu = peak_charge,
Hours_overload = h_overload
)
def get_ev_data(self):
""" EV charge data per subset
"""
types = [t for t in self.evs_sets]
charge = [self.ev_load[t].sum() * self.period_dur
for t in types]
nevs = [len(self.evs_sets[t])
for t in types]
extra_charge = [sum(ev.extra_energy.sum()
for ev in self.evs_sets[t]) / 1000
for t in types]
flex_ratio = [1 - self.ev_load[t].sum() / self.ev_off_peak_potential[t].sum()
for t in types]
max_load = [self.ev_load[t].max()
for t in types]
avg_d = [np.mean([ev.dist_wd
for ev in self.evs_sets[t]])
for t in types]
avg_plugin = [np.mean([ev.n_plugs
for ev in self.evs_sets[t]]) / self.ndays
for t in types]
return dict(EV_sets = types,
N_EVs = nevs,
EV_charge_MWh = charge,
Extra_charge = extra_charge,
Flex_ratio = flex_ratio,
Max_load = max_load,
Avg_daily_dist = avg_d,
Avg_plug_in_ratio= avg_plugin)
def hist_dist(self, weekday=True, **plot_params):
""" Do histogram of distances (weekday)
"""
if not 'ax' in plot_params:
f, ax = plt.subplots(1,1)
else:
ax = plot_params['ax']
d = np.array([ev.dist_wd if weekday else ev.dist_we
for types in self.evs_sets
for ev in self.evs_sets[types]])
avg = d.mean()
h, _, _ = ax.hist(d, bins=np.arange(0,100,2), **plot_params)
ax.axvline(avg, color='r', linestyle='--')
ax.text(x=avg+1, y=h.max()*0.75, s='Average one-way trip distance {} km'.format(np.round(avg,decimals=1)))
ax.set_xlim([0,100])
ax.set_title('Histogram of trip distances')
ax.set_xlabel('km')
ax.set_ylabel('Frequency')
def hist_ncharging(self, **plot_params):
""" Do histogram of number of charging sessions
"""
if not 'ax' in plot_params:
f, ax = plt.subplots(1,1)
else:
ax = plot_params['ax']
d = np.array([ev.n_plugs for ev in self.get_evs()])/(self.ndays/7)
avg = d.mean()
bins = np.arange(0, 9, 1)
bins[-1] = 10
h, _, _ = ax.hist(d, bins=bins, **plot_params)
ax.set_xlim([0, 8])
ax.axvline(avg, color='r', linestyle='--')
ax.text(x=avg+1, y=h.max()*0.75, s='Average charging sessions per week: {}'.format(np.round(avg,decimals=1)))
ax.set_title('Histogram of charging sessions per week')
ax.set_xlabel('# charging sessions per week')
ax.set_ylabel('Frequency')
ax.set_xticklabels([str(i) for i in range(8)] + [r'$\infty$'])
def get_ev(self):
""" Returns random ev
"""
return np.random.choice(list(self.evs.values()))
def get_evs(self, key=None):
""" Returns list of evs
"""
if key in self.evs_sets:
return self.evs_sets[key]
return list(self.evs.values())
def export_ev_data(self, atts=''):
""" returns a dict with ev data
atts : attributes to export
"""
if atts == '':
atts = ['name', 'batt_size', 'charging_power', 'bus', 'dist_wd', 'dist_we']
ev_data = {}
for types in self.evs:
ev_data[types] = []
for ev in self.evs[types]:
ev_data[types].append({att : getattr(ev, att) for att in atts})
return ev_data
def import_ev_data(self, ev_data):
""" sets ev data
ev_data is a dict
{types0: [{ev0}, {ev1}, ...],
types1: [{ev0}, {ev1}, ...]}
and evi
evi = {att, val}, with attribute and value
"""
for types in ev_data:
ev_set = self.evs[types]
for i in range(len(ev_data[types])):
ev = ev_data[types][i]
for att in ev:
setattr(ev_set[i], att, ev[att])
def evs_per_bus(self):
"""Returns the list of buses and the number of evs per bus
"""
busev = []
for ev in self.get_evs():
busev.append(ev.bus)
busev = np.array(busev)
buslist = np.unique(busev)
evs_bus = []
for b in buslist:
evs_bus.append((busev == b).sum())
return buslist, evs_bus
def reset(self):
""" Resets the status of the grid and of EVs
"""
self.day = 0
self.init_load_vector()
for types in self.evs_sets:
self.init_ev_vectors(types)
for ev in self.evs.values():
ev.reset(self)
def set_evs_param(self, param, value, sets='all'):
if sets == 'all':
evs = self.get_evs()
else:
evs = self.evs[sets]
for ev in evs:
setattr(ev, param, value)
ev.compute_derived_params(self)
def save_ev_data(self, folder='', flex=True):
timestamp = ['{:02d}-{:02d}:{:02d}'.format(t[0],int(t[1]//1),int(round(t[1]%1*60))) for t in self.times]
charging = pd.DataFrame([ev.charging for ev in self.evs.values()],
index=list(self.evs.keys()),
columns=timestamp).T
charging.to_csv(folder + 'ev_charging.csv')
if flex:
pd.DataFrame([ev.up_flex for ev in self.evs.values()],
index=list(self.evs.keys()),
columns=timestamp).T.to_csv(folder + 'up_flex.csv')
pd.DataFrame([ev.dn_flex for ev in self.evs.values()],
index=list(self.evs.keys()),
columns=timestamp).T.to_csv(folder + 'dn_flex.csv')
pd.DataFrame([ev.soc for ev in self.evs.values()],
index=list(self.evs.keys()),
columns=timestamp).T.to_csv(folder + 'soc.csv')
def save_agg_data(self, folder='', flex=True):
timestamp = ['{:02d}-{:02d}:{:02d}'.format(t[0],int(t[1]//1),int(round(t[1]%1*60))) for t in self.times]
charging = pd.DataFrame(self.ev_load,
index=timestamp).drop('Total', axis=1)
charging.to_csv(folder + 'ev_sets_charging.csv')
class EV:
""" Basic EV model with dumb charging
"""
bins_dist = np.linspace(0, 100, num=51)
def __init__(self, model, name,
# Daily distance
dist_wd=None,
dist_we=None,
new_dist=False,
extra_trip_proba=0,
var_dist_wd=0,
var_dist_we=0,
# Charging power, efficiency & V2G
charging_power=3.6,
charging_eff=0.95,
discharging_eff=0.95,
driving_eff=None,
# Battery size
batt_size=40,
# Plug in behavior
charging_type='if_needed',
range_anx_factor=1.5,
alpha=1.31, # default alpha value based on Gonzalez Venegas et al,
# "Plug-in behavior of electric vehicles users:
# insights from a large-scale trial and impacts for grid integration studies", 2021, eTransportation.
# Charging properties (ToU)
tou_ini=0,
tou_end=0,
tou_we=False,
tou_ini_we=0,
tou_end_we=0,
# Charging properties: target soc, capacity limit (vcc)
target_soc=1,
flex_time=0,
vcc=None,
# Arrival& departure data
arrival_departure_data = dict(),
ovn=True,
charge_schedule=None,
# Grid data
bus='',
# Aggregator
boss=None,
**kwargs):
"""Instantiates EV object:
name id
sets home-work distance [km]
sets weekend distance [km]
charging power [kW] = default 3.6kW
driving efficiency [kWh/km] = default derived from battery size
battery size [kWh] = default 40 kWh
charging_type = 'all_days' // others
"""
self.name = name
# PARAMS
# Sets distance for weekday and weekend one-way trips
# dist_wx can be a {} with either 's', 'm', 'loc' for lognorm params or with 'cdf', 'bins'
self.dist_wd = set_dist(dist_wd)
self.dist_we = set_dist(dist_we)
self.var_dist_wd = var_dist_wd
self.var_dist_we = var_dist_we
self.new_dist = new_dist
if new_dist:
self.dist_wd_data = dist_wd
self.dist_we_data = dist_we
# Discrete random distribution (non correlated) for battery & charging power
if type(charging_power) is int or type(charging_power) is float:
self.charging_power = charging_power
elif len(charging_power) == 2:
self.charging_power = discrete_random_data(charging_power[0], charging_power[1])
else:
raise ValueError('Invalid charging_power value')
if type(batt_size) in [int, float, np.int32, np.float64]:
self.batt_size = batt_size
elif len(batt_size) == 2:
self.batt_size = discrete_random_data(batt_size[0], batt_size[1])
else:
raise ValueError('Invalid batt_size value')
self.charging_eff = charging_eff # Charging efficiency, in pu
self.discharging_eff = discharging_eff # Discharging efficiency, in pu
if driving_eff is None: # Driving efficiency kWh / km;
# default value based on Weiss et al, 2020, Energy efficiency trade-offs in small to large electric vehicles, Env Science Europe v32.
self.driving_eff = 0.14 + 0.0009*self.batt_size
else:
self.driving_eff = driving_eff
self.min_soc = 0.2 # Min SOC of battery to define plug-in
self.max_soc = 1 # Max SOC of battery to define plug-in
self.target_soc = target_soc # Target SOC for charging process
self.n_trips = 2 # Number of trips per day (Go and come back)
self.extra_trip_proba = extra_trip_proba # probability of extra trip
if not charging_type in ['if_needed', 'if_needed_sunday', 'all_days', 'if_needed_weekend', 'weekdays', 'weekdays+1']:
raise ValueError('Invalid charging type %s' % charging_type)
self.charging_type = charging_type # Charging behavior (every day or not)
if charging_type in ['if_needed_weekend']: # Forced charging on Friday, Saturday or Sunday
self.forced_day = np.random.randint(low=0,high=3,size=1)+4
elif charging_type == 'if_needed_sunday':
self.forced_day = 6
else:
self.forced_day = 8 # No forced day
self.range_anx_factor = range_anx_factor # Range anxiety factor for "if needed" charging
self.alpha = alpha # Factor for probabilitic "if needed" charging. High means high plug in rate, low means low plug in rate
self.tou_ini = tou_ini # Time of Use (low tariff) start time (default=0)
self.tou_end = tou_end # Time of Use (low tariff) end time (default=0)
self.tou_we = tou_we # Time of Use for weekend. If false, it's off peak the whole weekend
if tou_we:
self.tou_ini_we = tou_ini_we
self.tou_end_we = tou_end_we
if charge_schedule is None:
# self.arrival_departure_data_wd = arrival_departure_data_wd
# self.arrival_departure_data_we = arrival_departure_data_we
self.create_arr_dep_array(arrival_departure_data)
self.ovn = ovn # Overnight charging Bool
self.charge_schedule = None
else:
cols = ['ArrTime', 'ArrDay', 'DepTime', 'DepDay', 'TripDistance']
for c in cols:
if not (c in charge_schedule):
raise ValueError('Charge schedule should have the following columns: {}'.format(cols))
self.charge_schedule = charge_schedule
# Parameter to compute 'physical realisations' of flexibility
# Flex service corresponds to a sustained injection during flex_time minutes
if flex_time:
if type(flex_time) == int:
flex_time = [flex_time]
for f in flex_time:
if f % model.step > 0:
raise ValueError(('Flexibility time [{} minutes] should be a ' +
'multiple of model.step [{} minutes]').format(flex_time, model.step))
self.flex_time = flex_time # array of Time (minutes) for which the flex needs to be delivered
# Variable capacity contract/limit. It's either a np.array of length>= model.periods
# or a constant value
if type(vcc) in (float, int):
self.vcc = vcc * np.ones(model.periods)
else:
self.vcc = vcc
# DERIVED PARAMS
self.compute_derived_params(model)
# Correct target SOC to minimum charged energy
if self.target_soc < 1:
required_min_soc = float(min(self.dist_wd * self.n_trips * self.range_anx_factor * self.driving_eff / self.batt_size, 1))
if required_min_soc > self.target_soc:
self.target_soc = required_min_soc
# Grid Params
self.bus = ''
# Aggregator params
self.boss = boss
# RESULTS/VARIABLES
self.soc_ini = np.zeros(model.ndays) #list of SOC ini at each day (of charging session)
self.soc_end = np.zeros(model.ndays) #list of SOC end at each day (of charging session)
self.energy_trip = np.zeros(model.ndays) #list of energy consumed per day in trips
self.charged_energy = np.zeros(model.ndays) # charged energy into the battery [kWh]
self.extra_energy = np.zeros(model.ndays) # extra charge needed during the day (bcs too long trips, not enough batt!) [kWh]
self.ch_status = np.zeros(model.ndays) # Charging status for each day (connected or not connected)
self.n_plugs = 0
self.set_ch_vector(model)
self.set_off_peak(model)
for kw in kwargs:
if not kw in extraevkwargs:
print('WARNING: {} is not a recognized parameter'.format(kw))
def create_arr_dep_array(self, arrival_departure_data):
""" It will create a dict() for each day of the week in the structure:
{day i: {arrdep_data day i}}
where each arrdep_data can take the form:
1- Bivariate probability distribution
{'pdf_a_d' : Matrix of joint probability distribution of arrival departure,
'bins' : bins in range [0,24]
2- CDF of not correlated arrival and departure
{'cdf_arr': Array of cumulative distribution function of arrival,
'cdf_dep': Array of cdf of departure}
3- Gaussian (normal) distributions
{'mu_arr': ma, 'std_arr': sa,
'mu_dep': md, 'std_dep': sd}
INPUT: a dictionary with keys=days, item=arr_depdata.
days is a string 'we', 'wd' or containing
the number of days for each arrdep data ('0123456')
"""
add = {i:dict() for i in range(7)}
for days, data in arrival_departure_data.items():
if days in ['we', 'weekend']:
add[5] = data
add[6] = data
elif days in ['wd', 'weekdays']:
for i in range(5):
add[i] = data
else:
for d in days:
try:
add[int(d)] = data
except:
pass
self.arrival_departure_data = add
def compute_derived_params(self, model):
""" Computes derived params that are useful
"""
self.eff_per_period = model.period_dur * self.charging_eff
self.soc_eff_per_period = self.eff_per_period / self.batt_size
self.soc_v2geff_per_period = model.period_dur / self.batt_size / self.discharging_eff
def set_arrival_departure(self, mu_arr = 18, mu_dep = 8,
std_arr = 2, std_dep = 2,
**kwargs):
""" Sets arrival and departure times
"""
# If data is a 2d pdf (correlated arrival and departure)
if 'pdf_a_d' in kwargs:
if 'bins' in kwargs:
bins = kwargs['bins']
else:
bins = np.linspace(0,24,num=25)
self.arrival, self.departure = random_from_2d_pdf(kwargs['pdf_a_d'], bins)
# TODO: review this duration calculation (assumes overnight wrap-around whenever departure <= arrival)
dt = (self.departure - self.arrival if self.departure > self.arrival
else self.departure + 24 - self.arrival)
# else, if data is two cdf (not correlated)
# else, random from a normal distribution with mu and std_dev from inputs
else:
dt = 0
# Minimum connection duration of 3 hours (arbitrary threshold, TODO: review)
while dt < 3:
if 'bins' in kwargs:
bins_hours = kwargs['bins']
if 'cdf_arr' in kwargs:
self.arrival = random_from_cdf(kwargs['cdf_arr'], bins_hours)
else:
self.arrival = np.random.randn(1) * std_arr + mu_arr
if 'cdf_dep' in kwargs:
self.departure = random_from_cdf(kwargs['cdf_dep'], bins_hours)
else:
self.departure = np.random.randn(1) * std_dep + mu_dep
dt = (self.departure - self.arrival if not self.ovn
else self.departure + 24 - self.arrival)
self.dt = dt
def set_ch_vector(self, model):
# Grid view
self.charging = np.zeros(model.periods) # Charging power at time t [kW]
self.off_peak_potential = np.zeros(model.periods) # Connected and chargeable power (only off-peak period and considering VCC) [kW]
self.potential = np.zeros(model.periods) # Connected power at time t [kW]
self.up_flex = np.zeros(model.periods) # Battery flex capacity, upper bound [kWh]
self.dn_flex = np.zeros(model.periods) # Battery flex capacity, lower bound (assumes bidir charger) [kWh]
self.mean_flex_traj = np.zeros(model.periods) # Mean trajectory to be used to compute up & dn flex [soc?]
self.soc = np.zeros(model.periods) # SOC at time t [pu]
if self.flex_time: # kW of flexibility for self.flex_time minutes, for diffs baselines
self.up_flex_kw = np.zeros([len(self.flex_time), model.periods])
self.dn_flex_kw = np.zeros([len(self.flex_time), model.periods])
# self.up_flex_kw_meantraj = np.zeros(model.periods)
# self.up_flex_kw_immediate = np.zeros(model.periods)
# self.up_flex_kw_delayed = np.zeros(model.periods)
# self.dn_flex_kw_meantraj = np.zeros(model.periods)
# self.dn_flex_kw_immediate = np.zeros(model.periods)
# self.dn_flex_kw_delayed = np.zeros(model.periods)
def set_off_peak(self, model):
""" Sets vector for off-peak period (EV will charge only during this period)
"""
# TODO: expand to different off-peak hours during the weekend
self.off_peak = np.ones(model.periods)
if self.tou_ini != self.tou_end:
delta_op = self.tou_end>self.tou_ini
# Compute one day. Assume off-peak hours are fewer than on-peak hours, so fewer entries need to be set to 1
op_day = np.zeros(model.periods_day)
op_day_we = np.ones(model.periods_day)
for i in range(model.periods_day):
if delta_op:
if (self.tou_ini <= i * model.period_dur < self.tou_end):
op_day[i] = 1
else:
if not (self.tou_end <= i*model.period_dur < self.tou_ini):
op_day[i] = 1
if self.tou_we:
delta_op = self.tou_end_we > self.tou_ini_we
for i in range(model.periods_day):
if delta_op:
if not (self.tou_ini_we <= i * model.period_dur < self.tou_end_we):
op_day_we[i] = 0
else:
if (self.tou_end_we <= i*model.period_dur < self.tou_ini_we):
op_day_we[i] = 0
for d in range(model.ndays):
if not (model.days[d] in model.weekends):
self.off_peak[d * model.periods_day: (d+1) * model.periods_day] = op_day
elif self.tou_we:
self.off_peak[d * model.periods_day: (d+1) * model.periods_day] = op_day_we
# if self.tou_ini < self.tou_end:
# for i in range(model.periods):
# if ((not self.tou_we) and (model.times[i][2] in model.weekends)):
# continue
# # This checks that there is no special ToU in weekends, and that it is not the weekend
# if model.times[i][1] < self.tou_ini or model.times[i][1] >= self.tou_end:
# self.off_peak[i] = 0
# elif self.tou_ini > self.tou_end:
# for i in range(model.periods):
# if ((not self.tou_we) and (model.times[i][2] in model.weekends)):
# continue
# if self.tou_end <= model.times[i][1] < self.tou_ini:
# self.off_peak[i] = 0
def compute_energy_trip(self, model):
""" Computes the energy used during the current day trips and to be charged
in the current session.
"""
# TODO: extend to add stochasticity
dist = (self.dist_wd + max(0, np.random.normal() * self.var_dist_wd)
if model.days[model.day] < 5
else self.dist_we + max(0, np.random.normal() * self.var_dist_we))
if dist * self.n_trips * self.driving_eff > self.batt_size:
#This means that home-work trip is too long to do it without extra charge,
# so forced work charging (i.e one trip)
self.energy_trip[model.day] = dist * self.driving_eff
self.extra_energy[model.day] = (self.n_trips - 1) * self.energy_trip[model.day]
else:
extra_trip = 0
if np.random.rand(1) < self.extra_trip_proba:
# Extra trip probability, normal distribution around 5km +- 1.5 km
# TODO: better way to add extra trip
extra_trip = np.random.randn(1) * 1.5 + 5
self.energy_trip[model.day] = ((self.dist_wd if model.days[model.day] < 5 else self.dist_we)
* self.n_trips + extra_trip) * self.driving_eff
def compute_soc_ini(self, model):
""" Computes soc at the start of the session based on
the energy used during current day
"""
if model.day == 0:
self.soc_ini[model.day] = 1 - self.energy_trip[model.day] / self.batt_size
else:
self.soc_ini[model.day] = self.soc_end[model.day-1] - self.energy_trip[model.day] / self.batt_size
if self.soc_ini[model.day] < 0: # To correct some negative SOCs
self.extra_energy[model.day] += -self.soc_ini[model.day] * self.batt_size
self.soc_ini[model.day] = 0
def define_charging_status(self, model, next_trip_energy=None):
""" Defines charging status for the session.
True means it will charge this session
"""
# TODO: How to compute next_trip?
if next_trip_energy is None:
next_trip_energy = ((self.dist_wd if model.days[model.day + 1] < 5
else self.dist_we) *
self.n_trips * self.driving_eff)
# min_soc_trip = max(next_trip_energy * self.range_anx_factor / self.batt_size, self.min_soc)
min_soc_trip = next_trip_energy * self.range_anx_factor / self.batt_size
# TODO: Other types of charging ?
if self.charging_type == 'all_days':
return True
if self.charging_type == 'weekdays':
if not model.days[model.day] in model.weekends:
return True
return False
if self.charging_type == 'weekdays+1':
if not model.days[model.day] in model.weekends:
return True
return np.random.rand() > 0.5
# if self.charging_type == 'weekends':
# # TODO: Complete if charging is needed
# if model.days[model.day] in model.weekends:
# return True
# return False
if self.charging_type in ['if_needed', 'if_needed_sunday', 'if_needed_weekend']:
# Enough kWh in batt to do next trip?
if model.days[model.day] == self.forced_day:
# Force charging for this EV in this day of the week
return True
if (self.soc_ini[model.day] < min_soc_trip):
# Charging because it is needed for expected trips of next day
return True
if self.soc_ini[model.day] >= self.max_soc:
# Not charging because the EV is already at or above the max SOC
return False
if self.alpha >= 100:
# Deterministic case: with a very high alpha the EV always plugs in when it arrives
return True
# As alpha tends to 0, behaviour approaches a strict "if needed" rule (plug in only when required)
# else: probabilistic charging: higher probability of plugging in at low SOC
# alpha == 1 gives a linear probability
p = np.random.rand(1)
p_cut = 1-((self.soc_ini[model.day] - min_soc_trip) / (self.max_soc - min_soc_trip)) ** self.alpha
return p < p_cut
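# Worked example (illustrative numbers): with min_soc_trip=0.2, max_soc=1 and soc_ini=0.6,
# the normalised margin is (0.6-0.2)/(1-0.2)=0.5, so the plug-in probability is
# p_cut = 1 - 0.5**alpha: 0.5 for alpha=1 (linear), ~0.6 for the default alpha=1.31,
# and close to 1 for very large alpha (always plug in).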
def do_charging(self, model):
""" Computes charging potential and calls charging function
"""
# Computes index for charging session
delta_day = model.day * model.periods_day
tini = int(self.arrival * model.periods_hour)
delta_session = int((self.arrival + self.dt) * model.periods_hour)
# if self.departure < self.arrival:
# tend = int((self.departure + 24) * model.periods_hour)
# else:
# tend = int(self.departure * model.periods_hour)
idx_tini = max([0, delta_day + tini])
idx_tend = min([delta_session + delta_day, model.periods-1])
# idx_tend = min([delta + tend, model.periods-1])
if idx_tini >= idx_tend:
self.do_zero_charge(model, idx_tini, idx_tend)
self.compute_soc_end(model, idx_tend)
return idx_tini, idx_tend
# Potential charging vector
potential = np.ones(idx_tend+1-idx_tini) * self.charging_power
# Correct for arrival period
potential[0] = ((model.period_dur - self.arrival % model.period_dur ) /
model.period_dur * self.charging_power)
# And correct for departure period
potential[-1] = (self.departure % model.period_dur /
model.period_dur * self.charging_power)
# Check for aggregators limit
if not (self.boss is None):
if self.boss.param_update in ['capacity', 'all']:
potential =
|
np.min([potential, self.boss.available_capacity[idx_tini:idx_tend+1]], axis=0)
|
numpy.min
|
# coding=utf-8
"""
This is Skip Gram form.
"""
import numpy as np
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import Examples.Dialog.Dialog as Dialog
# To create the json file
import json
# To measure elapsed time
import time
# To remove accent marks
import unicodedata
# To force Python's garbage collection
import gc
def create_directory_from_fullpath(fullpath):
"""
Create the directory for a full path if it does not exist.
"""
directory = os.path.dirname(fullpath)
if not os.path.exists(directory):
os.makedirs(directory)
return directory
def write_string_to_pathfile(string, filepath):
"""
Write a string to a path file
:param string: string to write
:param filepath: path of the file to write to
"""
try:
create_directory_from_fullpath(filepath)
file = open(filepath, 'w+')
file.write(str(string))
except:
raise ValueError("No se ha podido escribir el json")
def pt(title=None, text=None):
"""
Use the print function to print a title and an object converted to string
:param title:
:param text:
"""
if text is None:
text = title
title = "------------------------------------"
else:
title += ':'
print(str(title) + " \n " + str(text))
def initialize_session():
"""
Initialize interactive session and all local and global variables
:return: Session
"""
sess = tf.InteractiveSession()
sess.run(tf.local_variables_initializer())
sess.run(tf.global_variables_initializer())
return sess
def delete_accents_marks(string):
return ''.join((c for c in unicodedata.normalize('NFD', string) if unicodedata.category(c) != 'Mn'))
# function to convert numbers to one hot vectors
def to_one_hot(data_point_index, vocab_size):
temp = np.zeros(vocab_size)
temp[data_point_index] = 1
return temp
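# Quick example (illustrative): to_one_hot(2, 5) -> array([0., 0., 1., 0., 0.]),
# i.e. a zero vector of length vocab_size with a single 1 at the word index.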
def process_for_senteces(sentences):
"""
Preprocess the sentences to remove accent marks, periods, commas, (...)
"""
processed_sentences = []
for sentence in sentences:
sentence_split = sentence.split()
new_sentence_processed = []
for word in sentence_split:
new_sentence_processed.append(delete_accents_marks(word).lower())
processed_sentences.append(new_sentence_processed)
#pt("processed_sentences", processed_sentences)
return processed_sentences
def create_dictionaries(words):
"""
Create the word2int and int2word dictionaries from the words and return them in that order
"""
int2word = {}
word2int = {}
for i, word in enumerate(words):
word2int[word] = i
int2word[i] = word
#pt("word2int", word2int)
#pt("int2word", int2word)
return word2int, int2word
def get_words_set(processes_sentences):
"""
From the preprocessed sentences, get the set of words (without repetition) that make them up
"""
words = []
to_delete_marks = [",", ".", ":", ";", "!", "¡", "?", "¿"]
corpus = [item for sublist in processes_sentences for item in sublist]
for word in corpus:
if word not in to_delete_marks:
words.append(word)
words = list(set(words)) # Remove repeated words
#pt("words",words)
return words
def generate_training_data(processes_sentences, question_id):
"""
Generate the dataset that will be used to train the network, once converted to one-hot vectors, based on
the "question_id"
"""
data = []
if question_id == "1_x":
pass
else:
WINDOW_SIZE = 5
for sentence in processes_sentences:
for word_index, word in enumerate(sentence):
for nb_word in sentence[max(word_index - WINDOW_SIZE, 0): min(word_index + WINDOW_SIZE, len(sentence)) + 1]:
if nb_word != word:
data.append([word, nb_word])
#pt("data", data)
#pt("data", len(data))
return data
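# Illustrative sketch of the windowing above (WINDOW_SIZE=5): for a processed sentence
# ['a', 'b', 'c'], the generated (center, context) pairs would be
# [['a', 'b'], ['a', 'c'], ['b', 'a'], ['b', 'c'], ['c', 'a'], ['c', 'b']],
# i.e. every other word that falls inside the window around each center word.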
def generate_batches(data, word2int, vocab_size):
"""
Generate the inputs and labels from the previously generated data.
"""
x_input = []
y_label = []
for data_word in data:
#pt("dataword", data_word)
x_input.append(to_one_hot(word2int[data_word[0]], vocab_size))
y_label.append(to_one_hot(word2int[data_word[1]], vocab_size))
return np.asarray(x_input),
|
np.asarray(y_label)
|
numpy.asarray
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from data import *
from layers.box_utils import decode, nms
import os
import sys
import time
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.utils.data as data
import numpy as np
import math
from statistics import mean, median, variance, pstdev
def active_learning_cycle(
batch_iterator,
labeled_set,
unlabeled_set,
net,
num_classes,
acquisition_budget,
num_total_images,
):
"""Active learning cycle for Mixture Density Networks.
Collect aleatoric and epistemic uncertainties of both tasks (localization and classification)
and normalize each uncertainty values using z-score for having similar scale. Afte that,
the maximum value among the four uncertaintiesc will be the final score for the current image.
"""
# lists of aleatoric and epistemic uncertainties of localization
list_loc_al = []
list_loc_ep = []
# lists of aleatoric and epistemic uncertainties of classification
list_conf_al = []
list_conf_ep = []
# filtering threshold of confidence score
thresh = 0.5
checker = 0
for j in range(len(batch_iterator)):
print(j)
images, _ = next(batch_iterator)
images = images.cuda()
out = net(images)
priors, loc, loc_var, loc_pi, loc_2, loc_var_2, loc_pi_2, \
loc_3, loc_var_3, loc_pi_3, loc_4, loc_var_4, loc_pi_4, \
conf, conf_var, conf_pi, conf_2, conf_var_2, conf_pi_2, \
conf_3, conf_var_3, conf_pi_3, conf_4, conf_var_4, conf_pi_4 = out
# confidence score of classification
# use a softmax function to map values into probability space
conf = torch.softmax(conf, dim=2)
conf_2 = torch.softmax(conf_2, dim=2)
conf_3 = torch.softmax(conf_3, dim=2)
conf_4 = torch.softmax(conf_4, dim=2)
# mixture weight of classification
conf_p_pi = conf_pi.view(-1, 1)
conf_p_2_pi = conf_pi_2.view(-1, 1)
conf_p_3_pi = conf_pi_3.view(-1, 1)
conf_p_4_pi = conf_pi_4.view(-1, 1)
conf_var = torch.sigmoid(conf_var)
conf_var_2 = torch.sigmoid(conf_var_2)
conf_var_3 = torch.sigmoid(conf_var_3)
conf_var_4 = torch.sigmoid(conf_var_4)
# use a softmax function to keep pi in probability space and split mixture weights
(
conf_pi,
conf_pi_2,
conf_pi_3,
conf_pi_4
) = stack_softamx_unbind(
pi=conf_p_pi,
pi_2=conf_p_2_pi,
pi_3=conf_p_3_pi,
pi_4=conf_p_4_pi,
)
conf_pi = conf_pi.view(conf.size(0), -1, 1)
conf_pi_2 = conf_pi_2.view(conf.size(0), -1, 1)
conf_pi_3 = conf_pi_3.view(conf.size(0), -1, 1)
conf_pi_4 = conf_pi_4.view(conf.size(0), -1, 1)
# classification score
new_conf = conf_pi*conf + conf_pi_2*conf_2 + conf_pi_3*conf_3 + conf_pi_4*conf_4
# aleatoric uncertainty of classification
cls_al_uc = conf_pi*conf_var + conf_pi_2*conf_var_2 + conf_pi_3*conf_var_3 + conf_pi_4*conf_var_4
# epistemic uncertainty of classification
cls_ep_uc = (
conf_pi*(conf-new_conf)**2 +
conf_pi_2*(conf_2-new_conf)**2 +
conf_pi_3*(conf_3-new_conf)**2 +
conf_pi_4*(conf_4-new_conf)**2
)
new_conf = new_conf.view(loc.size(0), priors.size(0), num_classes).transpose(2, 1)
cls_al_uc = cls_al_uc.view(loc.size(0), priors.size(0), num_classes).transpose(2, 1)
cls_ep_uc = cls_ep_uc.view(loc.size(0), priors.size(0), num_classes).transpose(2, 1)
# aleatoric uncertainty of localization
# use a sigmoid function to satisfy the positiveness constraint
loc_var = torch.sigmoid(loc_var)
loc_var_2 = torch.sigmoid(loc_var_2)
loc_var_3 = torch.sigmoid(loc_var_3)
loc_var_4 = torch.sigmoid(loc_var_4)
# mixture weight of localization
loc_p_pi = loc_pi.view(-1, 4)
loc_p_2_pi = loc_pi_2.view(-1, 4)
loc_p_3_pi = loc_pi_3.view(-1, 4)
loc_p_4_pi = loc_pi_4.view(-1, 4)
# use a softmax function to keep pi in probability space and split mixture weights
(
pi_1_after,
pi_2_after,
pi_3_after,
pi_4_after
) = stack_softamx_unbind(
pi=loc_p_pi,
pi_2=loc_p_2_pi,
pi_3=loc_p_3_pi,
pi_4=loc_p_4_pi,
)
pi_1_after = pi_1_after.view(loc.size(0), -1, 4)
pi_2_after = pi_2_after.view(loc.size(0), -1, 4)
pi_3_after = pi_3_after.view(loc.size(0), -1, 4)
pi_4_after = pi_4_after.view(loc.size(0), -1, 4)
# localization coordinates
new_loc = pi_1_after*loc + pi_2_after*loc_2 + pi_3_after*loc_3 + pi_4_after*loc_4
# aleatoric uncertainty of localization
al_uc = (
pi_1_after*loc_var +
pi_2_after*loc_var_2 +
pi_3_after*loc_var_3 +
pi_4_after*loc_var_4
)
# epistemic uncertainty of localization
ep_uc = (
pi_1_after*(loc-new_loc)**2 +
pi_2_after*(loc_2-new_loc)**2 +
pi_3_after*(loc_3-new_loc)**2 +
pi_4_after*(loc_4-new_loc)**2
)
num = loc.size(0)
output = torch.zeros(num, num_classes, 200, 15)
variance = [0.1, 0.2]
for i in range(num):
decoded_boxes = decode(new_loc[i], priors, variance)
conf_scores = new_conf[i]
loc_al_uc_clone = al_uc[i]
loc_ep_uc_clone = ep_uc[i]
conf_al_clone = cls_al_uc[i]
conf_ep_clone = cls_ep_uc[i]
for cl in range(1, num_classes):
c_mask = conf_scores[cl].gt(0.01)
# confidence score
scores = conf_scores[cl][c_mask]
# aleatoric and epistemic uncertainties of classification
conf_al = conf_al_clone[cl][c_mask]
conf_ep = conf_ep_clone[cl][c_mask]
if scores.size(0) == 0:
continue
l_mask = c_mask.unsqueeze(1).expand_as(decoded_boxes)
boxes = decoded_boxes[l_mask].view(-1, 4)
# aleatoric and epistemic uncertainties of localization
loc_al_uc = loc_al_uc_clone[l_mask].view(-1, 4)
loc_ep_uc = loc_ep_uc_clone[l_mask].view(-1, 4)
ids, count = nms(boxes.detach(), scores.detach(), 0.45, 200)
output[i, cl, :count] = torch.cat(
(
scores[ids[:count]].unsqueeze(1),
boxes[ids[:count]],
loc_al_uc[ids[:count]],
loc_ep_uc[ids[:count]],
conf_al[ids[:count]].unsqueeze(1),
conf_ep[ids[:count]].unsqueeze(1)
),
1
)
# store the maximum value of each uncertainty in each jagged list
for p in range(output.size(1)):
q = 0
if j == checker:
list_loc_al.append([])
list_loc_ep.append([])
list_conf_al.append([])
list_conf_ep.append([])
checker = j + 1
while output[0, p, q, 0] >= thresh:
UC_max_al_temp = torch.max(output[0, p, q, 5:9]).item()
UC_max_ep_temp = torch.max(output[0, p, q, 9:13]).item()
UC_max_conf_al_temp = torch.max(output[0, p, q, 13:14]).item()
UC_max_conf_ep_temp = torch.max(output[0, p, q, 14:15]).item()
list_loc_al[j].append(UC_max_al_temp)
list_loc_ep[j].append(UC_max_ep_temp)
list_conf_al[j].append(UC_max_conf_al_temp)
list_conf_ep[j].append(UC_max_conf_ep_temp)
q += 1
# z-score normalization and the deciding labeled and unlabeled dataset
labeled_set, unlabeled_set = normalization_and_select_dataset(
labeled_set=labeled_set,
unlabeled_set=unlabeled_set,
list_loc_al=list_loc_al,
list_loc_ep=list_loc_ep,
list_conf_al=list_conf_al,
list_conf_ep=list_conf_ep,
acquisition_budget=acquisition_budget,
num_total_images=num_total_images,
)
return labeled_set, unlabeled_set
def stack_softamx_unbind(
pi,
pi_2,
pi_3,
pi_4,
):
"""Softmax and split mixture weights (pi)."""
pi_all = torch.stack([pi, pi_2, pi_3, pi_4])
pi_all = torch.softmax(pi_all, dim=0)
(
pi,
pi_2,
pi_3,
pi_4
) = torch.unbind(pi_all, dim=0)
return pi, pi_2, pi_3, pi_4
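# Minimal sketch (illustrative scalars): stacking the four raw pi tensors and applying softmax
# along dim=0 guarantees that, element-wise, pi + pi_2 + pi_3 + pi_4 == 1. For logits
# 1, 0, 0, 0 the resulting mixture weights are approximately 0.475, 0.175, 0.175, 0.175.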
def normalization_and_select_dataset(
labeled_set,
unlabeled_set,
list_loc_al,
list_loc_ep,
list_conf_al,
list_conf_ep,
acquisition_budget,
num_total_images,
):
"""Z-score normalization and selecting labeled and unlabeled dataset.
Args:
labeled_set: current labeled list
unlabeled_set: current unlabeled list
list_loc_al: aleatoric uncertainty of localization (jagged list)
list_loc_ep: epistemic uncertainty of localization (jagged list)
list_conf_al: aleatoric uncertainty of classification (jagged list)
list_conf_ep: epistemic uncertainty of classification (jagged list)
acquisition_budget: selection budget for unlabeled dataset
num_total_images: number of total dataset
"""
# calculate the mean and variance of each uncertainty list for z-score normalization
mean_loc_al = mean([val for sub in list_loc_al for val in sub])
stdev_loc_al = pstdev([val for sub in list_loc_al for val in sub])
mean_loc_ep = mean([val for sub in list_loc_ep for val in sub])
stdev_loc_ep = pstdev([val for sub in list_loc_ep for val in sub])
mean_conf_al = mean([val for sub in list_conf_al for val in sub])
stdev_conf_al = pstdev([val for sub in list_conf_al for val in sub])
mean_conf_ep = mean([val for sub in list_conf_ep for val in sub])
stdev_conf_ep = pstdev([val for sub in list_conf_ep for val in sub])
# minimum value of z-score (manually selected value)
uc_min = -99999.0
# insert minimum value into empty list in jagged list
# find max value of each index in jagged list
uncertainties = [list_loc_al, list_loc_ep, list_conf_al, list_conf_ep]
for i in range(len(uncertainties)):
uncertainty = uncertainties[i]
for _ in range(uncertainty.count([])):
uncertainty[uncertainty.index([])] = [uc_min]
uncertainties[i] = [max(val) for val in uncertainty]
# z-score normalization
uncertainties[0] = [(val-mean_loc_al)/stdev_loc_al for val in uncertainties[0]]
uncertainties[1] = [(val-mean_loc_ep)/stdev_loc_ep for val in uncertainties[1]]
uncertainties[2] = [(val-mean_conf_al)/stdev_conf_al for val in uncertainties[2]]
uncertainties[3] = [(val-mean_conf_ep)/stdev_conf_ep for val in uncertainties[3]]
# restore entries whose z-score came from the sentinel minimum back to the original sentinel value
# needed because the maximum over the 4 uncertainties is taken next
for _ in range(uncertainties[0].count((uc_min-mean_loc_al)/stdev_loc_al)):
uncertainties[0][uncertainties[0].index((uc_min-mean_loc_al)/stdev_loc_al)] = uc_min
for _ in range(uncertainties[1].count((uc_min-mean_loc_ep)/stdev_loc_ep)):
uncertainties[1][uncertainties[1].index((uc_min-mean_loc_ep)/stdev_loc_ep)] = uc_min
for _ in range(uncertainties[2].count((uc_min-mean_conf_al)/stdev_conf_al)):
uncertainties[2][uncertainties[2].index((uc_min-mean_conf_al)/stdev_conf_al)] = uc_min
for _ in range(uncertainties[3].count((uc_min-mean_conf_ep)/stdev_conf_ep)):
uncertainties[3][uncertainties[3].index((uc_min-mean_conf_ep)/stdev_conf_ep)] = uc_min
uncertainties = torch.FloatTensor(uncertainties)
uc_list = torch.stack([uncertainties[0], uncertainties[1], uncertainties[2], uncertainties[3]], dim=1)
uc_list = np.array(uc_list)
criterion_UC = np.max(uc_list, axis=1)
sorted_indices = np.argsort(criterion_UC)[::-1]
labeled_set += list(np.array(unlabeled_set)[sorted_indices[:acquisition_budget]])
unlabeled_set = list(
|
np.array(unlabeled_set)
|
numpy.array
|
"""
Tools for in-depth analysis of SUNTANS output
Includes:
- Volume and tracer budget calculations
- ...
<NAME>
Stanford University
March 2014
"""
from .sunpy import Spatial
from .sunslice import MultiSliceEdge
import sfoda.utils.mypandas as mpd
from sfoda.utils.timeseries import timeseries
from sfoda.utils.maptools import maskShpPoly
import numpy as np
import matplotlib.pyplot as plt
from datetime import timedelta
from netCDF4 import Dataset
from scipy import sparse
import os
import pandas as pd
import pdb
# Global constants
RHO0 = 1000.
Cp = 4186 # specific heat of seawater
GRAV = 9.81
class Energetics(Spatial):
fluxvar = 'U_F' # U or U_F
etavar='eta_avg' # 'eta_avg' or 'eta'
verbose=False
def __init__(self,ncfile,**kwargs):
"""
Calculates the energy variables from suntans output
"""
# Initialize the spatial class
Spatial.__init__(self,ncfile,klayer=[-99])
def __call__(self,tstep,cellindex=None):
"""
Calculate the terms for tstep
"""
if self.verbose: print('Calculating energy at time step: %d'%tstep)
if cellindex is None:
self.cellindex=list(range(self.Nc))
else:
self.cellindex=cellindex
self.tstep=[tstep]
###
# Step 1: Load the flux variable and the vertical depths
# These are needed for depth integrals and upwind calculations
###
if self.verbose: print('Loading raw model data...')
self.dzf = self.loadData(variable='dzf')
# dzf is calculated using max free surface height
self.dzz = self.loadData(variable='dzz')
self.u=self.loadData(variable=self.fluxvar)
if self.fluxvar=='U':
if self.verbose: print('Calculating U to flow rate...')
#TBC
# Load the cell variable used by others at all depth
self.eta = self.loadData(variable=self.etavar)
self.uc = self.loadData(variable='uc')
self.vc = self.loadData(variable='vc')
self.buoyancy = self.loadData(variable='buoyancy')
self.nu_v = self.loadData(variable='nu_v')
if self.hasVar('kappa_tv'):
self.kappa_tv = self.loadData(variable='kappa_tv')
else:
self.kappa_tv = self.nu_v
# Make sure that all variables = 0 in masked regions...
# (mask does not work with all operations)
self.u[self.u.mask]=0
self.uc[self.uc.mask]=0
self.vc[self.vc.mask]=0
self.buoyancy[self.buoyancy.mask]=0
self.nu_v[self.nu_v.mask]=0
self.kappa_tv[self.kappa_tv.mask]=0
# Put all of the terms in a dictionary called... energy
self.energy={}
###
# Term: Vertical PE flux
if self.verbose: print('Calculating vertical buoyancy flux...')
self.energy.update({'B_flux':self.calc_buoyflux()})
###
# Term: Wind work
if self.verbose: print('Calculating the wind work...')
self.energy.update({'W_work':self.calc_windwork()})
###
# Depth integrated KE and PE
if self.verbose: print('Calculating energy...')
self.KE = self.calc_KE(u=self.uc,v=self.vc)
self.energy.update({'KE':self.depthint(self.KE,dz=self.dzz)})
self.PE = self.calc_PE(b=self.buoyancy)
self.energy.update({'PE':self.depthint(self.PE,dz=self.dzz)})
###
# Dissipation
if self.verbose: print('Calculating dissipation...')
self.energy.update({'diss':self.calc_dissipation()})
###
# Flux terms
if self.verbose: print('Calculating energy flux divergence terms...')
# Pressure work flux
self.energy.update({'uKE':self.calc_fluxdivergence(self.KE)})
self.energy.update({'uP':self.calc_Pworkflux()})
self.energy.update({'uPE':self.calc_fluxdivergence(self.PE)})
# Tide only estimate
self.energy.update({'ueta':self.calc_fluxdivergence2d(-self.eta*GRAV)})
def write2netcdf(self,outfile,trange):
"""
Write all time steps in trange to an output file
!! Note that all terms are converted to Wm-2 (multiplied by rho0) !!
!! Divergent terms are divided by cell area (self.Ac) !!!
"""
tstep = list(range(0,self.Nt))[trange[0]:trange[1]]
# Write the output to netcdf
print('Writing the output to netcdf...')
self.writeNC(outfile)
nc = Dataset(outfile,'a')
nc.Title = 'SUNTANS energy output'
nc.close()
# Create the new variable names
self.create_nc_var(outfile, 'time', ('time',),\
{'long_name':'time','units':'seconds since 1990-01-01 00:00:00'})
self.create_nc_var(outfile, 'KEz', ('time','Nc'),\
{'long_name':'Depth-integrated kinetic energy',\
'units':'J m-2','coordinates':'yv xv'})
self.create_nc_var(outfile, 'PEz', ('time','Nc'),\
{'long_name':'Depth-integrated potential energy',\
'units':'J m-2','coordinates':'yv xv'})
self.create_nc_var(outfile, 'uP', ('time','Nc'),\
{'long_name':'Depth-integrated pressure work divergence',\
'units':'W m-2','coordinates':'yv xv'})
self.create_nc_var(outfile, 'uKE', ('time','Nc'),\
{'long_name':'Depth-integrated kinetic energy flux divergence',\
'units':'W m-2','coordinates':'yv xv'})
self.create_nc_var(outfile, 'uPE', ('time','Nc'),\
{'long_name':'Depth-integrated potential energy flux divergence',\
'units':'W m-2','coordinates':'yv xv'})
self.create_nc_var(outfile, 'ueta', ('time','Nc'),\
{'long_name':'Depth-integrated tidal energy flux divergence',\
'units':'W m-2','coordinates':'yv xv'})
self.create_nc_var(outfile, 'W_work', ('time','Nc'),\
{'long_name':'Wind work',\
'units':'W m-2','coordinates':'yv xv'})
self.create_nc_var(outfile, 'B_flux', ('time','Nc'),\
{'long_name':'Turbulent vertical buoyancy flux (KE->PE)',\
'units':'W m-2','coordinates':'yv xv'})
self.create_nc_var(outfile, 'diss', ('time','Nc'),\
{'long_name':'Dissipation rate',\
'units':'W m-2','coordinates':'yv xv'})
# Testing variables
self.create_nc_var(outfile, 'S2', ('time','Nk','Nc'),\
{'long_name':'Shear squared',\
'units':'s-2','coordinates':'yv xv'})
self.create_nc_var(outfile, 'Pressure', ('time','Nk','Nc'),\
{'long_name':'Pressure',\
'units':'Pa','coordinates':'yv xv'})
# Calculate the energy for each time step and write the output
print('Writing the variable data to netcdf...')
nc = Dataset(outfile,'a')
for ii, tt in enumerate(tstep):
# Call the object to calculate the variables
print('Writing energy for timestep %d of %d...'%(tt,tstep[-1]))
self.__call__(tt)
# Write the variable data out
nc.variables['time'][ii]=self.timeraw[tt]
nc.variables['KEz'][ii,:]=self.energy['KE']*RHO0
nc.variables['PEz'][ii,:]=self.energy['PE']*RHO0
nc.variables['uP'][ii,:]=self.energy['uP']/self.Ac*RHO0
nc.variables['uKE'][ii,:]=self.energy['uKE']/self.Ac*RHO0
nc.variables['uPE'][ii,:]=self.energy['uPE']/self.Ac*RHO0
nc.variables['ueta'][ii,:]=self.energy['ueta']/self.Ac*RHO0
nc.variables['W_work'][ii,:]=self.energy['W_work']*RHO0
nc.variables['B_flux'][ii,:]=self.energy['B_flux']*RHO0
nc.variables['diss'][ii,:]=self.energy['diss']*RHO0
# Testing variables
nc.variables['S2'][ii,:,:]=self.S2
nc.variables['Pressure'][ii,:,:]=self.pressure*RHO0
nc.close()
def gradZ(self,phi,dzz,dzmin=0.01):
"""
Overloaded vertical gradient calculation function
Make sure the calculation is consistent with turbulence.c
Gradients are evaluated at k-1/2
"""
Nc = phi.shape[1]
dphi_dz = np.zeros((self.Nkmax+1,Nc))
#dzz values less than dzmin are set to dzmin
dzz[dzz<dzmin]=dzmin
# Calculate mid-point gradients
dphi_dz[1:-1,:] = 2.0 * (phi[0:-1,:] - phi[1:,:])/ \
(dzz[0:-1,:]+dzz[1:,:])
# Specify the surface gradient the same as the next layer
ctop = self.getctop(self.eta)
j = list(range(Nc))
dphi_dz[ctop[j],j] = dphi_dz[ctop[j]+1,j]
# Specify the seabed gradients
dphi_dz[self.Nk[j]+1,j]=dphi_dz[self.Nk[j],j]
# Return the average at the mid-layer depth
return 0.5*(dphi_dz[1:,:] + dphi_dz[0:-1,:])
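# Worked example (illustrative): for a column with two layers, phi = [10, 6] and dzz = [2, 2],
# the interior interface gradient is 2*(10-6)/(2+2) = 2; the surface and bottom interface
# gradients are copied from their neighbours, and the returned value for each layer is the
# average of the two interface gradients bounding it.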
def calc_fluxdivergence(self,phi):
"""
Calculates the flux divergence of a cell-centered scalar, phi.
"""
# Map the data onto the edge
phi_e = np.zeros((self.Nkmax,self.Ne))
for k in range(self.Nkmax):
phi_e[k,:] = \
self.get_edgevar(phi[k,:],k=k,U=self.u[k,:],method='upwind')
face = self.face.copy()
normal = 1.0*self.normal # Convert to float
mask = face.mask.copy()
# Create a mask so that the masked face values are not included in the
# flux calculation
facemask = np.zeros((self.Nc,self.maxfaces))
facemask[mask==False]=1.0
face[mask]=0 # Index the mask to the first cell
# (this is multiplied by zero later..)
# Calculate the fluxes at all cells - dimensions: [Nk, Nc, nfaces]
flux_cell = phi_e[...,face] * self.u[...,face] * normal * facemask
# Sum along all faces - dimensions: [Nk, Nc]
flux_div = flux_cell.sum(axis=-1)
# Return the depth integrated divergence
return flux_div.sum(axis=0)
def calc_fluxdivergence2d(self,phi):
"""
Calculates the flux divergence of a cell-centered scalar, phi.
"""
#depth-integrate the flux rate
U = np.sum(self.u,axis=0)
de = self.get_edgevar(self.dv,method='max')
U/=de # Divide by the edge depth (u_bar)
# Map the data onto the edge
phi_e = self.get_edgevar(phi,k=0,U=U,method='upwind')
face = self.face.copy()
normal = 1.0*self.normal # Convert to float
mask = face.mask.copy()
# Create a mask so that the masked face values are not included in the
# flux calculation
facemask = np.zeros((self.Nc,self.maxfaces))
facemask[mask==False]=1.0
face[mask]=0 # Index the mask to the first cell
# (this is multiplied by zero later..)
# Calculate the fluxes at all cells - dimensions: [Nc, nfaces]
flux_cell = phi_e[face] * U[face] * normal * facemask
# Sum along all faces - dimensions: [Nc]
return flux_cell.sum(axis=-1)
def calc_Pworkflux(self):
"""
Calculate the pressure work flux divergence for all grid cells
"""
# Calculate pressure at the mid-point
# Note that this is already normalized by rho0
#rho = self.buoyancy/GRAV*RHO0+RHO0
#self.pressure = self.depthint(-GRAV*rho,dz=self.dzz,cumulative=True)
# Buoyancy only
self.pressure = self.depthint(self.buoyancy,dz=self.dzz,cumulative=True)
# Need to add the free-surface contribution???
# Shouldn't be necessary since dzz varies with the free-surface
#H = self.depthint(self.dzz,dz=self.dzz,cumulative=True) # total depth
#self.pressure += H*GRAV # H = eta - z
self.pressure += self.eta*GRAV
#return self.calc_fluxdivergence(self.pressure/RHO0)
return self.calc_fluxdivergence(self.pressure)
def calc_windwork(self):
"""
Calculate the wind work component
"""
u_surf = self.get_surfacevar(self.uc,self.eta)
v_surf = self.get_surfacevar(self.vc,self.eta)
tau_x = self.loadData(variable='tau_x')
tau_y = self.loadData(variable='tau_y')
return (u_surf*tau_x + v_surf*tau_y)/RHO0
def calc_buoyflux(self):
"""
Calculates the vertical flux of buoyancy:
B_f = K_v * db/dz
Returns the depth-integral.
"""
db_dz = self.gradZ(self.buoyancy,self.dzz)
return self.depthint(self.kappa_tv*db_dz,dz=self.dzz)
def calc_dissipation(self):
r"""
Calculates the depth-integrated dissipation
eps = nu_v * (du/dz^2 + dv/dz^2)
"""
du_dz = self.gradZ(self.uc,self.dzz)
dv_dz = self.gradZ(self.vc,self.dzz)
self.S2 = (du_dz**2 + dv_dz**2)
# Zero the seabed shear - it is too large??
self.S2[self.Nk,list(range(self.Nc))]=0
diss = self.nu_v * self.S2
return self.depthint(diss,dz=self.dzz)
########################
########################
def energy_budget(energyfile,polyfile,trange):
"""
# Area-integrate the energy terms
"""
varnames = ['KEz','PEz','uP','uKE','uPE','ueta','W_work','B_flux','diss']
# Load the energy file as a suntans object
sun = Spatial(energyfile)
# Create the mask
mask,maskpoly = maskShpPoly(sun.xv,sun.yv,polyfile)
# Initialise the output dictionary
tstep = list(range(0,sun.Nt))[trange[0]:trange[1]]
nt = len(tstep)
budget ={}
for vv in varnames:
budget.update({vv:np.zeros((nt,))})
for ii,tt in enumerate(tstep):
print('Area-integrating step: %d of %d...'%(ii,tstep[-1]))
for vv in varnames:
sun.tstep=[tt]
data = sun.loadData(variable=vv)
budget[vv][ii],areatotal = sun.areaint(data,mask)
budget.update({'time':sun.time[tstep]})
# Calculate the time-rate of change of KE and PE
dt = sun.timeraw[1]-sun.timeraw[0]
budget.update({'dKE_dt':np.zeros((nt,))})
budget.update({'dPE_dt':np.zeros((nt,))})
budget['dKE_dt'][1::] = (budget['KEz'][1::]-budget['KEz'][0:-1])/dt
budget['dPE_dt'][1::] = (budget['PEz'][1::]-budget['PEz'][0:-1])/dt
return budget
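# Hedged usage sketch (file names and the time range are placeholders, not from the original code):
# budget = energy_budget('suntans_energy.nc', 'domain_poly.shp', trange=(0, -1))
# plt.plot(budget['time'], budget['dKE_dt'], label='dKE/dt')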
########################
########################
def calc_avg_budget(sun, trange, cellindex,plot=False):
"""
Calculate the volume, temperature and salt budgets from
an average output file.
These calculations are very specific to the variables
stored in the averages file.
"""
# Load the SUNTANS average file object
sun.klayer=[-99]
#sun = Spatial(avgfile,klayer=[-99])
# Calculate the time dimensions
tstep = list(range(0,sun.Nt))[trange[0]:trange[1]]
nt = len(tstep)
time = sun.time[tstep]
dt = sun.globalatts['dt']*sun.globalatts['ntaverage']
# Remove cells that are next to type-2 or 3 edges here
# ...
#facemark=sun.get_facemark()
#for cc in cellindex:
# if facemark[cc] in [2,3]:
# print 'Removing edge cell index = %d'%cc
# cellindex.remove(cc)
Nc = len(cellindex)
# Calculate some grid variables
area = sun.Ac[cellindex]
sumarea = np.sum(area)
face = sun.face[cellindex,:] # edge pointers for each cell
normal = 1.0*sun.normal[cellindex,:]
# Create a mask so that the masked face values are not included
# in the flux calculations
facemask = np.zeros_like(normal)
facemask[face.mask==False]=1.0
face[face.mask]=0 # Index masked cells to the first cell (this is multiplied
# by zero
# Initialise the output variables
# Sum of fluxes
Mass_f = np.zeros((nt,), float)
Salt_f = np.zeros((nt,), float)
Temp_f = np.zeros((nt,), float)
# Volume integrals
V = np.zeros((nt,), float)
s_V = np.zeros((nt,), float)
T_V = np.zeros((nt,), float)
# Surface fluxes (T and S only)
s_surf = np.zeros((nt,), float)
T_surf = np.zeros((nt,), float)
###
# Start stepping through and read all variable time step by time step
###
for ii,tt in enumerate(tstep):
sun.tstep=[tt]
print('Calculating budget for time = %d of %d'%(tt,tstep[-1]))
# Load the depth-average and flux quantities
s_dz = sun.loadDataRaw(variable='s_dz')
T_dz = sun.loadDataRaw(variable='T_dz')
eta = sun.loadDataRaw(variable='eta')
Sflux = sun.loadDataRaw(variable='s_F')
Tflux = sun.loadDataRaw(variable='T_F')
Mflux = sun.loadDataRaw(variable='U_F') #[m3 s-1] * [s] = [m3]
# Subset the variables at cell index only
eta = eta[cellindex]
s_dz = s_dz[cellindex]
T_dz = T_dz[cellindex]
# Calculate the fluxes for each cell [Nk, Nc, maxfaces]
Mflux_cell = Mflux[...,face] * normal * facemask
Sflux_cell = Sflux[...,face] * normal * facemask
Tflux_cell = Tflux[...,face] * normal * facemask
# Compute the total mass/tracer flux in/out of each cell
# sum over the edge (face) dimension
Mass = Mflux_cell.sum(axis=-1)
Salt = Sflux_cell.sum(axis=-1)
Temp = Tflux_cell.sum(axis=-1)
# Sum over the depth dimension
Mass = Mass.sum(axis=0)
Salt = Salt.sum(axis=0)
Temp = Temp.sum(axis=0)
# Sum all cells
Mass_f[ii] = Mass.sum()
Salt_f[ii] = Salt.sum()
Temp_f[ii] = Temp.sum()
# Calculate the volume integrals
s_V[ii] = np.sum(s_dz*area,axis=-1).squeeze() # m3 S
T_V[ii] = np.sum(T_dz*area,axis=-1).squeeze() # m3 C
V[ii] = np.sum(eta*area,axis=-1).squeeze() # m3 [volume]
# Get the surface temp and salinity flux arrays
if sun.hasVar('Hs'):
# Load the surface flux quantities
Hs = sun.loadDataRaw(variable='Hs')
Hsw = sun.loadDataRaw(variable='Hsw')
Hl = sun.loadDataRaw(variable='Hl')
Hlw = sun.loadDataRaw(variable='Hlw')
# Convert heat flux [W m-2] -> temperature flux
Qs = (Hs+Hl+Hlw+Hsw)/(RHO0*Cp) # units [C m s-1]
# Surface flux contribution
T_surf[ii] = np.sum(Qs[...,cellindex]*area) # units [C m3 s-1]
else:
T_surf[ii] = 0
if sun.hasVar('EP'):
EPs0 = sun.loadDataRaw(variable='EP')
s_surf[ii] = np.sum(EPs0[...,cellindex]*area) # units [psu m3 s-1]
else:
s_surf[ii] = 0
##########
# units are:
##########
# s_V [ppt m3]
# T_V [C m3]
# eta [m3]
# Mass_f [m3 s-1]
# Salt_f [ppt m3 s-1]
# Temp_f [C m3 s-1]
###
# Compute each of the terms in the budget
# Tendency
Tend_V = (V[:-1]-V[1:]).squeeze()/dt # m3 s-1
Tend_s = (s_V[:-1]-s_V[1:]).squeeze()/dt # psu m3 s-1
Tend_T = (T_V[:-1]-T_V[1:]).squeeze()/dt # C m3 s-1
# Advective fluxes
Adv_V = Mass_f[1:]# m3 s-1
Adv_s = Salt_f[1:]# psu s-1
Adv_T = Temp_f[1:]# C s-1
# Surface fluxes (note change of sign)
Sflux_T = -T_surf[1:]# C m3 s-1
Sflux_s = s_surf[1:]# psu m3 s-1
# Compute the error (residual) in each budget
Err_V =(Tend_V - Adv_V)
Err_T = (Tend_T - Adv_T - Sflux_T)
Err_s = (Tend_s - Adv_s - Sflux_s)
# Output time
time = time[1:]
# Save the output as a dictionary
budget = {'time':time,\
'cellindex':cellindex,\
'Volume':{'Advection':Adv_V,'Tendency':Tend_V,'Residual':Err_V},\
'Temp':{'Advection':Adv_T,'Tendency':Tend_T,'Surface_Flux':Sflux_T,'Residual':Err_T},\
'Salt':{'Advection':Adv_s,'Tendency':Tend_s,'Surface_Flux':Sflux_s,'Residual':Err_s},\
}
if plot:
# Free-surface
fig1=plt.figure()
f1ax1=fig1.add_subplot(2,1,1)
plt.title('Volume budget')
plt.plot(time,Tend_V,'b',linewidth=2)
plt.plot(time,Adv_V,'r')
plt.ylabel(r'$m^3 \ s^{-1}$')
plt.ylim(Tend_V.min(),Tend_V.max())
plt.legend(('Tendency','Advection'))
ax2=fig1.add_subplot(2,1,2,sharex=f1ax1)
plt.plot(time,Err_V)
plt.ylabel('error')
fig2=plt.figure()
f2ax1=fig2.add_subplot(2,1,1)
plt.title('Temperature budget')
plt.plot(time,Tend_T,'b',linewidth=2)
plt.plot(time,Adv_T,'r')
plt.plot(time,Adv_T + Sflux_T,'k')
plt.grid(b=True)
plt.ylabel(r'$^\circ C \ m^3 \ s^{-1}$')
plt.legend(('Tendency','Advection','Adv. + Sflux'))
f2ax1=fig2.add_subplot(2,1,2,sharex=f2ax1)
plt.title('Temperature budget')
plt.plot(time,Err_T)
plt.ylabel('error')
fig3=plt.figure()
f3ax1=fig3.add_subplot(2,1,1)
plt.title('Salt budget')
plt.plot(time,Tend_s,'b',linewidth=2)
plt.plot(time,Adv_s,'r')
plt.plot(time,Adv_s + Sflux_s,'k')
plt.grid(b=True)
plt.ylabel(r'$psu \ m^3 \ s^{-1}$')
plt.legend(('Tendency','Advection','Adv. + Sflux'))
f2ax1=fig3.add_subplot(2,1,2,sharex=f3ax1)
plt.plot(time,Err_s)
plt.ylabel('error')
plt.figure()
sun.plotmesh()
plt.plot(sun.xv[cellindex],sun.yv[cellindex],'m.')
plt.show()
return budget
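# Hypothetical call (file name and cell indices are placeholders):
#
#   sun = Spatial('averages.nc')
#   budget = calc_avg_budget(sun, trange=(0, -1), cellindex=[10, 11, 12])
#   salt_residual = budget['Salt']['Residual']    # [psu m3 s-1]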
#
def calc_isopycnal_discharge(ncfile,xpt,ypt,saltbins,tstart,tend,scalarvar='salt'):
"""
Calculates the discharge as a function of salinity along
a transect, defined by xpt/ypt, in the suntans model
Returns a dictionary with the relevant variables
"""
nbins = saltbins.shape[0]
# Load the slice object and extract the data
SE = MultiSliceEdge(ncfile,xpt=xpt,ypt=ypt)
# if SE==None:
# SE = SliceEdge(ncfile,xpt=xpt,ypt=ypt)
# SE.tstep = range(SE.Nt)
# else:
# SE.update_xy(xpt,ypt)
#
SE.tstep = SE.getTstep(tstart,tend)
print('Loading the salt flux data...')
#s_F_all= SE.loadData(variable='s_F')
s_F_all= SE.loadData(variable=scalarvar)
print('Loading the flux data...')
Q_all = SE.loadData(variable='U_F')
def Q_S_flux(salt,Q,saltbins,normal):
# make sure masked values are zeroed
#s_F[s_F.mask]=0
#Q[Q.mask]=0
Q = Q*normal
Nt,Nk,Ne = Q.shape
#salt = np.abs(s_F)/np.abs(Q)
#salt[np.isnan(salt)]=0
Ns = saltbins.shape[0]
ds = np.diff(saltbins).mean()
###
# Calculate Q(s,x)
###
# Create an array
#Nt = len(SE.tstep)
#ne = len(SE.j) # number of edges
jindex = np.arange(0,Ne)
jindex = np.repeat(jindex[np.newaxis,np.newaxis,:],Nt,axis=0)
jindex = np.repeat(jindex,SE.Nkmax,axis=1)
# Groups the salt matrix into bins
sindex = np.searchsorted(saltbins,salt)
sindex[sindex>=Ns]=Ns-1
#tindex = np.arange(0,Nt)
#tindex = np.repeat(tindex[:,np.newaxis,np.newaxis],ne,axis=-1)
#tindex = np.repeat(tindex,SE.Nkmax,axis=1)
# Calculate the salt flux for each time step
Qs = np.zeros((Nt,Ns,Ne))#
#Fs = np.zeros((Nt,Ne))#
#dQds = np.zeros((Nt,Ns,Ne))# put salt in last dimension for easy multiplication
for tt in range(Nt):
# Create an array output array Q_S
# This sums duplicate elements
Q_S = sparse.coo_matrix((Q[tt,...].ravel(),\
(sindex[tt,...].ravel(),jindex[tt,...].ravel())),\
shape=(Ns,Ne)).todense()
Qs[tt,...] = np.array(Q_S)#/Nt # Units m^3/s
####
## THIS IS WRONG DON'T USE
####
## Compute the gradient (this gives the same result after
## integration)
#dQ_ds, dQ_de = np.gradient(Qs[tt,...],ds,1)
##dQtmp = -1*np.array(dQ_ds).T
#dQds[tt,...] = -1*dQ_ds
#
#Fs[tt,:] = np.sum(-1*dQds[tt,...].T*saltbins ,axis=-1)
output = {'time':SE.time[SE.tstep],'saltbins':saltbins,\
'Qs':Qs}
#'dQds':dQds,'Qs':Qs,'Fs':Fs}
return output
def Q_S_flux_old(s_F,Q,saltbins,x,normal,area,dz):
# make sure masked values are zeroed
#s_F[s_F.mask]=0
#Q[Q.mask]=0
Q = Q*normal
Nt,Nk,ne = Q.shape
salt = np.abs(s_F)/np.abs(Q)
salt[np.isnan(salt)]=0
# Calculate the mean Q
Qbar = np.sum( np.sum(Q,axis=-1),axis=0)/Nt
###
# Calculate Q(s,x)
###
# Create an array
#Nt = len(SE.tstep)
#ne = len(SE.j) # number of edges
jindex = np.arange(0,ne)
jindex = np.repeat(jindex[np.newaxis,np.newaxis,:],Nt,axis=0)
jindex = np.repeat(jindex,SE.Nkmax,axis=1)
# Groups the salt matrix into bins
sindex = np.searchsorted(saltbins,salt)
sindex[sindex>=nbins]=nbins-1
# Create an array output array Q_S
# This sums duplicate elements
Q_S_x = sparse.coo_matrix((Q.ravel(),(sindex.ravel(),jindex.ravel())),\
shape=(nbins,ne)).todense()
Q_S_x = np.array(Q_S_x)#/Nt # Units m^3/s
###
# Calculate Q(s,t)
###
# Create an array
tindex = np.arange(0,Nt)
tindex = np.repeat(tindex[:,np.newaxis,np.newaxis],ne,axis=-1)
tindex = np.repeat(tindex,SE.Nkmax,axis=1)
# Create an array output array Q_S
# This sums duplicate elements
Q_S_t = sparse.coo_matrix((Q.ravel(),(sindex.ravel(),tindex.ravel())),\
shape=(nbins,Nt)).todense()
Q_S_t = np.array(Q_S_t)#/ne # Units m^3/s
###
# Calculate Q(s)
###
Q_S = np.bincount(sindex.ravel(),weights=Q.ravel(),minlength=nbins)
###
# Calculate the gradients with respect to S
###
ds = np.diff(saltbins).mean()
dsdt_inv = 1./(ds*Nt)
saltbins = 0.5*(saltbins[1:] + saltbins[0:-1])
# Units are: [m^3 s^-1 psu^-1]
dQ_S_x = np.diff(Q_S_x,axis=0) * dsdt_inv
dQ_S_t =
|
np.diff(Q_S_t,axis=0)
|
numpy.diff
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
'''Density expansion on plane waves'''
import time
import copy
import numpy
from pyscf import lib
from pyscf import gto
from pyscf.lib import logger
from pyscf.pbc import tools
from pyscf.pbc.gto import pseudo, estimate_ke_cutoff, error_for_ke_cutoff
from pyscf.pbc import gto as pbcgto
from pyscf.pbc.df import ft_ao
from pyscf.pbc.df import incore
from pyscf.pbc.lib.kpts_helper import is_zero, gamma_point
from pyscf.pbc.df import aft_jk
from pyscf.pbc.df import aft_ao2mo
from pyscf import __config__
CUTOFF = getattr(__config__, 'pbc_df_aft_estimate_eta_cutoff', 1e-12)
ETA_MIN = getattr(__config__, 'pbc_df_aft_estimate_eta_min', 0.2)
PRECISION = getattr(__config__, 'pbc_df_aft_estimate_eta_precision', 1e-8)
KE_SCALING = getattr(__config__, 'pbc_df_aft_ke_cutoff_scaling', 0.75)
def estimate_eta(cell, cutoff=CUTOFF):
'''The exponent of the smooth gaussian model density, requiring that at
boundary, density ~ 4pi rmax^2 exp(-eta/2*rmax^2) ~ 1e-12
'''
lmax = min(numpy.max(cell._bas[:,gto.ANG_OF]), 4)
# If lmax=3 (r^5 for radial part), this expression guarantees at least up
# to f shell the convergence at boundary
eta = max(numpy.log(4*numpy.pi*cell.rcut**(lmax+2)/cutoff)/cell.rcut**2*2,
ETA_MIN)
return eta
def estimate_eta_for_ke_cutoff(cell, ke_cutoff, precision=PRECISION):
'''Given ke_cutoff, the upper limit of eta to guarantee the required
precision in Coulomb integrals.
'''
lmax = numpy.max(cell._bas[:,gto.ANG_OF])
kmax = (ke_cutoff*2)**.5
log_rest = numpy.log(precision / (32*numpy.pi**2 * kmax**(lmax*2-1)))
log_eta = -1
eta = kmax**2/4 / (-log_eta - log_rest)
return eta
def estimate_ke_cutoff_for_eta(cell, eta, precision=PRECISION):
'''Given eta, the lower limit of ke_cutoff to guarantee the required
precision in Coulomb integrals.
'''
eta = max(eta, 0.2)
lmax = numpy.max(cell._bas[:,gto.ANG_OF])
log_k0 = 5 + numpy.log(eta) / 2
log_rest = numpy.log(precision / (32*numpy.pi**2*eta))
Ecut = 2*eta * (log_k0*(lmax*2-1) - log_rest)
Ecut = max(Ecut, .5)
return Ecut
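# Schematic use of the three estimators above (cell is any built pyscf.pbc.gto.Cell;
# the numbers depend on the basis, rcut and requested precision):
#
#   eta = estimate_eta(cell)                        # smooth-density exponent
#   ecut = estimate_ke_cutoff_for_eta(cell, eta)    # KE cutoff consistent with eta
#   mesh = tools.cutoff_to_mesh(cell.lattice_vectors(), ecut)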
def get_nuc(mydf, kpts=None):
# Pseudopotential is ignored when computing just the nuclear attraction
with lib.temporary_env(mydf.cell, _pseudo={}):
return get_pp_loc_part1(mydf, kpts)
def get_pp_loc_part1(mydf, kpts=None):
cell = mydf.cell
if kpts is None:
kpts_lst = numpy.zeros((1,3))
else:
kpts_lst = numpy.reshape(kpts, (-1,3))
log = logger.Logger(mydf.stdout, mydf.verbose)
t0 = t1 = (time.clock(), time.time())
mesh = numpy.asarray(mydf.mesh)
nkpts = len(kpts_lst)
nao = cell.nao_nr()
nao_pair = nao * (nao+1) // 2
charges = cell.atom_charges()
kpt_allow = numpy.zeros(3)
if mydf.eta == 0:
if cell.dimension > 0:
ke_guess = estimate_ke_cutoff(cell, cell.precision)
mesh_guess = tools.cutoff_to_mesh(cell.lattice_vectors(), ke_guess)
if numpy.any(mesh[:cell.dimension] < mesh_guess[:cell.dimension]*.8):
logger.warn(mydf, 'mesh %s is not enough for AFTDF.get_nuc function '
'to get integral accuracy %g.\nRecommended mesh is %s.',
mesh, cell.precision, mesh_guess)
Gv, Gvbase, kws = cell.get_Gv_weights(mesh)
vpplocG = pseudo.pp_int.get_gth_vlocG_part1(cell, Gv)
vpplocG = -numpy.einsum('ij,ij->j', cell.get_SI(Gv), vpplocG)
vpplocG *= kws
vG = vpplocG
vj = numpy.zeros((nkpts,nao_pair), dtype=numpy.complex128)
else:
if cell.dimension > 0:
ke_guess = estimate_ke_cutoff_for_eta(cell, mydf.eta, cell.precision)
mesh_guess = tools.cutoff_to_mesh(cell.lattice_vectors(), ke_guess)
if numpy.any(mesh < mesh_guess*.8):
logger.warn(mydf, 'mesh %s is not enough for AFTDF.get_nuc function '
'to get integral accuracy %g.\nRecommended mesh is %s.',
mesh, cell.precision, mesh_guess)
mesh_min = numpy.min((mesh_guess, mesh), axis=0)
if cell.dimension < 2 or cell.low_dim_ft_type == 'inf_vacuum':
mesh[:cell.dimension] = mesh_min[:cell.dimension]
else:
mesh = mesh_min
Gv, Gvbase, kws = cell.get_Gv_weights(mesh)
nuccell = _compensate_nuccell(mydf)
# PP-loc part1 is handled by fakenuc in _int_nuc_vloc
vj = lib.asarray(mydf._int_nuc_vloc(nuccell, kpts_lst))
t0 = t1 = log.timer_debug1('vnuc pass1: analytic int', *t0)
coulG = tools.get_coulG(cell, kpt_allow, mesh=mesh, Gv=Gv) * kws
aoaux = ft_ao.ft_ao(nuccell, Gv)
vG = numpy.einsum('i,xi->x', -charges, aoaux) * coulG
max_memory = max(2000, mydf.max_memory-lib.current_memory()[0])
for aoaoks, p0, p1 in mydf.ft_loop(mesh, kpt_allow, kpts_lst,
max_memory=max_memory, aosym='s2'):
for k, aoao in enumerate(aoaoks):
# rho_ij(G) nuc(-G) / G^2
# = [Re(rho_ij(G)) + Im(rho_ij(G))*1j] [Re(nuc(G)) - Im(nuc(G))*1j] / G^2
if gamma_point(kpts_lst[k]):
vj[k] += numpy.einsum('k,kx->x', vG[p0:p1].real, aoao.real)
vj[k] += numpy.einsum('k,kx->x', vG[p0:p1].imag, aoao.imag)
else:
vj[k] += numpy.einsum('k,kx->x', vG[p0:p1].conj(), aoao)
t1 = log.timer_debug1('contracting Vnuc [%s:%s]'%(p0, p1), *t1)
log.timer_debug1('contracting Vnuc', *t0)
vj_kpts = []
for k, kpt in enumerate(kpts_lst):
if gamma_point(kpt):
vj_kpts.append(lib.unpack_tril(vj[k].real.copy()))
else:
vj_kpts.append(lib.unpack_tril(vj[k]))
if kpts is None or numpy.shape(kpts) == (3,):
vj_kpts = vj_kpts[0]
return numpy.asarray(vj_kpts)
def _int_nuc_vloc(mydf, nuccell, kpts, intor='int3c2e', aosym='s2', comp=1):
'''Vnuc - Vloc'''
cell = mydf.cell
nkpts = len(kpts)
# Use the 3c2e code with steep s gaussians to mimic nuclear density
fakenuc = _fake_nuc(cell)
fakenuc._atm, fakenuc._bas, fakenuc._env = \
gto.conc_env(nuccell._atm, nuccell._bas, nuccell._env,
fakenuc._atm, fakenuc._bas, fakenuc._env)
kptij_lst =
|
numpy.hstack((kpts,kpts))
|
numpy.hstack
|
#!/usr/bin/env python3
from DGSQP.solvers.IBR import IBR
from DGSQP.solvers.DGSQP import DGSQP
from DGSQP.solvers.ALGAMES import ALGAMES
from DGSQP.solvers.PID import PIDLaneFollower
from DGSQP.solvers.solver_types import IBRParams, DGSQPParams, ALGAMESParams, PIDParams
from DGSQP.types import VehicleState, VehicleActuation, Position, ParametricPose, OrientationEuler, BodyLinearVelocity, BodyAngularVelocity
from DGSQP.dynamics.dynamics_models import CasadiKinematicBicycleCombined, CasadiDecoupledMultiAgentDynamicsModel
from DGSQP.dynamics.model_types import KinematicBicycleConfig, MultiAgentModelConfig
from DGSQP.tracks.track_lib import *
import numpy as np
import casadi as ca
from datetime import datetime
import pathlib
import pickle
import copy
# Initial time
t = 0
time_str = datetime.now().strftime('%m-%d-%Y_%H-%M-%S')
data_dir = pathlib.Path(pathlib.Path.home(), f'results/dgsqp_algams_mc_chicane_{time_str}')
if not data_dir.exists():
data_dir.mkdir(parents=True)
# =============================================
# Helper functions
# =============================================
def check_collision(ego_traj, tar_traj, obs_d):
for k in range(ego_traj.shape[0]):
d = np.linalg.norm(ego_traj[k,:2] - tar_traj[k,:2], ord=2)
if d < obs_d:
return True
return False
# Saturation cost function
sym_signed_u = ca.SX.sym('u', 1)
saturation_cost = ca.Function('saturation_cost', [sym_signed_u], [ca.fmax(ca.DM.zeros(1), sym_signed_u)])
dt = 0.1
discretization_method='euler'
half_width = 1.0
ego_dynamics_config = KinematicBicycleConfig(dt=dt,
model_name='kinematic_bicycle_cl',
noise=False,
discretization_method=discretization_method,
wheel_dist_front=0.13,
wheel_dist_rear=0.13,
drag_coefficient=0.1,
slip_coefficient=0.1,
code_gen=False)
tar_dynamics_config = KinematicBicycleConfig(dt=dt,
model_name='kinematic_bicycle_cl',
noise=False,
discretization_method=discretization_method,
wheel_dist_front=0.13,
wheel_dist_rear=0.13,
drag_coefficient=0.1,
slip_coefficient=0.1,
code_gen=False)
joint_model_config = MultiAgentModelConfig(dt=dt,
discretization_method=discretization_method,
use_mx=True,
code_gen=False,
verbose=True,
compute_hessians=True)
ego_state_input_max=VehicleState(x=Position(x=np.inf, y=np.inf),
p=ParametricPose(s=np.inf, x_tran=half_width, e_psi=np.inf),
e=OrientationEuler(psi=np.inf),
v=BodyLinearVelocity(v_long=np.inf, v_tran=np.inf),
w=BodyAngularVelocity(w_psi=np.inf),
u=VehicleActuation(u_a=2.1, u_steer=0.436))
ego_state_input_min=VehicleState(x=Position(x=-np.inf, y=-np.inf),
p=ParametricPose(s=-np.inf, x_tran=-half_width, e_psi=-np.inf),
e=OrientationEuler(psi=-np.inf),
v=BodyLinearVelocity(v_long=-np.inf, v_tran=-np.inf),
w=BodyAngularVelocity(w_psi=-np.inf),
u=VehicleActuation(u_a=-2.1, u_steer=-0.436))
ego_state_input_rate_max=VehicleState(u=VehicleActuation(u_a=10.0, u_steer=np.pi))
ego_state_input_rate_min=VehicleState(u=VehicleActuation(u_a=-10.0, u_steer=-np.pi))
tar_state_input_max=VehicleState(x=Position(x=np.inf, y=np.inf),
p=ParametricPose(s=np.inf, x_tran=half_width, e_psi=np.inf),
e=OrientationEuler(psi=np.inf),
v=BodyLinearVelocity(v_long=np.inf, v_tran=np.inf),
w=BodyAngularVelocity(w_psi=np.inf),
u=VehicleActuation(u_a=2.1, u_steer=0.436))
tar_state_input_min=VehicleState(x=Position(x=-np.inf, y=-np.inf),
p=ParametricPose(s=-np.inf, x_tran=-half_width, e_psi=-np.inf),
e=OrientationEuler(psi=-np.inf),
v=BodyLinearVelocity(v_long=-np.inf, v_tran=-np.inf),
w=BodyAngularVelocity(w_psi=-np.inf),
u=VehicleActuation(u_a=-2.1, u_steer=-0.436))
tar_state_input_rate_max=VehicleState(u=VehicleActuation(u_a=10.0, u_steer=np.pi))
tar_state_input_rate_min=VehicleState(u=VehicleActuation(u_a=-10.0, u_steer=-np.pi))
state_input_ub = [ego_state_input_max, tar_state_input_max]
state_input_lb = [ego_state_input_min, tar_state_input_min]
ego_cost_params = dict(input_weight=[1.0, 1.0],
input_rate_weight=[1.0, 1.0],
comp_weights=[10.0, 5.0],
blocking_weight=0,
obs_weight=0,
obs_r=0.3)
tar_cost_params = dict(input_weight=[1.0, 1.0],
input_rate_weight=[1.0, 1.0],
comp_weights=[10.0, 5.0],
blocking_weight=0,
obs_weight=0,
obs_r=0.3)
ego_r=0.2
tar_r=0.2
use_ws=True
ibr_ws=False
exp_N = [10, 15, 20, 25]
exp_theta = np.arange(15, 91, 15)
# exp_N = [25]
# exp_theta = [90]
num_mc = 100
rng = np.random.default_rng()
for theta in exp_theta:
track_obj = ChicaneTrack(enter_straight_length=1,
curve1_length=4,
curve1_swept_angle=theta*np.pi/180,
mid_straight_length=1,
exit_straight_length=5,
curve2_length=4,
curve2_swept_angle=theta*np.pi/180,
width=half_width*2,
slack=0.8,
mirror=False)
for N in exp_N:
# =============================================
# Set up joint model
# =============================================
ego_dyn_model = CasadiKinematicBicycleCombined(t, ego_dynamics_config, track=track_obj)
tar_dyn_model = CasadiKinematicBicycleCombined(t, tar_dynamics_config, track=track_obj)
joint_model = CasadiDecoupledMultiAgentDynamicsModel(t, [ego_dyn_model, tar_dyn_model], joint_model_config)
# =============================================
# Solver setup
# =============================================
dgsqp_params = DGSQPParams(solver_name='SQGAMES',
dt=dt,
N=N,
reg=1e-3,
nonmono_ls=True,
line_search_iters=50,
sqp_iters=50,
p_tol=1e-3,
d_tol=1e-3,
beta=0.01,
tau=0.5,
verbose=False,
code_gen=False,
jit=False,
opt_flag='O3',
solver_dir=None,
debug_plot=False,
pause_on_plot=True)
algames_params = ALGAMESParams(solver_name='ALGAMES',
dt=dt,
N=N,
outer_iters=50,
line_search_iters=50,
line_search_tol=1e-6,
newton_iters=50,
newton_step_tol=1e-9,
ineq_tol=1e-3,
eq_tol=1e-3,
opt_tol=1e-3,
rho=1.0,
gamma=10.0,
rho_max=1e7,
beta=0.01,
tau=0.5,
q_reg=1e-3,
u_reg=1e-3,
verbose=False,
code_gen=False,
jit=False,
opt_flag='O3',
solver_dir=None,
debug_plot=False,
pause_on_plot=False)
# Symbolic placeholder variables
sym_q = ca.MX.sym('q', joint_model.n_q)
sym_u_ego = ca.MX.sym('u_ego', ego_dyn_model.n_u)
sym_u_tar = ca.MX.sym('u_tar', tar_dyn_model.n_u)
sym_um_ego = ca.MX.sym('um_ego', ego_dyn_model.n_u)
sym_um_tar = ca.MX.sym('um_tar', tar_dyn_model.n_u)
ego_x_idx = 0
ego_y_idx = 1
ego_s_idx = 4
ego_ey_idx = 5
tar_x_idx = 6
tar_y_idx = 7
tar_s_idx = 10
tar_ey_idx = 11
ua_idx = 0
us_idx = 1
ego_pos = sym_q[[ego_x_idx, ego_y_idx]]
tar_pos = sym_q[[tar_x_idx, tar_y_idx]]
obs_cost_d = ego_cost_params['obs_r'] + tar_cost_params['obs_r']
# Build symbolic cost functions
ego_quad_input_cost = (1/2)*(ego_cost_params['input_weight'][0]*sym_u_ego[ua_idx]**2 \
+ ego_cost_params['input_weight'][1]*sym_u_ego[us_idx]**2)
ego_quad_input_rate_cost = (1/2)*(ego_cost_params['input_rate_weight'][0]*(sym_u_ego[ua_idx]-sym_um_ego[ua_idx])**2 \
+ ego_cost_params['input_rate_weight'][1]*(sym_u_ego[us_idx]-sym_um_ego[us_idx])**2)
ego_blocking_cost = (1/2)*ego_cost_params['blocking_weight']*(sym_q[ego_ey_idx] - sym_q[tar_ey_idx])**2
ego_obs_cost = (1/2)*ego_cost_params['obs_weight']*saturation_cost(obs_cost_d-ca.norm_2(ego_pos - tar_pos))**2
ego_prog_cost = -ego_cost_params['comp_weights'][0]*sym_q[ego_s_idx]
# ego_comp_cost = ego_cost_params['comp_weights'][1]*(sym_q[tar_s_idx]-sym_q[ego_s_idx])
ego_comp_cost = ego_cost_params['comp_weights'][1]*ca.atan(sym_q[tar_s_idx]-sym_q[ego_s_idx])
ego_sym_stage = ego_quad_input_cost \
+ ego_quad_input_rate_cost \
+ ego_blocking_cost \
+ ego_obs_cost
ego_sym_term = ego_prog_cost \
+ ego_comp_cost \
+ ego_blocking_cost \
+ ego_obs_cost
ego_sym_costs = []
for k in range(N):
ego_sym_costs.append(ca.Function(f'ego_stage_{k}', [sym_q, sym_u_ego, sym_um_ego], [ego_sym_stage],
[f'q_{k}', f'u_{k}', f'u_{k-1}'], [f'ego_stage_cost_{k}']))
ego_sym_costs.append(ca.Function('ego_term', [sym_q], [ego_sym_term],
[f'q_{N}'], ['ego_term_cost']))
tar_quad_input_cost = (1/2)*(tar_cost_params['input_weight'][0]*sym_u_tar[ua_idx]**2 \
+ tar_cost_params['input_weight'][1]*sym_u_tar[us_idx]**2)
tar_quad_input_rate_cost = (1/2)*(tar_cost_params['input_rate_weight'][0]*(sym_u_tar[ua_idx]-sym_um_tar[ua_idx])**2 \
+ tar_cost_params['input_rate_weight'][1]*(sym_u_tar[us_idx]-sym_um_tar[us_idx])**2)
tar_blocking_cost = (1/2)*tar_cost_params['blocking_weight']*(sym_q[ego_ey_idx] - sym_q[tar_ey_idx])**2
tar_obs_cost = (1/2)*tar_cost_params['obs_weight']*saturation_cost(obs_cost_d-ca.norm_2(ego_pos - tar_pos))**2
tar_prog_cost = -tar_cost_params['comp_weights'][0]*sym_q[tar_s_idx]
# tar_comp_cost = tar_cost_params['comp_weights'][1]*(sym_q[ego_s_idx]-sym_q[tar_s_idx])
tar_comp_cost = tar_cost_params['comp_weights'][1]*ca.atan(sym_q[ego_s_idx]-sym_q[tar_s_idx])
tar_sym_stage = tar_quad_input_cost \
+ tar_quad_input_rate_cost \
+ tar_blocking_cost \
+ tar_obs_cost
tar_sym_term = tar_prog_cost \
+ tar_comp_cost \
+ tar_blocking_cost \
+ tar_obs_cost
tar_sym_costs = []
for k in range(N):
tar_sym_costs.append(ca.Function(f'tar_stage_{k}', [sym_q, sym_u_tar, sym_um_tar], [tar_sym_stage],
[f'q_{k}', f'u_{k}', f'u_{k-1}'], [f'tar_stage_cost_{k}']))
tar_sym_costs.append(ca.Function('tar_term', [sym_q], [tar_sym_term],
[f'q_{N}'], ['tar_term_cost']))
sym_costs = [ego_sym_costs, tar_sym_costs]
# Build symbolic constraints g_i(x, u, um) <= 0
ego_input_rate_constr = ca.vertcat((sym_u_ego[ua_idx]-sym_um_ego[ua_idx]) - dt*ego_state_input_rate_max.u.u_a,
dt*ego_state_input_rate_min.u.u_a - (sym_u_ego[ua_idx]-sym_um_ego[ua_idx]),
(sym_u_ego[us_idx]-sym_um_ego[us_idx]) - dt*ego_state_input_rate_max.u.u_steer,
dt*ego_state_input_rate_min.u.u_steer - (sym_u_ego[us_idx]-sym_um_ego[us_idx]))
tar_input_rate_constr = ca.vertcat((sym_u_tar[ua_idx]-sym_um_tar[ua_idx]) - dt*tar_state_input_rate_max.u.u_a,
dt*tar_state_input_rate_min.u.u_a - (sym_u_tar[ua_idx]-sym_um_tar[ua_idx]),
(sym_u_tar[us_idx]-sym_um_tar[us_idx]) - dt*tar_state_input_rate_max.u.u_steer,
dt*tar_state_input_rate_min.u.u_steer - (sym_u_tar[us_idx]-sym_um_tar[us_idx]))
obs_d = ego_r + tar_r
obs_avoid_constr = (obs_d)**2 - ca.bilin(ca.DM.eye(2), ego_pos - tar_pos, ego_pos - tar_pos)
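# obs_avoid_constr <= 0 encodes ||p_ego - p_tar||_2 >= ego_r + tar_r (the two collision
# circles may not overlap), since ca.bilin(I, dp, dp) = dp' * dp.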
ego_constr_stage = ego_input_rate_constr
ego_constr_term = None
ego_constrs = []
for k in range(N):
ego_constrs.append(ca.Function(f'ego_constrs_{k}', [sym_q, sym_u_ego, sym_um_ego], [ego_constr_stage]))
if ego_constr_term is None:
ego_constrs.append(None)
else:
ego_constrs.append(ca.Function(f'ego_constrs_{N}', [sym_q], [ego_constr_term]))
tar_constr_stage = tar_input_rate_constr
tar_constr_term = None
# constr_stage = obs_avoid_constr
constr_stage = ca.vertcat(ego_input_rate_constr, tar_input_rate_constr, obs_avoid_constr)
constr_term = obs_avoid_constr
tar_constrs = []
for k in range(N):
tar_constrs.append(ca.Function(f'tar_constrs_{k}', [sym_q, sym_u_tar, sym_um_tar], [tar_constr_stage]))
if tar_constr_term is None:
tar_constrs.append(None)
else:
tar_constrs.append(ca.Function(f'tar_constrs_{N}', [sym_q], [tar_constr_term]))
shared_constr_stage = obs_avoid_constr
shared_constr_term = obs_avoid_constr
shared_constrs = []
for k in range(N):
if k == 0:
shared_constrs.append(None)
else:
shared_constrs.append(ca.Function(f'shared_constrs_{k}', [sym_q, ca.vertcat(sym_u_ego, sym_u_tar), ca.vertcat(sym_um_ego, sym_um_tar)], [shared_constr_stage]))
shared_constrs.append(ca.Function(f'shared_constrs_{N}', [sym_q], [shared_constr_term]))
agent_constrs = [ego_constrs, tar_constrs]
dgsqp_solver = DGSQP(joint_model,
sym_costs,
agent_constrs,
shared_constrs,
{'ub': state_input_ub, 'lb': state_input_lb},
dgsqp_params)
joint_constr_stage_0 = ca.vertcat(ego_input_rate_constr, tar_input_rate_constr)
joint_constr_stage = ca.vertcat(ego_input_rate_constr, tar_input_rate_constr, obs_avoid_constr)
joint_constr_term = obs_avoid_constr
joint_constrs = []
for k in range(N):
if k == 0:
joint_constrs.append(ca.Function(f'nl_constrs_{k}', [sym_q, ca.vertcat(sym_u_ego, sym_u_tar), ca.vertcat(sym_um_ego, sym_um_tar)], [joint_constr_stage_0]))
else:
joint_constrs.append(ca.Function(f'nl_constrs_{k}', [sym_q, ca.vertcat(sym_u_ego, sym_u_tar), ca.vertcat(sym_um_ego, sym_um_tar)], [joint_constr_stage]))
joint_constrs.append(ca.Function(f'nl_constrs_{N}', [sym_q], [joint_constr_term]))
algames_solver = ALGAMES(joint_model,
sym_costs,
joint_constrs,
{'ub': state_input_ub, 'lb': state_input_lb},
algames_params)
if ibr_ws:
ibr_params = IBRParams(solver_name='ibr',
dt=dt,
N=N,
line_search_iters=50,
ibr_iters=1,
use_ps=False,
p_tol=1e-3,
d_tol=1e-3,
verbose=False,
code_gen=False,
jit=False,
opt_flag='O3',
solver_dir=None,
debug_plot=False,
pause_on_plot=True)
ibr_solver = IBR(joint_model,
sym_costs,
agent_constrs,
shared_constrs,
{'ub': state_input_ub, 'lb': state_input_lb},
ibr_params)
first_seg_len = track_obj.cl_segs[0,0]
sq_res = []
al_res = []
for i in range(num_mc):
print('========================================================')
print(f'Curved track with {theta} degree turn, control horizon: {N}, trial: {i+1}')
while True:
ego_sim_state = VehicleState(t=t)
tar_sim_state = VehicleState(t=t)
ego_sim_state.p.s = max(0.1, rng.random()*first_seg_len)
ego_sim_state.p.x_tran = rng.random()*half_width*2 - half_width
ego_sim_state.v.v_long = rng.random()+2
d = 2*np.pi*rng.random()
tar_sim_state.p.s = ego_sim_state.p.s + 1.2*obs_d*np.cos(d)
if tar_sim_state.p.s < 0:
continue
tar_sim_state.p.x_tran = ego_sim_state.p.x_tran + 1.2*obs_d*np.sin(d)
if np.abs(tar_sim_state.p.x_tran) > half_width:
continue
# tar_sim_state.p.s = rng.random()*first_seg_len/2
# tar_sim_state.p.x_tran = rng.random()*half_width*2 - half_width
tar_sim_state.v.v_long = rng.random()+2
track_obj.local_to_global_typed(ego_sim_state)
track_obj.local_to_global_typed(tar_sim_state)
# =============================================
# Warm start controller setup
# =============================================
if use_ws:
# Set up PID controllers for warm start
ego_steer_params = PIDParams(dt=dt, Kp=1.0, Ki=0.005,
u_max=ego_state_input_max.u.u_steer,
u_min=ego_state_input_min.u.u_steer,
du_max=ego_state_input_rate_max.u.u_steer,
du_min=ego_state_input_rate_min.u.u_steer)
ego_speed_params = PIDParams(dt=dt, Kp=1.0,
u_max=ego_state_input_max.u.u_a,
u_min=ego_state_input_min.u.u_a,
du_max=ego_state_input_rate_max.u.u_a,
du_min=ego_state_input_rate_min.u.u_a)
ego_v_ref = ego_sim_state.v.v_long
# ego_v_ref = tar_sim_state.v.v_long
ego_x_ref = ego_sim_state.p.x_tran
ego_pid_controller = PIDLaneFollower(ego_v_ref, ego_x_ref, dt, ego_steer_params, ego_speed_params)
tar_steer_params = PIDParams(dt=dt, Kp=1.0, Ki=0.005,
u_max=tar_state_input_max.u.u_steer,
u_min=tar_state_input_min.u.u_steer,
du_max=tar_state_input_rate_max.u.u_steer,
du_min=tar_state_input_rate_min.u.u_steer)
tar_speed_params = PIDParams(dt=dt, Kp=1.0,
u_max=tar_state_input_max.u.u_a,
u_min=tar_state_input_min.u.u_a,
du_max=tar_state_input_rate_max.u.u_a,
du_min=tar_state_input_rate_min.u.u_a)
tar_v_ref = tar_sim_state.v.v_long
tar_x_ref = tar_sim_state.p.x_tran
# tar_x_ref = ego_sim_state.p.x_tran
tar_pid_controller = PIDLaneFollower(tar_v_ref, tar_x_ref, dt, tar_steer_params, tar_speed_params)
# Construct initial guess for ALGAMES MPC with PID
ego_state = [copy.deepcopy(ego_sim_state)]
for i in range(N):
state = copy.deepcopy(ego_state[-1])
ego_pid_controller.step(state)
ego_dyn_model.step(state)
ego_state.append(state)
tar_state = [copy.deepcopy(tar_sim_state)]
for i in range(N):
state = copy.deepcopy(tar_state[-1])
tar_pid_controller.step(state)
tar_dyn_model.step(state)
tar_state.append(state)
ego_q_ws = np.zeros((N+1, ego_dyn_model.n_q))
tar_q_ws = np.zeros((N+1, tar_dyn_model.n_q))
ego_u_ws = np.zeros((N, ego_dyn_model.n_u))
tar_u_ws = np.zeros((N, tar_dyn_model.n_u))
for i in range(N+1):
ego_q_ws[i] = np.array([ego_state[i].x.x, ego_state[i].x.y, ego_state[i].v.v_long, ego_state[i].p.e_psi, ego_state[i].p.s-1e-6, ego_state[i].p.x_tran])
tar_q_ws[i] = np.array([tar_state[i].x.x, tar_state[i].x.y, tar_state[i].v.v_long, tar_state[i].p.e_psi, tar_state[i].p.s-1e-6, tar_state[i].p.x_tran])
if i < N:
ego_u_ws[i] = np.array([ego_state[i+1].u.u_a, ego_state[i+1].u.u_steer])
tar_u_ws[i] =
|
np.array([tar_state[i+1].u.u_a, tar_state[i+1].u.u_steer])
|
numpy.array
|
# Fourier transform using numpy.fft.rfft #
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.rfft.html#numpy.fft.rfft
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
alpha = 0.5
####################################################################
def ft(signal, spacing):
oversample = 6 # oversampling factor; to be experimented with further
n = 2**(int(np.log(signal.size)/np.log(2))+1 + oversample)
fourier = np.fft.rfft(signal, n)
freq = np.fft.rfftfreq(n, d=spacing)
power = np.abs(fourier)**2
phase = np.angle(fourier)
return [power, phase, freq]
####################################################################
def gaussian(x, amp, mu, sig, c):
return amp * np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.))) + c
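# Small usage sketch of ft() with a synthetic CCF (grid spacing and line parameters
# are illustrative only):
#
#   x = np.arange(-20., 20., 0.1)                    # velocity grid [km/s]
#   ccf = gaussian(x, -0.5, 0.0, 3.0, 1.0)           # inverted Gaussian line profile
#   power, phase, freq = ft(ccf - np.mean(ccf), 0.1) # freq in s/km, as in the plots below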
####################################################################
def plot_overview(x, ccf, power, phase, phase_tpl, freq, freq_HL, freq_HN):
idx = (freq <= freq_HN)
idx_L = (freq < freq_HL)
idx_H = (freq >= freq_HL) & (freq < freq_HN)
# Signal #
plt.subplot(221)
plt.plot(x, ccf, 'k', alpha=alpha)
plt.title('Signal (CCF)')
plt.xlabel('Velocity [km/s]')
plt.ylabel('Normalized intensity')
plt.grid(True)
# power spectrum #
plt.subplot(222)
plt.plot(freq[idx], power[idx], 'k', alpha=alpha)
plt.title('Power spectrum')
plt.xlabel(r'$\xi$ [s/km]')
plt.ylabel('Power')
plt.grid(True)
# differential phase spectrum
plt.subplot(223)
# diff_phase = np.unwrap(phase)-np.unwrap(phase_tpl)
diff_phase = phase - phase_tpl
plt.plot(freq[idx], diff_phase[idx], 'k', alpha=alpha)
plt.title('Differential phase spectrum')
plt.xlabel(r'$\xi$ [s/km]')
plt.ylabel(r'$\Delta \phi$ [radian]')
plt.grid(True)
# shift spectrum #
plt.subplot(224)
rv = -np.gradient(diff_phase, np.mean(np.diff(freq))) / (2*np.pi)
plt.plot(freq[idx], rv[idx] * 1000, 'k', alpha=alpha)
plt.title('Shift spectrum')
plt.xlabel(r'$\xi$ [s/km]')
plt.ylabel('RV [m/s]')
plt.grid(True)
####################################################################
# calculate the "averaged" radial velocity shift between freq1 and freq2 in Fourier space
def rv_ft(freq1, freq2, freq, diff_phase, power):
idx = (freq >= freq1) & (freq <= freq2)
coeff = np.polyfit(freq[idx], diff_phase[idx], 1, w=power[idx]**0.5)
RV_FT = -coeff[0] / (2*np.pi) * 1000
return RV_FT
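# The conversion above follows from the Fourier shift theorem: a velocity shift dv
# multiplies the spectrum by exp(-2*pi*i*xi*dv), so the differential phase is linear
# in xi with slope -2*pi*dv, and the weighted linear fit recovers dv (in m/s after
# the factor 1000). Illustrative call with placeholder frequency bounds [s/km]:
#
#   RV_low = rv_ft(0.0, freq_HL, freq, diff_phase, power)
#   RV_high = rv_ft(freq_HL, freq_HN, freq, diff_phase, power)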
####################################################################
def plot_correlation(RV_gauss, RV, RV_L, RV_H):
plt.subplot(131)
plt.plot(RV_gauss, RV, 'k.', alpha=alpha)
b0, b1 =
|
np.polyfit(RV_gauss, RV, 1)
|
numpy.polyfit
|
############################################################
#
# functions relative to subdomains
#
############################################################
try:
from mpi4py import MPI
#print('- mpi4py : found')
except:
print('- mpi4py : not found, please install it')
exit(0)
from numpy import zeros,arange,ones,cumsum,sqrt,array,meshgrid
from gmg.fortran_multigrid import buffertodomain
from time import time
#from plotutils import plot2d
def set_family(myrank,np,mp,ix,iy):
procs=arange(np*mp)
i = procs%np
j = procs//np
col=((i//ix)%2)+2*((j//iy)%2)
if col[myrank]==0:
rank0=myrank
if col[myrank]==1:
rank0=myrank-ix
if col[myrank]==2:
rank0=myrank-iy*np
if col[myrank]==3:
rank0=myrank-ix-iy*np
# print(col[myrank])
if (ix<np) and (iy<mp):
family=array([rank0,rank0+ix,rank0+iy*np,rank0+ix+iy*np])
family=family.reshape((2,2))
elif (ix<np):
if col[myrank] in (0,1):
family=array([rank0,rank0+ix])
else:
family=array([rank0+iy*np,rank0+ix+iy*np])
family=family.reshape((1,2))
elif (iy<mp):
if col[myrank] in (0,2):
family=array([rank0,rank0+iy*np])
else:
family=array([rank0+ix,rank0+iy*np+ix])
family=family.reshape((2,1))
else:
if myrank==0:
print('problem with family')
print(ix,iy,np,mp)
print(col)
exit(0)
# if myrank==0:
# print('defined a new family shape %s / ix,iy=%i,%i / np,mp=%i,%i'%(family.shape,ix,iy,np,mp))
return family
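# Worked example (np=4, mp=2, ix=iy=1, ranks numbered row-major so rank = j*np + i):
# rank 5 has (i, j) = (1, 1), hence col = 3 and rank0 = 5 - ix - iy*np = 0, giving
#
#   family = [[0, 1],
#             [4, 5]]
#
# i.e. ranks 0, 1, 4 and 5 form one 2x2 gathering family.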
class Subdomains(object):
def __init__(self,nh,n,m,comm,family,method=2):
""" (n,m) is the shape of the small subdomain before gathering """
# print('family shape=',family)
np = family.shape[1]
mp = family.shape[0]
sizes = ones(np*mp)*(n*m)
offsets = zeros(np*mp)
offsets[1:] = cumsum(sizes)[:-1]
self.nh = nh
self.n = n
self.m = m
self.family = family
self.np = np
self.mp = mp
self.n1 = 2*nh+(n-2*nh)*np
self.m1 = 2*nh+(m-2*nh)*mp
self.method = method
self.nbtimes = 1 # redundancy factor for timing purpose (should be 1 except for timing)
myrank = comm.Get_rank()
self.myrank = myrank
j1,i1=(family==myrank).nonzero()
self.i1=i1[0]*(n-2*nh)
self.j1=j1[0]*(m-2*nh)
# if self.myrank==0:
# print("define buffers for family",family,"np*mp=",np*mp,"n,m=",n,m)
self.localcomm = MPI.COMM_WORLD.Split(family[0,0],0)
self.sbuff = zeros(n*m)
self.rbuff =
|
zeros(n*m*np*mp)
|
numpy.zeros
|
import os
import glob
import pathlib
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score, average_precision_score, precision_recall_curve, roc_curve
from sklearn.utils.fixes import signature
from skimage.measure import compare_ssim as ssim
from scipy.misc import imread
from scipy.io import loadmat, savemat
from ROC import assessment
from ProgressBar import ProgressBar
import cv2
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def check_path(dataset, cube_size):
cube_str = '%d_%d_%d' % tuple(cube_size)
assert cube_str in dataset['cube_dir']
# [SECTION] IMAGE PROCESSING
# important: load as gray image (i.e. 1 channel)
def resize(datum, size):
assert len(datum.shape) == 2
ret = cv2.resize(datum.astype(float), tuple(size))
return ret
def load_images_and_resize(dataset, new_size=[120, 160], train=True, force_recalc=False, return_entire_data=False):
img_dir = dataset['path_train' if train else 'path_test']
n_images = np.sum(count_sequence_n_frame(dataset, test=not train))
print('number of images: ', n_images)
n_clip = dataset['n_clip_train' if train else 'n_clip_test']
#
if return_entire_data:
resized_image_data = np.empty((n_images, new_size[0], new_size[1], 1), dtype=np.float32)
idx = 0
#
for i in range(n_clip):
clip_path = '%s/%s%s/' % (img_dir, 'Train' if train else 'Test', str(i+1).zfill(3))
print(clip_path)
# image
img_files = sorted(glob.glob(clip_path + '*.tif'))
saved_image_file = '%s/%s_image_clip_%d.npz' % (dataset['cube_dir'], 'training' if train else 'test', i+1)
if os.path.isfile(saved_image_file) and not force_recalc:
image_data = np.load(saved_image_file)['image']
else:
image_data = np.array([resize(imread(img_file, 'L')/255., (new_size[1], new_size[0])) for img_file in img_files]).astype(np.float32)
np.savez_compressed(saved_image_file, image=image_data)
print('clip', i+1, image_data.shape)
if return_entire_data:
resized_image_data[idx:idx+len(image_data)] = image_data
idx += len(image_data)
#
if return_entire_data:
return resized_image_data
def load_images_single_clip(dataset, clip_idx, indices, train=True):
assert clip_idx in np.arange(dataset['n_clip_train' if train else 'n_clip_test'])
img_dir = dataset['path_train' if train else 'path_test']
n_images = count_sequence_n_frame(dataset, test=not train)[clip_idx]
print('number of images: ', n_images)
#
clip_path = '%s/%s%s/' % (img_dir, 'Train' if train else 'Test', str(clip_idx+1).zfill(3))
print(clip_path)
# image
img_files = sorted(glob.glob(clip_path + '*.tif'))
image_data = np.array([imread(img_files[idx])/255. for idx in indices]).astype(np.float32)
print('clip', clip_idx+1, image_data.shape)
return image_data
# [SECTION] CUBE PROCESSING
def split_cubes(dataset, clip_idx, cube_size, training_set=True, force_recalc=False, dist_thresh=None):
check_path(dataset, cube_size)
n_clip = dataset['n_clip_train' if training_set else 'n_clip_test']
assert clip_idx in range(n_clip)
print('clip %2d/%2d' % (clip_idx + 1, n_clip))
# load from file if existed
saved_cube_file = '%s/%s_cubes_clip_%d_size_%d_%d_%d.npz' % \
(dataset['cube_dir'], 'training' if training_set else 'test', clip_idx + 1, cube_size[0], cube_size[1], cube_size[2])
if os.path.isfile(saved_cube_file) and not force_recalc:
loader = np.load(saved_cube_file)
cubes = loader['data']
mapping = loader['mapping']
return cubes, mapping
# first load image data from file
saved_image_file = '%s/%s_image_clip_%d.npz' % (dataset['cube_dir'], 'training' if training_set else 'test', clip_idx + 1)
if not os.path.isfile(saved_image_file):
print('image file not found! (%s)' % saved_image_file)
return None, None
image_data = np.load(saved_image_file)['image']
h, w = image_data.shape[1:3]
assert h % cube_size[0] == 0
assert w % cube_size[1] == 0
h_grid, w_grid = np.array([h, w])//cube_size[:2]
# split images to cubes
d_grid = len(image_data) + 1 - cube_size[2]
cubes = np.zeros(np.concatenate(([h_grid * w_grid * d_grid], cube_size), axis=0), dtype=np.float32)
mapping = np.zeros((h_grid * w_grid * d_grid, 4), dtype=int)
print(cubes.shape, image_data.shape)
for j in range(d_grid):
for k in range(h_grid):
for l in range(w_grid):
cubes[j*h_grid*w_grid+k*w_grid+l] = np.moveaxis(image_data[j:j+cube_size[2],
k*cube_size[0]:(k+1)*cube_size[0],
l*cube_size[1]:(l+1)*cube_size[1]], 0, -1)
mapping[j*h_grid*w_grid+k*w_grid+l] = [clip_idx, j, k, l]
if dist_thresh is not None and training_set:
successive_dist = np.array([np.mean(abs(cubes[i]-cubes[i+1])) for i in range(len(cubes)-1)])
idx = np.where(successive_dist >= dist_thresh)[0]
cubes, mapping = cubes[idx], mapping[idx]
print('new shape:', cubes.shape, image_data.shape)
np.savez_compressed(saved_cube_file, data=cubes, mapping=mapping)
return cubes, mapping
def calc_n_cube_in_set(dataset, h, w, cube_size, training_set=True):
check_path(dataset, cube_size)
assert h % cube_size[0] == 0
assert w % cube_size[1] == 0
h_grid, w_grid = np.array([h, w])//cube_size[:2]
sequence_n_frame = count_sequence_n_frame(dataset, test=not training_set)
n_cube = np.sum([((n_frame + 1 - cube_size[2]) * h_grid * w_grid) for n_frame in sequence_n_frame])
return n_cube
def load_all_cubes_in_set(dataset, h, w, cube_size, training_set=True):
check_path(dataset, cube_size)
n_cube_in_set = calc_n_cube_in_set(dataset, h, w, cube_size, training_set=training_set)
n_clip = dataset['n_clip_train' if training_set else 'n_clip_test']
#
cubes = np.zeros(np.concatenate(([n_cube_in_set], cube_size), axis=0), dtype=np.float32)
mapping = np.zeros((n_cube_in_set, 4), dtype=int)
idx = 0
for clip_idx in range(n_clip):
tmp_cubes, tmp_mapping = split_cubes(dataset, clip_idx, cube_size, training_set=training_set)
assert len(tmp_cubes) == len(tmp_mapping)
cubes[idx:idx+len(tmp_cubes)] = tmp_cubes
mapping[idx:idx+len(tmp_mapping)] = tmp_mapping
idx += len(tmp_mapping)
# to work with thresholding motion in training samples
item_sum = np.array([np.sum(item) for item in cubes])
idx = np.where(item_sum == 0.0)[0]
cubes = np.delete(cubes, idx, axis=0)
mapping = np.delete(mapping, idx, axis=0)
print(cubes.shape, mapping.shape)
#
return cubes, mapping
# get sequence of number of clip's frames
def count_sequence_n_frame(dataset, test=True):
sequence_n_frame = np.zeros(dataset['n_clip_test' if test else 'n_clip_train'], dtype=int)
for i in range(len(sequence_n_frame)):
clip_path = '%s/%s%s/' % (dataset['path_test' if test else 'path_train'], 'Test' if test else 'Train', str(i+1).zfill(3))
sequence_n_frame[i] = len(sorted(glob.glob(clip_path + '*.tif')))
return sequence_n_frame
# 1: abnormal, 0: normal
def get_test_frame_labels(dataset, sequence_n_frame, cube_size, is_subway=False):
ground_truth = dataset['ground_truth']
assert len(ground_truth) == len(sequence_n_frame)
labels_select_last = np.zeros(0, dtype=int)
labels_select_first = np.zeros(0, dtype=int)
labels_select_mid = np.zeros(0, dtype=int)
labels_full = np.zeros(0, dtype=int)
for i in range(len(sequence_n_frame)):
if not is_subway:
seg = ground_truth[i]
# label of full frames
tmp_labels = np.zeros(sequence_n_frame[i])
for j in range(0, len(seg), 2):
tmp_labels[(seg[j]-1):seg[j+1]] = 1
else:
tmp_labels = ground_truth[i]
# label of selected frames
labels_full = np.append(labels_full, tmp_labels)
n_removed_frame = cube_size[2] - 1
labels_select_last = np.append(labels_select_last, tmp_labels[n_removed_frame:])
labels_select_first = np.append(labels_select_first, tmp_labels[:-n_removed_frame])
seq_length = sequence_n_frame[i] + 1 - cube_size[2]
start_idx = cube_size[2]//2
stop_idx = start_idx + seq_length
labels_select_mid = np.append(labels_select_mid, tmp_labels[start_idx:stop_idx])
assert len(np.unique([len(labels_select_last), len(labels_select_first), len(labels_select_mid)])) == 1
# h_grid, w_grid = np.array(image_size)//cube_size[:2]
assert len(labels_select_mid) == np.sum([(n_frame + 1 - cube_size[2]) for n_frame in sequence_n_frame])
write_sequence_to_bin('%s/labels_full.bin' % dataset['path_test'], labels_full)
return labels_select_last, labels_select_first, labels_select_mid
# POST-PROCESSING
def plot_error_map(dataset, image_size, cube_size, clip_idx, frame_idx, model_idx, score_type_idx=3, using_test_data=True):
def scale_range(img):
for i in range(img.shape[-1]):
img[..., i] = (img[..., i] - np.min(img[..., i]))/(np.max(img[..., i]) - np.min(img[..., i]))
return img
# load score maps
score_appe_maps, score_row_maps, score_col_maps = calc_score_one_clip(dataset, image_size, cube_size,
model_idx, clip_idx,
train=not using_test_data,
force_calc=True)
if isinstance(frame_idx, int):
frame_idx = [frame_idx]
if len(frame_idx) > 16:
frame_idx = frame_idx[:16]
score_appe_maps, score_row_maps, score_col_maps = score_appe_maps[frame_idx], score_row_maps[frame_idx], score_col_maps[frame_idx]
print(score_appe_maps.shape, score_row_maps.shape, score_col_maps.shape)
# plot
color_map = 'copper'
r, c = 6, 8
fig, axs = plt.subplots(r, c)
for j in range(c):
if j in np.arange(len(score_appe_maps)):
axs[0, j].imshow(scale_range(score_appe_maps[j, :, :, score_type_idx]), cmap=color_map)
axs[1, j].imshow(scale_range(score_row_maps[j, :, :]), cmap=color_map)
axs[2, j].imshow(scale_range(score_col_maps[j, :, :]), cmap=color_map)
if j+c in np.arange(len(score_appe_maps)):
axs[3, j].imshow(scale_range(score_appe_maps[j+c, :, :, score_type_idx]), cmap=color_map)
axs[4, j].imshow(scale_range(score_row_maps[j+c, :, :]), cmap=color_map)
axs[5, j].imshow(scale_range(score_col_maps[j+c, :, :]), cmap=color_map)
for i in range(r):
axs[i, j].axis('off')
plt.show()
# SCORE PROCESSING
def calc_anomaly_score_cube_pair(in_cube, out_cube):
assert in_cube.shape == out_cube.shape
loss = (in_cube-out_cube)**2
# loss = np.sum(loss, axis=-1) # added
PSNR = -10*np.log10(np.max(out_cube)**2/np.mean(loss))
SSIM = ssim(in_cube, out_cube, data_range=np.max([in_cube, out_cube])-np.min([in_cube, out_cube]),
multichannel=len(in_cube.shape) == 3 and in_cube.shape[-1] > 1)
return np.array([np.mean(loss), np.max(loss), np.median(loss), np.std(loss), PSNR, SSIM])
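# The returned score vector is ordered [mean, max, median, std, PSNR, SSIM]; e.g.
# index 1 (max of the squared error) is the channel used by get_weights and by the
# patch-averaged scoring further below, while plot_error_map defaults to index 3 (std).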
def find_cube_idx(mapping, d_idx, h_idx, w_idx):
tmp = np.absolute(mapping[:, 1:] - np.array([d_idx, h_idx, w_idx]))
tmp = np.sum(tmp, axis=1)
idx = np.where(tmp == 0)[0]
assert len(idx) == 1
return idx[0]
def calc_anomaly_score_maps_one_clip(in_cubes, in_mapping, out_cubes, out_row_softmax, out_col_softmax, image_size):
h_grid, w_grid = np.array(image_size)//in_cubes.shape[1:3]
# calc score for each cube pair
assert in_cubes.shape == out_cubes.shape
scores_appe = np.array([calc_anomaly_score_cube_pair(in_cubes[i], out_cubes[i]) for i in range(len(in_cubes))])
scores_row = np.mean((seq_to_one_hot(in_mapping[:, 2], h_grid) - out_row_softmax)**2, axis=1)
scores_col = np.mean((seq_to_one_hot(in_mapping[:, 3], w_grid) - out_col_softmax)**2, axis=1)
assert len(np.unique([len(scores_appe), len(scores_row), len(scores_col)])) == 1
# arrange scores according to frames
assert len(np.unique(in_mapping[:, 0])) == 1
d_values = sorted(np.unique(in_mapping[:, 1]))
# process each frame
score_appe_maps = np.zeros((len(d_values), h_grid, w_grid, scores_appe.shape[-1]), dtype=np.float32)
score_row_maps = np.zeros((len(d_values), h_grid, w_grid), dtype=np.float32)
score_col_maps = np.zeros((len(d_values), h_grid, w_grid), dtype=np.float32)
progress = ProgressBar(len(d_values), fmt=ProgressBar.FULL)
for i in range(len(d_values)):
progress.current += 1
progress()
for j in range(h_grid):
for k in range(w_grid):
cube_idx = find_cube_idx(in_mapping, d_values[i], j, k)
score_appe_maps[i, j, k] = scores_appe[cube_idx]
score_row_maps[i, j, k] = scores_row[cube_idx]
score_col_maps[i, j, k] = scores_col[cube_idx]
progress.done()
return score_appe_maps, score_row_maps, score_col_maps
def seq_to_one_hot(seq, n_class):
ret = np.zeros((len(seq), n_class), dtype=np.float32)
ret[np.arange(len(seq)), seq] = 1.0
return ret
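# e.g. seq_to_one_hot(np.array([0, 2]), 3) returns
#   [[1., 0., 0.],
#    [0., 0., 1.]]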
# suitable for Avenue
def calc_score_one_clip(dataset, image_size, cube_size, epoch, clip_idx, train=False, force_calc=False):
dataset['cube_dir'] = './training_saver/%s/cube_%d_%d_%d_%d_%d' % \
(dataset['name'], image_size[0], image_size[1], cube_size[0], cube_size[1], cube_size[2])
score_dir = '%s/scores' % dataset['cube_dir']
saved_data_path = '%s/output_%s/%d_epoch' % (score_dir, 'train' if train else 'test', epoch)
saved_score_file = '%s/score_epoch_%d_clip_%d.npz' % (saved_data_path, epoch, clip_idx + 1)
if not force_calc and os.path.isfile(saved_score_file):
loader = np.load(saved_score_file)
return loader['appe'], loader['row'], loader['col']
# load true data
in_cubes, in_mapping = split_cubes(dataset, clip_idx, cube_size, training_set=train)
assert len(np.unique(in_mapping[:, 0])) == 1
print(in_cubes.shape, in_mapping.shape)
# load outputted data
score_dir = '%s/scores' % dataset['cube_dir']
saved_data_path = '%s/output_%s/%d_epoch' % (score_dir, 'train' if train else 'test', epoch)
saved_data_file = '%s/output_%d.npz' % (saved_data_path, clip_idx)
out_loader = np.load(saved_data_file)
out_cubes = out_loader['cube'].astype(np.float32)
out_row_softmax = out_loader['row'].astype(np.float32)
out_col_softmax = out_loader['col'].astype(np.float32)
print(out_cubes.shape, out_row_softmax.shape, out_col_softmax.shape)
# calc score and save to file
score_appe_maps, score_row_maps, score_col_maps = \
calc_anomaly_score_maps_one_clip(in_cubes, in_mapping, out_cubes, out_row_softmax, out_col_softmax, image_size)
np.savez_compressed(saved_score_file, appe=score_appe_maps, row=score_row_maps, col=score_col_maps)
return score_appe_maps, score_row_maps, score_col_maps
def calc_score_full_clips(dataset, image_size, cube_size, epoch, train=False, force_calc=False):
dataset['cube_dir'] = './training_saver/%s/cube_%d_%d_%d_%d_%d' % \
(dataset['name'], image_size[0], image_size[1], cube_size[0], cube_size[1], cube_size[2])
score_dir = '%s/scores' % dataset['cube_dir']
saved_data_path = '%s/output_%s/%d_epoch' % (score_dir, 'train' if train else 'test', epoch)
saved_score_file = '%s/score_epoch_%d_full.npz' % (saved_data_path, epoch)
if os.path.isfile(saved_score_file) and not force_calc:
loader = np.load(saved_score_file)
return loader['appe'], loader['row'], loader['col']
# calc scores for all clips and save to file
n_clip = dataset['n_clip_train' if train else 'n_clip_test']
print('training set' if train else 'test set')
for i in range(n_clip):
if i == 0:
score_appe_maps, score_row_maps, score_col_maps = \
calc_score_one_clip(dataset, image_size, cube_size, epoch, i, train=train, force_calc=force_calc)
else:
tmp_score_appe, tmp_score_row, tmp_score_col = \
calc_score_one_clip(dataset, image_size, cube_size, epoch, i, train=train, force_calc=force_calc)
score_appe_maps = np.concatenate((score_appe_maps, tmp_score_appe), axis=0)
score_row_maps = np.concatenate((score_row_maps, tmp_score_row), axis=0)
score_col_maps = np.concatenate((score_col_maps, tmp_score_col), axis=0)
np.savez_compressed(saved_score_file, appe=score_appe_maps, row=score_row_maps, col=score_col_maps)
return score_appe_maps, score_row_maps, score_col_maps
def score_maps_to_score_seq(score_maps, operation, max_avg_patch_size=None):
assert operation in [np.mean, np.min, np.max, np.median, np.std]
if not max_avg_patch_size:
return np.array([operation(score_map, axis=(0, 1)) for score_map in score_maps])
if len(score_maps.shape) == 4:
return np.array([np.max(cv2.blur(score_map[..., 1], (max_avg_patch_size, max_avg_patch_size))[1:-1, 1:-1]) for score_map in score_maps])
return np.array([np.max(cv2.blur(score_map, (max_avg_patch_size, max_avg_patch_size))[1:-1, 1:-1]) for score_map in score_maps])
def get_weights(dataset, image_size, cube_size, epoch, operation, save_as_image=False):
score_appe_maps, score_row_maps, score_col_maps = \
calc_score_full_clips(dataset, image_size, cube_size, epoch, train=True, force_calc=False)
# score_appe_seq = score_maps_to_score_seq(score_appe_maps, operation)
# score_row_seq = score_maps_to_score_seq(score_row_maps, operation)
# score_col_seq = score_maps_to_score_seq(score_col_maps, operation)
appe_weight, row_weight, col_weight = np.mean(score_appe_maps, axis=0)[..., 1], np.mean(score_row_maps, axis=0), np.mean(score_col_maps, axis=0)
if save_as_image:
from custom_cmap import parula_map
print('shape:', appe_weight.shape, row_weight.shape, col_weight.shape)
print('min:', np.min(appe_weight), np.min(row_weight),
|
np.min(col_weight)
|
numpy.min
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def coordinate_l2r(ql, flip_axis: str):
szs = {'x': np.array([[-1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]]),
'y': np.array([[1., 0., 0.],
[0., -1., 0.],
[0., 0., 1.]]),
'z': np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., -1.]])}
sz = szs[flip_axis]
if ql.size == 4:
sz_homo = np.zeros((4, 4))
sz_homo[:3, :3] = sz
sz_homo[3, 3] = 1
return sz_homo.dot(ql)
elif ql.size == 3:
return sz.dot(ql)
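# e.g. coordinate_l2r(np.array([1., 2., 3.]), 'x') -> array([-1., 2., 3.])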
def rot_x_l(roll: float):
return np.array([[1., 0., 0.],
[0., np.cos(roll), -np.sin(roll)],
[0, np.sin(roll), np.cos(roll)]])
def rot_y_l(pitch: float):
return np.array([[np.cos(pitch), 0., np.sin(pitch)],
[0., 1., 0.],
[-np.sin(pitch), 0., np.cos(pitch)]])
def rot_z_l(yaw: float):
return np.array([[np.cos(yaw), -
|
np.sin(yaw)
|
numpy.sin
|
################################################################################
# Code from
# https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py
# Modified the original code so that it also loads images from the current
# directory as well as the subdirectories
################################################################################
# import h5py
import torch.utils.data as data
import pickle
import PIL
import numpy as np
import torch
from PIL import Image, ImageEnhance
import os
import math, random
import os.path
import sys, traceback
import cv2
import json
from skimage.transform import rotate
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def make_dataset(list_name):
text_file = open(list_name, "r")
images_list = text_file.readlines()
text_file.close()
return images_list
def read_array(path):
with open(path, "rb") as fid:
width, height, channels = np.genfromtxt(fid, delimiter="&", max_rows=1,
usecols=(0, 1, 2), dtype=int)
fid.seek(0)
num_delimiter = 0
byte = fid.read(1)
while True:
if byte == b"&":
num_delimiter += 1
if num_delimiter >= 3:
break
byte = fid.read(1)
array = np.fromfile(fid, np.float32)
array = array.reshape((width, height, channels), order="F")
return np.transpose(array, (1, 0, 2)).squeeze()
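# Note: the expected on-disk layout is an ASCII header "width&height&channels&" followed
# by raw float32 data in column-major (Fortran) order, hence order="F" above and the
# final transpose to (height, width, channels).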
class LandmarksFolder(data.Dataset):
def __init__(self, opt, data_dir, phase, img_a_name, img_b_name, img_c_name):
json_path = data_dir + 'subset_sphere_0_data_clean.json'
with open(json_path) as json_file:
self.json_data = json.load(json_file)
self.opt = opt
self.phase = phase
self.img_a_name = img_a_name
self.img_b_name = img_b_name
self.img_c_name = img_c_name
num_valid = len(self.json_data)
num_train = int(round(num_valid * 0.85))
ref_depth = 1.25
self.crop_size = 256
json_data_sub = self.json_data
near_plane_depth_list = []
ref_idx = 0
for i in range(len(json_data_sub)):
near_plane_depth_list.append(json_data_sub[i]['near_plane_depth'])
final_depth = np.percentile(np.array(near_plane_depth_list), 10)
self.scene_scale = ref_depth/final_depth
ref_list = json_data_sub
self.mpi_train_list = json_data_sub[0:num_train]
self.mpi_test_list = json_data_sub[num_train:]
if phase == 'train':
self.mpi_list = self.mpi_train_list
elif phase == 'interpolation':
self.mpi_list = json_data_sub
elif phase == 'test':
self.mpi_list = self.mpi_test_list
else:
print('PHASE DOES NOT EXIST')
sys.exit()
if opt.dataset == 'trevi':
scene_id = 36
elif opt.dataset == 'pantheon':
scene_id = 23
elif opt.dataset == 'coeur':
scene_id = 13
elif opt.dataset == 'rushmore':
scene_id = 1589
elif opt.dataset == 'lincoln':
scene_id = 21
elif opt.dataset == 'eiffel':
scene_id = 0
elif opt.dataset == 'rock':
scene_id = 11
elif opt.dataset == 'navona':
scene_id = 57
self.data_dir = data_dir
self.aspect_ratio_threshold_arr = np.array([9./16., 2./3., 3./4., 1., 4./3., 3./2., 16./9.])
self.resized_wh =
|
np.array([[288, 512], [320, 480], [384, 512], [384, 384], [512, 384], [480, 320], [512, 288]])
|
numpy.array
|
#!/usr/bin/env python
import numpy as np
import functools
import datetime
import sys
import lib
class AbstractTester:
@classmethod
def __init__(cls):
for meth in dir(cls):
if 'test' in meth:
getattr(cls, meth)()
class SolveLinearSystemTester(AbstractTester):
@staticmethod
def test_diagonal():
a = np.matrix([[1, 0], [0, 1]])
b = np.array([[1], [2]])
desired = np.array([[1], [2]])
actual = lib.solve_system(a, b)
np.testing.assert_almost_equal(actual, desired)
@staticmethod
def test_anti_diagonal():
a = np.matrix([[0, 1], [1, 0]])
b = np.array([[1], [2]])
desired = np.array([[2], [1]])
actual = lib.solve_system(a, b)
np.testing.assert_almost_equal(actual, desired)
@staticmethod
def test_pseudo():
a = np.matrix([[1, 2], [1, 3], [1, 5]])
b = np.array([[1], [2], [3]])
desired = np.array([[-1/7], [9/14]])
actual = lib.solve_system(a, b)
|
np.testing.assert_almost_equal(actual, desired)
|
numpy.testing.assert_almost_equal
|
# Copyright (C) 2021 <NAME>
#
# SPDX-License-Identifier: MIT
#
# This tests the custom assembly for the unbiased Nitsche formulation in a special case
# that can be expressed using ufl:
# We consider a very simple test case made up of two disconnected elements with a constant
# gap in x[tdim-1]-direction. The contact surfaces are made up of exactly one edge
# from each element that are perfectly aligned such that the quadrature points only
# differ in the x[tdim-1]-direction by the given gap.
# For comparison, we consider a DG function space on a mesh that is constructed by
# removing the gap between the elements and merging the edges making up the contact
# surface into one. This allows us to use DG-functions and ufl to formulate the contact
# terms in the variational form by suitably adjusting the deformation u and using the given
# constant gap.
import numpy as np
import scipy
import pytest
import ufl
from dolfinx.cpp.mesh import to_type
import dolfinx.fem as _fem
from dolfinx.graph import create_adjacencylist
from dolfinx.mesh import (CellType, locate_entities_boundary, locate_entities, create_mesh,
compute_midpoints, meshtags)
from mpi4py import MPI
import dolfinx_cuas
import dolfinx_contact
import dolfinx_contact.cpp
from dolfinx_contact.helpers import (R_minus, dR_minus, R_plus, dR_plus, epsilon, lame_parameters, sigma_func)
kt = dolfinx_contact.cpp.Kernel
def DG_rhs_plus(u0, v0, h, n, gamma, theta, sigma, gap, dS):
# This version of the ufl form agrees with the formulation in https://doi.org/10.1007/s00211-018-0950-x
def Pn_g(u, a, b):
return ufl.dot(u(a) - u(b), -n(b)) - gap - (h(a) / gamma) * ufl.dot(sigma(u(a)) * n(a), -n(b))
def Pn_gtheta(v, a, b):
return ufl.dot(v(a) - v(b), -n(b)) - theta * (h(a) / gamma) * ufl.dot(sigma(v(a)) * n(a), -n(b))
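# Pn_g is the Nitsche contact quantity: the normal jump of u across the interface minus the gap,
# minus (h/gamma) times the normal traction; R_plus of it switches the contact terms on only where
# penetration occurs. Pn_gtheta is the matching test-function term with theta-weighted traction.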
F = 0.5 * (gamma / h('+')) * R_plus(Pn_g(u0, '+', '-')) * Pn_gtheta(v0, '+', '-') * dS
F += 0.5 * (gamma / h('-')) * R_plus(Pn_g(u0, '-', '+')) * Pn_gtheta(v0, '-', '+') * dS
return F
def DG_rhs_minus(u0, v0, h, n, gamma, theta, sigma, gap, dS):
# This version of the ufl form agrees with its one-sided equivalent in nitsche_ufl.py
def Pn_g(u, a, b):
return ufl.dot(sigma(u(a)) * n(a), -n(b)) + (gamma / h(a)) * (gap - ufl.dot(u(a) - u(b), -n(b)))
def Pn_gtheta(v, a, b):
return theta * ufl.dot(sigma(v(a)) * n(a), -n(b)) - (gamma / h(a)) * ufl.dot(v(a) - v(b), -n(b))
F = 0.5 * (h('+') / gamma) * R_minus(Pn_g(u0, '+', '-')) * Pn_gtheta(v0, '+', '-') * dS
F += 0.5 * (h('-') / gamma) * R_minus(Pn_g(u0, '-', '+')) * Pn_gtheta(v0, '-', '+') * dS
return F
def DG_jac_plus(u0, v0, w0, h, n, gamma, theta, sigma, gap, dS):
# This version of the ufl form agrees with the formulation in https://doi.org/10.1007/s00211-018-0950-x
def Pn_g(u, a, b):
return ufl.dot(u(a) - u(b), -n(b)) - gap - (h(a) / gamma) * ufl.dot(sigma(u(a)) * n(a), -n(b))
def Pn_gtheta(v, a, b, t):
return ufl.dot(v(a) - v(b), -n(b)) - t * (h(a) / gamma) * ufl.dot(sigma(v(a)) * n(a), -n(b))
J = 0.5 * (gamma / h('+')) * dR_plus(Pn_g(u0, '+', '-')) * \
Pn_gtheta(w0, '+', '-', 1.0) * Pn_gtheta(v0, '+', '-', theta) * dS
J += 0.5 * (gamma / h('-')) * dR_plus(Pn_g(u0, '-', '+')) * \
Pn_gtheta(w0, '-', '+', 1.0) * Pn_gtheta(v0, '-', '+', theta) * dS
return J
def DG_jac_minus(u0, v0, w0, h, n, gamma, theta, sigma, gap, dS):
# This version of the ufl form agrees with its one-sided equivalent in nitsche_ufl.py
def Pn_g(u, a, b):
return ufl.dot(sigma(u(a)) * n(a), -n(b)) + (gamma / h(a)) * (gap - ufl.dot(u(a) - u(b), -n(b)))
def Pn_gtheta(v, a, b, t):
return t * ufl.dot(sigma(v(a)) * n(a), -n(b)) - (gamma / h(a)) * ufl.dot(v(a) - v(b), -n(b))
J = 0.5 * (h('+') / gamma) * dR_minus(Pn_g(u0, '+', '-')) * \
Pn_gtheta(w0, '+', '-', 1.0) * Pn_gtheta(v0, '+', '-', theta) * dS
J += 0.5 * (h('-') / gamma) * dR_minus(Pn_g(u0, '-', '+')) * \
Pn_gtheta(w0, '-', '+', 1.0) * Pn_gtheta(v0, '-', '+', theta) * dS
return J
def compute_dof_permutations(V_dg, V_cg, gap, facets_dg, facets_cg):
'''The meshes used for the two different formulations are
created independently of each other. Therefore we need to
determine how to map the dofs from one mesh to the other in
order to compare the results'''
mesh_dg = V_dg.mesh
mesh_cg = V_cg.mesh
bs = V_cg.dofmap.index_map_bs
tdim = mesh_dg.topology.dim
mesh_dg.topology.create_connectivity(tdim - 1, tdim)
f_to_c_dg = mesh_dg.topology.connectivity(tdim - 1, tdim)
mesh_cg.topology.create_connectivity(tdim - 1, tdim)
mesh_cg.topology.create_connectivity(tdim, tdim - 1)
f_to_c_cg = mesh_cg.topology.connectivity(tdim - 1, tdim)
c_to_f_cg = mesh_cg.topology.connectivity(tdim, tdim - 1)
x_cg = V_cg.tabulate_dof_coordinates()
x_dg = V_dg.tabulate_dof_coordinates()
for i in range(len(facets_dg)):
facet_dg = facets_dg[i]
dofs_cg = []
coordinates_cg = []
for facet_cg in np.array(facets_cg)[:, 0]:
# retrieve dofs and dof coordinates for mesh with gap
cell = f_to_c_cg.links(facet_cg)[0]
all_facets = c_to_f_cg.links(cell)
local_index = np.argwhere(np.array(all_facets) == facet_cg)[0, 0]
dof_layout = V_cg.dofmap.dof_layout
local_dofs = dof_layout.entity_closure_dofs(tdim - 1, local_index)
dofs_cg0 = V_cg.dofmap.cell_dofs(cell)[local_dofs]
dofs_cg.append(dofs_cg0)
coordinates_cg.append(x_cg[dofs_cg0, :])
# retrieve all dg dofs on mesh without gap for each cell
# and modify coordinates by gap if necessary
cells = f_to_c_dg.links(facet_dg)
for cell in cells:
midpoint = compute_midpoints(mesh_dg, tdim, [cell])[0]
if midpoint[tdim - 1] > 0:
# coordinates of corresponding dofs are identical for both meshes
dofs_dg0 = V_dg.dofmap.cell_dofs(cell)
coordinates_dg0 = x_dg[dofs_dg0, :]
else:
# coordinates of corresponding dofs need to be adjusted by gap
dofs_dg1 = V_dg.dofmap.cell_dofs(cell)
coordinates_dg1 = x_dg[dofs_dg1, :]
coordinates_dg1[:, tdim - 1] -= gap
# create array of indices to access corresponding function values
num_dofs_f = dofs_cg[0].size
indices_cg = np.zeros(bs * 2 * num_dofs_f, dtype=np.int32)
for i, dofs in enumerate(dofs_cg):
for j, dof in enumerate(dofs):
for k in range(bs):
indices_cg[i * num_dofs_f * bs + j * bs + k] = bs * dof + k
indices_dg = np.zeros(indices_cg.size, dtype=np.int32)
for i, dofs in enumerate(dofs_cg[0]):
coordinates = coordinates_cg[0][i, :]
# find dg dofs that correspond to cg dofs for first element
dof = dofs_dg0[np.isclose(coordinates_dg0, coordinates).all(axis=1).nonzero()[0][0]]
# create array of indices to access corresponding function values
for k in range(bs):
indices_dg[i * bs + k] = dof * bs + k
for i, dofs in enumerate(dofs_cg[1]):
coordinates = coordinates_cg[1][i, :]
# find dg dofs that correspond to cg dofs for first element
dof = dofs_dg1[np.isclose(coordinates_dg1, coordinates).all(axis=1).nonzero()[0][0]]
# create array of indices to access corresponding function values
for k in range(bs):
indices_dg[num_dofs_f * bs + i * bs + k] = dof * bs + k
# return indices used for comparing assembled vectors/matrices
return indices_cg, indices_dg
def create_functionspaces(ct, gap):
''' This is a helper function to create the two element function spaces
both for custom assembly and the DG formulation for
quads, triangles, hexes and tetrahedra'''
cell_type = to_type(ct)
if cell_type == CellType.quadrilateral:
x_ufl = np.array([[0, 0], [0.8, 0], [0.1, 1.3], [0.7, 1.2], [-0.1, -1.2], [0.8, -1.1]])
x_cuas = np.array([[0, 0], [0.8, 0], [0.1, 1.3], [0.7, 1.2], [0, -gap],
[0.8, -gap], [-0.1, -1.2 - gap], [0.8, -1.1 - gap]])
cells_ufl = np.array([[0, 1, 2, 3], [4, 5, 0, 1]], dtype=np.int32)
cells_cuas = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
elif cell_type == CellType.triangle:
x_ufl = np.array([[0, 0, 0], [0.8, 0, 0], [0.3, 1.3, 0.0], [0.4, -1.2, 0.0]])
x_cuas = np.array([[0, 0, 0], [0.8, 0, 0], [0.3, 1.3, 0.0], [
0, -gap, 0], [0.8, -gap, 0], [0.4, -1.2 - gap, 0.0]])
cells_ufl = np.array([[0, 1, 2], [0, 1, 3]], dtype=np.int32)
cells_cuas = np.array([[0, 1, 2], [3, 4, 5]], dtype=np.int32)
elif cell_type == CellType.tetrahedron:
x_ufl =
|
np.array([[0, 0, 0], [1.1, 0, 0], [0.3, 1.0, 0], [1, 1.2, 1.5], [0.8, 1.2, -1.6]])
|
numpy.array
|
#!/usr/bin/env python
import os,time,datetime
import glob
import matplotlib
matplotlib.use('PDF')
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.path import Path
import matplotlib.animation as animate
from matplotlib_scalebar.scalebar import ScaleBar
import numpy as np
import nd2reader as nd2
import bioformats,javabridge
import warnings
warnings.filterwarnings("ignore")
from skimage import feature,morphology,restoration #Edge detection
from skimage.transform import warp,SimilarityTransform
from skimage.feature import ORB, match_descriptors,register_translation
from skimage import measure
from skimage.measure import ransac
from skimage.filters import sobel
from skimage.color import label2rgb
import scipy,cv2
from scipy import ndimage as ndi
from scipy.signal import savgol_filter
from sklearn.cluster import DBSCAN
from sklearn import metrics
class RecruitmentMovieAnalyzer(object):
def __init__(self):
self.nuclear_channel= 'TRITC'
self.irrad_frame = 'auto'
self.roi_buffer = [-10,0]
self.track_nucleus = True
self.autosave = True
self.save_direct = './MovieAnalysis_output/'
self.save_movie = True
self.save_roi_data = True
self.additional_rois= 0
#self.correct_bleach = True
self.bleach_frames = 0
self.threshold = -1
self.bg_correct = True
self.bleach_correct = True
def SetParameters(self,nuclear_channel='TRITC',protein_channel='EGFP',irrad_frame='auto',roi_buffer=[-10,0],track_nucleus=True,autosave=True,save_direct='./MovieAnalysis_output/',save_movie=True,save_roi_data=True,additional_rois=0,bleach_correct=True,bleach_frames=0,threshold=-1,bg_correct=True,verbose=True):
self.nuclear_channel= nuclear_channel
self.protein_channel= protein_channel
self.irrad_frame = irrad_frame
self.roi_buffer = roi_buffer
self.track_nucleus = track_nucleus
self.autosave = autosave
self.save_direct = save_direct
self.save_movie = save_movie
self.save_roi_data = save_roi_data
self.additional_rois= additional_rois
#self.correct_bleach = correct_bleach
self.bleach_frames = bleach_frames
self.threshold = threshold
if str(self.threshold).lower() == 'auto':
self.threshold = 3
self.bg_correct = bg_correct
self.bleach_correct = bleach_correct
self.verbose = verbose
if not os.path.isdir(self.save_direct):
os.mkdir(self.save_direct)
else:
print("WARNING: Directory "+self.save_direct+" already exists! Be aware that you may be overwriting files!!!")
# if self.save_movie:
# self.ffmpeg_writer = animate.writers['ffmpeg']
def LoadFile(self,video_file='',roi_file=''):
if not os.path.isfile(video_file):
print("ERROR: Cannot load file - "+video_file+" - File not found!")
vid_exists = False
else:
vid_exists = True
if not os.path.isfile(roi_file):
print("ERROR: Cannot load file - "+roi_file+" - File not found!")
roi_exists = False
else:
roi_exists = True
if roi_exists and vid_exists:
try:
self.video_list.append(video_file)
except:
self.video_list = [video_file]
try:
self.roif_list.append(roi_file)
except:
self.roif_list = [roi_file]
else:
print("File(s) missing. Cannot load desired experiment for analysis!!!")
def LoadDirectory(self,video_direct='',extension='.nd2',roi_extension='_ROI.tif'):
if not os.path.isdir(video_direct):
print("ERROR: Cannot load directory - "+video_direct+" - Directory not found!")
else:
self.video_direct = video_direct
filelist = glob.glob(os.path.join(video_direct,"*"+extension))
for vidFile in filelist:
roiFile = vidFile.replace(extension,roi_extension)
if not os.path.isfile(roiFile):
print("WARNING: Could not find ROI file ("+roiFile+") for video file ("+vidFile+")! Not adding files to processing list!!!")
else:
try:
self.video_list.append(vidFile)
except:
self.video_list = [vidFile]
try:
self.roif_list.append(roiFile)
except:
self.roif_list = [roiFile]
def ClearFileList(self):
self.video_list = []
self.Nfiles = 0
def ProcessOther(self,input_video):
#Process Metadata
this_omexmlstr = bioformats.get_omexml_metadata(input_video)
this_omexml = bioformats.OMEXML(this_omexmlstr)
these_pixels= this_omexml.image().Pixels
this_numt = these_pixels.get_SizeT()
this_numc = these_pixels.get_SizeC()
self.pix_res= these_pixels.get_PhysicalSizeX()
try:
final_time = these_pixels.Plane(index=this_numt*this_numc-1).DeltaT
self.has_time = True
if os.path.splitext(input_video)[1]==".czi":
#Zeiss microscopes don't count from zero, so need to correct for first time point
self.init_time = these_pixels.Plane(index=0).DeltaT
self.is_zeiss = True
final_time = final_time - self.init_time
else:
self.is_zeiss = False
except:
self.has_time = False
print("Warning: Unable to extract time points from movie! Please extract them by hand!")
print("Loading \""+input_video+"\" :")
if self.has_time:
print("\t\tMovie Length = "+str(np.around(final_time,decimals=2))+" seconds.")
else:
print("\t\tMovie Length = "+str(this_numt)+" frames.")
print("\t\tPixel Resolution = "+str(self.pix_res)+" um")
this_tsteps = np.zeros(this_numt,dtype=float) # We have to fill this up as we open images in bioformats...
self.roi_intensity_array = np.zeros((len(this_tsteps),1+(1+2*self.additional_rois)),dtype=int)
self.roi0_intensity_array = np.zeros((len(this_tsteps),2),dtype=int)
self.total_intensity_array = np.zeros(len(this_tsteps),dtype=float)
for self.ts in range(this_numt):
if self.has_time:
this_tsteps[self.ts] = these_pixels.Plane(index=this_numc*self.ts).DeltaT
if self.is_zeiss:
this_tsteps[self.ts] -= self.init_time
else:
this_tsteps[self.ts] = self.ts
this_nuc_frame = np.array(bioformats.load_image(input_video,c=self.nuclear_channel,t=self.ts,rescale=False))
this_nuc_path,this_nuc_points,this_nuc_fill = self.getNuclearMask(this_nuc_frame,sigma=self.threshold)
if self.ts==0:
self.first_nuc_points= np.copy(this_nuc_points)
self.first_nuc_frame = np.copy(this_nuc_frame)
self.first_nuc_fill = np.copy(this_nuc_fill)
shifted_nuc_fill = np.copy(this_nuc_fill)
else:
if self.track_nucleus:
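# phase cross-correlation against the first frame's nuclear mask gives the (row, col) drift;
# the shift is then applied to the mask, the outline points and the protein frame below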
shift,error,diffphase= register_translation(self.first_nuc_fill,this_nuc_fill)
else:
shift = np.array([0,0],dtype=int)
if (shift[0]!=0) or (shift[1]!=0):
shifted_nuc_fill = np.zeros_like(this_nuc_fill)
shifted_nuc_frame= np.zeros_like(this_nuc_frame)
N1 = len(shifted_nuc_fill)
N2 = len(shifted_nuc_fill[0])
for idx in range(N1):
for idx2 in range(N2):
if (idx - shift[0] >= 0) and (idx2 - shift[1] >= 0) and (idx - shift[0] < N1) and (idx2-shift[1] < N2):
shifted_nuc_fill[idx,idx2] = this_nuc_fill[idx-int(shift[0]),idx2-int(shift[1])]
this_nuc_points[:,0] -= int(shift[0])
this_nuc_points[:,1] -= int(shift[1])
else:
shifted_nuc_fill = np.copy(this_nuc_fill)
this_prot_frame = np.array(bioformats.load_image(input_video,c=self.protein_channel,t=self.ts,rescale=False))
if self.bg_correct:
this_chan_frame = self.correctBackground(this_prot_frame,input_video,self.protein_channel)
else:
this_chan_frame = np.copy(this_prot_frame)
this_vmin = np.min(this_prot_frame)
if self.ts == 0:
shifted_frame = np.copy(this_chan_frame)
else:
if (shift[0]!=0) or (shift[1]!=0):
shifted_frame = np.zeros_like(this_prot_frame)
N1 = len(shifted_frame)
N2 = len(shifted_frame[0])
for idx in range(N1):
for idx2 in range(N2):
if (idx-shift[0] >= 0) and (idx2-shift[1] >= 0) and (idx-shift[0] < N1) and (idx2-shift[1] < N2):
shifted_frame[idx,idx2] = this_chan_frame[idx-int(shift[0]),idx2-int(shift[1])]
else:
shifted_frame = np.copy(this_chan_frame)
if self.save_movie:
self.SaveMovieFrame(this_chan_frame,roi_path_dict=self.roi_path_dict,vmin=this_vmin,suffix='raw')
self.SaveMovieFrame(shifted_frame,roi_path_dict=self.roi_path_dict,vmin=this_vmin,suffix='shifted')
for idx in range(-self.additional_rois,self.additional_rois+1):
this_roi_buffed = self.roi_buff_dict[idx]
this_roi_path = self.roi_path_dict[idx]
this_roi_pix = np.where(np.logical_and(this_roi_buffed>0,shifted_nuc_fill>0))
roi_intensity = np.sum(shifted_frame[this_roi_pix])
this_col_id = idx + self.additional_rois + 1
self.roi_intensity_array[self.ts,this_col_id] = roi_intensity
if idx == 0:
self.roi0_intensity_array[self.ts,1] = roi_intensity
all_nuc_prot_pix = np.where(shifted_nuc_fill > 0)
total_intensity = np.sum(shifted_frame[all_nuc_prot_pix])
self.total_intensity_array[self.ts] = total_intensity
if self.bleach_frames > 0 and self.bg_correct:
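# bleach correction: each frame is scaled by <total nuclear intensity over the pre-bleach frames> / total intensity(t)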
pre_bleached = np.average(self.total_intensity_array[:self.bleach_frames])
self.bleach_corrections = np.divide(pre_bleached,self.total_intensity_array)
self.roi_intensity_array[:,1:] = self.roi_intensity_array[:,1:]\
* self.bleach_corrections[:,np.newaxis]
self.bleach_correct_tot = np.multiply(self.total_intensity_array,self.bleach_corrections)
self.normalized_intensity_array = np.copy(self.roi_intensity_array).astype(float)
for colID in range(len(self.roi_intensity_array[0,1:])):
this_col = self.normalized_intensity_array[:,1+colID].astype(float)
self.normalized_intensity_array[:,1+colID] = this_col/np.average(this_col[:self.bleach_frames])
else:
self.bleach_correct_tot = np.copy(self.total_intensity_array)
self.normalized_intensity_array = np.copy(self.roi_intensity_array).astype(float)
for colID in range(len(self.roi_intensity_array[0,1:])):
this_col = self.normalized_intensity_array[:,1+colID].astype(float)
self.normalized_intensity_array[:,1+colID] = this_col/this_col[0]
#Print the intensity timeseries
ofile = open(self.this_ofile,'w')
nofile = open(self.this_nofile,'w')
ofile.write('Input Filename: '+input_video+'\n')
nofile.write('Input Filename: '+input_video+'\n')
now = datetime.datetime.now()
ofile.write('Analysis Date: '\
+now.strftime('%d-%m-%Y %H:%M:%S')\
+'\n')
nofile.write('Analysis Date: '\
+now.strftime('%d-%m-%Y %H:%M:%S')\
+'\n')
N_columns = 2 * self.additional_rois + 1 #All the ROIs, including 0
N_columns += 1 #Account for the time column
chan_center= 1+self.additional_rois
for idx in range(N_columns):
if idx==chan_center:
ofile.write(self.protein_channel)
nofile.write(self.protein_channel)
ofile.write(',')
nofile.write(',')
ofile.write("\nTime (s)")
nofile.write("\nTime (s)")
roi_tracker = np.arange(-self.additional_rois,self.additional_rois+1)
for idx in range(N_columns-1):
ofile.write(",ROI "+str(roi_tracker[idx]))
nofile.write(",ROI "+str(roi_tracker[idx]))
ofile.write('\n')
nofile.write('\n')
for tidx in range(this_numt):
ofile.write(str(this_tsteps[tidx]/1000.))
nofile.write(str(this_tsteps[tidx]/1000.))
for cidx in range(1,N_columns):
ofile.write(","+str(self.roi_intensity_array[tidx,cidx]))
nofile.write(","+str(self.normalized_intensity_array[tidx,cidx]))
ofile.write("\n")
nofile.write("\n")
ofile.close()
nofile.close()
#Make the intensity plot
plt.figure(figsize=(6,4))
plot_array = np.genfromtxt(self.this_nofile,skip_header=4,delimiter=',')
for idx in range(self.additional_rois+1):
plt.plot(plot_array[:,0],plot_array[:,idx+1],linestyle='',
marker='.',markersize=5,label='ROI '+str(roi_tracker[idx]))
plt.xlabel("Time (s)")
plt.ylabel("Normalized Intensity (A.U.)")
plt.legend(loc=0)
plt.tight_layout()
plt.savefig(self.this_noplot,format='pdf')
if self.save_movie:
movie_basename = os.path.basename(input_video)
extension = "."+movie_basename.split(".")[1]
raw_movie_file = os.path.join(self.save_direct,
movie_basename.replace(
extension,'_raw.mp4'))
shifted_movie_file = os.path.join(self.save_direct,
movie_basename.replace(
extension,"_drift_corrected.mp4"))
if os.path.isfile(raw_movie_file):
os.remove(raw_movie_file)
if os.path.isfile(shifted_movie_file):
os.remove(shifted_movie_file)
print(shifted_movie_file)
os.system("ffmpeg -r 30 -f image2 -i "+os.path.join(self.save_direct,
"frame%04d_raw.png")+" -vcodec libx264"\
+" -crf 25 -pix_fmt yuv420p "\
+raw_movie_file+" &> raw_movie_ffmpeg.log")
os.system("ffmpeg -r 30 -f image2 -i "+os.path.join(self.save_direct,
"frame%04d_shifted.png")+" -vcodec libx264"\
+" -crf 25 -pix_fmt yuv420p "\
+shifted_movie_file+" &> drift_corrected_movie_ffmpeg.log")
movie_frames = glob.glob(os.path.join(self.save_direct,"*.png"))
for FRAME in movie_frames:
os.remove(FRAME)
def ProcessND2(self,input_video):
this_video = nd2.reader.ND2Reader(input_video)
this_tsteps = this_video.get_timesteps()
this_chans = np.array(this_video.metadata['channels'],dtype=str)
this_pix = this_video.metadata['pixel_microns']
self.pix_res= float(this_pix)
print("Loading \""+input_video+"\" :")
print("\t\tMovie length = "+str(np.around(this_tsteps[-1]/1000.,decimals=2))+" seconds.")
print("\t\tChannel Names = "+str(this_chans))
print("\t\tPixel Resolution = "+str(this_pix)+" um")
nuc_chan_check = np.where(this_chans==self.nuclear_channel)[0]
if len(nuc_chan_check)==0:
print("ERROR: Nuclear channel( \""+self.nuclear_channel+"\") not found!! Channel List = "+str(this_chans))
print("ERROR: File (\""+input_video+"\") not processed!!!")
return -1
elif len(nuc_chan_check)>1:
print("ERROR: Nuclear channel (\""+self.nuclear_channel+"\") is not unique!! Channel List = "+str(this_chans))
print("ERROR: File (\""+input_video+"\") not processed!!!")
return -1
else:
nuc_chan = nuc_chan_check[0]
prot_chan_check = np.where(this_chans==self.protein_channel)[0]
if len(prot_chan_check) == 0:
print("ERROR: Protein channel (\""+self.protein_channel+"\") not found!! Channel List = "+str(this_chans))
print("ERROR: File (\""+input_video+"\") not processed!!!")
return -1
elif len(prot_chan_check) > 1:
print("ERROR: Protein channel (\""+self.protein_channel+"\") is not unique!! Channel List = "+str(this_chans))
print("ERROR: File (\""+input_video+"\") not processed!!!")
return -1
else:
prot_chan = prot_chan_check[0]
#Build the intensity timeseries array
self.roi_intensity_array = np.zeros((len(this_tsteps),1+(1+2*self.additional_rois)),dtype=int)
self.roi0_intensity_array = np.zeros((len(this_tsteps),2),dtype=int)
self.total_intensity_array = np.zeros(len(this_tsteps),dtype=float)
for self.ts in range(len(this_video)):
this_nuc_frame = this_video.get_frame_2D(c=nuc_chan,t=self.ts)
this_nuc_path,this_nuc_points,this_nuc_fill = self.getNuclearMask(this_nuc_frame,sigma=self.threshold)
if self.ts==0:
self.first_nuc_points= np.copy(this_nuc_points)
self.first_nuc_frame = np.copy(this_nuc_frame)
self.first_nuc_fill = np.copy(this_nuc_fill)
shifted_nuc_fill= np.copy(this_nuc_fill)
else:
if self.track_nucleus:
shift,error,diffphase= register_translation(self.first_nuc_fill,this_nuc_fill)
else:
shift = np.array([0,0],dtype=int)
if (shift[0]!=0) or (shift[1]!=0):
shifted_nuc_fill = np.zeros_like(this_nuc_fill)
shifted_nuc_frame= np.zeros_like(this_nuc_frame)
N1 = len(shifted_nuc_fill)
N2 = len(shifted_nuc_fill[0])
for idx in range(N1):
for idx2 in range(N2):
if (idx - shift[0] >= 0) and (idx2 - shift[1] >= 0) and (idx - shift[0] < N1) and (idx2-shift[1] < N2):
shifted_nuc_fill[idx,idx2] = this_nuc_fill[idx-int(shift[0]),idx2-int(shift[1])]
this_nuc_points[:,0] -= int(shift[0])
this_nuc_points[:,1] -= int(shift[1])
else:
shifted_nuc_fill = np.copy(this_nuc_fill)
this_prot_frame = this_video.get_frame_2D(c=prot_chan,t=self.ts)
if self.bg_correct:
this_chan_frame = self.correctBackground(this_prot_frame,this_video,prot_chan,is_nd2=True)
else:
this_chan_frame = np.copy(this_prot_frame)
#Need to know minimum pixel count to adjust movie brightness
this_vmin = np.min(this_prot_frame)
if self.ts == 0:
shifted_frame = np.copy(this_chan_frame)
else:
if (shift[0]!=0) or (shift[1]!=0):
shifted_frame = np.zeros_like(this_prot_frame)
N1 = len(shifted_frame)
N2 = len(shifted_frame[0])
for idx in range(N1):
for idx2 in range(N2):
if (idx-shift[0] >= 0) and (idx2-shift[1] >= 0) and (idx - shift[0] < N1) and (idx2-shift[1] < N2):
shifted_frame[idx,idx2] = this_chan_frame[idx-int(shift[0]),idx2-int(shift[1])]
else:
shifted_frame = np.copy(this_chan_frame)
if self.save_movie:
self.SaveMovieFrame(this_chan_frame,roi_path_dict=self.roi_path_dict,vmin=this_vmin,suffix='raw')
self.SaveMovieFrame(shifted_frame,roi_path_dict=self.roi_path_dict,vmin=this_vmin,suffix='shifted')
for idx in range(-self.additional_rois,self.additional_rois+1):
this_roi_buffed = self.roi_buff_dict[idx]
this_roi_path = self.roi_path_dict[idx]
this_roi_pix = np.where(np.logical_and(this_roi_buffed>0,shifted_nuc_fill>0))
roi_intensity = np.sum(shifted_frame[this_roi_pix])
this_col_id = idx + self.additional_rois + 1
self.roi_intensity_array[self.ts,this_col_id] = roi_intensity
if idx==0:
self.roi0_intensity_array[self.ts,1] = roi_intensity
#Determine total intensity for bleach correction
all_nuc_prot_pix = np.where(shifted_nuc_fill > 0)
total_intensity = np.sum(shifted_frame[all_nuc_prot_pix])
self.total_intensity_array[self.ts] = total_intensity
if self.bleach_frames > 0 and self.bg_correct:
pre_bleached = np.average(self.total_intensity_array[:self.bleach_frames])
self.bleach_corrections = np.divide(pre_bleached,self.total_intensity_array)
self.roi_intensity_array[:,1:] = self.roi_intensity_array[:,1:]\
* self.bleach_corrections[:,np.newaxis]
self.bleach_correct_tot = np.multiply(self.total_intensity_array,self.bleach_corrections)
self.normalized_intensity_array= np.copy(self.roi_intensity_array).astype(float)
for colID in range(len(self.roi_intensity_array[0,1:])):
this_col = self.normalized_intensity_array[:,1+colID].astype(float)
self.normalized_intensity_array[:,1+colID] = this_col/np.average(this_col[:self.bleach_frames])
else:
self.bleach_correct_tot = np.copy(self.total_intensity_array)
self.normalized_intensity_array = np.copy(self.roi_intensity_array).astype(float)
for colID in range(len(self.roi_intensity_array[0,1:])):
this_col = self.normalized_intensity_array[:,1+colID].astype(float)
self.normalized_intensity_array[:,1+colID] = this_col/this_col[0]
#Print the intensity timeseries
ofile = open(self.this_ofile,'w')
nofile= open(self.this_nofile,'w')
ofile.write("Input Filename: "+input_video+"\n")
nofile.write("Input Filename: "+input_video+"\n")
now = datetime.datetime.now()
ofile.write("Analysis Date: "\
+now.strftime("%d-%m-%Y %H:%M:%S")\
+"\n")
nofile.write("Analysis Date: "\
+now.strftime("%d-%m-%Y %H:%M:%S")\
+"\n")
N_columns = 2*self.additional_rois + 1 #All the ROIs, including 0
N_columns += 1 #Account for the time idx
chan_center = 1+self.additional_rois
for idx in range(N_columns):
if idx == chan_center:
ofile.write(self.protein_channel)
nofile.write(self.protein_channel)
ofile.write(",")
nofile.write(",")
ofile.write("\nTime (s)")
nofile.write("\nTime (s)")
roi_tracker = np.arange(-self.additional_rois,self.additional_rois+1)
for idx in range(N_columns-1):
ofile.write(",ROI "+str(roi_tracker[idx]))
nofile.write(",ROI "+str(roi_tracker[idx]))
ofile.write("\n")
nofile.write("\n")
for tidx in range(len(this_tsteps)):
ofile.write(str(this_tsteps[tidx]/1000.))
nofile.write(str(this_tsteps[tidx]/1000.))
for cidx in range(1,N_columns):
ofile.write(","+str(self.roi_intensity_array[tidx,cidx]))
nofile.write(","+str(self.normalized_intensity_array[tidx,cidx]))
ofile.write("\n")
nofile.write("\n")
ofile.close()
nofile.close()
#Plot the intensities
plt.figure(figsize=(6,4))
plot_array = np.genfromtxt(self.this_nofile,skip_header=4,
delimiter=',')
roi_idx = range(-self.additional_rois,self.additional_rois+1)
for idx in range(self.additional_rois+1):
plt.plot(plot_array[:,0],plot_array[:,idx+1],linestyle='',
marker='.',markersize=5,label='ROI '+str(roi_idx[idx]))
plt.xlabel("Time (s)")
plt.ylabel("Normalized Intensity (A.U.)")
plt.legend(loc=0)
plt.tight_layout()
plt.savefig(self.this_noplot,format='pdf')
if self.save_movie:
movie_basename = os.path.basename(input_video)
extension = "."+movie_basename.split(".")[-1]
raw_movie_file = os.path.join(self.save_direct,
movie_basename.replace(
extension,'_raw.mp4'))
shifted_movie_file = os.path.join(self.save_direct,
movie_basename.replace(
extension,"_drift_corrected.mp4"))
if os.path.isfile(raw_movie_file):
os.remove(raw_movie_file)
if os.path.isfile(shifted_movie_file):
os.remove(shifted_movie_file)
os.system("ffmpeg -r 30 -f image2 -i "+os.path.join(self.save_direct,
"frame%04d_raw.png")+" -vcodec libx264"\
+" -crf 25 -pix_fmt yuv420p "\
+raw_movie_file+" &> raw_movie_ffmpeg.log")
os.system("ffmpeg -r 30 -f image2 -i "+os.path.join(self.save_direct,
"frame%04d_shifted.png")+" -vcodec libx264"\
+" -crf 25 -pix_fmt yuv420p "\
+shifted_movie_file+" &> drift_corrected_movie_ffmpeg.log")
movie_frames = glob.glob(os.path.join(self.save_direct,"*.png"))
for FRAME in movie_frames:
os.remove(FRAME)
def SaveMovieFrame(self,frame,roi_path_dict={},vmin=0,suffix='raw'):
if suffix=='raw':
self.raw_frame_string = os.path.join(self.save_direct,
"frame%04d"%(self.ts)+"_raw.png")
save_string = self.raw_frame_string
elif suffix=='shifted':
self.shifted_frame_string = os.path.join(self.save_direct,
"frame%04d"%(self.ts)+"_shifted.png")
save_string = self.shifted_frame_string
plt.figure(figsize=(6.,6.))
ax = plt.subplot(111)
ax.imshow(frame,vmin=vmin)
ax.plot(self.first_nuc_points[:,1],self.first_nuc_points[:,0],color='red',linewidth=1.25)
i=0
for key in roi_path_dict:
this_roi_path = roi_path_dict[key]
this_patch = patches.PathPatch(this_roi_path,facecolor='none',linewidth=1.0,edgecolor='white')
i+=1
ax.add_patch(this_patch)
scalebar = ScaleBar(self.pix_res,'um',location=4)
plt.gca().add_artist(scalebar)
plt.tight_layout()
plt.savefig(save_string,format='png',dpi=300)
plt.close('all')
def ProcessFileList(self):
self.Nfiles = len(self.video_list)
for idx in range(self.Nfiles):
input_video = self.video_list[idx]
input_roif = self.roif_list[idx]
input_ext = os.path.splitext(input_video)[1]
self.output_prefix = os.path.join(self.save_direct,
os.path.splitext(
os.path.basename(input_video))[0])
#File that contains the ROI coordinates
this_roif = np.array(bioformats.load_image(input_roif,rescale=False))
self.roi_buff_dict,self.roi_path_dict = self.growROI(this_roif,self.roi_buffer)
#Output file for intensity timeseries
self.this_ofile = os.path.join(
self.save_direct,
os.path.basename(
input_video
).replace(input_ext,'.csv')
)
self.this_nofile = self.this_ofile.replace('.csv','_normalized.csv')
self.this_noplot = self.this_nofile.replace('.csv','.pdf')
#Currently tested importing nd2 files or TIFF files...theoretically can load any bioformats file
if input_ext=='.nd2':
self.ProcessND2(input_video)
else:
self.ProcessOther(input_video)
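# Typical driver (a sketch; assumes a javabridge JVM has been started for bioformats,
# e.g. javabridge.start_vm(class_path=bioformats.JARS)):
# analyzer = RecruitmentMovieAnalyzer()
# analyzer.SetParameters(nuclear_channel='TRITC', protein_channel='EGFP', additional_rois=2)
# analyzer.LoadDirectory('/path/to/movies', extension='.nd2', roi_extension='_ROI.tif')
# analyzer.ProcessFileList()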
def getNuclearMask2(self,nuclear_frame,show_plots=False,radius=20):
temp_nuc= nuclear_frame.astype(float)
if str(self.threshold).lower() =='auto':
centerMask = np.zeros(np.shape(nuclear_frame),dtype=int)
xval= np.arange(len(centerMask[0]))
yval= np.arange(len(centerMask))
#center the values around a "zero"
xval= xval - np.median(xval)
yval= yval - np.median(yval)
xval,yval = np.meshgrid(xval,yval)
#Determine threshold in a circle at the center of the frame
#(assumes that you've centered your microscope on the cell)
centerMask[np.where(xval**2+yval**2 < radius**2)] = 1
#Calculate the mean and std intensity in the region
mean_int= np.average(temp_nuc[centerMask>0])
std_int = np.std(temp_nuc[centerMask>0])
#Determine thresholding level
self.thresh_level = mean_int - 0.5*mean_int
#Check that the threshold level isn't too low
if self.thresh_level <= 0:
thresh_fact = 0.5
while self.thresh_level < mean_int:
thresh_fact = thresh_fact - 0.1
self.thresh_level = (mean_int - thresh_fact * std_int)
else:
try:
self.thresh_level = float(self.threshold)
if np.isnan(self.thresh_level):
print("Could not understand setting for threshold ("\
+str(self.threshold_level)+"). Assuming \"auto\".")
self.threshold = 'auto'
self.getNuclearMask2(nuclear_frame,
show_plots=show_plots,
radius=radius)
except:
print("Could not understand setting for threshold ("\
+str(self.threshold_level)+"). Assuming \"auto\".")
self.threshold = 'auto'
self.getNuclearMask2(nuclear_frame,
show_plots=show_plots,
radius=radius)
#Find all points in image above threshold level
thresh_masked = np.zeros(temp_nuc.shape,dtype=int)
thresh_masked[np.where(temp_nuc>self.thresh_level)] = 1
thresh_masked = self.imclearborderAnalogue(thresh_masked,8)
thresh_masked = self.bwareaopenAnalogue(thresh_masked,500)
thresh_masked = scipy.ndimage.binary_fill_holes(thresh_masked)
labels = measure.label(thresh_masked,background=1)
props = measure.regionprops(labels)
if len(np.unique(labels))>1:
#We want the central object
best_r = 99999.9
xcent = int(len(thresh_masked)/2)
ycent = int(len(thresh_masked[0])/2)
for nuc_obj in props:
this_center = nuc_obj.centroid
this_r = np.sqrt((this_center[0] - xcent)**2\
+(this_center[1] - ycent)**2)
if this_r < best_r:
best_r = this_r
center_nuc = nuc_obj
these_pix = np.where(labels==nuc_obj.label)
elif len(np.unique(labels))==1:
these_pix = np.where(thresh_masked)
else:
print("ERROR: getNuclearMask2() could not find any nuclei! "\
+"Please specify a lower threshold.")
quit()
nuc_fill = np.zeros(np.shape(nuclear_frame),dtype=int)
nuc_fill[these_pix] = 1.0
nuc_fill = scipy.ndimage.binary_fill_holes(nuc_fill)
for idx in range(len(nuclear_frame)):
this_slice = np.where(nuc_fill[idx]>0)
if len(this_slice[0]) > 0:
this_min = this_slice[0][0]
this_max = this_slice[0][-1]
try:
nuc_points = np.vstack((nuc_points,[idx,this_min]))
except:
nuc_points = np.array([idx,this_min])
if this_max != this_min:
try:
nuc_points = np.vstack((nuc_points,[idx,this_max]))
except:
nuc_points = np.array([idx,this_max])
nuc_points = np.vstack((nuc_points,nuc_points[0]))
#Filter out the sharp edges
nuc_points[:,1] = savgol_filter(nuc_points[:,1],51,3)
nuc_path = Path(nuc_points,closed=True)
if self.ts==0:
self.saveNuclearMask(nuc_points)
return nuc_path, nuc_points, nuc_fill
def imclearborderAnalogue(self,image_frame,radius):
#Contour the image
Nx = len(image_frame)
Ny = len(image_frame[0])
img = cv2.resize(image_frame,(Nx,Ny))
contours, hierarchy = cv2.findContours(img, cv2.RETR_FLOODFILL,
cv2.CHAIN_APPROX_SIMPLE)
#Get dimensions
nrows = image_frame.shape[0]
ncols = image_frame.shape[1]
#Track contours touching the border
contourList = []
for idx in range(len(contours)):
this_contour = contours[idx]
for point in this_contour:
contour_row = point[0][1]
contour_col = point[0][0]
#Check if within radius of border, else remove
rowcheck = (contour_row >= 0 and contour_row < radius)\
or (contour_row >= nrows-1-radius and contour_row\
< nrows)
colcheck = (contour_col >= 0 and contour_col < radius)\
or (contour_col >= ncols-1-radius and contour_col\
< ncols)
if rowcheck or colcheck:
contourList.append(idx)
output_frame = image_frame.copy()
for idx in contourList:
cv2.drawContours(output_frame, contours, idx, (0,0,0), -1)
return output_frame
def bwareaopenAnalogue(self,image_frame,areaPix):
output_frame = image_frame.copy()
#First, identify all the contours
contours,hierarchy = cv2.findContours(output_frame.copy(),
cv2.RETR_FLOODFILL,
cv2.CHAIN_APPROX_SIMPLE)
#then determine occupying area of each contour
for idx in range(len(contours)):
area = cv2.contourArea(contours[idx])
if (area >= 0 and area <= areaPix):
cv2.drawContours(output_frame,contours,idx,(0,0,0),-1)
return output_frame
def getNuclearMask(self,nuclear_frame,sigma=-1):
if sigma < 0:
sigma_est = restoration.estimate_sigma(nuclear_frame)
else:
sigma_est = sigma
filtered= ndi.gaussian_filter(nuclear_frame,0.5*sigma_est)
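# morphological reconstruction by dilation estimates the smooth background, so
# filtered - dilated keeps only the bright (nuclear) features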
seed = np.copy(filtered)
seed[1:-1,1:-1] = filtered.min()
mask = np.copy(filtered)
dilated = morphology.reconstruction(seed,mask,method='dilation')
bgsub = filtered - dilated
self.lit_pxl = np.where(bgsub > np.average(bgsub))
self.lit_crd = np.vstack((self.lit_pxl[0],self.lit_pxl[1])).T
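# DBSCAN with eps=sqrt(2) links diagonally adjacent pixels, so each cluster is an
# 8-connected blob of above-average pixels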
self.clusters= DBSCAN(eps=np.sqrt(2),min_samples=2).fit(self.lit_crd)
nuc_path, nuc_points, nuc_fill = self.findNucEdgeFromClusters(nuclear_frame)
if self.ts==0:
self.saveNuclearMask(nuc_points)
return nuc_path, nuc_points, nuc_fill
def saveNuclearMask(self,nuc_points):
ofile = open(self.output_prefix+"NuclMask.txt",'w')
for idx in range(len(nuc_points)):
ofile.write(str(nuc_points[idx][0])+","\
+str(nuc_points[idx][1])+"\n")
ofile.close()
def findNucEdgeFromClusters(self,nuclear_frame):
#Assume that the cell is in the center of the frame
Nx = len(nuclear_frame)
Ny = len(nuclear_frame[0])
xMid = int(Nx/2)
yMid = int(Ny/2)
#Find the edges of each cluster
for idx in range(np.max(self.clusters.labels_)+1):
blank = np.zeros_like(nuclear_frame)
members = np.where(self.clusters.labels_ == idx)[0]
x = self.lit_crd[members,0]
y = self.lit_crd[members,1]
xbounds = np.array([np.min(x),np.max(x)])
ybounds = np.array([np.min(y),np.max(y)])
for x_idx in range(xbounds[0],xbounds[1]+1):
these_x = np.where(x == x_idx)[0]
if len(these_x) > 0:
min_y = np.min(y[these_x])
max_y = np.max(y[these_x])
try:
lower_bound= np.vstack((lower_bound,[x_idx,min_y]))
except:
lower_bound= np.array([x_idx,min_y])
try:
upper_bound= np.vstack(([x_idx,max_y],upper_bound))
except:
upper_bound= np.array([x_idx,max_y])
else:
print("Warning: No X values in this lane: "+str(x_idx))
nuc_points = np.vstack((lower_bound,upper_bound))
nuc_points = np.vstack((nuc_points,nuc_points[0]))
for idx2 in range(len(x)):
blank[x[idx2],y[idx2]] = 1
nuc_path = Path(nuc_points,closed=True)
if nuc_path.contains_point([xMid,yMid]):
break
else:
del nuc_points
del upper_bound
del lower_bound
return nuc_path,nuc_points,blank
def growROI(self,roi_file,roi_buffer):
#Store Path objects for all the requested ROIs for later callback
roi_path_dict = {}
roi_frame_dict= {}
roi_pix =
|
np.where(roi_file>0)
|
numpy.where
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 17 18:05:51 2019
@author: ben91
"""
from SimulationClasses import *
from TimeSteppingMethods import *
from FiniteVolumeSchemes import *
from FluxSplittingMethods import *
from InitialConditions import *
from Equations import *
from wholeNetworks import *
from LoadDataMethods import *
from keras import *
from keras.models import *
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as anime
from matplotlib import style
from matplotlib import rcParams
import math
style.use('fivethirtyeight')
rcParams.update({'figure.autolayout': True})
'''
# Import modules/packages
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.close('all') # close all open figures
# Define and set custom LaTeX style
styleNHN = {
"pgf.rcfonts":False,
"pgf.texsystem": "pdflatex",
"text.usetex": False, #TODO: might need to change this to false
"font.family": "serif"
}
mpl.rcParams.update(styleNHN)
xx = np.linspace(0,1,100)
yy = xx**2
# Plotting defaults
ALW = 0.75 # AxesLineWidth
FSZ = 12 # Fontsize
LW = 2 # LineWidth
MSZ = 5 # MarkerSize
SMALL_SIZE = 8 # Tiny font size
MEDIUM_SIZE = 10 # Small font size
BIGGER_SIZE = 14 # Large font size
plt.rc('font', size=FSZ) # controls default text sizes
plt.rc('axes', titlesize=FSZ) # fontsize of the axes title
plt.rc('axes', labelsize=FSZ) # fontsize of the x and y labels
plt.rc('xtick', labelsize=FSZ) # fontsize of the x-tick labels
plt.rc('ytick', labelsize=FSZ) # fontsize of the y-tick labels
plt.rc('legend', fontsize=FSZ) # legend fontsize
plt.rc('figure', titlesize=FSZ) # fontsize of the figure title
plt.rcParams['axes.linewidth'] = ALW # sets the default axes linewidth to ``ALW''
plt.rcParams["mathtext.fontset"] = 'cm' # Computer Modern mathtext font (applies when ``usetex=False'')
def discTrackStep(c,x,t,u,P,title, a, b, err):
'''
Assume shocks are at middle and end of the x domain at start
Inputs:
c: shock speed
x: x coordinates
y: y coordinates
u: velocity
P: periods advected for
err: plot error if True, otherwise plot solution
'''
u = np.transpose(u)
L = x[-1] - x[0] + x[1] - x[0]
xg, tg = np.meshgrid(x,t)
xp = xg - c*tg
plt.figure()
if err:
ons = np.ones_like(xp)
eex = np.greater(xp%L,ons)
er = eex-u
'''
plt.contourf(xp,tg,u)
plt.colorbar()
plt.title(title)
plt.figure()
plt.contourf(xp,tg,eex)
'''
for i in range(-2,int(P)):
plt.contourf(xp+i*L,tg,abs(er),np.linspace(0,0.7,20))
plt.xlim(a,b)
plt.xlabel('x-ct')
plt.ylabel('t')
plt.colorbar()
plt.title(title)
else:
for i in range(-2,int(P)+1):
plt.contourf(xp+i*L,tg,u,np.linspace(-0.2,1.2,57))
plt.xlim(a,b)
plt.xlabel('x-ct')
plt.ylabel('t')
plt.colorbar()
plt.title(title)
def intError(c,x,t,u,title):
L = x[-1] - x[0] + x[1] - x[0]
dx = x[1] - x[0]
nx = np.size(x)
xg, tg = np.meshgrid(t,x)
xp = xg - c*tg
ons = np.ones_like(xp)
#eex = np.roll(np.greater(ons,xp%L),-1,axis = 0)
eex1 = xp/dx
eex1[eex1>=1] = 1
eex1[eex1<=0] = 0
eex2 = (-xp%L-L/2)/dx
eex2[eex2>=1] = 1
eex2[eex2<=0] = 0
eex3 = (-xp%L-L/2)/dx
eex3[eex3>(nx/2-1)] = -(eex3[eex3>(nx/2-1)]-nx/2)
eex3[eex3>=1] = 1
eex3[eex3<=0] = 0
er = eex3-u
ers = np.power(er,2)
ers0 = np.expand_dims(ers[0,:],axis = 0)
ers_aug = np.concatenate((ers,ers0), axis = 0)
err_int = np.trapz(ers_aug, dx = dx, axis = 0)
plt.plot(t,np.sqrt(err_int),'.')
#plt.title(title)
plt.xlabel('Time')
plt.ylabel('L2 Error')
#plt.ylim([0,0.02])
def totalVariation(t,u,title):#plot total variation over time
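# TV(u) = sum_i |u_i - u_{i-1}| with periodic wrap via np.roll; a non-oscillatory scheme
# should keep this from growing in time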
us = np.roll(u, 1, axis = 0)
tv = np.sum(np.abs(u-us),axis = 0)
#plt.figure()
plt.plot(t,tv,'.')
#plt.title(title)
plt.xlabel('Time')
plt.ylabel('Total Variation')
#plt.ylim((1.999,2.01))
def totalEnergy(t,u, dx, title):#plot total energy
u0 = np.expand_dims(u[0,:],axis = 0)
u_aug = np.concatenate((u,u0), axis = 0)
energy = 0.5*np.trapz(np.power(u_aug,2), dx = dx, axis = 0)
plt.figure()
plt.plot(t,energy)
plt.title(title)
plt.xlabel('Time')
plt.ylabel('1/2*integral(u^2)')
plt.ylim([0,np.max(energy)*1.1])
def mwn(FVM):
'''
plot modified wavenumber of a finite volume scheme
Inputs:
FVM: finite volume method object to test
'''
nx = 100
nt = 10
L = 2
T = 0.00001
x = np.linspace(0,L,nx,endpoint=False)
t = np.linspace(0,T,nt)
dx = x[1]-x[0]
dt = t[1]-t[0]
sigma = T/dx
EQ = adv()
FS = LaxFriedrichs(EQ, 1)
RK = SSPRK3()
NK = int((np.size(x)-1)/2)
mwn = np.zeros(NK,dtype=np.complex_)
wn = np.zeros(NK)
A = 1
for k in range(2,NK):
IC = cosu(A,k/L)
testCos = Simulation(nx, nt, L, T, RK, FS, FVM, IC)
u_cos = testCos.run()
u_f_cos = u_cos[:,0]
u_l_cos = u_cos[:,-1]
IC = sinu(A,k/L)
testSin = Simulation(nx, nt, L, T, RK, FS, FVM, IC)
u_sin = testSin.run()
u_f_sin = u_sin[:,0]
u_l_sin = u_sin[:,-1]
u_h0 =np.fft.fft(u_f_cos+complex(0,1)*u_f_sin)
u_h = np.fft.fft(u_l_cos+complex(0,1)*u_l_sin)
v_h0 = u_h0[k]
v_h = u_h[k]
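# modified wavenumber of mode k: k' = -log(v_h/v_h0) / (i*sigma), the effective wavenumber
# implied by how the scheme amplifies and shifts that Fourier mode over the run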
mwn[k] = -1/(complex(0,1)*sigma)*np.log(v_h/v_h0)
wn[k] = 2*k*np.pi/nx
plt.plot(wn,np.real(mwn))
#plt.hold
plt.plot(wn,wn)
plt.xlabel(r'$\phi$')
plt.ylabel('Modified Wavenumber (real part)')
plt.figure()
plt.plot(wn,np.imag(mwn))
plt.xlabel(r'$\phi$')
plt.ylabel('Modified Wavenumber (imaginary part)')
plt.figure()
plt.semilogy(wn,abs(wn-np.real(mwn)))
return wn
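# Example usage (a sketch; assumes WENO5() from FiniteVolumeSchemes is in scope):
# wn = mwn(WENO5())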
def animateSim(x,t,u,pas):
'''
Assume shocks are at middle and end of the x domain at start
Inputs:
x: x coordinates
t: t coordinates
u: velocity
pas: how long to pause between frames
'''
for i in range(0,len(t)):
plt.plot(x,u[:,i])
plt.pause(pas)
plt.clf()
plt.plot(x,u[:,-1])
def specAnalysis(model, u, RKM,WENONN, NNNN, h, giveModel, makePlots):
'''
perform spectral analysis of a finite volume method when operating on a specific waveform
Finds eigenvalues, and then uses this to compute max
Inputs:
Model: WENO5 neural network that will be analyzed
u: the data that is the input to the method
RKM: time stepping method object to analyze for space-time coupling
wenoName: name of layer in model that gives WENO5 coefficicents
NNname: name of layer in model that gives NN coefficients
giveModel: whether or not we are passing layer names or model names
'''
if(giveModel):
pass
else:
WENONN = Model(inputs=model.input, outputs = model.get_layer(WENONN).output)
NNNN = Model(inputs=model.input, outputs = model.get_layer(NNNN).output)
adm = optimizers.adam(lr=0.0001)
WENONN.compile(optimizer=adm,loss='mean_squared_error')
NNNN.compile(optimizer=adm,loss='mean_squared_error')
N = np.size(u)
M = 5#just assume stencil size is 5 for now
sortedU = np.zeros((N,M)) + 1j*np.zeros((N,M))
for i in range(0,M):#assume scheme is upwind or unbiased
sortedU[:,i] = np.roll(u,math.floor(M/2)-i)
def scale(sortedU, NNNN):
min_u = np.amin(sortedU,1)
max_u = np.amax(sortedU,1)
const_n = min_u==max_u
#print('u: ', u)
u_tmp = np.zeros_like(sortedU[:,2])
u_tmp[:] = sortedU[:,2]
#for i in range(0,5):
# sortedU[:,i] = (sortedU[:,i]-min_u)/(max_u-min_u)
cff = NNNN.predict(sortedU)#compute \Delta u
cff[const_n,:] = np.array([1/30,-13/60,47/60,9/20,-1/20])
#print('fl: ', fl)
return cff
if(np.sum(np.iscomplex(u))>=1):
wec = WENONN.predict(np.real(sortedU)) + WENONN.predict(np.imag(sortedU))*1j
nnc = scale(np.real(sortedU), NNNN) + scale(np.imag(sortedU), NNNN)*1j
op_WENO5 = np.zeros((N,N)) + np.zeros((N,N))*1j
op_NN = np.zeros((N,N)) + np.zeros((N,N))*1j
else:
wec = WENONN.predict(np.real(sortedU))
nnc = scale(np.real(sortedU), NNNN)
op_WENO5 = np.zeros((N,N))
op_NN = np.zeros((N,N))
for i in range(0,N):
for j in range(0,M):
op_WENO5[i,(i+j-int(M/2))%N] -= wec[i,j]
op_WENO5[i,(i+j-int(M/2)-1)%N] += wec[(i-1)%N,j]
op_NN[i,(i+j-int(M/2))%N] -= nnc[i,j]
op_NN[i,(i+j-int(M/2)-1)%N] += nnc[(i-1)%N,j]
#print(i,': ', op_WENO5[i,:])
WEeigs, WEvecs = np.linalg.eig(op_WENO5)
NNeigs, NNvecs = np.linalg.eig(op_NN)
con_nn = np.linalg.solve(NNvecs, u)
#now do some rungekutta stuff
x = np.linspace(-3,3,301)
y = np.linspace(-3,3,301)
X,Y = np.meshgrid(x,y)
Z = X + Y*1j
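# |R(z)| for the third-order Runge-Kutta update, R(z) = 1 + z + z^2/2 + z^3/6,
# evaluated on a grid of z and then at z = h*eigenvalue for each scheme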
g = abs(1 + Z + np.power(Z,2)/2 + np.power(Z,3)/6)
g_we = abs(1 + (h*WEeigs) + np.power(h*WEeigs,2)/2 + np.power(h*WEeigs,3)/6)
g_nn = abs(1 + (h*NNeigs) + np.power(h*NNeigs,2)/2 + np.power(h*NNeigs,3)/6)
#do some processing for that plot of the contributions vs the amplification factor
c_abs = np.abs(con_nn)
ords = np.argsort(c_abs)
g_sort = g_nn[ords]
c_sort = con_nn[ords]
c_norm = c_sort/np.linalg.norm(c_sort,1)
c_abs2 = np.abs(c_norm)
#do some processing for the most unstable mode
ordsG = np.argsort(g_nn)
unstb = NNvecs[:,ordsG[-1]]
if(makePlots>=1):
plt.figure()
plt.plot(np.sort(g_we),'.')
plt.plot(np.sort(g_nn),'.')
plt.legend(('WENO5','NN'))
plt.title('CFL = '+ str(h))
plt.xlabel('index')
plt.ylabel('|1+HL+(HL^2)/2+(HL^3)/6|')
plt.ylim([0,1.2])
plt.figure()
plt.plot(np.real(WEeigs),np.imag(WEeigs),'.')
plt.plot(np.real(NNeigs),np.imag(NNeigs),'.')
plt.title('Eigenvalues')
plt.legend(('WENO5','NN'))
plt.figure()
plt.plot(g_nn,abs(con_nn),'.')
plt.xlabel('Amplification Factor')
plt.ylabel('Contribution')
print('Max WENO g: ',np.max(g_we))
print('Max NN g: ',np.max(g_nn))
if(makePlots>=2):
plt.figure()
sml = 1E-2
plt.contourf(X, Y, g, [1-sml,1+sml])
plt.figure()
plt.plot(g_sort,c_abs2,'.')
plt.xlabel('Scaled Amplification Factor')
plt.ylabel('Contribution')
return g_nn, con_nn, unstb
#return np.max(g_we), np.max(g_nn)
#plt.contourf(xp+i*L,tg,abs(er),np.linspace(0,0.025,20))
def specAnalysisData(model, u, RKM,WENONN, NNNN, CFL, giveModel):
nx, nt = np.shape(u)
if(giveModel):
pass
else:
WENONN = Model(inputs=model.input, outputs = model.get_layer(WENONN).output)
NNNN = Model(inputs=model.input, outputs = model.get_layer(NNNN).output)
adm = optimizers.adam(lr=0.0001)
WENONN.compile(optimizer=adm,loss='mean_squared_error')
NNNN.compile(optimizer=adm,loss='mean_squared_error')
maxWe = np.zeros(nt)
maxNN = np.zeros(nt)
for i in range(0,nt):
print(i)
maxWe[i], maxNN[i] = specAnalysis(model, u[:,i], RKM, WENONN, NNNN, CFL, True, False)
plt.figure()
plt.plot(maxWe)
plt.figure()
plt.plot(maxNN)
return maxWe, maxNN
def eigenvectorProj(model, u, WENONN, NNNN):
nx = np.shape(u)
WENONN = Model(inputs=model.input, outputs = model.get_layer(WENONN).output)
NNNN = Model(inputs=model.input, outputs = model.get_layer(NNNN).output)
adm = optimizers.adam(lr=0.0001)
WENONN.compile(optimizer=adm,loss='mean_squared_error')
NNNN.compile(optimizer=adm,loss='mean_squared_error')
def evalPerf(x,t,P,u,eex):
'''
Assume shocks are at middle and end of the x domain at start
Inputs:
x: x coordinates
y: y coordinates
P: periods advected for
u: velocity
Outputs:
tvm: max total variation in solution
swm: max shock width in solution
'''
us = np.roll(u, 1, axis = 0)
tv = np.sum(np.abs(u-us),axis = 0)
tvm = np.max(tv)
u = np.transpose(u)
er = np.abs(eex-u)
wdth = np.sum(np.greater(er,0.005),axis=1)
swm = np.max(wdth)
print(tvm)
print(swm)
return tvm, swm
'''
def plotDiscWidth(x,t,P,u,u_WE):
'''
#plot width of discontinuity over time for neural network and WENO5
'''
us = np.roll(u, 1, axis = 0)
u = np.transpose(u)
L = x[-1] - x[0] + x[1] - x[0]
xg, tg = np.meshgrid(x,t)
xp = xg - tg
ons = np.ones_like(xp)
eex = np.greater(xp%L,ons)
er = np.abs(eex-u)
wdth = np.sum(np.greater(er,0.005),axis=1)
swm = np.max(wdth)
print(tvm)
print(swm)
return tvm, swm
'''
def plotDiscWidth(x,t,P,u,u_WE):
'''
plot width of discontinuity over time for neural network and WENO5
'''
u = np.transpose(u)
u_WE = np.transpose(u_WE)
L = x[-1] - x[0] + x[1] - x[0]
xg, tg = np.meshgrid(x,t)
xp = xg - tg
ons = np.ones_like(xp)
dx = x[1]-x[0]
'''
eex = (-xp%L-L/2)/dx
eex[eex>49] = -(eex[eex>49]-50)
eex[eex>=1] = 1
eex[eex<=0] = 0
'''
eex = np.greater(xp%L,ons)
er = np.abs(eex-u)
er_we = np.abs(eex-u_WE)
wdth = np.sum(np.greater(er,0.01),axis=1)*dx/2
wdth_we = np.sum(np.greater(er_we,0.01),axis=1)*dx/2
plt.figure()
plt.plot(t,wdth)
plt.plot(t,wdth_we)
plt.legend(('Neural Network','WENO5'))
plt.xlabel('t')
plt.ylabel('Discontinuity Width')
def convStudy():
'''
Test order of accuracy of an FVM
'''
nr = 21
errNN = np.zeros(nr)
errWE = np.zeros(nr)
errEN = np.zeros(nr)
dxs = np.zeros(nr)
for i in range(0,nr):
print(i)
nx = 10*np.power(10,0.1*i)
L = 2
x = np.linspace(0,L,int(nx),endpoint=False)
dx = x[1]-x[0]
FVM1 = NNWENO5dx(dx)
FVM2 = WENO5()
FVM3 = ENO3()
u = np.sin(4*np.pi*x) + np.cos(4*np.pi*x)
du = 4*np.pi*(np.cos(4*np.pi*x)-np.sin(4*np.pi*x))
resNN = FVM1.evalF(u)
resWE = FVM2.evalF(u)
resEN = FVM3.evalF(u)
du_EN = (resEN-np.roll(resEN,1))/dx
du_NN = (resNN-np.roll(resNN,1))/dx
du_WE = (resWE-np.roll(resWE,1))/dx
errNN[i] = np.linalg.norm(du_NN-du,ord = 2)/np.sqrt(nx)
errEN[i] = np.linalg.norm(du_EN-du,ord = 2)/np.sqrt(nx)
errWE[i] = np.linalg.norm(du_WE-du,ord = 2)/np.sqrt(nx)
dxs[i] = dx
nti = 6
toRegDx = np.ones((nti,2))
toRegDx[:,1] = np.log10(dxs[-nti:])
toRegWe = np.log10(errWE[-nti:])
toRegNN = np.log10(errNN[-nti:])
toRegEN = np.log10(errEN[-nti:])
c_we, m_we = np.linalg.lstsq(toRegDx, toRegWe, rcond=None)[0]
c_nn, m_nn = np.linalg.lstsq(toRegDx, toRegNN, rcond=None)[0]
c_en, m_en = np.linalg.lstsq(toRegDx, toRegEN, rcond=None)[0]
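# the fitted log-log slopes m_* are the observed orders of accuracy (error ~ C*dx^m)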
print('WENO5 slope: ',m_we)
print('NN slope: ',m_nn)
print('ENO3 slope: ',m_en)
plt.loglog(dxs,errNN,'o')
plt.loglog(dxs,errWE,'o')
plt.loglog(dxs,errEN,'o')
plt.loglog(dxs,(10**c_we)*(dxs**m_we))
plt.loglog(dxs,(10**c_nn)*(dxs**m_nn))
plt.loglog(dxs,(10**c_en)*(dxs**m_en))
plt.legend(['WENO-NN','WENO5-JS','ENO3'])
plt.xlabel(r'$\Delta x$')
plt.ylabel('$E$')
def plot_visc(x,t,uv,FVM,P,NN,contours):
nx, nt = np.shape(uv)
L = x[-1] - x[0] + x[1] - x[0]
xg, tg = np.meshgrid(x,t)
xp = xg - tg
def scheme(u,NN):
ust = np.zeros_like(u)
ust = ust + u
min_u = np.amin(u,1)
max_u = np.amax(u,1)
const_n = min_u==max_u
#print('u: ', u)
u_tmp = np.zeros_like(u[:,2])
u_tmp[:] = u[:,2]
for i in range(0,5):
u[:,i] = (u[:,i]-min_u)/(max_u-min_u)
ep = 1E-6
#compute fluxes on sub stencils (similar to derivatives I guess)
f1 = 1/3*u[:,0]-7/6*u[:,1]+11/6*u[:,2]
f2 = -1/6*u[:,1]+5/6*u[:,2]+1/3*u[:,3]
f3 = 1/3*u[:,2]+5/6*u[:,3]-1/6*u[:,4]
#compute derivatives on sub stencils
justU = 1/30*ust[:,0]-13/60*ust[:,1]+47/60*ust[:,2]+9/20*ust[:,3]-1/20*ust[:,4]
dudx = 0*ust[:,0]+1/12*ust[:,1]-5/4*ust[:,2]+5/4*ust[:,3]-1/12*ust[:,4]
dudx = (dudx - np.roll(dudx,1))
d2udx2 = -1/4*ust[:,0]+3/2*ust[:,1]-2*ust[:,2]+1/2*ust[:,3]+1/4*ust[:,4]
d2udx2 = (d2udx2 - np.roll(d2udx2,1))
d3udx3 = 0*ust[:,0]-1*ust[:,1]+3*ust[:,2]-3*ust[:,3]+1*ust[:,4]
d3udx3 = (d3udx3 - np.roll(d3udx3,1))
#compute smoothness indicators
B1 = 13/12*np.power(u[:,0]-2*u[:,1]+u[:,2],2) + 1/4*np.power(u[:,0]-4*u[:,1]+3*u[:,2],2)
B2 = 13/12*np.power(u[:,1]-2*u[:,2]+u[:,3],2) + 1/4*np.power(u[:,1]-u[:,3],2)
B3 = 13/12*np.power(u[:,2]-2*u[:,3]+u[:,4],2) + 1/4*np.power(3*u[:,2]-4*u[:,3]+u[:,4],2)
#assign linear weights
g1 = 1/10
g2 = 3/5
g3 = 3/10
#compute the unscaled nonlinear weights
wt1 = g1/np.power(ep+B1,2)
wt2 = g2/np.power(ep+B2,2)
wt3 = g3/np.power(ep+B3,2)
wts = wt1 + wt2 + wt3
#scale the nonlinear weights
w1 = wt1/wts
w2 = wt2/wts
w3 = wt3/wts
#compute the coefficients
c1 = np.transpose(np.array([1/3*w1,-7/6*w1-1/6*w2,11/6*w1+5/6*w2+1/3*w3,1/3*w2+5/6*w3,-1/6*w3]))
#fl = np.multiply(fl,(max_u-min_u))+min_u
if(NN):
A1 = np.array([[-0.94130915, -0.32270527, -0.06769955],
[-0.37087336, -0.05059665, 0.55401474],
[ 0.40815187, -0.5602299 , -0.01871526],
[ 0.56200236, -0.5348897 , -0.04091108],
[-0.6982639 , -0.49512517, 0.52821904]])
b1 = np.array([-0.04064859, 0. , 0. ])
c2 = np.maximum(np.matmul(c1,A1)+b1,0)
A2 = np.array([[ 0.07149544, 0.9637294 , 0.41981453],
[ 0.75602794, -0.0222342 , -0.95690656],
[ 0.07406807, -0.41880417, -0.4687035 ]])
b2 = np.array([-0.0836111 , -0.00330033, -0.01930024])
c3 = np.maximum(np.matmul(c2,A2)+b2,0)
A3 = np.array([[ 0.8568574 , -0.5809458 , 0.04762125],
[-0.26066098, -0.23142155, -0.6449008 ],
[ 0.7623346 , 0.81388015, -0.03217626]])
b3 = np.array([-0.0133561 , -0.05374921, 0. ])
c4 = np.maximum(np.matmul(c3,A3)+b3,0)
A4 = np.array([[-0.2891752 , -0.53783405, -0.17556567, -0.7775279 , 0.69957024],
[-0.12895434, 0.13607207, 0.12294354, 0.29842544, -0.00198237],
[ 0.5356503 , 0.09317833, 0.5135357 , -0.32794708, 0.13765627]])
b4 = np.array([ 0.00881096, 0.01138764, 0.00464343, 0.0070305 , -0.01644066])
dc = np.matmul(c4,A4)+b4
ct = c1 - dc
Ac = np.array([[-0.2, -0.2, -0.2, -0.2, -0.2],
[-0.2, -0.2, -0.2, -0.2, -0.2],
[-0.2, -0.2, -0.2, -0.2, -0.2],
[-0.2, -0.2, -0.2, -0.2, -0.2],
[-0.2, -0.2, -0.2, -0.2, -0.2]])
bc = np.array([0.2, 0.2, 0.2, 0.2, 0.2])
dc2 = np.matmul(ct,Ac)+bc
C = ct + dc2
Cons = C[:,0] + C[:,1] + C[:,2] + C[:,3] + C[:,4]
C_visc = -5/2*C[:,0] - 3/2*C[:,1] - 1/2*C[:,2] + 1/2*C[:,3] + 3/2*C[:,4]
C_visc2 = 19/6*C[:,0] + 7/6*C[:,1] + 1/6*C[:,2] + 1/6*C[:,3] + 7/6*C[:,4]
C_visc3 = -65/24*C[:,0] - 5/8*C[:,1] - 1/24*C[:,2] + 1/24*C[:,3] + 5/8*C[:,4]
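# weighted sums of the reconstruction coefficients: Cons is a consistency check (should be ~1),
# while C_visc, C_visc2, C_visc3 are scaled by dx, dx^2, dx^3 below and interpreted as the
# scheme's effective numerical viscosity coefficients on successive derivatives of u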
C_visc = C_visc.flatten()
C_visc[const_n] = 0#if const across stencil, there was no viscosity
C_visc2[const_n] = 0#if const across stencil, there was no viscosity
C_visc3[const_n] = 0#if const across stencil, there was no viscosity
else:
Cons = c1[:,0] + c1[:,1] + c1[:,2] + c1[:,3] + c1[:,4]
C_visc = (-5/2*c1[:,0] - 3/2*c1[:,1] - 1/2*c1[:,2] + 1/2*c1[:,3] + 3/2*c1[:,4])
C_visc2 = (19/6*c1[:,0] + 7/6*c1[:,1] + 1/6*c1[:,2] + 1/6*c1[:,3] + 7/6*c1[:,4])
C_visc3 = (-65/24*c1[:,0] - 5/8*c1[:,1] - 1/24*c1[:,2] + 1/24*c1[:,3] + 5/8*c1[:,4])
C_visc[const_n] = 0#if const across stencil, there was no viscosity
C_visc2[const_n] = 0#if const across stencil, there was no viscosity
C_visc3[const_n] = 0#if const across stencil, there was no viscosity
return Cons,-C_visc,-C_visc2,-C_visc3, dudx, d2udx2, d3udx3
C_ = np.zeros_like(uv)
C_i = np.zeros_like(uv)
C_ii = np.zeros_like(uv)
C_iii = np.zeros_like(uv)
d_i = np.zeros_like(uv)
d_ii = np.zeros_like(uv)
d_iii = np.zeros_like(uv)
for i in range(0,nt):
u_part = FVM.partU(uv[:,i])
C_[:,i],C_i[:,i],C_ii[:,i],C_iii[:,i],d_i[:,i],d_ii[:,i],d_iii[:,i] = scheme(u_part,NN)
dx = x[1]-x[0]
C_ = np.transpose(C_)
C_i = np.transpose(C_i)*dx
C_ii = np.transpose(C_ii)*dx**2
C_iii = np.transpose(C_iii)*dx**3
d_i = np.transpose(d_i)/(dx**2)
d_ii = np.transpose(d_ii)/(dx**3)
d_iii = np.transpose(d_iii)/(dx**4)
indFirst = 100#ignore 1st few timesteps for scaling plots due to discontinuity
if(contours):
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=(12, 4))
for i in range(-2,int(P)+1):
xtp = xp+i*L
maxes = np.amax(xtp,axis = 1)
mines = np.amin(xtp,axis = 1)
gdi = mines<=2
gda = maxes>=0
indsTP = gdi & gda
if(np.sum(indsTP)>0):
first = ax1.contourf(xtp[indsTP,:],tg[indsTP,:],C_i[indsTP,:]*np.abs(d_i[indsTP,:]),np.linspace(-0.3,0.3,100))
#first = ax1.contourf(xtp[indsTP,:],tg[indsTP,:],C_i[indsTP,:]*np.abs(d_i[indsTP,:]),np.linspace(np.min((C_i*np.abs(d_i))[indFirst:,:]),np.max((C_i*np.abs(d_i))[indFirst:,:]),100))
#first = ax1.contourf(xp+i*L,tg,C_i*np.abs(d_i),np.linspace(np.min((C_i*np.abs(d_i))[indFirst:,:]),np.max((C_i*np.abs(d_i))[indFirst:,:]),100))
np.savetxt('firstXTP'+str(i)+'.csv',xtp[indsTP,:])
np.savetxt('firstTP'+str(i)+'.csv',tg[indsTP,:])
np.savetxt('firstVisc'+str(i)+'.csv',C_i[indsTP,:]*np.abs(d_i[indsTP,:]))
ax1.set_title('(A)')
ax1.set_xlim(x[0],x[-1])
ax1.set_xlabel('$x-ct$')
ax1.set_ylabel('$t$')
for i in range(-2,int(P)+1):
xtp = xp+i*L
maxes = np.amax(xtp,axis = 1)
mines = np.amin(xtp,axis = 1)
gdi = mines<=2
gda = maxes>=0
indsTP = gdi & gda
if(np.sum(indsTP)>0):
second = ax2.contourf(xtp[indsTP,:],tg[indsTP,:],C_ii[indsTP,:]*np.abs(d_ii[indsTP,:]),np.linspace(-0.3,0.3,100))
#second = ax2.contourf(xp+i*L,tg,C_ii*np.abs(d_ii),np.linspace(np.min((C_ii*np.abs(d_ii))[indFirst:,:]),np.max((C_ii*np.abs(d_ii))[indFirst:,:]),100))
np.savetxt('secondXTP'+str(i)+'.csv',xtp[indsTP,:])
np.savetxt('secondTP'+str(i)+'.csv',tg[indsTP,:])
np.savetxt('secondVisc'+str(i)+'.csv',C_ii[indsTP,:]*np.abs(d_ii[indsTP,:]))
ax2.set_title('(B)')
ax2.set_xlim(x[0],x[-1])
ax2.set_xlabel('$x-ct$')
for i in range(-2,int(P)+1):
xtp = xp+i*L
maxes = np.amax(xtp,axis = 1)
mines = np.amin(xtp,axis = 1)
gdi = mines<=2
gda = maxes>=0
indsTP = gdi & gda
if(np.sum(indsTP)>0):
third = ax3.contourf(xtp[indsTP,:],tg[indsTP,:],C_iii[indsTP,:]*np.abs(d_iii[indsTP,:]),np.linspace(-0.3,0.3,100))
np.savetxt('thirdXTP'+str(i)+'.csv',xtp[indsTP,:])
np.savetxt('thirdTP'+str(i)+'.csv',tg[indsTP,:])
np.savetxt('thirdVisc'+str(i)+'.csv',C_iii[indsTP,:]*np.abs(d_iii[indsTP,:]))
#third = ax3.contourf(xp+i*L,tg,C_iii*np.abs(d_iii),np.linspace(np.min((C_iii*np.abs(d_iii))[indFirst:,:]),np.max((C_iii*np.abs(d_iii))[indFirst:,:]),100))
ax3.set_title('(C)')
ax3.set_xlim(x[0],x[-1])
ax3.set_xlabel('$x-ct$')
f.subplots_adjust(right=0.8)
#cbar_ax1 = f.add_axes([.72, 0.15, 0.05, 0.7])
#cbar_ax2 = f.add_axes([.82, 0.15, 0.05, 0.7])
#cbar_ax3 = f.add_axes([.92, 0.15, 0.05, 0.7])
#f.colorbar(first, cax=cbar_ax1)
#f.colorbar(second, cax=cbar_ax2)
#f.colorbar(third, cax=cbar_ax3)
#f.colorbar(first, ax=ax1)
#f.colorbar(second, ax=ax2)
#f.colorbar(third, ax=ax3)
f.tight_layout()
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([.82, 0.15, 0.05, 0.7])
f.colorbar(third, cax=cbar_ax)
#f.tight_layout()
else:
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=(15, 4))
ax1.plot(x,C_i[150,:]*np.abs(d_i[150,:]))
ax1.plot(x,C_i[1500,:]*np.abs(d_i[1500,:]))
ax1.plot(x,C_i[3750,:]*np.abs(d_i[3750,:]))
ax1.plot(x,C_i[7500,:]*np.abs(d_i[7500,:]))
ax1.set_title('(A)')
ax1.set_xlabel('$x$')
ax1.set_ylabel('$t$')
ax2.plot(x,C_ii[150,:]*np.abs(d_ii[150,:]))
ax2.plot(x,C_ii[1500,:]*np.abs(d_ii[1500,:]))
ax2.plot(x,C_ii[3750,:]*np.abs(d_ii[3750,:]))
ax2.plot(x,C_ii[7500,:]*np.abs(d_ii[7500,:]))
ax2.set_title('(B)')
ax2.set_xlabel('$x$')
ax3.plot(x,C_iii[150,:]*np.abs(d_iii[150,:]))
ax3.plot(x,C_iii[1500,:]*np.abs(d_iii[1500,:]))
ax3.plot(x,C_iii[3750,:]*np.abs(d_iii[3750,:]))
ax3.plot(x,C_iii[7500,:]*np.abs(d_iii[7500,:]))
ax3.set_title('(C)')
ax3.set_xlabel('$x$')
ax3.legend(('$t=2$','$t=20$','$t=50$','$t=100$'))
def plot_visc_new(x,t,uv,FVM,P,NN,contours):
nx, nt = np.shape(uv)
L = x[-1] - x[0] + x[1] - x[0]
xg, tg = np.meshgrid(x,t)
xp = xg - tg
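    # co-moving coordinate x - c t; the advection speed is assumed to be c = 1 here,
    # consistent with the '$x-ct$' axis labels used on the contour plots below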
def scheme(u,NN):
        ust = u.copy()  # unnormalized copy of the stencil values (u itself is rescaled below)
min_u = np.amin(u,1)
max_u = np.amax(u,1)
const_n = min_u==max_u
#print('u: ', u)
u_tmp = np.zeros_like(u[:,2])
u_tmp[:] = u[:,2]
for i in range(0,5):
u[:,i] = (u[:,i]-min_u)/(max_u-min_u)
ep = 1E-6
        #candidate flux reconstructions on the three 3-point sub-stencils (not used further in this routine)
f1 = 1/3*u[:,0]-7/6*u[:,1]+11/6*u[:,2]
f2 = -1/6*u[:,1]+5/6*u[:,2]+1/3*u[:,3]
f3 = 1/3*u[:,2]+5/6*u[:,3]-1/6*u[:,4]
#compute derivatives on sub stencils
justU = 1/30*ust[:,0]-13/60*ust[:,1]+47/60*ust[:,2]+9/20*ust[:,3]-1/20*ust[:,4]
dudx = 0*ust[:,0]+1/12*ust[:,1]-5/4*ust[:,2]+5/4*ust[:,3]-1/12*ust[:,4]
deriv2 = (dudx - np.roll(dudx,1))
d2udx2 = -1/4*ust[:,0]+3/2*ust[:,1]-2*ust[:,2]+1/2*ust[:,3]+1/4*ust[:,4]
deriv3 = (d2udx2 - np.roll(d2udx2,1))
d3udx3 = 0*ust[:,0]-1*ust[:,1]+3*ust[:,2]-3*ust[:,3]+1*ust[:,4]
deriv4 = (d3udx3 - np.roll(d3udx3,1))
#compute smoothness indicators
B1 = 13/12*np.power(u[:,0]-2*u[:,1]+u[:,2],2) + 1/4*np.power(u[:,0]-4*u[:,1]+3*u[:,2],2)
B2 = 13/12*np.power(u[:,1]-2*u[:,2]+u[:,3],2) + 1/4*np.power(u[:,1]-u[:,3],2)
B3 = 13/12*np.power(u[:,2]-2*u[:,3]+u[:,4],2) + 1/4*np.power(3*u[:,2]-4*u[:,3]+u[:,4],2)
#assign linear weights
g1 = 1/10
g2 = 3/5
g3 = 3/10
#compute the unscaled nonlinear weights
wt1 = g1/np.power(ep+B1,2)
wt2 = g2/np.power(ep+B2,2)
wt3 = g3/np.power(ep+B3,2)
wts = wt1 + wt2 + wt3
#scale the nonlinear weights
w1 = wt1/wts
w2 = wt2/wts
w3 = wt3/wts
#compute the coefficients
c1 = np.transpose(np.array([1/3*w1,-7/6*w1-1/6*w2,11/6*w1+5/6*w2+1/3*w3,1/3*w2+5/6*w3,-1/6*w3]))
#fl = np.multiply(fl,(max_u-min_u))+min_u
if(NN):
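            # The arrays A1..A4 / b1..b4 below appear to be hard-coded weights of a small
            # ReLU network that maps the WENO coefficients c1 to a correction dc; the final
            # affine step (Ac, bc) redistributes the correction so the modified coefficients
            # sum to one, preserving consistency of the reconstruction.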
A1 = np.array([[-0.94130915, -0.32270527, -0.06769955],
[-0.37087336, -0.05059665, 0.55401474],
[ 0.40815187, -0.5602299 , -0.01871526],
[ 0.56200236, -0.5348897 , -0.04091108],
[-0.6982639 , -0.49512517, 0.52821904]])
b1 = np.array([-0.04064859, 0. , 0. ])
c2 = np.maximum(np.matmul(c1,A1)+b1,0)
A2 = np.array([[ 0.07149544, 0.9637294 , 0.41981453],
[ 0.75602794, -0.0222342 , -0.95690656],
[ 0.07406807, -0.41880417, -0.4687035 ]])
b2 = np.array([-0.0836111 , -0.00330033, -0.01930024])
c3 = np.maximum(np.matmul(c2,A2)+b2,0)
A3 = np.array([[ 0.8568574 , -0.5809458 , 0.04762125],
[-0.26066098, -0.23142155, -0.6449008 ],
[ 0.7623346 , 0.81388015, -0.03217626]])
b3 = np.array([-0.0133561 , -0.05374921, 0. ])
c4 = np.maximum(np.matmul(c3,A3)+b3,0)
A4 = np.array([[-0.2891752 , -0.53783405, -0.17556567, -0.7775279 , 0.69957024],
[-0.12895434, 0.13607207, 0.12294354, 0.29842544, -0.00198237],
[ 0.5356503 , 0.09317833, 0.5135357 , -0.32794708, 0.13765627]])
b4 = np.array([ 0.00881096, 0.01138764, 0.00464343, 0.0070305 , -0.01644066])
dc = np.matmul(c4,A4)+b4
ct = c1 - dc
Ac = np.array([[-0.2, -0.2, -0.2, -0.2, -0.2],
[-0.2, -0.2, -0.2, -0.2, -0.2],
[-0.2, -0.2, -0.2, -0.2, -0.2],
[-0.2, -0.2, -0.2, -0.2, -0.2],
[-0.2, -0.2, -0.2, -0.2, -0.2]])
bc = np.array([0.2, 0.2, 0.2, 0.2, 0.2])
dc2 = np.matmul(ct,Ac)+bc
C = ct + dc2
Cons = C[:,0] + C[:,1] + C[:,2] + C[:,3] + C[:,4]
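            # Assumption: C_visc, C_visc2 and C_visc3 are moment-like combinations of the
            # coefficients that estimate the leading dissipation/dispersion coefficients of
            # the scheme; the caller pairs them with the undivided differences deriv2/3/4.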
C_visc = -5/2*C[:,0] - 3/2*C[:,1] - 1/2*C[:,2] + 1/2*C[:,3] + 3/2*C[:,4]
C_visc2 = 19/6*C[:,0] + 7/6*C[:,1] + 1/6*C[:,2] + 1/6*C[:,3] + 7/6*C[:,4]
C_visc3 = -65/24*C[:,0] - 5/8*C[:,1] - 1/24*C[:,2] + 1/24*C[:,3] + 5/8*C[:,4]
C_visc = C_visc.flatten()
C_visc[const_n] = 0#if const across stencil, there was no viscosity
C_visc2[const_n] = 0#if const across stencil, there was no viscosity
C_visc3[const_n] = 0#if const across stencil, there was no viscosity
else:
Cons = c1[:,0] + c1[:,1] + c1[:,2] + c1[:,3] + c1[:,4]
C_visc = (-5/2*c1[:,0] - 3/2*c1[:,1] - 1/2*c1[:,2] + 1/2*c1[:,3] + 3/2*c1[:,4])
C_visc2 = (19/6*c1[:,0] + 7/6*c1[:,1] + 1/6*c1[:,2] + 1/6*c1[:,3] + 7/6*c1[:,4])
C_visc3 = (-65/24*c1[:,0] - 5/8*c1[:,1] - 1/24*c1[:,2] + 1/24*c1[:,3] + 5/8*c1[:,4])
C_visc[const_n] = 0#if const across stencil, there was no viscosity
C_visc2[const_n] = 0#if const across stencil, there was no viscosity
C_visc3[const_n] = 0#if const across stencil, there was no viscosity
C_visc = (C_visc+np.roll(C_visc,1))/2
C_visc2 = (C_visc2+np.roll(C_visc2,1))/2
C_visc3 = (C_visc3+np.roll(C_visc3,1))/2
return Cons,-C_visc,-C_visc2,C_visc3, deriv2, deriv3, deriv4
C_ = np.zeros_like(uv)
C_i = np.zeros_like(uv)
C_ii = np.zeros_like(uv)
C_iii = np.zeros_like(uv)
d_i = np.zeros_like(uv)
d_ii = np.zeros_like(uv)
d_iii = np.zeros_like(uv)
for i in range(0,nt):
u_part = FVM.partU(uv[:,i])
C_[:,i],C_i[:,i],C_ii[:,i],C_iii[:,i],d_i[:,i],d_ii[:,i],d_iii[:,i] = scheme(u_part,NN)
dx = x[1]-x[0]
C_ = np.transpose(C_)
C_i = np.transpose(C_i)*(dx**2)
C_ii = np.transpose(C_ii)*(dx**3)
C_iii = np.transpose(C_iii)*(dx**4)
d_i = np.transpose(d_i)/(dx**2)
d_ii = np.transpose(d_ii)/(dx**3)
d_iii = np.transpose(d_iii)/(dx**4)
    indFirst = 100  # ignore 1st few timesteps for scaling plots due to discontinuity
if(contours):
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=(12, 4))
num_cont = 10
cobarlim = 0.003
for i in range(-2,int(P)+1):
xtp = xp+i*L
maxes = np.amax(xtp,axis = 1)
mines = np.amin(xtp,axis = 1)
gdi = mines<=2
gda = maxes>=0
indsTP = gdi & gda
if(np.sum(indsTP)>0):
first = ax1.contourf(xtp[indsTP,:],tg[indsTP,:],C_i[indsTP,:]*np.abs(d_i[indsTP,:]),np.linspace(-cobarlim,cobarlim,num_cont))
np.savetxt('firstXTP'+str(i)+'.csv',xtp[indsTP,:])
np.savetxt('firstTP'+str(i)+'.csv',tg[indsTP,:])
np.savetxt('firstVisc'+str(i)+'.csv',C_i[indsTP,:]*np.abs(d_i[indsTP,:]))
#first = ax1.contourf(xtp[indsTP,:],tg[indsTP,:],C_i[indsTP,:]*np.abs(d_i[indsTP,:]),np.linspace(np.min((C_i*np.abs(d_i))[indFirst:,:]),np.max((C_i*np.abs(d_i))[indFirst:,:]),100))
#first = ax1.contourf(xp+i*L,tg,C_i*np.abs(d_i),np.linspace(np.min((C_i*np.abs(d_i))[indFirst:,:]),np.max((C_i*np.abs(d_i))[indFirst:,:]),100))
ax1.set_title('(A)')
ax1.set_xlim(x[0],x[-1])
ax1.set_xlabel('$x-ct$')
ax1.set_ylabel('$t$')
for i in range(-2,int(P)+1):
xtp = xp+i*L
maxes = np.amax(xtp,axis = 1)
mines = np.amin(xtp,axis = 1)
gdi = mines<=2
gda = maxes>=0
indsTP = gdi & gda
if(np.sum(indsTP)>0):
second = ax2.contourf(xtp[indsTP,:],tg[indsTP,:],C_ii[indsTP,:]*np.abs(d_ii[indsTP,:]),np.linspace(-cobarlim,cobarlim,num_cont))
np.savetxt('secondXTP'+str(i)+'.csv',xtp[indsTP,:])
np.savetxt('secondTP'+str(i)+'.csv',tg[indsTP,:])
np.savetxt('secondVisc'+str(i)+'.csv',C_ii[indsTP,:]*np.abs(d_ii[indsTP,:]))
#second = ax2.contourf(xp+i*L,tg,C_ii*np.abs(d_ii),np.linspace(np.min((C_ii*np.abs(d_ii))[indFirst:,:]),np.max((C_ii*np.abs(d_ii))[indFirst:,:]),100))
ax2.set_title('(B)')
ax2.set_xlim(x[0],x[-1])
ax2.set_xlabel('$x-ct$')
for i in range(-2,int(P)+1):
xtp = xp+i*L
maxes = np.amax(xtp,axis = 1)
mines = np.amin(xtp,axis = 1)
gdi = mines<=2
gda = maxes>=0
indsTP = gdi & gda
if(np.sum(indsTP)>0):
third = ax3.contourf(xtp[indsTP,:],tg[indsTP,:],C_iii[indsTP,:]*np.abs(d_iii[indsTP,:]),np.linspace(-cobarlim,cobarlim,num_cont))
np.savetxt('thirdXTP'+str(i)+'.csv',xtp[indsTP,:])
np.savetxt('thirdTP'+str(i)+'.csv',tg[indsTP,:])
np.savetxt('thirdVisc'+str(i)+'.csv',C_iii[indsTP,:]*np.abs(d_iii[indsTP,:]))
#third = ax3.contourf(xp+i*L,tg,C_iii*np.abs(d_iii),np.linspace(np.min((C_iii*np.abs(d_iii))[indFirst:,:]),np.max((C_iii*np.abs(d_iii))[indFirst:,:]),100))
ax3.set_title('(C)')
ax3.set_xlim(x[0],x[-1])
ax3.set_xlabel('$x-ct$')
#f.subplots_adjust(right=0.8)
#cbar_ax1 = f.add_axes([.72, 0.15, 0.05, 0.7])
#cbar_ax2 = f.add_axes([.82, 0.15, 0.05, 0.7])
#cbar_ax3 = f.add_axes([.92, 0.15, 0.05, 0.7])
#f.colorbar(first, cax=cbar_ax1)
#f.colorbar(second, cax=cbar_ax2)
#f.colorbar(third, cax=cbar_ax3)
#f.colorbar(first, ax=ax1)
#f.colorbar(second, ax=ax2)
#f.colorbar(third, ax=ax3)
f.tight_layout()
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([.82, 0.15, 0.05, 0.7])
f.colorbar(third, cax=cbar_ax)
#f.tight_layout()
else:
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=(15, 4))
ax1.plot(x,C_i[150,:]*np.abs(d_i[150,:]))
ax1.plot(x,C_i[1500,:]*np.abs(d_i[1500,:]))
ax1.plot(x,C_i[3750,:]*np.abs(d_i[3750,:]))
ax1.plot(x,C_i[7500,:]*np.abs(d_i[7500,:]))
ax1.set_title('(A)')
ax1.set_xlabel('$x$')
ax1.set_ylabel('$t$')
ax2.plot(x,C_ii[150,:]*np.abs(d_ii[150,:]))
ax2.plot(x,C_ii[1500,:]*np.abs(d_ii[1500,:]))
ax2.plot(x,C_ii[3750,:]*np.abs(d_ii[3750,:]))
ax2.plot(x,C_ii[7500,:]*np.abs(d_ii[7500,:]))
ax2.set_title('(B)')
ax2.set_xlabel('$x$')
ax3.plot(x,C_iii[150,:]*np.abs(d_iii[150,:]))
ax3.plot(x,C_iii[1500,:]*np.abs(d_iii[1500,:]))
ax3.plot(x,C_iii[3750,:]*np.abs(d_iii[3750,:]))
ax3.plot(x,C_iii[7500,:]*np.abs(d_iii[7500,:]))
ax3.set_title('(C)')
ax3.set_xlabel('$x$')
ax3.legend(('$t=2$','$t=20$','$t=50$','$t=100$'))
def plot_visc_Even_newer(x,t,uv,FVM,P,NN,contours):
nx, nt = np.shape(uv)
L = x[-1] - x[0] + x[1] - x[0]
xg, tg = np.meshgrid(x,t)
xp = xg - tg
def scheme(u,NN):
        ust = u.copy()  # unnormalized copy of the stencil values (u itself is rescaled below)
min_u = np.amin(u,1)
max_u = np.amax(u,1)
const_n = min_u==max_u
#print('u: ', u)
u_tmp = np.zeros_like(u[:,2])
u_tmp[:] = u[:,2]
for i in range(0,5):
u[:,i] = (u[:,i]-min_u)/(max_u-min_u)
ep = 1E-6
        #candidate flux reconstructions on the three 3-point sub-stencils (not used further in this routine)
f1 = 1/3*u[:,0]-7/6*u[:,1]+11/6*u[:,2]
f2 = -1/6*u[:,1]+5/6*u[:,2]+1/3*u[:,3]
f3 = 1/3*u[:,2]+5/6*u[:,3]-1/6*u[:,4]
#compute derivatives on sub stencils
justU = 1/30*ust[:,0]-13/60*ust[:,1]+47/60*ust[:,2]+9/20*ust[:,3]-1/20*ust[:,4]
dudx = 0*ust[:,0]+1/12*ust[:,1]-5/4*ust[:,2]+5/4*ust[:,3]-1/12*ust[:,4]
deriv2 = (dudx - np.roll(dudx,1))
d2udx2 = -1/4*ust[:,0]+3/2*ust[:,1]-2*ust[:,2]+1/2*ust[:,3]+1/4*ust[:,4]
deriv3 = (d2udx2 - np.roll(d2udx2,1))
d3udx3 = 0*ust[:,0]-1*ust[:,1]+3*ust[:,2]-3*ust[:,3]+1*ust[:,4]
deriv4 = (d3udx3 - np.roll(d3udx3,1))
#compute smoothness indicators
B1 = 13/12*np.power(u[:,0]-2*u[:,1]+u[:,2],2) + 1/4*np.power(u[:,0]-4*u[:,1]+3*u[:,2],2)
B2 = 13/12*np.power(u[:,1]-2*u[:,2]+u[:,3],2) + 1/4*np.power(u[:,1]-u[:,3],2)
B3 = 13/12*np.power(u[:,2]-2*u[:,3]+u[:,4],2) + 1/4*np.power(3*u[:,2]-4*u[:,3]+u[:,4],2)
#assign linear weights
g1 = 1/10
g2 = 3/5
g3 = 3/10
#compute the unscaled nonlinear weights
wt1 = g1/np.power(ep+B1,2)
wt2 = g2/np.power(ep+B2,2)
wt3 = g3/np.power(ep+B3,2)
wts = wt1 + wt2 + wt3
#scale the nonlinear weights
w1 = wt1/wts
w2 = wt2/wts
w3 = wt3/wts
#compute the coefficients
c1 = np.transpose(np.array([1/3*w1,-7/6*w1-1/6*w2,11/6*w1+5/6*w2+1/3*w3,1/3*w2+5/6*w3,-1/6*w3]))
#fl = np.multiply(fl,(max_u-min_u))+min_u
if(NN):
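            # Same hard-coded network correction as in the scheme() nested in plot_visc_new above.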
A1 = np.array([[-0.94130915, -0.32270527, -0.06769955],
[-0.37087336, -0.05059665, 0.55401474],
[ 0.40815187, -0.5602299 , -0.01871526],
[ 0.56200236, -0.5348897 , -0.04091108],
[-0.6982639 , -0.49512517, 0.52821904]])
b1 = np.array([-0.04064859, 0. , 0. ])
c2 = np.maximum(np.matmul(c1,A1)+b1,0)
A2 = np.array([[ 0.07149544, 0.9637294 , 0.41981453],
[ 0.75602794, -0.0222342 , -0.95690656],
[ 0.07406807, -0.41880417, -0.4687035 ]])
b2 = np.array([-0.0836111 , -0.00330033, -0.01930024])
c3 = np.maximum(np.matmul(c2,A2)+b2,0)
A3 = np.array([[ 0.8568574 , -0.5809458 , 0.04762125],
[-0.26066098, -0.23142155, -0.6449008 ],
[ 0.7623346 , 0.81388015, -0.03217626]])
b3 = np.array([-0.0133561 , -0.05374921, 0. ])
c4 = np.maximum(np.matmul(c3,A3)+b3,0)
A4 = np.array([[-0.2891752 , -0.53783405, -0.17556567, -0.7775279 , 0.69957024],
[-0.12895434, 0.13607207, 0.12294354, 0.29842544, -0.00198237],
[ 0.5356503 , 0.09317833, 0.5135357 , -0.32794708, 0.13765627]])
b4 = np.array([ 0.00881096, 0.01138764, 0.00464343, 0.0070305 , -0.01644066])
dc = np.matmul(c4,A4)+b4
ct = c1 - dc
Ac = np.array([[-0.2, -0.2, -0.2, -0.2, -0.2],
[-0.2, -0.2, -0.2, -0.2, -0.2],
[-0.2, -0.2, -0.2, -0.2, -0.2],
[-0.2, -0.2, -0.2, -0.2, -0.2],
[-0.2, -0.2, -0.2, -0.2, -0.2]])
bc = np.array([0.2, 0.2, 0.2, 0.2, 0.2])
dc2 = np.matmul(ct,Ac)+bc
C = ct + dc2
Cons = C[:,0] + C[:,1] + C[:,2] + C[:,3] + C[:,4]
C_visc = -5/2*C[:,0] - 3/2*C[:,1] - 1/2*C[:,2] + 1/2*C[:,3] + 3/2*C[:,4]
C_visc2 = 19/6*C[:,0] + 7/6*C[:,1] + 1/6*C[:,2] + 1/6*C[:,3] + 7/6*C[:,4]
C_visc3 = -65/24*C[:,0] - 5/8*C[:,1] - 1/24*C[:,2] + 1/24*C[:,3] + 5/8*C[:,4]
C_visc = C_visc.flatten()
C_visc[const_n] = 0#if const across stencil, there was no viscosity
C_visc2[const_n] = 0#if const across stencil, there was no viscosity
C_visc3[const_n] = 0#if const across stencil, there was no viscosity
else:
Cons = c1[:,0] + c1[:,1] + c1[:,2] + c1[:,3] + c1[:,4]
C_visc = (-5/2*c1[:,0] - 3/2*c1[:,1] - 1/2*c1[:,2] + 1/2*c1[:,3] + 3/2*c1[:,4])
C_visc2 = (19/6*c1[:,0] + 7/6*c1[:,1] + 1/6*c1[:,2] + 1/6*c1[:,3] + 7/6*c1[:,4])
C_visc3 = (-65/24*c1[:,0] - 5/8*c1[:,1] - 1/24*c1[:,2] + 1/24*c1[:,3] + 5/8*c1[:,4])
C_visc[const_n] = 0#if const across stencil, there was no viscosity
C_visc2[const_n] = 0#if const across stencil, there was no viscosity
C_visc3[const_n] = 0#if const across stencil, there was no viscosity
C_visc = (C_visc+np.roll(C_visc,1))/2
C_visc2 = (C_visc2+np.roll(C_visc2,1))/2
C_visc3 = (C_visc3+np.roll(C_visc3,1))/2
return Cons,-C_visc,-C_visc2,C_visc3, deriv2, deriv3, deriv4
C_ = np.zeros_like(uv)
C_i = np.zeros_like(uv)
C_ii = np.zeros_like(uv)
C_iii = np.zeros_like(uv)
d_i = np.zeros_like(uv)
d_ii = np.zeros_like(uv)
d_iii = np.zeros_like(uv)
for i in range(0,nt):
u_part = FVM.partU(uv[:,i])
C_[:,i],C_i[:,i],C_ii[:,i],C_iii[:,i],d_i[:,i],d_ii[:,i],d_iii[:,i] = scheme(u_part,NN)
dx = x[1]-x[0]
C_ = np.transpose(C_)
C_i = np.transpose(C_i)*(dx**2)
C_ii = np.transpose(C_ii)*(dx**3)
C_iii = np.transpose(C_iii)*(dx**4)
d_i = np.transpose(d_i)/(dx**2)
d_ii = np.transpose(d_ii)/(dx**3)
d_iii = np.transpose(d_iii)/(dx**4)
    indFirst = 41  # ignore 1st few timesteps for scaling plots due to discontinuity
if(contours):
        indsTP = (np.linspace(0,nt-1,nt)%150==0)  # keep every 150th time step for the contour plots
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=(12, 4))
num_cont = 40
cobarlim = 0.004
first = ax1.contourf(xg[indsTP,:],tg[indsTP,:],C_i[indsTP,:]*np.abs(d_i[indsTP,:]),np.linspace(-cobarlim,cobarlim,num_cont))
#first = ax1.contourf(xtp[indsTP,:],tg[indsTP,:],C_i[indsTP,:]*np.abs(d_i[indsTP,:]),np.linspace(np.min((C_i*np.abs(d_i))[indFirst:,:]),np.max((C_i*np.abs(d_i))[indFirst:,:]),100))
#first = ax1.contourf(xp+i*L,tg,C_i*np.abs(d_i),np.linspace(np.min((C_i*np.abs(d_i))[indFirst:,:]),np.max((C_i*np.abs(d_i))[indFirst:,:]),100))
ax1.set_title('(A)')
ax1.set_xlim(x[0],x[-1])
ax1.set_xlabel('$x-ct$')
ax1.set_ylabel('$t$')
second = ax2.contourf(xg[indsTP,:],tg[indsTP,:],C_ii[indsTP,:]*np.abs(d_ii[indsTP,:]),np.linspace(-cobarlim,cobarlim,num_cont))
#second = ax2.contourf(xp+i*L,tg,C_ii*np.abs(d_ii),np.linspace(np.min((C_ii*np.abs(d_ii))[indFirst:,:]),np.max((C_ii*np.abs(d_ii))[indFirst:,:]),100))
ax2.set_title('(B)')
ax2.set_xlim(x[0],x[-1])
ax2.set_xlabel('$x-ct$')
third = ax3.contourf(xg[indsTP,:],tg[indsTP,:],C_iii[indsTP,:]*np.abs(d_iii[indsTP,:]),np.linspace(-cobarlim,cobarlim,num_cont))
#third = ax3.contourf(xp+i*L,tg,C_iii*np.abs(d_iii),np.linspace(np.min((C_iii*np.abs(d_iii))[indFirst:,:]),np.max((C_iii*np.abs(d_iii))[indFirst:,:]),100))
ax3.set_title('(C)')
ax3.set_xlim(x[0],x[-1])
ax3.set_xlabel('$x-ct$')
np.savetxt('XgAll.csv',xg[indsTP,:])
np.savetxt('TgAll.csv',tg[indsTP,:])
np.savetxt('VgAll.csv',C_i[indsTP,:]*np.abs(d_i[indsTP,:]))
np.savetxt('SgAll.csv',C_ii[indsTP,:]*np.abs(d_ii[indsTP,:]))
np.savetxt('MgAll.csv',C_iii[indsTP,:]*np.abs(d_iii[indsTP,:]))
#f.subplots_adjust(right=0.8)
#cbar_ax1 = f.add_axes([.72, 0.15, 0.05, 0.7])
#cbar_ax2 = f.add_axes([.82, 0.15, 0.05, 0.7])
#cbar_ax3 = f.add_axes([.92, 0.15, 0.05, 0.7])
#f.colorbar(first, cax=cbar_ax1)
#f.colorbar(second, cax=cbar_ax2)
#f.colorbar(third, cax=cbar_ax3)
#f.colorbar(first, ax=ax1)
#f.colorbar(second, ax=ax2)
#f.colorbar(third, ax=ax3)
f.tight_layout()
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([.82, 0.15, 0.05, 0.7])
f.colorbar(third, cax=cbar_ax)
#f.tight_layout()
else:
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=(15, 4))
ax1.plot(x,C_i[150,:]*np.abs(d_i[150,:]))
ax1.plot(x,C_i[1500,:]*np.abs(d_i[1500,:]))
ax1.plot(x,C_i[3750,:]*np.abs(d_i[3750,:]))
ax1.plot(x,C_i[7500,:]*np.abs(d_i[7500,:]))
ax1.set_title('(A)')
ax1.set_xlabel('$x$')
ax1.set_ylabel('$t$')
ax2.plot(x,C_ii[150,:]*np.abs(d_ii[150,:]))
ax2.plot(x,C_ii[1500,:]*np.abs(d_ii[1500,:]))
ax2.plot(x,C_ii[3750,:]*np.abs(d_ii[3750,:]))
ax2.plot(x,C_ii[7500,:]*np.abs(d_ii[7500,:]))
ax2.set_title('(B)')
ax2.set_xlabel('$x$')
ax3.plot(x,C_iii[150,:]*np.abs(d_iii[150,:]))
ax3.plot(x,C_iii[1500,:]*np.abs(d_iii[1500,:]))
ax3.plot(x,C_iii[3750,:]*np.abs(d_iii[3750,:]))
ax3.plot(x,C_iii[7500,:]*np.abs(d_iii[7500,:]))
ax3.set_title('(C)')
ax3.set_xlabel('$x$')
ax3.legend(('$t=2$','$t=20$','$t=50$','$t=100$'))
#Below here are official paper visualizations
def threeSolutions(x,t,u_NN,u_WE,u_EX,P):
'''
    Plot the exact, WENO5-JS and WENO5-NN solutions side by side in the co-moving frame.
    Assume shocks are at the middle and end of the x domain at the start.
    Inputs:
    x: x coordinates
    t: time levels
    u_NN: WENO5-NN solution, shape (nx, nt)
    u_WE: WENO5-JS solution, shape (nx, nt)
    u_EX: exact solution, shape (nx, nt)
    P: number of periods advected for
'''
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=(12, 4))
u_NN = np.transpose(u_NN)
u_WE = np.transpose(u_WE)
u_EX = np.transpose(u_EX)
L = x[-1] - x[0] + x[1] - x[0]
xg, tg = np.meshgrid(x,t)
xp = xg - tg
for i in range(-2,int(P)+1):
first = ax1.contourf(xp+i*L,tg,u_EX,np.linspace(-0.2,1.2,57))
ax1.set_title('Exact')
ax1.set_xlim(x[0],x[-1])
ax1.set_xlabel('$x-ct$')
ax1.set_ylabel('$t$')
for i in range(-2,int(P)+1):
second = ax2.contourf(xp+i*L,tg,u_WE,np.linspace(-0.2,1.2,57))
ax2.set_title('WENO5-JS')
ax2.set_xlim(x[0],x[-1])
ax2.set_xlabel('$x-ct$')
for i in range(-2,int(P)+1):
third = ax3.contourf(xp+i*L,tg,u_NN,np.linspace(-0.2,1.2,57))
ax3.set_title('WENO5-NN')
ax3.set_xlim(x[0],x[-1])
ax3.set_xlabel('$x-ct$')
f.tight_layout()
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([.82, 0.15, 0.05, 0.7])
f.colorbar(third, cax=cbar_ax)
def variousErrors(x,t,u,u_WE):
#Do L2 error, total variation error, and discontinuity width
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=False, figsize=(12, 4))
#here is the l2 error code:
L = x[-1] - x[0] + x[1] - x[0]
dx = x[1] - x[0]
nx = np.size(x)
xg, tg = np.meshgrid(t,x)
xp = xg - tg
ons = np.ones_like(xp)
#eex = np.roll(np.greater(ons,xp%L),-1,axis = 0)
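    # eex1 and eex2 below are intermediate constructions that are not used further;
    # eex3 is the piecewise-linear reference ('exact') profile compared against u and u_WE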
eex1 = xp/dx
eex1[eex1>=1] = 1
eex1[eex1<=0] = 0
eex2 = (-xp%L-L/2)/dx
eex2[eex2>=1] = 1
eex2[eex2<=0] = 0
eex3 = (-xp%L-L/2)/dx
eex3[eex3>(nx/2-1)] = -(eex3[eex3>(nx/2-1)]-nx/2)
eex3[eex3>=1] = 1
eex3[eex3<=0] = 0
er = eex3-u
ers = np.power(er,2)
ers0 = np.expand_dims(ers[0,:],axis = 0)
ers_aug = np.concatenate((ers,ers0), axis = 0)
err_int = np.trapz(ers_aug, dx = dx, axis = 0)
er_we = eex3-u_WE
ers_we = np.power(er_we,2)
ers0_we = np.expand_dims(ers_we[0,:],axis = 0)
ers_aug_we = np.concatenate((ers_we,ers0_we), axis = 0)
err_int_we = np.trapz(ers_aug_we, dx = dx, axis = 0)
ax1.plot(t,np.sqrt(err_int),'o')
ax1.plot(t,np.sqrt(err_int_we),'o')
ax1.set_xlabel('$t$')
ax1.set_ylabel('$E$')
ax1.set_title('(A)')
#here is the total variation error code:
us = np.roll(u, 1, axis = 0)
tv = np.sum(np.abs(u-us),axis = 0)
uswe = np.roll(u_WE, 1, axis = 0)
tvwe = np.sum(np.abs(u_WE-uswe),axis = 0)
#plt.figure()
ax2.plot(t,tv,'.')
ax2.plot(t,tvwe,'.')
#plt.title(title)
ax2.set_xlabel('$t$')
ax2.set_ylabel('$TV$')
ax2.set_title('(B)')
#here is the discontinuity width code:
u = np.transpose(u)
u_WE = np.transpose(u_WE)
L = x[-1] - x[0] + x[1] - x[0]
xg, tg =
|
np.meshgrid(x,t)
|
numpy.meshgrid
|
import glob, os, sys
import h5py
import numpy as np
import matplotlib
from matplotlib.backends.backend_pgf import FigureCanvasPgf
matplotlib.backend_bases.register_backend('pdf', FigureCanvasPgf)
import matplotlib.pyplot as plt
pgf_with_custom_preamble = {
"pgf.texsystem": "lualatex",
"font.family": "serif", # use serif/main font for text elements
"text.usetex": True, # use inline math for ticks
"pgf.rcfonts": False, # don't setup fonts from rc parameters
"axes.labelsize": 16,
"font.size": 18,
"legend.fontsize": 16,
"axes.titlesize": 16, # Title size when one figure
"xtick.labelsize": 16,
"ytick.labelsize": 16,
"figure.titlesize": 18, # Overall figure title
"pgf.preamble": [
r'\usepackage{fontspec}',
r'\usepackage{units}', # load additional packages
r'\usepackage{metalogo}',
r'\usepackage{unicode-math}', # unicode math setup
r'\setmathfont{XITS Math}',
        r'\setmonofont{Libertinus Mono}',
r'\setmainfont{Libertinus Serif}', # serif font via preamble
]
}
matplotlib.rcParams.update(pgf_with_custom_preamble)
"""
Basic script: opens an HDF5 file from the simulation, computes the hysteresis loop, and saves the data to a file with a specific naming pattern;
at the end it plots the calculated loop.
"""
def calcoloMagnMediaVsappField(time, file, versoreu, versorev, versorew):
data = np.array([])
dataset_Magnet = '/Emme%s/Val' % (time)
dataset_Hext = '/Hext%s/Val' % (time)
# print(dataset_Magnet)
datasetM = file[dataset_Magnet]
# print(datasetM.shape, isinstance(datasetM,h5py.Dataset))
# magnetizzazione = np.matrix(datasetM[0:103,:])
magnetizzazione = np.matrix(datasetM[()])
# print(np.shape(magnetizzazione))
proiezu = np.dot(magnetizzazione, versoreu)
proiezv = np.dot(magnetizzazione, versorev)
proiezw = np.dot(magnetizzazione, versorew)
# print(proiezw,i, "\n")
datasetH = file[dataset_Hext]
# print(datasetH.shape, isinstance(datasetH,h5py.Dataset))
# Hext= datasetH[0:103,0]
Hext = datasetH[(0)]
Hext = np.dot(np.dot(Hext, versoreu), np.reshape((1, 0, 0), (1, 3))) + np.dot(np.dot(Hext, versorev),
np.reshape((0, 1, 0),
(1, 3))) + np.dot(
np.dot(Hext, versorew), np.reshape((0, 0, 1), (1, 3)))
np.savetxt("uffa", proiezu)
# print(Hext)
Volumes = np.ones(proiezu.shape[0]) * (5.e-9 * 5.e-9 * 5.e-9)
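    # uniform element volumes (5 nm cubes) used as weights for the volume-averaged
    # magnetization below; this assumes a structured mesh of equally sized cells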
mediau = np.average(proiezu, axis=0, weights=Volumes)
mediav = np.average(proiezv, axis=0, weights=Volumes)
mediaw =
|
np.average(proiezw, axis=0, weights=Volumes)
|
numpy.average
|
import matplotlib.pyplot as plt
import os.path as path
import numpy as np
from Synthesis.units import *
# import Synthesis.post.histogram as hist
def histogram_mass(pop, m_low_lim=0, a_up_lim=30):
Masses = []
Orb_Dist = []
for sim in pop.SIMS.values():
Masses += list(sim.snaps[sim.N_snaps - 1].satellites['M'].values * M_S / M_E)
Orb_Dist += list(sim.snaps[sim.N_snaps - 1].satellites['a'].values * R_S / au)
print(f'Total Number of Bodies: {len(Masses)}')
data = list(zip(Masses, Orb_Dist))
data2 = data.copy()
data3 = data.copy()
    print(
        f'Number of Bodies over a mass of 0.1 M_E: {len([ite[0] for ite in data2 if ite[0] >= 0.1])}; {float(len([it[0] for it in data2 if it[0] >= 0.1])) / len(Masses)}')
# print([(m,a) for (m,a) in data])
#
print(f'Number of Bodies within {a_up_lim} au:')
list_within = [(m, a) for (m, a) in data2 if a <= a_up_lim]
print(f'{len(list_within)}; {len(list_within) / len(Masses)}')
print(
f'Number of Bodies over a mass of {m_low_lim} M_E: {len([item[0] for item in data3 if item[0] >= m_low_lim])}; {len([item[0] for item in data3 if item[0] >= m_low_lim]) / len(Masses)}')
#
# print(data)
data = [item for item in data if item[0] >= m_low_lim and item[1] <= a_up_lim]
print(f'Number of Terrestrial Bodies: {len(data)}; {len(data) / len(Masses)}')
Masses, Orb_Dist = zip(*data)
plt.rcParams.update({'figure.autolayout': True})
plt.style.use('seaborn-paper')
plt.rcParams.update({'font.size': pop.fontsize})
N_bins = 15
bins = 10 ** np.linspace(np.log10(min(Masses)), np.log10(max(Masses)), N_bins)
fig, ax = plt.subplots(figsize=pop.figsize)
# ax.hist(Masses, bins=bins)
values, base, _ = plt.hist(Masses, bins=bins, rwidth=0.95)
ax.axvline(1, color='red', linewidth=1)
ax.axvline(M_M / M_E, color='red', linewidth=1)
ax.axvline(M_V / M_E, color='red', linewidth=1)
ax.axvline(M_ME / M_E, color='red', linewidth=1)
ax_bis = ax.twinx()
# values, base = np.histogram(Masses, bins = 10 ** np.linspace(np.log10(min(Masses)), np.log10(max(Masses)), N_bins))
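    # prepend a zero so the cumulative curve starts at 0 at the left edge of the first bin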
values = np.append(0, values)
ax_bis.plot(base, np.cumsum(values) / np.cumsum(values)[-1], color='black', linestyle='dashed', markersize=0.1)
ax.set(xlabel=r'Mass [$M_E$]', ylabel=r'Counts')
ax_bis.set(ylabel='Cumulative Distribution')
ax.set_xscale('log')
ax_bis.set_xscale('log')
if pop.plot_config == 'presentation':
        ax.set(title=r'Histogram of Planet Masses')
save_name = 'histogram_mass'
if a_up_lim < 30 and m_low_lim > 0:
save_name += '_lim'
fig.savefig(path.join(pop.PLOT, save_name + '.png'), transparent=False, dpi=pop.dpi, bbox_inches="tight")
plt.close(fig)
def histogram_weighted_mass(pop, m_low_lim=0, a_up_lim=30):
Masses = []
Orb_Dist = []
for sim in pop.SIMS.values():
Masses += list(sim.snaps[sim.N_snaps - 1].satellites['M'].values * M_S / M_E)
Orb_Dist += list(sim.snaps[sim.N_snaps - 1].satellites['a'].values * R_S / au)
data = zip(Masses, Orb_Dist)
data = [item for item in data if item[0] >= m_low_lim and item[1] <= a_up_lim]
Masses, Orb_Dist = zip(*data)
plt.rcParams.update({'figure.autolayout': True})
plt.style.use('seaborn-paper')
plt.rcParams.update({'font.size': pop.fontsize})
N_bins = 15
bins = 10 ** np.linspace(np.log10(min(Masses)), np.log10(max(Masses)), N_bins)
fig, ax = plt.subplots(figsize=pop.figsize)
# ax.hist(Masses, bins=bins)
values, base, _ = plt.hist(Masses, bins=bins, rwidth=0.95, weights=Masses / np.sum(Masses))
ax.axvline(1, color='red', linewidth=1)
ax.axvline(M_M / M_E, color='red', linewidth=1)
ax.axvline(M_V / M_E, color='red', linewidth=1)
ax.axvline(M_ME / M_E, color='red', linewidth=1)
ax_bis = ax.twinx()
# values, base = np.histogram(Masses, bins = 10 ** np.linspace(np.log10(min(Masses)), np.log10(max(Masses)), N_bins))
values =
|
np.append(0, values)
|
numpy.append
|
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 4 18:14:29 2016
@author: becker
"""
import numpy as np
import scipy.linalg as linalg
import scipy.sparse as sparse
from simfempy import tools, meshes
from simfempy.fems import p1general
import simfempy.fems.data, simfempy.fems.rt0
#=================================================================#
class P1(p1general.P1general):
def __init__(self, kwargs={}, mesh=None):
super().__init__(mesh=mesh)
for p,v in zip(['masslumpedvol', 'masslumpedbdry'], [False, True]):
self.params_bool[p] = kwargs.pop(p, v)
for p, v in zip(['dirichletmethod', 'convmethod'], ['strong', 'supg']):
self.params_str[p] = kwargs.pop(p, v)
if self.params_str['dirichletmethod'] == 'nitsche':
self.params_float['nitscheparam'] = kwargs.pop('nitscheparam', 4)
def setMesh(self, mesh):
super().setMesh(mesh)
self.computeStencilCell(self.mesh.simplices)
self.cellgrads = self.computeCellGrads()
def prepareAdvection(self, beta, scale):
method = self.params_str['convmethod']
rt = simfempy.fems.rt0.RT0(mesh=self.mesh)
betart = scale*rt.interpolate(beta)
betacell = rt.toCell(betart)
convdata = simfempy.fems.data.ConvectionData(betacell=betacell, betart=betart)
if method == 'upwalg':
return convdata
elif method == 'lps':
self.mesh.constructInnerFaces()
return convdata
elif method == 'supg':
md = meshes.move.move_midpoints(self.mesh, betacell)
# self.md.plot(self.mesh, beta, type='midpoints')
elif method == 'supg2':
md = meshes.move.move_midpoints(self.mesh, betacell, extreme=True)
# self.md.plot(self.mesh, beta, type='midpoints')
elif method == 'upwalg':
pass
elif method == 'upw':
md = meshes.move.move_nodes(self.mesh, -betacell)
# self.md.plot(self.mesh, beta)
elif method == 'upw2':
md = meshes.move.move_nodes(self.mesh, -betacell, second=True)
# self.md.plot(self.mesh, beta)
elif method == 'upwsides':
self.mesh.constructInnerFaces()
md = meshes.move.move_midpoints(self.mesh, -betacell)
else:
raise ValueError(f"don't know {method=}")
convdata.md = md
return convdata
def nlocal(self): return self.mesh.dimension+1
def nunknowns(self): return self.mesh.nnodes
def dofspercell(self): return self.mesh.simplices
def computeCellGrads(self):
ncells, normals, cellsOfFaces, facesOfCells, dV = self.mesh.ncells, self.mesh.normals, self.mesh.cellsOfFaces, self.mesh.facesOfCells, self.mesh.dV
scale = -1/self.mesh.dimension
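        # P1 barycentric basis gradients: grad(lambda_i) = -sigma_i * n_i / (d * |K|),
        # where n_i is the area-scaled normal of the face opposite node i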
return scale*(normals[facesOfCells].T * self.mesh.sigma.T / dV.T).T
def tonode(self, u): return u
# bc
def _prepareBoundary(self, colorsdir, colorsflux=[]):
bdrydata = simfempy.fems.data.BdryData()
bdrydata.nodesdir={}
bdrydata.nodedirall = np.empty(shape=(0), dtype=self.mesh.faces.dtype)
for color in colorsdir:
facesdir = self.mesh.bdrylabels[color]
bdrydata.nodesdir[color] = np.unique(self.mesh.faces[facesdir].flat[:])
bdrydata.nodedirall = np.unique(np.union1d(bdrydata.nodedirall, bdrydata.nodesdir[color]))
bdrydata.nodesinner = np.setdiff1d(np.arange(self.mesh.nnodes, dtype=self.mesh.faces.dtype),bdrydata.nodedirall)
bdrydata.nodesdirflux={}
for color in colorsflux:
facesdir = self.mesh.bdrylabels[color]
bdrydata.nodesdirflux[color] = np.unique(self.mesh.faces[facesdir].ravel())
return bdrydata
def matrixBoundaryStrong(self, A, bdrydata):
method = self.params_str['dirichletmethod']
if method not in ['strong','new']: return
nodesdir, nodedirall, nodesinner, nodesdirflux = bdrydata.nodesdir, bdrydata.nodedirall, bdrydata.nodesinner, bdrydata.nodesdirflux
nnodes = self.mesh.nnodes
for color, nodes in nodesdirflux.items():
nb = nodes.shape[0]
help = sparse.dok_matrix((nb, nnodes))
for i in range(nb): help[i, nodes[i]] = 1
bdrydata.Asaved[color] = help.dot(A)
bdrydata.A_inner_dir = A[nodesinner, :][:, nodedirall]
help = np.ones((nnodes), dtype=nodedirall.dtype)
help[nodedirall] = 0
help = sparse.dia_matrix((help, 0), shape=(nnodes, nnodes))
# A = help.dot(A.dot(help))
diag = np.zeros((nnodes))
if method == 'strong':
diag[nodedirall] = 1.0
diag = sparse.dia_matrix((diag, 0), shape=(nnodes, nnodes))
else:
dirparam = self.params_float['nitscheparam']
bdrydata.A_dir_dir = dirparam*A[nodedirall, :][:, nodedirall]
diag[nodedirall] = np.sqrt(dirparam)
diag = sparse.dia_matrix((diag, 0), shape=(nnodes, nnodes))
diag = diag.dot(A.dot(diag))
A = help.dot(A)
A += diag
return A
def vectorBoundaryStrong(self, b, bdrycond, bdrydata):
method = self.params_str['dirichletmethod']
if method not in ['strong','new']: return
nodesdir, nodedirall, nodesinner, nodesdirflux = bdrydata.nodesdir, bdrydata.nodedirall, bdrydata.nodesinner, bdrydata.nodesdirflux
x, y, z = self.mesh.points.T
for color, nodes in nodesdirflux.items():
bdrydata.bsaved[color] = b[nodes]
if method == 'strong':
for color, nodes in nodesdir.items():
if color in bdrycond.fct:
dirichlet = bdrycond.fct[color](x[nodes], y[nodes], z[nodes])
b[nodes] = dirichlet
else:
b[nodes] = 0
# b[nodesinner] -= bdrydata.A_inner_dir * b[nodedirall]
else:
help = np.zeros_like(b)
for color, nodes in nodesdir.items():
if color in bdrycond.fct:
dirichlet = bdrycond.fct[color](x[nodes], y[nodes], z[nodes])
help[nodes] = dirichlet
# b[nodesinner] -= bdrydata.A_inner_dir * help[nodedirall]
b[nodedirall] = bdrydata.A_dir_dir * help[nodedirall]
return b
def vectorBoundaryStrongEqual(self, du, u, bdrydata):
if self.params_str['dirichletmethod']=="nitsche": return
nodedirall = bdrydata.nodedirall
du[nodedirall] = u[nodedirall]
def vectorBoundaryStrongZero(self, du, bdrydata):
if self.params_str['dirichletmethod']=="nitsche": return
du[bdrydata.nodedirall] = 0
def formBoundary(self, du, u, bdrydata, kheatcell, colorsdir):
method = self.params_str['dirichletmethod']
if method=='new':
nodedirall = bdrydata.nodedirall
du[nodedirall] += bdrydata.A_dir_dir*u[bdrydata.nodedirall]
elif method == "nitsche":
self.computeFormNitscheDiffusion(du, u, kheatcell, colorsdir)
def computeRhsNitscheDiffusion(self, b, diffcoff, colorsdir, udir=None, bdrycondfct=None, coeff=1):
if self.params_str['dirichletmethod'] != 'nitsche': return
if udir is None:
udir = self.interpolateBoundary(colorsdir, bdrycondfct)
nitsche_param=self.params_float['nitscheparam']
assert udir.shape[0]==self.mesh.nnodes
dim = self.mesh.dimension
massloc = tools.barycentric.tensor(d=dim - 1, k=2)
massloc = np.diag(np.sum(massloc,axis=1))
faces = self.mesh.bdryFaces(colorsdir)
nodes, cells, normalsS = self.mesh.faces[faces], self.mesh.cellsOfFaces[faces,0], self.mesh.normals[faces,:dim]
dS = linalg.norm(normalsS, axis=1)
simp, dV = self.mesh.simplices[cells], self.mesh.dV[cells]
dS *= nitsche_param * coeff * diffcoff[cells] * dS / dV
r = np.einsum('n,kl,nl->nk', dS, massloc, udir[nodes])
np.add.at(b, nodes, r)
cellgrads = self.cellgrads[cells, :, :dim]
u = udir[nodes].mean(axis=1)
mat = np.einsum('f,fk,fik->fi', coeff*u*diffcoff[cells], normalsS, cellgrads)
np.add.at(b, simp, -mat)
def computeFormNitscheDiffusion(self, du, u, diffcoff, colorsdir):
if self.params_str['dirichletmethod'] != 'nitsche': return
assert u.shape[0]==self.mesh.nnodes
dim = self.mesh.dimension
massloc = tools.barycentric.tensor(d=dim - 1, k=2)
massloc = np.diag(np.sum(massloc,axis=1))
faces = self.mesh.bdryFaces(colorsdir)
nodes, cells, normalsS = self.mesh.faces[faces], self.mesh.cellsOfFaces[faces,0], self.mesh.normals[faces,:dim]
dS = linalg.norm(normalsS, axis=1)
simp, dV = self.mesh.simplices[cells], self.mesh.dV[cells]
        dS *= self.params_float['nitscheparam'] * diffcoff[cells] * dS / dV
r = np.einsum('n,kl,nl->nk', dS, massloc, u[nodes])
np.add.at(du, nodes, r)
cellgrads = self.cellgrads[cells, :, :dim]
um = u[nodes].mean(axis=1)
mat = np.einsum('f,fk,fik->fi', um*diffcoff[cells], normalsS, cellgrads)
np.add.at(du, simp, -mat)
mat = np.einsum('f,fk,fjk,fj->f', diffcoff[cells]/dim, normalsS, cellgrads,u[simp]).repeat(dim).reshape(faces.shape[0],dim)
np.add.at(du, nodes, -mat)
def computeMatrixNitscheDiffusion(self, diffcoff, colors, lumped=True):
nnodes, ncells, dim, nlocal = self.mesh.nnodes, self.mesh.ncells, self.mesh.dimension, self.nlocal()
if self.params_str['dirichletmethod'] != 'nitsche': return sparse.coo_matrix((nnodes,nnodes))
nitsche_param=self.params_float['nitscheparam']
faces = self.mesh.bdryFaces(colors)
cells = self.mesh.cellsOfFaces[faces, 0]
normalsS = self.mesh.normals[faces, :dim]
dS = np.linalg.norm(normalsS, axis=1)
dV = self.mesh.dV[cells]
cellgrads = self.cellgrads[cells, :, :dim]
simp = self.mesh.simplices[cells]
facenodes = self.mesh.faces[faces]
cols = np.tile(simp,dim)
rows = facenodes.repeat(dim+1)
mat = np.einsum('f,fk,fjk,i->fij', diffcoff[cells]/dim, normalsS, cellgrads, np.ones(dim))
# mat = np.repeat(mat,dim)
# print(f"{cols.shape=} {rows.shape=} {mat.shape=}")
AN = sparse.coo_matrix((mat.ravel(), (rows.ravel(), cols.ravel())), shape=(nnodes, nnodes)).tocsr()
massloc = tools.barycentric.tensor(d=dim-1, k=2)
massloc = np.diag(np.sum(massloc,axis=1))
# print(f"{massloc=}")
mat = np.einsum('f,ij->fij', nitsche_param * dS**2/dV*diffcoff[cells], massloc)
# mat = np.repeat(coeff * diffcoff[cells]/dS, dim)
rows = np.repeat(facenodes,dim)
cols = np.tile(facenodes,dim)
AD = sparse.coo_matrix((mat.ravel(), (rows.ravel(), cols.ravel())), shape=(nnodes, nnodes)).tocsr()
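        # Nitsche weak Dirichlet conditions: consistency term AN, its transpose for symmetry,
        # and the penalty term AD scaled by nitsche_param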
return - AN - AN.T + AD
def computeBdryNormalFluxNitsche(self, u, colors, udir, diffcoff):
nitsche_param=self.params_float['nitscheparam']
flux= np.zeros(len(colors))
nnodes, dim = self.mesh.nnodes, self.mesh.dimension
massloc = tools.barycentric.tensor(d=dim - 1, k=2)
massloc = np.diag(np.sum(massloc,axis=1))
for i,color in enumerate(colors):
faces = self.mesh.bdrylabels[color]
normalsS = self.mesh.normals[faces,:dim]
dS = linalg.norm(normalsS, axis=1)
nodes = self.mesh.faces[faces]
cells = self.mesh.cellsOfFaces[faces,0]
simp = self.mesh.simplices[cells]
cellgrads = self.cellgrads[cells, :, :dim]
dV = self.mesh.dV[cells]
flux[i] = np.einsum('nj,n,ni,nji->', u[simp], diffcoff[cells], normalsS, cellgrads)
uD = u[nodes]-udir[nodes]
dV = self.mesh.dV[cells]
flux[i] -= np.einsum('n,kl,nl->', nitsche_param * diffcoff[cells] * dS**2 / dV, massloc, uD)
# flux[i] /= np.sum(dS)
return flux
# interpolate
def interpolate(self, f):
x, y, z = self.mesh.points.T
return f(x, y, z)
def interpolateBoundary(self, colors, f, lumped=False):
"""
:param colors: set of colors to interpolate
        :param f: dict of functions, keyed by color
        :return: nodal vector of interpolated boundary values (zero away from the given colors)
"""
b = np.zeros(self.mesh.nnodes)
for color in colors:
if not color in f or not f[color]: continue
faces = self.mesh.bdrylabels[color]
normalsS = self.mesh.normals[faces]
dS = linalg.norm(normalsS,axis=1)
normalsS = normalsS/dS[:,np.newaxis]
nx, ny, nz = normalsS.T
nodes = np.unique(self.mesh.faces[faces].reshape(-1))
x, y, z = self.mesh.points[nodes].T
# constant normal !!
nx, ny, nz = np.mean(normalsS, axis=0)
try:
b[nodes] = f[color](x, y, z, nx, ny, nz)
except:
b[nodes] = f[color](x, y, z)
return b
# matrices
def computeMassMatrix(self, coeff=1, lumped=False):
dim, dV, nnodes = self.mesh.dimension, self.mesh.dV, self.mesh.nnodes
if lumped:
mass = coeff/(dim+1)*dV.repeat(dim+1)
rows = self.mesh.simplices.ravel()
return sparse.coo_matrix((mass, (rows, rows)), shape=(nnodes, nnodes)).tocsr()
massloc = tools.barycentric.tensor(d=dim, k=2)
mass = np.einsum('n,kl->nkl', coeff*dV, massloc).ravel()
return sparse.coo_matrix((mass, (self.rows, self.cols)), shape=(nnodes, nnodes)).tocsr()
def computeBdryMassMatrix(self, colors=None, coeff=1, lumped=False):
nnodes = self.mesh.nnodes
rows = np.empty(shape=(0), dtype=int)
cols = np.empty(shape=(0), dtype=int)
mat = np.empty(shape=(0), dtype=float)
if colors is None: colors = self.mesh.bdrylabels.keys()
for color in colors:
faces = self.mesh.bdrylabels[color]
normalsS = self.mesh.normals[faces]
if isinstance(coeff, dict):
dS = linalg.norm(normalsS, axis=1)*coeff[color]
else:
dS = linalg.norm(normalsS, axis=1)*coeff[faces]
nodes = self.mesh.faces[faces]
if lumped:
dS /= self.mesh.dimension
rows = np.append(rows, nodes)
cols = np.append(cols, nodes)
mass = np.repeat(dS, self.mesh.dimension)
mat = np.append(mat, mass)
else:
nloc = self.mesh.dimension
rows = np.append(rows, np.repeat(nodes, nloc).ravel())
cols = np.append(cols, np.tile(nodes, nloc).ravel())
massloc = tools.barycentric.tensor(d=self.mesh.dimension-1, k=2)
mat = np.append(mat, np.einsum('n,kl->nkl', dS, massloc).ravel())
return sparse.coo_matrix((mat, (rows, cols)), shape=(nnodes, nnodes)).tocsr()
def computeMatrixTransportUpwindAlg(self, data):
A = self.computeMatrixTransportCellWise(data, type='centered')
return tools.checkmmatrix.diffusionForMMatrix(A)
def computeMatrixTransportUpwindSides(self, data):
nnodes, nfaces, ncells, dim, dV = self.mesh.nnodes, self.mesh.nfaces, self.mesh.ncells, self.mesh.dimension, self.mesh.dV
normalsS, cof, simp = self.mesh.normals, self.mesh.cellsOfFaces, self.mesh.simplices
dbS = linalg.norm(normalsS, axis=1)*data.betart/dim/(dim+1)
innerfaces = self.mesh.innerfaces
infaces = np.arange(nfaces)[innerfaces]
ci0 = self.mesh.cellsOfInteriorFaces[:, 0]
ci1 = self.mesh.cellsOfInteriorFaces[:, 1]
rows0 = np.repeat(simp[ci0],dim).ravel()
rows1 = np.repeat(simp[ci1],dim).ravel()
cols = np.tile(self.mesh.faces[infaces], dim + 1).ravel()
matloc = np.ones(shape=(dim,dim+1))
mat = np.einsum('n,kl->nkl', dbS[infaces], matloc).ravel()
A = sparse.coo_matrix((mat, (rows1, cols)), shape=(nnodes, nnodes))
A -= sparse.coo_matrix((mat, (rows0, cols)), shape=(nnodes, nnodes))
faces = self.mesh.bdryFaces()
ci0 = self.mesh.cellsOfFaces[faces, 0]
rows0 = np.repeat(simp[ci0],dim).ravel()
cols = np.tile(self.mesh.faces[infaces], dim + 1).ravel()
mat = np.einsum('n,kl->nkl', dbS[faces], matloc).ravel()
A -= sparse.coo_matrix((mat, (rows0,cols)), shape=(nnodes, nnodes))
A -= self.computeBdryMassMatrix(coeff=np.minimum(data.betart, 0), lumped=True)
B = self.computeMatrixTransportCellWise(data, type='centered')
A = A.tocsr()
B = B.tocsr()
# if not np.allclose(A.A,B.A):
# raise ValueError(f"{A.diagonal()=}\n{B.diagonal()=}\n{A.todense()=}\n{B.todense()=}")
return A.tocsr()
def computeMatrixTransportUpwind(self, data, method):
if method=='upwsides': return self.computeMatrixTransportUpwindSides(data)
self.masslumped = self.computeMassMatrix(coeff=1, lumped=True)
beta, mus, cells, deltas = data.beta, data.md.mus, data.md.cells, data.md.deltas
nnodes, simp= self.mesh.nnodes, self.mesh.simplices
m = data.md.mask()
if hasattr(data.md,'cells2'):
m2 = data.md.mask2()
m = data.md.maskonly1()
print(f"{nnodes=} {np.sum(data.md.mask())=} {np.sum(m2)=} {np.sum(m)=}")
ml = self.masslumped.diagonal()[m]/deltas[m]
rows = np.arange(nnodes)[m]
A = sparse.coo_matrix((ml,(rows,rows)), shape=(nnodes, nnodes))
mat = mus[m]*ml[:,np.newaxis]
rows = rows.repeat(simp.shape[1])
cols = simp[cells[m]]
A -= sparse.coo_matrix((mat.ravel(), (rows.ravel(), cols.ravel())), shape=(nnodes, nnodes))
if hasattr(data.md,'cells2'):
cells2 = data.md.cells2
delta1 = data.md.deltas[m2]
delta2 = data.md.deltas2[m2]
mus2 = data.md.mus2
c0 = (1+delta1/(delta1+delta2))/delta1
c1 = -(1+delta1/delta2)/delta1
c2 = -c0-c1
ml = self.masslumped.diagonal()[m2]
rows = np.arange(nnodes)[m2]
A += sparse.coo_matrix((c0*ml,(rows,rows)), shape=(nnodes, nnodes))
mat = mus[m2]*ml[:,np.newaxis]*c1[:,np.newaxis]
rows1 = rows.repeat(simp.shape[1])
cols = simp[cells[m2]]
A += sparse.coo_matrix((mat.ravel(), (rows1.ravel(), cols.ravel())), shape=(nnodes, nnodes))
mat = mus2[m2] * ml[:, np.newaxis] * c2[:, np.newaxis]
rows2 = rows.repeat(simp.shape[1])
cols = simp[cells2[m2]]
A += sparse.coo_matrix((mat.ravel(), (rows2.ravel(), cols.ravel())), shape=(nnodes, nnodes))
A += self.computeBdryMassMatrix(coeff=-np.minimum(data.betart, 0), lumped=True)
# A = checkmmatrix.makeMMatrix(A)
w1, w2 = tools.checkmmatrix.checkMmatrix(A)
print(f"A {w1=}\n{w2=}")
return A.tocsr()
def computeMatrixTransportSupg(self, data, method):
return self.computeMatrixTransportCellWise(data, type='supg')
def computeMatrixTransportLps(self, data):
A = self.computeMatrixTransportCellWise(data, type='centered')
A += self.computeMatrixLps(data.betart)
return A
def computeMatrixTransportCellWise(self, data, type):
nnodes, ncells, nfaces, dim = self.mesh.nnodes, self.mesh.ncells, self.mesh.nfaces, self.mesh.dimension
if type=='centered':
beta, mus = data.betacell, np.full(dim+1,1.0/(dim+1))
mat = np.einsum('n,njk,nk,i -> nij', self.mesh.dV, self.cellgrads[:,:,:dim], beta, mus)
A = sparse.coo_matrix((mat.ravel(), (self.rows, self.cols)), shape=(nnodes, nnodes)).tocsr()
elif type=='supg':
beta, mus = data.betacell, data.md.mus
mat = np.einsum('n,njk,nk,ni -> nij', self.mesh.dV, self.cellgrads[:,:,:dim], beta, mus)
A = sparse.coo_matrix((mat.ravel(), (self.rows, self.cols)), shape=(nnodes, nnodes)).tocsr()
else: raise ValueError(f"unknown type {type=}")
A -= self.computeBdryMassMatrix(coeff=np.minimum(data.betart, 0), lumped=True)
return A
def computeMassMatrixSupg(self, xd, data, coeff=1):
dim, dV, nnodes, xK = self.mesh.dimension, self.mesh.dV, self.mesh.nnodes, self.mesh.pointsc
massloc = tools.barycentric.tensor(d=dim, k=2)
mass = np.einsum('n,ij->nij', coeff*dV, massloc)
massloc = tools.barycentric.tensor(d=dim, k=1)
# marche si xd = xK + delta*betaC
# mass += np.einsum('n,nik,nk,j -> nij', coeff*delta*dV, self.cellgrads[:,:,:dim], betaC, massloc)
mass += np.einsum('n,nik,nk,j -> nij', coeff*dV, self.cellgrads[:,:,:dim], xd[:,:dim]-xK[:,:dim], massloc)
return sparse.coo_matrix((mass.ravel(), (self.rows, self.cols)), shape=(nnodes, nnodes)).tocsr()
# dotmat
def formDiffusion(self, du, u, coeff):
graduh = np.einsum('nij,ni->nj', self.cellgrads, u[self.mesh.simplices])
graduh = np.einsum('ni,n->ni', graduh, self.mesh.dV*coeff)
# du += np.einsum('nj,nij->ni', graduh, self.cellgrads)
raise ValueError(f"graduh {graduh.shape} {du.shape}")
return du
def massDotCell(self, b, f, coeff=1):
assert f.shape[0] == self.mesh.ncells
dimension, simplices, dV = self.mesh.dimension, self.mesh.simplices, self.mesh.dV
massloc = 1/(dimension+1)
np.add.at(b, simplices, (massloc*coeff*dV*f)[:, np.newaxis])
return b
def massDot(self, b, f, coeff=1):
dim, simplices, dV = self.mesh.dimension, self.mesh.simplices, self.mesh.dV
massloc = tools.barycentric.tensor(d=dim, k=2)
r = np.einsum('n,kl,nl->nk', coeff * dV, massloc, f[simplices])
np.add.at(b, simplices, r)
return b
def massDotSupg(self, b, f, data, coeff=1):
if self.params_str['convmethod'][:4] != 'supg': return
dim, simplices, dV = self.mesh.dimension, self.mesh.simplices, self.mesh.dV
r = np.einsum('n,nk,n->nk', coeff*dV, data.md.mus-1/(dim+1), f[simplices].mean(axis=1))
np.add.at(b, simplices, r)
return b
def massDotBoundary(self, b, f, colors=None, coeff=1, lumped=None):
if lumped is None: lumped=self.params_bool['masslumpedbdry']
if colors is None: colors = self.mesh.bdrylabels.keys()
for color in colors:
faces = self.mesh.bdrylabels[color]
normalsS = self.mesh.normals[faces]
dS = linalg.norm(normalsS, axis=1)
nodes = self.mesh.faces[faces]
if isinstance(coeff, (int,float)): dS *= coeff
elif isinstance(coeff, dict): dS *= coeff[color]
else:
assert coeff.shape[0]==self.mesh.nfaces
dS *= coeff[faces]
# print(f"{scalemass=}")
if lumped:
np.add.at(b, nodes, f[nodes]*dS[:,np.newaxis]/self.mesh.dimension)
else:
massloc = tools.barycentric.tensor(d=self.mesh.dimension-1, k=2)
r = np.einsum('n,kl,nl->nk', dS, massloc, f[nodes])
np.add.at(b, nodes, r)
return b
# rhs
def computeRhsMass(self, b, rhs, mass):
if rhs is None: return b
x, y, z = self.mesh.points.T
b += mass * rhs(x, y, z)
return b
def computeRhsCell(self, b, rhscell):
if rhscell is None: return b
if isinstance(rhscell,dict):
assert set(rhscell.keys())==set(self.mesh.cellsoflabel.keys())
scale = 1 / (self.mesh.dimension + 1)
for label, fct in rhscell.items():
if fct is None: continue
cells = self.mesh.cellsoflabel[label]
xc, yc, zc = self.mesh.pointsc[cells].T
bC = scale * fct(xc, yc, zc) * self.mesh.dV[cells]
# print("bC", bC)
np.add.at(b, self.mesh.simplices[cells].T, bC)
else:
fp1 = self.interpolateCell(rhscell)
self.massDotCell(b, fp1, coeff=1)
return b
def computeRhsPoint(self, b, rhspoint):
if rhspoint is None: return b
for label, fct in rhspoint.items():
if fct is None: continue
points = self.mesh.verticesoflabel[label]
xc, yc, zc = self.mesh.points[points].T
# print("xc, yc, zc, f", xc, yc, zc, fct(xc, yc, zc))
b[points] += fct(xc, yc, zc)
return b
def computeRhsBoundary(self, b, bdryfct, colors):
normals = self.mesh.normals
scale = 1 / self.mesh.dimension
for color in colors:
faces = self.mesh.bdrylabels[color]
if not color in bdryfct or bdryfct[color] is None: continue
normalsS = normals[faces]
dS = linalg.norm(normalsS,axis=1)
normalsS = normalsS/dS[:,np.newaxis]
assert(dS.shape[0] == len(faces))
xf, yf, zf = self.mesh.pointsf[faces].T
nx, ny, nz = normalsS.T
bS = scale * bdryfct[color](xf, yf, zf, nx, ny, nz) * dS
np.add.at(b, self.mesh.faces[faces].T, bS)
return b
def computeRhsBoundaryMass(self, b, bdrycond, types, mass):
normals = self.mesh.normals
help = np.zeros(self.mesh.nnodes)
for color, faces in self.mesh.bdrylabels.items():
if bdrycond.type[color] not in types: continue
if not color in bdrycond.fct or bdrycond.fct[color] is None: continue
normalsS = normals[faces]
dS = linalg.norm(normalsS,axis=1)
normalsS = normalsS/dS[:,np.newaxis]
nx, ny, nz = normalsS.T
assert(dS.shape[0] == len(faces))
nodes = np.unique(self.mesh.faces[faces].reshape(-1))
x, y, z = self.mesh.points[nodes].T
# constant normal !!
nx, ny, nz = np.mean(normalsS, axis=0)
help[nodes] = bdrycond.fct[color](x, y, z, nx, ny, nz)
# print("help", help)
b += mass*help
return b
# postprocess
def computeErrorL2Cell(self, solexact, uh):
xc, yc, zc = self.mesh.pointsc.T
ec = solexact(xc, yc, zc) - np.mean(uh[self.mesh.simplices], axis=1)
return np.sqrt(np.sum(ec**2* self.mesh.dV)), ec
def computeErrorL2(self, solexact, uh):
x, y, z = self.mesh.points.T
en = solexact(x, y, z) - uh
Men = np.zeros_like(en)
return np.sqrt( np.dot(en, self.massDot(Men,en)) ), en
def computeErrorFluxL2(self, solexact, uh, diffcell=None):
xc, yc, zc = self.mesh.pointsc.T
graduh = np.einsum('nij,ni->nj', self.cellgrads, uh[self.mesh.simplices])
errv = 0
for i in range(self.mesh.dimension):
solxi = solexact.d(i, xc, yc, zc)
if diffcell is None: errv +=
|
np.sum((solxi - graduh[:, i]) ** 2 * self.mesh.dV)
|
numpy.sum
|
import time
import warnings
from copy import deepcopy
from typing import Optional, List, Tuple, Dict
import numpy as np
import pandas as pd
from ConfigSpace import Configuration
from IPython.display import JSON
from sklearn.pipeline import Pipeline
from xautoml._helper import XAutoMLManager
from xautoml.config_similarity import ConfigSimilarity
from xautoml.ensemble import EnsembleInspection
from xautoml.graph_similarity import pipeline_to_networkx, GraphMatching, export_json
from xautoml.hp_importance import HPImportance
from xautoml.model_details import ModelDetails, DecisionTreeResult, LimeResult, GlobalSurrogateResult
from xautoml.models import RunHistory, CandidateId, CandidateStructure
from xautoml.output import DESCRIPTION, OutputCalculator, COMPLETE
from xautoml.roc_auc import RocCurve
from xautoml.util import pipeline_utils
from xautoml.util.constants import SINK
from xautoml.util.datasets import down_sample
def as_json(func):
def wrapper(*args, **kwargs):
return JSON(func(*args, **kwargs))
return wrapper
def no_warnings(func):
def wrapper(*args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return func(*args, **kwargs)
return wrapper
class XAutoML:
def __init__(self, run_history: RunHistory, X: pd.DataFrame, y: pd.Series, n_samples: int = 5000):
"""
Main class for visualizing AutoML optimization procedures in XAutoML. This class provides methods to render
the visualization, provides endpoints for internal communication, and for exporting data to Jupyter.
:param run_history: A RunHistory instance containing the raw data of an optimization. Can be created via the
provided adapters
:param X: DataFrame containing the test data set. Used for all calculations
:param y: Series containing the test data set. Used for all calculations
:param n_samples: Maximum number of samples in the test data set. Due to the interactive nature of XAutoML,
calculations have to be quite fast. By default, the number of samples is limited to 5000
"""
self.run_history = run_history
if X.shape[0] > n_samples:
warnings.warn(
'The data set exceeds the maximum number of samples with {}. Selecting {} random samples...'.format(
X.shape, n_samples)
)
X, y = down_sample(X, y, n_samples)
self.X: pd.DataFrame = X.reset_index(drop=True)
self.y: pd.Series = y.reset_index(drop=True)
self._calc_pred_times()
XAutoMLManager.open(self)
# Helper Methods
@no_warnings
def _calc_pred_times(self):
for candidate in self.run_history.cid_to_candidate.values():
if 'prediction_time' in candidate.runtime:
continue
try:
start = time.time()
candidate.model.predict(self.X)
end = time.time()
candidate.runtime['prediction_time'] = end - start
except Exception:
candidate.runtime['prediction_time'] = 1000
def _load_models(self, cids: List[CandidateId]) -> Tuple[pd.DataFrame, pd.Series, List[Pipeline]]:
models = []
for cid in cids:
if cid == 'ENSEMBLE':
models.append(deepcopy(self.run_history.ensemble.candidate.model))
else:
models.append(deepcopy(self.run_history.cid_to_candidate[cid].model))
return self.X.copy(), self.y.copy(), [m for m in models if m is not None]
def _load_model(self, cid: CandidateId) -> Tuple[pd.DataFrame, pd.Series, Pipeline]:
X, y, models = self._load_models([cid])
if len(models) == 0:
raise ValueError('Candidate {} does not exist or has no fitted model'.format(cid))
pipeline = models[0]
return X, y, pipeline
@staticmethod
def _get_intermediate_output(X, y, model, method):
df_handler = OutputCalculator()
_, outputs = df_handler.calculate_outputs(model, X, y, method=method)
return outputs
def _calculate_output(self, cid: CandidateId, method: str):
X, y, pipeline = self._load_model(cid)
steps = self._get_intermediate_output(X, y, pipeline, method=method)
return steps
def _get_equivalent_configs(self,
structure: Optional[CandidateStructure],
timestamp: float = np.inf) -> Tuple[List[Configuration], np.ndarray]:
configs = []
loss = []
hash_ = structure.hash if structure is not None else hash(str(None))
# join equivalent structures
for s in self.run_history.structures:
if s.hash == hash_:
configs += [c.config for c in s.configs if c.runtime['timestamp'] < timestamp]
loss += [c.loss for c in s.configs if c.runtime['timestamp'] < timestamp]
return configs, np.array(loss)
def _construct_fanova(self, sid: Optional[str]):
if sid is not None:
matches = filter(lambda s: s.cid == sid, self.run_history.structures)
structure = next(matches)
else:
structure = None
actual_cs = None
configs, loss = self._get_equivalent_configs(structure)
if structure is not None and structure.configspace is not None:
cs = structure.configspace
else:
cs = self.run_history.default_configspace
try:
# noinspection PyUnresolvedReferences
actual_cs = structure.pipeline.configuration_space
except AttributeError:
pass
f, X = HPImportance.construct_fanova(cs, configs, loss)
if X.shape[0] < 2:
raise ValueError('Not enough evaluated configurations to calculate hyperparameter importance.')
return f, X, actual_cs
# Endpoints for internal communication
@as_json
def _output_description(self, cid: CandidateId):
with pd.option_context('display.max_columns', 30, 'display.max_rows', 10):
return self._calculate_output(cid, DESCRIPTION)
@as_json
def _output_complete(self, cid: CandidateId):
with pd.option_context('display.max_columns', 1024, 'display.max_rows', 30, 'display.min_rows', 20):
return self._calculate_output(cid, COMPLETE)
@as_json
def _performance_data(self, cid: CandidateId):
X, y, pipeline = self._load_model(cid)
details = ModelDetails()
duration, val_score, report, accuracy, cm = details.calculate_performance_data(X, y, pipeline,
self.run_history.meta.metric)
return {
'duration': duration,
'val_score': float(val_score),
'report': {
|
np.asscalar(key)
|
numpy.asscalar
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from matplotlib import pyplot as plt
from scipy import ndimage
from scipy.signal import find_peaks
#network parameters
N = 1000
periodic = True
m = 1; #CV = 1/sqrt(m)
x_prefs = np.arange(1,N+1)/N; #inherited location preferences (m)
spiking = False
#FF input
beta_vel = 1.5; #velocity gain
beta_0 = 70; #uniform input
alpha = 1000; #weight imbalance parameter
gamma = 1.05/100; #Center-surround weight params
beta = 1/100; #Center-surround weight params
#temporal parameters
eta = 10**(-6)
T = 10; #length of integration time blocks (s)
dt = 1/2000; #step size of numerical integration (s)
#tau_s = np.concatenate((np.linspace(1,2,N)*1000/30,np.linspace(1,2,N)*1000/30),axis=0); #synaptic time constant (s)
#tau_s = np.expand_dims(tau_s,axis=1)
def period_calc(x,sigma):
x_g1d = ndimage.gaussian_filter1d(x, sigma)
peaks,_ = find_peaks(x_g1d)
if(len(peaks)==0):
return 0,0
diifs = np.zeros(len(peaks)-1)
for i in range(0,len(diifs)):
diifs[i] = peaks[i+1]-peaks[i]
return np.mean(diifs),x_g1d
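# Example (hypothetical usage, not part of the original script): estimate the grid period
# of a smoothed population-activity profile via
#   period, smoothed = period_calc(activity_profile, sigma=5)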
tau_s = 1000/30;
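# indices of 'defective' neurons (10% of the population); presumably used to perturb the network later in the script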
defects = np.random.randint(1,999,int(0.1*N))
#gradient of velocity feedforward input (applied on beta_vel)
#grad = np.linspace(.1,10,N)
grad = np.linspace(1,1,N)
#Graphing parameters
bins = np.linspace(0+.01,1-.01,50);
# Trajectory Data (Sinusoidal)
x = (np.sin(np.linspace(dt,T,int(T/dt))*2*np.pi/10)+1)/2;
v = np.zeros((int(T/dt)));
for i in range(0,int(T/dt)):
v[i] = (x[i]-x[i-1])/dt;
v = -np.ones(int(T/dt))*0.2
z = np.linspace(-N/2,N/2-1,N);
z = np.expand_dims(z,1);
# Feed forward network input
if (periodic == 0):
# gaussian FF input for aperiodic network
envelope = np.exp(-4*(z/(800))**2);
else:
envelope = np.ones((N,1));
s_prev = np.zeros((2*N,1)); #Population activity
spk = np.zeros((2*N,int(T/dt))); #Total spiking
spk_count = np.zeros((2*N,1)); #Current spiking
# Weight setup
w_RR = np.zeros((N,N));
w_LL = np.zeros((N,N));
w_RL = np.zeros((N,N));
w_LR = np.zeros((N,N));
W_RR = np.zeros((N,N));
W_LL = np.zeros((N,N));
W_RL = np.zeros((N,N));
W_LR =
|
np.zeros((N,N))
|
numpy.zeros
|
import cartopy.crs as ccrs
from shapely.geometry import MultiPoint
import numpy as np
import matplotlib.pyplot as plt
import geopandas as gpd
from zipfile import ZipFile
import tempfile
import shapely
import rasterio.transform
import rasterio.features
import rasterio.warp
import scipy.ndimage
import requests
from io import BytesIO
import aljpy
from . import webcat
import json
from pathlib import Path
locations = Path('locations.json')
if locations.exists():
LOCATIONS = json.loads(locations.read_text())
else:
LOCATIONS = {}
def as_indices(coords, img, extent, **kwargs):
"""Coords should be (lat, lon)"""
h, w = img.shape[:2]
x1, x2, y1, y2 = extent
proj = np.array(ccrs.Mercator.GOOGLE.project_geometry(MultiPoint(coords[..., ::-1])))
# Measured from bottom cause the origin's always 'lower'
j = (w*(proj[:, 0] - x1)/(x2 - x1))
i = (h*(proj[:, 1] - y1)/(y2 - y1))
indices = np.stack([i, j], -1).astype(int)
return indices
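# Example (hypothetical usage): map a single (lat, lon) pair onto pixel indices of a basemap dict
#   ij = as_indices(np.array([[51.5, -0.1]]), **mapdata)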
def lookup(listings, mapdata, interval=5):
coords = np.stack([listings['latitude'], listings['longitude']], -1)
indices = as_indices(coords, **mapdata)
h, w = mapdata['img'].shape[:2]
b = mapdata['img'][indices[:, 0].clip(0, h-1), indices[:, 1].clip(0, w-1)]
b[(indices[:, 0] < 0) | (indices[:, 0] >= h)] = np.nan
b[(indices[:, 1] < 0) | (indices[:, 1] >= w)] = np.nan
return b
def transform(mapdata):
shape = mapdata['img'].shape[:2]
w, e, s, n = mapdata['extent']
t = rasterio.transform.from_bounds(w, s, e, n, *shape)
return t, shape
def distances(base, shp):
t, shape = transform(base)
# Flip it because rasterio expects a top origin
img = rasterio.features.rasterize([shp], out_shape=shape, transform=t)[::-1]
# Hand-calculated this scale. Should calculate it explicitly really.
dist = 22*scipy.ndimage.distance_transform_edt(1 - img)
time = dist/(60*1.5)
return {'img': time, 'extent': base['extent'], 'origin': base['origin']}
@aljpy.autocache('')
def green_spaces(base, width=250):
"""From: https://geospatialwandering.wordpress.com/2015/05/22/open-spaces-shapefile-for-london """
url = 'http://download1648.mediafire.com/uagkonyt1k3g/uvvwp9hjiatqyss/Green+spaces+London.zip'
r = requests.get(url)
with ZipFile(BytesIO(r.content)) as zf, \
tempfile.TemporaryDirectory() as tmp:
zf.extractall(tmp)
shp = gpd.read_file(tmp + '/Green spaces London/Green_spaces_excluding_private.shp')
shp = shp.geometry[shp.geometry.area > width**2]
shp = shapely.ops.unary_union(shp.to_crs(ccrs.Mercator.GOOGLE.proj4_params))
return distances(base, shp)
@aljpy.autocache()
def _town_centers():
url = "https://data.london.gov.uk/download/town-centre-locations/50e12a40-90c4-4a46-af20-9891d1441a5c/LP_2016_town_centre_points.zip"
r = requests.get(url)
with ZipFile(BytesIO(r.content)) as zf, \
tempfile.TemporaryDirectory() as tmp:
zf.extractall(tmp)
shp = gpd.read_file(tmp + '/LP_2016_town_centre_points.shp')
return shp
@aljpy.autocache('')
def town_centers(base):
shp = _town_centers()
shp = shp[shp['Classifi_1'].isin(['International', 'Metropolitan', 'Major', 'District'])]
shp = shp.geometry.to_crs(ccrs.Mercator.GOOGLE.proj4_params)
shp = shapely.ops.unary_union(shp)
return distances(base, shp)
def reproject(ref, *mapdata):
dst_transform, dst_shape = transform(ref)
crs = ccrs.Mercator.GOOGLE.proj4_params
dsts = []
for m in mapdata:
src_transform, _ = transform(m)
dst = np.zeros(dst_shape)
rasterio.warp.reproject(m['img'], dst,
src_transform=src_transform, dst_transform=dst_transform,
src_crs=crs, dst_crs=crs)
dsts.append(dst)
return
|
np.stack(dsts)
|
numpy.stack
|
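In the reproject helper above, the completion uses numpy.stack to pack the reprojected rasters into a single array with a new leading axis, one slice per input map. A minimal illustration:

import numpy as np

a = np.zeros((4, 5))
b = np.ones((4, 5))
stacked = np.stack([a, b])   # shape (2, 4, 5): new leading axis, one slice per raster
print(stacked.shape)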
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library to evaluate Inception on a single GPU.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from PIL import Image
from inception.slim import slim
import numpy as np
import tensorflow as tf
import math
import os.path
import scipy.misc
# import time
# import scipy.io as sio
# from datetime import datetime
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('checkpoint_dir',
'../inception_finetuned_models/birds_valid299/model.ckpt',
"""Path where to read model checkpoints.""")
tf.app.flags.DEFINE_string('image_folder', '', """Path where to load the images """)
tf.app.flags.DEFINE_integer('num_classes', 50, # 20 for flowers
"""Number of classes """)
tf.app.flags.DEFINE_integer('splits', 10,
"""Number of splits """)
tf.app.flags.DEFINE_integer('batch_size', 64, "batch size")
tf.app.flags.DEFINE_integer('gpu', 0, "The ID of GPU to use")
# Batch normalization. Constant governing the exponential moving average of
# the 'global' mean and variance for all activations.
BATCHNORM_MOVING_AVERAGE_DECAY = 0.9997
# The decay to use for the moving average.
MOVING_AVERAGE_DECAY = 0.9999
fullpath = FLAGS.image_folder
print(fullpath)
def preprocess(img):
# print('img', img.shape, img.max(), img.min())
# img = Image.fromarray(img, 'RGB')
if len(img.shape) == 2:
img = np.resize(img, (img.shape[0], img.shape[1], 3))
img = scipy.misc.imresize(img, (299, 299, 3), interp='bilinear')
img = img.astype(np.float32)
# [0, 255] --> [0, 2] --> [-1, 1]
img = img / 127.5 - 1.
# print('img', img.shape, img.max(), img.min())
return np.expand_dims(img, 0)
def get_inception_score(sess, images, pred_op):
splits = FLAGS.splits
# assert(type(images) == list)
assert(type(images[0]) == np.ndarray)
assert(len(images[0].shape) == 3)
assert(np.max(images[0]) > 10)
assert(np.min(images[0]) >= 0.0)
bs = FLAGS.batch_size
preds = []
num_examples = len(images)
n_batches = int(math.floor(float(num_examples) / float(bs)))
indices = list(np.arange(num_examples))
np.random.shuffle(indices)
for i in range(n_batches):
inp = []
# print('i*bs', i*bs)
for j in range(bs):
if (i*bs + j) == num_examples:
break
img = images[indices[i*bs + j]]
# print('*****', img.shape)
img = preprocess(img)
inp.append(img)
# print("%d of %d batches" % (i, n_batches))
# inp = inps[(i * bs):min((i + 1) * bs, len(inps))]
inp = np.concatenate(inp, 0)
# print('inp', inp.shape)
pred = sess.run(pred_op, {'inputs:0': inp})
preds.append(pred)
# if i % 100 == 0:
# print('Batch ', i)
# print('inp', inp.shape, inp.max(), inp.min())
preds = np.concatenate(preds, 0)
scores = []
for i in range(splits):
istart = i * preds.shape[0] // splits
iend = (i + 1) * preds.shape[0] // splits
part = preds[istart:iend, :]
kl = (part * (np.log(part) -
np.log(np.expand_dims(np.mean(part, 0), 0))))
kl = np.mean(
|
np.sum(kl, 1)
|
numpy.sum
|
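The completion sums the per-class KL terms over axis 1, giving one KL divergence value per sample; in the usual Inception Score formulation the score is then the exponential of the mean of those per-sample sums. A small sketch with random softmax-like outputs (not real model predictions):

import numpy as np

preds = np.random.dirichlet(np.ones(10), size=64)   # fake class probabilities, shape (64, 10)
marginal = np.mean(preds, 0, keepdims=True)         # marginal p(y)
kl = preds * (np.log(preds) - np.log(marginal))     # per-class KL terms
per_sample_kl = np.sum(kl, 1)                       # KL(p(y|x) || p(y)) for each sample
print(np.exp(np.mean(per_sample_kl)))               # Inception-Score-style aggregate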
import keras
from keras.models import Model
from keras.layers import Input, Dense, Activation, Dropout
from keras.layers import Embedding
from keras.layers import Flatten
from keras.layers import LeakyReLU
from keras.layers import Multiply, Add
from keras.layers import Concatenate
from keras.layers import Lambda
from keras import backend
from keras import optimizers
from keras import regularizers
from keras.utils import plot_model
import matplotlib.pyplot as plt
import pandas as pd
import copy
import os
from datetime import datetime
class KerasBase:
def __init__(self):
return
@staticmethod
def make_output_dir(base_dir_name=os.path.join('.','result'), dir_name='result', with_datetime=True):
'''
make output directory
return output directory path
'''
dir_path = dir_name
if with_datetime:
dir_path = dir_path + '_' + datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
dir_path = os.path.join(base_dir_name, dir_path)
#
os.makedirs(dir_path, exist_ok=True)
return dir_path
@staticmethod
def model_visualize(model, save_filename, show_shapes=True, show_layer_names=True):
# https://keras.io/ja/visualization/
plot_model(model, to_file=save_filename, show_shapes=show_shapes, show_layer_names=show_layer_names)
return
@staticmethod
def model_summary(model, save_filename=None, print_console=True):
'''
save model summary to *.txt.
print model summary to console.
'''
# save model to txt
if save_filename is not None:
with open(save_filename, "w") as fp:
model.summary(print_fn=lambda x: fp.write(x + "\n"))
#
if print_console:
model.summary()
return
@staticmethod
def save_learning_history_acc_loss(histroy, val=True, filename=None, show=False):
'''
history : return of model.fit()
'''
fig = plt.figure()
#
epochs = range(len(histroy.history['acc']))
# acc
ax_acc = fig.add_subplot(2, 1, 1)
ax_acc.set_title('accuracy')
# train
label = 'acc'
ax_acc.plot(epochs, histroy.history[label], label=label)
if val:
# validation
label = 'val_acc'
ax_acc.plot(epochs, histroy.history[label], label=label)
ax_acc.legend()
# loss
ax_loss = fig.add_subplot(2, 1, 2)
ax_loss.set_title('loss')
# train
label = 'loss'
ax_loss.plot(epochs, histroy.history[label], label=label)
if val:
# validation
label = 'val_loss'
ax_loss.plot(epochs, histroy.history[label], label=label)
ax_loss.legend()
# save figure
if filename is not None:
fig.savefig(filename)
# show
if show:
fig.show()
return
@staticmethod
def save_learning_history_loss(histroy, val=True, filename=None, show=False):
'''
history : return of model.fit()
'''
fig = plt.figure()
#
epochs = range(len(histroy.history['loss']))
# loss
ax_loss = fig.add_subplot(1, 1, 1)
ax_loss.set_title('loss')
# train
label = 'loss'
ax_loss.plot(epochs, histroy.history[label], label=label)
if val:
# validation
label = 'val_loss'
ax_loss.plot(epochs, histroy.history[label], label=label)
ax_loss.legend()
# save figure
if filename is not None:
fig.savefig(filename)
# show
if show:
fig.show()
return
@staticmethod
def get_weights(model, layer_name=None):
if layer_name is None:
return model.get_weights()
else:
lyr = model.get_layer(name=layer_name)
return lyr.get_weights()
@staticmethod
def activation(act='relu'):
if act == 'relu':
return Activation('relu')
elif act == 'lrelu':
return LeakyReLU()
elif act == 'linear':
return Activation('linear')
else:
return Activation(act)
return
@staticmethod
def sum_layer(name=None):
def func_sum(x_):
return keras.backend.sum(x_, axis=-1, keepdims=True)
return Lambda(func_sum, output_shape=(1,), name=name)
@staticmethod
def mean_layer(name=None):
def func_mean(x_):
return keras.backend.mean(x_, axis=1, keepdims=True)
return Lambda(func_mean, output_shape=(1,), name=name)
class DeepMatrixFactorization:
def __init__(self, unique_user_num, unique_item_num, all_rating_mean=0, rating_scale=1):
self.unique_user_num = unique_user_num
self.unique_item_num = unique_item_num
self.all_rating_mean = all_rating_mean
self.rating_scale = rating_scale
self.model = None
self.history = None
self.__count_call_sum_model = 0
self.__count_call_mean_model = 0
return
#make model
def make_model_mf(self, user_bias=True, item_bias=True, cross_term=True, latent_num=10, cross_term_l2=0):
'''
make normal matrix factorization model with keras.
rating = all_mean + user_bias + item_bias + cross_term
'''
input_user_id = Input(shape=(1,), name='user_id')
input_item_id = Input(shape=(1,), name='item_id')
#user bias
u_bias = None
if user_bias:
u_bias = self.__bias_term(input_id=input_user_id, unique_id_num=self.unique_user_num, l2=0, latent_layer_name='user_bias')
#item bias
i_bias = None
if item_bias:
i_bias = self.__bias_term(input_id=input_item_id, unique_id_num=self.unique_item_num, l2=0, latent_layer_name='item_bias')
#cross term
crs_trm = None
if cross_term:
crs_u = self.__single_term(input_id=input_user_id, unique_id_num=self.unique_user_num, output_dim=latent_num, l2=cross_term_l2, latent_layer_name='user_latent')
crs_i = self.__single_term(input_id=input_item_id, unique_id_num=self.unique_item_num, output_dim=latent_num, l2=cross_term_l2, latent_layer_name='item_latent')
crs_trm = self.__cross_term(crs_u, crs_i, merge='sum')
#concatenate
def append_isNotNone(lst, v):
tls = copy.copy(lst)
if v is not None:
tls.append(v)
return tls
concats = []
concats = append_isNotNone(concats, u_bias)
concats = append_isNotNone(concats, i_bias)
concats = append_isNotNone(concats, crs_trm)
if len(concats) > 1:
y = Add(name='add_bias_crossTerm')(concats)
else:
y = concats[0]
# add mean
y = Lambda(lambda x: x*self.rating_scale + self.all_rating_mean, name='scaling')(y)
self.model = Model(inputs=[input_user_id, input_item_id], outputs=y)
return
def make_model_dmf_deepLatent(self, user_bias=True, item_bias=True, cross_term=True, latent_num=10, cross_term_l2=0, hidden_nodes_latent=[10], hidden_l2=[0], hidden_dropout_rates=[]):
'''
make matrix factorization model with deep latent-vector networks (keras).
rating = all_mean + user_bias + item_bias + cross_term
'''
input_user_id = Input(shape=(1,), name='user_id')
input_item_id = Input(shape=(1,), name='item_id')
#user bias
u_bias = None
if user_bias:
u_bias = self.__bias_term(input_id=input_user_id, unique_id_num=self.unique_user_num, l2=0, latent_layer_name='user_bias')
#item bias
i_bias = None
if item_bias:
i_bias = self.__bias_term(input_id=input_item_id, unique_id_num=self.unique_item_num, l2=0, latent_layer_name='item_bias')
#cross term
crs_trm = None
if cross_term:
crs_u = self.__single_term(input_id=input_user_id, unique_id_num=self.unique_user_num, output_dim=latent_num, l2=cross_term_l2, hidden_nodes=hidden_nodes_latent, hidden_l2s=hidden_l2, hidden_dropout_rates=hidden_dropout_rates, latent_layer_name='user_latent')
crs_i = self.__single_term(input_id=input_item_id, unique_id_num=self.unique_item_num, output_dim=latent_num, l2=cross_term_l2, hidden_nodes=hidden_nodes_latent, hidden_l2s=hidden_l2, hidden_dropout_rates=hidden_dropout_rates, latent_layer_name='item_latent')
crs_trm = self.__cross_term(crs_u, crs_i, merge='sum')
#concatenate
def append_isNotNone(lst, v):
tls = copy.copy(lst)
if v is not None:
tls.append(v)
return tls
concats = []
concats = append_isNotNone(concats, u_bias)
concats = append_isNotNone(concats, i_bias)
concats = append_isNotNone(concats, crs_trm)
if len(concats) > 1:
y = Add(name='add_bias_crossTerm')(concats)
else:
y = concats[0]
# add mean
y = Lambda(lambda x: x*self.rating_scale + self.all_rating_mean, name='scaling')(y)
self.model = Model(inputs=[input_user_id, input_item_id], outputs=y)
return
def make_model_dmf_deepCrossterm(self, user_bias=True, item_bias=True, cross_term=True, latent_num=10, cross_term_l2=0, hidden_nodes_crossterm=[10], hidden_l2=[0], hidden_dropout_rates=[]):
'''
make matrix factorization model with a deep cross-term network (keras).
rating = all_mean + user_bias + item_bias + cross_term
'''
input_user_id = Input(shape=(1,), name='user_id')
input_item_id = Input(shape=(1,), name='item_id')
#user bias
u_bias = None
if user_bias:
u_bias = self.__bias_term(input_id=input_user_id, unique_id_num=self.unique_user_num, l2=0, latent_layer_name='user_bias')
#item bias
i_bias = None
if item_bias:
i_bias = self.__bias_term(input_id=input_item_id, unique_id_num=self.unique_item_num, l2=0, latent_layer_name='item_bias')
#cross term
crs_trm = None
if cross_term:
crs_u = self.__single_term(input_id=input_user_id, unique_id_num=self.unique_user_num, output_dim=latent_num, l2=cross_term_l2, latent_layer_name='user_latent')
crs_i = self.__single_term(input_id=input_item_id, unique_id_num=self.unique_item_num, output_dim=latent_num, l2=cross_term_l2, latent_layer_name='item_latent')
crs_trm = self.__cross_term(crs_u, crs_i, merge='sum', hidden_nodes=hidden_nodes_crossterm, hidden_l2s=hidden_l2, hidden_dropout_rates=hidden_dropout_rates)
#concatenate
def append_isNotNone(lst, v):
tls = copy.copy(lst)
if v is not None:
tls.append(v)
return tls
concats = []
concats = append_isNotNone(concats, u_bias)
concats = append_isNotNone(concats, i_bias)
concats = append_isNotNone(concats, crs_trm)
if len(concats) > 1:
y = Add(name='add_bias_crossTerm')(concats)
else:
y = concats[0]
# add mean
y = Lambda(lambda x: x*self.rating_scale + self.all_rating_mean, name='scaling')(y)
self.model = Model(inputs=[input_user_id, input_item_id], outputs=y)
return
def make_model_dmf_deepLatent_deepCrossterm(self, user_bias=True, item_bias=True, cross_term=True, latent_num=10, cross_term_l2=0, hidden_nodes_latent=[10], hidden_nodes_crossterm=[10], hidden_l2=[0], hidden_dropout_rates=[]):
'''
make matrix factorization model with deep latent-vector and deep cross-term networks (keras).
rating = all_mean + user_bias + item_bias + cross_term
'''
input_user_id = Input(shape=(1,), name='user_id')
input_item_id = Input(shape=(1,), name='item_id')
#user bias
u_bias = None
if user_bias:
u_bias = self.__bias_term(input_id=input_user_id, unique_id_num=self.unique_user_num, l2=0, latent_layer_name='user_bias')
#item bias
i_bias = None
if item_bias:
i_bias = self.__bias_term(input_id=input_item_id, unique_id_num=self.unique_item_num, l2=0, latent_layer_name='item_bias')
#cross term
crs_trm = None
if cross_term:
crs_u = self.__single_term(input_id=input_user_id, unique_id_num=self.unique_user_num, output_dim=latent_num, l2=cross_term_l2, hidden_nodes=hidden_nodes_latent, hidden_dropout_rates=hidden_dropout_rates, latent_layer_name='user_latent')
crs_i = self.__single_term(input_id=input_item_id, unique_id_num=self.unique_item_num, output_dim=latent_num, l2=cross_term_l2, hidden_nodes=hidden_nodes_latent, hidden_dropout_rates=hidden_dropout_rates, latent_layer_name='item_latent')
crs_trm = self.__cross_term(crs_u, crs_i, merge='sum', hidden_nodes=hidden_nodes_crossterm, hidden_l2s=hidden_l2, hidden_dropout_rates=hidden_dropout_rates)
#concatenate
def append_isNotNone(lst, v):
tls = copy.copy(lst)
if v is not None:
tls.append(v)
return tls
concats = []
concats = append_isNotNone(concats, u_bias)
concats = append_isNotNone(concats, i_bias)
concats = append_isNotNone(concats, crs_trm)
if len(concats) > 1:
y = Add(name='add_bias_crossTerm')(concats)
else:
y = concats[0]
# add mean
y = Lambda(lambda x: x*self.rating_scale + self.all_rating_mean, name='scaling')(y)
self.model = Model(inputs=[input_user_id, input_item_id], outputs=y)
return
def make_model_dmf_residualDeepCrossterm(self, user_bias=True, item_bias=True, cross_term=True, latent_num=10, cross_term_l2=0, hidden_nodes_crossterm=[10], hidden_l2=[0], hidden_dropout_rates=[]):
'''
make matrix factorization model with a residual deep cross-term network (keras).
rating = all_mean + user_bias + item_bias + cross_term
'''
input_user_id = Input(shape=(1,), name='user_id')
input_item_id = Input(shape=(1,), name='item_id')
#user bias
u_bias = None
if user_bias:
u_bias = self.__bias_term(input_id=input_user_id, unique_id_num=self.unique_user_num, l2=0, latent_layer_name='user_bias')
#item bias
i_bias = None
if item_bias:
i_bias = self.__bias_term(input_id=input_item_id, unique_id_num=self.unique_item_num, l2=0, latent_layer_name='item_bias')
#cross term
crs_trm = None
res_crs_trm = None
if cross_term:
crs_u = self.__single_term(input_id=input_user_id, unique_id_num=self.unique_user_num, output_dim=latent_num, l2=cross_term_l2, latent_layer_name='user_latent')
crs_i = self.__single_term(input_id=input_item_id, unique_id_num=self.unique_item_num, output_dim=latent_num, l2=cross_term_l2, latent_layer_name='item_latent')
res_crs_trm = self.__res_cross_term(crs_u, crs_i, merge='sum', hidden_nodes=hidden_nodes_crossterm, hidden_l2s=hidden_l2, hidden_dropout_rates=hidden_dropout_rates)
#concatenate
def append_isNotNone(lst, v):
tls = copy.copy(lst)
if v is not None:
tls.append(v)
return tls
concats = []
concats = append_isNotNone(concats, u_bias)
concats = append_isNotNone(concats, i_bias)
concats = append_isNotNone(concats, res_crs_trm)
if len(concats) > 1:
y = Add(name='add_bias_crossTerm')(concats)
else:
y = concats[0]
# add mean
y = Lambda(lambda x: x*self.rating_scale + self.all_rating_mean, name='scaling')(y)
self.model = Model(inputs=[input_user_id, input_item_id], outputs=y)
return
#model of bias term
def __single_term(self, input_id, unique_id_num, output_dim=1,
hidden_nodes=[], activation='lrelu', activation_last='linear',
l2=0, hidden_l2s=[],
dropout_rate=0, hidden_dropout_rates=[],
latent_layer_name=None):
'''
input -> embedding -> flatten -> dropout
-> hidden_layer
(-> dense -> activation -> dropout -> ... -> dense -> activation -> dropout
-> dense -> activation_last -> dropout)
-> output
'''
#
hidden_nodes_ = copy.copy(hidden_nodes)
hidden_nodes_.append(output_dim)
#
hl = input_id
#
for ih, h_dim in enumerate(hidden_nodes_):
# first layer
if ih == 0:
# embedding layer
# input_shape = [batch_size, unique_id_num+1]
# output_shape = [batch_size, input_length, output_dim]
hl = Embedding(input_dim=unique_id_num,
output_dim=hidden_nodes_[0],
#input_length=1,
embeddings_regularizer=regularizers.l2(l2),
name=latent_layer_name
)(hl)
# flatten
hl = Flatten()(hl)
#dropout
hl = Dropout(dropout_rate)(hl)
# 2~ layer
else:
l2_h = 0 if len(hidden_l2s)==0 else hidden_l2s[ih-1]
# hidden layer
hl = Dense(h_dim, kernel_regularizer=regularizers.l2(l2_h))(hl)
#activation
act = activation if ih != len(hidden_nodes_)-1 else activation_last
hl = KerasBase.activation(act)(hl)
#dropout
drp_rt = 0 if len(hidden_dropout_rates)==0 else hidden_dropout_rates[ih-1]
hl = Dropout(drp_rt)(hl)
return hl
def __bias_term(self, input_id, unique_id_num, l2=0, latent_layer_name=None):
'''
input -> embedding -> flatten
-> output
'''
bias = self.__single_term(input_id=input_id, unique_id_num=unique_id_num, output_dim=1,
hidden_nodes=[], activation='lrelu', activation_last='linear',
l2=l2, hidden_l2s=[],
dropout_rate=0, hidden_dropout_rates=[],
latent_layer_name=latent_layer_name)
return bias
#model of cross term
def __cross_term(self, input1, input2, merge='sum',
hidden_nodes=[], activation='lrelu', activation_last='lrelu',
hidden_l2s=[],
dropout_rate=0, hidden_dropout_rates=[]):
'''
input1 and input2 must be already embedded.
(input1, input2) -> Multiply -> dropout
-> hidden_layer
(-> dense -> activation -> dropout -> ... -> dense -> activation -> dropout
-> dense -> activation_last -> dropout)
-> merge(ex. sum, mean)
-> output
'''
multiplied = Multiply()([input1, input2])
#hidden layer
hl = multiplied
for ih, h_dim in enumerate(hidden_nodes):
l2_h = 0 if len(hidden_l2s)==0 else hidden_l2s[ih]
# dense
hl = Dense(h_dim, kernel_regularizer=regularizers.l2(l2_h))(hl)
# activation
act = activation if ih != len(hidden_nodes)-1 else activation_last
hl = KerasBase.activation(act)(hl)
# dropout
drp_rt = 0 if len(hidden_dropout_rates)==0 else hidden_dropout_rates[ih]
hl = Dropout(drp_rt)(hl)
#merge layer
if merge=='sum':
self.__count_call_sum_model += 1
crs_trm = KerasBase.sum_layer(name='sum' + str(self.__count_call_sum_model))(hl)
elif merge=='mean':
self.__count_call_mean_model += 1
crs_trm = KerasBase.mean_layer(name='mean' + str(self.__count_call_mean_model))(hl)
return crs_trm
def __res_cross_term(self, input1, input2, merge='sum',
hidden_nodes=[], activation='lrelu', activation_last='lrelu',
hidden_l2s=[],
dropout_rate=0, hidden_dropout_rates=[]):
'''
input1 and input2 must be already embedded.
(input1, input2) -> Multiply -> dropout
-> hidden_layer
(-> dense -> activation -> dropout -> ... -> dense -> activation -> dropout
-> dense -> activation_last -> dropout)
-> add residual (Multiply output + hidden output)
-> merge(ex. sum, mean)
-> output
'''
multiplied = Multiply()([input1, input2])
#hidden layer
hl = multiplied
for ih, h_dim in enumerate(hidden_nodes):
l2_h = 0 if len(hidden_l2s)==0 else hidden_l2s[ih]
# dense
hl = Dense(h_dim, kernel_regularizer=regularizers.l2(l2_h))(hl)
# activation
act = activation if ih != len(hidden_nodes)-1 else activation_last
hl = KerasBase.activation(act)(hl)
# dropout
drp_rt = 0 if len(hidden_dropout_rates)==0 else hidden_dropout_rates[ih]
hl = Dropout(drp_rt)(hl)
#add
hl = Add()([multiplied, hl])
#merge layer
if merge=='sum':
self.__count_call_sum_model += 1
crs_trm = KerasBase.sum_layer(name='sum' + str(self.__count_call_sum_model))(hl)
elif merge=='mean':
self.__count_call_mean_model += 1
crs_trm = KerasBase.mean_layer(name='mean' + str(self.__count_call_mean_model))(hl)
return crs_trm
def compile(self, optimizer='adam', loss='mean_squared_error'):
self.model.compile(optimizer=optimizer, loss=loss)
return
def fit(self, user_ids, item_ids, rating, batch_size, epochs,
user_ids_val=None, item_ids_val=None, rating_val=None):
'''
user_ids and item_ids must be 0, 1, 2, 3, ...
'''
# validation data
val_data = None
if (user_ids_val is not None) and (item_ids_val is not None) and (rating_val is not None):
val_data = ([user_ids_val, item_ids_val], rating_val)
# fit
self.history = self.model.fit(x=[user_ids, item_ids], y=rating,
batch_size=batch_size, epochs=epochs, verbose=1,
validation_data=val_data)
return
def predict(self, user_ids, item_ids):
return self.model.predict([user_ids, item_ids])[:,0]
def save_bias_latent(self, output_dir):
self.__save_Embedding(output_dir=output_dir, layer_name='user_bias')
self.__save_Embedding(output_dir=output_dir, layer_name='item_bias')
self.__save_Embedding(output_dir=output_dir, layer_name='user_latent')
self.__save_Embedding(output_dir=output_dir, layer_name='item_latent')
return
def __save_Embedding(self, output_dir, layer_name):
weights = KerasBase.get_weights(self.model, layer_name=layer_name)[0].copy()
#
id_num = weights.shape[0]
latent_num = weights.shape[1]
#
oup = weights
# id
ids = np.arange(id_num)[:,np.newaxis]
oup = np.concatenate([ids, oup], axis=1)
# header
header = []
header.append('id')
for ilt in range(latent_num):
header.append('latent' + str(ilt))
header = np.array(header)[np.newaxis,:]
oup = np.concatenate([header, oup])
# to pandas
oup = pd.DataFrame(oup)
#save
#np.savetxt(os.path.join(output_dir, layer_name + '.csv'), oup, delimiter=',')
oup.to_csv(os.path.join(output_dir, layer_name + '.csv'), header=False, index=False)
return
import numpy as np
class MatrixFactorization:
def __init__(self, latent_num):
self.latent_num = latent_num
# r = mu + u_bias + i_bias + dot(u_latent,i_latent)
self.mu = None
self.u_bias = None
self.i_bias = None
self.u_latent = None
self.i_latent = None
# id_index_dict
self.user_id_index_dict = None
self.item_id_index_dict = None
return
def fit(self, user_ids, item_ids, rating, batch_size, epochs, lerning_rate=0.1, l2=0):
print('run MatrixFactorization fit')
# num
user_num = len(np.unique(user_ids))
item_num = len(np.unique(item_ids))
sample_num = len(user_ids)
# id_index_dict
self.user_id_index_dict = self.id_index_dict(np.unique(user_ids))
self.item_id_index_dict = self.id_index_dict(np.unique(item_ids))
# make index
user_idxs = self.convert_ids_to_index(user_ids, self.user_id_index_dict)
item_idxs = self.convert_ids_to_index(item_ids, self.item_id_index_dict)
# mu
self.mu = np.average(rating)
# initialization
self.u_bias = self.__initialization_bias(user_num)
self.i_bias = self.__initialization_bias(item_num)
self.u_latent = self.__initialization_latent(user_num, self.latent_num)
self.i_latent = self.__initialization_latent(item_num, self.latent_num)
# calc
errors_in_epoch = self.__minibatch_sgd(user_idxs, item_idxs, rating, batch_size, epochs, lerning_rate, l2)
return
def __initialization_bias(self, id_num):
# initialize in [-0.05, 0.05]
b = (np.random.rand(id_num) - 0.5) * 0.1
return b
def __initialization_latent(self, id_num, latent_num):
'''
return latent (shape=[id_num, latent_num])
'''
# initialize in [-0.05, 0.05]
lt = (np.random.rand(id_num, latent_num) - 0.5) * 0.1
return lt
def __minibatch_sgd(self, user_idxs, item_idxs, rating, batch_size, epochs, lerning_rate=0.1, l2=0, verbose=True):
#
sample_num = len(user_idxs)
steps_per_epoch = int(np.ceil(len(user_idxs) / batch_size))
loss_in_epoch = []
error_in_epoch = []
## for epoch
for iep in range(epochs):
rand_sample_idxs = np.random.permutation(np.arange(sample_num))
## for steps_per_epoch
for istp in range(steps_per_epoch):
# indexs in this mini batch
batch_idxs = rand_sample_idxs[batch_size*istp : batch_size*(istp+1)]
# update
delta_u_bias, delta_i_bias, delta_u_latent, delta_i_latent = self.__delta_param(user_idxs[batch_idxs], item_idxs[batch_idxs], rating[batch_idxs])
self.u_bias += lerning_rate * (delta_u_bias - l2 * self.u_bias)
self.i_bias += lerning_rate * (delta_i_bias - l2 * self.i_bias)
self.u_latent += lerning_rate * (delta_u_latent - l2 * self.u_latent)
self.i_latent += lerning_rate * (delta_i_latent - l2 * self.i_latent)
# recording error
loss_in_epoch.append(self.__loss_function(user_idxs, item_idxs, rating, l2))
error_in_epoch.append(self.__error_function(user_idxs, item_idxs, rating))
# verbose
print(' epoch {0}: error = {1:.4f}, loss = {2:.4f}'.format(iep+1, error_in_epoch[iep], loss_in_epoch[iep]))
return error_in_epoch
def __delta_param(self, user_idxs, item_idxs, rating):
#
delta_u_bias = np.zeros_like(self.u_bias)
delta_i_bias = np.zeros_like(self.i_bias)
delta_u_latent = np.zeros_like(self.u_latent)
delta_i_latent = np.zeros_like(self.i_latent)
#
loss = rating - self.__calc_rating(user_idxs, item_idxs)
#
num_sample = len(user_idxs)
#
u_counter = np.zeros_like(self.u_bias)
i_counter = np.zeros_like(self.i_bias)
# calculate delta
for ismp in range(num_sample):
u_idx = user_idxs[ismp]
i_idx = item_idxs[ismp]
ls = loss[ismp]
#
delta_u_bias[u_idx] += ls
delta_i_bias[i_idx] += ls
delta_u_latent[u_idx] += ls * self.i_latent[i_idx]
delta_i_latent[i_idx] += ls * self.u_latent[u_idx]
#
u_counter[u_idx] += 1
i_counter[i_idx] += 1
# average delta
u_counter = np.maximum(u_counter, 1)
i_counter =
|
np.maximum(i_counter, 1)
|
numpy.maximum
|
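In the __delta_param averaging step above, the completion clamps the per-id update counters with numpy.maximum, presumably so that the "# average delta" step never divides by zero for users or items that did not appear in the mini-batch. A tiny illustration:

import numpy as np

counts = np.array([0, 3, 1, 0])
deltas = np.array([0.0, 0.6, 0.2, 0.0])
safe = np.maximum(counts, 1)   # elementwise max with 1; zero counts become 1
print(deltas / safe)           # safe averaging, no division-by-zero warning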
import numpy as np
from copy import copy, deepcopy
from contextlib import contextmanager
from ...util.event import Event
from ...util.misc import ensure_iterable
from .._base_layer import Layer
from .._register import add_to_viewer
from ..._vispy.scene.visuals import Mesh, Markers, Compound
from ..._vispy.scene.visuals import Line as VispyLine
from vispy.color import get_color_names
from .view import QtShapesLayer
from .view import QtShapesControls
from ._constants import (Mode, BOX_LINE_HANDLE, BOX_LINE, BOX_TOP_CENTER,
BOX_CENTER, BOX_LEN, BOX_HANDLE, BOX_WITH_HANDLE,
BOX_TOP_LEFT, BOX_BOTTOM_RIGHT, BOX_BOTTOM_LEFT,
BACKSPACE)
from .shape_list import ShapeList
from .shape_util import create_box, point_to_lines
from .shapes import Rectangle, Ellipse, Line, Path, Polygon
@add_to_viewer
class Shapes(Layer):
"""Shapes layer.
Parameters
----------
data : np.array | list
List of np.array of data or np.array. Each element of the list
(or row of a 3D np.array) corresponds to one shape. If a 2D array is
passed it corresponds to just a single shape.
shape_type : string | list
String of shape shape_type, must be one of "{'line', 'rectangle',
'ellipse', 'path', 'polygon'}". If a list is supplied it must be the
same length as the length of `data` and each element will be applied to
each shape otherwise the same value will be used for all shapes.
edge_width : float | list
thickness of lines and edges. If a list is supplied it must be the same
length as the length of `data` and each element will be applied to each
shape otherwise the same value will be used for all shapes.
edge_color : str | tuple | list
If string can be any color name recognized by vispy or hex value if
starting with `#`. If array-like must be 1-dimensional array with 3 or
4 elements. If a list is supplied it must be the same length as
the length of `data` and each element will be applied to each shape
otherwise the same value will be used for all shapes.
face_color : str | tuple | list
If string can be any color name recognized by vispy or hex value if
starting with `#`. If array-like must be 1-dimensional array with 3 or
4 elements. If a list is supplied it must be the same length as
the length of `data` and each element will be applied to each shape
otherwise the same value will be used for all shapes.
opacity : float | list
Opacity of the shapes, must be between 0 and 1.
z_index : int | list
Specifier of z order priority. Shapes with higher z order are displayed
on top of others. If a list is supplied it must be the same length as
the length of `data` and each element will be applied to each shape
otherwise the same value will be used for all shapes.
name : str, keyword-only
Name of the layer.
Attributes
----------
data : ShapeList
Object containing all the shape data.
edge_width : float
thickness of lines and edges.
edge_color : str
Color of the shape edge.
face_color : str
Color of the shape face.
opacity : float
Opacity value between 0.0 and 1.0.
selected_shapes : list
List of currently selected shapes.
mode : Mode
Interactive mode.
Extended Summary
----------
_mode_history : Mode
Interactive mode captured on press of <space>.
_selected_shapes_history : list
List of currently selected captured on press of <space>.
_selected_shapes_stored : list
List of shapes selected when highlights were last rendered. Used to prevent rerendering the
same highlighted shapes when no data has changed.
_selected_box : None | np.ndarray
`None` if no shapes are selected, otherwise a 10x2 array of vertices of
the interaction box. The first 8 points are the corners and midpoints
of the box. The 9th point is the center of the box, and the last point
is the location of the rotation handle that can be used to rotate the
box.
_hover_shape : None | int
Index of any shape currently hovered over if any. `None` otherwise.
_hover_shape_stored : None | int
Index of any shape previously displayed as hovered over if any. `None`
otherwise. Used to prevent rerendering the same highlighted shapes when
no data has changed.
_hover_vertex : None | int
Index of any vertex currently hovered over if any. `None` otherwise.
_hover_vertex_stored : None | int
Index of any vertex previously displayed as hovered over if any. `None`
otherwise. Used to prevent rerendering the same highlighted shapes when
no data has changed.
_moving_shape : None | int
Index of any shape currently being moved if any. `None` otherwise.
_moving_vertex : None | int
Index of any vertex currently being moved if any. `None` otherwise.
_drag_start : None | np.ndarray
If a drag has been started and is in progress then a length 2 array of
the initial coordinates of the drag. `None` otherwise.
_drag_box : None | np.ndarray
If a drag box is being created to select shapes then this is a 2x2
array of the two extreme corners of the drag. `None` otherwise.
_drag_box_stored : None | np.ndarray
If a drag box is being created to select shapes then this is a 2x2
array of the two extreme corners of the drag that have previously been
rendered. `None` otherwise. Used to prevent rerendering the same
drag box when no data has changed.
_is_moving : bool
Bool indicating if any shapes are currently being moved.
_is_selecting : bool
Bool indicating if a drag box is currently being created in order to
select shapes.
_is_creating : bool
Bool indicating if any shapes are currently being created.
_fixed_aspect : bool
Bool indicating if aspect ratio of shapes should be preserved on
resizing.
_aspect_ratio : float
Value of aspect ratio to be preserved if `_fixed_aspect` is `True`.
_fixed_vertex : None | np.ndarray
If a scaling or rotation is in progress then a length 2 array of the
coordinates that are remaining fixed during the move. `None` otherwise.
_fixed_index : int
If a scaling or rotation is in progress then the index of the vertex of
the bounding box that is remaining fixed during the move. `None`
otherwise.
_cursor_coord : np.ndarray
Length 2 array of the current cursor position in Image coordinates.
_update_properties : bool
Bool indicating if properties are to allowed to update the selected
shapes when they are changed. Blocking this prevents circular loops
when shapes are selected and the properties are changed based on that
selection
_clipboard : list
List of shape objects that are to be used during a copy and paste.
_colors : list
List of supported vispy color names.
_vertex_size : float
Size of the vertices of the shapes and bounding box in Canvas
coordinates.
_rotation_handle_length : float
Length of the rotation handle of the bounding box in Canvas
coordinates.
_highlight_color : list
Length 3 list of color used to highlight shapes and the interaction
box.
_highlight_width : float
Width of the edges used to highlight shapes.
"""
_colors = get_color_names()
_vertex_size = 10
_rotation_handle_length = 20
_highlight_color = (0, 0.6, 1)
_highlight_width = 1.5
def __init__(self, data, *, shape_type='rectangle', edge_width=1,
edge_color='black', face_color='white', opacity=0.7,
z_index=0, name=None):
# Create a compound visual with the following four subvisuals:
# Markers: corresponding to the vertices of the interaction box or the
# shapes that are used for highlights.
# Lines: The lines of the interaction box used for highlights.
# Mesh: The mesh of the outlines for each shape used for highlights.
# Mesh: The actual meshes of the shape faces and edges
visual = Compound([Markers(), VispyLine(), Mesh(), Mesh()])
super().__init__(visual, name)
# Freeze refreshes to prevent drawing before the viewer is constructed
with self.freeze_refresh():
# Add the shape data
self.data = ShapeList()
self.add_shapes(data, shape_type=shape_type, edge_width=edge_width,
edge_color=edge_color, face_color=face_color,
opacity=opacity, z_index=z_index)
# The following shape properties are for the new shapes that will
# be drawn. Each shape has a corresponding property with the
# value for itself
if np.isscalar(edge_width):
self._edge_width = edge_width
else:
self._edge_width = 1
if type(edge_color) is str:
self._edge_color = edge_color
else:
self._edge_color = 'black'
if type(face_color) is str:
self._face_color = face_color
else:
self._face_color = 'white'
self._opacity = opacity
# update flags
self._need_display_update = False
self._need_visual_update = False
self._selected_shapes = []
self._selected_shapes_stored = []
self._selected_shapes_history = []
self._selected_box = None
self._hover_shape = None
self._hover_shape_stored = None
self._hover_vertex = None
self._hover_vertex_stored = None
self._moving_shape = None
self._moving_vertex = None
self._drag_start = None
self._fixed_vertex = None
self._fixed_aspect = False
self._aspect_ratio = 1
self._is_moving = False
self._fixed_index = 0
self._is_selecting = False
self._drag_box = None
self._drag_box_stored = None
self._cursor_coord = np.array([0, 0])
self._is_creating = False
self._update_properties = True
self._clipboard = []
self._mode = Mode.PAN_ZOOM
self._mode_history = self._mode
self._status = str(self._mode)
self._help = 'enter a selection mode to edit shape properties'
self.events.add(mode=Event,
edge_width=Event,
edge_color=Event,
face_color=Event)
self._qt_properties = QtShapesLayer(self)
self._qt_controls = QtShapesControls(self)
self.events.deselect.connect(lambda x: self._finish_drawing())
@property
def data(self):
"""ShapeList: object containing all the shape data
"""
return self._data
@data.setter
def data(self, data):
self._data = data
self.refresh()
@property
def edge_width(self):
"""float: width of edges in px
"""
return self._edge_width
@edge_width.setter
def edge_width(self, edge_width):
self._edge_width = edge_width
if self._update_properties:
index = self.selected_shapes
for i in index:
self.data.update_edge_width(i, edge_width)
self.refresh()
self.events.edge_width()
@property
def edge_color(self):
"""str: color of edges and lines
"""
return self._edge_color
@edge_color.setter
def edge_color(self, edge_color):
self._edge_color = edge_color
if self._update_properties:
index = self.selected_shapes
for i in index:
self.data.update_edge_color(i, edge_color)
self.refresh()
self.events.edge_color()
@property
def face_color(self):
"""str: color of faces
"""
return self._face_color
@face_color.setter
def face_color(self, face_color):
self._face_color = face_color
if self._update_properties:
index = self.selected_shapes
for i in index:
self.data.update_face_color(i, face_color)
self.refresh()
self.events.face_color()
@property
def opacity(self):
"""float: Opacity value between 0.0 and 1.0.
"""
return self._opacity
@opacity.setter
def opacity(self, opacity):
if not 0.0 <= opacity <= 1.0:
raise ValueError('opacity must be between 0.0 and 1.0; '
f'got {opacity}')
self._opacity = opacity
if self._update_properties:
index = self.selected_shapes
for i in index:
self.data.update_opacity(i, opacity)
self.refresh()
self.events.opacity()
@property
def selected_shapes(self):
"""list: list of currently selected shapes
"""
return self._selected_shapes
@selected_shapes.setter
def selected_shapes(self, selected_shapes):
self._selected_shapes = selected_shapes
self._selected_box = self.interaction_box(selected_shapes)
# Update properties based on selected shapes
face_colors = list(set([self.data.shapes[i]._face_color_name for i in
selected_shapes]))
if len(face_colors) == 1:
face_color = face_colors[0]
with self.block_update_properties():
self.face_color = face_color
edge_colors = list(set([self.data.shapes[i]._edge_color_name for i in
selected_shapes]))
if len(edge_colors) == 1:
edge_color = edge_colors[0]
with self.block_update_properties():
self.edge_color = edge_color
edge_width = list(set([self.data.shapes[i].edge_width for i in
selected_shapes]))
if len(edge_width) == 1:
edge_width = edge_width[0]
with self.block_update_properties():
self.edge_width = edge_width
opacities = list(set([self.data.shapes[i].opacity for i in
selected_shapes]))
if len(opacities) == 1:
opacity = opacities[0]
with self.block_update_properties():
self.opacity = opacity
@property
def mode(self):
"""MODE: Interactive mode. The normal, default mode is PAN_ZOOM, which
allows for normal interactivity with the canvas.
The SELECT mode allows for entire shapes to be selected, moved and
resized.
The DIRECT mode allows for shapes to be selected and their individual
vertices to be moved.
The VERTEX_INSERT and VERTEX_REMOVE modes allow for individual
vertices either to be added to or removed from shapes that are already
selected. Note that shapes cannot be selected in this mode.
The ADD_RECTANGLE, ADD_ELLIPSE, ADD_LINE, ADD_PATH, and ADD_POLYGON
modes all allow for their corresponding shape type to be added.
"""
return self._mode
@mode.setter
def mode(self, mode):
if mode == self._mode:
return
old_mode = self._mode
if mode == Mode.PAN_ZOOM:
self.cursor = 'standard'
self.interactive = True
self.help = 'enter a selection mode to edit shape properties'
elif mode in [Mode.SELECT, Mode.DIRECT]:
self.cursor = 'pointing'
self.interactive = False
self.help = ('hold <space> to pan/zoom, '
f'press <{BACKSPACE}> to remove selected')
elif mode in [Mode.VERTEX_INSERT, Mode.VERTEX_REMOVE]:
self.cursor = 'cross'
self.interactive = False
self.help = 'hold <space> to pan/zoom'
elif mode in [Mode.ADD_RECTANGLE, Mode.ADD_ELLIPSE, Mode.ADD_LINE]:
self.cursor = 'cross'
self.interactive = False
self.help = 'hold <space> to pan/zoom'
elif mode in [Mode.ADD_PATH, Mode.ADD_POLYGON]:
self.cursor = 'cross'
self.interactive = False
self.help = ('hold <space> to pan/zoom, '
'press <esc> to finish drawing')
else:
raise ValueError("Mode not recongnized")
self.status = str(mode)
self._mode = mode
draw_modes = ([Mode.SELECT, Mode.DIRECT, Mode.VERTEX_INSERT,
Mode.VERTEX_REMOVE])
self.events.mode(mode=mode)
if not (mode in draw_modes and old_mode in draw_modes):
self._finish_drawing()
self.refresh()
@contextmanager
def block_update_properties(self):
self._update_properties = False
yield
self._update_properties = True
def _get_shape(self):
"""Determines the shape of the vertex data.
"""
if len(self.data._vertices) == 0:
return [1, 1]
else:
return np.max(self.data._vertices, axis=0) + 1
def add_shapes(self, data, *, shape_type='rectangle', edge_width=1,
edge_color='black', face_color='white', opacity=0.7,
z_index=0):
"""Add shapes to the current layer.
Parameters
----------
data : np.array | list
List of np.array of data or np.array. Each element of the list
(or row of a 3D np.array) corresponds to one shape. If a 2D array
is passed it corresponds to just a single shape.
shape_type : string | list
String of shape shape_type, must be one of "{'line', 'rectangle',
'ellipse', 'path', 'polygon'}". If a list is supplied it must be
the same length as the length of `data` and each element will be
applied to each shape otherwise the same value will be used for all
shapes.
edge_width : float | list
thickness of lines and edges. If a list is supplied it must be the
same length as the length of `data` and each element will be
applied to each shape otherwise the same value will be used for all
shapes.
edge_color : str | tuple | list
If string can be any color name recognized by vispy or hex value if
starting with `#`. If array-like must be 1-dimensional array with 3
or 4 elements. If a list is supplied it must be the same length as
the length of `data` and each element will be applied to each shape
otherwise the same value will be used for all shapes.
face_color : str | tuple | list
If string can be any color name recognized by vispy or hex value if
starting with `#`. If array-like must be 1-dimensional array with 3
or 4 elements. If a list is supplied it must be the same length as
the length of `data` and each element will be applied to each shape
otherwise the same value will be used for all shapes.
opacity : float | list
Opacity of the shapes, must be between 0 and 1.
z_index : int | list
Specifier of z order priority. Shapes with higher z order are
displayed on top of others. If a list is supplied it must be the
same length as the length of `data` and each element will be
applied to each shape otherwise the same value will be used for all
shapes.
"""
if len(data) == 0:
return
if np.array(data[0]).ndim == 1:
# If a single array for a shape has been passed
if shape_type in self.data._types.keys():
shape_cls = self.data._types[shape_type]
shape = shape_cls(data, edge_width=edge_width,
edge_color=edge_color, face_color=face_color,
opacity=opacity, z_index=z_index)
else:
raise ValueError("""shape_type not recognized, must be one of
"{'line', 'rectangle', 'ellipse', 'path',
'polygon'}"
""")
self.data.add(shape)
else:
# Turn input arguments into iterables
shape_types = ensure_iterable(shape_type)
edge_widths = ensure_iterable(edge_width)
opacities = ensure_iterable(opacity)
z_indices = ensure_iterable(z_index)
edge_colors = ensure_iterable(edge_color, color=True)
face_colors = ensure_iterable(face_color, color=True)
for d, st, ew, ec, fc, o, z, in zip(data, shape_types, edge_widths,
edge_colors, face_colors,
opacities, z_indices):
shape_cls = self.data._types[st]
shape = shape_cls(d, edge_width=ew, edge_color=ec,
face_color=fc, opacity=o, z_index=z)
self.data.add(shape)
def _update(self):
"""Update the underlying visual.
"""
if self._need_display_update:
self._need_display_update = False
self._set_view_slice()
if self._need_visual_update:
self._need_visual_update = False
self._node.update()
def _refresh(self):
"""Fully refresh the underlying visual.
"""
self._need_display_update = True
self._update()
def _set_view_slice(self, indices=None):
"""Set the shape mesh data to the view.
Parameters
----------
indices : sequence of int or slice
Indices to slice with.
"""
z_order = self.data._mesh.triangles_z_order
faces = self.data._mesh.triangles[z_order]
colors = self.data._mesh.triangles_colors[z_order]
vertices = self.data._mesh.vertices
if len(faces) == 0:
self._node._subvisuals[3].set_data(vertices=None, faces=None)
else:
self._node._subvisuals[3].set_data(vertices=vertices, faces=faces,
face_colors=colors)
self._need_visual_update = True
self._set_highlight(force=True)
self._update()
def interaction_box(self, index):
"""Create the interaction box around a shape or list of shapes.
If a single index is passed then the bounding box will be inherited
from that shapes interaction box. If list of indices is passed it will
be computed directly.
Parameters
----------
index : int | list
Index of a single shape, or a list of shapes around which to
construct the interaction box
Returns
----------
box : np.ndarray
10x2 array of vertices of the interaction box. The first 8 points
are the corners and midpoints of the box in clockwise order
starting in the upper-left corner. The 9th point is the center of
the box, and the last point is the location of the rotation handle
that can be used to rotate the box
"""
if isinstance(index, (list, np.ndarray)):
if len(index) == 0:
box = None
elif len(index) == 1:
box = copy(self.data.shapes[index[0]]._box)
else:
indices = np.isin(self.data._index, index)
box = create_box(self.data._vertices[indices])
else:
box = copy(self.data.shapes[index]._box)
if box is not None:
rot = box[BOX_TOP_CENTER]
length_box = np.linalg.norm(box[BOX_BOTTOM_LEFT] -
box[BOX_TOP_LEFT])
if length_box > 0:
rescale = self._get_rescale()
r = self._rotation_handle_length*rescale
rot = rot-r*(box[BOX_BOTTOM_LEFT] -
box[BOX_TOP_LEFT])/length_box
box = np.append(box, [rot], axis=0)
return box
def _outline_shapes(self):
"""Finds outlines of any selected shapes including any shape hovered
over
Returns
----------
vertices : None | np.ndarray
Nx2 array of any vertices of outline or None
triangles : None | np.ndarray
Mx3 array of any indices of vertices for triangles of outline or
None
"""
if self._hover_shape is not None or len(self.selected_shapes) > 0:
if len(self.selected_shapes) > 0:
index = copy(self.selected_shapes)
if self._hover_shape is not None:
if self._hover_shape in index:
pass
else:
index.append(self._hover_shape)
index.sort()
else:
index = self._hover_shape
centers, offsets, triangles = self.data.outline(index)
rescale = self._get_rescale()
vertices = centers + rescale*self._highlight_width*offsets
else:
vertices = None
triangles = None
return vertices, triangles
def _compute_vertices_and_box(self):
"""Compute the location and properties of the vertices and box that
need to get rendered
Returns
----------
vertices : np.ndarray
Nx2 array of any vertices to be rendered as Markers
face_color : str
String of the face color of the Markers
edge_color : str
String of the edge color of the Markers and Line for the box
pos : np.ndarray
Nx2 array of vertices of the box that will be rendered using a
Vispy Line
width : float
Width of the box edge
"""
if len(self.selected_shapes) > 0:
if self.mode == Mode.SELECT:
# If in select mode just show the interaction bounding box
# including its vertices and the rotation handle
box = self._selected_box[BOX_WITH_HANDLE]
if self._hover_shape is None:
face_color = 'white'
elif self._hover_vertex is None:
face_color = 'white'
else:
face_color = self._highlight_color
edge_color = self._highlight_color
vertices = box
# Use a subset of the vertices of the interaction_box to plot
# the line around the edge
pos = box[BOX_LINE_HANDLE]
width = 1.5
elif self.mode in ([Mode.DIRECT, Mode.ADD_PATH, Mode.ADD_POLYGON,
Mode.ADD_RECTANGLE, Mode.ADD_ELLIPSE,
Mode.ADD_LINE, Mode.VERTEX_INSERT,
Mode.VERTEX_REMOVE]):
# If in one of these modes show the vertices of the shape itself
inds = np.isin(self.data._index, self.selected_shapes)
vertices = self.data._vertices[inds]
# If currently adding path don't show box over last vertex
if self.mode == Mode.ADD_PATH:
vertices = vertices[:-1]
if self._hover_shape is None:
face_color = 'white'
elif self._hover_vertex is None:
face_color = 'white'
else:
face_color = self._highlight_color
edge_color = self._highlight_color
pos = None
width = 0
else:
# Otherwise show nothing
vertices = np.empty((0, 2))
face_color = 'white'
edge_color = 'white'
pos = None
width = 0
elif self._is_selecting:
# If currently dragging a selection box just show an outline of
# that box
vertices = np.empty((0, 2))
edge_color = self._highlight_color
face_color = 'white'
box = create_box(self._drag_box)
width = 1.5
# Use a subset of the vertices of the interaction_box to plot
# the line around the edge
pos = box[BOX_LINE]
else:
# Otherwise show nothing
vertices = np.empty((0, 2))
face_color = 'white'
edge_color = 'white'
pos = None
width = 0
return vertices, face_color, edge_color, pos, width
def _set_highlight(self, force=False):
"""Render highlights of shapes including boundaries, vertices,
interaction boxes, and the drag selection box when appropriate
Parameters
----------
force : bool
Bool that forces a redraw to occur when `True`
"""
# Check if any shape or vertex ids have changed since last call
if (self.selected_shapes == self._selected_shapes_stored and
self._hover_shape == self._hover_shape_stored and
self._hover_vertex == self._hover_vertex_stored and
np.all(self._drag_box == self._drag_box_stored)) and not force:
return
self._selected_shapes_stored = copy(self.selected_shapes)
self._hover_shape_stored = copy(self._hover_shape)
self._hover_vertex_stored = copy(self._hover_vertex)
self._drag_box_stored = copy(self._drag_box)
# Compute the vertices and faces of any shape outlines
vertices, faces = self._outline_shapes()
self._node._subvisuals[2].set_data(vertices=vertices, faces=faces,
color=self._highlight_color)
# Compute the location and properties of the vertices and box that
# need to get rendered
(vertices, face_color, edge_color, pos,
width) = self._compute_vertices_and_box()
self._node._subvisuals[0].set_data(vertices, size=self._vertex_size,
face_color=face_color,
edge_color=edge_color,
edge_width=1.5, symbol='square',
scaling=False)
self._node._subvisuals[1].set_data(pos=pos, color=edge_color,
width=width)
def _finish_drawing(self):
"""Reset properties used in shape drawing so new shapes can be drawn.
"""
index = copy(self._moving_shape)
self._is_moving = False
self.selected_shapes = []
self._drag_start = None
self._drag_box = None
self._fixed_vertex = None
self._moving_shape = None
self._moving_vertex = None
self._hover_shape = None
self._hover_vertex = None
if self._is_creating is True and self.mode == Mode.ADD_PATH:
vertices = self.data._vertices[self.data._index == index]
if len(vertices) <= 2:
self.data.remove(index)
else:
self.data.edit(index, vertices[:-1])
if self._is_creating is True and self.mode == Mode.ADD_POLYGON:
vertices = self.data._vertices[self.data._index == index]
if len(vertices) <= 2:
self.data.remove(index)
self._is_creating = False
self.refresh()
def remove_selected(self):
"""Remove any selected shapes.
"""
to_remove = sorted(self.selected_shapes, reverse=True)
for index in to_remove:
self.data.remove(index)
self.selected_shapes = []
shape, vertex = self._shape_at(self._cursor_coord)
self._hover_shape = shape
self._hover_vertex = vertex
self.status = self.get_message(self._cursor_coord, shape, vertex)
self.refresh()
def _rotate_box(self, angle, center=[0, 0]):
"""Perfrom a rotation on the selected box.
Parameters
----------
angle : float
angle specifying rotation of shapes in degrees.
center : list
coordinates of center of rotation.
"""
theta = np.radians(angle)
transform = np.array([[np.cos(theta), np.sin(theta)],
[-np.sin(theta), np.cos(theta)]])
box = self._selected_box - center
self._selected_box = box @ transform.T + center
def _scale_box(self, scale, center=[0, 0]):
"""Perfrom a scaling on the selected box.
Parameters
----------
scale : float, list
scalar or list specifying rescaling of shape.
center : list
coordinates of center of rotation.
"""
if not isinstance(scale, (list, np.ndarray)):
scale = [scale, scale]
box = self._selected_box - center
box = np.array(box*scale)
if not np.all(box[BOX_TOP_CENTER] == box[BOX_HANDLE]):
rescale = self._get_rescale()
r = self._rotation_handle_length*rescale
handle_vec = box[BOX_HANDLE]-box[BOX_TOP_CENTER]
cur_len = np.linalg.norm(handle_vec)
box[BOX_HANDLE] = box[BOX_TOP_CENTER] + r*handle_vec/cur_len
self._selected_box = box + center
def _transform_box(self, transform, center=[0, 0]):
"""Perfrom a linear transformation on the selected box.
Parameters
----------
transform : np.ndarray
2x2 array specifying linear transform.
center : list
coordinates of center of rotation.
"""
box = self._selected_box - center
box = box @ transform.T
if not np.all(box[BOX_TOP_CENTER] == box[BOX_HANDLE]):
rescale = self._get_rescale()
r = self._rotation_handle_length*rescale
handle_vec = box[BOX_HANDLE]-box[BOX_TOP_CENTER]
cur_len = np.linalg.norm(handle_vec)
box[BOX_HANDLE] = box[BOX_TOP_CENTER] + r*handle_vec/cur_len
self._selected_box = box + center
def _shape_at(self, coord):
"""Determines if any shape at given coord by looking inside triangle
meshes.
Parameters
----------
coord : sequence of float
Image coordinates to check if any shapes are at.
Returns
----------
shape : int | None
Index of shape if any that is at the coordinates. Returns `None`
if no shape is found.
vertex : int | None
Index of vertex if any that is at the coordinates. Returns `None`
if no vertex is found.
"""
# Check selected shapes
if len(self.selected_shapes) > 0:
if self.mode == Mode.SELECT:
# Check if inside vertex of interaction box or rotation handle
box = self._selected_box[BOX_WITH_HANDLE]
distances = abs(box - coord[:2])
# Get the vertex sizes
rescale = self._get_rescale()
sizes = self._vertex_size*rescale/2
# Check if any matching vertices
matches = np.all(distances <= sizes, axis=1).nonzero()
if len(matches[0]) > 0:
return self.selected_shapes[0], matches[0][-1]
elif self.mode in ([Mode.DIRECT, Mode.VERTEX_INSERT,
Mode.VERTEX_REMOVE]):
# Check if inside vertex of shape
inds = np.isin(self.data._index, self.selected_shapes)
vertices = self.data._vertices[inds]
distances = abs(vertices - coord[:2])
# Get the vertex sizes
rescale = self._get_rescale()
sizes = self._vertex_size*rescale/2
# Check if any matching vertices
matches = np.all(distances <= sizes, axis=1).nonzero()[0]
if len(matches) > 0:
index = inds.nonzero()[0][matches[-1]]
shape = self.data._index[index]
_, idx = np.unique(self.data._index, return_index=True)
return shape, index - idx[shape]
# Check if mouse inside shape
shape = self.data.inside(coord)
return shape, None
def _get_rescale(self):
"""Get conversion factor from canvas coordinates to image coordinates.
Depends on the current zoom level.
Returns
----------
rescale : float
Conversion factor from canvas coordinates to image coordinates.
"""
transform = self.viewer._canvas.scene.node_transform(self._node)
rescale = transform.map([1, 1])[:2] - transform.map([0, 0])[:2]
return rescale.mean()
def _get_coord(self, position):
"""Convert a position in canvas coordinates to image coordinates.
Parameters
----------
position : sequence of int
Position of mouse cursor in canvas coordinates.
Returns
----------
coord : sequence of float
Position of mouse cursor in image coordinates.
"""
transform = self.viewer._canvas.scene.node_transform(self._node)
pos = transform.map(position)
coord = np.array([pos[0], pos[1]])
self._cursor_coord = coord
return coord
def get_message(self, coord, shape, vertex):
"""Generates a string based on the coordinates and information about
what shapes are hovered over
Parameters
----------
coord : sequence of int
Position of mouse cursor in image coordinates.
shape : int | None
Index of shape if any to be highlighted.
vertex : int | None
Index of vertex if any to be highlighted.
Returns
----------
msg : string
String containing a message that can be used as a status update.
"""
coord_shift = copy(coord)
coord_shift[0] = int(coord[1])
coord_shift[1] = int(coord[0])
msg = f'{coord_shift.astype(int)}, {self.name}'
if shape is not None:
msg = msg + ', shape ' + str(shape)
if vertex is not None:
msg = msg + ', vertex ' + str(vertex)
return msg
def move_to_front(self):
"""Moves selected objects to be displayed in front of all others.
"""
if len(self.selected_shapes) == 0:
return
new_z_index = max(self.data._z_index) + 1
for index in self.selected_shapes:
self.data.update_z_index(index, new_z_index)
self.refresh()
def move_to_back(self):
"""Moves selected objects to be displayed behind all others.
"""
if len(self.selected_shapes) == 0:
return
new_z_index = min(self.data._z_index) - 1
for index in self.selected_shapes:
self.data.update_z_index(index, new_z_index)
self.refresh()
def _copy_shapes(self):
"""Copy selected shapes to clipboard.
"""
self._clipboard = ([deepcopy(self.data.shapes[i]) for i in
self._selected_shapes])
def _paste_shapes(self):
"""Paste any shapes from clipboard and then selects them.
"""
cur_shapes = len(self.data.shapes)
for s in self._clipboard:
self.data.add(s)
self.selected_shapes = list(range(cur_shapes,
cur_shapes+len(self._clipboard)))
self.move_to_front()
self._copy_shapes()
def _move(self, coord):
"""Moves object at given mouse position and set of indices.
Parameters
----------
coord : sequence of two int
Position of mouse cursor in image coordinates.
"""
vertex = self._moving_vertex
if self.mode in ([Mode.SELECT, Mode.ADD_RECTANGLE, Mode.ADD_ELLIPSE,
Mode.ADD_LINE]):
if len(self.selected_shapes) > 0:
self._is_moving = True
if vertex is None:
# Record where the box is being dragged from so the whole object can be moved
if self._drag_start is None:
center = self._selected_box[BOX_CENTER]
self._drag_start = coord - center
center = self._selected_box[BOX_CENTER]
shift = coord - center - self._drag_start
for index in self.selected_shapes:
self.data.shift(index, shift)
self._selected_box = self._selected_box + shift
self.refresh()
elif vertex < BOX_LEN:
# Corner / edge vertex is being dragged so resize object
box = self._selected_box
if self._fixed_vertex is None:
self._fixed_index = (vertex+4) % BOX_LEN
self._fixed_vertex = box[self._fixed_index]
size = (box[(self._fixed_index+4) % BOX_LEN] -
box[self._fixed_index])
offset = box[BOX_HANDLE] - box[BOX_CENTER]
offset = offset/np.linalg.norm(offset)
offset_perp = np.array([offset[1], -offset[0]])
fixed = self._fixed_vertex
new = copy(coord)
if self._fixed_aspect and self._fixed_index % 2 == 0:
if (new - fixed)[0] == 0:
ratio = 1
else:
ratio = abs((new - fixed)[1]/(new - fixed)[0])
if ratio > self._aspect_ratio:
r = self._aspect_ratio/ratio
new[1] = fixed[1]+(new[1]-fixed[1])*r
else:
r = ratio/self._aspect_ratio
new[0] = fixed[0]+(new[0]-fixed[0])*r
if size @ offset == 0:
dist = 1
else:
dist = ((new - fixed) @ offset) / (size @ offset)
if size @ offset_perp == 0:
dist_perp = 1
else:
dist_perp = (((new - fixed) @ offset_perp) /
(size @ offset_perp))
if self._fixed_index % 2 == 0:
# corner selected
scale = np.array([dist_perp, dist])
elif self._fixed_index % 4 == 1:
# top selected
scale = np.array([1, dist])
else:
# side selected
scale = np.array([dist_perp, 1])
# prevent box from shrinking below a threshold size
rescale = self._get_rescale()
threshold = self._vertex_size*rescale/8
scale[abs(scale*size) < threshold] = 1
# check orientation of box
angle = -
|
np.arctan2(offset[0], -offset[1])
|
numpy.arctan2
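# A minimal standalone sketch of the rotation applied by _rotate_box above:
# a 2x2 rotation matrix applied to a set of points about a chosen center.
# The helper name and example values are hypothetical.
import numpy as np

def rotate_points(points, angle_deg, center=(0.0, 0.0)):
    """Rotate an (N, 2) array of points by angle_deg degrees about center."""
    theta = np.radians(angle_deg)
    rot = np.array([[np.cos(theta), np.sin(theta)],
                    [-np.sin(theta), np.cos(theta)]])
    shifted = np.asarray(points, dtype=float) - center
    return shifted @ rot.T + center

# Example: rotate the corners of a unit square by 90 degrees about the origin.
square = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])
rotated = rotate_points(square, 90)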
|
import gym
import numpy as np
from gym.envs.registration import register
from griddly import GriddlyLoader, gd
from griddly.util.action_space import MultiAgentActionSpace
from griddly.util.observation_space import MultiAgentObservationSpace
from griddly.util.vector_visualization import Vector2RGB
class GymWrapper(gym.Env):
metadata = {'render.modes': ['human', 'rgb_array']}
def __init__(self, yaml_file=None, yaml_string=None, level=0, global_observer_type=gd.ObserverType.VECTOR,
player_observer_type=gd.ObserverType.VECTOR, max_steps=None, gdy_path=None, image_path=None,
shader_path=None,
gdy=None, game=None, **kwargs):
"""
Currently only supports a single player (player 1 as defined in the environment yaml).
:param yaml_file:
:param level:
:param global_observer_type: the render mode for the global renderer
:param player_observer_type: the render mode for the players
"""
super(GymWrapper, self).__init__()
# Set up multiple render windows so we can see what the AIs see and what the game environment looks like
self._renderWindow = {}
# If we are loading a yaml file
if yaml_file is not None or yaml_string is not None:
self._is_clone = False
loader = GriddlyLoader(gdy_path, image_path, shader_path)
if yaml_file is not None:
self.gdy = loader.load(yaml_file)
else:
self.gdy = loader.load_string(yaml_string)
self.game = self.gdy.create_game(global_observer_type)
if max_steps is not None:
self.gdy.set_max_steps(max_steps)
if level is not None:
self.game.load_level(level)
self.level_id = level
# if we are loading a copy of the game
elif gdy is not None and game is not None:
self._is_clone = True
self.gdy = gdy
self.game = game
self.level_count = self.gdy.get_level_count()
self._players = []
self.player_count = self.gdy.get_player_count()
self._global_observer_type = global_observer_type
self._player_observer_type = []
for p in range(self.player_count):
self._players.append(self.game.register_player(f'Player {p + 1}', player_observer_type))
self._player_observer_type.append(player_observer_type)
self._player_last_observation = []
self._global_last_observation = None
self.num_action_ids = {}
self._enable_history = False
self.game.init(self._is_clone)
def get_state(self):
return self.game.get_state()
def get_tile_size(self, player=0):
if player == 0:
return self.game.get_tile_size()
else:
return self._players[player - 1].get_tile_size()
def enable_history(self, enable=True):
self._enable_history = enable
self.game.enable_history(enable)
def step(self, action):
"""
Step for a particular player in the environment
"""
player_id = 0
reward = None
done = False
info = {}
# Simple agents executing single actions or multiple actions in a single time step
if self.player_count == 1:
action = np.array(action, dtype=np.int32)
if np.ndim(action) == 0:
action_data = action.reshape(1, -1)
elif np.ndim(action) == 1:
action_data = action.reshape(1, -1)
elif np.ndim(action) == 2:
action_data = np.array(action)
else:
raise ValueError(f'The supplied action is in the wrong format for this environment.\n\n'
f'A valid example: {self.action_space.sample()}')
reward, done, info = self._players[player_id].step_multi(action_data, True)
elif len(action) == self.player_count:
processed_actions = []
multi_action = False
for a in action:
if a is None:
processed_action = np.zeros((len(self.action_space_parts)), dtype=np.int32)
else:
processed_action = np.array(a, dtype=np.int32)
if len(processed_action.shape) > 1 and processed_action.shape[0] > 1:
multi_action = True
processed_actions.append(processed_action)
if not self.has_avatar and multi_action:
# Multiple agents that can perform multiple actions in parallel
# Used in RTS games
reward = []
for p in range(self.player_count):
player_action = processed_actions[p].reshape(-1, len(self.action_space_parts))
final = p == self.player_count - 1
rew, done, info = self._players[p].step_multi(player_action, final)
reward.append(rew)
# Multiple agents executing actions in parallel
# Used in multi-agent environments
else:
action_data = np.array(processed_actions, dtype=np.int32)
action_data = action_data.reshape(self.player_count, -1)
reward, done, info = self.game.step_parallel(action_data)
else:
raise ValueError(f'The supplied action is in the wrong format for this environment.\n\n'
f'A valid example: {self.action_space.sample()}')
for p in range(self.player_count):
# Copy only if the environment is done (it will reset itself)
# This is because the underlying data will be released
self._player_last_observation[p] = np.array(self._players[p].observe(), copy=False)
obs = self._player_last_observation[0] if self.player_count == 1 else self._player_last_observation
if self._enable_history:
info['History'] = self.game.get_history()
return obs, reward, done, info
def reset(self, level_id=None, level_string=None, global_observations=False):
if level_string is not None:
self.game.load_level_string(level_string)
self.level_id = 'custom'
elif level_id is not None:
self.game.load_level(level_id)
self.level_id = level_id
self.game.reset()
self.initialize_spaces()
for p in range(self.player_count):
self._player_last_observation.append(np.array(self._players[p].observe(), copy=False))
if global_observations:
self._global_last_observation = np.array(self.game.observe(), copy=False)
return {
'global': self._global_last_observation,
'player': self._player_last_observation[0] if self.player_count == 1 else self._player_last_observation
}
else:
return self._player_last_observation[0] if self.player_count == 1 else self._player_last_observation
def initialize_spaces(self):
self._player_last_observation = []
self.player_observation_shape = self.game.get_player_observation_shape()
self.global_observation_shape = self.game.get_global_observation_shape()
self.global_observation_space = gym.spaces.Box(low=0, high=255, shape=self.global_observation_shape,
dtype=np.uint8)
self._observation_shape = self.player_observation_shape
observation_space = gym.spaces.Box(low=0, high=255, shape=self._observation_shape, dtype=np.uint8)
if self.player_count > 1:
observation_space = MultiAgentObservationSpace([observation_space for _ in range(self.player_count)])
self.observation_space = observation_space
self.object_names = self.game.get_object_names()
self.variable_names = self.game.get_object_variable_names()
self._vector2rgb = Vector2RGB(10, len(self.object_names))
self.action_space = self._create_action_space()
def render(self, mode='human', observer=0):
if observer == 'global':
observation = np.array(self.game.observe(), copy=False)
if self._global_observer_type == gd.ObserverType.VECTOR:
observation = self._vector2rgb.convert(observation)
if self._global_observer_type == gd.ObserverType.ASCII:
observation = observation \
.swapaxes(2, 0) \
.reshape(-1, observation.shape[0] * observation.shape[1]) \
.view('c')
ascii_string = ''.join(np.column_stack(
(observation, np.repeat(['\n'], observation.shape[0]))
).flatten().tolist())
return ascii_string
else:
observation = self._player_last_observation[observer]
if self._player_observer_type[observer] == gd.ObserverType.VECTOR:
observation = self._vector2rgb.convert(observation)
if self._player_observer_type[observer] == gd.ObserverType.ASCII:
observation = observation \
.swapaxes(2, 0) \
.reshape(-1, observation.shape[0] * observation.shape[1]) \
.view('c')
ascii_string = ''.join(np.column_stack(
(observation,
|
np.repeat(['\n'], observation.shape[0])
|
numpy.repeat
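# A minimal usage sketch for the GymWrapper above, assuming 'my_game.yaml'
# is a valid GDY description file on disk (the file name is hypothetical).
env = GymWrapper(yaml_file='my_game.yaml', level=0)
obs = env.reset()
for _ in range(10):
    obs, reward, done, info = env.step(env.action_space.sample())
    if done:
        obs = env.reset()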
|
# task 5
# Write a function that:
# ■ takes a single input parameter specifying the length of a vector,
# ■ generates a vector of that length from the parameter, but in reversed order (e.g. for n=3 => [3 2 1]),
# ■ builds a diagonal matrix with that vector on the diagonal
import numpy as np
def funkcja(n):
wektor =
|
np.arange(n, 0, -1)
|
numpy.arange
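# A minimal sketch of the complete function described in the task above,
# assuming np.diag is an acceptable way to build the diagonal matrix.
import numpy as np

def funkcja(n):
    wektor = np.arange(n, 0, -1)   # e.g. n=3 -> [3 2 1]
    return np.diag(wektor)         # diagonal matrix with wektor on its diagonal

# funkcja(3) -> [[3 0 0], [0 2 0], [0 0 1]]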
|
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
def load_matrix(file_num, ex):
""" Load the extrinsic or intrinsic matrix from a relative path
True for extrinsic, false for intrinsic """
return np.loadtxt("{}/extrinsic.txt".format(file_num)) if ex \
else np.loadtxt("{}/intrinsics.txt".format(file_num))
def save_cloud(file_num, cloud_matrix):
""" Save a point cloud matrix to a file at a relative path """
np.savetxt("{}/pointCloud.txt".format(file_num), cloud_matrix, delimiter=',',
fmt=['%.2f', '%.2f', '%.2f', '%d', '%d', '%d'])
def compute_point_cloud(img_rgb, img_depth, file_num, world=True, rot_matrix=None):
""" Compute a point cloud using the rgb and depth images and save it at a relative path
Computes the world coordinate point cloud if world, else camera coordinate with given rotation """
intrinsic_inv = np.linalg.inv(load_matrix(file_num, False))
extrinsic = load_matrix(file_num, True)
ex_rotation_inv = np.linalg.inv(extrinsic[:, :3])
ex_translation = extrinsic[:, 3]
height, width = np.shape(img_depth)
point_cloud = []
for x in range(width):
for h in range(height):
y = height - h - 1 # y axis pointing up from bottom left corner
w = img_depth[y, x]
coord = np.matmul(intrinsic_inv, np.array([w * x, w * y, w]))
if world:
coord = np.matmul(ex_rotation_inv, coord) + ex_translation
else:
coord = np.matmul(rot_matrix, coord)
point_cloud.append(np.concatenate((coord, img_rgb[y, x, :])))
return np.array(point_cloud)
def camera_cloud_to_img(camera_cloud, height, width, file_num):
""" Create an rgb and a depth image from a camera coordinate point cloud """
intrinsic = load_matrix(file_num, False)
depth_img = np.zeros((height, width), dtype=int)
rgb_img = np.zeros((height, width, 3), dtype=int)
for pixel in camera_cloud:
q_w = np.matmul(intrinsic, pixel[:3])
w = q_w[2]
if w != 0:
x = int(round(q_w[0] / w))
y = int(round(q_w[1] / w))
if 0 <= x < width and 0 <= y < height:
depth_img[y, x] = w
rgb_img[y, x, :] = pixel[3:]
return rgb_img, depth_img
def rotate(img_rgb, img_depth, file_num, axis, theta):
""" Rotate the camera (theta is in radians) and return the new rgb and depth images
dir in ['x', 'y', 'z'] """
# source for getting the rotation matrices: http://planning.cs.uiuc.edu/node102.html
assert axis in ['x', 'y', 'z']
if axis == 'x':
rot_matrix = np.array([[1, 0, 0], [0,
|
np.cos(theta)
|
numpy.cos
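# A minimal sketch of the standard 3D axis rotation matrices that rotate()
# above selects from (same reference as the link in the code); sign
# conventions may differ from the original implementation, so treat this as
# an illustrative reference only.
import numpy as np

def axis_rotation_matrix(axis, theta):
    c, s = np.cos(theta), np.sin(theta)
    if axis == 'x':
        return np.array([[1, 0, 0], [0, c, -s], [0, s, c]])
    if axis == 'y':
        return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
    if axis == 'z':
        return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])
    raise ValueError("axis must be 'x', 'y' or 'z'")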
|
import os, sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import signal
from numpy import linalg as la
cwd = os.getcwd()
data_dir = cwd + '/Calibration_data/BeiBen_halfload_20190426/raw_data'
def ImportDataFromFile(data_dir):
file_list = [f for f in os.listdir(data_dir) if not f.startswith('.')]
df_list = {}
#read in data one by one
for f in file_list:
f_dir = data_dir + '/' + f
df_list[f] = pd.read_csv(f_dir)
print('File loaded: ', f_dir)
return df_list
def DataClean(df, data = 'all'):
#only select io = 1
idxbool = df['io'] == 1
idxbool = idxbool & (df['ctlmode'] == 1)
idxbool = idxbool & (df['driving_mode'] == 1)
if data == 'throttle':
idxbool = idxbool & (df['throttle_percentage'] > 0)
elif data == 'brake':
idxbool = idxbool & (df['brake_percentage'] > 0)
elif data == 'all':
idxbool = idxbool
else:
raise ValueError('Please Specify throttle or brake or all')
return df[idxbool]
def SimpleExplore(df):
# plot all columns by time, except time
for i in range(1,len(df.columns)):
plt.figure()
plt.plot(df['time'], df[df.columns[i]],'r-')
plt.plot(df['time'], df[df.columns[i]], 'bx', markersize= 3)
plt.title(df.columns[i])
plt.show()
return
def ThrottleExplore(df, df_compare):
plt.figure()
plt.plot(df['time'], df['throttle_percentage'], label = 'throttle%')
plt.plot(df['time'], df['vehicle_speed'], label = 'speed')
plt.plot(df['time'], df['engine_rpm'], label = 'rpm')
plt.plot(df['time'], df[' imu'], label = 'imu')
plt.plot(df_compare['time'], df_compare[' imu'], label = 'imu2')
plt.axhline(y = 1, color = 'r', linestyle = '-')
plt.axhline(y =-1, color = 'r', linestyle = '-')
plt.xlabel('time')
plt.ylabel('data')
plt.legend()
plt.figure()
plt.plot(df['vehicle_speed'],df['engine_rpm'],'b-', label = 'rpm vs speed')
plt.xlabel('speed')
plt.ylabel('rpm')
plt.legend()
plt.figure()
plt.plot(df['engine_rpm'], df[' imu'],'bx' ,label = 'acc vs rpm')
plt.xlabel('rpm')
plt.ylabel('imu')
plt.plot(df_compare['engine_rpm'], df_compare[' imu'],'rx' ,label = 'acc vs rpm compare')
plt.plot(df_compare['engine_rpm'], df_compare[' imu'],'r-' ,label = 'acc vs rpm compare')
return
def CorrelationAnalysis(df):
plt.matshow(df.corr())
print(df.corr())
plt.figure()
mat = np.array(df)[:,(2,3,4,5,6,7,8,9,10,11,13)]
print('shape: ', mat.shape)
U, Sigma, VT= la.svd(mat)
print('SVD U = ',U)
print('SVD VT = ', VT[0,:])
print('Sigma%: ', Sigma/np.sum(Sigma)*100)
plt.plot(Sigma, label = 'sigma')
plt.title('svd Sigma')
plt.legend()
plt.figure()
plt.plot(df['time'], df[' imu']/df['engine_rpm'], label = 'rpm/imu')
return
def DataStandardize(df):
df_after = df.copy()
# standardize
throttle_mean = np.mean(df_after['throttle_percentage'])
throttle_std = np.std(df_after['throttle_percentage'])
speed_mean = np.mean(df_after['vehicle_speed'])
speed_std = np.std(df_after['vehicle_speed'])
rpm_mean =
|
np.mean(df_after['engine_rpm'])
|
numpy.mean
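# A minimal sketch of the usual (x - mean) / std standardisation that
# DataStandardize() above is building up to; the helper name is hypothetical
# and the column names in the example call are taken from the code above.
import numpy as np

def standardize_columns(df, columns):
    out = df.copy()
    for col in columns:
        out[col] = (out[col] - np.mean(out[col])) / np.std(out[col])
    return out

# df_std = standardize_columns(df, ['throttle_percentage', 'vehicle_speed', 'engine_rpm'])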
|
#!/usr/bin/env python
from collections import namedtuple
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from scipy import interpolate
import cv2 as cv
import spatialmath.base.argcheck as argcheck
from machinevisiontoolbox.base import color, int_image, float_image, plot_histogram
class ImageProcessingBaseMixin:
"""
Image processing basic operations on the Image class
"""
def int(self, intclass='uint8'):
"""
Convert image to integer type
:param intclass: either 'uint8', or any integer class supported by np
:type intclass: str
:return: Image with integer pixel types
:rtype: Image instance
- ``IM.int()`` is a copy of image with pixels converted to unsigned
8-bit integer (uint8) elements in the range 0 to 255.
- ``IM.int(intclass)`` as above but the output pixels are converted to
the integer class ``intclass``.
Example:
.. runblock:: pycon
>>> from machinevisiontoolbox import Image
>>> im = Image('flowers1.png', dtype='float64')
>>> print(im)
>>> im_int = im.int()
>>> print(im_int)
.. note::
- Works for an image with arbitrary number of dimensions, eg. a
color image or image sequence.
- If the input image is floating point (single or double) the
pixel values are scaled from an input range of [0,1] to a range
spanning zero to the maximum positive value of the output integer
class.
- If the input image is an integer class then the pixels are cast
to change type but not their value.
:references:
- Robotics, Vision & Control, Section 12.1, <NAME>,
Springer 2011.
"""
out = []
for im in self:
out.append(int_image(im.image, intclass))
return self.__class__(out)
def float(self, floatclass='float32'):
"""
Convert image to float type
:param floatclass: 'single', 'double', 'float32' [default], 'float64'
:type floatclass: str
:return: Image with floating point pixel types
:rtype: Image instance
- ``IM.float()`` is a copy of image with pixels converted to
``float32`` floating point values spanning the range 0 to 1. The
input integer pixels are assumed to span the range 0 to the maximum
value of their integer class.
- ``IM.float(im, floatclass)`` as above but with floating-point pixel
values belonging to the class ``floatclass``.
Example:
.. runblock:: pycon
>>> im = Image('flowers1.png')
>>> print(im)
>>> im_float = im.float()
>>> print(im_float)
:references:
- Robotics, Vision & Control, Section 12.1, <NAME>,
Springer 2011.
"""
out = []
for im in self:
out.append(float_image(im.image, floatclass))
return self.__class__(out)
def mono(self, opt='r601'):
"""
Convert color image to monochrome
:param opt: greyscale conversion option 'r601' [default] or 'r709'
:type opt: string
:return: Image with floating point pixel types
:rtype: Image instance
- ``IM.mono(im)`` is a greyscale equivalent of the color image ``im``
Example:
.. runblock:: pycon
>>> im = Image('flowers1.png')
>>> print(im)
>>> im_mono = im.mono()
>>> print(im_mono)
:references:
- Robotics, Vision & Control, Section 10.1, <NAME>,
Springer 2011.
"""
if not self.iscolor:
return self
out = []
for im in [img.bgr for img in self]:
if opt == 'r601':
new = 0.299 * im[:, :, 2] + 0.587 * im[:, :, 1] + \
0.114 * im[:, :, 0]
new = new.astype(im.dtype)
elif opt == 'r709':
# im is BGR, so the red channel is index 2
new = 0.2126 * im[:, :, 2] + 0.7152 * im[:, :, 1] + \
0.0722 * im[:, :, 0]
new = new.astype(im.dtype)
elif opt == 'value':
# 'value' refers to the V in HSV space, not the CIE L*
# the mean of the max and min of RGB values at each pixel
mn = im[:, :, 2].min(axis=2)
mx = im[:, :, 2].max(axis=2)
# if np.issubdtype(im.dtype, np.float):
# NOTE let's make a new predicate for Image
if im.isfloat:
new = 0.5 * (mn + mx)
new = new.astype(im.dtype)
else:
z = (np.int32(mx) + np.int32(mn)) / 2
new = z.astype(im.dtype)
else:
raise TypeError('unknown type for opt')
out.append(new)
return self.__class__(out)
def stretch(self, max=1, r=None):
"""
Image normalisation
:param max: maximum of the output range; pixel values are mapped to 0 to max
:type max: scalar integer or float
:param r: r[0] is mapped to 0, r[1] is mapped to 1 (or max value)
:type r: 2-tuple or numpy array (2,1)
:return: Image with pixel values stretched to M across r
:rtype: Image instance
- ``IM.stretch()`` is a normalised image in which all pixel values lie
in the r range of 0 to 1. That is, a linear mapping where the minimum
value of ``im`` is mapped to 0 and the maximum value of ``im`` is
mapped to 1.
Example:
.. runblock:: pycon
.. note::
- For an integer image the result is a float image in the range 0
to max value
:references:
- Robotics, Vision & Control, Section 12.1, <NAME>,
Springer 2011.
"""
# TODO make all infinity values = None?
out = []
for im in [img.image for img in self]:
if r is None:
mn = np.min(im)
mx = np.max(im)
else:
r = argcheck.getvector(r)
mn = r[0]
mx = r[1]
zs = (im - mn) / (mx - mn) * max
if r is not None:
zs = np.maximum(0, np.minimum(max, zs))
out.append(zs)
return self.__class__(out)
def thresh(self, t=None, opt='binary'):
"""
Image threshold
:param t: threshold
:type t: scalar
:param opt: threshold option (see below)
:type opt: string
:return imt: Image thresholded binary image
:rtype imt: Image instance
:return: threshold if opt is otsu or triangle
:rtype: list of scalars
- ``IM.thresh()`` uses Otsu's method for thresholding a greyscale
image.
- ``IM.thresh(t)`` as above but the threshold ``t`` is specified.
- ``IM.thresh(t, opt)`` as above but the threshold option is specified.
See opencv threshold types for threshold options
https://docs.opencv.org/4.2.0/d7/d1b/group__imgproc__
misc.html#gaa9e58d2860d4afa658ef70a9b1115576
Example:
.. runblock:: pycon
:options:
- 'binary' # TODO consider the LaTeX formatting of equations
- 'binary_inv'
- 'trunc'
- 'tozero'
- 'tozero_inv'
- 'otsu'
- 'triangle'
.. note::
- Converts a color image to greyscale.
- For a uint8 class image the slider range is 0 to 255.
- For a floating point class image the slider range is 0 to 1.0
"""
# dictionary of threshold options from OpenCV
threshopt = {
'binary': cv.THRESH_BINARY,
'binary_inv': cv.THRESH_BINARY_INV,
'trunc': cv.THRESH_TRUNC,
'tozero': cv.THRESH_TOZERO,
'tozero_inv': cv.THRESH_TOZERO_INV,
'otsu': cv.THRESH_OTSU,
'triangle': cv.THRESH_TRIANGLE
}
if t is not None:
if not argcheck.isscalar(t):
raise ValueError(t, 't must be a scalar')
else:
# if no threshold is specified, default to Otsu's method
print("No threshold specified. Applying Otsu's method.")
opt = 'otsu'
# ensure mono images
if self.iscolor:
imono = self.mono()
else:
imono = self
out_t = []
out_imt = []
for im in [img.image for img in imono]:
# for image int class, maxval = max of int class
# for image float class, maxval = 1
if np.issubdtype(im.dtype, np.integer):
maxval = np.iinfo(im.dtype).max
else:
# float image, [0, 1] range
maxval = 1.0
threshvalue, imt = cv.threshold(im, t, maxval, threshopt[opt])
out_t.append(threshvalue)
out_imt.append(imt)
if opt == 'otsu' or opt == 'triangle':
return self.__class__(out_imt), out_t
else:
return self.__class__(out_imt)
def otsu(self, levels=256, valley=None):
"""
Otsu threshold selection
:return t: Otsu's threshold
:rtype t: float
:return imt: Image thresholded to a binary image
:rtype imt: Image instance
- ``otsu(im)`` is an optimal threshold for binarizing an image with a
bimodal intensity histogram. ``t`` is a scalar threshold that
maximizes the variance between the classes of pixels below and above
the threshold ``t``.
Example::
.. runblock:: pycon
.. note::
- Converts a color image to greyscale.
:references:
- A Threshold Selection Method from Gray-Level Histograms, N. Otsu.
IEEE Trans. Systems, Man and Cybernetics Vol SMC-9(1), Jan 1979,
pp 62-66.
- An improved method for image thresholding on the valley-emphasis
method. <NAME>, <NAME> etal. Signal and Info Proc.
Assocn. Annual Summit and Conf (APSIPA). 2013. pp1-4
"""
# mvt-mat has options on levels and valleys, which Opencv does not have
# TODO best option is likely just to code the function itself, with
# default option of simply calling OpenCV's Otsu implementation
im = self.mono()
if (valley is None):
imt, t = im.thresh(opt='otsu')
else:
raise ValueError(valley, 'not implemented yet')
# TODO implement otsu.m
# TODO levels currently ignored
return imt, t
def nonzero(self):
return np.nonzero(self.image)
def meshgrid(self, step=1):
"""
Domain matrices for image
:param step: step size used to sample the image domain, defaults to 1
:type step: integer
:return u: domain of image, horizontal
:rtype u: numpy array
:return v: domain of image, vertical
:rtype v: numpy array
- ``IM.imeshgrid()`` are matrices that describe the domain of image
``im (h,w)`` and are each ``(h,w)``. These matrices are used for the
evaluation of functions over the image. The element ``u(r,c) = c``
and ``v(r,c) = r``.
- ``IM.imeshgrid(w, h)`` as above but the domain is ``(w,h)``.
- ``IM.imeshgrid(s)`` as above but the domain is described by ``s``
which can be a scalar ``(s,s)`` or a 2-vector ``s=[w,h]``.
Example:
.. runblock:: pycon
"""
# TODO too complex, simplify
# Use cases
# image.meshgrid() spans image
# image.meshgrid(step=N) spans image with step
# if not (argcheck.isvector(a1) or isinstance(a1, np.ndarray)
# or argcheck.isscalar(a1) or isinstance(a1, self.__class__)):
# raise ValueError(
# a1, 'a1 must be an Image, matrix, vector, or scalar')
# if a2 is not None and (not (argcheck.isvector(a2) or
# isinstance(a2, np.ndarray) or
# argcheck.isscalar(a2) or
# isinstance(a2, self.__class__))):
# raise ValueError(
# a2, 'a2 must be Image, matrix, vector, scalar or None')
# if isinstance(a1, self.__class__):
# a1 = a1.image
# if isinstance(a2, self.__class__):
# a2 = a2.image
# if a2 is None:
# if a1.ndim <= 1 and len(a1) == 1:
# # if a1 is a single number
# # we specify a size for a square output image
# ai = np.arange(0, a1)
# u, v = np.meshgrid(ai, ai)
# elif a1.ndim <= 1 and len(a1) == 2:
# # if a1 is a 2-vector
# # we specify a size for a rectangular output image (w, h)
# a10 = np.arange(0, a1[0])
# a11 = np.arange(0, a1[1])
# u, v = np.meshgrid(a10, a11)
# elif (a1.ndim >= 2): # and (a1.shape[2] > 2):
# u, v = np.meshgrid(np.arange(0, a1.shape[1]),
# np.arange(0, a1.shape[0]))
# else:
# raise ValueError(a1, 'incorrect argument a1 shape')
# else:
# # we assume a1 and a2 are two scalars
# u, v = np.meshgrid(np.arange(0, a1), np.arange(0, a2))
u = np.arange(0, self.width, step)
v = np.arange(0, self.height, step)
return np.meshgrid(v, u, indexing='ij')
def hist(self, nbins=256, opt=None):
"""
Image histogram
:param nbins: number of bins for histogram
:type nbins: integer
:param opt: histogram option
:type opt: string
:return hist: histogram h as a column vector, and corresponding bins x,
cdf and normcdf
:rtype hist: collections.namedtuple
- ``IM.hist()`` is the histogram of intensities for image as a vector.
For an image with multiple planes, the histogram of each plane is
given in a separate column. Additionally, the cumulative histogram
and normalized cumulative histogram, whose maximum value is one, are
computed.
- ``IM.hist(nbins)`` as above with the number of bins specified
- ``IM.hist(opt)`` as above with histogram options specified
:options:
- 'sorted' histogram but with occurrence sorted in descending
magnitude order. Bin coordinates X reflect this sorting.
Example:
.. runblock:: pycon
.. note::
- The bins spans the greylevel range 0-255.
- For a floating point image the histogram spans the greylevel
range 0-1.
- For floating point images all NaN and Inf values are first
removed.
- OpenCV CalcHist only works on floats up to 32 bit, images are
automatically converted from float64 to float32
"""
# check inputs
optHist = ['sorted']
if opt is not None and opt not in optHist:
raise ValueError(opt, 'opt is not a valid option')
if self.isint:
maxrange = np.iinfo(self.dtype).max
else:
# float image
maxrange = 1.0
out = []
for im in self:
# normal histogram case
xc = []
hc = []
hcdf = []
hnormcdf = []
implanes = cv.split(im.image)
for i in range(self.numchannels):
# bin coordinates
x = np.linspace(0, maxrange, nbins, endpoint=True).T
h = cv.calcHist(implanes, [i], None, [nbins], [0, maxrange + 1])
if opt == 'sorted':
h = np.sort(h, axis=0)
isort = np.argsort(h, axis=0)
x = x[isort]
cdf =
|
np.cumsum(h)
|
numpy.cumsum
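# A minimal NumPy-only sketch of the histogram/cdf chain that hist() above
# computes per channel, here for a single uint8 greyscale array; the helper
# name is hypothetical.
import numpy as np

def grey_histogram(img, nbins=256):
    h, edges = np.histogram(img.ravel(), bins=nbins, range=(0, 255))
    cdf = np.cumsum(h)
    normcdf = cdf / cdf[-1]          # normalised cumulative histogram, max value one
    return edges[:-1], h, cdf, normcdf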
|
import copy
import os
import sys
import unittest
import numpy as np
import numpy.testing as npt
import tracker.scekf as scekf
sys.path.insert(0, os.path.abspath("../.."))
class TestState(unittest.TestCase):
g =
|
np.array([[0], [0], [-9.81]])
|
numpy.array
|
# Written by <NAME> <EMAIL>
from __future__ import division
import numpy as np
from GPy.kern import Kern
from GPy.core.parameterization import Param
from paramz.transformations import Logexp
import math
import pdb
class Mix_Integral(Kern):
"""
Integral kernel. This kernel allows 1d histogram or binned data to be modelled.
The outputs are the counts in each bin. The inputs (on two dimensions) are the start and end points of each bin.
The kernel's predictions are the latent function which might have generated those binned results.
"""
def __init__(self, input_dim, variance=None, lengthscale=None, ARD=False, active_dims=None, name='mix_integral'):
super(Mix_Integral, self).__init__(input_dim, active_dims, name)
if lengthscale is None:
lengthscale = np.ones(1)
else:
lengthscale = np.asarray(lengthscale)
assert len(lengthscale)==(input_dim-1)/2
self.lengthscale = Param('lengthscale', lengthscale, Logexp()) #Logexp - transforms to allow positive only values...
self.variance = Param('variance', variance, Logexp()) #and here.
self.link_parameters(self.variance, self.lengthscale) #this just takes a list of parameters we need to optimise.
#useful little function to help calculate the covariances.
def g(self, z):
# pdb.set_trace()
return 1.0 * z * np.sqrt(math.pi) * math.erf(z) + np.exp(-(z**2))
def k_ff(self, t, tprime, s, sprime, lengthscale):
"""Covariance between observed values.
s and t are one domain of the integral (i.e. the integral between s and t)
sprime and tprime are another domain of the integral (i.e. the integral between sprime and tprime)
We're interested in how correlated these two integrals are.
Note: We've not multiplied by the variance, this is done in K."""
####### l = lengthscale * np.sqrt(2)###TO REINSTATE
# pdb.set_trace()
l = lengthscale
return 0.5 * (l**2) * ( self.g((t - sprime) / l) + self.g((tprime - s) / l) - self.g((t - tprime) / l) - self.g((s - sprime) / l))
def calc_K_wo_variance(self, X, X2):
"""Calculates K_xx without the variance term"""
# import pdb
# pdb.set_trace()
K_ = np.ones([X.shape[0], X2.shape[0]]) #ones now as a product occurs over each dimension
for i, x in enumerate(X):
for j, x2 in enumerate(X2):
for il,l in enumerate(self.lengthscale):
idx = il * 2 #each pair of input dimensions describe the limits on one actual dimension in the data
K_[i,j] *= self.k(x, x2, idx, l)
return K_
def k_uu(self, t, tprime, lengthscale):
"""Doesn't need s or sprime as we're looking at the 'derivatives', so no domains over which to integrate are required"""
####### l = lengthscale * np.sqrt(2)###TO REINSTATE
l = lengthscale
return np.exp(-((t-tprime)**2) / (l**2)) #rbf
def k_fu(self, t, tprime, s, lengthscale):
"""Covariance between the gradient (latent value) and the actual (observed) value.
Note that sprime isn't actually used in this expression, presumably because the 'primes' are the gradient (latent) values which don't
involve an integration, and thus there is no domain over which they're integrated, just a single value that we want."""
####### l = lengthscale * np.sqrt(2)###TO REINSTATE
l = lengthscale
return 0.5 * np.sqrt(math.pi) * l * (math.erf((t - tprime) / l) + math.erf((tprime - s) / l))
def k(self, x, x2, idx, l):
"""Helper function to compute covariance in one dimension (idx) between a pair of points.
The last element in x and x2 specify if these are integrals (0) or latent values (1).
l = that dimension's lengthscale
"""
# import pdb
# pdb.set_trace()
# print('x:', x)
# print('x2:', x2)
# print('*********************')
if (x[-1] == 0) and (x2[-1] == 0):
return self.k_ff(x[idx], x2[idx], x[idx+1], x2[idx+1], l)
if (x[-1] == 0) and (x2[-1] == 1):
return self.k_fu(x[idx], x2[idx], x[idx+1], l)
if (x[-1] == 1) and (x2[-1] == 0):
return self.k_fu(x2[idx], x[idx], x2[idx+1], l)
if (x[-1] == 1) and (x2[-1] == 1):
return self.k_uu(x[idx], x2[idx], l)
assert False, "Invalid choice of latent/integral parameter (set the last column of X to 0s and 1s to select this)"
def K(self, X, X2=None):
# pdb.set_trace()
if X2 is None:
X2 = X
K = self.calc_K_wo_variance(X, X2)
return K * self.variance[0]
def Kdiag(self, X):
return np.diag(self.K(X))
"""
Kdiag could be made much faster: it currently computes the full K every time just to extract the diagonal.
# TODO
"""
# def Kdiag_Kuu(self, X):
# return self.variance[0]*np.ones(X.shape[0])
"""
Derivatives!
"""
def h(self, z):
return 0.5 * z * np.sqrt(math.pi) * math.erf(z) + np.exp(-(z**2))
def hp(self, z):
return 0.5 * np.sqrt(math.pi) * math.erf(z) - z * np.exp(-(z**2))
def dk_dl(self, t_type, tprime_type, t, tprime, s, sprime, l): #derivative of the kernel wrt lengthscale
#t and tprime are the two start locations
#s and sprime are the two end locations
#if t_type is 0 then t and s should be in the equation
#if tprime_type is 0 then tprime and sprime should be in the equation.
if (t_type == 0) and (tprime_type == 0): #both integrals
return l * ( self.h((t - sprime) / l) - self.h((t - tprime) / l) + self.h((tprime - s) / l) - self.h((s - sprime) / l))
if (t_type == 0) and (tprime_type == 1): #integral vs latent
return self.hp((t - tprime) / l) + self.hp((tprime - s) / l)
if (t_type == 1) and (tprime_type == 0): #integral vs latent
return self.hp((tprime - t) / l) + self.hp((t - sprime) / l)
#swap: t<->tprime (t-s)->(tprime-sprime)
if (t_type == 1) and (tprime_type == 1): #both latent observations
return 2 * (t - tprime) **2 / (l ** 3) * np.exp(-((t - tprime) / l) ** 2)
assert False, "Invalid choice of latent/integral parameter (set the last column of X to 0s and 1s to select this)"
def update_gradients_full(self, dL_dK, X, X2=None):
if X2 is None: #we're finding dK_xx/dTheta
dK_dl_term = np.zeros([X.shape[0], X.shape[0], self.lengthscale.shape[0]])
# print('dK_dl_term shape:', dK_dl_term.shape)
k_term = np.zeros([X.shape[0], X.shape[0], self.lengthscale.shape[0]])
# dK_dl = np.zeros([X.shape[0], X.shape[0], self.lengthscale.shape[0]])
# print('dK_dl.shape:', dK_dl.shape)
# dK_dv = np.zeros([X.shape[0], X.shape[0]])
for il, l in enumerate(self.lengthscale):
idx = il * 2
for i, x in enumerate(X):
for j, x2 in enumerate(X):
dK_dl_term[i, j, il] = self.dk_dl(x[-1], x2[-1], x[idx], x2[idx], x[idx+1], x2[idx+1], l)
k_term[i, j, il] = self.k(x, x2, idx, l)
for il,l in enumerate(self.lengthscale):
dK_dl = self.variance[0] * dK_dl_term[:,:,il]
print ('dK_dl second shape:', dK_dl.shape)
print ('dK_dl_term second shape:', dK_dl_term.shape)
# print('dK_dl:', dK_dl)
# It doesn't work without the following lines, but the reason is unclear
for jl, l in enumerate(self.lengthscale): ##@FARIBA Why do I have to comment this out??
if jl != il:
dK_dl *= k_term[:,:,jl]
print('dK_dl inside!! what is this?', dK_dl)
self.lengthscale.gradient[il] = np.sum(dL_dK * dK_dl)
dK_dv = self.calc_K_wo_variance(X,X) #the gradient wrt the variance is k.
self.variance.gradient = np.sum(dL_dK * dK_dv)
else: #we're finding dK_xf/Dtheta
raise NotImplementedError("Currently this function only handles finding the gradient of a single vector of inputs (X) not a pair of vectors (X and X2)")
def dk_dz(self, x, x2, t, tprime, s, lengthscale):
l = lengthscale
if (x[-1] == 0) and (x2[-1] == 1):
return -
|
np.exp(-(t - tprime) ** 2 / l ** 2)
|
numpy.exp
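# A minimal usage sketch for the Mix_Integral kernel above: each input row
# holds the two limits of a bin plus a flag column (0 = integral observation,
# 1 = latent value), as documented in k(); the numbers below are made up.
import numpy as np

X = np.array([[1.0, 0.0, 0.0],    # integral over [0, 1]
              [2.0, 1.0, 0.0],    # integral over [1, 2]
              [0.5, 0.0, 1.0]])   # latent value at 0.5
kern = Mix_Integral(input_dim=3, variance=1.0, lengthscale=[0.5])
K = kern.K(X)                     # 3x3 covariance between the three inputs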
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 4 16:21:46 2020
@author: carl
"""
import numpy as np
from scipy.signal import savgol_filter, tukey
from . import baseline
from .util import load_reference, find_wn_ranges
def cut_wn(wn, y, ranges):
"""
Cut a set of spectra, leaving only the given wavenumber range(s).
Parameters:
wn: array of wavenumbers, sorted in either direction
y: array of spectra, shape (..., wavenumber)
ranges: list or numpy array of shape (..., 2) with desired wavenumber ranges in pairs (low, high)
Returns: (wavenumbers, spectra) with data in the given wavenumber ranges
"""
if isinstance(ranges, list):
ranges = np.array(ranges)
inrange = lambda w: ((w >= ranges[...,0]) & (w <= ranges[...,1])).any()
ix = np.array([inrange(w) for w in wn])
return wn[ix], y[...,ix]
def atmospheric(wn, y, atm=None, cut_co2 = True, extra_iters=5, extra_factor=0.25,
smooth_win=9, progressCallback = None):
"""
Apply atmospheric correction to multiple spectra, subtracting as much of the atmospheric
spectrum as needed to minimize the sum of squares of differences between consecutive points
in the corrected spectra. Each supplied range of wavenumbers is corrected separately.
Parameters:
wn: array of wavenumbers, sorted in either direction
y: array of spectra in the order (pixel, wavenumber), or just one spectrum
atm: atmospheric spectrum; if None, load the default
cut_co2: replace the CO2 region with a neatly fitted spline
extra_iters: number of iterations of subtraction of a locally reshaped atmospheric spectrum
(needed if the relative peak intensities are not always as in the atmospheric reference)
extra_factor: how much of the reshaped atmospheric spectrum to remove per iteration
smooth_win: window size (in cm-1) for smoothing of the spectrum in the atm regions
progressCallback(int a, int b): callback function called to indicated that the processing
is complete to a fraction a/b.
Returns:
tuple of (spectra after correction, array of correction factors; shape (spectra,ranges))
"""
squeeze = False
yorig = y
if y.ndim == 1:
y = y[None,:]
squeeze = True
else:
y = y.copy()
if atm is None or (isinstance(atm, str) and atm == ''):
atm = load_reference(wn, what='water')
elif isinstance(atm, str):
atm = load_reference(wn, matfilename=atm)
else:
atm = atm.copy()
# ranges: numpy array (n, 2) of n non-overlapping wavenumber ranges (typically for H2O only), or None
# extra_winwidth: width of the window (in cm-1) used to locally reshape the atm spectrum
ranges = [[1300, 2100], [3410, 3850], [2190, 2480]]
extra_winwidth = [30, 150, 40]
corr_ranges = 2 if cut_co2 else 3
# ranges = ranges[:2]
# extra_winwidth = extra_winwidth[:2]
if ranges is None:
ranges = np.array([0, len(wn)])
else:
ranges = find_wn_ranges(wn, ranges)
for i in range(corr_ranges):
p, q = ranges[i]
if q - p < 2: continue
atm[p:q] -= baseline.straight(wn[p:q], atm[p:q])
savgolwin = 1 + 2 * int(smooth_win * (len(wn) - 1) / np.abs(wn[0] - wn[-1]))
if progressCallback:
progressA = 0
progressB = 1 + corr_ranges * (extra_iters + (1 if savgolwin > 1 else 0))
progressCallback(progressA, progressB)
dh = atm[:-1] - atm[1:]
dy = y[:,:-1] - y[:,1:]
dh2 = np.cumsum(dh * dh)
dhdy = np.cumsum(dy * dh, 1)
az = np.zeros((len(y), corr_ranges))
for i in range(corr_ranges):
p, q = ranges[i]
if q - p < 2: continue
r = q-2 if q <= len(wn) else q-1
az[:, i] = ((dhdy[:,r] - dhdy[:,p-1]) / (dh2[r] - dh2[p-1])) if p > 0 else (dhdy[:,r] / dh2[r])
y[:, p:q] -= az[:, i, None] @ atm[None, p:q]
if progressCallback:
progressA += 1
progressCallback(progressA, progressB)
for pss in range(extra_iters):
for i in range(corr_ranges):
p, q = ranges[i]
if q - p < 2: continue
window = 2 * int(extra_winwidth[i] * (len(wn) - 1) / np.abs(wn[0] - wn[-1]))
winh = (window+1)//2
dy = y[:,:-1] - y[:,1:]
dhdy = np.cumsum(dy * dh, 1)
aa = np.zeros_like(y)
aa[:,1:winh+1] = dhdy[:,1:window:2] / np.maximum(dh2[1:window:2], 1e-8)
aa[:,1+winh:-winh-1] = (dhdy[:,window:-1] -
dhdy[:,:-1-window]) / np.maximum(dh2[window:-1] - dh2[:-1-window], 1e-8)
aa[:,-winh-1:-1] = (dhdy[:,-1:] -
dhdy[:,-1-window:-1:2]) / np.maximum(dh2[-1] - dh2[-1-window:-1:2], 1e-8)
aa[:, 0] = aa[:, 1]
aa[:, -1] = aa[:, -2]
aa = savgol_filter(aa, window + 1, 3, axis=1)
y[:, p:q] -= extra_factor * aa[:, p:q] * atm[p:q]
if progressCallback:
progressA += 1
progressCallback(progressA, progressB)
if savgolwin > 1:
for i in range(corr_ranges):
p, q = ranges[i]
if q - p < savgolwin: continue
y[:, p:q] = savgol_filter(y[:, p:q], savgolwin, 3, axis=1)
if progressCallback:
progressA += 1
progressCallback(progressA, progressB)
if cut_co2:
rng = np.array([[2190, 2260], [2410, 2480]])
rngm = rng.mean(1)
rngd = rngm[1] - rngm[0]
cr = find_wn_ranges(wn, rng).flatten()
if cr[1] - cr[0] > 2 and cr[3] - cr[2] > 2:
a = np.empty((4, len(y)))
a[0:2,:] = np.polyfit((wn[cr[0]:cr[1]]-rngm[0])/rngd, y[:,cr[0]:cr[1]].T, deg=1)
a[2:4,:] = np.polyfit((wn[cr[2]:cr[3]]-rngm[1])/rngd, y[:,cr[2]:cr[3]].T, deg=1)
P,Q = find_wn_ranges(wn, rngm[None,:])[0]
t = np.interp(wn[P:Q], wn[[Q,P] if wn[0] > wn[-1] else [P,Q]], [1, 0])
tt = np.array([-t**3+t**2, -2*t**3+3*t**2, -t**3+2*t**2-t, 2*t**3-3*t**2+1])
pt = a.T @ tt
y[:, P:Q] += (pt - y[:, P:Q]) * tukey(len(t), .3)
corrs = np.zeros(2)
ncorrs = np.zeros_like(corrs)
for i in range(len(ranges)):
p, q = ranges[i]
if q - p < 2: continue
corr = np.abs(yorig[:, p:q] - y[:, p:q]).sum(1) / np.maximum(np.abs(yorig[:, p:q]),
|
np.abs(y[:, p:q])
|
numpy.abs
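# A minimal usage sketch for atmospheric() above, assuming the default water
# reference spectrum can be loaded by load_reference(); wn and y are
# hypothetical stand-ins, and per the docstring the function returns the
# corrected spectra together with the correction factors.
import numpy as np

wn = np.linspace(800, 4000, 1600)      # wavenumbers
y = np.random.rand(5, wn.size)         # five fake spectra
y_corr, factors = atmospheric(wn, y, cut_co2=True)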
|
import numpy as np
import matplotlib.pyplot as plt
import cv2
import math
from skimage import data
import pandas as pd
from itertools import product
from skimage.feature import greycomatrix, greycoprops
from scipy.stats import wilcoxon
from scipy.stats import binom_test
def normalize(x, scale=255):
x = (((x-x.min())/(x.max()-x.min()))*scale)
return x
class MSMFE:
def __init__(self, ref, imgs=None, vmin=0, vmax=255, nbit=8, ks=5, verbose=False,
features = ['Autocorrelation', 'ClusterProminence', 'ClusterShade', 'ClusterTendency', 'Contrast',
'Correlation', 'DifferenceEntropy', 'DifferenceVariance', 'Energy', 'Entropy',
'Id', 'Idm', 'Idmn', 'Idn', 'Imc1', 'Imc2', 'InverseVariance', 'JointAverage',
'MCC', 'MaximumProbability', 'SumAverage', 'SumEntropy', 'SumSquares']):
if verbose: print('\tInitializing ...')
ref = self.normalize(ref)
if imgs is not None:
self.keys = imgs.keys()
for key in imgs.keys():
imgs[key] = self.normalize(imgs[key])
if verbose: print('\tCreating GLCM(s) ...')
self.vmin = vmin
self.vmax = vmax
self.nbit = nbit
self.ks = ks
self.glcm_ref = self.fast_glcm(ref)
self.glcm_imgs = {}
self.features = features
self.error = {}
self.img_feature_maps = {}
self.feature_maps_ref = self.feature_maps(self.glcm_ref, features)
self.imgs = imgs
self.verbose=verbose
if verbose: print('\tDone creating.')
def get_names(self):
names = list(self.keys) + ['_Reference']
return names
def normalize(self, img, max=1, scale=255):
#Needs max set to one to account for PixelMiner not producing pixels up to 1
img = (img - img.min())/(max-img.min())
img *= scale
#img = img.astype(np.uint8)
return img
def get_feature_maps(self):
if self.imgs is not None:
for key in self.keys:
glcm = self.fast_glcm(self.imgs[key])
self.img_feature_maps[key] = self.feature_maps(glcm, self.features)
self.img_feature_maps['Reference'] = self.feature_maps_ref
return self.img_feature_maps
else:
return self.feature_maps_ref
def get_error(self, return_diff=False):
if self.imgs is not None:
for key in self.keys:
glcm = self.fast_glcm(self.imgs[key])
self.img_feature_maps[key] = self.feature_maps(glcm, self.features)
if return_diff:
diff_df = pd.DataFrame(index=self.keys, columns=self.features)
error_df = pd.DataFrame(index=self.keys, columns=self.features)
for feature in self.features:
#if self.verbose: print('\tDoing feature ...', feature, 'x'+str(len(self.keys)))
for key in self.keys:
#print('\t\t'+key)
#print('\t\t'+str(self.img_feature_maps.keys()))
img = self.img_feature_maps[key][feature]
ref = self.feature_maps_ref[feature]
diff = ref - img
if return_diff:
diff_df.at[key, feature] = diff.mean()
error = ((diff) ** 2).mean()
error_df.at[key, feature] = error
if return_diff:
return error_df, diff_df
else:
return error_df
else:
print('Input needs an image and a reference image to calculate error.')
def get_saliency(self, feature):
saliencies = []
for key in self.keys:
img = self.feature_maps[feature][key]
ref = self.feature_maps_ref[feature]
saliencies.append((ref - img) ** 2)
saliencies = np.asarray(saliencies)
return saliencies
def calculate_matrix(self, img, voxelCoordinates=None):
r"""
Compute GLCMs for the input image for every direction in 3D.
Calculated GLCMs are placed in array P_glcm with shape (i/j, a)
i/j = total gray-level bins for image array,
a = directions in 3D (generated by imageoperations.generateAngles)
"""
quant = normalize(img, scale=self.nbit).astype(np.int8)
degrees = [0, np.pi/4, np.pi/2, 3*np.pi/4]  # 0, 45, 90, 135 degree GLCM directions
distance = [1]
P_glcm = greycomatrix(quant, distance, degrees, levels=self.nbit)
P_glcm = np.moveaxis(P_glcm, -2, 0)
P_glcm = P_glcm.astype(np.float32)
sumP_glcm = np.sum(P_glcm, (1, 2)).astype(np.float32)
sumP_glcm[sumP_glcm == 0] = np.nan
P_glcm /= sumP_glcm[:, None, None, :]
P_glcm = np.moveaxis(P_glcm, -1, 0).squeeze()
return P_glcm
def fast_glcm(self, img, conv=True, scale=False):
min, max = self.vmin, self.vmax
shape = img.shape
if len(shape) > 2:
print('Shape of', shape, 'is invalid, images must be 2d.')
return
h,w = img.shape
# digitize
bins = np.linspace(min, max, self.nbit+1)[1:]
#print('Bins:', bins)
gl = np.digitize(img, bins) - 1
gl.shape
#print('Unique:', np.unique(gl))
#print('GL:', gl.min(), gl.max())
shifts = np.zeros((4, h, w))
shifts[0] = np.append( gl[:, 1:], gl[:, -1:], axis=1) # one
shifts[1] = np.append( gl[1:, :], gl[-1:, :], axis=0) # two
shifts[2] = np.append(shifts[0][1:, :], shifts[0][-1:, :], axis=0) # three
shifts[3] = np.append(shifts[0][:1, :], shifts[0][:-1, :], axis=0) # four
#plt.imshow(gl)
#plt.show()
#plt.imshow(shifts[0])
#plt.show()
# make glcm
glcm = np.zeros((4, self.nbit, self.nbit, h, w), dtype=np.uint8)
for n, shift in enumerate(shifts):
for i in range(self.nbit):
for j in range(self.nbit):
mask = ((gl==i) & (shift==j))
glcm[n, i, j, mask] = 1
if conv:
kernel = np.ones((self.ks, self.ks), dtype=np.uint8)
for i in range(self.nbit):
for j in range(self.nbit):
glcm[n, i, j] = cv2.filter2D(glcm[n, i, j], -1, kernel)
glcm = glcm.astype(np.float32)
if scale:
matrix = self.calculate_matrix(img)
#matrix = glcm.sum((3, 4))
#print('SHAPE OF THE SCIKIT IMAGE MATRIX:', matrix.shape)
glcm = matrix[:, :, :, None, None] * glcm
#for direction in range(4):
# matrix[direction] = self.normalize(matrix[direction], scale=1)
glcm = np.moveaxis(glcm, 0, -1)
return glcm
def get_means(self, img, glcm):
h,w = img.shape
mean_i = np.zeros((h,w), dtype=np.float32)
for i in range(self.nbit):
for j in range(self.nbit):
mean_i += glcm[i,j] * i / (self.nbit)**2
mean_j = np.zeros((h,w), dtype=np.float32)
for j in range(self.nbit):
for i in range(self.nbit):
mean_j += glcm[i,j] * j / (self.nbit)**2
return mean_i, mean_j
def get_stds(self, img, glcm):
h,w = img.shape
mean_i, mean_j = self.get_means(img, glcm)
std_i = np.zeros((h,w), dtype=np.float32)
for i in range(self.nbit):
for j in range(self.nbit):
std_i += (glcm[i,j] * i - mean_i)**2
std_i = np.sqrt(std_i)
std_j = np.zeros((h,w), dtype=np.float32)
for j in range(self.nbit):
for i in range(self.nbit):
std_j += (glcm[i,j] * j - mean_j)**2
std_j = np.sqrt(std_j)
return mean_i, mean_j, std_i, std_j
def get_max(self, glcm):
max_ = np.max(glcm, axis=(0,1))
return(max_)
def feature_maps(self, glcm, features):
glcm = normalize(glcm, scale=2)
#h, w = glcm.shape[-3], glcm.shape[-2]
#glcm *= 16
#print('GLCM:', glcm.min(), glcm.max())
'''
for q in range(4):
count = 1
for o in range(8):
for p in range(8):
plt.xticks([])
plt.yticks([])
plt.subplot(8, 8, count)
test = glcm[o, p, :, :, q]
plt.imshow(test, vmax=25)
count+=1
plt.show()
'''
eps = np.spacing(1)
bitVector = np.arange(0,self.nbit,1)
i, j = np.meshgrid(bitVector, bitVector, indexing='ij', sparse=True)
iAddj = i + j
iSubj = np.abs(i-j)
ux = i[:, :, None, None, None] * glcm
uy = j[:, :, None, None, None] * glcm
#print('UX, UY:', ux.shape, uy.shape, ux.min(), ux.max())
'''
for q in range(4):
count = 1
for o in range(8):
for p in range(8):
plt.xticks([])
plt.yticks([])
plt.subplot(8, 8, count)
test = ux[o, p, :, :, q]
plt.imshow(test, vmax=25)
count+=1
plt.show()
'''
px = np.sum(glcm, 1)
px = px[:, None, :, :, :]
py = np.sum(glcm, 0)
py = py[None, :, :, :, :]
#for m in range(4):
# #plt.subplot(2,2,m+1)
# plt.title(str(ux[:, :, m].min()) + ' ' + str(ux [:, :, m].max()))
# plt.imshow(ux[:, :, m])
# plt.show()
ux = np.sum((i[:, :, None, None, None] * glcm), (0, 1))
ux = normalize(ux, scale=self.nbit)
uy = np.sum((j[:, :, None, None, None] * glcm), (0, 1))
uy = normalize(uy, scale=self.nbit)
'''
print()
print('GLCM stuff:')
print(glcm.min(), glcm.max())
print()
print('IJ Stuff:')
print(i[:, :, None, None, None].shape)
print(j[:, :, None, None, None].shape)
print()
print('U stuff:')
print(ux.shape)
print(uy.shape)
for n in range(4):
plt.title('ux')
plt.imshow(ux[:, :, n])
plt.show()
'''
kValuesSum = np.arange(0, (self.nbit * 2)-1, dtype='float')
#kValuesSum = np.arange(2, (self.nbit * 2) + 1, dtype='float')
kDiagIntensity = np.array([iAddj == k for k in kValuesSum])
GLCMDiagIntensity = np.array([kDiagIntensity[int(k)][:, :, None, None, None] * glcm for k in kValuesSum])
pxAddy = np.sum(GLCMDiagIntensity, (1, 2))
kValuesDiff = np.arange(0, self.nbit, dtype='float')
#kValuesDiff = np.arange(0, self.nbit, dtype='float')
kDiagContrast = np.array([iSubj == k for k in kValuesDiff])
GLCMDiagIntensity = np.array([kDiagContrast[int(k)][:, :, None, None, None] * glcm for k in kValuesDiff])
pxSuby = np.sum(GLCMDiagIntensity, (1, 2))
HXY = (-1) * np.sum((glcm * np.log2(glcm + eps)), (0, 1))
features_dict = {}
if 'Autocorrelation' in features:
ac = np.sum(glcm * (i * j)[:, :, None, None, None], (0, 1))
features_dict['Autocorrelation'] = np.nanmean(ac, -1)
if 'ClusterProminence' in features:
cp = np.sum((glcm * (((i + j)[:, :, None, None, None] - ux - uy) ** 4)), (0, 1))
features_dict['ClusterProminence'] = np.nanmean(cp, -1)
if 'ClusterShade' in features:
cs = np.sum((glcm * (((i + j)[:, :, None, None, None] - ux - uy) ** 3)), (0, 1))
features_dict['ClusterShade'] = np.nanmean(cs, -1)
if 'ClusterTendency' in features:
ct = np.sum((glcm * (((i + j)[:, :, None, None, None] - ux - uy) ** 2)), (0, 1))
features_dict['ClusterTendency'] = np.nanmean(ct, -1)
if 'Contrast' in features:
cont = np.sum((glcm * ((np.abs(i - j))[:, :, None, None, None] ** 2)), (0, 1))
features_dict['Contrast'] = np.nanmean(cont, -1)
if 'Correlation' in features:
# shape = (Nv, 1, 1, angles)
sigx = np.sum(glcm * ((i[:, :, None, None, None] - ux) ** 2), (0, 1), keepdims=True) ** 0.5
# shape = (Nv, 1, 1, angles)
sigy = np.sum(glcm * ((j[:, :, None, None, None] - uy) ** 2), (0, 1), keepdims=True) ** 0.5
corm = np.sum(glcm * (i[:, :, None, None, None] - ux) * (j[:, :, None, None, None] - uy), (0, 1), keepdims=True)
corr = corm / (sigx * sigy + eps)
corr[sigx * sigy == 0] = 1 # Set elements that would be divided by 0 to 1.
features_dict['Correlation'] = np.nanmean(corr, (0, 1, -1))
if 'DifferenceAverage' in features:
features_dict['DifferenceAverage'] = np.sum((kValuesDiff[:, None, None, None] * pxSuby), (0, -1))
if 'DifferenceEntropy' in features:
features_dict['DifferenceEntropy'] = (-1) * np.sum((pxSuby * np.log2(pxSuby + eps)), (0, -1))
if 'DifferenceVariance' in features:
diffavg = np.sum((kValuesDiff[:, None, None, None] * pxSuby), 0, keepdims=True)
diffvar = np.sum((pxSuby * ((kValuesDiff[:, None, None, None] - diffavg) ** 2)), (0, -1))
features_dict['DifferenceVariance'] = diffvar
if 'Energy' in features:
sum_squares = np.sum((glcm ** 2), (0, 1))
features_dict['Energy'] = np.nanmean(sum_squares, -1)
if 'Entropy' in features:
features_dict['Entropy'] = np.sum(HXY, -1)
if 'Id' in features:
features_dict['Id'] = np.sum(pxSuby / (1 + kValuesDiff[:, None, None, None]), (0, -1))
if 'Idm' in features:
features_dict['Idm'] = np.sum(pxSuby / (1 + (kValuesDiff[:, None, None, None] ** 2)), (0, -1))
if 'Idmn' in features:
features_dict['Idmn'] = np.sum(pxSuby / (1 + ((kValuesDiff[:, None, None, None] ** 2) / (self.nbit ** 2))), (0,-1))
if 'Idn' in features:
features_dict['Idn'] = np.sum(pxSuby / (1 + (kValuesDiff[:, None, None, None] / self.nbit)), (0, -1))
if 'Imc1' in features:
# entropy of px # shape = (Nv, angles)
HX = (-1) * np.sum((px * np.log2(px + eps)), (0, 1))
# entropy of py # shape = (Nv, angles)
HY = (-1) * np.sum((py * np.log2(py + eps)), (0, 1))
# shape = (Nv, angles)
HXY1 = (-1) * np.sum((glcm * np.log2(px * py + eps)), (0, 1))
div = np.fmax(HX, HY)
imc1 = HXY - HXY1
imc1[div != 0] /= div[div != 0]
imc1[div == 0] = 0 # Set elements that would be divided by 0 to 0
features_dict['Imc1'] = np.nanmean(imc1, -1)
#print('IMC1:', features_dict['Imc1'].shape)
if 'Imc2' in features:
# shape = (Nv, angles)
HXY2 = (-1) * np.sum(((px * py) *
|
np.log2(px * py + eps)
|
numpy.log2
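# A minimal usage sketch for the MSMFE class above; the random arrays stand
# in for real 2-D image slices and the key 'model_a' is hypothetical.
import numpy as np

ref = np.random.rand(64, 64)
preds = {'model_a': np.random.rand(64, 64)}
msmfe = MSMFE(ref, imgs=preds, nbit=8, ks=5)
error_df = msmfe.get_error()   # per-feature mean squared error vs. the reference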
|
# -*- coding: utf-8 -*-
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
import pickle
import numpy as np
import random
from collections import Counter
from network import load_params
# Load the preprocessed data
title_count, title_set, genres2int, features, targets_values, ratings, users, movies, data, movies_orig, users_orig = pickle.load(
open('./data/ml-1m/preprocess.p', mode='rb'))
# Length of movie titles
sentences_size = title_count # = 15
# Dictionary mapping movie ID to index; movie IDs in the dataset do not match row indices (e.g. the movie ID in row 5 is not necessarily 5)
movieid2idx = {val[0]:i for i, val in enumerate(movies.values)}
load_dir = load_params()
def get_param_tensors(loaded_graph):
targets = loaded_graph.get_tensor_by_name("targets:0")
dropout_keep_prob = loaded_graph.get_tensor_by_name("dropout_keep_prob:0")
lr = loaded_graph.get_tensor_by_name("learning_rate:0")
# The two different rating-prediction schemes use different names to fetch the y_pred tensor
y_pred = loaded_graph.get_tensor_by_name("y_pred/ExpandDims:0")#
user_combine_layer_flat = loaded_graph.get_tensor_by_name("user_fc/Reshape:0")
return targets, lr, dropout_keep_prob, y_pred
def get_user_tensors(loaded_graph):
# tf.get_default_graph().as_graph_def().node All tensors
uid = loaded_graph.get_tensor_by_name("uid:0")
user_gender = loaded_graph.get_tensor_by_name("user_gender:0")
user_age = loaded_graph.get_tensor_by_name("user_age:0")
user_job = loaded_graph.get_tensor_by_name("user_job:0")
user_combine_layer_flat = loaded_graph.get_tensor_by_name("user_fc/Reshape:0")
return uid, user_gender, user_age, user_job, user_combine_layer_flat
def get_movie_tensors(loaded_graph):
# tf.get_default_graph().as_graph_def().node All tensors
movie_id = loaded_graph.get_tensor_by_name("movie_id:0")
movie_categories = loaded_graph.get_tensor_by_name("movie_categories:0")
movie_titles = loaded_graph.get_tensor_by_name("movie_titles:0")
movie_combine_layer_flat = loaded_graph.get_tensor_by_name("movie_fc/Reshape:0")
return movie_id, movie_categories, movie_titles, movie_combine_layer_flat
def rating_movie(user_id_val, movie_id_val):
"""
Predict the rating for the given user and movie.
return [array([[3.6275628]], dtype=float32)]
"""
loaded_graph = tf.Graph() #
with tf.Session(graph=loaded_graph) as sess: #
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
targets, lr, dropout_keep_prob, y_pred = get_param_tensors(loaded_graph)
uid, user_gender, user_age, user_job, _ = get_user_tensors(loaded_graph)
movie_id, movie_categories, movie_titles, _ = get_movie_tensors(loaded_graph)
categories = np.zeros([1, 18])
categories[0] = movies.values[movieid2idx[movie_id_val]][2]
titles = np.zeros([1, sentences_size])
titles[0] = movies.values[movieid2idx[movie_id_val]][1]
feed = {
uid: np.reshape(users.values[user_id_val-1][0], [1, 1]),
user_gender: np.reshape(users.values[user_id_val-1][1], [1, 1]),
user_age: np.reshape(users.values[user_id_val-1][2], [1, 1]),
user_job: np.reshape(users.values[user_id_val-1][3], [1, 1]),
movie_id: np.reshape(movies.values[movieid2idx[movie_id_val]][0], [1, 1]),
movie_categories: categories, # x.take(6,1)
movie_titles: titles, # x.take(5,1)
dropout_keep_prob: 1}
# Get Prediction
rating_val = sess.run([y_pred], feed)
print(rating_val)
return (rating_val)
def movie_feature_matrics():
"""
    Generate the movie feature matrix.
"""
loaded_graph = tf.Graph() #
movie_matrics = []
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
targets, lr, dropout_keep_prob, y_pred = get_param_tensors(loaded_graph)
movie_id, movie_categories, movie_titles, movie_combine_layer_flat = get_movie_tensors(loaded_graph)
for item in movies.values:
categories = np.zeros([1, 18])
categories[0] = item.take(2)
titles =
|
np.zeros([1, sentences_size])
|
numpy.zeros
|
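# --- Hedged sketch (illustrative only): recovering tensors by name from a graph,
# which is all that get_param_tensors / get_user_tensors / get_movie_tensors above do.
# This assumes the TF1-style API used in the snippet; the tiny graph below is a
# stand-in, and only the tensor-naming convention is taken from the original code.
import tensorflow as tf
g = tf.Graph()
with g.as_default():
    tf.placeholder(tf.float32, [None, 1], name="targets")
    tf.placeholder(tf.float32, name="dropout_keep_prob")
targets = g.get_tensor_by_name("targets:0")
keep_prob = g.get_tensor_by_name("dropout_keep_prob:0")
print(targets.shape, keep_prob.dtype)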
import numpy as np
from skimage import color
class RGB2Lab(object):
def __call__(self, img):
img = np.asarray(img, np.uint8)
img = color.rgb2lab(img)
return img
class RGB2HSV(object):
def __call__(self, img):
img = np.asarray(img, np.uint8)
img = color.rgb2hsv(img)
return img
class RGB2HED(object):
def __call__(self, img):
img = np.asarray(img, np.uint8)
img = color.rgb2hed(img)
return img
class RGB2LUV(object):
def __call__(self, img):
img = np.asarray(img, np.uint8)
img = color.rgb2luv(img)
return img
class RGB2YUV(object):
def __call__(self, img):
img = np.asarray(img, np.uint8)
img = color.rgb2yuv(img)
return img
class RGB2XYZ(object):
def __call__(self, img):
img = np.asarray(img, np.uint8)
img = color.rgb2xyz(img)
return img
class RGB2YCbCr(object):
def __call__(self, img):
img = np.asarray(img, np.uint8)
img = color.rgb2ycbcr(img)
return img
class RGB2YDbDr(object):
def __call__(self, img):
img = np.asarray(img, np.uint8)
img = color.rgb2ydbdr(img)
return img
class RGB2YPbPr(object):
def __call__(self, img):
img = np.asarray(img, np.uint8)
img = color.rgb2ypbpr(img)
return img
class RGB2YIQ(object):
def __call__(self, img):
img = np.asarray(img, np.uint8)
img = color.rgb2yiq(img)
return img
class RGB2CIERGB(object):
def __call__(self, img):
img =
|
np.asarray(img, np.uint8)
|
numpy.asarray
|
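# --- Hedged usage sketch (not part of the module above): applying one of the
# transforms to a synthetic PIL image; the image contents and size are arbitrary.
import numpy as np
from PIL import Image
rgb = Image.fromarray(np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8))
lab = RGB2Lab()(rgb)                  # float array in CIELAB space
print(lab.shape, lab.dtype)           # (32, 32, 3) float64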
# # FAPS PLMAgents
import logging
import os
import random
from collections import deque
import numpy as np
import tensorflow as tf
from keras import backend as k, Input, Model
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from OpenAIGym.exception import FAPSPLMEnvironmentException
logger = logging.getLogger("FAPSPLMAgents")
class FAPSTrainerException(FAPSPLMEnvironmentException):
"""
Related to errors with the Trainer.
"""
pass
class DQN_RC:
"""This class is the abstract class for the trainers"""
def __init__(self, envs, brain_name, trainer_parameters, training, seed):
"""
Responsible for collecting experiences and training a neural network model.
:param envs: The FAPSPLMEnvironment.
:param brain_name: The brain to train.
:param trainer_parameters: The parameters for the trainer (dictionary).
:param training: Whether the trainer is set for training.
:param seed: Random seed.
"""
# initialize global trainer parameters
self.brain_name = brain_name
self.trainer_parameters = trainer_parameters
self.is_training = training
self.seed = seed
self.steps = 0
self.last_reward = 0
self.initialized = False
# initialize specific DQN parameters
self.time_slice = 4
self.env_brains = envs
self.state_size = 0
self.action_size = 0
for env_name, env in self.env_brains.items():
self.action_size = env.action_space.n
self.state_size = env.observation_space.n
# self.action_space_type = envs.actionSpaceType
self.num_layers = self.trainer_parameters['num_layers']
self.batch_size = self.trainer_parameters['batch_size']
self.hidden_units = self.trainer_parameters['hidden_units']
self.replay_memory = deque(maxlen=self.trainer_parameters['memory_size'])
self.replay_sequence = deque(maxlen=self.time_slice)
self.gamma = self.trainer_parameters['gamma'] # discount rate
self.epsilon = self.trainer_parameters['epsilon'] # exploration rate
self.epsilon_min = self.trainer_parameters['epsilon_min']
self.epsilon_decay = self.trainer_parameters['epsilon_decay']
self.alpha = self.trainer_parameters['alpha']
self.alpha_decay = self.trainer_parameters['alpha_decay']
self.alpha_min = self.trainer_parameters['alpha_min']
self.learning_rate = self.trainer_parameters['learning_rate']
self.summary = self.trainer_parameters['summary_path']
self.tensorBoard = tf.summary.FileWriter(logdir=self.summary)
self.model = None
self.target_model = None
def __str__(self):
return '''DQN RC Trainer'''
@property
def parameters(self):
"""
Returns the trainer parameters of the trainer.
"""
return self.trainer_parameters
@property
def get_max_steps(self):
"""
Returns the maximum number of steps. Is used to know when the trainer should be stopped.
:return: The maximum number of steps of the trainer
"""
return self.trainer_parameters['max_steps']
@property
def get_step(self):
"""
Returns the number of steps the trainer has performed
:return: the step count of the trainer
"""
return self.steps
@property
def get_last_reward(self):
"""
Returns the last reward the trainer has had
:return: the new last reward
"""
return self.last_reward
def _build_model(self):
# Neural Net for Deep-Q learning Model
a = Input(shape=[self.state_size * self.time_slice], name='actor_state')
h = Dense(self.hidden_units, activation='relu', kernel_initializer='he_uniform', name="dense_actor")(a)
h = Dropout(0.2)(h)
for x in range(1, self.num_layers):
h = Dense(self.hidden_units, activation='relu', kernel_initializer='he_uniform')(h)
h = Dropout(0.2)(h)
o = Dense(self.action_size, activation='softmax', kernel_initializer='he_uniform')(h)
model = Model(inputs=a, outputs=o)
return model
def is_initialized(self):
"""
check if the trainer is initialized
"""
return self.initialized
def _update_target_model(self):
# copy weights from model to target_model
self.target_model.set_weights(self.model.get_weights())
def initialize(self):
"""
Initialize the trainer
"""
self.model = self._build_model()
self.model.compile(loss='mse', optimizer=RMSprop(lr=self.learning_rate),
metrics=['mse'])
print(self.model.summary())
self.target_model = self._build_model()
self.target_model.compile(loss='mse', optimizer=RMSprop(lr=self.learning_rate),
metrics=['mse'])
print(self.target_model.summary())
self._update_target_model()
self.initialized = True
def clear(self):
"""
Clear the trainer
"""
k.clear_session()
self.replay_memory.clear()
self.model = None
def load_model_and_restore(self, model_path):
"""
Load and restore the model from a defined path.
:param model_path: saved model.
"""
if os.path.exists('./' + model_path + '/DQN_RC.h5'):
self.model = self._build_model()
self.model.load_weights('./' + model_path + '/DQN_RC.h5')
self.model.compile(loss='mse', optimizer=RMSprop(lr=self.learning_rate),
metrics=['mse'])
else:
self.model = self._build_model()
self.model.compile(loss='mse', optimizer=RMSprop(lr=self.learning_rate),
metrics=['mse'])
if os.path.exists('./' + model_path + '/DQN_RC_target.h5'):
self.target_model = self._build_model()
self.target_model.load_weights('./' + model_path + '/DQN_RC_target.h5')
self.target_model.compile(loss='mse', optimizer=RMSprop(lr=self.learning_rate), metrics=['mse'])
else:
self.target_model = self._build_model()
self.target_model.compile(loss='mse', optimizer=RMSprop(lr=self.learning_rate), metrics=['mse'])
def increment_step(self):
"""
Increment the step count of the trainer
"""
self.steps = self.steps + 1
def update_last_reward(self, rewards):
"""
Updates the last reward
"""
self.last_reward = rewards
def take_action(self, observation, _env):
"""
Decides actions given state/observation information, and takes them in environment.
:param observation: The BrainInfo from environment.
:param _env: The environment.
:return: the action array and an object as cookie
"""
if (self.is_training and np.random.rand() <= self.epsilon) or len(self.replay_sequence) < (self.time_slice - 1):
return np.argmax(np.random.randint(0, 2, self.action_size))
else:
last_elements = self.replay_sequence.copy()
last_elements.append(observation)
arr_last_elements = np.array(last_elements)
tmp = arr_last_elements.reshape((1, self.state_size * self.time_slice))
act_values = self.model.predict(tmp)
_max = np.nanmax(act_values[0])
indices = np.argwhere(act_values[0] == _max)
choice = np.random.choice(indices.size)
return indices[choice, 0]
def add_experiences(self, observation, action, next_observation, reward, done, info):
"""
Adds experiences to each agent's experience history.
:param observation: the observation before executing the action
:param action: Current executed action
:param next_observation: the observation after executing the action
:param reward: the reward obtained after executing the action.
:param done: true if the episode ended.
:param info: info after executing the action.
"""
self.replay_sequence.append(observation)
if len(self.replay_sequence) >= self.time_slice:
tmp = np.array(self.replay_sequence.copy()).reshape((1, self.state_size * self.time_slice))
next_last_elements = self.replay_sequence.copy()
next_last_elements.append(next_observation)
next_arr_last_elements = np.array(next_last_elements)
next_tmp = next_arr_last_elements.reshape((1, self.state_size * self.time_slice))
self.replay_memory.append((tmp, action, next_tmp, reward, done, info))
def process_experiences(self, current_info, action_vector, next_info):
"""
Checks agent histories for processing condition, and processes them as necessary.
Processing involves calculating value and advantage targets for model updating step.
:param current_info: Current BrainInfo.
:param action_vector: Current executed action
:param next_info: Next corresponding BrainInfo.
"""
# Nothing to be done in the DQN case
def end_episode(self):
"""
A signal that the Episode has ended. The buffer must be reset.
Get only called when the academy resets.
"""
# print("End Episode...")
def is_ready_update(self):
"""
Returns whether or not the trainer has enough elements to run update model
        :return: A boolean corresponding to whether or not update_model() can be run
"""
# The NN is ready to be updated if there is at least a batch in the replay memory
# return (len(self.replay_memory) >= self.batch_size) and (len(self.replay_memory) % self.batch_size == 0)
        # The NN is ready to be updated every time a batch is sampled
return (self.steps > 1) and ((self.steps % self.batch_size) == 0)
def update_model(self):
"""
Uses the memory to update model. Run back propagation.
"""
# TODO: update to support multiple agents. Now only one agent is supported
num_samples = min(self.batch_size, len(self.replay_memory))
mini_batch = random.sample(self.replay_memory, num_samples)
# Start by extracting the necessary parameters (we use a vectorized implementation).
state0_batch = []
reward_batch = []
action_batch = []
terminal1_batch = []
state1_batch = []
for state, action, next_state, reward, done, info in mini_batch:
state0_batch.append(state)
state1_batch.append(next_state)
reward_batch.append(reward)
action_batch.append(action)
terminal1_batch.append(0. if done else 1.)
state0_batch = np.array(state0_batch).reshape((num_samples, self.state_size * self.time_slice))
state1_batch = np.array(state1_batch).reshape((num_samples, self.state_size * self.time_slice))
# terminal1_batch = np.array(terminal1_batch)
reward_batch = np.array(reward_batch)
action_batch = np.array(action_batch)
next_target = self.target_model.predict_on_batch(state1_batch)
discounted_reward_batch = self.gamma *
|
np.amax(next_target, axis=1)
|
numpy.amax
|
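# --- Hedged sketch (not from the trainer above): the fitted-Q target that
# update_model() is presumably assembling from reward_batch, terminal1_batch and the
# target network's predictions. All arrays below are toy stand-ins.
import numpy as np
gamma = 0.99
rewards = np.array([1.0, 0.0, -1.0])
terminal1 = np.array([1., 1., 0.])                         # 0. where the episode ended
q_next = np.array([[0.2, 0.5], [0.1, 0.3], [0.4, 0.0]])    # stand-in for target_model.predict_on_batch(state1)
targets = rewards + gamma * terminal1 * np.amax(q_next, axis=1)
print(targets)                                             # [ 1.495  0.297 -1.   ]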
import skimage
import numpy as np
from skimage import measure
import matplotlib.pyplot as plt
from shapely.geometry import LinearRing, LineString
from scipy.ndimage import center_of_mass
import tifffile as tiff
def load_nucleus_and_periphery_from_files(target_nucleus_file, target_periphery_file, do_plotting=False):
nucleus_image = skimage.io.imread(target_nucleus_file)
nucleus_image = skimage.util.invert(nucleus_image)
nucleus_contour = measure.find_contours(nucleus_image[:,:,0], 100)[0]
nucleus_center = center_of_mass(nucleus_image[:,:,0])
nucleus_contour_shape = LinearRing(nucleus_contour)
periphery_image = skimage.io.imread(target_periphery_file)
periphery_contours = measure.find_contours(periphery_image[:,:,0], 100)
periphery_contour = np.vstack(periphery_contours)
periphery_contour_shape = LinearRing(periphery_contour)
if do_plotting:
plt.plot(nucleus_center[0], nucleus_center[1], 'o', color='#2ca02c', alpha=0.7)
plt.plot(nucleus_contour[:, 0], nucleus_contour[:, 1], linewidth=2, color = '#1f77b4', alpha=0.7)
peri_for_plot = np.vstack([periphery_contour, periphery_contour[0, :]])
plt.plot(peri_for_plot[:, 0], peri_for_plot[:, 1], linewidth=2, color = '#ff7f0e', alpha=0.7)
return nucleus_center, nucleus_contour_shape, periphery_contour_shape
def get_perinuclearity_for_point(target_point, nucleus_center, nucleus_contour_shape,
periphery_contour_shape, raylength=5000, do_plotting=False):
ray1 = LineString(((nucleus_center[0], nucleus_center[1]),
nucleus_center + raylength/np.linalg.norm(target_point-nucleus_center)*(target_point-nucleus_center)))
int_point = ray1.intersection(nucleus_contour_shape)
if int_point.geom_type == 'MultiPoint':
distances_to_center = np.array([np.linalg.norm(np.array((point.x, point.y))-nucleus_center)
for point in int_point])
point_furthest_from_center = int_point[np.argmax(distances_to_center)]
nucl_intersection = np.array((point_furthest_from_center.x,
point_furthest_from_center.y))
else:
try:
nucl_intersection = np.array((int_point.x, int_point.y))
except AttributeError:
plt.plot(ray1.xy[0], ray1.xy[1])
plt.plot(target_point[0], target_point[1], 'o', color='green', markersize=10)
plt.show()
raise AttributeError
int_point = ray1.intersection(periphery_contour_shape)
if int_point.geom_type == 'MultiPoint':
distances_to_center = np.array([np.linalg.norm(
|
np.array((point.x, point.y))
|
numpy.array
|
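# --- Hedged sketch (illustrative only): the ray/contour intersection pattern used in
# get_perinuclearity_for_point() above, on a made-up square contour.
from shapely.geometry import LinearRing, LineString
ring = LinearRing([(0, 0), (0, 2), (2, 2), (2, 0)])
ray = LineString([(1, 1), (1, 5)])               # from inside the ring outwards
hit = ray.intersection(ring)
print(hit.geom_type, list(hit.coords))           # Point [(1.0, 2.0)]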
import numpy as np
import scipy.signal as sig
import scipy.io as load_mat
import netCDF4
from math import pi
from os import path
data_path = 'data/raw'
class xponder:
def __init__(self):
"""setup common parameters"""
# file samples
self.fs = 1e7 / 256
self.t_load = (5, 12)
samples = np.array(self.t_load) * self.fs
self.samples = samples.astype(np.int_)
self.t_a = np.arange(self.samples[0], self.samples[1]) / self.fs
# frequency domain specifications
self.nfft = 2 ** (int(np.log2(self.t_a.size)) + 1)
self.f_a = np.arange(self.nfft) * self.fs / self.nfft
# hydrophone sensitivities
self.bits2volts = 2.5 / 2 ** 23
self.ampgain = 10 ** (12 / 20)
self.hysens = 10 ** (-168 / 20)
# bandpass filter specifications
self.bp_numtaps = 2 ** 9
self.bp_bw = 0.5e3
self.bp_trans_width = 0.2e3
self.ping_fc = [11e3, 11.5e3, 12e3]
# replica pulse specifications
dt = 1 / self.fs
self.pulse_T = 0.009
pulse_N = self.pulse_T // dt
self.pulse_t_a = dt * np.arange(pulse_N)
# filter and pulse replica banks
filter_bank = []
pulse_bank = []
for f in self.ping_fc:
bp_edges = [0, f - self.bp_bw / 2 - self.bp_trans_width,
f - self.bp_bw / 2, f + self.bp_bw / 2,
f + self.bp_bw / 2 + self.bp_trans_width, self.fs / 2]
filter_bank.append(sig.remez(self.bp_numtaps, bp_edges,
[0, 1, 0], Hz=self.fs))
pulse_bank.append(np.sin(2 * pi * f * self.pulse_t_a))
self.filter_bank = np.array(filter_bank)
self.pulse_bank = np.array(pulse_bank)
# Fourier transform used in filtering
self.filter_bank_ft = np.fft.fft(self.filter_bank, n=self.nfft)
self.pulse_bank_ft = np.fft.fft(self.pulse_bank, n=self.nfft)
# clip filter result
self.num_edge = int(np.maximum(self.bp_numtaps, pulse_N) - 1)
self.t_a_filt = self.t_a[self.num_edge: ]
# surface bounce bounds
self.sb_tbounds = (-0.1, 0.5)
num_sb = np.ceil((self.sb_tbounds[1] - self.sb_tbounds[0]) * self.fs)
self.sb_t_a =
|
np.arange(num_sb)
|
numpy.arange
|
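# --- Hedged sketch (not from the class above): FFT-domain matched filtering of a
# noisy record against a replica pulse, mirroring how filter_bank_ft / pulse_bank_ft
# would be applied. Sample rate, tone frequency and pulse placement are toy values.
import numpy as np
fs = 1e4
t = np.arange(0, 0.1, 1 / fs)
pulse = np.sin(2 * np.pi * 1e3 * t[:200])        # short replica pulse
x = np.random.randn(t.size)
x[500:700] += pulse                              # bury the pulse in noise
nfft = 2048
mf = np.fft.ifft(np.fft.fft(x, n=nfft) * np.conj(np.fft.fft(pulse, n=nfft))).real
print(int(np.argmax(mf[:t.size])))               # peaks near sample 500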
import numpy as np
def standardization(signal, fit=False, param=None):
"""Normalizes a given signal by subtracting the mean and dividing by the standard deviation.
Parameters
----------
signal : nd-array
input signal
Returns
-------
nd-array
standardized signal
"""
if param is not None:
s_mean = param[0]
s_std = param[1]
else:
s_mean = np.mean(signal, axis=0)
s_std = np.std(signal, axis=0)
if fit:
d_mean = np.mean(
|
np.diff(signal, axis=0)
|
numpy.diff
|
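# --- Hedged usage sketch for the z-score normalization described in the docstring of
# standardization() above, on random toy data.
import numpy as np
sig = np.random.randn(100, 3) * 5 + 2
z = (sig - np.mean(sig, axis=0)) / np.std(sig, axis=0)
print(np.allclose(z.mean(axis=0), 0), np.allclose(z.std(axis=0), 1))   # True True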
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:24:48 2020
@author: adityamate, killian-34
"""
import numpy as np
import pandas as pd
import time
import pomdp
from itertools import combinations
from whittle import *
from utils import *
import os
import argparse
import tqdm
def computeAverageTmatrixFromData(N, file_root='.', epsilon=0.005):
"""
    Generates an Nx2x2x2 T matrix indexed as: T[patient_number][action][current_state][next_state]
action=0 denotes passive action, a=1 is active action
State 0 denotes NA and state 1 denotes A
"""
fname = os.path.join(file_root, 'data/patient_T_matrices.npy')
real = np.load(fname)
T=np.zeros((N,2,2,2))
#Passive action transition probabilities
penalty_pass_00=0
penalty_pass_11=0
#Active action transition probabilities
benefit_act_00=0
benefit_act_11=0
avg = real.mean(axis=0)
# for i in range(N):
T_base = np.zeros((2,2))
T_base[0,0] = avg[0]
T_base[1,1] = avg[1]
T_base[0,1] = 1 - T_base[0,0]
T_base[1,0] = 1 - T_base[1,1]
T_base = smooth_real_probs(T_base, epsilon)
shift = 0.05
# Patient responds well to call
benefit_act_00=np.random.uniform(low=0., high=shift) # will subtract from prob of staying 0,0
benefit_act_11= benefit_act_00 + np.random.uniform(low=0., high=shift) # will add to prob of staying 1,1
# add benefit_act_00 to benefit_act_11 to guarantee the p11>p01 condition
# Patient does well on their own, low penalty for not calling
penalty_pass_11=np.random.uniform(low=0., high=shift) # will sub from prob of staying 1,1
penalty_pass_00=penalty_pass_11+np.random.uniform(low=0., high=shift) # will add to prob of staying 0,0
T_pass = np.copy(T_base)
T_act = np.copy(T_base)
T_act[0,0] = max(0, T_act[0,0] - benefit_act_00)
T_act[1,1] = min(1, T_act[1,1] + benefit_act_11)
T_pass[0,0] = min(1, T_pass[0,0] + penalty_pass_00)
T_pass[1,1] = max(0, T_pass[1,1] - penalty_pass_11)
T_pass[0,1] = 1 - T_pass[0,0]
T_pass[1,0] = 1 - T_pass[1,1]
T_act[0,1] = 1 - T_act[0,0]
T_act[1,0] = 1 - T_act[1,1]
T_pass = epsilon_clip(T_pass, epsilon)
T_act = epsilon_clip(T_act, epsilon)
#print(T_pass)
#print(T_act)
#print()
if not verify_T_matrix(np.array([T_pass, T_act])):
print("T matrix invalid\n",np.array([T_pass, T_act]))
raise ValueError()
for i in range(N):
T[i,0]=T_pass
T[i,1]=T_act
return T
# See page 7 of:
# https://projects.iq.harvard.edu/files/teamcore/files/2016_15_teamcore_aamas2016_eve_yundi.pdf
def specialTmatrix(N, kfrac=10, distribution=[0.5, 0.5], delta=0.02, option=2, badf=50):
option =3
if option==0:
T=np.zeros((N,2,2,2))
patient_descriptions=[]
T_p_01=[0.3, 0.3]
T_p_11=[0.97, 0.1]
T_a_01=[0.3, 0.9]
T_a_11=[0.97, 0.97]
for i in range(N):
index=np.random.choice(range(len(distribution)), p=distribution)
T[i][0][0][1]=np.random.uniform(T_p_01[index]-delta, T_p_01[index]+delta)
T[i][0][1][1]=np.random.uniform(T_p_11[index]-delta, T_p_11[index]+delta)
T[i][1][0][1]=np.random.uniform(T_a_01[index]-delta, T_a_01[index]+delta)
T[i][1][1][1]=np.random.uniform(T_a_11[index]-delta, T_a_11[index]+delta)
return T
elif option==1:
T=np.zeros((N,2,2,2))
k=int(kfrac*N/100.)
# Myopic wants to pull type 2
'''
type1 = np.array( [[[0.9, 0.1],
[0.6, 0.41]],
[[0.6, 0.4],
[0.3, 0.7]]])
type2 = np.array( [[[0.9, 0.1],
[0.6, 0.4]],
[[0.6, 0.4],
[0.3, 0.7]]])
'''
type1 = np.array( [[[0.6, 0.4],
[0.29, 0.71]],
[[0.35, 0.65],
[0.05, 0.95]]])
type2 = np.array( [[[0.6, 0.4],
[0.3, 0.7]],
[[0.35, 0.65],
[0.05, 0.95]]])
for i in range(k):
T[i] = type2
for j in range(k, N):
type1 = np.array( [[[0.6, 0.4],
[0.29, 0.71+ j*0.001]],
[[0.35, 0.65],
[0.05, 0.95]]])
T[j]=type1
print ("Returning T matrix: ")
print ("N: ", N, "k: ", k)
print ("shape: ", T.shape)
return T
elif option==2:
T=np.zeros((N,2,2,2))
type1= [[[0.97, 0.03],
[0.03, 0.97]],
[[0.96, 0.04],
[0.01, 0.99]]]
type2 = [[[0.25, 0.75],
[0.03, 0.97]],
[[0.23, 0.77],
[0.01 , 0.99 ]]]
T[0]=type1
T[1]=type2
return T
elif option==3:
shift1= 0.05
shift2= 0.05
shift3= 0.05
shift4= 0.05
epsilon=0.01
T=np.zeros((N,2,2,2))
type1= [[[0.97, 0.03],
[0.03, 0.97]],
[[0.96, 0.04],
[0.01, 0.99]]] ###### Bad patient
type2 = [[[0.25, 0.75],
[0.03, 0.97]],
[[0.23, 0.77],
[0.01 , 0.99 ]]] ##### Good patient (self-healing)
for i in range(N):
types=[type1, type2]
type_choice=types[np.random.choice([0, 1],p=[badf/100., 1-(badf/100.)])]
T[i]=np.array(type_choice)
# add benefit_act_00 to benefit_act_11 to guarantee the p11>p01 condition
benefit_act_00=np.random.uniform(low=0., high=shift1) # will subtract from prob of staying 0,0
benefit_act_11= benefit_act_00 + np.random.uniform(low=0., high=shift2) # will add to prob of staying 1,1
# Patient does well on their own, low penalty for not calling
penalty_pass_11=np.random.uniform(low=0., high=shift3) # will sub from prob of staying 1,1
penalty_pass_00=penalty_pass_11+np.random.uniform(low=0., high=shift4) # will add to prob of staying 0,0
T[i][1][0][0]= max(0, T[i][1][0][0] - benefit_act_00)
T[i][1][1][1]= min(1, T[i][1][1][1] + benefit_act_11)
T[i][0][0][0]= min(1, T[i][0][0][0] + penalty_pass_00)
T[i][0][1][1]= max(0, T[i][0][1][1] - penalty_pass_11)
T[i][0][0][1]= 1- T[i][0][0][0]
T[i][0][1][0]= 1- T[i][0][1][1]
T[i][1][0][1]= 1- T[i][1][0][0]
T[i][1][1][0]= 1- T[i][1][1][1]
T[i][0]=epsilon_clip(T[i][0], epsilon)
T[i][1]=epsilon_clip(T[i][1], epsilon)
return T
def generateYundiMyopicFailTmatrix():
    # Return a randomly generated T matrix (not uniformly random because of sorting)
T=np.zeros((2,2,2,2))
# T[0] = [[[0.95, 0.05],
# [0.05, 0.95]],
# [[0.99, 0.01],
# [0.1, 0.9]]]
# T[1] = [[[0.4, 0.6],
# [0.1, 0.9]],
# [[0.7, 0.3],
# [0.4, 0.6]]]
T[0] = [[[0.99, 0.01],
[0.1, 0.9]],
[[0.95, 0.05],
[0.05, 0.95]]]
T[1] = [[[0.7, 0.3],
[0.4, 0.6]],
[[0.4, 0.6],
[0.1, 0.9]]]
return T
def generateRandomTmatrix(N, random_stream):
    # Return a randomly generated T matrix (not uniformly random because of sorting)
T=np.zeros((N,2,2,2))
for i in range(N):
p_pass_01, p_pass_11, p_act_01, p_act_11=sorted(random_stream.uniform(size=4))
T[i,0]=np.array([[1-p_pass_01, p_pass_01],[1-p_pass_11, p_pass_11]])
T[i,1]=np.array([[1-p_act_01, p_act_01],[1-p_act_11, p_act_11]])
return T
def generateTmatrix(N, responsive_patient_fraction=0.4,
range_pass_00=(0.8,1.0), range_pass_11=(0.6,0.9),
range_act_g_00=(0,0.2),range_act_g_11=(0.9,1.0),
range_act_b_00=(0.6,0.8), range_act_b_11=(0.9,1.0)):
# p_act01 < p01/(p01+p10)
"""
    Generates an Nx2x2x2 T matrix indexed as: T[patient_number][action][current_state][next_state]
action=0 denotes passive action, a=1 is active action
State 0 denotes NA and state 1 denotes A
"""
T=np.zeros((N,2,2,2))
#Passive action transition probabilities
p_pass_00=np.random.uniform(low=range_pass_00[0], high=range_pass_00[1], size=N)
p_pass_11=np.random.uniform(low=range_pass_11[0], high=range_pass_11[1], size=N)
#Active action transition probabilities
#responsive_patient_fraction=0.4
p_act_00=np.zeros(N)
p_act_11=np.zeros(N)
for i in range(N):
if np.random.binomial(1,responsive_patient_fraction)==1:
# Patient responds well to call
p_act_00[i]=np.random.uniform(low=range_act_g_00[0], high=range_act_g_00[1])
p_act_11[i]=np.random.uniform(low=range_act_g_11[0], high=range_act_g_11[1])
else:
# Patient doesn't respond well to call
p_act_00[i]=np.random.uniform(low=range_act_b_00[0], high=range_act_b_00[1])
p_act_11[i]=np.random.uniform(low=range_act_b_11[0], high=range_act_b_11[1])
for i in range(N):
T[i,0]=np.array([[p_pass_00[i], 1-p_pass_00[i]],[1-p_pass_11[i],p_pass_11[i]]])
T[i,1]=np.array([[p_act_00[i], 1-p_act_00[i]],[1-p_act_11[i],p_act_11[i]]])
#print (T[:20])
return T
# guaranteed to generate 'bad patients' according to the definition here:
# p_act01 < p01/(p01+p10) == bad
# as well as good patients according to the same.
# we only want to consider bottom chain bad patients because top chain bad patients
# would mean our action has negative effect on them which isn't realistic.
# but this gives bad separation from myopic
def generateTmatrixBadf(N, responsive_patient_fraction=0.4,
range_pass_00=(0.6,0.8), range_pass_11=(0.6,0.89),
range_act_g_00=(0,0.2),range_act_g_11=(0.9,1.0),
range_act_b_00=(0.7,0.9), range_act_b_11=(0.9,1.0)):
# print("p_act01 < p01/(p01+p10)")
"""
    Generates an Nx2x2x2 T matrix indexed as: T[patient_number][action][current_state][next_state]
action=0 denotes passive action, a=1 is active action
State 0 denotes NA and state 1 denotes A
"""
T=np.zeros((N,2,2,2))
#Passive action transition probabilities
p_pass_00=np.random.uniform(low=range_pass_00[0], high=range_pass_00[1], size=N)
p_pass_11=np.random.uniform(low=range_pass_11[0], high=range_pass_11[1], size=N)
#Active action transition probabilities
#responsive_patient_fraction=0.4
p_act_00=np.zeros(N)
p_act_11=np.zeros(N)
for i in range(N):
if np.random.binomial(1,responsive_patient_fraction)==1:
# Patient responds well to call
p_act_00[i]=np.random.uniform(low=range_act_g_00[0], high=range_act_g_00[1])
p_act_11[i]=np.random.uniform(low=range_act_g_11[0], high=range_act_g_11[1])
p_act01 = 1-p_act_00[i]
p01 = 1-p_pass_00[i]
p10 = 1-p_pass_11[i]
if p_act01 < p01/(p01+p10):
raise ValueError("Intended good patient was bad.")
else:
# Patient doesn't respond well to call
p_act_00[i]=
|
np.random.uniform(low=range_act_b_00[0], high=range_act_b_00[1])
|
numpy.random.uniform
|
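# --- Hedged sketch (not from the file above): basic sanity checks on a transition
# matrix with the documented indexing T[patient][action][current_state][next_state].
# The "acting helps" check is one plausible reading of the p11 > p01 conditions above.
import numpy as np
T = np.zeros((1, 2, 2, 2))
T[0, 0] = [[0.9, 0.1], [0.3, 0.7]]               # passive action
T[0, 1] = [[0.6, 0.4], [0.1, 0.9]]               # active action
rows_ok = np.allclose(T.sum(axis=-1), 1.0)       # every row is a distribution
acting_helps = np.all(T[:, 1, :, 1] >= T[:, 0, :, 1])
print(rows_ok, acting_helps)                     # True True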
# -*- coding: utf-8 -*-
import numpy as np
import os
import pickle
import argparse
import time
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as trn
import torchvision.transforms.functional as trnF
import torchvision.datasets as dset
from models.resnet import resnet18
from models.cbam.model_resnet import ResidualNet
import torch.nn.functional as F
import opencv_functional as cv2f
import cv2
import itertools
import torch.utils.model_zoo as model_zoo
import math
import random
parser = argparse.ArgumentParser(description='Trains a one-class model',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--in_class', '-in', type=int, default=0, help='Class to have as the target/in distribution.')
# Optimization options
parser.add_argument('--epochs', '-e', type=int, default=5, help='Number of epochs to train.')
parser.add_argument('--learning_rate', '-lr', type=float, default=0.1, help='The initial learning rate.')
parser.add_argument('--batch_size', '-b', type=int, default=64, help='Batch size.')
parser.add_argument('--test_bs', type=int, default=200)
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')
parser.add_argument('--decay', '-d', type=float, default=0.0005, help='Weight decay (L2 penalty).')
# Checkpoints
parser.add_argument('--save', '-s', type=str, default='./snapshots/',
help='Folder to save checkpoints.')
parser.add_argument('--test', '-t', action='store_true', help='Test only flag.')
# Acceleration
parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
parser.add_argument('--prefetch', type=int, default=10, help='Pre-fetching threads.')
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}
print(state)
torch.manual_seed(1)
np.random.seed(1)
classes = ['acorn', 'airliner', 'ambulance', 'american_alligator', 'banjo', 'barn', 'bikini', 'digital_clock',
'dragonfly', 'dumbbell', 'forklift', 'goblet', 'grand_piano', 'hotdog', 'hourglass', 'manhole_cover',
'mosque', 'nail', 'parking_meter', 'pillow', 'revolver', 'rotary_dial_telephone', 'schooner', 'snowmobile',
'soccer_ball', 'stingray', 'strawberry', 'tank', 'toaster', 'volcano']
train_data_in = dset.ImageFolder('./one_class_train/' + classes[args.in_class])
test_data = dset.ImageFolder('./one_class_test/' + classes[args.in_class])
expanded_params = ((0, -56, 56), (0, -56, 56))
shift = np.cumsum([0] + [len(p) for p in expanded_params[:-1]]).tolist()
num_params = [len(expanded_params[i]) for i in range(len(expanded_params))]
n_p1, n_p2 = num_params[0], num_params[1]
output_dim = sum(num_params) + 4 # +4 due to four rotations
pert_configs = []
for tx, ty in itertools.product(*expanded_params):
pert_configs.append((tx, ty))
num_perts = len(pert_configs)
resize_and_crop = trn.Compose([trn.Resize(256), trn.RandomCrop(224)])
class PerturbDataset(torch.utils.data.Dataset):
def __init__(self, dataset, train_mode=True):
self.dataset = dataset
self.train_mode = train_mode
def __getitem__(self, index):
x, _ = self.dataset[index // num_perts]
pert = pert_configs[index % num_perts]
x = np.asarray(resize_and_crop(x))
if
|
np.random.uniform()
|
numpy.random.uniform
|
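# --- Hedged sketch (illustrative only): how PerturbDataset's flat index above
# decomposes into (base image, perturbation); pert_configs is rebuilt here the same
# way as in the snippet.
import itertools
expanded_params = ((0, -56, 56), (0, -56, 56))
pert_configs = list(itertools.product(*expanded_params))
num_perts = len(pert_configs)                    # 9 translation configs
index = 31
img_idx, pert = index // num_perts, pert_configs[index % num_perts]
print(num_perts, img_idx, pert)                  # 9 3 (-56, -56)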
# The MIT License (MIT)
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import math
import time
from PIL import Image
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
import moviepy.editor as mpy
from scipy.spatial.transform import Rotation as R
import pyexr
from lib.renderer import Renderer
from lib.models import *
from lib.options import parse_options
from lib.geoutils import sample_unif_sphere, sample_fib_sphere, normalized_slice
from lib.geometry import CubeMarcher
def write_exr(path, data):
pyexr.write(path, data,
channel_names={'normal': ['X','Y','Z'],
'x': ['X','Y','Z'],
'view': ['X','Y','Z']},
precision=pyexr.HALF)
class GyroidLattice(nn.Module):
"""Gyroid lattice for the given implicit neural geometry."""
def __init__(self, sdf_net):
"""Constructor.
Args:
sdf_net (nn.Module): SDF network
"""
super().__init__()
self.sdf_net = sdf_net
def forward(self, x):
"""Evaluates uniform grid (N, 3) using gyroid implicit equation. Returns (N,) result."""
# x = uniformGrid[:, 0]
# print(x)
# y = uniformGrid[:, 1]
# z = uniformGrid[:, 2]
kCellSize = 0.014408772790049425*3.
t = 0.5 # the isovalue, change if you want
gyroid = (torch.cos(2*3.14*x[:, 0]/kCellSize) * torch.sin(2*3.14*x[:, 1]/kCellSize) + \
torch.cos(2*3.14*x[:, 1]/kCellSize) * torch.sin(2*3.14*x[:, 2]/kCellSize) + \
torch.cos(2*3.14*x[:, 2]/kCellSize) * torch.sin(2*3.14*x[:, 0]/kCellSize)) - t**2
gyroid = torch.tensor(gyroid, device='cuda:0', dtype=torch.float32)
gyroid = gyroid.reshape(-1, 1)
# return self.sdf_net(x)
# return gyroid
return torch.max(gyroid, self.sdf_net(x))
if __name__ == '__main__':
# Parse
parser = parse_options(return_parser=True)
app_group = parser.add_argument_group('app')
app_group.add_argument('--img-dir', type=str, default='_results/render_app/imgs',
help='Directory to output the rendered images')
app_group.add_argument('--render-2d', action='store_true',
help='Render in 2D instead of 3D')
app_group.add_argument('--exr', action='store_true',
help='Write to EXR')
app_group.add_argument('--r360', action='store_true',
help='Render a sequence of spinning images.')
app_group.add_argument('--rsphere', action='store_true',
help='Render around a sphere.')
app_group.add_argument('--sdf_grid', action='store_true',
                           help='Creates a uniform grid with x samples per dimension'
                                ' and evaluates the sdf at each point, and dumps the data in ./sdf.csv')
app_group.add_argument('--nb-poses', type=int, default=64,
help='Number of poses to render for sphere rendering.')
app_group.add_argument('--cam-radius', type=float, default=4.0,
help='Camera radius to use for sphere rendering.')
app_group.add_argument('--disable-aa', action='store_true',
help='Disable anti aliasing.')
app_group.add_argument('--export', type=str, default=None,
help='Export model to C++ compatible format.')
app_group.add_argument('--rotate', type=float, default=None,
help='Rotation in degrees.')
app_group.add_argument('--depth', type=float, default=0.0,
help='Depth of 2D slice.')
args = parser.parse_args()
# Pick device
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
# Get model name
if args.pretrained is not None:
name = args.pretrained.split('/')[-1].split('.')[0]
else:
assert False and "No network weights specified!"
org_net = globals()[args.net](args)
if args.jit:
org_net = torch.jit.script(org_net)
org_net.load_state_dict(torch.load(args.pretrained))
org_net.to(device)
org_net.eval()
net = GyroidLattice(org_net)
net.to(device)
net.eval()
print("Total number of parameters: {}".format(sum(p.numel() for p in net.parameters())))
if args.export is not None:
net = SOL_NGLOD(net)
net.save(args.export)
sys.exit()
if args.sol:
net = SOL_NGLOD(net)
if args.lod is not None:
net.lod = args.lod
# Make output directory
ins_dir = os.path.join(args.img_dir, name)
if not os.path.exists(ins_dir):
os.makedirs(ins_dir)
for t in ['normal', 'rgb', 'exr']:
_dir = os.path.join(ins_dir, t)
if not os.path.exists(_dir):
os.makedirs(_dir)
renderer = Renderer(args, device, net).eval()
if args.rotate is not None:
rad = np.radians(args.rotate)
model_matrix = torch.FloatTensor(R.from_rotvec(rad * np.array([0,1,0])).as_matrix())
else:
model_matrix = torch.eye(3)
if args.r360:
for angle in np.arange(0, 360, 10):
rad = np.radians(angle)
model_matrix = torch.FloatTensor(R.from_rotvec(rad * np.array([-1./np.sqrt(3.),-1./np.sqrt(3.),-1./np.sqrt(3.)])).as_matrix())
out = renderer.shade_images(f=args.camera_origin,
t=args.camera_lookat,
fv=args.camera_fov,
aa=not args.disable_aa,
mm=model_matrix)
# data = out.float().numpy().exrdict()
idx = int(math.floor(100 * angle))
# if args.exr:
# write_exr('{}/exr/{:06d}.exr'.format(ins_dir, idx), data)
img_out = out.image().byte().numpy()
Image.fromarray(img_out.rgb).save('{}/rgb/{:06d}.png'.format(ins_dir, idx), mode='RGB')
# Image.fromarray(img_out.normal).save('{}/normal/{:06d}.png'.format(ins_dir, idx), mode='RGB')
elif args.rsphere:
views = sample_fib_sphere(args.nb_poses)
cam_origins = args.cam_radius * views
for p, cam_origin in enumerate(cam_origins):
out = renderer.shade_images(f=cam_origin,
t=args.camera_lookat,
fv=args.camera_fov,
aa=not args.disable_aa,
mm=model_matrix)
data = out.float().numpy().exrdict()
if args.exr:
write_exr('{}/exr/{:06d}.exr'.format(ins_dir, p), data)
img_out = out.image().byte().numpy()
# Image.fromarray(img_out.rgb).save('{}/rgb/{:06d}.png'.format(ins_dir, p), mode='RGB')
Image.fromarray(img_out.normal).save('{}/normal/{:06d}.png'.format(ins_dir, p), mode='RGB')
elif args.sdf_grid:
# Create a uniform grid on torch.
# x range [-1.1, 1.1], y range [-1.1, 1.1], z range [-1.1 to 1.1]
K = np.linspace(-1.1, 1.1, 150)
grid = [[x,y,z] for x in K for y in K for z in K]
torch_grid = torch.tensor(np.array(grid), device='cuda:0')
print("shape of torch grid: ", torch_grid.size())
# print(torch_grid)
net.eval()
sdf = net(torch_grid)
print("shape of sdf grid: ", sdf.size())
print(sdf)
# Compute SDF on the torch_grid to torch_sdf
        r = np.hstack((sdf.detach().cpu().numpy(), torch_grid.detach().cpu().numpy()))
np.savetxt("sdf.csv", r, delimiter=",")
exit(1)
else:
print("[INFO] here it is")
# out = renderer.shade_images(f=args.camera_origin,
# t=args.camera_lookat,
# fv=args.camera_fov,
# aa=not args.disable_aa,
# mm=model_matrix)
# data = out.float().numpy().exrdict()
# if args.render_2d:
# depth = args.depth
print("[INFO] sdf slice")
# data['sdf_slice'] = renderer.sdf_slice(depth=depth)
# renderer.sdf_slice(depth=depth)
Kx = np.linspace(-1.,1., 150)
Ky = np.linspace(-0.3, 0.5, 150)
Kz = np.linspace(-0.3, 0.8, 150)
grid = [[x,y,z] for x in Kx for y in Ky for z in Kz]
torch_grid = torch.tensor(
|
np.array(grid)
|
numpy.array
|
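# --- Hedged sketch (not from the script above): evaluating the gyroid implicit
# function used by GyroidLattice on a small NumPy grid; kCellSize and the isovalue t
# mirror the class, but the grid extent/resolution here are arbitrary (and 2*pi is
# used instead of the 2*3.14 approximation).
import numpy as np
kCellSize = 0.014408772790049425 * 3.
t = 0.5
w = 2 * np.pi / kCellSize
K = np.linspace(-0.05, 0.05, 20)
X, Y, Z = np.meshgrid(K, K, K, indexing='ij')
gyroid = (np.cos(w * X) * np.sin(w * Y)
          + np.cos(w * Y) * np.sin(w * Z)
          + np.cos(w * Z) * np.sin(w * X)) - t ** 2
print(gyroid.shape, float(gyroid.min()), float(gyroid.max()))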
from cosmosis.datablock import names, option_section, SectionOptions
import numpy as np
import os
class BirdLikelihood(object):
# I take as example TwoPointLikelihood
# They subclass the Gaussian one, but we can't
like_name = "bird_like"
def __init__(self, options):
# General options
self.options = options
kmin = options.get_double("kmin")
kmax = options.get_double("kmax")
self.Nl = options.get_int("Nl")
self.model = options.get_int("model")
self.data_directory = options.get_string("dir")
cov_file = options.get_string("cov_file")
# Load data PS and mask the relevant k
kdata, PSdata = self.__load_data()
self.k = kdata.reshape(3, -1)[0]
self.Nk = len(self.k)
kmask0 = np.argwhere((self.k <= kmax) & (self.k >= kmin))[:, 0]
self.kmask = kmask0
# print(self.kmask)
for i in range(self.Nl - 1):
kmaski = np.argwhere((self.k <= kmax) & (self.k >= kmin))[:, 0] + (i + 1) * self.Nk
self.kmask = np.concatenate((self.kmask, kmaski))
# print(self.kmask)
self.ydata = PSdata[self.kmask]
# Load data covariance, mask and invert it
cov = np.loadtxt(os.path.join(self.data_directory, cov_file))
# print(cov.shape)
covred = cov[self.kmask.reshape((len(self.kmask), 1)), self.kmask]
# print(covred.shape)
self.invcov = np.linalg.inv(covred)
self.chi2data = np.dot(self.ydata, np.dot(self.invcov, self.ydata))
self.invcovdata = np.dot(self.ydata, self.invcov)
# Assign priors to the bias parameters to marginalize
self.assign_priors()
# Check for BBNprior
self.use_BBNprior = False
try:
self.omega_b_BBNsigma = options.get_double("omega_b_BBNsigma")
self.omega_b_BBNcenter = options.get_double("omega_b_BBNcenter")
self.use_BBNprior = True
print ('BBN prior on omega_b: on')
except:
print ('BBN prior on omega_b: none')
def __load_data(self):
"""
Helper function to read in the full data vector.
"""
# print("Load data?")
data_file = self.options.get_string("ps_file")
fname = os.path.join(self.data_directory, data_file)
try:
kPS, PSdata, _ = np.loadtxt(fname, unpack=True)
except:
kPS, PSdata = np.loadtxt(fname, unpack=True)
return kPS, PSdata
def assign_priors(self):
# Assigns priors to marginalized bias parameters
        if self.Nl == 2:
self.use_prior = True
if self.model == 1:
self.priors = np.array([2., 2., 8., 2., 2.])
b3, cct, cr1, ce2, sn = self.priors
print ('EFT priors: b3: %s, cct: %s, cr1(+cr2): %s, ce2: %s, shotnoise: %s (default)' %
(b3, cct, cr1, ce2, sn))
elif self.model == 2:
self.priors = np.array([2., 2., 8., 2.])
b3, cct, cr1, ce2 = self.priors
print ('EFT priors: b3: %s, cct: %s, cr1(+cr2): %s, ce2: %s (default)' % (b3, cct, cr1, ce2))
elif self.model == 3:
self.priors = np.array([2., 2., 8., 2., 2.]) # np.array([ 10., 4., 8., 4., 2. ])
b3, cct, cr1, ce2, ce1 = self.priors
print ('EFT priors: b3: %s, cct: %s, cr1(+cr2): %s, ce2: %s, ce1: %s (default)' % (b3, cct, cr1, ce2, ce1))
elif self.model == 4:
self.priors = np.array([2., 2., 8., 2., 2., 2.])
b3, cct, cr1, ce2, ce1, sn = self.priors
print ('EFT priors: b3: %s, cct: %s, cr1(+cr2): %s, ce2: %s, ce1: %s, shotnoise: %s (default)' %
(b3, cct, cr1, ce2, ce1, sn))
elif self.model == 5:
self.priors = np.array([2., 2., 8.])
b3, cct, cr1 = self.priors
print ('EFT priors: b3: %s, cct: %s, cr1(+cr2): %s (default)' % (b3, cct, cr1))
        elif self.Nl == 3:
self.use_prior = True
if self.model == 1:
self.priors = np.array([2., 2., 4., 4., 2., 2.])
b3, cct, cr1, cr2, ce2, sn = self.priors
print ('EFT priors: b3: %s, cct: %s, cr1: %s, cr2: %s, ce2: %s, shotnoise: %s (default)' %
(b3, cct, cr1, cr2, ce2, sn))
elif self.model == 2:
self.priors = np.array([2., 2., 4., 4., 2.])
                b3, cct, cr1, cr2, ce2 = self.priors
print ('EFT priors: b3: %s, cct: %s, cr1: %s, cr2: %s, ce2: %s (default)' % (b3, cct, cr1, cr2, ce2))
elif self.model == 3:
self.priors = np.array([2., 2., 4., 4., 2., 2.]) # np.array([ 10., 4., 8., 4., 2. ])
b3, cct, cr1, cr2, ce2, ce1 = self.priors
print ('EFT priors: b3: %s, cct: %s, cr1: %s, cr2: %s, ce2: %s, ce1: %s (default)' %
(b3, cct, cr1, cr2, ce2, ce1))
elif self.model == 4:
self.priors = np.array([2., 2., 4., 4., 2., 2., 2.])
b3, cct, cr1, cr2, ce2, ce1, sn = self.priors
print ('EFT priors: b3: %s, cct: %s, cr1: %s, cr2: %s, ce2: %s, ce1: %s, shotnoise: %s (default)' %
(b3, cct, cr1, cr2, ce2, ce1, sn))
self.priormat =
|
np.diagflat(1. / self.priors**2)
|
numpy.diagflat
|
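# --- Hedged sketch (not from the likelihood above): the Gaussian chi^2 that invcov,
# chi2data and invcovdata are precomputed for, chi2 = (y - m)^T C^-1 (y - m),
# expanded so that the data-only terms can be cached. All vectors here are toy values.
import numpy as np
y = np.array([1.0, 2.0, 1.5])                    # data vector
C = np.diag([0.1, 0.2, 0.1])                     # covariance
m = np.array([0.9, 2.1, 1.4])                    # model prediction
invcov = np.linalg.inv(C)
chi2_direct = (y - m) @ invcov @ (y - m)
chi2_cached = y @ invcov @ y - 2 * (y @ invcov) @ m + m @ invcov @ m
print(np.isclose(chi2_direct, chi2_cached))      # True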
"""
Inspired by https://github.com/tkarras/progressive_growing_of_gans/blob/master/tfutil.py
"""
import functools
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
seed = 1337
np.random.seed(seed)
tf.set_random_seed(seed)
batch_size = 64
# ---------------------------------------------------------------------------------------------
# For convenience :)
def run(*args, **kwargs):
return tf.get_default_session().run(*args, **kwargs)
def is_tf_expression(x):
return isinstance(x, tf.Tensor) or isinstance(x, tf.Variable) or isinstance(x, tf.Operation)
def safe_log(x, eps=1e-12):
with tf.name_scope("safe_log"):
return tf.log(x + eps)
def safe_log2(x, eps=1e-12):
with tf.name_scope("safe_log2"):
return tf.log(x + eps) * np.float32(1. / np.log(2.))
def lerp(a, b, t):
with tf.name_scope("lerp"):
return a + (b - a) * t
def lerp_clip(a, b, t):
with tf.name_scope("lerp_clip"):
return a + (b - a) * tf.clip_by_value(t, 0., 1.)
def gaussian_noise(x, std=5e-2):
noise = tf.random_normal(x.get_shape(), mean=0., stddev=std, dtype=tf.float32)
return x + noise
# ---------------------------------------------------------------------------------------------
# Image Sampling with TF
def down_sampling(img, interp=tf.image.ResizeMethod.BILINEAR):
shape = img.get_shape() # [batch, height, width, channels]
h2 = int(shape[1] // 2)
w2 = int(shape[2] // 2)
return tf.image.resize_images(img, [h2, w2], interp)
def up_sampling(img, interp=tf.image.ResizeMethod.BILINEAR):
shape = img.get_shape() # [batch, height, width, channels]
h2 = int(shape[1] * 2)
w2 = int(shape[2] * 2)
return tf.image.resize_images(img, [h2, w2], interp)
# ---------------------------------------------------------------------------------------------
# Optimizer
class Optimizer(object):
def __init__(self,
name='train',
optimizer='tf.train.AdamOptimizer',
learning_rate=1e-3,
use_loss_scaling=False,
loss_scaling_init=64.,
loss_scaling_inc=5e-4,
loss_scaling_dec=1.,
use_grad_scaling=False,
grad_scaling=7.,
**kwargs):
self.name = name
self.optimizer = optimizer
self.learning_rate = learning_rate
self.use_loss_scaling = use_loss_scaling
self.loss_scaling_init = loss_scaling_init
self.loss_scaling_inc = loss_scaling_inc
self.loss_scaling_dec = loss_scaling_dec
self.use_grad_scaling = use_grad_scaling
self.grad_scaling = grad_scaling
# ---------------------------------------------------------------------------------------------
# Network
class Network:
def __init__(self):
pass
# ---------------------------------------------------------------------------------------------
# Functions
w_init = tf.contrib.layers.variance_scaling_initializer(factor=1., mode='FAN_AVG', uniform=True)
b_init = tf.zeros_initializer()
reg = 5e-4
w_reg = tf.contrib.layers.l2_regularizer(reg)
eps = 1e-5
# Layers
def conv2d_alt(x, f=64, k=3, s=1, pad=0, pad_type='zero', use_bias=True, sn=False, name='conv2d'):
with tf.variable_scope(name):
if pad_type == 'zero':
x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]])
elif pad_type == 'reflect':
x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]], mode='REFLECT')
else:
raise NotImplementedError("[-] Only 'zero' & 'reflect' are supported :(")
if sn:
w = tf.get_variable('kernel', shape=[k, k, x.get_shape()[-1], f],
initializer=w_init, regularizer=w_reg)
x = tf.nn.conv2d(x, filter=spectral_norm(w), strides=[1, s, s, 1], padding='VALID')
if use_bias:
b = tf.get_variable('bias', shape=[f], initializer=b_init)
x = tf.nn.bias_add(x, b)
else:
x = conv2d(x, f, k, s, name=name)
return x
def conv2d(x, f=64, k=3, s=1, pad='SAME', reuse=None, is_train=True, name='conv2d'):
"""
:param x: input
:param f: filters
:param k: kernel size
:param s: strides
:param pad: padding
:param reuse: reusable
:param is_train: trainable
:param name: scope name
:return: net
"""
return tf.layers.conv2d(inputs=x,
filters=f, kernel_size=k, strides=s,
kernel_initializer=w_init,
kernel_regularizer=w_reg,
bias_initializer=b_init,
padding=pad,
reuse=reuse,
name=name)
def conv1d(x, f=64, k=3, s=1, pad='SAME', reuse=None, is_train=True, name='conv1d'):
"""
:param x: input
:param f: filters
:param k: kernel size
:param s: strides
:param pad: padding
:param reuse: reusable
:param is_train: trainable
:param name: scope name
:return: net
"""
return tf.layers.conv1d(inputs=x,
filters=f, kernel_size=k, strides=s,
kernel_initializer=w_init,
kernel_regularizer=w_reg,
bias_initializer=b_init,
padding=pad,
reuse=reuse,
name=name)
def sub_pixel_conv2d(x, f, s=2):
"""reference : https://github.com/tensorlayer/SRGAN/blob/master/tensorlayer/layers.py"""
if f is None:
f = int(int(x.get_shape()[-1]) / (s ** 2))
bsize, a, b, c = x.get_shape().as_list()
bsize = tf.shape(x)[0]
x_s = tf.split(x, s, 3)
x_r = tf.concat(x_s, 2)
return tf.reshape(x_r, (bsize, s * a, s * b, f))
def deconv2d_alt(x, f=64, k=3, s=1, use_bias=True, sn=False, name='deconv2d'):
with tf.variable_scope(name):
if sn:
w = tf.get_variable('kernel', shape=[k, k, x.get_shape()[-1], f],
initializer=w_init, regularizer=w_reg)
x = tf.nn.conv2d_transpose(x, filter=spectral_norm(w), strides=[1, s, s, 1], padding='SAME',
output_shape=[x.get_shape()[0], x.get_shape()[1] * s, x.get_shape()[2] * s, f])
if use_bias:
b = tf.get_variable('bias', shape=[f], initializer=b_init)
x = tf.nn.bias_add(x, b)
else:
x = deconv2d(x, f, k, s, name=name)
return x
def deconv2d(x, f=64, k=3, s=1, pad='SAME', reuse=None, name='deconv2d'):
"""
:param x: input
:param f: filters
:param k: kernel size
:param s: strides
:param pad: padding
:param reuse: reusable
:param is_train: trainable
:param name: scope name
:return: net
"""
return tf.layers.conv2d_transpose(inputs=x,
filters=f, kernel_size=k, strides=s,
kernel_initializer=w_init,
kernel_regularizer=w_reg,
bias_initializer=b_init,
padding=pad,
reuse=reuse,
name=name)
def dense_alt(x, f=1024, sn=False, use_bias=True, name='fc'):
with tf.variable_scope(name):
x = flatten(x)
if sn:
w = tf.get_variable('kernel', shape=[x.get_shape()[-1], f],
initializer=w_init, regularizer=w_reg, dtype=tf.float32)
x = tf.matmul(x, spectral_norm(w))
if use_bias:
b = tf.get_variable('bias', shape=[f], initializer=b_init)
x = tf.nn.bias_add(x, b)
else:
x = dense(x, f, name=name)
return x
def dense(x, f=1024, reuse=None, name='fc'):
"""
:param x: input
:param f: fully connected units
:param reuse: reusable
:param name: scope name
:param is_train: trainable
:return: net
"""
return tf.layers.dense(inputs=x,
units=f,
kernel_initializer=w_init,
kernel_regularizer=w_reg,
bias_initializer=b_init,
reuse=reuse,
name=name)
def flatten(x):
return tf.layers.flatten(x)
def hw_flatten(x):
if is_tf_expression(x):
return tf.reshape(x, shape=[x.get_shape()[0], -1, x.get_shape()[-1]])
else:
return np.reshape(x, [x.shape[0], -1, x.shape[-1]])
# Normalize
def l2_norm(x, eps=1e-12):
return x / (tf.sqrt(tf.reduce_sum(tf.square(x))) + eps)
def batch_norm(x, momentum=0.9, center=True, scaling=True, is_train=True, reuse=None, name="bn"):
return tf.layers.batch_normalization(inputs=x,
momentum=momentum,
epsilon=eps,
center=center,
scale=scaling,
training=is_train,
reuse=reuse,
name=name)
def instance_norm(x, std=2e-2, affine=True, reuse=None, name=""):
with tf.variable_scope('instance_normalize-%s' % name, reuse=reuse):
mean, variance = tf.nn.moments(x, [1, 2], keepdims=True)
normalized = tf.div(x - mean, tf.sqrt(variance + eps))
if not affine:
return normalized
else:
depth = x.get_shape()[3] # input channel
scale = tf.get_variable('scale', [depth],
initializer=tf.random_normal_initializer(mean=1., stddev=std, dtype=tf.float32))
offset = tf.get_variable('offset', [depth],
initializer=tf.zeros_initializer())
return scale * normalized + offset
def pixel_norm(x):
return x / tf.sqrt(tf.reduce_mean(tf.square(x), axis=[1, 2, 3]) + eps)
def spectral_norm(x, gain=1., n_iter=1):
x_shape = x.get_shape()
x = tf.reshape(x, (-1, x_shape[-1])) # (n * h * w, c)
u = tf.get_variable('u',
shape=(1, x_shape[-1]),
initializer=tf.truncated_normal_initializer(stddev=gain),
trainable=False)
u_hat = u
v_hat = None
for _ in range(n_iter):
v_ = tf.matmul(u_hat, tf.transpose(x))
v_hat = l2_norm(v_)
u_ = tf.matmul(v_hat, x)
u_hat = l2_norm(u_)
sigma = tf.matmul(tf.matmul(v_hat, x), tf.transpose(u_hat))
x_norm = x / sigma
with tf.control_dependencies([u.assign(u_hat)]):
x_norm = tf.reshape(x_norm, x_shape)
return x_norm
# Activations
def prelu(x, stddev=1e-2, reuse=False, name='prelu'):
with tf.variable_scope(name):
if reuse:
tf.get_variable_scope().reuse_variables()
_alpha = tf.get_variable('_alpha',
shape=[1],
initializer=tf.constant_initializer(stddev),
# initializer=tf.random_normal_initializer(stddev)
dtype=x.dtype)
return tf.maximum(_alpha * x, x)
# Pooling
def global_avg_pooling(x):
return tf.reduce_mean(x, axis=[1, 2])
# Losses
def l1_loss(x, y):
return tf.reduce_mean(tf.abs(x - y))
def l2_loss(x, y):
return tf.nn.l2_loss(y - x)
def mse_loss(x, y, n, is_mean=False): # ~ l2_loss
if is_mean:
return tf.reduce_mean(tf.reduce_mean(tf.squared_difference(x, y)))
else:
return tf.reduce_mean(tf.reduce_sum(tf.squared_difference(x, y)))
def rmse_loss(x, y, n):
return tf.sqrt(mse_loss(x, y, n))
def psnr_loss(x, y, n):
return 20. * tf.log(tf.reduce_max(x) / mse_loss(x, y, n))
def sce_loss(data, label):
return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=data, labels=label))
def softce_loss(data, label):
return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=data, labels=label))
def ssoftce_loss(data, label):
return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=data, labels=label))
# metrics
def inception_score(images, img_size=(299, 299), n_splits=10):
""" referenced from https://github.com/tsc2017/Inception-Score/blob/master/inception_score.py """
assert type(images) == np.ndarray
assert len(images.shape) == 4
assert images.shape[-1] == 3
images = np.clip(images, 0., 255.) # clipped into [0, 255]
def inception_feat(img, n_splits=1):
# img = tf.transpose(img, [0, 2, 3, 1])
img = tf.image.resize_bilinear(img, img_size)
generated_images_list = array_ops.split(img, num_or_size_splits=n_splits)
logits = functional_ops.map_fn(
fn=functools.partial(tf.contrib.gan.eval.run_inception, output_tensor="logits:0"),
elems=array_ops.stack(generated_images_list),
parallel_iterations=1,
back_prop=False,
swap_memory=True,
name="RunClassifier"
)
logits = array_ops.concat(array_ops.unstack(logits), axis=0)
return logits
inception_images = tf.placeholder(tf.float32, [None, None, None, 3], name="inception-images")
logits = inception_feat(inception_images)
def get_inception_probs(x, n_classes=1000):
n_batches = len(x) // batch_size
preds = np.zeros([len(x), n_classes], dtype=np.float32)
for i in range(n_batches):
inp = x[i * batch_size:(i + 1) * batch_size] / 255. * 2 - 1. # scaled into [-1, 1]
preds[i * batch_size:(i + 1) * batch_size] = logits.eval({inception_images: inp})[:, :n_classes]
preds = np.exp(preds) / np.sum(np.exp(preds), 1, keepdims=True)
return preds
def preds2score(preds, splits=10):
scores = []
for i in range(splits):
part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, axis=0), axis=0)))
kl = np.mean(np.sum(kl, axis=1))
scores.append(np.exp(kl))
return np.mean(scores), np.std(scores)
preds = get_inception_probs(images)
mean, std = preds2score(preds, splits=n_splits)
return mean, std
def fid_score(real_img, fake_img, img_size=(299, 299), n_splits=10):
assert type(real_img) == np.ndarray and type(fake_img) == np.ndarray
assert len(real_img.shape) == 4 and len(fake_img.shape) == 4
assert real_img.shape[-1] == 3 and fake_img.shape[-1] == 3
assert real_img.shape == fake_img.shape
real_img = np.clip(real_img, 0., 255.) # clipped into [0, 255]
fake_img = np.clip(fake_img, 0., 255.) # clipped into [0, 255]
inception_images = tf.placeholder(tf.float32, [None, None, None, 3], name="inception-images")
real_acts = tf.placeholder(tf.float32, [None, None], name="real_activations")
fake_acts = tf.placeholder(tf.float32, [None, None], name="fake_activations")
def inception_activation(images, n_splits=1):
# images = tf.transpose(images, [0, 2, 3, 1])
images = tf.image.resize_bilinear(images, img_size)
generated_images_list = array_ops.split(images, num_or_size_splits=n_splits)
acts = functional_ops.map_fn(
fn=functools.partial(tf.contrib.gan.eval.run_inception, output_tensor="pool_3:0"),
elems=array_ops.stack(generated_images_list),
parallel_iterations=1,
back_prop=False,
swap_memory=True,
name="RunClassifier"
)
acts = array_ops.concat(array_ops.unstack(acts), axis=0)
return acts
activations = inception_activation(inception_images)
def get_inception_activations(x, feats=2048):
n_batches = len(x) // batch_size
acts = np.zeros([len(x), feats], dtype=np.float32)
for i in range(n_batches):
inp = x[i * batch_size:(i + 1) * batch_size] / 255. * 2 - 1. # scaled into [-1, 1]
acts[i * batch_size:(i + 1) * batch_size] = activations.eval({inception_images: inp})
acts = np.exp(acts) / np.sum(
|
np.exp(acts)
|
numpy.exp
|
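# --- Hedged sketch (not from the utilities above): the Frechet distance between two
# sets of activations, which is what fid_score() computes once the Inception
# activations are in hand. The activations below are random toy arrays.
import numpy as np
from scipy import linalg
real = np.random.randn(256, 64)
fake = np.random.randn(256, 64) + 0.1
mu1, mu2 = real.mean(axis=0), fake.mean(axis=0)
c1 = np.cov(real, rowvar=False)
c2 = np.cov(fake, rowvar=False)
covmean = linalg.sqrtm(c1.dot(c2)).real          # matrix square root, real part
fid = np.sum((mu1 - mu2) ** 2) + np.trace(c1 + c2 - 2 * covmean)
print(float(fid))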
# -*- coding: utf-8 -*-
import os
import sys
import time
import numpy as np
import pickle as pkl
import multiprocessing
from itertools import product
from sklearn.model_selection import KFold
from sklearn.metrics import roc_auc_score
try:
sys.path.append(os.getcwd())
import sparse_module
try:
from sparse_module import c_algo_solam
from sparse_module import c_algo_spam
from sparse_module import c_algo_sht_auc
from sparse_module import c_algo_opauc
from sparse_module import c_algo_sto_iht
from sparse_module import c_algo_hsg_ht
from sparse_module import c_algo_fsauc
except ImportError:
print('cannot find some function(s) in sparse_module')
pass
except ImportError:
print('cannot find the module: sparse_module')
pass
"""
Related genes are found by the following paper:
Agarwal, Shivani, and <NAME>.
"Ranking genes by relevance to a disease."
Proceedings of the 8th annual international
conference on computational systems bioinformatics. 2009.
"""
related_genes = {
# markers for AML
# 01: Myeloperoxidase
1778: '"773","1779","MPO Myeloperoxidase","M19507_at"',
# 02: CD13
1816: '"792","1817","ANPEP Alanyl (membrane) aminopeptidase (aminopeptidase N, '
'aminopeptidase M, microsomal aminopeptidase, CD13)","M22324_at"',
# 03: CD33
1833: '"808","1834","CD33 CD33 antigen (differentiation antigen)","M23197_at"',
# 04: HOXA9 Homeo box A9
3188: '"1391","3189","HOXA9 Homeo box A9","U41813_at"',
# 05: MYBL2
4129: '"1788","4130","MYBL2 V-myb avian myeloblastosis viral oncogene homolog-like 2","X13293_at"',
# markers for ALL
# 06: CD19
6224: '"2673","6225","CD19 gene","M84371_rna1_s_at"',
# 07: CD10 (CALLA)
1085: '"493","1086","MME Membrane metallo-endopeptidase '
'(neutral endopeptidase, enkephalinase, CALLA, CD10)","J03779_at"',
# 08: TCL1 (T cell leukemia)
4679: '"2065","4680","TCL1 gene (T cell leukemia) extracted from '
'H.sapiens mRNA for Tcell leukemia/lymphoma 1","X82240_rna1_at"',
# 09: C-myb
5771: '"2489","5772","C-myb gene extracted from Human (c-myb) gene, complete primary cds, '
'and five complete alternatively spliced cds","U22376_cds2_s_at"',
# 10: Deoxyhypusine synthase
6514: '"2801","6515","DHPS Deoxyhypusine synthase","U26266_s_at"',
# 10: Deoxyhypusine synthase
3763: '"1633","3764","DHPS Deoxyhypusine synthase","U79262_at"',
# 12: G-gamma globin
2344: '"1034","2345","G-gamma globin gene extracted from H.sapiens '
'G-gamma globin and A-gamma globin genes","M91036_rna1_at",',
# 13: Delta-globin
6883: '"2945","6884","Delta-globin gene extracted from Human beta'
' globin region on chromosome 11","U01317_cds4_at"',
# 14: Brain-expressed HHCPA78 homolog
2481: '"1103","2482","Brain-expressed HHCPA78 homolog [human, HL-60 acute '
'promyelocytic leukemia cells, mRNA, 2704 nt]","S73591_at"',
# 15:
6214: '"2669","6215","MPO from Human myeloperoxidase gene, '
'exons 1-4./ntype=DNA /annot=exon","M19508_xpt3_s_at"',
# 16: Probable protein disulfide isomerase ER-60 precursor
535: '"257","536","PROBABLE PROTEIN DISULFIDE ISOMERASE ER-60 PRECURSOR","D63878_at"',
# 16: Probable protein disulfide isomerase ER-60 precursor
5577: '"2415","5578","PROBABLE PROTEIN DISULFIDE ISOMERASE ER-60 PRECURSOR","Z49835_s_at"',
# 16: Probable protein disulfide isomerase ER-60 precursor
6167: '"2646","6168","PROBABLE PROTEIN DISULFIDE ISOMERASE ER-60 PRECURSOR","M13560_s_at"',
# 17: NPM1 Nucleophosmin
3577: '"1549","3578","NPM1 Nucleophosmin (nucleolar phosphoprotein B23, numatrin)","U66559_at"',
# 18
2441: '"1087","2442","CD34 CD34 antigen (hemopoietic progenitor cell antigen)","S53911_at"',
# 20
5687: '"2459","5688","CD24 signal transducer mRNA and 3 region","L33930_s_at"',
# 21
281: '"124","282","60S RIBOSOMAL PROTEIN L23","D21260_at"',
# 22
5224: '"2273","5225","5-aminolevulinic acid synthase gene extracted from Human DNA sequence '
'from PAC 296K21 on chromosome X contains cytokeratin exon, delta-aminolevulinate synthase '
'(erythroid); 5-aminolevulinic acid synthase.(EC 2.3.1.37). '
'6-phosphofructo-2-kinase/fructose-2,6-bisphosphatase (EC 2.7.1.105, EC 3.1.3.46),'
' ESTs and STS","Z83821_cds2_at"',
# 23
4016: '"1736","4017","HLA CLASS II HISTOCOMPATIBILITY ANTIGEN, DR ALPHA CHAIN PRECURSOR","X00274_at"',
# 24
2878: '"1257","2879","Epstein-Barr virus-induced protein mRNA","U19261_at"',
# 25
4629: '"2042","4630","HNRPA1 Heterogeneous nuclear ribonucleoprotein A1","X79536_at"',
# 26
2401: '"1069","2402","Azurocidin gene","M96326_rna1_at"',
# 27
4594: '"2026","4595","Red cell anion exchanger (EPB3, AE1, Band 3) 3 non-coding region","X77737_at"',
# 28
5500: '"2386","5501","TOP2B Topoisomerase (DNA) II beta (180kD)","Z15115_at"',
# 30
5551: '"2402","5552","PROBABLE G PROTEIN-COUPLED RECEPTOR LCR1 HOMOLOG","L06797_s_at"',
# 32
3520: '"1527","3521","Int-6 mRNA","U62962_at"',
# 33
1173: '"534","1174","Alpha-tubulin isotype H2-alpha gene, last exon","K03460_at"',
# 33
4467: '"1957","4468","Alpha-tubulin mRNA","X68277_at"',
# 33
4909: '"2156","4910","Alpha-tubulin mRNA","X99325_at"',
# 33
6914: '"2957","6915","Alpha-tubulin mRNA","X01703_at"',
# 34
1684: '"738","1685","Terminal transferase mRNA","M11722_at"',
# 35:
5951: '"2561","5952","GLYCOPHORIN B PRECURSOR","U05255_s_at"'
}
def process_data_21_leukemia():
"""
https://github.com/ramhiser/datamicroarray
http://genomics-pubs.princeton.edu/oncology/affydata/index.html
:return:
"""
data_path = '/enter/your/directory/to/21_leukemia/'
data = {'feature_ids': None, 'x_tr': [], 'y_tr': [], 'feature_names': []}
import csv
with open(data_path + 'golub_x.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
data['feature_ids'] = [str(_) for _ in row[1:]]
line_count += 1
elif 1 <= line_count <= 72:
data['x_tr'].append([float(_) for _ in row[1:]])
line_count += 1
data['x_tr'] = np.asarray(data['x_tr'])
for i in range(len(data['x_tr'])):
data['x_tr'][i] = data['x_tr'][i] / np.linalg.norm(data['x_tr'][i])
# AML: acute myeloid leukemia; ALL: acute lymphoblastic leukemia
with open(data_path + 'golub_y.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
line_count += 1
continue
elif 1 <= line_count <= 72:
line_count += 1
if row[1] == 'ALL':
data['y_tr'].append(1.)
else:
data['y_tr'].append(-1.)
data['y_tr'] = np.asarray(data['y_tr'])
data['n'] = 72
data['p'] = 7129
data['num_trials'] = 20
data['num_posi'] = len([_ for _ in data['y_tr'] if _ == 1.])
data['num_nega'] = len([_ for _ in data['y_tr'] if _ == -1.])
trial_i = 0
while True:
# since original data is ordered, we need to shuffle it!
rand_perm =
|
np.random.permutation(data['n'])
|
numpy.random.permutation
|
"""module for complete independent games"""
import functools
import itertools
import numpy as np
from gameanalysis import rsgame
from gameanalysis import utils
class _MatrixGame(rsgame._CompleteGame): # pylint: disable=protected-access
"""Matrix game representation
This represents a complete independent game more compactly than a Game, but
only works for complete independent games.
Parameters
----------
role_names : (str,)
The name of each role.
strat_names : ((str,),)
The name of each strategy per role.
payoff_matrix : ndarray
The matrix of payoffs for an asymmetric game. The last axis is the
payoffs for each player, the first axes are the strategies for each
player. matrix.shape[:-1] must correspond to the number of strategies
for each player. matrix.ndim - 1 must equal matrix.shape[-1].
"""
def __init__(self, role_names, strat_names, payoff_matrix):
super().__init__(role_names, strat_names, np.ones(len(role_names), int))
self._payoff_matrix = payoff_matrix
self._payoff_matrix.setflags(write=False)
self._prof_offset = np.zeros(self.num_strats, int)
self._prof_offset[self.role_starts] = 1
self._prof_offset.setflags(write=False)
self._payoff_view = self._payoff_matrix.view()
self._payoff_view.shape = (self.num_profiles, self.num_roles)
def payoff_matrix(self):
"""Return the payoff matrix"""
return self._payoff_matrix.view()
@utils.memoize
def min_strat_payoffs(self):
"""Returns the minimum payoff for each role"""
mpays = np.empty(self.num_strats)
for role, (pays, min_pays, strats) in enumerate(
zip(
np.rollaxis(self._payoff_matrix, -1),
np.split(mpays, self.role_starts[1:]),
self.num_role_strats,
)
):
np.rollaxis(pays, role).reshape((strats, -1)).min(1, min_pays)
mpays.setflags(write=False)
return mpays
@utils.memoize
def max_strat_payoffs(self):
"""Returns the minimum payoff for each role"""
mpays = np.empty(self.num_strats)
for role, (pays, max_pays, strats) in enumerate(
zip(
np.rollaxis(self._payoff_matrix, -1),
np.split(mpays, self.role_starts[1:]),
self.num_role_strats,
)
):
np.rollaxis(pays, role).reshape((strats, -1)).max(1, max_pays)
mpays.setflags(write=False)
return mpays
@functools.lru_cache(maxsize=1)
def payoffs(self):
profiles = self.profiles()
payoffs = np.zeros(profiles.shape)
payoffs[profiles > 0] = self._payoff_matrix.flat
return payoffs
def compress_profile(self, profile):
"""Compress profile in array of ints
Normal profiles are an array of number of players playing a strategy.
Since matrix games always have one player per role, this compresses
each roles counts into a single int representing the played strategy
per role.
"""
utils.check(self.is_profile(profile).all(), "must pass valid profiles")
profile =
|
np.asarray(profile, int)
|
numpy.asarray
|
import numpy as np
import pytest
from pandas import Categorical, DataFrame
@pytest.mark.parametrize(
"data, expected",
[
# empty
(DataFrame(), True),
# multi-same
(DataFrame({"A": [1, 2], "B": [1, 2]}), True),
# multi-object
(
DataFrame(
{
"A": np.array([1, 2], dtype=object),
"B":
|
np.array(["a", "b"], dtype=object)
|
numpy.array
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import linprog
from mpmath import mp
mp.dps = 500
def plot(list_true, list_lb, list_ub):
y = list_true
plt.rcParams.update({'font.size': 16})
plt.figure(figsize=(14, 5.2))
lower_bound_A = list_lb
upper_bound_A = list_ub
lower_error_A = (np.array(y)) - np.array(lower_bound_A)
upper_error_A = (np.array(upper_bound_A) - np.array(y))
xi = np.arange(1, len(y) + 1, 1)
plt.errorbar(xi, y, yerr=[lower_error_A, upper_error_A], fmt='o')
plt.xlabel("Trial")
plt.ylabel("CI")
plt.tight_layout()
plt.savefig('./results/ci_demonstration.png')
plt.show()
def LP_solver(c_vec, S, u, G, h):
# res = linprog(c_vec, A_ub=G, b_ub=h, A_eq=S, b_eq=u, method='simplex')
res = linprog(c_vec, A_ub=G, b_ub=h, A_eq=S, b_eq=u, method='simplex',
options={'maxiter': 10000})
return res
def construct_S_u_G_h(n, m):
dim_t = n * m
M_r = np.zeros((n, dim_t))
for i in range(n):
M_r[i, i * m:i * m + m] = np.ones(m)
M_c = np.zeros((m, dim_t))
for i in range(m):
for j in range(i, dim_t, m):
M_c[i, j] = 1.0
S = np.vstack((M_r, M_c))
u = np.vstack((np.ones((n, 1))/n, np.ones((m, 1))/m))
# Remove any redundant row (e.g., last row)
S = S[:-1, :]
u = u[:-1, :]
# Construct G
G = -np.identity(dim_t)
# Construct h
h = np.zeros((dim_t, 1))
return S, u, G, h
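# A small sanity-check sketch (hypothetical helper, not part of the original code):
# for n = m = 2 the flattened transport plan t has 4 entries; S t = u enforces the
# two row sums (= 1/n) and the two column sums (= 1/m), one of which is redundant,
# which is why the last row of S and u is dropped above, and G t <= h enforces t >= 0.
def _check_S_u_G_h_sketch():
    S, u, G, h = construct_S_u_G_h(2, 2)
    assert S.shape == (3, 4) and u.shape == (3, 1)
    assert G.shape == (4, 4) and h.shape == (4, 1)
    t = np.full((4, 1), 0.25)  # the independent coupling is feasible
    assert np.allclose(np.dot(S, t), u) and np.all(np.dot(G, t) <= h)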
def construct_set_basis_non_basis_idx(t_hat):
A = []
Ac = []
for i in range(len(t_hat)):
if t_hat[i] != 0.0:
A.append(i)
else:
Ac.append(i)
return A, Ac
def construct_Theta(n, m, d, data_obs):
idx_matrix = np.identity(n)
Omega = None
for i in range(n):
temp_vec = None
for j in range(n):
if idx_matrix[i][j] == 1.0:
if j == 0:
temp_vec = np.ones((m, 1))
else:
temp_vec = np.hstack((temp_vec, np.ones((m, 1))))
else:
if j == 0:
temp_vec = np.zeros((m, 1))
else:
temp_vec = np.hstack((temp_vec, np.zeros((m, 1))))
temp_vec = np.hstack((temp_vec, -np.identity(m)))
if i == 0:
Omega = temp_vec.copy()
else:
Omega = np.vstack((Omega, temp_vec))
Theta = np.zeros((n * m, n * d + m * d))
list_sign = []
list_kronecker_product = []
for k in range(d):
e_d_k = np.zeros((d, 1))
e_d_k[k][0] = 1.0
kronecker_product = np.kron(Omega, e_d_k.T)
dot_product = np.dot(kronecker_product, data_obs)
s_k = np.sign(dot_product)
Theta = Theta + s_k * kronecker_product
list_sign.append(s_k)
list_kronecker_product.append(kronecker_product)
return Theta, list_sign, list_kronecker_product
def compute_a_b(data, eta, dim_data):
sq_norm = (
|
np.linalg.norm(eta)
|
numpy.linalg.norm
|
import os
import cv2
import numpy as np
from oc_stereo.builders.config_builder_util import ConfigObj
from oc_stereo.dataloader.kitti import calib_utils, depth_map_utils, instance_utils, evaluation
# KITTI difficulty thresholds (easy, moderate, hard)
HEIGHT = (40, 25, 25)
OCCLUSION = (0, 1, 2)
TRUNCATION = (0.15, 0.3, 0.5)
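# For example, index 1 ('moderate') keeps objects with occlusion <= 1, truncation <= 0.3
# and a 2D box height of at least 25 px (see _check_difficulty below).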
# Mean object heights from hist_labels.py
MEAN_HEIGHTS = {
'Car': 1.526,
'Pedestrian': 1.761,
'Cyclist': 1.737,
}
class Difficulty:
# Values as integers for indexing
EASY = 0
MODERATE = 1
HARD = 2
ALL = 3
# Mappings from difficulty to string
DIFF_TO_STR_MAPPING = {
EASY: 'easy',
MODERATE: 'moderate',
HARD: 'hard',
ALL: 'all',
}
# Mappings from strings to difficulty
STR_TO_DIFF_MAPPING = {
'easy': EASY,
'moderate': MODERATE,
'hard': HARD,
'all': ALL,
}
@staticmethod
def to_string(difficulty):
return Difficulty.DIFF_TO_STR_MAPPING[difficulty]
@staticmethod
def from_string(difficulty_str):
return Difficulty.STR_TO_DIFF_MAPPING[difficulty_str]
class ObjectFilter:
def __init__(self, config):
self.classes = config.classes
self.difficulty = Difficulty.from_string(config.difficulty_str)
self.box_2d_height = config.box_2d_height
self.truncation = config.truncation
self.occlusion = config.occlusion
self.depth_range = config.depth_range
@staticmethod
def create_obj_filter(classes,
difficulty,
occlusion,
truncation,
box_2d_height,
depth_range):
config = ConfigObj()
config.classes = classes
config.difficulty_str = Difficulty.to_string(difficulty)
config.occlusion = occlusion
config.truncation = truncation
config.box_2d_height = box_2d_height
config.depth_range = depth_range
return ObjectFilter(config)
class ObjectLabel:
"""Object Label
Fields:
type (str): Object type, one of
'Car', 'Van', 'Truck', 'Pedestrian', 'Person_sitting',
'Cyclist', 'Tram', 'Misc', or 'DontCare'
truncation (float): Truncation level, float from 0 (non-truncated) to 1 (truncated),
where truncated refers to the object leaving image boundaries
occlusion (int): Occlusion level, indicating occlusion state:
0 = fully visible,
1 = partly occluded,
2 = largely occluded,
3 = unknown
alpha (float): Observation angle of object [-pi, pi]
x1, y1, x2, y2 (float): 2D bounding box of object in the image. (top left, bottom right)
h, w, l: 3D object dimensions: height, width, length (in meters)
t: 3D object centroid x, y, z in camera coordinates (in meters)
ry: Rotation around Y-axis in camera coordinates [-pi, pi]
score: Only for results, indicating confidence in detection, needed for p/r curves.
"""
def __init__(self):
self.type = None # Type of object
self.truncation = 0.0
self.occlusion = 0
self.alpha = 0.0
self.x1 = 0.0
self.y1 = 0.0
self.x2 = 0.0
self.y2 = 0.0
self.h = 0.0
self.w = 0.0
self.l = 0.0
self.t = (0.0, 0.0, 0.0)
self.ry = 0.0
self.score = 0.0
def __eq__(self, other):
"""Compares the given object to the current ObjectLabel instance.
:param other: object to compare to this instance against
:return: True, if other and current instance is the same
"""
if not isinstance(other, ObjectLabel):
return False
if self.__dict__ != other.__dict__:
return False
else:
return True
def __repr__(self):
return '({}, a:{}, t:{} lwh:({:.03f}, {:.03f}, {:.03f}), ry:{:.03f})'.format(
self.type, self.alpha, self.t, self.l, self.w, self.h, self.ry)
def read_labels(label_dir, sample_name):
"""Reads in label data file from Kitti Dataset
Args:
label_dir: label directory
sample_name: sample_name
Returns:
obj_list: list of ObjectLabels
"""
# Check label file
label_path = label_dir + '/{}.txt'.format(sample_name)
if not os.path.exists(label_path):
raise ValueError('Label file could not be found:', label_path)
if os.stat(label_path).st_size == 0:
return []
labels = np.loadtxt(label_path, delimiter=' ', dtype=str, ndmin=2)
num_rows, num_cols = labels.shape
if num_cols not in [15, 16]:
raise ValueError('Invalid label format')
num_labels = num_rows
is_results = num_cols == 16
obj_list = []
for obj_idx in np.arange(num_labels):
obj = ObjectLabel()
# Fill in the object list
obj.type = labels[obj_idx, 0]
obj.truncation = float(labels[obj_idx, 1])
obj.occlusion = float(labels[obj_idx, 2])
obj.alpha = float(labels[obj_idx, 3])
obj.x1, obj.y1, obj.x2, obj.y2 = (labels[obj_idx, 4:8]).astype(np.float32)
obj.h, obj.w, obj.l = (labels[obj_idx, 8:11]).astype(np.float32)
obj.t = (labels[obj_idx, 11:14]).astype(np.float32)
obj.ry = float(labels[obj_idx, 14])
if is_results:
obj.score = float(labels[obj_idx, 15])
else:
obj.score = 0.0
obj_list.append(obj)
return np.asarray(obj_list)
def filter_labels_by_class(obj_labels, classes):
"""Filters object labels by classes.
Args:
obj_labels: List of object labels
classes: List of classes to keep, e.g. ['Car', 'Pedestrian', 'Cyclist']
Returns:
obj_labels: List of filtered labels
class_mask: Mask of labels to keep
"""
class_mask = [(obj.type in classes) for obj in obj_labels]
return obj_labels[class_mask], class_mask
def filter_labels_by_difficulty(obj_labels, difficulty):
"""Filters object labels by difficulty.
Args:
obj_labels: List of object labels
difficulty: Difficulty level
Returns:
obj_labels: List of filtered labels
difficulty_mask: Mask of labels to keep
"""
difficulty_mask = [_check_difficulty(obj, difficulty) for obj in obj_labels]
return obj_labels[difficulty_mask], difficulty_mask
def _check_difficulty(obj, difficulty):
if difficulty == Difficulty.ALL:
return True
return ((obj.occlusion <= OCCLUSION[difficulty]) and
(obj.truncation <= TRUNCATION[difficulty]) and
(obj.y2 - obj.y1) >= HEIGHT[difficulty])
def filter_labels_by_box_2d_height(obj_labels, box_2d_height):
"""Filters object labels by 2D box height.
Args:
obj_labels: List of object labels
box_2d_height: Minimum 2D box height
Returns:
obj_labels: List of filtered labels
height_mask: Mask of labels to keep
"""
height_mask = [(obj_label.y2 - obj_label.y1) > box_2d_height
for obj_label in obj_labels]
return obj_labels[height_mask], height_mask
def filter_labels_by_truncation(obj_labels, truncation):
"""Filters object labels by truncation.
Args:
obj_labels: List of object labels
truncation: Maximum truncation
Returns:
obj_labels: List of filtered labels
trunc_mask: Mask of labels to keep
"""
trunc_mask = [obj_label.truncation < truncation
for obj_label in obj_labels]
return obj_labels[trunc_mask], trunc_mask
def filter_labels_by_occlusion(obj_labels, occlusion):
"""Filters object labels by truncation.
Args:
obj_labels: List of object labels
occlusion: Maximum occlusion
Returns:
obj_labels: List of filtered labels
occ_mask: Mask of labels to keep
"""
occ_mask = [obj_label.occlusion < occlusion
for obj_label in obj_labels]
return obj_labels[occ_mask], occ_mask
def filter_labels_by_depth_range(obj_labels, depth_range):
"""Filters object labels within a range of depth values.
Args:
obj_labels: List of object labels
depth_range: Range of depth to keep objects
Returns:
obj_labels: List of filtered labels
depth_mask: Mask of labels to keep
"""
depth_mask = [depth_range[0] < obj_label.t[2] < depth_range[1]
for obj_label in obj_labels]
return obj_labels[depth_mask], depth_mask
def filter_labels(obj_labels, classes=None, difficulty=None,
box_2d_height=None, occlusion=None, truncation=None, depth_range=None):
"""Filters object labels by various metrics.
Args:
obj_labels: List of object labels
classes: List of classes to keep, e.g. ['Car', 'Pedestrian', 'Cyclist']
difficulty: Difficulty level
box_2d_height: Minimum 2D box height
occlusion: Maximum occlusion level
truncation: Maximum truncation level
depth_range: Range of depth to keep objects
Returns:
obj_labels: List of filtered labels
obj_mask: Mask of labels to keep
"""
obj_mask = np.full(len(obj_labels), True)
if classes is not None:
_, class_mask = filter_labels_by_class(obj_labels, classes)
obj_mask &= class_mask
if difficulty is not None:
_, difficulty_mask = filter_labels_by_difficulty(obj_labels, difficulty)
obj_mask &= difficulty_mask
if box_2d_height is not None:
_, height_mask = filter_labels_by_box_2d_height(obj_labels, box_2d_height)
obj_mask &= height_mask
if occlusion is not None:
_, occ_mask = filter_labels_by_occlusion(obj_labels, occlusion)
obj_mask &= occ_mask
if truncation is not None:
_, trunc_mask = filter_labels_by_truncation(obj_labels, truncation)
obj_mask &= trunc_mask
if depth_range is not None:
_, depth_mask = filter_labels_by_depth_range(obj_labels, depth_range)
obj_mask &= depth_mask
return obj_labels[obj_mask], obj_mask
def apply_obj_filter(obj_labels, obj_filter):
"""Applies an ObjectFilter to a list of labels
Args:
obj_labels:
obj_filter (ObjectFilter):
Returns:
obj_labels: List of filtered labels
obj_mask: Mask of labels to keep
"""
obj_labels, obj_mask = filter_labels(
obj_labels,
classes=obj_filter.classes,
difficulty=obj_filter.difficulty,
box_2d_height=obj_filter.box_2d_height,
occlusion=obj_filter.occlusion,
truncation=obj_filter.truncation,
depth_range=obj_filter.depth_range)
return obj_labels, obj_mask
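# A minimal usage sketch (hypothetical directory/sample names, not part of the original code):
def _apply_obj_filter_sketch(label_dir, sample_name):
    obj_labels = read_labels(label_dir, sample_name)
    car_filter = ObjectFilter.create_obj_filter(
        classes=['Car'],
        difficulty=Difficulty.MODERATE,
        occlusion=None,
        truncation=None,
        box_2d_height=None,
        depth_range=[0.0, 70.0])
    filtered_labels, obj_mask = apply_obj_filter(obj_labels, car_filter)
    return filtered_labels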
def boxes_2d_from_obj_labels(obj_labels):
return np.asarray([object_label_to_box_2d(obj_label)
for obj_label in obj_labels], np.float32)
def boxes_3d_from_obj_labels(obj_labels):
return np.asarray([object_label_to_box_3d(obj_label)
for obj_label in obj_labels], np.float32)
def obj_classes_from_obj_labels(obj_labels):
return np.asarray([obj_label.type
for obj_label in obj_labels])
def get_image(sample_name, image_dir):
image_path = image_dir + '/{}.png'.format(sample_name)
image = cv2.imread(image_path)
return image
def get_instance_masks(sample_name, instance_dir, num_objs):
"""Gets the instance masks
Args:
sample_name: Sample name
instance_dir: Instance directory
num_objs: Total number of objects in the scene
Returns:
instance_masks: (N, H, W) Instance masks
"""
instance_img = instance_utils.get_instance_image(sample_name, instance_dir)
instance_masks = instance_utils.get_instance_mask_list(instance_img, num_objs)
return instance_masks
def read_lidar(velo_dir, sample_name):
"""Reads the lidar bin file for a sample
Args:
velo_dir: velodyne directory
sample_name: sample name
Returns:
xyzi: (N, 4) points and intensities
"""
velo_path = velo_dir + '/{}.bin'.format(sample_name)
if os.path.exists(velo_path):
with open(velo_path, 'rb') as fid:
data_array = np.fromfile(fid, np.single)
xyzi = data_array.reshape(-1, 4)
return xyzi
else:
raise ValueError('Velodyne file not found')
def get_lidar_point_cloud(sample_name, frame_calib, velo_dir):
"""Gets the lidar point cloud in cam0 frame.
Args:
sample_name: Sample name
frame_calib: FrameCalib
velo_dir: Velodyne directory
Returns:
(3, N) point_cloud in the form [[x,...][y,...][z,...]]
"""
xyzi = read_lidar(velo_dir, sample_name)
# Calculate the point cloud
points_in_lidar_frame = xyzi[:, 0:3]
points = calib_utils.lidar_to_cam_frame(points_in_lidar_frame, frame_calib)
return points.T
def get_lidar_point_cloud_for_cam(sample_name, frame_calib, velo_dir,
image_shape=None, cam_idx=2):
"""Gets the lidar point cloud in cam0 frame, and optionally returns only the
points that are projected to an image.
Args:
sample_name: sample name
frame_calib: FrameCalib frame calibration
velo_dir: velodyne directory
image_shape: (optional) image shape [h, w] to filter points inside image
cam_idx: (optional) cam index (2 or 3) for filtering
Returns:
(3, N) point_cloud in the form [[x,...][y,...][z,...]]
"""
# Get points in camera frame
point_cloud = get_lidar_point_cloud(sample_name, frame_calib, velo_dir)
# Only keep points in front of camera (positive z)
point_cloud = point_cloud[:, point_cloud[2] > 1.0]
if image_shape is None:
return point_cloud
else:
# Project to image frame
if cam_idx == 2:
cam_p = frame_calib.p2
elif cam_idx == 3:
cam_p = frame_calib.p3
else:
raise ValueError('Invalid cam_idx', cam_idx)
# Project to image
points_in_img = calib_utils.project_pc_to_image(point_cloud, cam_p=cam_p)
points_in_img_rounded = np.round(points_in_img)
# Filter based on the given image shape
image_filter = (points_in_img_rounded[0] >= 0) & \
(points_in_img_rounded[0] < image_shape[1]) & \
(points_in_img_rounded[1] >= 0) & \
(points_in_img_rounded[1] < image_shape[0])
filtered_point_cloud = point_cloud[:, image_filter].astype(np.float32)
return filtered_point_cloud
def get_stereo_point_cloud(sample_name, calib_dir, disp_dir):
"""
Gets the point cloud for an image calculated from the disparity map
:param sample_name: sample name
:param calib_dir: directory with calibration files
:param disp_dir: directory with disparity images
:return: (3, N) point_cloud in the form [[x,...][y,...][z,...]]
"""
# Read calibration info
frame_calib = calib_utils.get_frame_calib(calib_dir, sample_name)
stereo_calibration_info = calib_utils.get_stereo_calibration(frame_calib.p2,
frame_calib.p3)
# Read disparity
disp = cv2.imread(disp_dir + '/{}.png'.format(sample_name),
cv2.IMREAD_ANYDEPTH)
disp = np.float32(disp)
disp = np.divide(disp, 256)
disp[disp == 0] = 0.1
# Calculate the point cloud
point_cloud = calib_utils.depth_from_disparity(disp, stereo_calibration_info)
return point_cloud
def get_depth_map_path(sample_name, depth_dir):
return depth_dir + '/{}.png'.format(sample_name)
def get_disp_map_path(sample_name, disp_dir):
return disp_dir + '/{}.png'.format(sample_name)
def get_disp_map(sample_name, disp_dir):
disp_map_path = get_disp_map_path(sample_name, disp_dir)
disp_map = cv2.imread(disp_map_path, cv2.IMREAD_ANYDEPTH)
disp_map = disp_map / 256.0
return disp_map
def get_depth_map(sample_name, depth_dir):
depth_map_path = get_depth_map_path(sample_name, depth_dir)
depth_map = depth_map_utils.read_depth_map(depth_map_path)
return depth_map
def get_depth_map_point_cloud(sample_name, frame_calib, depth_dir):
"""Calculates the point cloud from a depth map
Args:
sample_name: sample name
frame_calib: FrameCalib frame calibration
depth_dir: folder with depth maps
cam_idx: cam index (2 or 3)
Returns:
(3, N) point_cloud in the form [[x,...][y,...][z,...]]
"""
depth_map_path = get_depth_map_path(sample_name, depth_dir)
depth_map = depth_map_utils.read_depth_map(depth_map_path)
depth_map_shape = depth_map.shape[0:2]
# Calculate point cloud from depth map
cam_p = frame_calib.p2
# cam_p = frame_calib.p2 if cam_idx == 2 else frame_calib.p3
# # TODO: Call depth_map_utils version
return depth_map_utils.get_depth_point_cloud(depth_map, cam_p)
# Calculate points from depth map
depth_map_flattened = depth_map.flatten()
xx, yy = np.meshgrid(
|
np.arange(0, depth_map_shape[1], 1)
|
numpy.arange
|
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from . import implicit_als_benfred as implicit2
import numpy as np
import scipy.sparse
import itertools
from sklearn import preprocessing
from scipy.sparse import csr_matrix
import pandas as pd
from brightics.common.repr import BrtcReprBuilder
from brightics.common.repr import strip_margin
from brightics.common.repr import plt2MD
from brightics.common.repr import pandasDF2MD
from brightics.common.repr import dict2MD
from brightics.function.utils import _model_dict
from brightics.common.groupby import _function_by_group
from brightics.common.utils import check_required_parameters
from brightics.common.validation import validate
from brightics.common.validation import greater_than_or_equal_to
from brightics.common.utils import get_default_from_parameters_if_required
from multiprocessing import Pool
#--------------------------------------------------------------------------------------------------------
"""
The MIT License (MIT)
Copyright (c) 2016 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
class MatrixFactorizationBase():
""" MatrixFactorizationBase contains common functionality for recommendation models.
Attributes
----------
item_factors : ndarray
Array of latent factors for each item in the training set
user_factors : ndarray
Array of latent factors for each user in the training set
"""
def __init__(self):
# learned parameters
self.item_factors = None
self.user_factors = None
def predict(self, userid, itemid):
user = self.user_factors[userid]
score = self.item_factors[itemid].dot(user)
return score
def recommend(self, userid, user_items,
N=10, filter_already_liked_items=True, filter_items=None):
user = self.user_factors[userid]
# calculate the top N items, removing the users own liked items from the results
if filter_already_liked_items is True:
liked = set(user_items[userid].indices)
else:
liked = set()
scores = self.item_factors.dot(user)
if filter_items:
liked.update(filter_items)
count = N + len(liked)
if count < len(scores):
ids = np.argpartition(scores, -count)[-count:]
best = sorted(zip(ids, scores[ids]), key=lambda x:-x[1])
else:
best = sorted(enumerate(scores), key=lambda x:-x[1])
return list(itertools.islice((rec for rec in best if rec[0] not in liked), N))
def nonzeros(m, row):
""" returns the non zeroes of a row in csr_matrix """
for index in range(m.indptr[row], m.indptr[row + 1]):
yield m.indices[index], m.data[index]
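# Sketch of what nonzeros() yields (hypothetical 2x3 matrix, not part of the original code):
def _nonzeros_sketch():
    m = csr_matrix(np.array([[0., 2., 0.],
                             [1., 0., 3.]]))
    assert list(nonzeros(m, 0)) == [(1, 2.0)]
    assert list(nonzeros(m, 1)) == [(0, 1.0), (2, 3.0)]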
class AlternatingLeastSquares(MatrixFactorizationBase):
""" Alternating Least Squares
A Recommendation Model based on the algorithms described in the paper 'Collaborative
Filtering for Implicit Feedback Datasets' with performance optimizations described in
'Applications of the Conjugate Gradient Method for Implicit Feedback Collaborative
Filtering.'
Parameters
----------
factors : int, optional
The number of latent factors to compute
regularization : float, optional
The regularization factor to use
iterations : int, optional
The number of ALS iterations to use when fitting data
Attributes
----------
item_factors : ndarray
Array of latent factors for each item in the training set
user_factors : ndarray
Array of latent factors for each user in the training set
"""
def __init__(self, implicit=False, factors=100, regularization=0.01, alpha=1,
iterations=15, seed=None):
self.item_factors = None
self.user_factors = None
# parameters on how to factorize
self.factors = factors
self.regularization = regularization
self.alpha = alpha
# options on how to fit the model
self.seed = seed
self.implicit = implicit
self.iterations = iterations
def fit(self, item_users):
""" Factorizes the item_users matrix.
After calling this method, the members 'user_factors' and 'item_factors' will be
initialized with a latent factor model of the input data.
The item_users matrix does double duty here. It defines which items are liked by which
users (P_iu in the original paper), as well as how much confidence we have that the user
liked the item (C_iu).
The negative items are implicitly defined: This code assumes that non-zero items in the
item_users matrix mean that the user liked the item. The negatives are left unset in this
sparse matrix: the library will assume that means Piu = 0 and Riu = 1 for all these items.
Parameters
----------
item_users: csr_matrix
Matrix of confidences for the liked items. This matrix should be a csr_matrix where
the rows of the matrix are the item, the columns are the users that liked that item,
and the value is the confidence that the user liked the item.
"""
Riu = item_users
if not isinstance(Riu, scipy.sparse.csr_matrix):
Riu = Riu.tocsr()
Rui = Riu.T.tocsr()
items, users = Riu.shape
# Initialize the variables randomly if they haven't already been set
if self.user_factors is None:
np.random.seed(self.seed) ; self.user_factors = np.random.rand(users, self.factors)
if self.item_factors is None:
np.random.seed(self.seed) ; self.item_factors = np.random.rand(items, self.factors)
else:
Rui_array = None
Riu_array = None
for iteration in range(self.iterations):
least_squares(self.implicit, self.alpha, Rui, self.user_factors, self.item_factors, self.regularization)
least_squares(self.implicit, self.alpha, Riu, self.item_factors, self.user_factors, self.regularization)
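# A minimal usage sketch (hypothetical data, not part of the original code): item_users
# is an items x users confidence matrix; after fit() the learned factors can be used
# for scoring and top-N recommendation.
def _als_usage_sketch():
    item_users = csr_matrix(np.array([[1., 0., 2.],
                                      [0., 3., 0.],
                                      [4., 0., 0.]]))
    model = AlternatingLeastSquares(implicit=True, factors=2, iterations=5, seed=0)
    model.fit(item_users)
    user_items = item_users.T.tocsr()
    return model.recommend(userid=0, user_items=user_items, N=2)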
def least_squares(implicit, alpha, Rui, X, Y, regularization):
users, n_factors = X.shape
YtY = Y.T.dot(Y)
for u in range(users):
if implicit:
A = YtY + regularization * np.eye(n_factors)
b = np.zeros(n_factors)
for i, rui in nonzeros(Rui, u):
factor = Y[i]
cui = 1 + alpha * rui
b += cui * factor
A += (cui - 1) * np.outer(factor, factor)
else:
A = regularization * np.eye(n_factors)
b = np.zeros(n_factors)
for i, rui in nonzeros(Rui, u):
factor = Y[i]
if rui != -1:
b += rui * factor
A +=
|
np.outer(factor, factor)
|
numpy.outer
|
"""
Unit tests for the encoding_functions.py file.
"""
import numpy as np
import pytest
from moredataframes.encodings import noop, drop, factorize
def test_noop():
"""
noop() should only call to_numpy(), so we just need to check that things are passed correctly to get full
coverage.
"""
output, extra_info = noop(np.array([1, 2, 3]))
np.testing.assert_equal(output, np.array([1, 2, 3]).reshape([-1, 1]))
output, extra_info = noop(np.array([1, 2, 3]), inverse=True)
np.testing.assert_equal(output, np.array([1, 2, 3]).reshape([-1, 1]))
def test_drop():
"""
drop() should always return None, just need to check this always happens (so I don't change it later without
having to think about it) and that parameters can be passed correctly.
"""
output, extra = drop(
|
np.array([1, 2, 3])
|
numpy.array
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import numpy as np
from bisect import insort, bisect_left
from warnings import warn
def rel_dist(x,y):
x = np.asarray(x)
y = np.asarray(y)
return np.linalg.norm(x-y)/np.linalg.norm(np.mean((x,y)))
class Anchor(tuple):
"""
Class for a single anchor. Behaves mostly like a tuple, except that the respective components can also be accessed via the attributes `time`, `state`, and `diff`, and some copying and checks are performed upon creation.
Also, it implements the less-than operator (<) for comparison by time, which allows the use of Python’s sort routines.
"""
def __new__( cls, time, state, diff ):
state = np.atleast_1d(np.array(state,dtype=float,copy=True))
diff = np.atleast_1d(np.array(diff ,dtype=float,copy=True))
if len(state.shape) != 1:
raise ValueError("State must be a number or one-dimensional iterable.")
if state.shape != diff.shape:
raise ValueError("State and diff do not match in shape.")
return super().__new__(cls,(time,state,diff))
def __init__(self, *args):
self.time = self[0]
self.state = self[1]
self.diff = self[2]
# This is for sorting, which is guaranteed (docs.python.org/3/howto/sorting.html) to use __lt__, and bisect_left:
def __lt__(self,other):
if isinstance(other,Anchor):
return self.time < other.time
else:
return self.time < float(other)
def interpolate(t,i,anchors):
"""
Returns the `i`-th value of a cubic Hermite interpolant of the `anchors` at time `t`.
"""
return interpolate_vec(t,anchors)[i]
def interpolate_vec(t,anchors):
"""
Returns all values of a cubic Hermite interpolant of the `anchors` at time `t`.
"""
q = (anchors[1].time-anchors[0].time)
x = (t-anchors[0].time) / q
a = anchors[0].state
b = anchors[0].diff * q
c = anchors[1].state
d = anchors[1].diff * q
return (1-x) * ( (1-x) * (b*x + (a-c)*(2*x+1)) - d*x**2) + c
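# A small sanity-check sketch (hypothetical anchors, not part of the original code):
# the cubic Hermite interpolant reproduces the anchors' states and derivatives at the
# anchor times themselves.
def _interpolation_sketch():
    anchors = (
        Anchor(0.0, [1.0, 0.0], [0.0, 1.0]),
        Anchor(2.0, [3.0, -1.0], [0.5, 0.0]),
    )
    assert np.allclose(interpolate_vec(0.0, anchors), anchors[0].state)
    assert np.allclose(interpolate_vec(2.0, anchors), anchors[1].state)
    assert np.allclose(interpolate_diff_vec(0.0, anchors), anchors[0].diff)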
def interpolate_diff(t,i,anchors):
"""
Returns the `i`-th component of the derivative of a cubic Hermite interpolant of the `anchors` at time `t`.
"""
return interpolate_diff_vec(t,anchors)[i]
def interpolate_diff_vec(t,anchors):
"""
Returns the derivative of a cubic Hermite interpolant of the `anchors` at time `t`.
"""
q = (anchors[1].time-anchors[0].time)
x = (t-anchors[0].time) / q
a = anchors[0].state
b = anchors[0].diff * q
c = anchors[1].state
d = anchors[1].diff * q
return ( (1-x)*(b-x*3*(2*(a-c)+b+d)) + d*x ) /q
sumsq = lambda x: np.sum(x**2)
# The matrix induced by the scalar product of the cubic Hermite interpolants of two anchors, if their distance is normalised to 1.
sp_matrix = np.array([
[156, 22, 54, -13],
[ 22, 4, 13, -3],
[ 54, 13, 156, -22],
[-13, -3, -22, 4],
])/420
# The matrix induced by the scalar product of the cubic Hermite interpolants of two anchors, if their distance is normalised to 1, but the initial portion z of the interval is not considered for the scalar product.
def partial_sp_matrix(z):
h_1 = - 120*z**7 - 350*z**6 - 252*z**5
h_2 = - 60*z**7 - 140*z**6 - 84*z**5
h_3 = - 120*z**7 - 420*z**6 - 378*z**5
h_4 = - 70*z**6 - 168*z**5 - 105*z**4
h_6 = - 105*z**4 - 140*z**3
h_7 = - 210*z**4 - 420*z**3
h_5 = 2*h_2 + 3*h_4
h_8 = - h_5 + h_7 - h_6 - 210*z**2
return np.array([
[ 2*h_3 , h_1 , h_7-2*h_3 , h_5 ],
[ h_1 , h_2 , h_6-h_1 , h_2+h_4 ],
[ h_7-2*h_3, h_6-h_1, 2*h_3-2*h_7-420*z, h_8 ],
[ h_5 , h_2+h_4, h_8 , -h_1+h_2+h_5+h_6 ]
])/420
def norm_sq_interval(anchors, indices):
"""
Returns the squared norm of the interpolant of `anchors` for the `indices`.
"""
q = (anchors[1].time-anchors[0].time)
vector = np.vstack([
anchors[0].state[indices] , # a
anchors[0].diff[indices] * q, # b
anchors[1].state[indices] , # c
anchors[1].diff[indices] * q, # d
])
return np.einsum(
vector , [0,2],
sp_matrix, [0,1],
vector , [1,2],
)*q
def norm_sq_partial(anchors, indices, start):
"""
Returns the squared norm of the interpolant of `anchors` for the `indices`, but only taking into account the time after `start`.
"""
q = (anchors[1].time-anchors[0].time)
z = (start-anchors[1].time) / q
vector = np.vstack([
anchors[0].state[indices] , # a
anchors[0].diff[indices] * q, # b
anchors[1].state[indices] , # c
anchors[1].diff[indices] * q, # d
])
return np.einsum(
vector , [0,2],
partial_sp_matrix(z), [0,1],
vector , [1,2],
)*q
def scalar_product_interval(anchors, indices_1, indices_2):
"""
Returns the (integral) scalar product of the interpolants of `anchors` for `indices_1` (one side of the product) and `indices_2` (other side).
"""
q = (anchors[1].time-anchors[0].time)
vector_1 = np.vstack([
anchors[0].state[indices_1], # a_1
anchors[0].diff[indices_1] * q, # b_1
anchors[1].state[indices_1], # c_1
anchors[1].diff[indices_1] * q, # d_1
])
vector_2 = np.vstack([
anchors[0].state[indices_2], # a_2
anchors[0].diff[indices_2] * q, # b_2
anchors[1].state[indices_2], # c_2
anchors[1].diff[indices_2] * q, # d_2
])
return np.einsum(
vector_1, [0,2],
sp_matrix, [0,1],
vector_2, [1,2]
)*q
def scalar_product_partial(anchors, indices_1, indices_2, start):
"""
Returns the scalar product of the interpolants of `anchors` for `indices_1` (one side of the product) and `indices_2` (other side), but only taking into account the time after `start`.
"""
q = (anchors[1].time-anchors[0].time)
z = (start-anchors[1].time) / q
vector_1 = np.vstack([
anchors[0].state[indices_1], # a_1
anchors[0].diff[indices_1] * q, # b_1
anchors[1].state[indices_1], # c_1
anchors[1].diff[indices_1] * q, # d_1
])
vector_2 = np.vstack([
anchors[0].state[indices_2], # a_2
anchors[0].diff[indices_2] * q, # b_2
anchors[1].state[indices_2], # c_2
anchors[1].diff[indices_2] * q, # d_2
])
return np.einsum(
vector_1, [0,2],
partial_sp_matrix(z), [0,1],
vector_2, [1,2]
)*q
class Extrema(object):
"""
Class for containing the extrema and their positions in `n` dimensions. These can be accessed via the attributes `minima`, `maxima`, `arg_min`, and `arg_max`.
"""
def __init__(self,n):
self.arg_min = np.full(n,np.nan)
self.arg_max = np.full(n,np.nan)
self.minima = np.full(n, np.inf)
self.maxima = np.full(n,-np.inf)
def update(self,times,values,condition=True):
"""
Updates the extrema if `values` are more extreme.
Parameters
----------
condition : boolean or array of booleans
Only the components where this is `True` are updated.
"""
update_min =
|
np.logical_and(values<self.minima,condition)
|
numpy.logical_and
|
import pathlib as path
import numpy as np
import pandas as pd
from utils import make_map, mappify
def process_card_count(df, client_ids):
r=[]
for id in client_ids:
card_count = 0
rows = df[df['client_id']==id]
if not rows.empty:
cards = rows['card_id'].unique()
card_count = len(cards)
r.append(card_count)
r = np.array(r)
print('card_count',r.shape)
return r
def process_avg_trxn_amount(df, client_ids):
r = []
for id in client_ids:
average = 0
rows = df[df['client_id']==id]
if not rows.empty:
n = len(rows)
total = rows['tran_amt_rur'].sum()
average = total/n
r.append([average, total])
r =
|
np.array(r)
|
numpy.array
|
#!/usr/bin/env python
"""
This is the data handling module for the predstorm package, containing the main
data class SatData and all relevant data handling functions and procedures.
Author: <NAME>, <NAME>, IWF Graz, Austria
twitter @chrisoutofspace, https://github.com/cmoestl
started April 2018, last update May 2019
Python 3.7
Packages not included in anaconda installation: cdflib (https://github.com/MAVENSDC/cdflib)
Issues:
- ...
To-dos:
- ...
Future steps:
- ...
-----------------
MIT LICENSE
Copyright 2019, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# Standard
import os
import sys
import copy
from datetime import datetime, timedelta, timezone
from dateutil.relativedelta import relativedelta
from dateutil import tz
import gzip
import logging
import numpy as np
import pdb
import pickle
import re
import scipy
import scipy.io
import shutil
import subprocess
from matplotlib.dates import date2num, num2date
from glob import iglob
import json
import urllib
# External
import cdflib
import heliosat
from heliosat.spice import transform_frame
import astropy.time
import spiceypy
try:
from netCDF4 import Dataset
except:
pass
# Local
from .predict import make_kp_from_wind, calc_ring_current_term
from .predict import make_aurora_power_from_wind, calc_newell_coupling
from .predict import calc_dst_burton, calc_dst_obrien, calc_dst_temerin_li
from .config.constants import AU, dist_to_L1
logger = logging.getLogger(__name__)
# =======================================================================================
# -------------------------------- I. CLASSES ------------------------------------------
# =======================================================================================
class SatData():
"""Data object containing satellite data.
Init Parameters
===============
--> SatData(input_dict, source=None, header=None)
input_dict : dict(key: dataarray)
Dict containing the input data in the form of key: data (in array or list)
Example: {'time': timearray, 'bx': bxarray}. The available keys for data input
can be accessed in SatData.default_keys.
header : dict(headerkey: value)
Dict containing metadata on the data array provided. Useful data headers are
provided in SatData.empty_header but this can be expanded as needed.
source : str
Provide quick-access name of satellite/data type for source.
Attributes
==========
.data : np.ndarray
Array containing measurements/indices. Best accessed using SatData[key].
.position : np.ndarray
Array containing position data for satellite.
.h : dict
Dict of metadata as defined by input header.
.state : np.array (dtype=object)
Array of None, str if defining state of data (e.g. 'quiet', 'cme').
.vars : list
List of variables stored in SatData.data.
.source : str
Data source name.
Methods
=======
.convert_GSE_to_GSM()
Coordinate conversion.
.convert_RTN_to_GSE()
Coordinate conversion.
.cut(starttime=None, endtime=None)
Cuts data to within timerange and returns.
.get_position(timestamp)
Returns position of spacecraft at time.
.get_newell_coupling()
Calculates the Newell coupling index for the data.
.interp_nans(keys=None)
Linearly interpolates over nans in data.
.interp_to_time()
Linearly interpolates data to a given time array.
.load_position_data(position_data_file)
Loads position data from file.
.make_aurora_power_prediction()
Calculates aurora power.
.make_dst_prediction()
Makes prediction of Dst from data.
.make_kp_prediction()
Prediction of kp.
.make_hourly_data()
Takes minute resolution data and interpolates to hourly data points.
.shift_time_to_L1()
Shifts time to L1 from satellite ahead in sw rotation.
Examples
========
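A minimal sketch (hypothetical values; times are matplotlib date numbers):
    >>> t = np.linspace(date2num(datetime(2019, 1, 1)), date2num(datetime(2019, 1, 2)), 25)
    >>> ExampleData = SatData({'time': t, 'bz': np.zeros(len(t))}, source='Example')
    >>> len(ExampleData), ExampleData.vars
    (25, ['bz'])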
"""
default_keys = ['time',
'speed', 'speedx', 'density', 'temp', 'pdyn',
'bx', 'by', 'bz', 'btot',
'br', 'bt', 'bn',
'dst', 'kp', 'aurora', 'ec', 'ae']
empty_header = {'DataSource': '',
'SourceURL' : '',
'SamplingRate': None,
'ReferenceFrame': '',
'FileVersion': {},
'Instruments': [],
'RemovedTimes': [],
'PlasmaDataIntegrity': 10
}
def __init__(self, input_dict, source=None, header=None):
"""Create new instance of class."""
# Check input data
for k in input_dict.keys():
if not k in SatData.default_keys:
raise NotImplementedError("Key {} not implemented in SatData class!".format(k))
if 'time' not in input_dict.keys():
raise Exception("Time variable is required for SatData object!")
dt = [x for x in SatData.default_keys if x in input_dict.keys()]
if len(input_dict['time']) == 0:
logger.warning("SatData.__init__: Inititating empty array! Is the data missing?")
# Create data array attribute
data = [input_dict[x] if x in dt else np.zeros(len(input_dict['time'])) for x in SatData.default_keys]
self.data = np.asarray(data)
# Create array for state classifiers (currently empty)
self.state = np.array([None]*len(self.data[0]), dtype='object')
# Add new attributes to the created instance
self.source = source
if header == None: # Initialise empty header
self.h = copy.deepcopy(SatData.empty_header)
else:
self.h = header
self.pos = None
self.vars = dt
self.vars.remove('time')
# -----------------------------------------------------------------------------------
# Internal methods
# -----------------------------------------------------------------------------------
def __getitem__(self, var):
if isinstance(var, str):
if var in self.vars+['time']:
return self.data[SatData.default_keys.index(var)]
else:
raise Exception("SatData object does not contain data under the key '{}'!".format(var))
return self.data[:,var]
def __setitem__(self, var, value):
if isinstance(var, str):
if var in self.vars:
self.data[SatData.default_keys.index(var)] = value
elif var in SatData.default_keys and var not in self.vars:
self.data[SatData.default_keys.index(var)] = value
self.vars.append(var)
else:
raise Exception("SatData object does not contain the key '{}'!".format(var))
else:
raise ValueError("Cannot interpret {} as index for __setitem__!".format(var))
def __len__(self):
return len(self.data[0])
def __str__(self):
"""Return string describing object."""
ostr = "Length of data:\t\t{}\n".format(len(self))
ostr += "Keys in data:\t\t{}\n".format(self.vars)
ostr += "First data point:\t{}\n".format(num2date(self['time'][0]))
ostr += "Last data point:\t{}\n".format(num2date(self['time'][-1]))
ostr += "\n"
ostr += "Header information:\n"
for j in self.h:
if self.h[j] != None: ostr += " {:>25}:\t{}\n".format(j, self.h[j])
ostr += "\n"
ostr += "Variable statistics:\n"
ostr += "{:>12}{:>12}{:>12}\n".format('VAR', 'MEAN', 'STD')
for k in self.vars:
ostr += "{:>12}{:>12.2f}{:>12.2f}\n".format(k, np.nanmean(self[k]), np.nanstd(self[k]))
return ostr
# -----------------------------------------------------------------------------------
# Position data handling and coordinate conversions
# -----------------------------------------------------------------------------------
def convert_mag_to(self, refframe):
"""Converts MAG from one refframe to another."""
barray = np.stack((self['bx'], self['by'], self['bz']), axis=1)
tarray = [num2date(t).replace(tzinfo=None) for t in self['time']]
#b_transformed = transform_frame(tarray, barray, self.h['ReferenceFrame'], refframe)
for i in range(0, len(tarray)):
barray[i] = spiceypy.mxv(spiceypy.pxform(self.h['ReferenceFrame'], refframe,
spiceypy.datetime2et(tarray[i])),
barray[i])
self['bx'], self['by'], self['bz'] = barray[:,0], barray[:,1], barray[:,2]
self.h['ReferenceFrame'] = refframe
return self
def convert_GSE_to_GSM(self):
"""GSE to GSM conversion
main issue: need to get angle psigsm after Hapgood 1992/1997, section 4.3
for debugging pdb.set_trace()
for testing OMNI DATA use
[bxc,byc,bzc]=convert_GSE_to_GSM(bx[90000:90000+20],by[90000:90000+20],bz[90000:90000+20],times1[90000:90000+20])
CAUTION: Overwrites original data.
"""
logger.info("Converting GSE magn. values to GSM")
mjd=np.zeros(len(self['time']))
#output variables
bxgsm=np.zeros(len(self['time']))
bygsm=np.zeros(len(self['time']))
bzgsm=np.zeros(len(self['time']))
for i in np.arange(0,len(self['time'])):
#get all dates right
jd=astropy.time.Time(num2date(self['time'][i]), format='datetime', scale='utc').jd
mjd[i]=float(int(jd-2400000.5)) #use modified julian date
T00=(mjd[i]-51544.5)/36525.0
dobj=num2date(self['time'][i])
UT=dobj.hour + dobj.minute / 60. + dobj.second / 3600. #time in UT in hours
#define position of geomagnetic pole in GEO coordinates
pgeo=78.8+4.283*((mjd[i]-46066)/365.25)*0.01 #in degrees
lgeo=289.1-1.413*((mjd[i]-46066)/365.25)*0.01 #in degrees
#GEO vector
Qg=[np.cos(pgeo*np.pi/180)*np.cos(lgeo*np.pi/180), np.cos(pgeo*np.pi/180)*np.sin(lgeo*np.pi/180), np.sin(pgeo*np.pi/180)]
#now move to equation at the end of the section, which goes back to equations 2 and 4:
#CREATE T1, T00, UT is known from above
zeta=(100.461+36000.770*T00+15.04107*UT)*np.pi/180
################### theta and z
T1=np.matrix([[np.cos(zeta), np.sin(zeta), 0], [-np.sin(zeta) , np.cos(zeta) , 0], [0, 0, 1]]) #angle for transpose
LAMBDA=280.460+36000.772*T00+0.04107*UT
M=357.528+35999.050*T00+0.04107*UT
lt2=(LAMBDA+(1.915-0.0048*T00)*np.sin(M*np.pi/180)+0.020*np.sin(2*M*np.pi/180))*np.pi/180
#CREATE T2, LAMBDA, M, lt2 known from above
##################### lambda and Z
t2z=np.matrix([[np.cos(lt2), np.sin(lt2), 0], [-np.sin(lt2) , np.cos(lt2) , 0], [0, 0, 1]])
et2=(23.439-0.013*T00)*np.pi/180
###################### epsilon and x
t2x=np.matrix([[1,0,0],[0,np.cos(et2), np.sin(et2)], [0, -np.sin(et2), np.cos(et2)]])
T2=np.dot(t2z,t2x) #equation 4 in Hapgood 1992
#matrix multiplications
T2T1t=np.dot(T2,np.matrix.transpose(T1))
################
Qe=np.dot(T2T1t,Qg) #Q=T2*T1^-1*Qq
psigsm=np.arctan(Qe.item(1)/Qe.item(2)) #arctan(ye/ze) in between -pi/2 to +pi/2
T3=np.matrix([[1,0,0],[0,np.cos(-psigsm), np.sin(-psigsm)], [0, -np.sin(-psigsm), np.cos(-psigsm)]])
GSE=np.matrix([[self['bx'][i]],[self['by'][i]],[self['bz'][i]]])
GSM=np.dot(T3,GSE) #equation 6 in Hapgood
bxgsm[i]=GSM.item(0)
bygsm[i]=GSM.item(1)
bzgsm[i]=GSM.item(2)
#-------------- loop over
self['bx'] = bxgsm
self['by'] = bygsm
self['bz'] = bzgsm
self.h['ReferenceFrame'] = 'GSM'
return self
def convert_RTN_to_GSE(self, pos_obj=[], pos_tnum=[]):
"""Converts RTN to GSE coordinates.
function call [dbr,dbt,dbn]=convert_RTN_to_GSE_sta_l1(sta_br7,sta_bt7,sta_bn7,sta_time7, pos.sta)
pdb.set_trace() for debugging
convert STEREO A magnetic field from RTN to GSE
for prediction of structures seen at STEREO-A later at Earth L1
so we do not include a rotation of the field to the Earth position
"""
logger.info("Converting RTN magn. values to GSE")
#output variables
heeq_bx=np.zeros(len(self['time']))
heeq_by=np.zeros(len(self['time']))
heeq_bz=np.zeros(len(self['time']))
bxgse=np.zeros(len(self['time']))
bygse=np.zeros(len(self['time']))
bzgse=np.zeros(len(self['time']))
AU = 149597870.700 #in km
########## first RTN to HEEQ
# NEW METHOD:
if len(pos_obj) == 0 and len(pos_tnum) == 0:
if self.pos == None:
raise Exception("Load position data (SatData.load_position_data()) before calling convert_RTN_to_GSE()!")
#go through all data points
for i in np.arange(0,len(self['time'])):
#make RTN vectors, HEEQ vectors, and project
#r, long, lat in HEEQ to x y z
# OLD METHOD:
if len(pos_obj) > 0 and len(pos_tnum) > 0:
time_ind_pos=(np.where(pos_tnum < self['time'][i])[-1][-1])
[xa,ya,za]=sphere2cart(pos_obj[0][time_ind_pos],pos_obj[1][time_ind_pos],pos_obj[2][time_ind_pos])
# NEW METHOD:
else:
if self.pos.h['CoordinateSystem'] == 'rlonlat':
xa, ya, za = sphere2cart(self.pos['r'][i], self.pos['lon'][i], self.pos['lat'][i])
xa, ya, za = xa/AU, ya/AU, za/AU
else:
xa, ya, za = self.pos[i]/AU
#HEEQ vectors
X_heeq=[1,0,0]
Y_heeq=[0,1,0]
Z_heeq=[0,0,1]
#normalized X RTN vector
Xrtn=[xa, ya,za]/np.linalg.norm([xa,ya,za])
#solar rotation axis at 0, 0, 1 in HEEQ
Yrtn=np.cross(Z_heeq,Xrtn)/np.linalg.norm(np.cross(Z_heeq,Xrtn))
Zrtn=np.cross(Xrtn, Yrtn)/np.linalg.norm(np.cross(Xrtn, Yrtn))
#project into new system
heeq_bx[i]=np.dot(np.dot(self['br'][i],Xrtn)+np.dot(self['bt'][i],Yrtn)+np.dot(self['bn'][i],Zrtn),X_heeq)
heeq_by[i]=np.dot(np.dot(self['br'][i],Xrtn)+np.dot(self['bt'][i],Yrtn)+np.dot(self['bn'][i],Zrtn),Y_heeq)
heeq_bz[i]=np.dot(np.dot(self['br'][i],Xrtn)+np.dot(self['bt'][i],Yrtn)+np.dot(self['bn'][i],Zrtn),Z_heeq)
# just testing - remove this later!
# heeq_bx=self['br']
# heeq_by=self['bt']
# heeq_bz=self['bn']
#get modified Julian Date for conversion as in Hapgood 1992
jd=np.zeros(len(self['time']))
mjd=np.zeros(len(self['time']))
#then HEEQ to GSM
#-------------- loop go through each date
for i in np.arange(0,len(self['time'])):
jd[i]=astropy.time.Time(num2date(self['time'][i]), format='datetime', scale='utc').jd
mjd[i]=float(int(jd[i]-2400000.5)) #use modified julian date
#then lambda_sun
T00=(mjd[i]-51544.5)/36525.0
dobj=num2date(self['time'][i])
UT=dobj.hour + dobj.minute / 60. + dobj.second / 3600. #time in UT in hours
LAMBDA=280.460+36000.772*T00+0.04107*UT
M=357.528+35999.050*T00+0.04107*UT
#lt2 is lambdasun in Hapgood, equation 5, here in rad
lt2=(LAMBDA+(1.915-0.0048*T00)*np.sin(M*np.pi/180)+0.020*np.sin(2*M*np.pi/180))*np.pi/180
#note that some of these equations are repeated later for the GSE to GSM conversion
S1=np.matrix([[np.cos(lt2+np.pi), np.sin(lt2+np.pi), 0], [-np.sin(lt2+np.pi) , np.cos(lt2+np.pi) , 0], [0, 0, 1]])
#create S2 matrix with angles with reversed sign for transformation HEEQ to HAE
omega_node=(73.6667+0.013958*((mjd[i]+3242)/365.25))*np.pi/180 #in rad
S2_omega=np.matrix([[np.cos(-omega_node), np.sin(-omega_node), 0], [-np.sin(-omega_node) , np.cos(-omega_node) , 0], [0, 0, 1]])
inclination_ecl=7.25*np.pi/180
S2_incl=np.matrix([[1,0,0],[0,np.cos(-inclination_ecl), np.sin(-inclination_ecl)], [0, -np.sin(-inclination_ecl), np.cos(-inclination_ecl)]])
#calculate theta
theta_node=np.arctan(np.cos(inclination_ecl)*np.tan(lt2-omega_node))
#quadrant of theta must be opposite lt2 - omega_node Hapgood 1992 end of section 5
#get lambda-omega angle in degree mod 360
lambda_omega_deg=np.mod(lt2-omega_node,2*np.pi)*180/np.pi
#get theta_node in deg
theta_node_deg=theta_node*180/np.pi
#if in same quadrant, then theta_node = theta_node +pi
if abs(lambda_omega_deg-theta_node_deg) < 180: theta_node=theta_node+np.pi
S2_theta=np.matrix([[np.cos(-theta_node), np.sin(-theta_node), 0], [-np.sin(-theta_node) , np.cos(-theta_node) , 0], [0, 0, 1]])
#make S2 matrix
S2=np.dot(np.dot(S2_omega,S2_incl),S2_theta)
#this is the matrix S2^-1 x S1
HEEQ_to_HEE_matrix=np.dot(S1, S2)
#convert HEEQ components to HEE
HEEQ=np.matrix([[heeq_bx[i]],[heeq_by[i]],[heeq_bz[i]]])
HEE=np.dot(HEEQ_to_HEE_matrix,HEEQ)
#change of sign HEE X / Y to GSE is needed
bxgse[i]=-HEE.item(0)
bygse[i]=-HEE.item(1)
bzgse[i]=HEE.item(2)
#-------------- loop over
self['bx'] = bxgse
self['by'] = bygse
self['bz'] = bzgse
self.h['ReferenceFrame'] += '-GSE'
return self
def get_position(self, timestamp):
"""Returns position of satellite at given timestamp. Coordinates
are provided in (r,lon,lat) format. Change rlonlat to False to get
(x,y,z) coordinates.
Parameters
==========
timestamp : datetime.datetime object / list of dt objs
Times of positions to return.
Returns
=======
position : array(x,y,z), list of arrays for multiple timestamps
Position of satellite in x,y,z or r,lon,lat.
"""
if self.pos == None:
raise Exception("Load position data (SatData.load_position_data()) before calling get_position()!")
tind = np.where(date2num(timestamp) < self.data[0])[0][0]
return self.pos[tind]
def load_positions(self, refframe='HEEQ', units='AU', observer='SUN', rlonlat=True, l1_corr=False):
"""Loads data on satellite position into data object. Data is loaded using a
heliosat.SpiceObject.
Parameters
==========
refframe : str (default=='HEEQ')
observer reference frame
observer : str (default='SUN')
observer body name
units : str (default='AU')
output units - m / km / AU
rlonlat : bool (default=True)
If True, returns coordinates in (r, lon, lat) format, not (x,y,z).
l1_corr : bool (default=False)
Corrects Earth position to L1 position if True.
Returns
=======
self with new data in self.pos
"""
logger.info("load_positions: Loading position data into {} data".format(self.source))
t_traj = [num2date(i).replace(tzinfo=None) for i in self['time']]
traj = self.h['HeliosatObject'].trajectory(t_traj, frame=refframe, units=units,
observer=observer)
posx, posy, posz = traj[:,0], traj[:,1], traj[:,2]
if l1_corr:
if units == 'AU':
corr = dist_to_L1/AU
elif units == 'm':
corr = dist_to_L1/1e3
elif units == 'km':
corr = dist_to_L1
posx = posx - corr
if rlonlat:
r, theta, phi = cart2sphere(posx, posy, posz)
Positions = PositionData([r, phi, theta], 'rlonlat')
else:
Positions = PositionData([posx, posy, posz], 'xyz')
Positions.h['Units'] = units
Positions.h['ReferenceFrame'] = refframe
Positions.h['Observer'] = observer
self.pos = Positions
return self
def return_position_details(self, timestamp, sun_syn=26.24):
"""Returns a string describing STEREO-A's current whereabouts.
Parameters
==========
positions : ???
Array containing spacecraft positions at given time.
DSCOVR_lasttime : float
Date of last DSCOVR measurements in number form.
Returns
=======
stereostr : str
Nicely formatted string with info on STEREO-A's location with
with respect to Earth and L5/L1.
"""
if self.pos == None:
logger.warning("Loading position data (SatData.load_positions()) for return_position_details()!")
self.load_positions()
L1Pos = get_l1_position(timestamp, units=self.pos.h['Units'], refframe=self.pos.h['ReferenceFrame'])
# Find index of current position:
ts_ind = np.where(self['time'] < date2num(timestamp))[0][0]
r = self.pos['r'][ts_ind]
# Get longitude and latitude
long_heeq = self.pos['lon'][ts_ind]*180./np.pi
lat_heeq = self.pos['lat'][ts_ind]*180./np.pi
# Define time lag from STEREO-A to Earth
timelag_l1 = abs(long_heeq)/(360./sun_syn)
arrival_time_l1 = date2num(timestamp) + timelag_l1
arrival_time_l1_str = str(num2date(arrival_time_l1))
satstr = ''
satstr += '{} HEEQ longitude wrt Earth is {:.1f} degrees.\n'.format(self.source, long_heeq)
satstr += 'This is {:.2f} times the location of L5.\n'.format(abs(long_heeq)/60.)
satstr += '{} HEEQ latitude is {:.1f} degrees.\n'.format(self.source, lat_heeq)
satstr += 'Earth L1 HEEQ latitude is {:.1f} degrees.\n'.format(L1Pos['lat']*180./np.pi)
satstr += 'Difference HEEQ latitude is {:.1f} degrees.\n'.format(abs(lat_heeq-L1Pos['lat']*180./np.pi))
satstr += '{} heliocentric distance is {:.3f} AU.\n'.format(self.source, r)
satstr += 'The solar rotation period with respect to Earth is chosen as {:.2f} days.\n'.format(sun_syn)
satstr += 'This is a time lag of {:.2f} days.\n'.format(timelag_l1)
satstr += 'Arrival time of {} wind at L1: {}\n'.format(self.source, arrival_time_l1_str[0:16])
return satstr
# -----------------------------------------------------------------------------------
# Object data handling
# -----------------------------------------------------------------------------------
def cut(self, starttime=None, endtime=None):
"""Cuts array down to range defined by starttime and endtime. One limit
can be provided or both.
Parameters
==========
starttime : datetime.datetime object
Start time (>=) of new array.
endtime : datetime.datetime object
End time (<) of new array.
Returns
=======
self : obj within new time range
"""
if starttime != None and endtime == None:
new_inds = np.where(self.data[0] >= date2num(starttime))[0]
self.data = self.data[:,new_inds]
if self.pos != None:
self.pos.positions = self.pos.positions[:,new_inds]
elif starttime == None and endtime != None:
new_inds = np.where(self.data[0] < date2num(endtime))[0]
self.data = self.data[:,new_inds]
if self.pos != None:
self.pos.positions = self.pos.positions[:,new_inds]
elif starttime != None and endtime != None:
new_inds = np.where((self.data[0] >= date2num(starttime)) & (self.data[0] < date2num(endtime)))[0]
self.data = self.data[:,new_inds]
if self.pos != None:
self.pos.positions = self.pos.positions[:,new_inds]
return self
def get_weighted_average(self, key, past_timesteps=4, past_weights=0.65):
"""
Calculates a weighted average of speed and magnetic field bx, by, bz and the Newell coupling ec
for a number of ave_hours (4 by default) back in time
input data time resolution should be 1 hour
aurora output time resolution as given by dt can be higher
corresponds roughly to ap_inter_sol.pro in IDL ovation
Parameters
==========
self : ...
key : str
String of key to return average for.
past_timesteps : int (default=4)
Timesteps previous to integrate over, usually 4 (hours)
past_weights : float (default=0.65)
Reduce weights by factor with each hour back
Returns
=======
avg : np.ndarray
Array containing averaged values. Same length as original.
"""
if key not in self.vars:
raise Exception("Key {} not available in this ({}) SatData object!".format(key, self.source))
avg = np.zeros((len(self)))
for t_ind, timestep in enumerate(self.data[0]):
weights = np.ones(past_timesteps) #make array with weights
for k in np.arange(1,past_timesteps,1):
weights[k] = weights[k-1] * past_weights
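# illustrative note (added): with the defaults past_timesteps=4 and past_weights=0.65
# this loop yields weights of roughly [1.0, 0.65, 0.4225, 0.2746], i.e. each hour further
# back contributes ~65% of the weight of the hour before it (before normalisation below)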
t_inds_for_weights = np.arange(t_ind, t_ind-past_timesteps,-1)
t_inds_for_weights[t_inds_for_weights < 0] = 0
#sum last hours with each weight and normalize
avg[t_ind] = np.round(np.nansum(self[key][t_inds_for_weights]*weights) / np.nansum(weights),1)
return avg
def make_hourly_data(self):
"""Takes data with minute resolution and interpolates to hour.
Uses .interp_to_time(). See that function for more usability options.
Parameters
==========
None
Returns
=======
Data_h : new SatData obj
New array with hourly interpolated data. Header is copied from original.
"""
# Round down to the last full hour
stime = self['time'][0] - self['time'][0]%(1./24.)
# Roundabout way to get time_h ensures timings with full hours:
nhours = (num2date(self['time'][-1])-num2date(stime)).total_seconds()/60./60.
# Create new time array
time_h = np.array(stime + np.arange(1, nhours)*(1./24.))
Data_h = self.interp_to_time(time_h)
return Data_h
def interp_nans(self, keys=None):
"""Linearly interpolates over nans in array.
Parameters
==========
keys : list (default=None)
Provide list of keys (str) to be interpolated over, otherwise all.
"""
logger.info("interp_nans: Interpolating nans in {} data".format(self.source))
if keys == None:
keys = self.vars
for k in keys:
inds = np.isnan(self[k])
if not inds.any():  # nothing to interpolate for this key
continue
self[k][inds] = np.interp(inds.nonzero()[0], (~inds).nonzero()[0], self[k][~inds])
return self
def interp_to_time(self, tarray, keys=None):
"""Linearly interpolates over nans in array.
Parameters
==========
tarray : np.ndarray
Array containing new timesteps in number format.
keys : list (default=None)
Provide list of keys (str) to be interpolated over, otherwise all.
"""
if keys == None:
keys = self.vars
# Create new time array
data_dict = {'time': tarray}
for k in keys:
na = np.interp(tarray, self['time'], self[k])
data_dict[k] = na
# Create new data object:
newData = SatData(data_dict, header=copy.deepcopy(self.h), source=copy.deepcopy(self.source))
newData.h['SamplingRate'] = tarray[1] - tarray[0]
# Interpolate position data:
if self.pos != None:
newPos = self.pos.interp_to_time(self['time'], tarray)
newData.pos = newPos
return newData
def remove_icmes(self, spacecraft=None):
"""Replaces ICMES in data object with NaNs.
ICMEs are automatically loaded using the HELCATS catalogue in the function
get_icme_catalogue().
NOTE: if you want to remove ICMEs from L1 data, set spacecraft='Wind'.
Parameters
==========
spacecraft : str (default=None)
Specify spacecraft for ICMEs removal. If None, self.source is used.
Returns
=======
self : obj with ICME periods removed
"""
if spacecraft == None:
spacecraft = self.source
icmes = get_icme_catalogue(spacecraft=spacecraft, starttime=num2date(self['time'][0]), endtime=num2date(self['time'][-1]))
if len(set(icmes['SC_INSITU'])) > 1:
logger.warning("Using entire CME list! Variable 'spacecraft' was not defined correctly. Options={}".format(set(icmes['SC_INSITU'])))
for i in icmes:
if spacecraft == 'Wind':
icme_inds = np.where(np.logical_and(self['time'] > i['ICME_START_TIME'], self['time'] < i['ICME_END_TIME']))
else:
icme_inds = np.where(np.logical_and(self['time'] > i['ICME_START_TIME'], self['time'] < i['MO_END_TIME']))
self.data[1:,icme_inds] = np.nan
if self['time'][0] < date2num(datetime(2007,1,1)):
logger.warning("ICMES have only been removed after 2007-01-01. There may be ICMEs before this date unaccounted for!")
if self['time'][-1] > date2num(datetime(2016,1,1)):
logger.warning("ICMES have only been removed until 2016-01-01. There may be ICMEs after this date unaccounted for!")
return self
def remove_nans(self, key=''):
"""Removes nans from data object.
Parameters
==========
key : str (optional, default=self.vars[0])
Key for variable to be used for picking out NaNs.
If multiple variables, call function for each variable.
Returns
=======
self : obj with nans removed
"""
if key == '':
key = self.vars[0]
key_ind = self.default_keys.index(key)
self.data = self.data[:,~np.isnan(self.data[key_ind])]
return self
def remove_times(self, start_remove, end_remove):
"""Removes data within period given by starttime and endtime.
Parameters
==========
start_remove : datetime.datetime object
Start time (>=) of new array.
end_remove : datetime.datetime object
End time (<) of new array.
Returns
=======
newData : new obj with time range removed
"""
before = self.data[:, self.data[0] < date2num(start_remove)]
after = self.data[:, self.data[0] > date2num(end_remove)]
new = np.hstack((before, after))
newData = SatData({'time': [1,2,3], 'bz': [1,2,3]}, header=copy.deepcopy(self.h), source=copy.deepcopy(self.source))
newData.data = new
newData.pos = copy.deepcopy(self.pos)
newData.state = copy.deepcopy(self.state)
newData.vars = copy.deepcopy(self.vars)
newData.h['RemovedTimes'].append("{}--{}".format(start_remove.strftime("%Y-%m-%dT%H:%M:%S"),
end_remove.strftime("%Y-%m-%dT%H:%M:%S")))
return newData
def shift_time_to_L1(self, sun_syn=26.24, method='new'):
"""Shifts the time variable to roughly correspond to solar wind at L1 using a
correction for timing for the Parker spiral.
See Simunac et al. 2009 Ann. Geophys. equation 1 and Thomas et al. 2018 Space Weather,
difference in heliocentric distance STEREO-A to Earth. The value is actually different
for every point but can take average of solar wind speed (method='old').
Omega is 360 deg/sun_syn in days, convert to seconds; sta_r in AU to m to km;
convert to degrees
minus sign: from STEREO-A to Earth the diff_r_deg needs to be positive
because the spiral leads to a later arrival of the solar wind at larger
heliocentric distances (this is reverse for STEREO-B!)
Parameters
==========
sun_syn : float
Sun synodic rotation in days.
method : str (default='new')
Method to be used. 'old' means average of time diff is added, 'new' means full
array of time values is added to original time array.
Returns
=======
self
"""
if method == 'old':
lag_l1, lag_r = get_time_lag_wrt_earth(satname=self.source,
timestamp=num2date(self['time'][-1]),
v_mean=np.nanmean(self['speed']), sun_syn=sun_syn)
logger.info("shift_time_to_L1: Shifting time by {:.2f} hours".format((lag_l1 + lag_r)*24.))
self.data[0] = self.data[0] + lag_l1 + lag_r
elif method == 'new':
if self.pos == None:
logger.warning("Loading position data (SatData.load_positions()) for shift_time_to_L1()!")
self.load_positions()
dttime = [num2date(t).replace(tzinfo=None) for t in self['time']]
L1Pos = get_l1_position(dttime, units=self.pos.h['Units'], refframe=self.pos.h['ReferenceFrame'])
L1_r = L1Pos['r']
timelag_diff_r = np.zeros(len(L1_r))
# define time lag from satellite to Earth
timelag_L1 = abs(self.pos['lon']*180/np.pi)/(360/sun_syn) #days
# Go through all data points
for i in np.arange(0,len(L1_r),1):
if self.pos.h['Units'] == 'AU':
sat_r = self.pos['r'][i]*AU
l1_r = L1_r[i]*AU
elif self.pos.h['Units'] == 'm':
sat_r = self.pos['r'][i]/1000.
l1_r = L1_r[i]/1000.
else:
sat_r = self.pos['r'][i]
l1_r = L1_r[i]
# Thomas et al. (2018): angular speed of rotation of sun * radial diff/speed
# note: dimensions in seconds
diff_r_deg = (360/(sun_syn*86400))*(l1_r - sat_r)/self['speed'][i]
# From lon diff, calculate time by dividing by rotation speed (in days)
timelag_diff_r[i] = np.round(diff_r_deg/(360/sun_syn),3)
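# note (added): the solar-rotation factors cancel in the two lines above, so
# timelag_diff_r is simply the radial propagation time (l1_r - sat_r)/speed expressed in days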
## add both time shifts to the original time array
self.data[0] = self.data[0] + timelag_L1 + timelag_diff_r
logger.info("shift_time_to_L1: Shifting time by {:.1f}-{:.1f} hours".format(
(timelag_L1+timelag_diff_r)[0]*24., (timelag_L1+timelag_diff_r)[-1]*24.))
return self
def shift_wind_to_L1(self):
"""Corrects for differences in B and density values due to solar wind
expansion at different radii.
Exponents taken from Kivelson and Russell, Introduction to Space Physics (Ch. 4.3.2).
Magnetic field components are scaled according to values in
Hanneson et al. (2020) JGR Space Physics paper.
https://doi.org/10.1029/2019JA027139
Parameters
==========
None
Returns
=======
self
"""
dttime = [num2date(t).replace(tzinfo=None) for t in self['time']]
L1Pos = get_l1_position(dttime, units=self.pos.h['Units'], refframe=self.pos.h['ReferenceFrame'])
if 'density' in self.vars:
self['density'] = self['density'] * (self.pos['r']/L1Pos['r'])**(-2)
if 'btot' in self.vars:
self['btot'] = self['btot'] * (self.pos['r']/L1Pos['r'])**(-1.49)
shift_vars_r = ['br', 'bx'] # radial component
shift_vars = [v for v in shift_vars_r if v in self.vars] # behave according to 1/r
for var in shift_vars:
self[var] = self[var] * (self.pos['r']/L1Pos['r'])**(-1.94)
shift_vars_t = ['bt', 'by'] # tangential component
shift_vars = [v for v in shift_vars_t if v in self.vars] # behave according to 1/r
for var in shift_vars:
self[var] = self[var] * (self.pos['r']/L1Pos['r'])**(-1.26)
shift_vars_n = ['bn', 'bz'] # normal component
shift_vars = [v for v in shift_vars_n if v in self.vars] # behave according to 1/r
for var in shift_vars:
self[var] = self[var] * (self.pos['r']/L1Pos['r'])**(-1.34)
logger.info("shift_wind_to_L1: Scaled B and density values to L1 distance")
return self
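# note (added): the scalings above move values measured at heliocentric distance r to the
# L1 distance r_L1 assuming power laws n ~ r**-2, |B| ~ r**-1.49, Br/Bx ~ r**-1.94,
# Bt/By ~ r**-1.26 and Bn/Bz ~ r**-1.34, i.e. the exponents cited from Hanneson et al. (2020)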
# -----------------------------------------------------------------------------------
# Index calculations and predictions
# -----------------------------------------------------------------------------------
def extract_local_time_variables(self):
"""Takes the UTC time in numpy date format and
returns local time and day of year variables, cos/sin.
Parameters:
-----------
None
Uses the timestamps in self['time'] (matplotlib date numbers).
Returns:
--------
sin_DOY, cos_DOY, sin_LT, cos_LT : np.arrays
Sine and cosine of day-of-year and local time.
"""
dtime = num2date(self['time'])
utczone = tz.gettz('UTC')
cetzone = tz.gettz('CET')
# Original data is in UTC:
dtimeUTC = [dt.replace(tzinfo=utczone) for dt in dtime]
# Correct to local time zone (CET) for local time:
dtimeCET = [dt.astimezone(cetzone) for dt in dtime]
dtlocaltime = np.array([(dt.hour + dt.minute/60. + dt.second/3600.) for dt in dtimeCET])
dtdayofyear = np.array([dt.timetuple().tm_yday for dt in dtimeCET]) + dtlocaltime
sin_DOY, cos_DOY = np.sin(2.*np.pi*dtdayofyear/365.), np.cos(2.*np.pi*dtdayofyear/365.)
sin_LT, cos_LT = np.sin(2.*np.pi*dtlocaltime/24.), np.cos(2.*np.pi*dtlocaltime/24.)
return sin_DOY, cos_DOY, sin_LT, cos_LT
def get_newell_coupling(self):
"""
Empirical Formula for dFlux/dt - the Newell coupling
e.g. paragraph 25 in Newell et al. 2010 doi:10.1029/2009JA014805
IDL ovation: sol_coup.pro - contains 33 coupling functions in total
input: needs arrays for by, bz, v
"""
ec = calc_newell_coupling(self['by'], self['bz'], self['speed'])
ecData = SatData({'time': self['time'], 'ec': ec})
ecData.h['DataSource'] = "Newell coupling parameter from {} data".format(self.source)
ecData.h['SamplingRate'] = self.h['SamplingRate']
return ecData
def make_aurora_power_prediction(self):
"""Makes prediction with data in array.
Parameters
==========
self
Returns
=======
auroraData : new SatData obj
New object containing predicted auroral power data.
"""
logger.info("Making auroral power prediction")
aurora_power = np.round(make_aurora_power_from_wind(self['btot'], self['by'], self['bz'], self['speed'], self['density']), 2)
#make sure that no values are < 0
aurora_power[np.where(aurora_power < 0)]=0.0
auroraData = SatData({'time': self['time'], 'aurora': aurora_power})
auroraData.h['DataSource'] = "Auroral power prediction from {} data".format(self.source)
auroraData.h['SamplingRate'] = 1./24.
return auroraData
def make_dst_prediction(self, method='temerin_li', t_correction=False):
"""Makes prediction with data in array.
Parameters
==========
method : str
Options = ['burton', 'obrien', 'temerin_li', 'temerin_li_2006']
t_correction : bool
For TL-2006 method only. Add a time-dependent linear correction to
Dst values (required for anything beyond 2002).
Returns
=======
dstData : new SatData obj
New object containing predicted Dst data.
"""
if method.lower() == 'temerin_li':
if 'speedx' in self.vars:
vx = self['speedx']
else:
vx = self['speed']
logger.info("Calculating Dst for {} using Temerin-Li model 2002 version (updated parameters)".format(self.source))
dst_pred = calc_dst_temerin_li(self['time'], self['btot'], self['bx'], self['by'], self['bz'], self['speed'], vx, self['density'], version='2002n')
elif method.lower() == 'temerin_li_2002':
if 'speedx' in self.vars:
vx = self['speedx']
else:
vx = self['speed']
logger.info("Calculating Dst for {} using Temerin-Li model 2002 version".format(self.source))
dst_pred = calc_dst_temerin_li(self['time'], self['btot'], self['bx'], self['by'], self['bz'], self['speed'], vx, self['density'], version='2002')
elif method.lower() == 'temerin_li_2006':
if 'speedx' in self.vars:
vx = self['speedx']
else:
vx = self['speed']
logger.info("Calculating Dst for {} using Temerin-Li model 2006 version".format(self.source))
dst_pred = calc_dst_temerin_li(self['time'], self['btot'], self['bx'], self['by'], self['bz'], self['speed'], vx, self['density'],
version='2006', linear_t_correction=t_correction)
elif method.lower() == 'obrien':
logger.info("Calculating Dst for {} using OBrien model".format(self.source))
dst_pred = calc_dst_obrien(self['time'], self['bz'], self['speed'], self['density'])
elif method.lower() == 'burton':
logger.info("Calculating Dst for {} using Burton model".format(self.source))
dst_pred = calc_dst_burton(self['time'], self['bz'], self['speed'], self['density'])
dstData = SatData({'time': copy.deepcopy(self['time']), 'dst': dst_pred})
dstData.h['DataSource'] = "Dst prediction from {} data using {} method".format(self.source, method)
dstData.h['SamplingRate'] = 1./24.
return dstData
def make_dst_prediction_from_model(self, model):
"""Makes prediction of Dst from previously trained machine learning model
with data in array.
Parameters
==========
model : sklearn/keras model
Trained model with predict() method.
Returns
=======
dstData : new SatData obj
New object containing predicted Dst data.
"""
logger.info("Making Dst prediction for {} using machine learning model".format(self.source))
dst_pred = model.predict(self.data.T)
dstData = SatData({'time': self['time'], 'dst': dst_pred})
dstData.h['DataSource'] = "Dst prediction from {} data using ML model".format(self.source)
dstData.h['SamplingRate'] = 1./24.
return dstData
def make_kp_prediction(self):
"""Makes prediction with data in array.
Parameters
==========
self
Returns
=======
kpData : new SatData obj
New object containing predicted Kp data.
"""
logger.info("Making kp prediction")
kp_pred = np.round(make_kp_from_wind(self['btot'], self['by'], self['bz'], self['speed'], self['density']), 1)
kpData = SatData({'time': self['time'], 'kp': kp_pred})
kpData.h['DataSource'] = "Kp prediction from {} data".format(self.source)
kpData.h['SamplingRate'] = 1./24.
return kpData
# -----------------------------------------------------------------------------------
# Definition of state
# -----------------------------------------------------------------------------------
def get_state(self):
"""Finds state of wind and fills self.state attribute."""
logger.info("Coming soon.")
# -----------------------------------------------------------------------------------
# Data archiving
# -----------------------------------------------------------------------------------
def archive(self):
"""Make archive of long-term data."""
logger.info("Not yet implemented.")
class PositionData():
"""Data object containing satellite position data.
Init Parameters
===============
--> PositionData(posdata, postype, header=None)
posdata : list(x,y,z) or list(r,lon,lat)
List or array containing the position data in cartesian (x,y,z) or
spherical (r,lon,lat) coordinates.
postype : str
Coordinate type of posdata, either 'xyz' or 'rlonlat'.
header : dict(headerkey: value)
Dict containing metadata on the position data provided. Useful headers are
provided in PositionData.empty_header but this can be expanded as needed.
Attributes
==========
.positions : np.ndarray
Array containing position information. Best accessed using PositionData[key].
.h : dict
Dict of metadata as defined by input header.
Methods
=======
...
Examples
========
"""
empty_header = {'ReferenceFrame': '',
'CoordinateSystem': '',
'Units': '',
'Object': '',
}
# -----------------------------------------------------------------------------------
# Internal methods
# -----------------------------------------------------------------------------------
def __init__(self, posdata, postype, header=None):
"""Create new instance of class."""
if not postype.lower() in ['xyz', 'rlonlat']:
raise Exception("PositionData __init__: postype must be either 'xyz' or 'rlonlat'!")
self.positions = np.asarray(posdata)
if header == None: # Inititalise empty header
self.h = copy.deepcopy(PositionData.empty_header)
else:
self.h = header
self.h['CoordinateSystem'] = postype.lower()
self.coors = ['x','y','z'] if postype.lower() == 'xyz' else ['r','lon','lat']
def __getitem__(self, var):
if isinstance(var, str):
if var in self.coors:
return self.positions[self.coors.index(var)]
else:
raise Exception("PositionData object does not contain data under the key '{}'!".format(var))
return self.positions[:,var]
def __setitem__(self, var, value):
if isinstance(var, str):
if var in self.coors:
self.positions[self.coors.index(var)] = value
else:
raise Exception("PositionData object does not contain the key '{}'!".format(var))
else:
raise ValueError("Cannot interpret {} as index for __setitem__!".format(var))
def __len__(self):
return len(self.positions[0])
def __str__(self):
return self.positions.__str__()
# -----------------------------------------------------------------------------------
# Object data handling
# -----------------------------------------------------------------------------------
def interp_to_time(self, t_orig, t_new):
"""Linearly interpolates over nans in array.
Parameters
==========
t_orig : np.ndarray
Array containing original timesteps.
t_new : np.ndarray
Array containing new timesteps.
"""
na = []
for k in self.coors:
na.append(np.interp(t_new, t_orig, self[k]))
# Create new data object:
newData = PositionData(na, copy.deepcopy(self.h['CoordinateSystem']), header=copy.deepcopy(self.h))
return newData
# =======================================================================================
# -------------------------------- II. FUNCTIONS ----------------------------------------
# =======================================================================================
# ***************************************************************************************
# A. Coordinate conversion functions:
# ***************************************************************************************
def convert_GSE_to_GSM(bxgse,bygse,bzgse,timegse):
"""GSE to GSM conversion
main issue: need to get angle psigsm after Hapgood 1992/1997, section 4.3
for debugging pdb.set_trace()
for testing OMNI DATA use
[bxc,byc,bzc]=convert_GSE_to_GSM(bx[90000:90000+20],by[90000:90000+20],bz[90000:90000+20],times1[90000:90000+20])
"""
mjd=np.zeros(len(timegse))
#output variables
bxgsm=np.zeros(len(timegse))
bygsm=np.zeros(len(timegse))
bzgsm=np.zeros(len(timegse))
for i in np.arange(0,len(timegse)):
#get all dates right
jd=astropy.time.Time(num2date(timegse[i]), format='datetime', scale='utc').jd
mjd[i]=float(int(jd-2400000.5)) #use modified julian date
T00=(mjd[i]-51544.5)/36525.0
dobj=num2date(timegse[i])
UT=dobj.hour + dobj.minute / 60. + dobj.second / 3600. #time in UT in hours
#define position of geomagnetic pole in GEO coordinates
pgeo=78.8+4.283*((mjd[i]-46066)/365.25)*0.01 #in degrees
lgeo=289.1-1.413*((mjd[i]-46066)/365.25)*0.01 #in degrees
#GEO vector
Qg=[np.cos(pgeo*np.pi/180)*np.cos(lgeo*np.pi/180), np.cos(pgeo*np.pi/180)*np.sin(lgeo*np.pi/180), np.sin(pgeo*np.pi/180)]
#now move to equation at the end of the section, which goes back to equations 2 and 4:
#CREATE T1, T00, UT is known from above
zeta=(100.461+36000.770*T00+15.04107*UT)*np.pi/180
################### theta and z
T1=np.matrix([[np.cos(zeta), np.sin(zeta), 0], [-np.sin(zeta) , np.cos(zeta) , 0], [0, 0, 1]]) #angle for transpose
LAMBDA=280.460+36000.772*T00+0.04107*UT
M=357.528+35999.050*T00+0.04107*UT
lt2=(LAMBDA+(1.915-0.0048*T00)*np.sin(M*np.pi/180)+0.020*np.sin(2*M*np.pi/180))*np.pi/180
#CREATE T2, LAMBDA, M, lt2 known from above
##################### lambda and Z
t2z=np.matrix([[np.cos(lt2), np.sin(lt2), 0], [-np.sin(lt2) , np.cos(lt2) , 0], [0, 0, 1]])
et2=(23.439-0.013*T00)*np.pi/180
###################### epsilon and x
t2x=np.matrix([[1,0,0],[0,np.cos(et2), np.sin(et2)], [0, -np.sin(et2), np.cos(et2)]])
T2=np.dot(t2z,t2x) #equation 4 in Hapgood 1992
#matrix multiplications
T2T1t=np.dot(T2,np.matrix.transpose(T1))
################
Qe=np.dot(T2T1t,Qg) #Q=T2*T1^-1*Qq
psigsm=np.arctan(Qe.item(1)/Qe.item(2)) #arctan(ye/ze) in between -pi/2 to +pi/2
T3=np.matrix([[1,0,0],[0,np.cos(-psigsm), np.sin(-psigsm)], [0, -np.sin(-psigsm), np.cos(-psigsm)]])
GSE=np.matrix([[bxgse[i]],[bygse[i]],[bzgse[i]]])
GSM=np.dot(T3,GSE) #equation 6 in Hapgood
bxgsm[i]=GSM.item(0)
bygsm[i]=GSM.item(1)
bzgsm[i]=GSM.item(2)
#-------------- loop over
return (bxgsm,bygsm,bzgsm)
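# note (added): following Hapgood (1992), GSE -> GSM is a single rotation about the
# Earth-Sun (x) axis by the angle psi between the GSE z-axis and the projection of the
# geomagnetic dipole axis onto the GSE y-z plane, which is what the matrix T3 above applies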
def convert_RTN_to_GSE_sta_l1(cbr,cbt,cbn,ctime,pos_stereo_heeq,pos_time_num):
"""function call [dbr,dbt,dbn]=convert_RTN_to_GSE_sta_l1(sta_br7,sta_bt7,sta_bn7,sta_time7, pos.sta)
pdb.set_trace() for debugging
convert STEREO A magnetic field from RTN to GSE
for prediction of structures seen at STEREO-A later at Earth L1
so we do not include a rotation of the field to the Earth position
"""
#output variables
heeq_bx=np.zeros(len(ctime))
heeq_by=np.zeros(len(ctime))
heeq_bz=np.zeros(len(ctime))
bxgse=np.zeros(len(ctime))
bygse=np.zeros(len(ctime))
bzgse=np.zeros(len(ctime))
########## first RTN to HEEQ
#go through all data points
for i in np.arange(0,len(ctime)):
time_ind_pos=(np.where(pos_time_num < ctime[i])[-1][-1])
#make RTN vectors, HEEQ vectors, and project
#r, long, lat in HEEQ to x y z
[xa,ya,za]=sphere2cart(pos_stereo_heeq[0][time_ind_pos],pos_stereo_heeq[1][time_ind_pos],pos_stereo_heeq[2][time_ind_pos])
#HEEQ vectors
X_heeq=[1,0,0]
Y_heeq=[0,1,0]
Z_heeq=[0,0,1]
#normalized X RTN vector
Xrtn=[xa, ya,za]/np.linalg.norm([xa,ya,za])
#solar rotation axis at 0, 0, 1 in HEEQ
Yrtn=np.cross(Z_heeq,Xrtn)/np.linalg.norm(np.cross(Z_heeq,Xrtn))
Zrtn=np.cross(Xrtn, Yrtn)/np.linalg.norm(np.cross(Xrtn, Yrtn))
#project into new system
heeq_bx[i]=np.dot(np.dot(cbr[i],Xrtn)+np.dot(cbt[i],Yrtn)+np.dot(cbn[i],Zrtn),X_heeq)
heeq_by[i]=np.dot(np.dot(cbr[i],Xrtn)+np.dot(cbt[i],Yrtn)+np.dot(cbn[i],Zrtn),Y_heeq)
heeq_bz[i]=np.dot(np.dot(cbr[i],Xrtn)+np.dot(cbt[i],Yrtn)+np.dot(cbn[i],Zrtn),Z_heeq)
#get modified Julian Date for conversion as in Hapgood 1992
jd=np.zeros(len(ctime))
mjd=np.zeros(len(ctime))
#then HEEQ to GSE
#-------------- loop go through each date
for i in np.arange(0,len(ctime)):
jd[i]=astropy.time.Time(num2date(ctime[i]), format='datetime', scale='utc').jd
mjd[i]=float(int(jd[i]-2400000.5)) #use modified julian date
#then lambda_sun
T00=(mjd[i]-51544.5)/36525.0
dobj=num2date(ctime[i])
UT=dobj.hour + dobj.minute / 60. + dobj.second / 3600. #time in UT in hours
LAMBDA=280.460+36000.772*T00+0.04107*UT
M=357.528+35999.050*T00+0.04107*UT
#lt2 is lambdasun in Hapgood, equation 5, here in rad
lt2=(LAMBDA+(1.915-0.0048*T00)*
|
np.sin(M*np.pi/180)
|
numpy.sin
|
import numpy
import os
class RewardBuffer:
def __init__(self, name, inputsize,
directory='save',buffersize=100000):
"""
:param buffersize:
"""
self.states = numpy.ndarray(shape=(buffersize,inputsize), dtype=float)
self.actions = numpy.ndarray(shape=(buffersize,), dtype=int)
self.rewards = numpy.ndarray(shape=(buffersize,), dtype=float)
self.nextstates = numpy.ndarray(shape=(buffersize,inputsize), dtype=float)
self.buffersize = buffersize
self.size = 0
self.head = 0
self.name = name
self.directory = directory
self.dirty = False
def reward(self,inputs,actions,rewards,newinputs):
"""
Provide dicts of {id:item}
:param inputs:
:param actions:
:param rewards:
:param newinputs:
"""
for entityid in inputs.keys():
entityin, entityact = inputs[entityid], actions[entityid]
entityrew, entitynewin = rewards[entityid], newinputs[entityid]
self.states[self.head, :] = entityin
self.actions[self.head] = entityact
self.rewards[self.head] = entityrew
self.nextstates[self.head, :] = entitynewin
self.head = (self.head + 1) % self.buffersize
self.size = min(self.size+1, self.buffersize)
self.dirty = True
def get_batch_gen(self,batchsize,niters):
"""
Make a generator which provides batches of items
:param batchsize: size of batch
:param niters: number of batches to produce
:return:
"""
# Array of all (input, action, reward)
def gen():
# Choose and yield sets of results
for i in range(niters):
choices = numpy.random.choice(self.size,batchsize)
yield self.states[choices], self.actions[choices], self.rewards[choices], self.nextstates[choices]
return gen()
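# usage sketch (added, illustrative only): assuming the buffer already holds transitions,
#   buf = RewardBuffer('agent', inputsize=8)
#   for states, actions, rewards, nextstates in buf.get_batch_gen(batchsize=32, niters=100):
#       ...update the policy/value network on this minibatch...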
def clear(self):
self.size = 0
self.head = 0
self.dirty = True
def save(self):
if self.dirty:
print("Saving buffer... ",end='')
substates = self.states[:self.size]
subactions = self.actions[:self.size]
subrewards = self.rewards[:self.size]
subnext = self.nextstates[:self.size]
numpy.savez_compressed(os.path.join(self.directory, self.name),
states=substates, actions=subactions,
rewards=subrewards, nexts=subnext)
print("Done!")
self.dirty = False
def load(self):
savename = os.path.join(self.directory, self.name if self.name.endswith('.npz') else self.name + '.npz')
if os.path.exists(savename) and not self.dirty:
print("Loading buffer... ", end='')
loaded =
|
numpy.load(savename)
|
numpy.load
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 16 09:04:46 2017
@author: <NAME>
pygemfxns_plotting.py produces figures of simulation results
"""
# Built-in Libraries
import os
import collections
# External Libraries
import numpy as np
import pandas as pd
#import netCDF4 as nc
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.ticker import MaxNLocator
import matplotlib.patches as mpatches
import scipy
from scipy import stats
from scipy.ndimage import uniform_filter
import cartopy
#import geopandas
import xarray as xr
from osgeo import gdal, ogr, osr
import pickle
# Local Libraries
import pygem_input as input
import pygemfxns_modelsetup as modelsetup
import pygemfxns_massbalance as massbalance
import pygemfxns_gcmbiasadj as gcmbiasadj
import class_mbdata
import class_climate
#import run_simulation
# Script options
option_plot_cmip5_normalizedchange = 1
option_plot_cmip5_runoffcomponents = 0
option_plot_cmip5_map = 0
option_output_tables = 0
option_subset_GRACE = 0
option_plot_modelparam = 0
option_plot_era_normalizedchange = 1
option_compare_GCMwCal = 0
option_plot_mcmc_errors = 0
option_plot_maxloss_issues = 0
option_plot_individual_glaciers = 0
option_plot_degrees = 0
option_plot_pies = 0
option_plot_individual_gcms = 0
#%% ===== Input data =====
netcdf_fp_cmip5 = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/simulations/spc/'
netcdf_fp_era = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/simulations/ERA-Interim/ERA-Interim_1980_2017_nochg'
#mcmc_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/cal_opt2_allglac_1ch_tn_20190108/'
#mcmc_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/cal_opt2_spc_20190222_adjp10/'
mcmc_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/cal_opt2_spc_20190308_adjp12/cal_opt2/'
figure_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/figures/cmip5/'
csv_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/csv/cmip5/'
cal_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/Output/cal_opt2_spc_20190308_adjp12/cal_opt2/'
# Regions
rgi_regions = [13, 14, 15]
#rgi_regions = [13]
# Shapefiles
rgiO1_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/RGI/rgi60/00_rgi60_regions/00_rgi60_O1Regions.shp'
watershed_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/HMA_basins_20181018_4plot.shp'
kaab_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/kaab2015_regions.shp'
srtm_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/SRTM_HMA.tif'
srtm_contour_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/SRTM_HMA_countours_2km_gt3000m_smooth.shp'
rgi_glac_shp_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA.shp'
#kaab_dict_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA_w_watersheds_kaab.csv'
#kaab_csv = pd.read_csv(kaab_dict_fn)
#kaab_dict = dict(zip(kaab_csv.RGIId, kaab_csv.kaab))
# GCMs and RCP scenarios
#gcm_names = ['CanESM2', 'CCSM4', 'CNRM-CM5', 'CSIRO-Mk3-6-0', 'GFDL-CM3', 'GFDL-ESM2M', 'GISS-E2-R', 'IPSL-CM5A-LR',
# 'IPSL-CM5A-MR', 'MIROC5', 'MRI-CGCM3', 'NorESM1-M']
gcm_names = ['CanESM2']
#gcm_names = ['CanESM2', 'CCSM4', 'CNRM-CM5', 'CSIRO-Mk3-6-0', 'GFDL-CM3', 'GFDL-ESM2M', 'GISS-E2-R', 'IPSL-CM5A-LR',
# 'MPI-ESM-LR', 'NorESM1-M']
rcps = ['rcp26', 'rcp45', 'rcp85']
#rcps = ['rcp26']
# Grouping
grouping = 'all'
#grouping = 'rgi_region'
#grouping = 'watershed'
#grouping = 'kaab'
# Variable name
vn = 'mass_change'
#vn = 'volume_norm'
#vn = 'peakwater'
# Group dictionaries
watershed_dict_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA_dict_watershed.csv'
watershed_csv = pd.read_csv(watershed_dict_fn)
watershed_dict = dict(zip(watershed_csv.RGIId, watershed_csv.watershed))
kaab_dict_fn = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/qgis_himat/rgi60_HMA_dict_kaab.csv'
kaab_csv = pd.read_csv(kaab_dict_fn)
kaab_dict = dict(zip(kaab_csv.RGIId, kaab_csv.kaab_name))
# GRACE mascons
mascon_fp = input.main_directory + '/../GRACE/GSFC.glb.200301_201607_v02.4/'
mascon_fn = 'mascon.txt'
mascon_cns = ['CenLat', 'CenLon', 'LatWidth', 'LonWidth', 'Area_arcdeg', 'Area_km2', 'location', 'basin',
'elevation_flag']
mascon_df = pd.read_csv(mascon_fp + mascon_fn, header=None, names=mascon_cns, skiprows=14,
delim_whitespace=True)
mascon_df = mascon_df.sort_values(by=['CenLat', 'CenLon'])
mascon_df.reset_index(drop=True, inplace=True)
degree_size = 0.25
peakwater_Nyears = 10
# Plot label dictionaries
title_dict = {'Amu_Darya': 'Amu Darya',
'Brahmaputra': 'Brahmaputra',
'Ganges': 'Ganges',
'Ili': 'Ili',
'Indus': 'Indus',
'Inner_Tibetan_Plateau': 'Inner TP',
'Inner_Tibetan_Plateau_extended': 'Inner TP ext',
'Irrawaddy': 'Irrawaddy',
'Mekong': 'Mekong',
'Salween': 'Salween',
'Syr_Darya': 'Syr Darya',
'Tarim': 'Tarim',
'Yangtze': 'Yangtze',
'inner_TP': 'Inner TP',
'Karakoram': 'Karakoram',
'Yigong': 'Yigong',
'Yellow': 'Yellow',
'Bhutan': 'Bhutan',
'Everest': 'Everest',
'West Nepal': 'West Nepal',
'Spiti Lahaul': 'Spiti Lahaul',
'tien_shan': 'Tien Shan',
'Pamir': 'Pamir',
'pamir_alai': 'Pamir Alai',
'Kunlun': 'Kunlun',
'Hindu Kush': 'Hindu Kush',
13: 'Central Asia',
14: 'South Asia West',
15: 'South Asia East',
'all': 'HMA'
}
title_location = {'Syr_Darya': [68, 46.1],
'Ili': [83.6, 45.5],
'Amu_Darya': [64.6, 36.9],
'Tarim': [83.0, 39.2],
'Inner_Tibetan_Plateau_extended': [100, 40],
'Indus': [70.7, 31.9],
'Inner_Tibetan_Plateau': [85, 32.4],
'Yangtze': [106.0, 29.8],
'Ganges': [81.3, 26.6],
'Brahmaputra': [92.0, 26],
'Irrawaddy': [96.2, 23.8],
'Salween': [98.5, 20.8],
'Mekong': [103.8, 17.5],
'Yellow': [106.0, 36],
13: [83,39],
14: [70.8, 30],
15: [81,26.8],
'inner_TP': [89, 33.5],
'Karakoram': [68.7, 33.5],
'Yigong': [97.5, 26.2],
'Bhutan': [92.1, 26],
'Everest': [85, 26.3],
'West Nepal': [76.5, 28],
'Spiti Lahaul': [72, 31.9],
'tien_shan': [80, 42],
'Pamir': [67.3, 36.5],
'pamir_alai': [65.2, 40.2],
'Kunlun': [79, 37.5],
'Hindu Kush': [65.3, 35]
}
vn_dict = {'volume_glac_annual': 'Normalized Volume [-]',
'volume_norm': 'Normalized Volume Remaining [-]',
'runoff_glac_annual': 'Normalized Runoff [-]',
'peakwater': 'Peak Water [yr]',
'temp_glac_annual': 'Temperature [$^\circ$C]',
'prec_glac_annual': 'Precipitation [m]',
'precfactor': 'Precipitation Factor [-]',
'tempchange': 'Temperature bias [$^\circ$C]',
'ddfsnow': 'DDFsnow [mm w.e. d$^{-1}$ $^\circ$C$^{-1}$]'}
rcp_dict = {'rcp26': '2.6',
'rcp45': '4.5',
'rcp60': '6.0',
'rcp85': '8.5'}
# Colors list
colors_rgb = [(0.00, 0.57, 0.57), (0.71, 0.43, 1.00), (0.86, 0.82, 0.00), (0.00, 0.29, 0.29), (0.00, 0.43, 0.86),
(0.57, 0.29, 0.00), (1.00, 0.43, 0.71), (0.43, 0.71, 1.00), (0.14, 1.00, 0.14), (1.00, 0.71, 0.47),
(0.29, 0.00, 0.57), (0.57, 0.00, 0.00), (0.71, 0.47, 1.00), (1.00, 1.00, 0.47)]
gcm_colordict = dict(zip(gcm_names, colors_rgb[0:len(gcm_names)]))
rcp_colordict = {'rcp26':'b', 'rcp45':'k', 'rcp60':'m', 'rcp85':'r'}
rcp_styledict = {'rcp26':':', 'rcp45':'--', 'rcp85':'-.'}
east = 60
west = 110
south = 15
north = 50
xtick = 5
ytick = 5
xlabel = 'Longitude [$^\circ$]'
ylabel = 'Latitude [$^\circ$]'
#%% FUNCTIONS
def select_groups(grouping, main_glac_rgi_all):
"""
Select groups based on grouping
"""
if grouping == 'rgi_region':
groups = main_glac_rgi_all.O1Region.unique().tolist()
group_cn = 'O1Region'
elif grouping == 'watershed':
groups = main_glac_rgi_all.watershed.unique().tolist()
group_cn = 'watershed'
elif grouping == 'kaab':
groups = main_glac_rgi_all.kaab.unique().tolist()
group_cn = 'kaab'
groups = [x for x in groups if str(x) != 'nan']
elif grouping == 'degree':
groups = main_glac_rgi_all.deg_id.unique().tolist()
group_cn = 'deg_id'
elif grouping == 'mascon':
groups = main_glac_rgi_all.mascon_idx.unique().tolist()
groups = [int(x) for x in groups]
group_cn = 'mascon_idx'
else:
groups = ['all']
group_cn = 'all_group'
try:
groups = sorted(groups, key=str.lower)
except:
groups = sorted(groups)
return groups, group_cn
def partition_multimodel_groups(gcm_names, grouping, vn, main_glac_rgi_all, rcp=None):
"""Partition multimodel data by each group for all GCMs for a given variable
Parameters
----------
gcm_names : list
list of GCM names
grouping : str
name of grouping to use
vn : str
variable name
main_glac_rgi_all : pd.DataFrame
glacier table
rcp : str
rcp name
Output
------
time_values : np.array
time values that accompany the multimodel data
ds_group : list of lists
dataset containing the multimodel data for a given variable for all the GCMs
ds_glac : np.array
dataset containing the variable of interest for each gcm and glacier
"""
# Groups
groups, group_cn = select_groups(grouping, main_glac_rgi_all)
# variable name
if vn == 'volume_norm' or vn == 'mass_change':
vn_adj = 'volume_glac_annual'
elif vn == 'peakwater':
vn_adj = 'runoff_glac_annual'
else:
vn_adj = vn
ds_group = [[] for group in groups]
for ngcm, gcm_name in enumerate(gcm_names):
for region in rgi_regions:
# Load datasets
if gcm_name == 'ERA-Interim':
netcdf_fp = netcdf_fp_era
ds_fn = 'R' + str(region) + '_ERA-Interim_c2_ba1_100sets_1980_2017.nc'
else:
netcdf_fp = netcdf_fp_cmip5 + vn_adj + '/'
ds_fn = ('R' + str(region) + '_' + gcm_name + '_' + rcp + '_c2_ba' + str(input.option_bias_adjustment) +
'_100sets_2000_2100--' + vn_adj + '.nc')
# Bypass GCMs that are missing a rcp scenario
try:
ds = xr.open_dataset(netcdf_fp + ds_fn)
except:
continue
# Extract time variable
if 'annual' in vn_adj:
try:
time_values = ds[vn_adj].coords['year_plus1'].values
except:
time_values = ds[vn_adj].coords['year'].values
elif 'monthly' in vn_adj:
time_values = ds[vn_adj].coords['time'].values
# Merge datasets
if region == rgi_regions[0]:
vn_glac_all = ds[vn_adj].values[:,:,0]
vn_glac_std_all = ds[vn_adj].values[:,:,1]
else:
vn_glac_all = np.concatenate((vn_glac_all, ds[vn_adj].values[:,:,0]), axis=0)
vn_glac_std_all =
|
np.concatenate((vn_glac_std_all, ds[vn_adj].values[:,:,1]), axis=0)
|
numpy.concatenate
|
import numpy as np
import itertools
def deterministic_solution(time, beta, w, tau_s, tau_a, g_a, s0, a0, unit_active):
"""
:param time: the time of the solution
:param beta: the bias
:param w: the weight that its receiving
:param tau_s: time constant of the unit
:param tau_a: adaptation time constant
:param g_a: adaptation gain
:param s0: initial value for the synaptic current
:param a0: initial value of the adaptation curent
:param unit_active: whether the unit is active or not.
:return:
"""
fixed_point = beta + w
charge = a0
r = tau_s / tau_a
f = 1.0 / (1 - r)
if unit_active:
fixed_point -= g_a
charge -= 1.0
slow_component = g_a * f * charge * np.exp(-time / tau_a)
fast_component = (s0 - fixed_point + g_a * charge * f) * np.exp(-time / tau_s)
s = fixed_point - slow_component + fast_component
return s
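# sanity check (added): at time = 0 the slow and fast terms cancel so that s(0) = s0,
# and for time >> tau_a both exponentials vanish, so s relaxes to the fixed point
# beta + w (minus g_a if the unit is active)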
def calculate_persistence_time(tau_a, w_diff, beta_diff, g_a, tau_s, perfect=False):
"""
Formula for approximating the persistence time; the underlying assumption is
that the persistence time is much larger than tau_s
:param tau_a: the time constant of the adaptation
:param w_diff: the difference between the weighs
:param b_diff: the difference in the bias
:param g_a: the adaptation current gain
:param tau_s: the time constant of the unit
:param perfect: whether the unit is a perfect integrator (capacitor)
:return:
"""
B = (w_diff + beta_diff)/ g_a
T = tau_a * np.log(1 / (1 - B))
if not perfect:
r = tau_s / tau_a
T += tau_a * np.log(1 / (1 - r))
return T
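# worked example (added): for tau_a = 0.150 s, g_a = 2.0 and w_diff + beta_diff = 1.0,
# B = 0.5 and the leading term gives T = 0.150 * ln(2) ~ 0.104 s; the tau_s correction
# only matters when tau_s is not negligible compared to tau_a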
def calculate_recall_quantities(manager, nr, T_recall, T_cue, remove=0.009, reset=True, empty_history=True):
n_seq = nr.shape[0]
I_cue = nr[0]
# Do the recall
manager.run_network_recall(T_recall=T_recall, I_cue=I_cue, T_cue=T_cue,
reset=reset, empty_history=empty_history)
distances = calculate_angle_from_history(manager)
winning = calculate_winning_pattern_from_distances(distances)
timings = calculate_patterns_timings(winning, manager.dt, remove=remove)
# Get the element of the sequence without consecutive duplicates
aux = [x[0] for x in timings]
pattern_sequence = [i for i, x in itertools.groupby(aux)]
# Assume successful until proven otherwise
success = 1.0
for index, pattern_index in enumerate(pattern_sequence[:n_seq]):
pattern = manager.patterns_dic[pattern_index]
goal_pattern = nr[index]
# Compare arrays of the recalled pattern with the goal
if not np.array_equal(pattern, goal_pattern):
success = 0.0
break
if len(pattern_sequence) < n_seq:
success = 0.0
persistent_times = [x[1] for x in timings]
return success, pattern_sequence, persistent_times, timings
def calculate_angle_from_history(manager):
"""
:param manager: A manager of neural networks, it is used to obtain the history of the activity and
the patterns that were stored
:return: A vector with the distances to the stored patterns at every point in time.
"""
if manager.patterns_dic is None:
raise ValueError('You have to run a protocol before or provide a patterns dic')
history = manager.history
patterns_dic = manager.patterns_dic
stored_pattern_indexes = np.array(list(patterns_dic.keys()))
num_patterns = max(stored_pattern_indexes) + 1
o = history['o'][1:]
if o.shape[0] == 0:
raise ValueError('You did not record the history of unit activities o')
distances = np.zeros((o.shape[0], num_patterns))
for index, state in enumerate(o):
# Obtain the dot product between the state of the network at each point in time and each pattern
nominator = [np.dot(state, patterns_dic[pattern_index]) for pattern_index in stored_pattern_indexes]
# Obtain the norm of both the state and the patterns to normalize
denominator = [np.linalg.norm(state) * np.linalg.norm(patterns_dic[pattern_index])
for pattern_index in stored_pattern_indexes]
# Get the angles and store them
dis = [a / b for (a, b) in zip(nominator, denominator)]
distances[index, stored_pattern_indexes] = dis
return distances
def calculate_winning_pattern_from_distances(distances):
# Returns the number of the winning pattern
return np.argmax(distances, axis=1)
def calculate_patterns_timings(winning_patterns, dt, remove=0):
"""
:param winning_patterns: A vector with the winning pattern for each point in time
:param dt: the amount that the time moves at each step
:param remove: only add the patterns if they are bigger than this number, used a small number to remove fluctuations
:return: pattern_timins, a vector with information about the winning pattern, how long the network stayed at that
configuration, when it got there, etc
"""
# First we calculate where the change of pattern occurs
change = np.diff(winning_patterns)
indexes = np.where(change != 0)[0]
# Add the end of the sequence
indexes = np.append(indexes, winning_patterns.size - 1)
patterns = winning_patterns[indexes]
patterns_timings = []
previous = 0
for pattern, index in zip(patterns, indexes):
time = (index - previous + 1) * dt # The +1 is because of the shift introduced by np.diff
if time >= remove:
patterns_timings.append((pattern, time, previous*dt, index * dt))
previous = index
return patterns_timings
def calculate_probability_theo2(Tp, Tstart, Ttotal, tau_z):
"""
Calculate the probability of the unit being activated.
:param tau_z: the time constant of the uncertainty or z-filters
:param Tp: The training time, the time that the unit was activated
:param Tstart: The time at which the unit was activated
:param Ttotal: The total time of observation
:return: the probability of the unit being active
"""
p = Tp - tau_z * np.exp((Tstart - Ttotal) / tau_z) * (np.exp(Tp / tau_z) - 1)
return p / Ttotal
def calculate_probability_theo(Tp, Tstart, Ttotal, tau_z):
"""
Calculate the probability of the unit being activated.
:param tau_z: the time constant of the uncertainty or z-filters
:param Tp: The training time, the time that the unit was activated
:param Tstart: The time at which the unit was activated
:param Ttotal: The total time of observation
:return: the probability of the unit being active
"""
M = 1 - np.exp(-Tp / tau_z)
#p = Tp + tau_z * M * (2 - np.exp(-(Ttotal - Tp)/tau_z))
p = Tp - tau_z * M + tau_z * M * (1 - np.exp(-(Ttotal - Tp) / tau_z))
return p / Ttotal
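# sanity check (added): in the limit tau_z -> 0 the z-filter follows the unit instantly,
# M -> 1 and the expression reduces to p = Tp / Ttotal, i.e. the fraction of time the
# unit was active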
def calculate_joint_probabilities_theo(T1, Ts, T2, Tt, tau1, tau2):
"""
Calculates the joint probability of unit 1 and 2.
:param T1:The time that the unit 1 remained activated (training time)
:param Ts: The time at which the second unit becomes activated
:param T2: The time the unit 2 remained activated (training time)
:param Tt: The total time of observation
:param tau1: the time constant of the z-filter of pre-synaptic unit (z-filter)
:param tau2: the time constant of the z-filter of post-synaptic unit (z-filter)
:return: the joint probability.
"""
tau_p = tau1 * tau2 / (tau1 + tau2)
M1 = 1 - np.exp(-T1 / tau1)
M2 = 1 - np.exp(-T2 / tau2)
aux1 = M1 * tau1 * (np.exp(-(Ts - T1) / tau1) - np.exp(-(Ts + T2 - T1) / tau1))
A1arg = T1 / tau1 + Ts / tau2 - (Ts + T2) / tau_p
A1 = np.exp(A1arg)
A2arg = T1 / tau1 + Ts / tau2 - Ts / tau_p
A2 = np.exp(A2arg)
aux2 = M1 * tau_p * (A1 - A2)
B1arg = T1 / tau1 + (Ts + T2) / tau2 - Tt / tau_p
B1 = np.exp(B1arg)
B2arg = T1 / tau1 + (Ts + T2) / tau2 - (Ts + T2) / tau_p
B2 = np.exp(B2arg)
aux3 = M1 * M2 * tau_p * (B1 - B2)
P = (aux1 + aux2 - aux3) / Tt
return P
def calculate_self_probability_theo(T1, Tt, tau1, tau2):
"""
The joint probability of a unit with itself, allowing different time constants for the
pre-synaptic and post-synaptic z-filters
:param T1: the time the unit remained activated (training time)
:param Tt: total time of observation
:param tau1: the pre-synaptic time constant.
:param tau2: the post-synaptic time constant.
:return:
"""
tau_p = tau1 * tau2 / (tau1 + tau2)
m1 = 1 - np.exp(-T1 / tau1)
m2 = 1 - np.exp(-T1 / tau2)
mp = 1 - np.exp(-T1 / tau_p)
aux1 = T1 - tau1 * m1 - tau2 * m2 + tau_p * mp
aux2 = tau_p * m1 * m2 * (1 - np.exp(-(Tt - T1) / tau_p))
P_self = aux1 + aux2
return P_self / Tt
def calculate_get_weights_theo(T1, T2, Tt, tau_pre, tau_post, Tr=None, IPI=None):
Tstart = 0.0
if Tr is None:
Tr = T2
if IPI is None:
IPI = 0.0
# Calculate the self weight
pi = calculate_probability_theo(T1, Tstart, Tt, tau_pre)
pii = calculate_self_probability_theo(T1, Tt, tau_pre, tau_post)
w_self = np.log10(pii / (pi * pi))
# Calculate the next weight
Ts = T1 + IPI
pij = calculate_joint_probabilities_theo(T1, Ts, T2, Tt, tau_pre, tau_post)
pj = calculate_probability_theo(T2, Tstart, Tt, tau_post)
w_next = np.log10(pij / (pi * pj))
# Calculate the rest weight
pk = calculate_probability_theo(Tr, Tstart, Tt, tau_post)
Ts = T1 + IPI + T2 + IPI
pik = calculate_joint_probabilities_theo(T1, Ts, Tr, Tt, tau_pre, tau_post)
w_rest = np.log10(pik / (pi * pk))
# Calculate the back weight
Ts = T1 + IPI
pji = calculate_joint_probabilities_theo(T1, Ts, T2, Tt, tau_post, tau_pre)
w_back = np.log10(pji / (pi * pj))
return w_self, w_next, w_rest, w_back
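# usage sketch (added, arbitrary values): weights for two 100 ms patterns presented back
# to back inside a 1 s observation window, with 25 ms z-filters on both sides:
#   w_self, w_next, w_rest, w_back = calculate_get_weights_theo(
#       T1=0.100, T2=0.100, Tt=1.0, tau_pre=0.025, tau_post=0.025)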
def calculate_triad_connectivity(tt1, tt2, tt3, ipi1, ipi2, tau_z_pre, tau_z_post,
base_time, base_ipi, resting_time, n_patterns):
"""
This function gives you the connectivity among a triad, assuming that all the other temporal
structure outside of the trial is homogeneous
:param tt1:
:param tt2:
:param tt3:
:param ipi1:
:param ipi2:
:param tau_z_pre:
:param tau_z_post:
:param base_time:
:param base_ipi:
:param resting_time:
:param n_patterns:
:return:
"""
Tt = (n_patterns - 3) * base_time + tt1 + tt2 + tt3 + ipi1 + ipi2 + \
(n_patterns - 2) * base_ipi + resting_time
# Single probabilities
p1_pre = calculate_probability_theo(Tp=tt1, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_pre)
p2_pre = calculate_probability_theo(Tp=tt2, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_pre)
p3_pre = calculate_probability_theo(Tp=tt3, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_pre)
p1_post = calculate_probability_theo(Tp=tt1, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_post)
p2_post = calculate_probability_theo(Tp=tt2, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_post)
p3_post = calculate_probability_theo(Tp=tt3, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_post)
# joint-self probabilities
p11 = calculate_self_probability_theo(T1=tt1, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
p22 = calculate_self_probability_theo(T1=tt2, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
p33 = calculate_self_probability_theo(T1=tt3, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
# Joint probabilities
Ts = tt1 + ipi1
p21 = calculate_joint_probabilities_theo(T1=tt1, Ts=Ts, T2=tt2, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
Ts = tt1 + ipi1 + tt2 + ipi2
p31 = calculate_joint_probabilities_theo(T1=tt1, Ts=Ts, T2=tt3, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
Ts = tt1 + ipi1
p12 = calculate_joint_probabilities_theo(T1=tt1, Ts=Ts, T2=tt2, Tt=Tt, tau1=tau_z_post, tau2=tau_z_pre)
Ts = tt2 + ipi2
p32 = calculate_joint_probabilities_theo(T1=tt2, Ts=Ts, T2=tt3, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
Ts = tt1 + ipi1 + tt2 + ipi2
p13 = calculate_joint_probabilities_theo(T1=tt1, Ts=Ts, T2=tt3, Tt=Tt, tau1=tau_z_post, tau2=tau_z_pre)
Ts = tt2 + ipi2
p23 = calculate_joint_probabilities_theo(T1=tt2, Ts=Ts, T2=tt3, Tt=Tt, tau1=tau_z_post, tau2=tau_z_pre)
# Weights
w11 = np.log10(p11 / (p1_pre * p1_post))
w12 = np.log10(p12 / (p1_pre * p2_post))
w13 = np.log10(p13 / (p1_pre * p3_post))
w21 = np.log10(p21 / (p2_pre * p1_post))
w22 = np.log10(p22 / (p2_pre * p2_post))
w23 = np.log10(p23 / (p2_pre * p3_post))
w31 = np.log10(p31 / (p3_pre * p1_post))
w32 = np.log10(p32 / (p3_pre * p2_post))
w33 = np.log10(p33 / (p3_pre * p3_post))
# Betas
beta1 =
|
np.log10(p1_post)
|
numpy.log10
|