| prompt | completion | api |
| --- | --- | --- |
| string (19 – 879k chars) | string (3 – 53.8k chars) | string (8 – 59 chars) |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2016, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
"""
Dual-Pol and Differential Phase
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Overview
--------
This module provides algorithms to process polarimetric radar moments,
namely the differential phase, :math:`Phi_{DP}`, and, based on successful
:math:`Phi_{DP}` retrieval, also the specific differential phase,
:math:`K_{DP}`.
Please note that the actual application of polarimetric moments is implemented
in the corresponding wradlib modules, e.g.:
- fuzzy echo classification from polarimetric moments
(:meth:`wradlib.clutter.classify_echo_fuzzy`)
- attenuation correction (:meth:`wradlib.atten.pia_from_kdp`)
- direct precipitation retrieval from Kdp (:meth:`wradlib.trafo.kdp2r`)
Establishing a valid :math:`Phi_{DP}` profile for :math:`K_{DP}` retrieval
involves despeckling (linear_despeckle), phase unfolding, and iterative
retrieval of :math:`Phi_{DP}` from :math:`K_{DP}`.
The main workflow and its individual steps are based on a publication by
:cite:`Vulpiani2012`. For convenience, the entire workflow has been
put together in the function :meth:`wradlib.dp.process_raw_phidp_vulpiani`.
Once a valid :math:`Phi_{DP}` profile has been established, the
`kdp_from_phidp` functions can be used to retrieve :math:`K_{DP}`.
Please note that the functions in this module were designed with performance
in mind. This was mainly achieved by allowing the simultaneous application
of functions over multiple array dimensions. The only requirement to apply
these functions is that the **range dimension must be the last dimension** of
all input arrays.
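For example (illustrative, synthetic data; not part of the original
documentation)::

    import numpy as np
    import wradlib
    # one sweep: 360 azimuths x 200 range gates, range is the last dimension
    kdp_true = np.abs(np.sin(np.linspace(0., 3., 200)))[None, :].repeat(360, axis=0)
    phidp = 2. * np.cumsum(kdp_true, axis=-1) * 1.  # dr = 1 km
    kdp = wradlib.dp.kdp_from_phidp_convolution(phidp, L=7, dr=1.)
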
.. autosummary::
:nosignatures:
:toctree: generated/
process_raw_phidp_vulpiani
kdp_from_phidp_finitediff
kdp_from_phidp_linregress
kdp_from_phidp_convolution
kdp_from_phidp_sobel
unfold_phi_vulpiani
unfold_phi
linear_despeckle
texture
"""
import numpy as np
from scipy.interpolate import interp1d
from scipy.signal import medfilt
from scipy.stats import linregress
from scipy.ndimage import convolve1d
from . import util as util
def process_raw_phidp_vulpiani(phidp, dr, N_despeckle=5, L=7,
niter=2, copy=False):
"""Establish consistent :math:`Phi_{DP}` profiles from raw data.
This approach is based on :cite:`Vulpiani2012` and involves a
two step procedure of :math:`Phi_{DP}` reconstruction.
Processing of raw :math:`Phi_{DP}` data contains the following steps:
- Despeckle
- Initial :math:`K_{DP}` estimation
- Removal of artifacts
- Phase unfolding
- :math:`Phi_{DP}` reconstruction using iterative estimation
of :math:`K_{DP}`
Parameters
----------
phidp : array
array of shape (n azimuth angles, n range gates)
dr : gate length in km
N_despeckle : integer
*N* parameter of function dp.linear_despeckle
L : integer
*L* parameter of :meth:`~wradlib.dp.kdp_from_phidp_convolution`
niter : integer
Number of iterations in which phidp is retrieved from kdp
and vice versa
copy : boolean
if True, the original phidp array will remain unchanged
Returns
-------
phidp : array of shape (n azimuth angles, n range gates)
reconstructed phidp
kdp : array of shape (n azimuth angles, n range gates)
kdp estimate corresponding to phidp output
Examples
--------
See :ref:`notebooks/verification/wradlib_verify_example.ipynb`.
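An illustrative call with synthetic data (not part of the original
documentation)::

    import numpy as np
    import wradlib
    # raw PhiDP, shape (n azimuth angles, n range gates), range last
    phidp_raw = np.cumsum(np.random.uniform(0., 0.2, (360, 500)), axis=-1)
    phidp_raw += np.random.uniform(-5., 5., (360, 500))
    phidp, kdp = wradlib.dp.process_raw_phidp_vulpiani(
        phidp_raw, dr=1., N_despeckle=5, L=7, copy=True)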
"""
if copy:
phidp = phidp.copy()
# despeckle
phidp = linear_despeckle(phidp, N_despeckle)
# kdp retrieval first guess
kdp = kdp_from_phidp_convolution(phidp, dr=dr, L=L)
# remove extreme values
kdp[kdp > 20] = 0
kdp[np.logical_and(kdp < -2, kdp > -20)] = 0
# unfold phidp
phidp = unfold_phi_vulpiani(phidp, kdp)
# clean up unfolded PhiDP
phidp[phidp > 360] = np.nan
# kdp retrieval second guess
kdp = kdp_from_phidp_convolution(phidp, dr=dr, L=L)
kdp = _fill_sweep(kdp)
# remove remaining extreme values
kdp[kdp > 20] = 0
kdp[kdp < -2] = 0
# start the actual phidp/kdp iteration
for i in range(niter):
# phidp from kdp through integration
phidp = 2 * np.cumsum(kdp, axis=-1) * dr
# kdp from phidp by convolution
kdp = kdp_from_phidp_convolution(phidp, dr=dr, L=L)
# convert all NaNs to zeros (normally, this line can be assumed
# to be redundant)
kdp = _fill_sweep(kdp)
return phidp, kdp
def unfold_phi_vulpiani(phidp, kdp):
"""Alternative phase unfolding which completely relies on Kdp.
This unfolding should be used in order to iteratively reconstruct
phidp and Kdp (see :cite:`Vulpiani2012`).
Parameters
----------
phidp : array of floats
kdp : array of floats
"""
# unfold phidp
shape = phidp.shape
phidp = phidp.reshape((-1, shape[-1]))
kdp = kdp.reshape((-1, shape[-1]))
for beam in range(len(phidp)):
below_th3 = kdp[beam] < -20
try:
idx1 = np.where(below_th3)[0][2]
phidp[beam, idx1:] += 360
except Exception:
pass
return phidp.reshape(shape)
def _fill_sweep(dat, kind="nan_to_num", fill_value=0.):
"""Fills missing data in a 1d profile
Parameters
----------
dat : array of shape (n azimuth angles, n range gates)
kind : string
Defines how the filling is done.
fill_value : float
Fill value in areas of extrapolation.
"""
if kind == "nan_to_num":
return np.nan_to_num(dat)
if not np.any(np.isnan(dat)):
return dat
shape = dat.shape
dat = dat.reshape((-1, shape[-1]))
for beam in range(len(dat)):
invalid = np.isnan(dat[beam])
validx = np.where(~invalid)[0]
if len(validx) < 2:
dat[beam, invalid] = 0.
continue
f = interp1d(validx, dat[beam, validx], kind=kind,
bounds_error=False, fill_value=fill_value)
invalidx = np.where(invalid)[0]
dat[beam, invalidx] = f(invalidx)
return dat.reshape(shape)
def kdp_from_phidp_finitediff(phidp, L=7, dr=1.):
"""Retrieves :math:`K_{DP}` from :math:`Phi_{DP}` by applying a moving
window range finite difference derivative.
See :cite:`Vulpiani2012` for details about this approach.
Please note that the moving window size *L* is specified as the number of
range gates. Thus, this argument might need adjustment in case the
range resolution changes.
In the original publication (:cite:`Vulpiani2012`), the value L=7 was
chosen for a range resolution of 1km.
Warning
-------
The function is designed for speed by allowing to process
multiple dimensions in one step. For this purpose, the RANGE dimension
needs to be the LAST dimension of the input array.
Parameters
----------
phidp : multi-dimensional array
Note that the range dimension must be the last dimension of
the input array.
L : integer
Width of the window (as number of range gates)
dr : gate length in km
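Examples
--------
A minimal, illustrative example (not part of the original docstring)::

    import numpy as np
    import wradlib
    # PhiDP increasing by 0.5 deg per 1 km gate
    phidp = np.cumsum(np.full((360, 100), 0.5), axis=-1)
    kdp = wradlib.dp.kdp_from_phidp_finitediff(phidp, L=7, dr=1.)
    # away from the window edges, kdp is ~0.25 deg/km (half the PhiDP slope)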
"""
assert (L % 2) == 1, \
"Window size N for function kdp_from_phidp must be an odd number."
# Make really sure L is an integer
L = int(L)
kdp = np.zeros(phidp.shape)
for r in range(int(L / 2), phidp.shape[-1] - int(L / 2)):
kdp[..., r] = (phidp[..., r + int(L / 2)] -
phidp[..., r - int(L / 2)]) / (L - 1)
return kdp / 2. / dr
def kdp_from_phidp_linregress(phidp, L=7, dr=1.):
"""Alternative :math:`K_{DP}` from :math:`Phi_{DP}` by applying a moving
window linear regression.
Please note that the moving window size *L* is specified as the number of
range gates. Thus, this argument might need adjustment in case the range
resolution changes.
In the original publication (:cite:`Vulpiani2012`), the value L=7
was chosen for a range resolution of 1km.
Warning
-------
The function is designed for speed by allowing to process
multiple dimensions in one step. For this purpose, the RANGE dimension
needs to be the LAST dimension of the input array.
Parameters
----------
phidp : multi-dimensional array
Note that the range dimension must be the last dimension of the
input array.
L : integer
Width of the window (as number of range gates)
dr : gate length in km
Examples
--------
>>> import wradlib
>>> import numpy as np
>>> import pylab as pl
>>> pl.interactive(True)
>>> kdp_true = np.sin(3 * np.arange(0, 10, 0.1))
>>> phidp_true = np.cumsum(kdp_true)
>>> phidp_raw = phidp_true + np.random.uniform(-1, 1, len(phidp_true))
>>> gaps = np.concatenate([range(10, 20), range(30, 40), range(60, 80)])
>>> phidp_raw[gaps] = np.nan
>>> kdp_re = wradlib.dp.kdp_from_phidp_linregress(phidp_raw)
>>> line1 = pl.plot(np.ma.masked_invalid(phidp_true), "b--", label="phidp_true") # noqa
>>> line2 = pl.plot(np.ma.masked_invalid(phidp_raw), "b-", label="phidp_raw") # noqa
>>> line3 = pl.plot(kdp_true, "g-", label="kdp_true")
>>> line4 = pl.plot(np.ma.masked_invalid(kdp_re), "r-", label="kdp_reconstructed") # noqa
>>> lgnd = pl.legend(("phidp_true", "phidp_raw", "kdp_true", "kdp_reconstructed")) # noqa
"""
assert (L % 2) == 1, \
"Window size N for function kdp_from_phidp must be an odd number."
shape = phidp.shape
phidp = phidp.reshape((-1, shape[-1]))
# Make really sure L is an integer
L = int(L)
x = np.arange(phidp.shape[-1])
valids = ~np.isnan(phidp)
kdp = np.zeros(phidp.shape) * np.nan
for beam in range(len(phidp)):
for r in range(int(L / 2), phidp.shape[-1] - int(L / 2)):
# iterate over gates
ix = np.arange(r - L // 2, r + L // 2 + 1)
if np.sum(valids[beam, ix]) < L / 2:
# not enough valid values inside our window
continue
kdp[beam, r] = linregress(x[ix][valids[beam, ix]],
phidp[beam, ix[valids[beam, ix]]])[0]
# take care of the start and end of the beam
# start
ix = np.arange(0, L)
if np.sum(valids[beam, ix]) >= L / 2:
kdp[beam, ix] = linregress(x[ix][valids[beam, ix]],
phidp[beam, ix[valids[beam, ix]]])[0]
# end
ix = np.arange(shape[-1] - L, shape[-1])
if np.sum(valids[beam, ix]) >= L / 2:
kdp[beam, ix] = linregress(x[ix][valids[beam, ix]],
phidp[beam, ix[valids[beam, ix]]])[0]
# accounting for forward/backward propagation AND gate length
return kdp.reshape(shape) / 2. / dr
def kdp_from_phidp_sobel(phidp, L=7, dr=1.):
"""Alternative :math:`K_{DP}` from :math:`Phi_{DP}` by applying a sobel
filter where possible and linear regression otherwise.
The results are quite similar to the moving window linear regression, but
this is much faster, although the speed gain depends on the percentage of
NaN values in the beam. The Sobel filter is applied everywhere but will
return NaNs in case at least one value in the moving window is NaN. The
remaining NaN values are
then dealt with by using local linear regression
(see :meth:`~wradlib.dp.kdp_from_phidp_linregress`).
This Sobel filter solution has been provided by <NAME> at
StackOverflow :cite:`Sobel-linfit`
Please note that the moving window size *L* is specified as the number of
range gates. Thus, this argument might need adjustment in case the range
resolution changes.
In the original publication (:cite:`Vulpiani2012`), the value L=7
was chosen for a range resolution of 1km.
Warning
-------
The function is designed for speed by allowing to process
multiple dimensions in one step. For this purpose, the RANGE dimension
needs to be the LAST dimension of the input array.
Parameters
----------
phidp : multi-dimensional array
Note that the range dimension must be the last dimension of the
input array.
L : integer
Width of the window (as number of range gates)
dr : gate length in km
Examples
--------
>>> import wradlib
>>> import numpy as np
>>> import pylab as pl
>>> pl.interactive(True)
>>> kdp_true = np.sin(3 * np.arange(0, 10, 0.1))
>>> phidp_true = np.cumsum(kdp_true)
>>> phidp_raw = phidp_true + np.random.uniform(-1, 1, len(phidp_true))
>>> gaps = np.concatenate([range(10, 20), range(30, 40), range(60, 80)])
>>> phidp_raw[gaps] = np.nan
>>> kdp_re = wradlib.dp.kdp_from_phidp_sobel(phidp_raw)
>>> line1 = pl.plot(np.ma.masked_invalid(phidp_true), "b--", label="phidp_true") # noqa
>>> line2 = pl.plot(np.ma.masked_invalid(phidp_raw), "b-", label="phidp_raw") # noqa
>>> line3 = pl.plot(kdp_true, "g-", label="kdp_true")
>>> line4 = pl.plot(np.ma.masked_invalid(kdp_re), "r-", label="kdp_reconstructed") # noqa
>>> lgnd = pl.legend(("phidp_true", "phidp_raw", "kdp_true", "kdp_reconstructed")) # noqa
"""
assert (L % 2) == 1, \
"Window size N for function kdp_from_phidp must be an odd number."
shape = phidp.shape
phidp = phidp.reshape((-1, shape[-1]))
# Make really sure L is an integer
L = int(L)
kdp = np.zeros(phidp.shape) * np.nan
# do it fast using the sobel filter
for beam in range(len(phidp)):
kdp[beam, :] = sobel(phidp[beam, :], window_len=L)
# find remaining NaN values with valid neighbours
x = np.arange(phidp.shape[-1])
invalidkdp = np.isnan(kdp)
validphidp = ~np.isnan(phidp)
kernel = np.ones(L, dtype="i4")
# and do the slow moving window linear regression
for beam in range(len(phidp)):
# number of valid neighbours around one gate
nvalid = np.convolve(validphidp[beam], kernel, "same") > L / 2
# find those gates which have invalid Kdp AND enough valid neighbours
nangates = np.where(invalidkdp[beam] & nvalid)[0]
# now iterate over those
for r in nangates:
ix = np.arange(max(0, r - int(L / 2)), min(r + int(L / 2) + 1, shape[-1]))
# check again (just to make sure...)
if np.sum(validphidp[beam, ix]) < L / 2:
# not enough valid values inside our window
continue
kdp[beam, r] = linregress(x[ix][validphidp[beam, ix]],
phidp[beam,
ix[validphidp[beam, ix]]])[0]
# take care of the start and end of the beam
# start
ix = np.arange(0, L)
if np.sum(validphidp[beam, ix]) >= L / 2:
kdp[beam, ix] = linregress(x[ix][validphidp[beam, ix]],
phidp[beam,
ix[validphidp[beam, ix]]])[0]
# end
ix = np.arange(shape[-1] - L, shape[-1])
if np.sum(validphidp[beam, ix]) >= L / 2:
kdp[beam, ix] = linregress(x[ix][validphidp[beam, ix]],
phidp[beam,
ix[validphidp[beam, ix]]])[0]
# accounting for forward/backward propagation AND gate length
return kdp.reshape(shape) / 2. / dr
def sobel(x, window_len=7):
"""Sobel differential filter for calculating KDP.
This solution has been taken from StackOverflow :cite:`Sobel-linfit`
Returns
-------
output : differential signal (unscaled for gate spacing)
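Examples
--------
Illustrative example (not part of the original docstring): for a linear
ramp, the filter recovers the slope per gate away from the window edges::

    import numpy as np
    x = np.arange(20, dtype=float) * 0.5
    d = sobel(x, window_len=7)
    # d[3:-3] is approximately 0.5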
"""
s = np.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]
w = 2.0 * np.arange(window_len) / (window_len - 1.0) - 1.0
w = w / (abs(w).sum())
y = np.convolve(w, s, mode='valid')
return (-1.0 * y[int(window_len / 2):len(x) + int(window_len / 2)] /
(window_len / 3.0))
def kdp_from_phidp_convolution(phidp, L=7, dr=1.):
"""Alternative :math:`K_{DP}` from :math:`Phi_{DP}` by applying a
convolution filter where possible and linear regression otherwise.
The results are very similar to the moving window linear regression, but
the convolution is *much* faster, although the speed gain depends on the
percentage of NaN values in the beam.
The convolution filter was suggested by <NAME> (University of Bonn).
The filter provides fast :math:`K_{DP}` retrieval but will return NaNs in
case at least one value in the moving window is NaN. The remaining gates
are treated by using local linear regression where possible
(see :meth:`~wradlib.dp.kdp_from_phidp_linregress`).
Please note that the moving window size *L* is specified as the number of
range gates. Thus, this argument might need adjustment in case the
range resolution changes.
In the original publication (:cite:`Vulpiani2012`), the value L=7
was chosen for a range resolution of 1km.
Warning
-------
The function is designed for speed by allowing to process
multiple dimensions in one step. For this purpose, the RANGE dimension
needs to be the LAST dimension of the input array.
Parameters
----------
phidp : multi-dimensional array
Note that the range dimension must be the last dimension of the
input array.
L : integer
Width of the window (as number of range gates)
dr : gate length in km
Examples
--------
>>> import wradlib
>>> import numpy as np
>>> import matplotlib.pyplot as pl
>>> pl.interactive(True)
>>> kdp_true = np.sin(3 * np.arange(0, 10, 0.1))
>>> phidp_true = np.cumsum(kdp_true)
>>> phidp_raw = phidp_true + np.random.uniform(-1, 1, len(phidp_true))
>>> gaps = np.concatenate([range(10, 20), range(30, 40), range(60, 80)])
>>> phidp_raw[gaps] = np.nan
>>> kdp_re = wradlib.dp.kdp_from_phidp_convolution(phidp_raw)
>>> line1 = pl.plot(np.ma.masked_invalid(phidp_true), "b--", label="phidp_true") # noqa
>>> line2 = pl.plot(np.ma.masked_invalid(phidp_raw), "b-", label="phidp_raw") # noqa
>>> line3 = pl.plot(kdp_true, "g-", label="kdp_true")
>>> line4 = pl.plot(np.ma.masked_invalid(kdp_re), "r-", label="kdp_reconstructed") # noqa
>>> lgnd = pl.legend(("phidp_true", "phidp_raw", "kdp_true", "kdp_reconstructed")) # noqa
>>> pl.show()
"""
assert (L % 2) == 1, \
"Window size N for function kdp_from_phidp must be an odd number."
shape = phidp.shape
phidp = phidp.reshape((-1, shape[-1]))
# Make really sure L is an integer
L = int(L)
window = 2. * np.arange(L) / (L - 1.0) - 1.0
window = window / (abs(window).sum())
window = window[::-1]
kdp = convolve1d(phidp, window, axis=1) / (len(window) / 3.0)
# find remaining NaN values with valid neighbours
invalidkdp = np.isnan(kdp)
if not np.any(invalidkdp.ravel()):
# No NaN? Return KdP
return kdp.reshape(shape) / 2. / dr
# Otherwise continue
x = np.arange(phidp.shape[-1])
validphidp = ~np.isnan(phidp)
kernel = np.ones(L, dtype="i4")
# and do the slow moving window linear regression
for beam in range(len(phidp)):
# number of valid neighbours around one gate
nvalid = np.convolve(validphidp[beam], kernel, "same") > L / 2
# find those gates which have invalid Kdp AND enough valid neighbours
nangates = np.where(invalidkdp[beam] & nvalid)[0]
# now iterate over those
for r in nangates:
ix = np.arange(max(0, r - int(L / 2)),
min(r + int(L / 2) + 1, shape[-1]))
# check again (just to make sure...)
if np.sum(validphidp[beam, ix]) < L / 2:
# not enough valid values inside our window
continue
kdp[beam, r] = linregress(x[ix][validphidp[beam, ix]],
phidp[beam, ix[validphidp[beam, ix]]])[0]
# take care of the start and end of the beam
# start
ix = np.arange(0, L)
if np.sum(validphidp[beam, ix]) >= 2:
kdp[beam, 0:int(L / 2)] = linregress(x[ix][validphidp[beam, ix]],
phidp[beam,
ix[validphidp[beam,
ix]]])[0]
# end
ix = np.arange(shape[-1] - L, shape[-1])
if np.sum(validphidp[beam, ix]) >= 2:
kdp[beam, -int(L / 2):] = linregress(x[ix][validphidp[beam, ix]],
phidp[beam,
ix[validphidp[beam,
ix]]])[0]
# accounting for forward/backward propagation AND gate length
return kdp.reshape(shape) / 2. / dr
def unfold_phi(phidp, rho, width=5, copy=False):
"""
Unfolds differential phase by adjusting values that exceeded maximum
ambiguous range.
Accepts arbitrarily dimensioned arrays, but THE LAST DIMENSION MUST BE
THE RANGE.
This is the fast Fortran-based implementation (RECOMMENDED).
The algorithm is based on the paper of :cite:`Wang2009`.
Parameters
----------
phidp : array of shape (...,nr) with nr being the number of range bins
rho : array of same shape as phidp
width : integer
Width of the analysis window
copy : boolean
Leaves original phidp array unchanged if set to True (default: False)
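Examples
--------
Illustrative call with synthetic data (not part of the original docstring;
requires the optional Fortran speedup extension)::

    import numpy as np
    import wradlib
    phidp = np.random.uniform(0., 100., (360, 1000))
    rho = np.random.uniform(0.8, 1., (360, 1000))
    phidp_unfolded = wradlib.dp.unfold_phi(phidp, rho, width=5, copy=True)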
"""
# Check whether fast Fortran implementation is available
speedup = util.import_optional("wradlib.speedup")
shape = phidp.shape
assert rho.shape == shape, "rho and phidp must have the same shape."
phidp = phidp.reshape((-1, shape[-1]))
if copy:
phidp = phidp.copy()
rho = rho.reshape((-1, shape[-1]))
gradphi = gradient_from_smoothed(phidp)
beams, rs = phidp.shape
# Compute the standard deviation within windows of 9 range bins
stdarr = np.zeros(phidp.shape, dtype=np.float32)
for r in range(rs - 9):
stdarr[..., r] = np.std(phidp[..., r:r + 9], -1)
phidp = speedup.f_unfold_phi(phidp=phidp.astype("f4"),
rho=rho.astype("f4"),
gradphi=gradphi.astype("f4"),
stdarr=stdarr.astype("f4"),
beams=beams, rs=rs, w=width)
return phidp.reshape(shape)
def unfold_phi_naive(phidp, rho, width=5, copy=False):
"""
Unfolds differential phase by adjusting values that exceeded maximum
ambiguous range.
Accepts arbitrarily dimensioned arrays, but THE LAST DIMENSION MUST BE
THE RANGE.
This is the slow Python-based implementation (NOT RECOMMENDED).
The algorithm is based on the paper of :cite:`Wang2009`.
Parameters
----------
phidp : array of shape (...,nr) with nr being the number of range bins
rho : array of same shape as phidp
width : integer
Width of the analysis window
copy : boolean
Leaves original phidp array unchanged if set to True (default: False)
"""
shape = phidp.shape
assert rho.shape == shape, "rho and phidp must have the same shape."
phidp = phidp.reshape((-1, shape[-1]))
if copy:
phidp = phidp.copy()
rho = rho.reshape((-1, shape[-1]))
gradphi = gradient_from_smoothed(phidp)
beams, rs = phidp.shape
# Compute the standard deviation within windows of 9 range bins
stdarr = np.zeros(phidp.shape, dtype=np.float32)
for r in range(rs - 9):
stdarr[..., r] = np.std(phidp[..., r:r + 9], -1)
# phi_corr = np.zeros(phidp.shape)
for beam in range(beams):
if np.all(phidp[beam] == 0):
continue
# step 1: determine location where meaningful PhiDP profile begins
for j in range(0, rs - width):
if (np.sum(stdarr[beam, j:j + width] < 5) == width) and \
(np.sum(rho[beam, j:j + 5] > 0.9) == width):
break
ref = np.mean(phidp[beam, j:j + width])
for k in range(j + width, rs):
if np.sum(stdarr[beam, k - width:k] < 5) and \
np.logical_and(gradphi[beam, k] > -5,
gradphi[beam, k] < 20):
ref += gradphi[beam, k] * 0.5
if phidp[beam, k] - ref < -80:
if phidp[beam, k] < 0:
phidp[beam, k] += 360
elif phidp[beam, k] - ref < -80:
if phidp[beam, k] < 0:
phidp[beam, k] += 360
return phidp
def linear_despeckle(data, N=3, copy=False):
"""Remove floating pixels in between NaNs in a multi-dimensional array.
Warning
-------
This function changes the original input array if argument copy is set to
default (False).
Parameters
----------
data : multi-dimensional array
Note that the range dimension must be the last dimension of the
input array.
N : integer (must be either 3 or 5, 3 by default)
Width of the window in which we check for speckle
copy : Boolean
If True, the input array will remain unchanged.
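Examples
--------
A minimal, illustrative example (not part of the original docstring)::

    import numpy as np
    import wradlib
    data = np.array([np.nan, np.nan, 5., np.nan, np.nan, 10., 11., 12.])
    despeckled = wradlib.dp.linear_despeckle(data, N=3, copy=True)
    # the isolated 5. becomes NaN, the contiguous run 10., 11., 12. is kept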
"""
assert N in (3, 5), \
"Window size N for function linear_despeckle must be 3 or 5."
if copy:
data = data.copy()
axis = data.ndim - 1
arr = np.ones(data.shape, dtype="i4")
arr[np.isnan(data)] = 0
arr_plus1 = np.roll(arr, shift=1, axis=axis)
arr_minus1 = np.roll(arr, shift=-1, axis=axis)
if N == 3:
# for a window of size 3
test = arr + arr_plus1 + arr_minus1
data[np.logical_and(np.logical_not(np.isnan(data)), test < 2)] = np.nan
else:
# for a window of size 5
arr_plus2 = np.roll(arr, shift=2, axis=axis)
arr_minus2 = np.roll(arr, shift=-2, axis=axis)
test = arr + arr_plus1 + arr_minus1 + arr_plus2 + arr_minus2
data[np.logical_and(np.logical_not(np.isnan(data)), test < 3)] = np.nan
# remove isolated pixels at the first gate
secondgate = np.squeeze(np.take(data, range(1, 2), data.ndim - 1))
data[..., 0][np.isnan(secondgate)] = np.nan
return data
def texture(data):
"""
Compute the texture of the data by comparing values with a 3x3 neighborhood
(based on :cite:`Gourley2007`).
NaN values in the original array have NaN textures.
Parameters
----------
data : multi-dimensional array with shape (..., number of beams, number
of range bins)
Returns
-------
texture : array of textures with the same shape as data
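Examples
--------
Illustrative example (not part of the original docstring)::

    import numpy as np
    import wradlib
    data = np.random.normal(size=(360, 1000))
    tex = wradlib.dp.texture(data)  # same shape as data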
"""
x1 = np.roll(data, 1, -2) # center:2
x2 = np.roll(data, 1, -1) # 4
x3 = np.roll(data, -1, -2) # 8
x4 = np.roll(data, -1, -1) # 6
x5 = np.roll(x1, 1, -1) # 1
x6 = np.roll(x4, 1, -2) # 3
x7 = np.roll(x3, -1, -1) # 9
x8 = np.roll(x2, -1, -2) # 7
# at least one NaN would give a sum of NaN
xa = np.array([x1, x2, x3, x4, x5, x6, x7, x8])
# get count of valid neighboring pixels
xa_valid = np.ones(np.shape(xa))
xa_valid[np.isnan(xa)] = 0
# count number of valid neighbors
xa_valid_count = np.sum(xa_valid, axis=0)
num = np.zeros(data.shape)
for xarr in xa:
diff = data - xarr
# difference of NaNs will be converted to zero
# (to not affect the summation)
diff[np.isnan(diff)] = 0
# only those with valid values are considered in the summation
num += diff ** 2
# reinforce that NaN values should have NaN textures
num[np.isnan(data)] = np.nan
return np.sqrt(num / xa_valid_count)
def contiguous_regions(condition):
"""Finds contiguous True regions of the boolean array "condition".
This function was adapted from a StackOverflow answer proposed
by <NAME> in 2010 :cite:`Consecutive-values`.
Parameters
----------
condition : 1d boolean array
Returns
-------
output : a 2D array where the first column is the start index of the region
and the second column is the end index.
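Examples
--------
Illustrative example (not part of the original docstring)::

    import numpy as np
    condition = np.array([False, True, True, False, False, True, True, True])
    contiguous_regions(condition)
    # -> array([[1, 3],
    #           [5, 8]])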
"""
# Find the indices of changes in "condition"
d = np.diff(condition)
idx, = d.nonzero()
# We need to start things after the change in "condition". Therefore,
# we'll shift the index by 1 to the right.
idx += 1
if condition[0]:
# If the start of condition is True prepend a 0
idx = np.r_[0, idx]
if condition[-1]:
# If the end of condition is True, append the length of the array
idx = np.r_[idx, condition.size] # Edit
# Reshape the result into two columns
idx.shape = (-1, 2)
return idx
# TO UTILS
def medfilt_along_axis(x, N, axis=-1):
"""Applies median filter smoothing on one axis of an N-dimensional array.
"""
kernel_size = np.array(x.shape)
kernel_size[:] = 1
kernel_size[axis] = N
return medfilt(x, kernel_size)
# TO UTILS
def gradient_along_axis(x):
"""Computes gradient along last axis of an N-dimensional array
"""
axis = -1
newshape = np.array(x.shape)
import pyart
import h5py
import numpy as np
import math as m
from scipy import ndimage
"""
dualprf_cor
===========
Correct dual-PRF dealiasing errors
correct_dualprf
fold_circular
instrument_parameters_odim5
local_cmean
local_mean
local_median
local_valid
_add_vcor_field
_dualprf_error_unwrap
_dummy_cols
_get_prf_pars
_get_prf_pars_odimh5
_mask_diff_above
_min_valid_mask
_prf_factor_array
_prf_hl_kernels
_sign_array
_vel_ref
_vref_cmean_sc
"""
def correct_dualprf(radar, method_det, vel_field='velocity',
kernel_det=np.ones((7,7)), min_valid_det=1,
max_dev=1.0, two_step=True, method_cor=None,
kernel_cor=None, min_valid_cor=1, new_field='velocity_cor',
replace=False, new_field_name='velocity_cor',
new_field_lname='Dual-PRF outlier corrected velocity'):
"""
Correction of dual-PRF outliers in radar velocity data.
Includes the corrected field in the input radar object.
Available reference statistics:
'mean' : local mean velocity (Joe and May, 2003)
'median' : local median velocity (Holleman and Beekhuis, 2003)
'cmean_sc' : local circular mean velocity (PRF-scaled) (Altube et al., 2017)
'cmean' : local circular mean velocity (Hengstebeck et al., 2018)
Parameters
----------
radar : Radar
Py-ART radar structure
method_det : str
Detection method
vel_field: str
Input velocity field name (dual-PRF)
kernel_det : array
Neighbour kernel, 1/0 values (detection), if None a 7x7 ones array
is used, excluding the central value
min_valid_det : int
Minimum number of valid neighbours (detection)
max_dev : float
Maximum deviation threshold (detection)
two_step : bool
Whether to separate detection and correction stages
method_cor : str or None
Correction method, if None, method_det is used (except in the case of
'cmean_sc', for which 'cmean' is used by default, due to error
propagation issues when PRF scaling)
kernel_cor : array
Neighbour kernel 1/0 values (correction), if None, kernel_det is used
min_valid_cor : int
Minimum number of valid neighbours (correction)
new_field : str
Output (corrected) velocity field name
replace : bool
Whether to replace input field
new_field_name : str
Output (corrected) velocity field standard name
new_field_lname : str
Output (corrected) velocity field long name
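Examples
--------
Illustrative usage (the file name and field names are placeholders; the
input volume must contain dual-PRF metadata)::

    import pyart
    radar = pyart.io.read('dualprf_volume.nc')
    correct_dualprf(radar, method_det='cmean_sc',
                    vel_field='velocity', new_field='velocity_cor')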
"""
vcorr = radar.fields[vel_field]['data'].copy()
# Dual-PRF parameters
v_ny, prf_h, prf_factor, prf_flag = _get_prf_pars(radar)
prf_factor = _prf_factor_array(radar)
# primary velocities
vp = v_ny/prf_factor
for sw, sw_slice in enumerate(radar.iter_slice()):
v_sw = radar.fields[vel_field]['data'][sw_slice]
vp_sw = vp[sw_slice]
prf_factor_sw = prf_factor[sw_slice]
# ERROR DETECTION
# Reference velocities at each gate
vref_det = _vel_ref(data_ma=v_sw, method=method_det,
kernel=kernel_det, v_ny=v_ny,
mask=None,
prf_factor_arr=prf_factor_sw,
min_valid=min_valid_det)
if (method_det=='cmean')|(method_det=='cmean_sc'):
# Calculate difference in phase space
ph_obs = v_sw*(m.pi/v_ny)
ph_ref = vref_det*(m.pi/v_ny)
ph_diff = ph_obs - ph_ref
diff_ma = (v_ny/m.pi)*np.ma.arctan2(np.ma.sin(ph_diff), np.ma.cos(ph_diff))
else:
diff_ma = v_sw - vref_det
# Outlier mask
err_mask = _mask_diff_above(diff_ma=diff_ma, th_ma=max_dev*vp_sw)
if two_step:
if kernel_cor is None:
kernel_cor = kernel_det
if method_cor is None:
if (method_det=='cmean')|(method_det=='cmean_sc'):
method_cor = 'median'
else:
method_cor = method_det
vref_cor = _vel_ref(data_ma=v_sw, method=method_cor,
kernel=kernel_cor, v_ny=v_ny,
mask=err_mask,
prf_factor_arr=prf_factor_sw,
min_valid=min_valid_cor)
else:
vref_cor = vref_det
# ERROR CORRECTION
# Unwrap number and corrected velocity field
uwp = _dualprf_error_unwrap(data_ma=v_sw, ref_ma=vref_cor,
err_mask=err_mask, pvel_arr=vp_sw,
prf_arr=prf_factor_sw)
# Correct velocity field
vc = v_sw + 2 * uwp * vp_sw
# Fold velocity values into Nyquist interval
vcorr[sw_slice] = fold_circular(data_ma=vc, mod=v_ny)
# ADD CORRECTED VELOCITY FIELD
_add_vcor_field(radar, field_i=vel_field, field_o=new_field,
data=vcorr, std_name=new_field_name,
long_name=new_field_lname, replace=replace)
def fold_circular(data_ma, mod):
"""
Values outside the specified interval are folded back into
the interval.
Parameters
----------
data_ma : masked array
Data
mod: float
Interval (modulus)
Returns
-------
ma_fold : masked array
Folded data
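Examples
--------
Illustrative example (not part of the original docstring)::

    import numpy as np
    v = np.ma.array([3., -9., 14.])
    fold_circular(v, mod=8.)
    # -> roughly [3., 7., -2.]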
"""
# Phase space
ph = data_ma*m.pi/mod
ph_fold = np.ma.arctan2(np.ma.sin(ph), np.ma.cos(ph))
# Back to original variable
ma_fold = ph_fold*mod/m.pi
return ma_fold
def instrument_parameters_odim5(radar, odim_file):
"""
Builds the dictionary 'instrument_parameters' in the radar instance,
using the parameter metadata in the input odim5 file.
Parameters
----------
radar : Radar
Py-ART radar structure
odim_file : str
Complete path and filename of input file
Returns
-------
radar : Radar
Py-ART radar structure with added 'instrument_parameters' dictionary.
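Examples
--------
Illustrative usage (the ODIM HDF5 file name is a placeholder)::

    import pyart
    radar = pyart.aux_io.read_odim_h5('volume.h5')
    radar = instrument_parameters_odim5(radar, 'volume.h5')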
"""
ny, prt, prt_mode, prt_ratio, prf_flag = _get_prf_pars_odimh5(odim_file, nrays=radar.nrays,
nsweeps=radar.nsweeps, sw_start_end=radar.get_start_end)
# Create dictionaries
mode_dict = {'comments': 'Pulsing mode Options are: "fixed", "staggered", "dual". Assumed "fixed" if missing.',
'meta_group': 'instrument_parameters',
'long_name': 'Pulsing mode',
'units': 'unitless',
'data': prt_mode}
prt_dict = {'units': 'seconds',
'comments': 'Pulse repetition time. For staggered prt, also see prt_ratio.',
'meta_group': 'instrument_parameters',
'long_name': 'Pulse repetition time',
'data': prt}
ratio_dict = {'units': 'unitless',
'meta_group': 'instrument_parameters',
'long_name': 'Pulse repetition frequency ratio',
'data': prt_ratio}
ny_dict = {'units': 'meters_per_second',
'comments': 'Unambiguous velocity',
'meta_group': 'instrument_parameters',
'long_name': 'Nyquist velocity',
'data': ny}
flag_dict = {'units': 'unitless',
'comments': 'PRF used to collect ray. 0 for high PRF, 1 for low PRF.',
'meta_group': 'instrument_parameters',
'long_name': 'PRF flag',
'data': prf_flag}
# add metadata in radar object:
radar.instrument_parameters = {'nyquist_velocity':ny_dict, 'prt':prt_dict,
'prt_ratio':ratio_dict, 'prt_mode':mode_dict,
'prf_flag':flag_dict}
return radar
def local_cmean(data_ma, kernel):
"""
Calculates local circular mean of a masked array;
edges are wrapped in azimuth and padded with NA in range.
Parameters
----------
data_ma : masked array
Data
kernel : array
Local neighbour kernel, 1/0 values
Returns
-------
cmean_ma : masked array
Local circular mean of the data.
"""
# Arrays of trigonometric variables
cos_ma = np.ma.cos(data_ma)
sin_ma = np.ma.sin(data_ma)
from __future__ import print_function
import astropy.units as astropy_units
import numpy as np
import six
from scipy.interpolate import RegularGridInterpolator
from astromodels.functions.function import Function1D, FunctionMeta
from astromodels.utils import _get_data_file_path
from astromodels.utils.logging import setup_logger
log = setup_logger(__name__)
class DMFitFunction(Function1D, metaclass=FunctionMeta):
r"""
description :
Class that evaluates the spectrum for a DM particle of a given
mass, channel, cross section, and J-factor. Based on standard
Fermi Science Tools function DMFitFunction. Note that the input table only
contains spectra calculated up to an m_DM of 10 TeV
The parameterization is given by
F(x) = 1 / (8 * pi) * (1/mass^2) * sigmav * J * dN/dE(E,mass,i)
latex : $$
parameters :
mass :
desc : DM mass (GeV)
initial value : 10
fix : yes
channel :
desc : DM annihilation channel
initial value : 4
fix : yes
sigmav :
desc : DM annihilation cross section (cm^3/s)
initial value : 1.e-26
J :
desc : Target total J-factor (GeV^2 cm^-5)
initial value : 1.e20
fix : yes
"""
def _setup(self):
tablepath = _get_data_file_path("dark_matter/gammamc_dif.dat")
self._data = np.loadtxt(tablepath)
"""
Mapping between the channel codes and the rows in the gammamc file
1 : 8, # ee
2 : 6, # mumu
3 : 3, # tautau
4 : 1, # bb
5 : 2, # tt
6 : 7, # gg
7 : 4, # ww
8 : 5, # zz
9 : 0, # cc
10 : 10, # uu
11 : 11, # dd
12 : 9, # ss
"""
channel_index_mapping = {
1: 8, # ee
2: 6, # mumu
3: 3, # tautau
4: 1, # bb
5: 2, # tt
6: 7, # gg
7: 4, # ww
8: 5, # zz
9: 0, # cc
10: 10, # uu
11: 11, # dd
12: 9, # ss
}
# Number of decades in x = log10(E/M)
ndec = 10.0
xedge = np.linspace(0, 1.0, 251)
self._x = 0.5 * (xedge[1:] + xedge[:-1]) * ndec - ndec
ichan = channel_index_mapping[int(self.channel.value)]
# These are the mass points
self._mass = np.array(
[
2.0,
4.0,
6.0,
8.0,
10.0,
25.0,
50.0,
80.3,
91.2,
100.0,
150.0,
176.0,
200.0,
250.0,
350.0,
500.0,
750.0,
1000.0,
1500.0,
2000.0,
3000.0,
5000.0,
7000.0,
1e4,
]
)
self._dn = self._data.reshape((12, 24, 250))
self._dn_interp = RegularGridInterpolator(
[self._mass, self._x],
self._dn[ichan, :, :],
bounds_error=False,
fill_value=None,
)
if self.mass.value > 10000:
print("Warning: DMFitFunction only appropriate for masses <= 10 TeV")
print("To model DM from 2 GeV < mass < 1 PeV use DMSpectra")
def _set_units(self, x_unit, y_unit):
# Usually a model should not assume fixed units for energy or anything else. However,
# in this case this model is so specialistic that we can assume GeV
self.mass.unit = astropy_units.GeV
self.channel.unit = astropy_units.dimensionless_unscaled
self.sigmav.unit = astropy_units.cm ** 3 / astropy_units.s
self.J.unit = astropy_units.GeV ** 2 / astropy_units.cm ** 5
def print_channel_mapping(self):
channel_mapping = {
1: "ee",
2: "mumu",
3: "tautau",
4: "bb",
5: "tt",
6: "gg",
7: "ww",
8: "zz",
9: "cc",
10: "uu",
11: "dd",
12: "ss",
}
print(channel_mapping)
return channel_mapping
# noinspection PyPep8Naming
def evaluate(self, x, mass, channel, sigmav, J):
if isinstance(x, astropy_units.Quantity):
# We need to convert to GeV
xx = x.to(astropy_units.GeV)
else:
# We can assume that the input is in keV
keVtoGeV = 1e-6
# xm expects gamma-ray energies in GeV
xx = np.multiply(x, keVtoGeV)
xm = np.log10(np.divide(xx, mass))
phip = (
1.0 / (8.0 * np.pi) * np.power(mass, -2) * (sigmav * J)
) # units of this should be 1 / cm**2 / s
dn = self._dn_interp((mass, xm))
dn[xm > 0] = 0
return np.multiply(phip, np.divide(dn, x))
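# Illustrative usage of DMFitFunction (not from the original source; the
# parameter values below are placeholders):
#
#   dm = DMFitFunction()
#   dm.mass = 100.0        # GeV
#   dm.channel = 4         # bb
#   dm.sigmav = 3e-26      # cm^3 / s
#   dm.J = 1e19            # GeV^2 cm^-5
#   energies_keV = np.logspace(3, 8, 100)
#   flux = dm(energies_keV)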
class DMSpectra(Function1D, metaclass=FunctionMeta):
r"""
description :
Class that evaluates the spectrum for a DM particle of a given
mass, channel, cross section, and J-factor. Combines Pythia-based tables
from both Fermi (2 GeV < m_DM < 10 TeV) and HAWC (10 TeV < m_dm < 1 PeV)
The parameterization is given by
F(x) = 1 / (8 * pi) * (1/mass^2) * sigmav * J * dN/dE(E,mass,i)
Note that this class assumes that mass and J-factor are provided
in units of GeV and GeV^2 cm^-5
latex : $$
parameters :
mass :
desc : DM mass (GeV)
initial value : 10
fix : yes
channel :
desc : DM annihilation channel
initial value : 4
fix : yes
sigmav :
desc : DM annihilation cross section (cm^3/s)
initial value : 1.e-26
J :
desc : Target total J-factor (GeV^2 cm^-5)
initial value : 1.e20
fix : yes
"""
def _setup(self):
# Get and open the two data files
tablepath_h = _get_data_file_path("dark_matter/dmSpecTab.npy")
self._data_h = np.load(tablepath_h)
import numpy as np
def compute_fans(shape):
if len(shape) == 2:
fan_in, fan_out = shape[0], shape[1]
else:
fan_in, fan_out = np.prod(shape[1:]), shape[0]
return fan_in, fan_out
class initializer(object):
def __call__(self, shape):
return self.init(shape).astype(np.float32)
def init(self, shape):
raise NotImplementedError
class constant(initializer):
def __init__(self, val):
self._val = val
def init(self, shape):
return np.full(shape=shape, fill_value=self._val).astype(np.float32)
class zeros(constant):
def __init__(self):
super(zeros, self).__init__(0.0)
class xavieruniform(initializer):
def __init__(self, gain=1.0):
self._gain = gain
def init(self, shape):
fan_in, fan_out = compute_fans(shape)
a = self._gain * np.sqrt(6.0 / (fan_in + fan_out))
return np.random.uniform(low=-a, high=a, size=shape)
import numpy as np
import matplotlib.pyplot as plt
populationSize = int(input('Enter population size:\n'))
generations = 200
# Hyperparameters for mutation and crossover
CROSS_RATE = 0.7
MUTATE_RATE = 0.001
# The state space: a 2D grid with a start point, an end point and an obstacle
class environment():
def __init__(self, start, end, obs):
self.start = start
self.end = end
self.obs = obs
def plotenv(self):
plt.scatter(*self.start)
plt.scatter(*self.end)
plt.plot([self.obs[0][0], self.obs[1][0]],
[self.obs[0][1], self.obs[1][1]])
plt.ion()
# plt.show()
# Normalise population and convert into [0,.25]
def normPop(population, start, region):
population = (population - 0.5) / 2
population[:, 0], population[:, region] = start[0], start[1]
x = np.cumsum(population[:, region:], axis=1)
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 7 09:58:55 2021
@author: emari
"""
import numpy as np
import pandas as pd
class node():
def __init__(self):
self.parent_node = ""
self.child_connections = [np.array([], dtype=object)
import numpy as np
from xrcnn.util import bbox as B
class Anchor:
def __init__(self, config):
# def __init__(self, base_size=16,
# anchor_ratios=[
# (1. / math.sqrt(2), 2. / math.sqrt(2)),
# (1., 1.),
# (2. / math.sqrt(2), 1. / math.sqrt(2))],
# anchor_scales=[128 / 4, 256 / 4, 512 / 4],
# backbone_shape=[64 / 4, 64 / 4]):
"""RoI予測の基準となるアンカーを生成する。
アンカーの基準となる値を指定する。
Args:
base_size (number): アンカーを適用する特徴マップ1ピクセルが、入力画像において何ピクセルに値するか。
anchor_ratios (list of float): アンカーのアスペクト比。
:math:`[(h, w), ...]`
anchor_scales (list of numbers): アンカーのサイズ(入力画像におけるサイズ)。
このサイズの正方形をアンカーの領域とする。
anchor_ratios (list of numbers): アンカーのアスペクト比
"""
self.base_size = config.stride_per_base_nn_feature
self.backbone_shape = config.backbone_shape
self.anchor_ratios = config.anchor_box_aspect_ratios
self.anchor_scales = config.anchor_box_scales
self.bbox_refinement_std = config.bbox_refinement_std
self.anchor_base = self._anchor_base(
self.base_size, self.anchor_ratios, self.anchor_scales)
self.anchors = self._generate_anchors(self.backbone_shape)
def generate_gt_offsets(self, bbox_gt, img_size,
pos_iou_thresh=0.5,
neg_iou_thresh=0.3,
n_max_sample=256,
pos_ratio=0.5):
"""anchorにGroud truthなBBoxを適用し、anchor毎に最もIoUが大きいBBoxを特定し、そのBBoxとのオフセットを得る。
IoU値により、各アンカーを以下に分類する。
0.7以上:オブジェクト
→0.5にする。
0.7だとVOCdevkit/VOC2007/Annotations/007325.xmlにあるようなサイズのBboxが
GTとして得られなかったため。
0.3未満:非オブジェクト
それ以外:評価対象外。つまり、トレーニングには使わないアンカー。
Args:
bbox_gt (array): Ground truthなBBox
Its shape is :math:`(R, 4)`.
img_size (h,w): 入力画像の高さと幅のタプル.
pos_iou_thresh: この値以上のIoUをclass=1とする。
pos_iou_thresh: この値未満のIoUをclass=0とする。
n_max_sample: 評価対象とする(classが1or0である)オフセットの上限
pos_ratio: 評価対象サンプル中のPositiveの割合
n_max_sample, pos_ratioは論文中の以下への対応。
考慮無しではNegativeサンプルが支配的になる。学習効率も考慮し、このような処理を行うものと思われる。
Each mini-batch arises from a single image that contains many
positive and negative example anchors. It is possible to
optimize for the loss functions of all anchors,
but this will bias towards negative samples as they are
dominate. Instead, we randomly sample 256 anchors in an image
to compute the loss function of a mini-batch, where the sampled
positive and negative anchors have a ratio of up to 1:1.
If there are fewer than 128 positive samples in an image,
we pad the mini-batch with negative ones.
Returns:
(offsets, obj_flags):
offsets (array) : 各アンカーとGround TruthなBBoxとのオフセット。
Its shape is :math:`(S, 4)`.
2軸目の内容は以下の通り。
(x, y ,h, w)
objects (array): 各アンカーがオブジェクトか否か。
Its shape is :math:`(S, 1)`.
2軸目の内容は以下の通り。
1:オブジェクト
0:非オブジェクト
−1:評価対象外
"""
h, w = img_size
anchor = self.anchors
n_anchor_initial = len(anchor)
# Exclude anchors that extend beyond the input region
index_inside = np.where(
(anchor[:, 0] >= 0) &
(anchor[:, 1] >= 0) &
(anchor[:, 2] <= h) &
(anchor[:, 3] <= w)
)[0]
anchor = anchor[index_inside]
# For each anchor, compute the IoU with the GT boxes and keep the maximum
# (or IoUs of 0.7 or more).
# IoU >= 0.7: object candidate (class = 1)
# IoU < 0.3: non-object candidate (class = 0)
# any other IoU: not evaluated (class = -1)
argmax_ious, objects = self._create_label(anchor, bbox_gt,
pos_iou_thresh,
neg_iou_thresh,
n_max_sample,
pos_ratio)
# Obtain the offsets between the anchors and the ground truth.
offsets = B.get_offset(anchor, bbox_gt[argmax_ious])
# Accuracy improvement to match the existing implementation
offsets /= np.array(self.bbox_refinement_std)
# -*- coding: utf-8 -*-
"""
Electrostatics simulations using the Finite Difference Method.
Created on Fri Apr 25 16:17:46 2014
@author: leo
"""
import matplotlib.pyplot as plt
import numpy as np
import libs.MDF as m
plt.subplot(2,2,1)
ex1 = m.MDF(np.arange(0,10,.5)
'''
adapted from Section 3.1 in Lee and Kitanidis WRR 2014
Note: this is not a reproduction of the paper!!!
'''
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
from pyPCGA import PCGA
import mf
import math
import datetime as dt
import os
import sys
# model domain and discretization
Lx = 1000.; Ly = 750.; Lz = 1; nlay = 1; nrow = 75; ncol = 100
Q = 25.; Rch = 0.001
ztop = 0.; zbot = -1.
# seems confusing considering flopy notation, remember python array ordering of col, row and lay
N = np.array([ncol, nrow, nlay])
m = np.prod(N)
dx = np.array([10., 10., 1.])
xmin = np.array([0. + dx[0] / 2., 0. + dx[1] / 2., 0. + dx[2] / 2.])
xmax = np.array([Lx - dx[0] / 2., Ly - dx[1] / 2., Lz - dx[2] / 2.])
# parameters
if os.name == 'nt':
mf_exec = 'mf2005.exe'
elif sys.platform == 'darwin':
mf_exec = 'mf2005_mac'
else:
mf_exec = 'mf2005'
input_dir = "./input_files"
sim_dir = './simul'
# location of observations
obs_locmat = np.zeros((nlay, nrow, ncol), bool)
for i in range(5, 71, 16):
for j in range(9, 96, 16):
obs_locmat[0, i, j] = 1
# Hydraulic tomography - crosswell pumping test setting
Q_locs_idx = np.where(obs_locmat == True)
Q_locs = []
for Q_loc in zip(Q_locs_idx[0], Q_locs_idx[1], Q_locs_idx[2]):
Q_locs.append(Q_loc)
# covariance kernel and scale parameters
prior_std = 1.0
prior_cov_scale = np.array([200., 200., 1.])
def kernel(r): return (prior_std ** 2) * np.exp(-r)
# for plotting
x = np.linspace(0. + dx[0] / 2., Lx - dx[0] / 2., N[0])
y = np.linspace(0. + dx[1] / 2., Ly - dx[1] / 2., N[1])
XX, YY = np.meshgrid(x, y)
pts = np.hstack((XX.ravel()[:, np.newaxis], YY.ravel()[:, np.newaxis]))
# load true value for comparison purpose
s_true = np.loadtxt('true_logK.txt')
s_true = np.array(s_true).reshape(-1, 1) # make it 2D array
obs = np.loadtxt('obs.txt')
mf_params = {'mf_exec': mf_exec, 'input_dir': input_dir,
'sim_dir': sim_dir,
'Lx': Lx, 'Ly': Ly,
'Q': Q, 'Rch': Rch,
'nlay': nlay, 'nrow': nrow, 'ncol': ncol,
'zbot': zbot, 'ztop': ztop,
'obs_locmat': obs_locmat, 'Q_locs': Q_locs}
# prepare interface to run as a function
def forward_model(s, parallelization, ncores=None):
model = mf.Model(mf_params)
if parallelization:
simul_obs = model.run(s, parallelization, ncores)
else:
simul_obs = model.run(s, parallelization)
return simul_obs
params = {'R': (0.5) ** 2, 'n_pc': 50,
'maxiter': 10, 'restol': 0.01,
'matvec': 'FFT', 'xmin': xmin, 'xmax': xmax, 'N': N,
'prior_std': prior_std, 'prior_cov_scale': prior_cov_scale,
'kernel': kernel, 'post_cov': "diag",
'precond': True, 'LM': True, #'LM_smin' : 1.0, 'LM_smax' : 4.0,
'parallel': True, 'linesearch': True,
'forward_model_verbose': False, 'verbose': False,
'iter_save': True}
# params['objeval'] = False, if true, it will compute accurate objective function
# params['ncores'] = 36, with parallell True, it will determine maximum physcial core unless specified
s_init = np.ones((m, 1))
# s_init = np.copy(s_true) # you can try with s_true!
# initialize
prob = PCGA(forward_model, s_init, pts, params, s_true, obs)
# prob = PCGA(forward_model, s_init, pts, params, s_true, obs, X = X) #if you want to add your own drift X
# run inversion
s_hat, simul_obs, post_diagv, iter_best = prob.Run()
# plotting results
s_hat3d = s_hat.reshape(nlay, nrow, ncol)
s_hat2d = s_hat3d[0,:,:]
s_true3d = s_true.reshape(nlay, nrow, ncol)
s_true2d = s_true3d[0,:,:]
post_diagv[post_diagv < 0.] = 0. # just in case
post_std = np.sqrt(post_diagv)
post_std3d = post_std.reshape(nlay, nrow, ncol)
post_std2d = post_std3d[0,:,:]
minv = s_true.min()
maxv = s_true.max()
# best result
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
plt.suptitle('prior var.: (%g)^2, n_pc : %d' % (prior_std, params['n_pc']))
im = axes[0].pcolormesh(XX,YY,s_true2d, vmin=minv, vmax=maxv, cmap=plt.get_cmap('jet'))
axes[0].set_title('(a) True', loc='left')
axes[0].set_aspect('equal')
axes[0].set_xlabel('x (m)')
axes[0].set_ylabel('y (m)')
axes[0].axis([XX.min(), XX.max(), YY.min(), YY.max()])
axes[1].pcolormesh(XX, YY, s_hat2d, vmin=minv, vmax=maxv, cmap=plt.get_cmap('jet'))
axes[1].set_title('(b) Estimate', loc='left')
axes[1].set_xlabel('x (m)')
axes[1].set_aspect('equal')
axes[1].axis([XX.min(), XX.max(), YY.min(), YY.max()])
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(im, cax=cbar_ax)
fig.savefig('best.png')
plt.close(fig)
# uncertainty
fig = plt.figure()
im = plt.pcolormesh(XX,YY,post_std2d, cmap=plt.get_cmap('jet'))
plt.axis([XX.min(), XX.max(), YY.min(), YY.max()])
plt.title('Uncertainty (std)', loc='left')
plt.xlabel('x (m)')
plt.ylabel('y (m)')
plt.gca().set_aspect('equal', adjustable='box')
fig.colorbar(im)
fig.savefig('std.png')
plt.close(fig)
# observation mismatch
nobs = prob.obs.shape[0]
fig = plt.figure()
plt.title('obs. vs simul.')
plt.plot(prob.obs, simul_obs, '.')
plt.xlabel('observation')
plt.ylabel('simulation')
minobs = np.vstack((prob.obs, simul_obs)).min(0)
maxobs = np.vstack((prob.obs, simul_obs)).max(0)
plt.plot(np.linspace(minobs, maxobs, 20)
import itertools
from distutils.version import LooseVersion
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import to_rgb, to_rgba
import pytest
from numpy.testing import assert_array_equal, assert_array_almost_equal
from .. import distributions as dist
from ..palettes import (
color_palette,
light_palette,
)
from .._core import (
categorical_order,
)
from .._statistics import (
KDE,
Histogram,
_no_scipy,
)
from ..distributions import (
_DistributionPlotter,
displot,
distplot,
histplot,
ecdfplot,
kdeplot,
rugplot,
)
from ..axisgrid import FacetGrid
from .._testing import (
assert_plots_equal,
assert_legends_equal,
assert_colors_equal,
)
class TestDistPlot(object):
rs = np.random.RandomState(0)
x = rs.randn(100)
def test_hist_bins(self):
fd_edges = np.histogram_bin_edges(self.x, "fd")
with pytest.warns(FutureWarning):
ax = distplot(self.x)
for edge, bar in zip(fd_edges, ax.patches):
assert pytest.approx(edge) == bar.get_x()
plt.close(ax.figure)
n = 25
n_edges = np.histogram_bin_edges(self.x, n)
with pytest.warns(FutureWarning):
ax = distplot(self.x, bins=n)
for edge, bar in zip(n_edges, ax.patches):
assert pytest.approx(edge) == bar.get_x()
def test_elements(self):
with pytest.warns(FutureWarning):
n = 10
ax = distplot(self.x, bins=n,
hist=True, kde=False, rug=False, fit=None)
assert len(ax.patches) == 10
assert len(ax.lines) == 0
assert len(ax.collections) == 0
plt.close(ax.figure)
ax = distplot(self.x,
hist=False, kde=True, rug=False, fit=None)
assert len(ax.patches) == 0
assert len(ax.lines) == 1
assert len(ax.collections) == 0
plt.close(ax.figure)
ax = distplot(self.x,
hist=False, kde=False, rug=True, fit=None)
assert len(ax.patches) == 0
assert len(ax.lines) == 0
assert len(ax.collections) == 1
class Norm:
"""Dummy object that looks like a scipy RV"""
def fit(self, x):
return ()
def pdf(self, x, *params):
return np.zeros_like(x)
plt.close(ax.figure)
ax = distplot(
self.x, hist=False, kde=False, rug=False, fit=Norm())
assert len(ax.patches) == 0
assert len(ax.lines) == 1
assert len(ax.collections) == 0
def test_distplot_with_nans(self):
f, (ax1, ax2) = plt.subplots(2)
x_null = np.append(self.x, [np.nan])
with pytest.warns(FutureWarning):
distplot(self.x, ax=ax1)
distplot(x_null, ax=ax2)
line1 = ax1.lines[0]
line2 = ax2.lines[0]
assert np.array_equal(line1.get_xydata(), line2.get_xydata())
for bar1, bar2 in zip(ax1.patches, ax2.patches):
assert bar1.get_xy() == bar2.get_xy()
assert bar1.get_height() == bar2.get_height()
class SharedAxesLevelTests:
def test_color(self, long_df, **kwargs):
ax = plt.figure().subplots()
self.func(data=long_df, x="y", ax=ax, **kwargs)
assert_colors_equal(self.get_last_color(ax, **kwargs), "C0", check_alpha=False)
ax = plt.figure().subplots()
self.func(data=long_df, x="y", ax=ax, **kwargs)
self.func(data=long_df, x="y", ax=ax, **kwargs)
assert_colors_equal(self.get_last_color(ax, **kwargs), "C1", check_alpha=False)
ax = plt.figure().subplots()
self.func(data=long_df, x="y", color="C2", ax=ax, **kwargs)
assert_colors_equal(self.get_last_color(ax, **kwargs), "C2", check_alpha=False)
class TestRugPlot(SharedAxesLevelTests):
func = staticmethod(rugplot)
def get_last_color(self, ax, **kwargs):
return ax.collections[-1].get_color()
def assert_rug_equal(self, a, b):
assert_array_equal(a.get_segments(), b.get_segments())
@pytest.mark.parametrize("variable", ["x", "y"])
def test_long_data(self, long_df, variable):
vector = long_df[variable]
vectors = [
variable, vector, np.asarray(vector), vector.to_list(),
]
f, ax = plt.subplots()
for vector in vectors:
rugplot(data=long_df, **{variable: vector})
for a, b in itertools.product(ax.collections, ax.collections):
self.assert_rug_equal(a, b)
def test_bivariate_data(self, long_df):
f, (ax1, ax2) = plt.subplots(ncols=2)
rugplot(data=long_df, x="x", y="y", ax=ax1)
rugplot(data=long_df, x="x", ax=ax2)
rugplot(data=long_df, y="y", ax=ax2)
self.assert_rug_equal(ax1.collections[0], ax2.collections[0])
self.assert_rug_equal(ax1.collections[1], ax2.collections[1])
def test_wide_vs_long_data(self, wide_df):
f, (ax1, ax2) = plt.subplots(ncols=2)
rugplot(data=wide_df, ax=ax1)
for col in wide_df:
rugplot(data=wide_df, x=col, ax=ax2)
wide_segments = np.sort(
np.array(ax1.collections[0].get_segments())
)
long_segments = np.sort(
np.concatenate([c.get_segments() for c in ax2.collections])
)
assert_array_equal(wide_segments, long_segments)
def test_flat_vector(self, long_df):
f, ax = plt.subplots()
rugplot(data=long_df["x"])
rugplot(x=long_df["x"])
self.assert_rug_equal(*ax.collections)
def test_datetime_data(self, long_df):
ax = rugplot(data=long_df["t"])
vals = np.stack(ax.collections[0].get_segments())[:, 0, 0]
assert_array_equal(vals, mpl.dates.date2num(long_df["t"]))
def test_empty_data(self):
ax = rugplot(x=[])
assert not ax.collections
def test_a_deprecation(self, flat_series):
f, ax = plt.subplots()
with pytest.warns(FutureWarning):
rugplot(a=flat_series)
rugplot(x=flat_series)
self.assert_rug_equal(*ax.collections)
@pytest.mark.parametrize("variable", ["x", "y"])
def test_axis_deprecation(self, flat_series, variable):
f, ax = plt.subplots()
with pytest.warns(FutureWarning):
rugplot(flat_series, axis=variable)
rugplot(**{variable: flat_series})
self.assert_rug_equal(*ax.collections)
def test_vertical_deprecation(self, flat_series):
f, ax = plt.subplots()
with pytest.warns(FutureWarning):
rugplot(flat_series, vertical=True)
rugplot(y=flat_series)
self.assert_rug_equal(*ax.collections)
def test_rug_data(self, flat_array):
height = .05
ax = rugplot(x=flat_array, height=height)
segments = np.stack(ax.collections[0].get_segments())
n = flat_array.size
assert_array_equal(segments[:, 0, 1], np.zeros(n))
assert_array_equal(segments[:, 1, 1], np.full(n, height))
assert_array_equal(segments[:, 1, 0], flat_array)
def test_rug_colors(self, long_df):
ax = rugplot(data=long_df, x="x", hue="a")
order = categorical_order(long_df["a"])
palette = color_palette()
expected_colors = np.ones((len(long_df), 4))
for i, val in enumerate(long_df["a"]):
expected_colors[i, :3] = palette[order.index(val)]
assert_array_equal(ax.collections[0].get_color(), expected_colors)
def test_expand_margins(self, flat_array):
f, ax = plt.subplots()
x1, y1 = ax.margins()
rugplot(x=flat_array, expand_margins=False)
x2, y2 = ax.margins()
assert x1 == x2
assert y1 == y2
f, ax = plt.subplots()
x1, y1 = ax.margins()
height = .05
rugplot(x=flat_array, height=height)
x2, y2 = ax.margins()
assert x1 == x2
assert y1 + height * 2 == pytest.approx(y2)
def test_matplotlib_kwargs(self, flat_series):
lw = 2
alpha = .2
ax = rugplot(y=flat_series, linewidth=lw, alpha=alpha)
rug = ax.collections[0]
assert np.all(rug.get_alpha() == alpha)
assert np.all(rug.get_linewidth() == lw)
def test_axis_labels(self, flat_series):
ax = rugplot(x=flat_series)
assert ax.get_xlabel() == flat_series.name
assert not ax.get_ylabel()
class TestKDEPlotUnivariate(SharedAxesLevelTests):
func = staticmethod(kdeplot)
def get_last_color(self, ax, fill=True):
if fill:
return ax.collections[-1].get_facecolor()
else:
return ax.lines[-1].get_color()
@pytest.mark.parametrize("fill", [True, False])
def test_color(self, long_df, fill):
super().test_color(long_df, fill=fill)
if fill:
ax = plt.figure().subplots()
self.func(data=long_df, x="y", facecolor="C3", fill=True, ax=ax)
assert_colors_equal(self.get_last_color(ax), "C3", check_alpha=False)
ax = plt.figure().subplots()
self.func(data=long_df, x="y", fc="C4", fill=True, ax=ax)
assert_colors_equal(self.get_last_color(ax), "C4", check_alpha=False)
@pytest.mark.parametrize(
"variable", ["x", "y"],
)
def test_long_vectors(self, long_df, variable):
vector = long_df[variable]
vectors = [
variable, vector, vector.to_numpy(), vector.to_list(),
]
f, ax = plt.subplots()
for vector in vectors:
kdeplot(data=long_df, **{variable: vector})
xdata = [l.get_xdata() for l in ax.lines]
for a, b in itertools.product(xdata, xdata):
assert_array_equal(a, b)
ydata = [l.get_ydata() for l in ax.lines]
for a, b in itertools.product(ydata, ydata):
assert_array_equal(a, b)
def test_wide_vs_long_data(self, wide_df):
f, (ax1, ax2) = plt.subplots(ncols=2)
kdeplot(data=wide_df, ax=ax1, common_norm=False, common_grid=False)
for col in wide_df:
kdeplot(data=wide_df, x=col, ax=ax2)
for l1, l2 in zip(ax1.lines[::-1], ax2.lines):
assert_array_equal(l1.get_xydata(), l2.get_xydata())
def test_flat_vector(self, long_df):
f, ax = plt.subplots()
kdeplot(data=long_df["x"])
kdeplot(x=long_df["x"])
assert_array_equal(ax.lines[0].get_xydata(), ax.lines[1].get_xydata())
def test_empty_data(self):
ax = kdeplot(x=[])
assert not ax.lines
def test_singular_data(self):
with pytest.warns(UserWarning):
ax = kdeplot(x=np.ones(10))
assert not ax.lines
with pytest.warns(UserWarning):
ax = kdeplot(x=[5])
assert not ax.lines
def test_variable_assignment(self, long_df):
f, ax = plt.subplots()
kdeplot(data=long_df, x="x", fill=True)
kdeplot(data=long_df, y="x", fill=True)
v0 = ax.collections[0].get_paths()[0].vertices
v1 = ax.collections[1].get_paths()[0].vertices[:, [1, 0]]
assert_array_equal(v0, v1)
def test_vertical_deprecation(self, long_df):
f, ax = plt.subplots()
kdeplot(data=long_df, y="x")
with pytest.warns(FutureWarning):
kdeplot(data=long_df, x="x", vertical=True)
assert_array_equal(ax.lines[0].get_xydata(), ax.lines[1].get_xydata())
def test_bw_deprecation(self, long_df):
f, ax = plt.subplots()
kdeplot(data=long_df, x="x", bw_method="silverman")
with pytest.warns(FutureWarning):
kdeplot(data=long_df, x="x", bw="silverman")
assert_array_equal(ax.lines[0].get_xydata(), ax.lines[1].get_xydata())
def test_kernel_deprecation(self, long_df):
f, ax = plt.subplots()
kdeplot(data=long_df, x="x")
with pytest.warns(UserWarning):
kdeplot(data=long_df, x="x", kernel="epi")
assert_array_equal(ax.lines[0].get_xydata(), ax.lines[1].get_xydata())
def test_shade_deprecation(self, long_df):
f, ax = plt.subplots()
kdeplot(data=long_df, x="x", shade=True)
kdeplot(data=long_df, x="x", fill=True)
fill1, fill2 = ax.collections
assert_array_equal(
fill1.get_paths()[0].vertices, fill2.get_paths()[0].vertices
)
@pytest.mark.parametrize("multiple", ["layer", "stack", "fill"])
def test_hue_colors(self, long_df, multiple):
ax = kdeplot(
data=long_df, x="x", hue="a",
multiple=multiple,
fill=True, legend=False
)
# Note that hue order is reversed in the plot
lines = ax.lines[::-1]
fills = ax.collections[::-1]
palette = color_palette()
for line, fill, color in zip(lines, fills, palette):
assert_colors_equal(line.get_color(), color)
assert_colors_equal(fill.get_facecolor(), to_rgba(color, .25))
def test_hue_stacking(self, long_df):
f, (ax1, ax2) = plt.subplots(ncols=2)
kdeplot(
data=long_df, x="x", hue="a",
multiple="layer", common_grid=True,
legend=False, ax=ax1,
)
kdeplot(
data=long_df, x="x", hue="a",
multiple="stack", fill=False,
legend=False, ax=ax2,
)
layered_densities = np.stack([
l.get_ydata() for l in ax1.lines
])
stacked_densities = np.stack([
l.get_ydata() for l in ax2.lines
])
assert_array_equal(layered_densities.cumsum(axis=0), stacked_densities)
def test_hue_filling(self, long_df):
f, (ax1, ax2) = plt.subplots(ncols=2)
kdeplot(
data=long_df, x="x", hue="a",
multiple="layer", common_grid=True,
legend=False, ax=ax1,
)
kdeplot(
data=long_df, x="x", hue="a",
multiple="fill", fill=False,
legend=False, ax=ax2,
)
layered = np.stack([l.get_ydata() for l in ax1.lines])
filled = np.stack([l.get_ydata() for l in ax2.lines])
assert_array_almost_equal(
(layered / layered.sum(axis=0)).cumsum(axis=0),
filled,
)
@pytest.mark.parametrize("multiple", ["stack", "fill"])
def test_fill_default(self, long_df, multiple):
ax = kdeplot(
data=long_df, x="x", hue="a", multiple=multiple, fill=None
)
assert len(ax.collections) > 0
@pytest.mark.parametrize("multiple", ["layer", "stack", "fill"])
def test_fill_nondefault(self, long_df, multiple):
f, (ax1, ax2) = plt.subplots(ncols=2)
kws = dict(data=long_df, x="x", hue="a")
kdeplot(**kws, multiple=multiple, fill=False, ax=ax1)
kdeplot(**kws, multiple=multiple, fill=True, ax=ax2)
assert len(ax1.collections) == 0
assert len(ax2.collections) > 0
def test_color_cycle_interaction(self, flat_series):
color = (.2, 1, .6)
f, ax = plt.subplots()
kdeplot(flat_series)
kdeplot(flat_series)
assert_colors_equal(ax.lines[0].get_color(), "C0")
assert_colors_equal(ax.lines[1].get_color(), "C1")
plt.close(f)
f, ax = plt.subplots()
kdeplot(flat_series, color=color)
kdeplot(flat_series)
assert_colors_equal(ax.lines[0].get_color(), color)
assert_colors_equal(ax.lines[1].get_color(), "C0")
plt.close(f)
f, ax = plt.subplots()
kdeplot(flat_series, fill=True)
kdeplot(flat_series, fill=True)
assert_colors_equal(ax.collections[0].get_facecolor(), to_rgba("C0", .25))
assert_colors_equal(ax.collections[1].get_facecolor(), to_rgba("C1", .25))
plt.close(f)
@pytest.mark.parametrize("fill", [True, False])
def test_artist_color(self, long_df, fill):
color = (.2, 1, .6)
alpha = .5
f, ax = plt.subplots()
kdeplot(long_df["x"], fill=fill, color=color)
if fill:
artist_color = ax.collections[-1].get_facecolor().squeeze()
else:
artist_color = ax.lines[-1].get_color()
default_alpha = .25 if fill else 1
assert_colors_equal(artist_color, to_rgba(color, default_alpha))
kdeplot(long_df["x"], fill=fill, color=color, alpha=alpha)
if fill:
artist_color = ax.collections[-1].get_facecolor().squeeze()
else:
artist_color = ax.lines[-1].get_color()
assert_colors_equal(artist_color, to_rgba(color, alpha))
def test_datetime_scale(self, long_df):
f, (ax1, ax2) = plt.subplots(2)
kdeplot(x=long_df["t"], fill=True, ax=ax1)
kdeplot(x=long_df["t"], fill=False, ax=ax2)
assert ax1.get_xlim() == ax2.get_xlim()
def test_multiple_argument_check(self, long_df):
with pytest.raises(ValueError, match="`multiple` must be"):
kdeplot(data=long_df, x="x", hue="a", multiple="bad_input")
def test_cut(self, rng):
x = rng.normal(0, 3, 1000)
f, ax = plt.subplots()
kdeplot(x=x, cut=0, legend=False)
xdata_0 = ax.lines[0].get_xdata()
assert xdata_0.min() == x.min()
assert xdata_0.max() == x.max()
kdeplot(x=x, cut=2, legend=False)
xdata_2 = ax.lines[1].get_xdata()
assert xdata_2.min() < xdata_0.min()
assert xdata_2.max() > xdata_0.max()
assert len(xdata_0) == len(xdata_2)
def test_clip(self, rng):
x = rng.normal(0, 3, 1000)
clip = -1, 1
ax = kdeplot(x=x, clip=clip)
xdata = ax.lines[0].get_xdata()
assert xdata.min() >= clip[0]
assert xdata.max() <= clip[1]
def test_line_is_density(self, long_df):
ax = kdeplot(data=long_df, x="x", cut=5)
x, y = ax.lines[0].get_xydata().T
assert integrate(y, x) == pytest.approx(1)
@pytest.mark.skipif(_no_scipy, reason="Test requires scipy")
def test_cumulative(self, long_df):
ax = kdeplot(data=long_df, x="x", cut=5, cumulative=True)
y = ax.lines[0].get_ydata()
assert y[0] == pytest.approx(0)
assert y[-1] == pytest.approx(1)
@pytest.mark.skipif(not _no_scipy, reason="Test requires scipy's absence")
def test_cumulative_requires_scipy(self, long_df):
with pytest.raises(RuntimeError):
kdeplot(data=long_df, x="x", cut=5, cumulative=True)
def test_common_norm(self, long_df):
f, (ax1, ax2) = plt.subplots(ncols=2)
kdeplot(
data=long_df, x="x", hue="c", common_norm=True, cut=10, ax=ax1
)
kdeplot(
data=long_df, x="x", hue="c", common_norm=False, cut=10, ax=ax2
)
total_area = 0
for line in ax1.lines:
xdata, ydata = line.get_xydata().T
total_area += integrate(ydata, xdata)
assert total_area == pytest.approx(1)
for line in ax2.lines:
xdata, ydata = line.get_xydata().T
assert integrate(ydata, xdata) == pytest.approx(1)
def test_common_grid(self, long_df):
f, (ax1, ax2) = plt.subplots(ncols=2)
order = "a", "b", "c"
kdeplot(
data=long_df, x="x", hue="a", hue_order=order,
common_grid=False, cut=0, ax=ax1,
)
kdeplot(
data=long_df, x="x", hue="a", hue_order=order,
common_grid=True, cut=0, ax=ax2,
)
for line, level in zip(ax1.lines[::-1], order):
xdata = line.get_xdata()
assert xdata.min() == long_df.loc[long_df["a"] == level, "x"].min()
assert xdata.max() == long_df.loc[long_df["a"] == level, "x"].max()
for line in ax2.lines:
xdata = line.get_xdata().T
assert xdata.min() == long_df["x"].min()
assert xdata.max() == long_df["x"].max()
def test_bw_method(self, long_df):
f, ax = plt.subplots()
kdeplot(data=long_df, x="x", bw_method=0.2, legend=False)
kdeplot(data=long_df, x="x", bw_method=1.0, legend=False)
kdeplot(data=long_df, x="x", bw_method=3.0, legend=False)
l1, l2, l3 = ax.lines
assert (
np.abs(np.diff(l1.get_ydata())).mean()
> np.abs(np.diff(l2.get_ydata())).mean()
)
assert (
np.abs(np.diff(l2.get_ydata())).mean()
> np.abs(np.diff(l3.get_ydata())).mean()
)
def test_bw_adjust(self, long_df):
f, ax = plt.subplots()
kdeplot(data=long_df, x="x", bw_adjust=0.2, legend=False)
kdeplot(data=long_df, x="x", bw_adjust=1.0, legend=False)
kdeplot(data=long_df, x="x", bw_adjust=3.0, legend=False)
l1, l2, l3 = ax.lines
assert (
np.abs(np.diff(l1.get_ydata())).mean()
> np.abs(np.diff(l2.get_ydata())).mean()
)
assert (
np.abs(np.diff(l2.get_ydata())).mean()
> np.abs(np.diff(l3.get_ydata())).mean()
)
def test_log_scale_implicit(self, rng):
x = rng.lognormal(0, 1, 100)
f, (ax1, ax2) = plt.subplots(ncols=2)
ax1.set_xscale("log")
kdeplot(x=x, ax=ax1)
kdeplot(x=x, ax=ax1)
xdata_log = ax1.lines[0].get_xdata()
assert (xdata_log > 0).all()
assert (np.diff(xdata_log, 2) > 0).all()
assert np.allclose(np.diff(np.log(xdata_log), 2), 0)
f, ax = plt.subplots()
ax.set_yscale("log")
kdeplot(y=x, ax=ax)
assert_array_equal(ax.lines[0].get_xdata(), ax1.lines[0].get_ydata())
def test_log_scale_explicit(self, rng):
x = rng.lognormal(0, 1, 100)
f, (ax1, ax2, ax3) = plt.subplots(ncols=3)
ax1.set_xscale("log")
kdeplot(x=x, ax=ax1)
kdeplot(x=x, log_scale=True, ax=ax2)
kdeplot(x=x, log_scale=10, ax=ax3)
for ax in f.axes:
assert ax.get_xscale() == "log"
supports = [ax.lines[0].get_xdata() for ax in f.axes]
for a, b in itertools.product(supports, supports):
assert_array_equal(a, b)
densities = [ax.lines[0].get_ydata() for ax in f.axes]
for a, b in itertools.product(densities, densities):
assert_array_equal(a, b)
f, ax = plt.subplots()
kdeplot(y=x, log_scale=True, ax=ax)
assert ax.get_yscale() == "log"
def test_log_scale_with_hue(self, rng):
data = rng.lognormal(0, 1, 50), rng.lognormal(0, 2, 100)
ax = kdeplot(data=data, log_scale=True, common_grid=True)
assert_array_equal(ax.lines[0].get_xdata(), ax.lines[1].get_xdata())
def test_log_scale_normalization(self, rng):
x = rng.lognormal(0, 1, 100)
ax = kdeplot(x=x, log_scale=True, cut=10)
xdata, ydata = ax.lines[0].get_xydata().T
integral = integrate(ydata, np.log10(xdata))
assert integral == pytest.approx(1)
def test_weights(self):
x = [1, 2]
weights = [2, 1]
ax = kdeplot(x=x, weights=weights, bw_method=.1)
xdata, ydata = ax.lines[0].get_xydata().T
y1 = ydata[np.abs(xdata - 1).argmin()]
y2 = ydata[np.abs(xdata - 2).argmin()]
assert y1 == pytest.approx(2 * y2)
def test_sticky_edges(self, long_df):
f, (ax1, ax2) = plt.subplots(ncols=2)
kdeplot(data=long_df, x="x", fill=True, ax=ax1)
assert ax1.collections[0].sticky_edges.y[:] == [0, np.inf]
kdeplot(
data=long_df, x="x", hue="a", multiple="fill", fill=True, ax=ax2
)
assert ax2.collections[0].sticky_edges.y[:] == [0, 1]
def test_line_kws(self, flat_array):
lw = 3
color = (.2, .5, .8)
ax = kdeplot(x=flat_array, linewidth=lw, color=color)
line, = ax.lines
assert line.get_linewidth() == lw
assert_colors_equal(line.get_color(), color)
def test_input_checking(self, long_df):
err = "The x variable is categorical,"
with pytest.raises(TypeError, match=err):
kdeplot(data=long_df, x="a")
def test_axis_labels(self, long_df):
f, (ax1, ax2) = plt.subplots(ncols=2)
kdeplot(data=long_df, x="x", ax=ax1)
assert ax1.get_xlabel() == "x"
assert ax1.get_ylabel() == "Density"
kdeplot(data=long_df, y="y", ax=ax2)
assert ax2.get_xlabel() == "Density"
assert ax2.get_ylabel() == "y"
def test_legend(self, long_df):
ax = kdeplot(data=long_df, x="x", hue="a")
assert ax.legend_.get_title().get_text() == "a"
legend_labels = ax.legend_.get_texts()
order = categorical_order(long_df["a"])
for label, level in zip(legend_labels, order):
assert label.get_text() == level
legend_artists = ax.legend_.findobj(mpl.lines.Line2D)[::2]
palette = color_palette()
for artist, color in zip(legend_artists, palette):
assert_colors_equal(artist.get_color(), color)
ax.clear()
kdeplot(data=long_df, x="x", hue="a", legend=False)
assert ax.legend_ is None
class TestKDEPlotBivariate:
def test_long_vectors(self, long_df):
ax1 = kdeplot(data=long_df, x="x", y="y")
x = long_df["x"]
x_values = [x, x.to_numpy(), x.to_list()]
y = long_df["y"]
y_values = [y, y.to_numpy(), y.to_list()]
for x, y in zip(x_values, y_values):
f, ax2 = plt.subplots()
kdeplot(x=x, y=y, ax=ax2)
for c1, c2 in zip(ax1.collections, ax2.collections):
assert_array_equal(c1.get_offsets(), c2.get_offsets())
def test_singular_data(self):
with pytest.warns(UserWarning):
ax = dist.kdeplot(x=np.ones(10), y=np.arange(10))
assert not ax.lines
with pytest.warns(UserWarning):
ax = dist.kdeplot(x=[5], y=[6])
assert not ax.lines
def test_fill_artists(self, long_df):
for fill in [True, False]:
f, ax = plt.subplots()
kdeplot(data=long_df, x="x", y="y", hue="c", fill=fill)
for c in ax.collections:
if fill:
assert isinstance(c, mpl.collections.PathCollection)
else:
assert isinstance(c, mpl.collections.LineCollection)
def test_common_norm(self, rng):
hue = np.repeat(["a", "a", "a", "b"], 40)
x, y = rng.multivariate_normal([0, 0], [(.2, .5), (.5, 2)], len(hue)).T
x[hue == "a"] -= 2
x[hue == "b"] += 2
f, (ax1, ax2) = plt.subplots(ncols=2)
kdeplot(x=x, y=y, hue=hue, common_norm=True, ax=ax1)
kdeplot(x=x, y=y, hue=hue, common_norm=False, ax=ax2)
n_seg_1 = sum([len(c.get_segments()) > 0 for c in ax1.collections])
n_seg_2 = sum([len(c.get_segments()) > 0 for c in ax2.collections])
assert n_seg_2 > n_seg_1
def test_log_scale(self, rng):
x = rng.lognormal(0, 1, 100)
y = rng.uniform(0, 1, 100)
levels = .2, .5, 1
f, ax = plt.subplots()
kdeplot(x=x, y=y, log_scale=True, levels=levels, ax=ax)
assert ax.get_xscale() == "log"
assert ax.get_yscale() == "log"
f, (ax1, ax2) = plt.subplots(ncols=2)
kdeplot(x=x, y=y, log_scale=(10, False), levels=levels, ax=ax1)
assert ax1.get_xscale() == "log"
assert ax1.get_yscale() == "linear"
p = _DistributionPlotter()
kde = KDE()
density, (xx, yy) = kde(np.log10(x), y)
levels = p._quantile_to_level(density, levels)
ax2.contour(10 ** xx, yy, density, levels=levels)
for c1, c2 in zip(ax1.collections, ax2.collections):
assert_array_equal(c1.get_segments(), c2.get_segments())
    def test_bandwidth(self, rng):
n = 100
x, y = rng.multivariate_normal([0, 0], [(.2, .5), (.5, 2)], n).T
f, (ax1, ax2) = plt.subplots(ncols=2)
kdeplot(x=x, y=y, ax=ax1)
kdeplot(x=x, y=y, bw_adjust=2, ax=ax2)
for c1, c2 in zip(ax1.collections, ax2.collections):
seg1, seg2 = c1.get_segments(), c2.get_segments()
if seg1 + seg2:
x1 = seg1[0][:, 0]
x2 = seg2[0][:, 0]
assert np.abs(x2).max() > np.abs(x1).max()
def test_weights(self, rng):
import warnings
warnings.simplefilter("error", np.VisibleDeprecationWarning)
n = 100
x, y = rng.multivariate_normal([1, 3], [(.2, .5), (.5, 2)], n).T
hue = np.repeat([0, 1], n // 2)
weights = rng.uniform(0, 1, n)
f, (ax1, ax2) = plt.subplots(ncols=2)
kdeplot(x=x, y=y, hue=hue, ax=ax1)
kdeplot(x=x, y=y, hue=hue, weights=weights, ax=ax2)
for c1, c2 in zip(ax1.collections, ax2.collections):
if c1.get_segments() and c2.get_segments():
seg1 = np.concatenate(c1.get_segments(), axis=0)
seg2 = np.concatenate(c2.get_segments(), axis=0)
assert not np.array_equal(seg1, seg2)
def test_hue_ignores_cmap(self, long_df):
with pytest.warns(UserWarning, match="cmap parameter ignored"):
ax = kdeplot(data=long_df, x="x", y="y", hue="c", cmap="viridis")
assert_colors_equal(ax.collections[0].get_color(), "C0")
def test_contour_line_colors(self, long_df):
color = (.2, .9, .8, 1)
ax = kdeplot(data=long_df, x="x", y="y", color=color)
for c in ax.collections:
assert_colors_equal(c.get_color(), color)
def test_contour_fill_colors(self, long_df):
n = 6
color = (.2, .9, .8, 1)
ax = kdeplot(
data=long_df, x="x", y="y", fill=True, color=color, levels=n,
)
cmap = light_palette(color, reverse=True, as_cmap=True)
lut = cmap(np.linspace(0, 1, 256))
for c in ax.collections:
color = c.get_facecolor().squeeze()
assert color in lut
def test_colorbar(self, long_df):
ax = kdeplot(data=long_df, x="x", y="y", fill=True, cbar=True)
assert len(ax.figure.axes) == 2
def test_levels_and_thresh(self, long_df):
f, (ax1, ax2) = plt.subplots(ncols=2)
n = 8
thresh = .1
plot_kws = dict(data=long_df, x="x", y="y")
kdeplot(**plot_kws, levels=n, thresh=thresh, ax=ax1)
kdeplot(**plot_kws, levels=np.linspace(thresh, 1, n), ax=ax2)
for c1, c2 in zip(ax1.collections, ax2.collections):
assert_array_equal(c1.get_segments(), c2.get_segments())
with pytest.raises(ValueError):
kdeplot(**plot_kws, levels=[0, 1, 2])
ax1.clear()
ax2.clear()
kdeplot(**plot_kws, levels=n, thresh=None, ax=ax1)
kdeplot(**plot_kws, levels=n, thresh=0, ax=ax2)
for c1, c2 in zip(ax1.collections, ax2.collections):
assert_array_equal(c1.get_segments(), c2.get_segments())
for c1, c2 in zip(ax1.collections, ax2.collections):
assert_array_equal(c1.get_facecolors(), c2.get_facecolors())
def test_quantile_to_level(self, rng):
x = rng.uniform(0, 1, 100000)
isoprop = np.linspace(.1, 1, 6)
levels = _DistributionPlotter()._quantile_to_level(x, isoprop)
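        # The mass of values at or below each returned level should match the
        # requested proportion of the total mass.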
for h, p in zip(levels, isoprop):
assert (x[x <= h].sum() / x.sum()) == pytest.approx(p, abs=1e-4)
def test_input_checking(self, long_df):
with pytest.raises(TypeError, match="The x variable is categorical,"):
kdeplot(data=long_df, x="a", y="y")
class TestHistPlotUnivariate(SharedAxesLevelTests):
func = staticmethod(histplot)
def get_last_color(self, ax, element="bars", fill=True):
if element == "bars":
if fill:
return ax.patches[-1].get_facecolor()
else:
return ax.patches[-1].get_edgecolor()
else:
if fill:
artist = ax.collections[-1]
facecolor = artist.get_facecolor()
edgecolor = artist.get_edgecolor()
assert_colors_equal(facecolor, edgecolor, check_alpha=False)
return facecolor
else:
return ax.lines[-1].get_color()
@pytest.mark.parametrize(
"element,fill",
itertools.product(["bars", "step", "poly"], [True, False]),
)
def test_color(self, long_df, element, fill):
super().test_color(long_df, element=element, fill=fill)
@pytest.mark.parametrize(
"variable", ["x", "y"],
)
def test_long_vectors(self, long_df, variable):
vector = long_df[variable]
vectors = [
variable, vector, vector.to_numpy(), vector.to_list(),
]
f, axs = plt.subplots(3)
for vector, ax in zip(vectors, axs):
histplot(data=long_df, ax=ax, **{variable: vector})
bars = [ax.patches for ax in axs]
for a_bars, b_bars in itertools.product(bars, bars):
for a, b in zip(a_bars, b_bars):
assert_array_equal(a.get_height(), b.get_height())
assert_array_equal(a.get_xy(), b.get_xy())
def test_wide_vs_long_data(self, wide_df):
f, (ax1, ax2) = plt.subplots(2)
histplot(data=wide_df, ax=ax1, common_bins=False)
for col in wide_df.columns[::-1]:
histplot(data=wide_df, x=col, ax=ax2)
for a, b in zip(ax1.patches, ax2.patches):
assert a.get_height() == b.get_height()
assert a.get_xy() == b.get_xy()
def test_flat_vector(self, long_df):
f, (ax1, ax2) = plt.subplots(2)
histplot(data=long_df["x"], ax=ax1)
histplot(data=long_df, x="x", ax=ax2)
for a, b in zip(ax1.patches, ax2.patches):
assert a.get_height() == b.get_height()
assert a.get_xy() == b.get_xy()
def test_empty_data(self):
ax = histplot(x=[])
assert not ax.patches
def test_variable_assignment(self, long_df):
f, (ax1, ax2) = plt.subplots(2)
histplot(data=long_df, x="x", ax=ax1)
histplot(data=long_df, y="x", ax=ax2)
for a, b in zip(ax1.patches, ax2.patches):
assert a.get_height() == b.get_width()
@pytest.mark.parametrize("element", ["bars", "step", "poly"])
@pytest.mark.parametrize("multiple", ["layer", "dodge", "stack", "fill"])
def test_hue_fill_colors(self, long_df, multiple, element):
ax = histplot(
data=long_df, x="x", hue="a",
multiple=multiple, bins=1,
fill=True, element=element, legend=False,
)
palette = color_palette()
if multiple == "layer":
if element == "bars":
a = .5
else:
a = .25
else:
a = .75
for bar, color in zip(ax.patches[::-1], palette):
assert_colors_equal(bar.get_facecolor(), to_rgba(color, a))
for poly, color in zip(ax.collections[::-1], palette):
assert_colors_equal(poly.get_facecolor(), to_rgba(color, a))
def test_hue_stack(self, long_df):
f, (ax1, ax2) = plt.subplots(2)
n = 10
kws = dict(data=long_df, x="x", hue="a", bins=n, element="bars")
histplot(**kws, multiple="layer", ax=ax1)
histplot(**kws, multiple="stack", ax=ax2)
layer_heights = np.reshape([b.get_height() for b in ax1.patches], (-1, n))
stack_heights = np.reshape([b.get_height() for b in ax2.patches], (-1, n))
assert_array_equal(layer_heights, stack_heights)
stack_xys = np.reshape([b.get_xy() for b in ax2.patches], (-1, n, 2))
assert_array_equal(
stack_xys[..., 1] + stack_heights,
stack_heights.cumsum(axis=0),
)
def test_hue_fill(self, long_df):
f, (ax1, ax2) = plt.subplots(2)
n = 10
kws = dict(data=long_df, x="x", hue="a", bins=n, element="bars")
histplot(**kws, multiple="layer", ax=ax1)
histplot(**kws, multiple="fill", ax=ax2)
layer_heights = np.reshape([b.get_height() for b in ax1.patches], (-1, n))
stack_heights = np.reshape([b.get_height() for b in ax2.patches], (-1, n))
assert_array_almost_equal(
layer_heights / layer_heights.sum(axis=0), stack_heights
)
stack_xys = np.reshape([b.get_xy() for b in ax2.patches], (-1, n, 2))
assert_array_almost_equal(
(stack_xys[..., 1] + stack_heights) / stack_heights.sum(axis=0),
stack_heights.cumsum(axis=0),
)
def test_hue_dodge(self, long_df):
f, (ax1, ax2) = plt.subplots(2)
bw = 2
kws = dict(data=long_df, x="x", hue="c", binwidth=bw, element="bars")
histplot(**kws, multiple="layer", ax=ax1)
histplot(**kws, multiple="dodge", ax=ax2)
layer_heights = [b.get_height() for b in ax1.patches]
dodge_heights = [b.get_height() for b in ax2.patches]
assert_array_equal(layer_heights, dodge_heights)
layer_xs = np.reshape([b.get_x() for b in ax1.patches], (2, -1))
dodge_xs = np.reshape([b.get_x() for b in ax2.patches], (2, -1))
assert_array_almost_equal(layer_xs[1], dodge_xs[1])
assert_array_almost_equal(layer_xs[0], dodge_xs[0] - bw / 2)
def test_hue_as_numpy_dodged(self, long_df):
# https://github.com/mwaskom/seaborn/issues/2452
ax = histplot(
long_df,
x="y", hue=long_df["a"].to_numpy(),
multiple="dodge", bins=1,
)
# Note hue order reversal
assert ax.patches[1].get_x() < ax.patches[0].get_x()
def test_multiple_input_check(self, flat_series):
with pytest.raises(ValueError, match="`multiple` must be"):
histplot(flat_series, multiple="invalid")
def test_element_input_check(self, flat_series):
with pytest.raises(ValueError, match="`element` must be"):
histplot(flat_series, element="invalid")
def test_count_stat(self, flat_series):
ax = histplot(flat_series, stat="count")
bar_heights = [b.get_height() for b in ax.patches]
assert sum(bar_heights) == len(flat_series)
def test_density_stat(self, flat_series):
ax = histplot(flat_series, stat="density")
bar_heights = [b.get_height() for b in ax.patches]
bar_widths = [b.get_width() for b in ax.patches]
assert np.multiply(bar_heights, bar_widths).sum() == pytest.approx(1)
def test_density_stat_common_norm(self, long_df):
ax = histplot(
data=long_df, x="x", hue="a",
stat="density", common_norm=True, element="bars",
)
bar_heights = [b.get_height() for b in ax.patches]
bar_widths = [b.get_width() for b in ax.patches]
assert np.multiply(bar_heights, bar_widths).sum() == pytest.approx(1)
def test_density_stat_unique_norm(self, long_df):
n = 10
ax = histplot(
data=long_df, x="x", hue="a",
stat="density", bins=n, common_norm=False, element="bars",
)
bar_groups = ax.patches[:n], ax.patches[-n:]
for bars in bar_groups:
bar_heights = [b.get_height() for b in bars]
bar_widths = [b.get_width() for b in bars]
bar_areas = np.multiply(bar_heights, bar_widths)
assert bar_areas.sum() == pytest.approx(1)
def test_probability_stat(self, flat_series):
ax = histplot(flat_series, stat="probability")
bar_heights = [b.get_height() for b in ax.patches]
assert sum(bar_heights) == pytest.approx(1)
def test_probability_stat_common_norm(self, long_df):
ax = histplot(
data=long_df, x="x", hue="a",
stat="probability", common_norm=True, element="bars",
)
bar_heights = [b.get_height() for b in ax.patches]
assert sum(bar_heights) == pytest.approx(1)
def test_probability_stat_unique_norm(self, long_df):
n = 10
ax = histplot(
data=long_df, x="x", hue="a",
stat="probability", bins=n, common_norm=False, element="bars",
)
bar_groups = ax.patches[:n], ax.patches[-n:]
for bars in bar_groups:
bar_heights = [b.get_height() for b in bars]
assert sum(bar_heights) == pytest.approx(1)
def test_percent_stat(self, flat_series):
ax = histplot(flat_series, stat="percent")
bar_heights = [b.get_height() for b in ax.patches]
assert sum(bar_heights) == 100
def test_common_bins(self, long_df):
n = 10
ax = histplot(
long_df, x="x", hue="a", common_bins=True, bins=n, element="bars",
)
bar_groups = ax.patches[:n], ax.patches[-n:]
assert_array_equal(
[b.get_xy() for b in bar_groups[0]],
[b.get_xy() for b in bar_groups[1]]
)
def test_unique_bins(self, wide_df):
ax = histplot(wide_df, common_bins=False, bins=10, element="bars")
bar_groups = np.split(np.array(ax.patches), len(wide_df.columns))
for i, col in enumerate(wide_df.columns[::-1]):
bars = bar_groups[i]
start = bars[0].get_x()
stop = bars[-1].get_x() + bars[-1].get_width()
assert start == wide_df[col].min()
assert stop == wide_df[col].max()
def test_weights_with_missing(self, missing_df):
ax = histplot(missing_df, x="x", weights="s", bins=5)
bar_heights = [bar.get_height() for bar in ax.patches]
total_weight = missing_df[["x", "s"]].dropna()["s"].sum()
assert sum(bar_heights) == pytest.approx(total_weight)
def test_discrete(self, long_df):
ax = histplot(long_df, x="s", discrete=True)
data_min = long_df["s"].min()
data_max = long_df["s"].max()
assert len(ax.patches) == (data_max - data_min + 1)
for i, bar in enumerate(ax.patches):
assert bar.get_width() == 1
assert bar.get_x() == (data_min + i - .5)
def test_discrete_categorical_default(self, long_df):
ax = histplot(long_df, x="a")
for i, bar in enumerate(ax.patches):
assert bar.get_width() == 1
def test_categorical_yaxis_inversion(self, long_df):
ax = histplot(long_df, y="a")
ymax, ymin = ax.get_ylim()
assert ymax > ymin
def test_discrete_requires_bars(self, long_df):
with pytest.raises(ValueError, match="`element` must be 'bars'"):
histplot(long_df, x="s", discrete=True, element="poly")
@pytest.mark.skipif(
LooseVersion(np.__version__) < "1.17",
reason="Histogram over datetime64 requires numpy >= 1.17",
)
def test_datetime_scale(self, long_df):
f, (ax1, ax2) = plt.subplots(2)
histplot(x=long_df["t"], fill=True, ax=ax1)
histplot(x=long_df["t"], fill=False, ax=ax2)
assert ax1.get_xlim() == ax2.get_xlim()
@pytest.mark.parametrize("stat", ["count", "density", "probability"])
def test_kde(self, flat_series, stat):
ax = histplot(
flat_series, kde=True, stat=stat, kde_kws={"cut": 10}
)
bar_widths = [b.get_width() for b in ax.patches]
bar_heights = [b.get_height() for b in ax.patches]
hist_area = np.multiply(bar_widths, bar_heights).sum()
density, = ax.lines
kde_area = integrate(density.get_ydata(), density.get_xdata())
assert kde_area == pytest.approx(hist_area)
@pytest.mark.parametrize("multiple", ["layer", "dodge"])
@pytest.mark.parametrize("stat", ["count", "density", "probability"])
def test_kde_with_hue(self, long_df, stat, multiple):
n = 10
ax = histplot(
long_df, x="x", hue="c", multiple=multiple,
kde=True, stat=stat, element="bars",
kde_kws={"cut": 10}, bins=n,
)
bar_groups = ax.patches[:n], ax.patches[-n:]
for i, bars in enumerate(bar_groups):
bar_widths = [b.get_width() for b in bars]
bar_heights = [b.get_height() for b in bars]
hist_area = np.multiply(bar_widths, bar_heights).sum()
x, y = ax.lines[i].get_xydata().T
kde_area = integrate(y, x)
if multiple == "layer":
assert kde_area == pytest.approx(hist_area)
elif multiple == "dodge":
assert kde_area == pytest.approx(hist_area * 2)
def test_kde_default_cut(self, flat_series):
ax = histplot(flat_series, kde=True)
support = ax.lines[0].get_xdata()
assert support.min() == flat_series.min()
assert support.max() == flat_series.max()
def test_kde_hue(self, long_df):
n = 10
ax = histplot(data=long_df, x="x", hue="a", kde=True, bins=n)
for bar, line in zip(ax.patches[::n], ax.lines):
assert_colors_equal(
bar.get_facecolor(), line.get_color(), check_alpha=False
)
def test_kde_yaxis(self, flat_series):
f, ax = plt.subplots()
histplot(x=flat_series, kde=True)
histplot(y=flat_series, kde=True)
x, y = ax.lines
assert_array_equal(x.get_xdata(), y.get_ydata())
assert_array_equal(x.get_ydata(), y.get_xdata())
def test_kde_line_kws(self, flat_series):
lw = 5
ax = histplot(flat_series, kde=True, line_kws=dict(lw=lw))
assert ax.lines[0].get_linewidth() == lw
def test_kde_singular_data(self):
with pytest.warns(UserWarning):
ax = histplot(x=np.ones(10), kde=True)
assert not ax.lines
with pytest.warns(UserWarning):
ax = histplot(x=[5], kde=True)
assert not ax.lines
def test_element_default(self, long_df):
f, (ax1, ax2) = plt.subplots(2)
histplot(long_df, x="x", ax=ax1)
histplot(long_df, x="x", ax=ax2, element="bars")
assert len(ax1.patches) == len(ax2.patches)
f, (ax1, ax2) = plt.subplots(2)
histplot(long_df, x="x", hue="a", ax=ax1)
histplot(long_df, x="x", hue="a", ax=ax2, element="bars")
assert len(ax1.patches) == len(ax2.patches)
def test_bars_no_fill(self, flat_series):
alpha = .5
ax = histplot(flat_series, element="bars", fill=False, alpha=alpha)
for bar in ax.patches:
assert bar.get_facecolor() == (0, 0, 0, 0)
assert bar.get_edgecolor()[-1] == alpha
def test_step_fill(self, flat_series):
f, (ax1, ax2) = plt.subplots(2)
n = 10
histplot(flat_series, element="bars", fill=True, bins=n, ax=ax1)
histplot(flat_series, element="step", fill=True, bins=n, ax=ax2)
bar_heights = [b.get_height() for b in ax1.patches]
bar_widths = [b.get_width() for b in ax1.patches]
bar_edges = [b.get_x() for b in ax1.patches]
fill = ax2.collections[0]
x, y = fill.get_paths()[0].vertices[::-1].T
assert_array_equal(x[1:2 * n:2], bar_edges)
assert_array_equal(y[1:2 * n:2], bar_heights)
assert x[n * 2] == bar_edges[-1] + bar_widths[-1]
assert y[n * 2] == bar_heights[-1]
def test_poly_fill(self, flat_series):
f, (ax1, ax2) = plt.subplots(2)
n = 10
histplot(flat_series, element="bars", fill=True, bins=n, ax=ax1)
histplot(flat_series, element="poly", fill=True, bins=n, ax=ax2)
bar_heights = np.array([b.get_height() for b in ax1.patches])
bar_widths = np.array([b.get_width() for b in ax1.patches])
bar_edges = np.array([b.get_x() for b in ax1.patches])
fill = ax2.collections[0]
x, y = fill.get_paths()[0].vertices[::-1].T
assert_array_equal(x[1:n + 1], bar_edges + bar_widths / 2)
assert_array_equal(y[1:n + 1], bar_heights)
def test_poly_no_fill(self, flat_series):
f, (ax1, ax2) = plt.subplots(2)
n = 10
histplot(flat_series, element="bars", fill=False, bins=n, ax=ax1)
histplot(flat_series, element="poly", fill=False, bins=n, ax=ax2)
bar_heights = np.array([b.get_height() for b in ax1.patches])
bar_widths = np.array([b.get_width() for b in ax1.patches])
bar_edges = np.array([b.get_x() for b in ax1.patches])
x, y = ax2.lines[0].get_xydata().T
assert_array_equal(x, bar_edges + bar_widths / 2)
assert_array_equal(y, bar_heights)
def test_step_no_fill(self, flat_series):
f, (ax1, ax2) = plt.subplots(2)
histplot(flat_series, element="bars", fill=False, ax=ax1)
histplot(flat_series, element="step", fill=False, ax=ax2)
bar_heights = [b.get_height() for b in ax1.patches]
bar_widths = [b.get_width() for b in ax1.patches]
bar_edges = [b.get_x() for b in ax1.patches]
x, y = ax2.lines[0].get_xydata().T
assert_array_equal(x[:-1], bar_edges)
assert_array_equal(y[:-1], bar_heights)
assert x[-1] == bar_edges[-1] + bar_widths[-1]
assert y[-1] == y[-2]
def test_step_fill_xy(self, flat_series):
f, ax = plt.subplots()
histplot(x=flat_series, element="step", fill=True)
histplot(y=flat_series, element="step", fill=True)
xverts = ax.collections[0].get_paths()[0].vertices
yverts = ax.collections[1].get_paths()[0].vertices
assert_array_equal(xverts, yverts[:, ::-1])
def test_step_no_fill_xy(self, flat_series):
f, ax = plt.subplots()
histplot(x=flat_series, element="step", fill=False)
histplot(y=flat_series, element="step", fill=False)
xline, yline = ax.lines
assert_array_equal(xline.get_xdata(), yline.get_ydata())
assert_array_equal(xline.get_ydata(), yline.get_xdata())
def test_weighted_histogram(self):
ax = histplot(x=[0, 1, 2], weights=[1, 2, 3], discrete=True)
bar_heights = [b.get_height() for b in ax.patches]
assert bar_heights == [1, 2, 3]
def test_weights_with_auto_bins(self, long_df):
with pytest.warns(UserWarning):
ax = histplot(long_df, x="x", weights="f")
assert len(ax.patches) == 10
def test_shrink(self, long_df):
bw = 2
shrink = .5
ax = histplot(long_df, x="x", binwidth=bw, shrink=shrink)
assert ax.patches[0].get_width() == bw * shrink
def test_log_scale_explicit(self, rng):
x = rng.lognormal(0, 2, 1000)
ax = histplot(x, log_scale=True, binwidth=1)
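        # binwidth=1 is applied on the log10 axis, so consecutive bar widths in
        # data units should grow by a factor of 10.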
bar_widths = [b.get_width() for b in ax.patches]
steps = np.divide(bar_widths[1:], bar_widths[:-1])
assert np.allclose(steps, 10)
def test_log_scale_implicit(self, rng):
x = rng.lognormal(0, 2, 1000)
f, ax = plt.subplots()
ax.set_xscale("log")
histplot(x, binwidth=1, ax=ax)
bar_widths = [b.get_width() for b in ax.patches]
steps = np.divide(bar_widths[1:], bar_widths[:-1])
assert np.allclose(steps, 10)
@pytest.mark.parametrize(
"fill", [True, False],
)
def test_auto_linewidth(self, flat_series, fill):
get_lw = lambda ax: ax.patches[0].get_linewidth() # noqa: E731
kws = dict(element="bars", fill=fill)
f, (ax1, ax2) = plt.subplots(2)
histplot(flat_series, **kws, bins=10, ax=ax1)
histplot(flat_series, **kws, bins=100, ax=ax2)
assert get_lw(ax1) > get_lw(ax2)
f, ax1 = plt.subplots(figsize=(10, 5))
f, ax2 = plt.subplots(figsize=(2, 5))
histplot(flat_series, **kws, bins=30, ax=ax1)
histplot(flat_series, **kws, bins=30, ax=ax2)
assert get_lw(ax1) > get_lw(ax2)
def test_bar_kwargs(self, flat_series):
lw = 2
ec = (1, .2, .9, .5)
ax = histplot(flat_series, binwidth=1, ec=ec, lw=lw)
for bar in ax.patches:
assert_colors_equal(bar.get_edgecolor(), ec)
assert bar.get_linewidth() == lw
def test_step_fill_kwargs(self, flat_series):
lw = 2
ec = (1, .2, .9, .5)
ax = histplot(flat_series, element="step", ec=ec, lw=lw)
poly = ax.collections[0]
assert_colors_equal(poly.get_edgecolor(), ec)
assert poly.get_linewidth() == lw
def test_step_line_kwargs(self, flat_series):
lw = 2
ls = "--"
ax = histplot(flat_series, element="step", fill=False, lw=lw, ls=ls)
line = ax.lines[0]
assert line.get_linewidth() == lw
assert line.get_linestyle() == ls
class TestHistPlotBivariate:
def test_mesh(self, long_df):
hist = Histogram()
counts, (x_edges, y_edges) = hist(long_df["x"], long_df["y"])
ax = histplot(long_df, x="x", y="y")
mesh = ax.collections[0]
mesh_data = mesh.get_array()
assert_array_equal(mesh_data.data, counts.T.flat)
assert_array_equal(mesh_data.mask, counts.T.flat == 0)
edges = itertools.product(y_edges[:-1], x_edges[:-1])
for i, (y, x) in enumerate(edges):
path = mesh.get_paths()[i]
assert path.vertices[0, 0] == x
assert path.vertices[0, 1] == y
def test_mesh_with_hue(self, long_df):
ax = histplot(long_df, x="x", y="y", hue="c")
hist = Histogram()
hist.define_bin_edges(long_df["x"], long_df["y"])
for i, sub_df in long_df.groupby("c"):
mesh = ax.collections[i]
mesh_data = mesh.get_array()
counts, (x_edges, y_edges) = hist(sub_df["x"], sub_df["y"])
assert_array_equal(mesh_data.data, counts.T.flat)
assert_array_equal(mesh_data.mask, counts.T.flat == 0)
edges = itertools.product(y_edges[:-1], x_edges[:-1])
for i, (y, x) in enumerate(edges):
path = mesh.get_paths()[i]
assert path.vertices[0, 0] == x
assert path.vertices[0, 1] == y
def test_mesh_with_hue_unique_bins(self, long_df):
ax = histplot(long_df, x="x", y="y", hue="c", common_bins=False)
for i, sub_df in long_df.groupby("c"):
hist = Histogram()
mesh = ax.collections[i]
mesh_data = mesh.get_array()
counts, (x_edges, y_edges) = hist(sub_df["x"], sub_df["y"])
assert_array_equal(mesh_data.data, counts.T.flat)
assert_array_equal(mesh_data.mask, counts.T.flat == 0)
edges = itertools.product(y_edges[:-1], x_edges[:-1])
for i, (y, x) in enumerate(edges):
path = mesh.get_paths()[i]
assert path.vertices[0, 0] == x
assert path.vertices[0, 1] == y
def test_mesh_log_scale(self, rng):
x, y = rng.lognormal(0, 1, (2, 1000))
hist = Histogram()
counts, (x_edges, y_edges) = hist(np.log10(x), np.log10(y))
ax = histplot(x=x, y=y, log_scale=True)
mesh = ax.collections[0]
mesh_data = mesh.get_array()
assert_array_equal(mesh_data.data, counts.T.flat)
edges = itertools.product(y_edges[:-1], x_edges[:-1])
for i, (y_i, x_i) in enumerate(edges):
path = mesh.get_paths()[i]
assert path.vertices[0, 0] == 10 ** x_i
assert path.vertices[0, 1] == 10 ** y_i
def test_mesh_thresh(self, long_df):
hist = Histogram()
counts, (x_edges, y_edges) = hist(long_df["x"], long_df["y"])
thresh = 5
ax = histplot(long_df, x="x", y="y", thresh=thresh)
mesh = ax.collections[0]
mesh_data = mesh.get_array()
assert_array_equal(mesh_data.data, counts.T.flat)
assert_array_equal(mesh_data.mask, (counts <= thresh).T.flat)
def test_mesh_sticky_edges(self, long_df):
ax = histplot(long_df, x="x", y="y", thresh=None)
mesh = ax.collections[0]
assert mesh.sticky_edges.x == [long_df["x"].min(), long_df["x"].max()]
assert mesh.sticky_edges.y == [long_df["y"].min(), long_df["y"].max()]
ax.clear()
ax = histplot(long_df, x="x", y="y")
mesh = ax.collections[0]
assert not mesh.sticky_edges.x
assert not mesh.sticky_edges.y
def test_mesh_common_norm(self, long_df):
stat = "density"
ax = histplot(
long_df, x="x", y="y", hue="c", common_norm=True, stat=stat,
)
hist = Histogram(stat="density")
hist.define_bin_edges(long_df["x"], long_df["y"])
for i, sub_df in long_df.groupby("c"):
mesh = ax.collections[i]
mesh_data = mesh.get_array()
density, (x_edges, y_edges) = hist(sub_df["x"], sub_df["y"])
scale = len(sub_df) / len(long_df)
assert_array_equal(mesh_data.data, (density * scale).T.flat)
def test_mesh_unique_norm(self, long_df):
stat = "density"
ax = histplot(
long_df, x="x", y="y", hue="c", common_norm=False, stat=stat,
)
hist = Histogram()
hist.define_bin_edges(long_df["x"], long_df["y"])
for i, sub_df in long_df.groupby("c"):
sub_hist = Histogram(bins=hist.bin_edges, stat=stat)
mesh = ax.collections[i]
mesh_data = mesh.get_array()
density, (x_edges, y_edges) = sub_hist(sub_df["x"], sub_df["y"])
assert_array_equal(mesh_data.data, density.T.flat)
@pytest.mark.parametrize("stat", ["probability", "percent"])
def test_mesh_normalization(self, long_df, stat):
ax = histplot(
long_df, x="x", y="y", stat=stat,
)
mesh_data = ax.collections[0].get_array()
expected_sum = {"probability": 1, "percent": 100}[stat]
assert mesh_data.data.sum() == expected_sum
def test_mesh_colors(self, long_df):
color = "r"
f, ax = plt.subplots()
histplot(
long_df, x="x", y="y", color=color,
)
mesh = ax.collections[0]
assert_array_equal(
mesh.get_cmap().colors,
_DistributionPlotter()._cmap_from_color(color).colors,
)
f, ax = plt.subplots()
histplot(
long_df, x="x", y="y", hue="c",
)
colors = color_palette()
for i, mesh in enumerate(ax.collections):
assert_array_equal(
mesh.get_cmap().colors,
_DistributionPlotter()._cmap_from_color(colors[i]).colors,
)
def test_color_limits(self, long_df):
f, (ax1, ax2, ax3) = plt.subplots(3)
kws = dict(data=long_df, x="x", y="y")
hist = Histogram()
counts, _ = hist(long_df["x"], long_df["y"])
histplot(**kws, ax=ax1)
assert ax1.collections[0].get_clim() == (0, counts.max())
vmax = 10
histplot(**kws, vmax=vmax, ax=ax2)
counts, _ = hist(long_df["x"], long_df["y"])
assert ax2.collections[0].get_clim() == (0, vmax)
pmax = .8
pthresh = .1
f = _DistributionPlotter()._quantile_to_level
histplot(**kws, pmax=pmax, pthresh=pthresh, ax=ax3)
counts, _ = hist(long_df["x"], long_df["y"])
mesh = ax3.collections[0]
assert mesh.get_clim() == (0, f(counts, pmax))
assert_array_equal(
mesh.get_array().mask,
(counts <= f(counts, pthresh)).T.flat,
)
def test_hue_color_limits(self, long_df):
_, (ax1, ax2, ax3, ax4) = plt.subplots(4)
kws = dict(data=long_df, x="x", y="y", hue="c", bins=4)
hist = Histogram(bins=kws["bins"])
hist.define_bin_edges(long_df["x"], long_df["y"])
full_counts, _ = hist(long_df["x"], long_df["y"])
sub_counts = []
for _, sub_df in long_df.groupby(kws["hue"]):
c, _ = hist(sub_df["x"], sub_df["y"])
sub_counts.append(c)
pmax = .8
pthresh = .05
f = _DistributionPlotter()._quantile_to_level
histplot(**kws, common_norm=True, ax=ax1)
for i, mesh in enumerate(ax1.collections):
assert mesh.get_clim() == (0, full_counts.max())
histplot(**kws, common_norm=False, ax=ax2)
for i, mesh in enumerate(ax2.collections):
assert mesh.get_clim() == (0, sub_counts[i].max())
histplot(**kws, common_norm=True, pmax=pmax, pthresh=pthresh, ax=ax3)
for i, mesh in enumerate(ax3.collections):
assert mesh.get_clim() == (0, f(full_counts, pmax))
assert_array_equal(
mesh.get_array().mask,
(sub_counts[i] <= f(full_counts, pthresh)).T.flat,
)
histplot(**kws, common_norm=False, pmax=pmax, pthresh=pthresh, ax=ax4)
for i, mesh in enumerate(ax4.collections):
assert mesh.get_clim() == (0, f(sub_counts[i], pmax))
assert_array_equal(
mesh.get_array().mask,
(sub_counts[i] <= f(sub_counts[i], pthresh)).T.flat,
)
def test_colorbar(self, long_df):
f, ax = plt.subplots()
histplot(long_df, x="x", y="y", cbar=True, ax=ax)
assert len(ax.figure.axes) == 2
f, (ax, cax) = plt.subplots(2)
histplot(long_df, x="x", y="y", cbar=True, cbar_ax=cax, ax=ax)
assert len(ax.figure.axes) == 2
class TestECDFPlotUnivariate(SharedAxesLevelTests):
func = staticmethod(ecdfplot)
def get_last_color(self, ax):
return to_rgb(ax.lines[-1].get_color())
@pytest.mark.parametrize("variable", ["x", "y"])
def test_long_vectors(self, long_df, variable):
vector = long_df[variable]
vectors = [
variable, vector, vector.to_numpy(), vector.to_list(),
]
f, ax = plt.subplots()
for vector in vectors:
ecdfplot(data=long_df, ax=ax, **{variable: vector})
xdata = [l.get_xdata() for l in ax.lines]
for a, b in itertools.product(xdata, xdata):
assert_array_equal(a, b)
ydata = [l.get_ydata() for l in ax.lines]
for a, b in itertools.product(ydata, ydata):
assert_array_equal(a, b)
def test_hue(self, long_df):
ax = ecdfplot(long_df, x="x", hue="a")
for line, color in zip(ax.lines[::-1], color_palette()):
assert_colors_equal(line.get_color(), color)
def test_line_kwargs(self, long_df):
color = "r"
ls = "--"
lw = 3
ax = ecdfplot(long_df, x="x", color=color, ls=ls, lw=lw)
for line in ax.lines:
assert_colors_equal(line.get_color(), color)
assert line.get_linestyle() == ls
assert line.get_linewidth() == lw
@pytest.mark.parametrize("data_var", ["x", "y"])
def test_drawstyle(self, flat_series, data_var):
ax = ecdfplot(**{data_var: flat_series})
drawstyles = dict(x="steps-post", y="steps-pre")
assert ax.lines[0].get_drawstyle() == drawstyles[data_var]
@pytest.mark.parametrize(
"data_var,stat_var", [["x", "y"], ["y", "x"]],
)
def test_proportion_limits(self, flat_series, data_var, stat_var):
ax = ecdfplot(**{data_var: flat_series})
data = getattr(ax.lines[0], f"get_{stat_var}data")()
assert data[0] == 0
assert data[-1] == 1
sticky_edges = getattr(ax.lines[0].sticky_edges, stat_var)
assert sticky_edges[:] == [0, 1]
@pytest.mark.parametrize(
"data_var,stat_var", [["x", "y"], ["y", "x"]],
)
def test_proportion_limits_complementary(self, flat_series, data_var, stat_var):
ax = ecdfplot(**{data_var: flat_series}, complementary=True)
data = getattr(ax.lines[0], f"get_{stat_var}data")()
assert data[0] == 1
assert data[-1] == 0
sticky_edges = getattr(ax.lines[0].sticky_edges, stat_var)
assert sticky_edges[:] == [0, 1]
@pytest.mark.parametrize(
"data_var,stat_var", [["x", "y"], ["y", "x"]],
)
def test_proportion_count(self, flat_series, data_var, stat_var):
n = len(flat_series)
ax = ecdfplot(**{data_var: flat_series}, stat="count")
data = getattr(ax.lines[0], f"get_{stat_var}data")()
assert data[0] == 0
assert data[-1] == n
sticky_edges = getattr(ax.lines[0].sticky_edges, stat_var)
assert sticky_edges[:] == [0, n]
def test_weights(self):
ax = ecdfplot(x=[1, 2, 3], weights=[1, 1, 2])
y = ax.lines[0].get_ydata()
        assert_array_equal(y, [0, .25, .5, 1])
import jax.numpy as jnp
from jax import grad, vmap, hessian
from jax.config import config
config.update("jax_enable_x64", True)
# numpy
import numpy as onp
from numpy import random
import argparse
import logging
import datetime
from time import time
import os
from scipy.special import gamma
def volumeball(d,R):
return onp.pi**(d/2)/gamma(d/2+1)*R**d
def areasphere(d,R):
return d*onp.pi**(d/2)/gamma(d/2+1)*R**(d-1)
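# Heuristic number of boundary collocation points: scales like N_int**(1 - 1/d),
# weighted by the sphere-area-to-ball-volume ratio, so the boundary sampling
# density roughly tracks the interior sampling density.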
def Nbd(N_int,d):
return N_int ** (1-1/d) * areasphere(d,1)/volumeball(d-1,1)
# solving -grad(a*grad u) + alpha u^m = f
def get_parser():
parser = argparse.ArgumentParser(description='NonLinElliptic equation GP solver')
parser.add_argument("--freq_a", type=float, default = 1.0)
parser.add_argument("--freq_u", type=float, default = 1.0)
parser.add_argument("--alpha", type=float, default = 1.0)
parser.add_argument("--m", type = int, default = 3)
parser.add_argument("--dim_low", type = int, default = 2)
parser.add_argument("--dim_high", type = int, default = 6)
parser.add_argument("--kernel", type=str, default="Matern_7half", choices=["gaussian","inv_quadratics","Matern_3half","Matern_5half","Matern_7half","Matern_9half","Matern_11half"])
parser.add_argument("--sigma-scale", type = float, default = 0.25)
    # sigma = args.sigma_scale * sqrt(dim)
parser.add_argument("--N_domain", type = int, default = 1000)
parser.add_argument("--N_test", type = int, default = 4000)
parser.add_argument("--nugget", type = float, default = 1e-10)
parser.add_argument("--GNsteps", type = int, default = 8)
parser.add_argument("--logroot", type=str, default='./logs_AutoNbd/')
parser.add_argument("--randomseed", type=int, default=1)
parser.add_argument("--num_exp", type=int, default=10)
args = parser.parse_args()
return args
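# The helpers below assemble kernel entries for the Gauss-Newton iteration of the
# GP-based PDE solver: each collocation "measurement" is a linear functional of u
# (a weighted combination of a Dirac delta wx0, a directional derivative wxg, and a
# Laplacian wx1), and a kernel entry is obtained by applying these functionals to
# each argument of the base kernel kappa.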
def get_GNkernel_train(x,y,wx0,wx1,wxg,wy0,wy1,wyg,d,sigma):
# wx0 * delta_x + wxg * nabla delta_x + wx1 * Delta delta_x
    return (wx0*wy0*kappa(x,y,d,sigma)
            + wx0*wy1*Delta_y_kappa(x,y,d,sigma)
            + wy0*wx1*Delta_x_kappa(x,y,d,sigma)
            + wx1*wy1*Delta_x_Delta_y_kappa(x,y,d,sigma)
            + wx0*D_wy_kappa(x,y,d,sigma,wyg)
            + wy0*D_wx_kappa(x,y,d,sigma,wxg)
            + wx1*Delta_x_D_wy_kappa(x,y,d,sigma,wyg)
            + wy1*D_wx_Delta_y_kappa(x,y,d,sigma,wxg)
            + D_wx_D_wy_kappa(x,y,d,sigma,wxg,wyg))
def get_GNkernel_train_boundary(x,y,wy0,wy1,wyg,d,sigma):
return wy0*kappa(x,y,d,sigma) + wy1*Delta_y_kappa(x,y,d,sigma) + D_wy_kappa(x,y,d,sigma,wyg)
def get_GNkernel_val_predict(x,y,wy0,wy1,wyg,d,sigma):
return wy0*kappa(x,y,d,sigma) + wy1*Delta_y_kappa(x,y,d,sigma) + D_wy_kappa(x,y,d,sigma,wyg)
def assembly_Theta(X_domain, X_boundary, w0, w1, wg, sigma):
# X_domain, dim: N_domain*d;
# w0 col vec: coefs of Diracs, dim: N_domain;
# w1 coefs of Laplacians, dim: N_domain
N_domain,d = onp.shape(X_domain)
N_boundary,_ = onp.shape(X_boundary)
Theta = onp.zeros((N_domain+N_boundary,N_domain+N_boundary))
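    # Theta collects all pairwise kernel evaluations between interior (domain) and
    # boundary collocation points, blocked as
    # [[domain-domain, domain-boundary], [boundary-domain, boundary-boundary]].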
XdXd0 = onp.reshape(onp.tile(X_domain,(1,N_domain)),(-1,d))
XdXd1 = onp.tile(X_domain,(N_domain,1))
XbXd0 = onp.reshape(onp.tile(X_boundary,(1,N_domain)),(-1,d))
XbXd1 = onp.tile(X_domain,(N_boundary,1))
XbXb0 = onp.reshape(onp.tile(X_boundary,(1,N_boundary)),(-1,d))
XbXb1 = onp.tile(X_boundary,(N_boundary,1))
arr_wx0 = onp.reshape(onp.tile(w0,(1,N_domain)),(-1,1))
arr_wx1 = onp.reshape(onp.tile(w1,(1,N_domain)),(-1,1))
arr_wxg = onp.reshape(onp.tile(wg,(1,N_domain)),(-1,d))
arr_wy0 = onp.tile(w0,(N_domain,1))
arr_wy1 = onp.tile(w1,(N_domain,1))
    arr_wyg = onp.tile(wg,(N_domain,1))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 19 15:32:18 2021
@author: nick
"""
import numpy as np
class LinThompsonBandit:
def __init__(self,arms,horizon,theta,sigma=1,L=1,m2=1):
"""
        Implementation of the linear Thompson sampling algorithm for multi-armed bandits
Parameters
----------
        arms : ndarray of shape (n_arms, d)
            Feature vectors of the available actions, one per row.
        horizon : int
            Number of rounds to play.
        theta : ndarray of shape (d,)
            True parameter vector used to generate rewards.
        sigma : float, optional
            Standard deviation of the Gaussian noise added to rewards. The default is 1.
Returns
-------
None.
"""
self.arms = arms
self.horizon = horizon
# True theta used to generate rewards
self.theta = theta
self.sigma = sigma
# Dimension of the action space
self.d = self.arms.shape[1]
# Number of possible actions (arms)
self.n = self.arms.shape[0]
# V matrix used to compute estimate of theta
self.V = np.eye(self.d)
self.VInv = np.linalg.inv(self.V)
# Track running sum of rewards times arms
self.rewardsTimesArms = np.zeros((self.d,))
# Current round
self.t = 1
# Define prior parameters
self.priorMean = np.zeros((self.d,))
self.priorCovar = np.eye(self.d)
# Store posterior params here. We are using MVG prior w/ assumed known identity covar
self.posteriorMeans = np.zeros((self.horizon,self.d))
self.posteriorCovars = np.zeros((self.horizon,self.d,self.d))
self.posteriorMeans[0,:] = self.priorMean
self.posteriorCovars[0,:] = self.priorCovar
# Store the number of times each arms has been played here
self.num_plays = np.zeros((self.n,))
# Store our estimates of theta here
self.estimates = np.zeros((self.horizon,self.d))
# Store the rewards here
self.rewards = np.zeros((self.horizon,))
# Record which arm is pulled in each round here
self.history = np.zeros(shape=(self.horizon,)).astype(int)
# Record regret at each round here
self.regret = np.zeros(shape=(self.horizon,))
# Compute the maximum possible reward (for computing regret)
self.opt_reward = np.max(np.sum(self.arms * self.theta, axis=1))
def pull(self,arm):
"""
Pull arm and generate random reward
Parameters
----------
arm : int
Index of the arm to pull
Returns
-------
outcome : float
The random reward.
"""
action = self.arms[arm]
        outcome = np.dot(action, self.theta) + np.random.normal(0, self.sigma)
return outcome
def estimate(self):
"""
Compute the regularized least squares estimator for theta. This should
happen when self.t is up-to-date.
Returns
-------
        thetaHat : ndarray of shape (d,)
The regularized least squares estimator of theta
"""
# From equation 19.5 in Szepsvari, Lattimore
# b = np.sum(self.rewards[:self.t-1,None]*self.arms[self.history[:self.t-1]],axis=0)
thetaHat = self.VInv @ self.rewardsTimesArms
return thetaHat
def chooseArm(self):
"""
Choose the best arm by drawing theta_t from posterior and choosing action
which maximizes reward.
Returns
-------
arm : int
Index of the best arm to play in current round
"""
posteriorMean = self.estimates[self.t-2]
# posteriorCovar = np.linalg.inv(self.V)
posteriorCovar = self.VInv
self.posteriorMeans[self.t-1,:] = posteriorMean
self.posteriorCovars[self.t-1,:,:] = posteriorCovar
# Draw theta_t from posterior
theta_t = np.random.multivariate_normal(posteriorMean, posteriorCovar, size=1)
# Choose arm which maximizes inner product with theta_t
objFn = np.sum(self.arms * theta_t, axis=1)
optArm = np.argmax(objFn)
return optArm
def update(self,arm,outcome):
"""
Update the state of the bandit after a round.
Parameters
----------
arm : int
Index of the arm that was played.
outcome : float
The random reward which was observed.
Returns
-------
None.
"""
_arm = self.arms[arm]
# Update V matrix and its inverse
B = np.outer(_arm,_arm)
self.V += B
# Invert the new V using a neat trick: https://math.stackexchange.com/questions/17776/inverse-of-the-sum-of-matrices
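        # Sherman-Morrison: (V + x x^T)^{-1} = V^{-1} - (V^{-1} x x^T V^{-1}) / (1 + x^T V^{-1} x);
        # for the rank-one B = x x^T, trace(B @ VInv) equals x^T VInv x.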
self.VInv = self.VInv - (self.VInv @ B @ self.VInv)/(1+np.trace(B @ self.VInv))
self.rewardsTimesArms += _arm * outcome
# Compute new estimate of theta if we have played all of the arms once
thetaHat = self.estimate()
self.estimates[self.t-1] = thetaHat
# Update the state of the bandit
self.history[self.t-1] = arm
self.num_plays[arm] += 1
self.rewards[self.t-1] = outcome
self.regret[self.t-1] = self.opt_reward - np.dot(_arm, self.theta)
# Increment the round
self.t += 1
if self.t%1000 == 0:
print(self.t)
def play(self):
"""
        Play the bandit using the linear Thompson sampling algorithm
Returns
-------
None.
"""
while self.t <= self.horizon:
arm = self.chooseArm()
reward = self.pull(arm)
self.update(arm,reward)
if __name__=='__main__':
np.random.seed(1234)
d = 100
n = 100
T = 10000
theta = np.zeros((d,))
theta[0] = 1
# Draw points on unit sphere
    X = np.random.multivariate_normal(np.zeros(d), np.eye(d), n)
import numpy as np
from planner import PolicyIterationPlanner
from tqdm import tqdm
class MaxEntIRL():
def __init__(self, env):
self.env = env
self.planner = PolicyIterationPlanner(env)
def estimate(self, trajectories, epoch=20, learning_rate=0.01, gamma=0.9):
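        # Maximum-entropy IRL: adjust the reward weights theta so that the expected
        # state-feature counts under the induced policy match the empirical feature
        # counts of the demonstrated trajectories.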
state_features = np.vstack([self.env.state_to_feature(s)
for s in self.env.states])
        theta = np.random.uniform(size=state_features.shape[1])
# ---------------------------------------------------------------
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the NVIDIA Source Code License for
# CLD-SGM. To view a copy of this license, see the LICENSE file.
# ---------------------------------------------------------------
import os
import numpy as np
import torch
import torch.distributed as dist
from torchvision.utils import make_grid
import matplotlib.pyplot as plt
import logging
from scipy import linalg
from torch.optim import Adamax, AdamW
try:
from apex.optimizers import FusedAdam as Adam
except ImportError:
logging.info('Apex is not available. Falling back to PyTorch\'s native Adam. Install Apex for faster training.')
from torch.optim import Adam as Adam
def make_dir(dir):
if not os.path.exists(dir):
os.makedirs(dir)
else:
raise ValueError('Directory already exists.')
def optimization_manager(config):
def optimize_fn(optimizer,
params,
step,
scaler=None,
lr=config.learning_rate,
grad_clip=config.grad_clip):
if config.n_warmup_iters > 0 and step <= config.n_warmup_iters:
for g in optimizer.param_groups:
g['lr'] = lr * np.minimum(step / config.n_warmup_iters, 1.0)
if scaler is None:
if grad_clip is not None:
torch.nn.utils.clip_grad_norm_(params, max_norm=grad_clip)
optimizer.step()
else:
if grad_clip is not None:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(params, max_norm=grad_clip)
scaler.step(optimizer)
scaler.update()
return optimize_fn
def get_optimizer(config, params):
if config.optimizer == 'Adam':
optimizer = Adam(params,
lr=config.learning_rate,
weight_decay=config.weight_decay)
elif config.optimizer == 'Adamax':
optimizer = Adamax(params,
lr=config.learning_rate,
weight_decay=config.weight_decay)
elif config.optimizer == 'AdamW':
optimizer = AdamW(params,
lr=config.learning_rate,
weight_decay=config.weight_decay)
else:
raise NotImplementedError('Optimizer %s is not supported.' % config.optimizer)
return optimizer
def get_data_scaler(config):
if config.center_image and config.is_image:
return lambda x: x * 2. - 1. # Rescale from [0, 1] to [-1, 1]
else:
return lambda x: x
def get_data_inverse_scaler(config):
if config.center_image and config.is_image:
return lambda x: (x + 1.) / 2. # Rescale from [-1, 1] to [0, 1]
else:
return lambda x: x
def compute_bpd_from_nll(nll, D, inverse_scaler):
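    # Convert a negative log-likelihood in nats to bits per dimension; the additive
    # offset accounts for the scaling/quantization convention of the input data.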
offset = 7 - inverse_scaler(-1)
bpd = nll / (np.log(2.) * D) + offset
return bpd
def batched_cov(x):
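    # x has shape (batch, n_samples, dim); return one (dim, dim) covariance matrix
    # per batch entry.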
covars = np.empty((x.shape[0], x.shape[2], x.shape[2]))
for i in range(x.shape[0]):
covars[i] = np.cov(x[i], rowvar=False)
return covars
def reduce_tensor(tensor, world_size):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= world_size
return rt
def concatenate(tensor, world_size):
tensor_list = [torch.empty_like(tensor) for _ in range(world_size)]
dist.all_gather(tensor_list, tensor)
return torch.cat(tensor_list)
def split_tensor(tensor, global_rank, global_size):
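    # Partition the batch dimension evenly across ranks and return this rank's
    # contiguous chunk.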
if tensor.shape[0] / global_size - tensor.shape[0] // global_size > 1e-6:
raise ValueError('Tensor is not divisible by global size.')
return torch.chunk(tensor, global_size)[global_rank]
def set_seeds(rank, seed):
torch.manual_seed(rank + seed)
np.random.seed(rank + seed)
torch.cuda.manual_seed(rank + seed)
torch.cuda.manual_seed_all(rank + seed)
torch.backends.cudnn.benchmark = True
def broadcast_params(params):
for param in params:
dist.broadcast(param.data, src=0)
def save_img(x, filename, figsize=None):
figsize = figsize if figsize is not None else (6, 6)
    nrow = int(np.sqrt(x.shape[0]))
# Import folder where sorting algorithms
import sys
import unittest
import numpy as np
# For importing from different folders
# OBS: This is supposed to be done with automated testing,
# hence relative to folder we want to import from
sys.path.append("ML/algorithms/linearregression")
# If run from local:
# sys.path.append('../../ML/algorithms/linearregression/')
from linear_regression_normal_equation import linear_regression_normal_equation
class TestLinearRegression_NormalEq(unittest.TestCase):
def setUp(self):
# test cases we want to run
self.X1 = np.array([[0, 1, 2]]).T
self.y1 = np.array([1, 2, 3])
self.W1_correct = np.array([[1, 1]])
self.X2 = np.array([[0, 1]]).T
self.y2 = np.array([1, 0])
self.W2_correct = np.array([[1, -1]])
self.X3 = np.array([[1, 2, 3], [1, 2, 4]]).T
self.y3 = np.array([5, 10, 18])
self.W3_correct = np.array([[0, 2, 3]])
self.X4 = np.array([[0, 0]]).T
self.y4 = np.array([0, 0])
self.W4_correct = np.array([[0, 0]])
self.X5 = np.array([[0, 1, 2, 3, 4, 5]]).T
self.y5 = np.array([0, 0.99, 2.01, 2.99, 4.01, 4.99])
self.W5_correct = np.array([[0, 1]])
def test_perfectpositiveslope(self):
W = linear_regression_normal_equation(self.X1, self.y1)
print(W.shape)
print(self.W1_correct.shape)
boolean_array = np.isclose(W, self.W1_correct)
self.assertTrue(boolean_array.all())
def test_perfectnegativeslope(self):
W = linear_regression_normal_equation(self.X2, self.y2)
boolean_array = np.isclose(W, self.W2_correct)
self.assertTrue(boolean_array.all())
def test_multipledimension(self):
W = linear_regression_normal_equation(self.X3, self.y3)
print(W)
print(self.W3_correct)
boolean_array = np.isclose(W, self.W3_correct)
self.assertTrue(boolean_array.all())
def test_zeros(self):
W = linear_regression_normal_equation(self.X4, self.y4)
boolean_array = np.isclose(W, self.W4_correct)
self.assertTrue(boolean_array.all())
def test_noisydata(self):
W = linear_regression_normal_equation(self.X5, self.y5)
        boolean_array = np.isclose(W, self.W5_correct, atol=1e-3)
        self.assertTrue(boolean_array.all())
import pytest
import numpy as np
from scipy.signal import fftconvolve
from dicodile.utils.csc import _choose_convolve_multi,\
_dense_transpose_convolve, compute_ztz,\
_dense_convolve_multi_uv, reconstruct
from dicodile.utils.dictionary import compute_DtD, get_D
from dicodile.utils import check_random_state
from dicodile.utils.shape_helpers import get_valid_support
@pytest.mark.parametrize('valid_support, atom_support', [((500,), (30,)),
((72, 60), (10, 8))])
@pytest.mark.parametrize('sparsity', [1, .01])
def test_ztz(valid_support, atom_support, sparsity):
n_atoms = 7
n_channels = 5
random_state = None
rng = check_random_state(random_state)
z = rng.randn(n_atoms, *valid_support)
z *= rng.rand(*z.shape) < sparsity
D = rng.randn(n_atoms, n_channels, *atom_support)
ztz = compute_ztz(z, atom_support)
grad = np.sum([[[fftconvolve(ztz_k0_k, d_kp, mode='valid') for d_kp in d_k]
for ztz_k0_k, d_k in zip(ztz_k0, D)]
for ztz_k0 in ztz], axis=1)
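    # Contracting D with (z^T z) convolved with D gives <z * D, z * D>, i.e. the
    # energy of the reconstruction, which is what the assertion below checks.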
cost = np.dot(D.ravel(), grad.ravel())
X_hat = reconstruct(z, D)
assert np.isclose(cost, np.dot(X_hat.ravel(), X_hat.ravel()))
@pytest.mark.parametrize('atom_support',
[(35, ), (40, 30), (10, 12, 3)])
def test_dense_convolve_multi_uv_shape(atom_support):
n_channels = 3
sig_shape = (n_channels, *[8 * n for n in atom_support])
n_atoms = 25
valid_support = get_valid_support(sig_support=sig_shape[1:],
atom_support=atom_support)
z_hat = np.ones((n_atoms, *valid_support))
u = np.ones((n_atoms, n_channels))
v = np.ones((n_atoms, *atom_support))
Xi = _dense_convolve_multi_uv(z_hat, (u, v))
assert Xi.shape == sig_shape
def test_convolve_uv_and_convolve_d_match():
n_channels = 3
sig_shape = (n_channels, 800, 600)
atom_shape = (n_channels, 40, 30)
atom_support = atom_shape[1:]
n_atoms = 20
valid_support = get_valid_support(sig_support=sig_shape[1:],
atom_support=atom_support)
rng = np.random.default_rng(seed=42)
z_hat = rng.uniform(size=(n_atoms, *valid_support))
u = rng.uniform(size=(n_atoms, n_channels))
v = rng.uniform(size=(n_atoms, *atom_support))
uv_convolution = _choose_convolve_multi(z_hat, (u, v))
d = get_D(u, v)
d_convolution = _choose_convolve_multi(z_hat, d)
assert np.allclose(uv_convolution, d_convolution)
@pytest.mark.parametrize('atom_support',
[(35, ), (40, 30), (10, 12, 3)])
def test_rank1_DtD_matches_full_DtD(atom_support):
n_channels = 3
n_atoms = 25
rng = np.random.default_rng(seed=42)
u = rng.uniform(size=(n_atoms, n_channels))
v = rng.uniform(size=(n_atoms, *atom_support))
D = get_D(u, v)
d_dtd = compute_DtD(D)
uv_dtd = compute_DtD((u, v))
assert np.allclose(d_dtd, uv_dtd)
def test_dense_transpose_convolve_uv_and_D_match():
n_channels = 3
n_atoms = 25
atom_support = (40, 32)
signal_support = (800, 600)
rng = | np.random.default_rng(seed=42) | numpy.random.default_rng |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Module houses `TextFileDispatcher` class.
`TextFileDispatcher` contains utils for text format files, inherits util functions for
files from the `FileDispatcher` class and can be used as a base class for dispatchers of SQL queries.
"""
import warnings
import os
import io
import codecs
from typing import Union, Sequence, Optional, Tuple, Callable
from csv import QUOTE_NONE
import numpy as np
import pandas
import pandas._libs.lib as lib
from pandas.core.dtypes.common import is_list_like
from modin.core.io.file_dispatcher import FileDispatcher, OpenFile
from modin.core.storage_formats.pandas.utils import compute_chunksize
from modin.utils import _inherit_docstrings
from modin.core.io.text.utils import CustomNewlineIterator
from modin.config import NPartitions
from modin.error_message import ErrorMessage
ColumnNamesTypes = Tuple[Union[pandas.Index, pandas.MultiIndex]]
IndexColType = Union[int, str, bool, Sequence[int], Sequence[str], None]
class TextFileDispatcher(FileDispatcher):
"""Class handles utils for reading text formats files."""
# The variable allows setting a function with which one partition will be read;
# used in dispatchers and parsers
read_callback = None
@classmethod
def get_path_or_buffer(cls, filepath_or_buffer):
"""
Extract path from `filepath_or_buffer`.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
`filepath_or_buffer` parameter of `read_csv` function.
Returns
-------
str or path object
verified `filepath_or_buffer` parameter.
Notes
-----
Given a buffer, try and extract the filepath from it so that we can
use it without having to fall back to pandas and share file objects between
workers. Given a filepath, return it immediately.
"""
if hasattr(filepath_or_buffer, "name"):
buffer_filepath = filepath_or_buffer.name
if cls.file_exists(buffer_filepath):
warnings.warn(
"For performance reasons, the filepath will be "
+ "used in place of the file handle passed in "
+ "to load the data"
)
return cls.get_path(buffer_filepath)
return filepath_or_buffer
@classmethod
def build_partition(cls, partition_ids, row_lengths, column_widths):
"""
Build array with partitions of `cls.frame_partition_cls` class.
Parameters
----------
partition_ids : list
Array with references to the partitions data.
row_lengths : list
Partition row lengths.
column_widths : list
Number of columns in each partition.
Returns
-------
np.ndarray
array with shape equal to the shape of `partition_ids` and
filled with partition objects.
"""
return np.array(
[
[
cls.frame_partition_cls(
partition_ids[i][j],
length=row_lengths[i],
width=column_widths[j],
)
for j in range(len(partition_ids[i]))
]
for i in range(len(partition_ids))
]
)
@classmethod
def pathlib_or_pypath(cls, filepath_or_buffer):
"""
Check if `filepath_or_buffer` is instance of `py.path.local` or `pathlib.Path`.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
`filepath_or_buffer` parameter of `read_csv` function.
Returns
-------
bool
Whether or not `filepath_or_buffer` is instance of `py.path.local`
or `pathlib.Path`.
"""
try:
import py
if isinstance(filepath_or_buffer, py.path.local):
return True
except ImportError: # pragma: no cover
pass
try:
import pathlib
if isinstance(filepath_or_buffer, pathlib.Path):
return True
except ImportError: # pragma: no cover
pass
return False
@classmethod
def offset(
cls,
f,
offset_size: int,
quotechar: bytes = b'"',
is_quoting: bool = True,
encoding: str = None,
newline: bytes = None,
):
"""
Move the file offset at the specified amount of bytes.
Parameters
----------
f : file-like object
File handle that should be used for offset movement.
offset_size : int
Number of bytes to read and ignore.
quotechar : bytes, default: b'"'
Indicate quote in a file.
is_quoting : bool, default: True
Whether or not to consider quotes.
encoding : str, optional
Encoding of `f`.
newline : bytes, optional
Byte or sequence of bytes indicating line endings.
Returns
-------
bool
If file pointer reached the end of the file, but did not find
closing quote returns `False`. `True` in any other case.
"""
if is_quoting:
chunk = f.read(offset_size)
outside_quotes = not chunk.count(quotechar) % 2
else:
f.seek(offset_size, os.SEEK_CUR)
outside_quotes = True
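# Note: the parity check above treats an even number of `quotechar` occurrences in the
# chunk as "the offset landed outside any quoted field"; an odd count means the read
# stopped inside a quoted field and the closing quote still has to be found.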
# after reading `offset_size` bytes we have most likely stopped in the middle of a line;
# the modin implementation doesn't handle partial lines correctly, so we must
# make sure the line is read completely up to the line terminator,
# which is what `_read_rows` does
outside_quotes, _ = cls._read_rows(
f,
nrows=1,
quotechar=quotechar,
is_quoting=is_quoting,
outside_quotes=outside_quotes,
encoding=encoding,
newline=newline,
)
return outside_quotes
@classmethod
def partitioned_file(
cls,
f,
num_partitions: int = None,
nrows: int = None,
skiprows: int = None,
quotechar: bytes = b'"',
is_quoting: bool = True,
encoding: str = None,
newline: bytes = None,
header_size: int = 0,
pre_reading: int = 0,
):
"""
Compute chunk sizes in bytes for every partition.
Parameters
----------
f : file-like object
File handle of file to be partitioned.
num_partitions : int, optional
For what number of partitions split a file.
If not specified grabs the value from `modin.config.NPartitions.get()`.
nrows : int, optional
Number of rows of file to read.
skiprows : int, optional
Specifies rows to skip.
quotechar : bytes, default: b'"'
Indicate quote in a file.
is_quoting : bool, default: True
Whether or not to consider quotes.
encoding : str, optional
Encoding of `f`.
newline : bytes, optional
Byte or sequence of bytes indicating line endings.
header_size : int, default: 0
Number of rows that are occupied by the header.
pre_reading : int, default: 0
Number of rows between the header and the skipped rows that should be read.
Returns
-------
list
List of tuples, each with the following elements:
int : partition start read byte
int : partition end read byte
"""
read_rows_counter = 0
outside_quotes = True
if num_partitions is None:
num_partitions = NPartitions.get() - 1 if pre_reading else NPartitions.get()
rows_skipper = cls.rows_skipper_builder(
f, quotechar, is_quoting=is_quoting, encoding=encoding, newline=newline
)
result = []
file_size = cls.file_size(f)
rows_skipper(header_size)
if pre_reading:
pre_reading_start = f.tell()
outside_quotes, read_rows = cls._read_rows(
f,
nrows=pre_reading,
quotechar=quotechar,
is_quoting=is_quoting,
outside_quotes=outside_quotes,
encoding=encoding,
newline=newline,
)
read_rows_counter += read_rows
result.append((pre_reading_start, f.tell()))
# add outside_quotes
if is_quoting and not outside_quotes:
warnings.warn("File has mismatched quotes")
rows_skipper(skiprows)
start = f.tell()
if nrows:
partition_size = max(1, num_partitions, nrows // num_partitions)
while f.tell() < file_size and read_rows_counter < nrows:
if read_rows_counter + partition_size > nrows:
# it's possible only if is_quoting==True
partition_size = nrows - read_rows_counter
outside_quotes, read_rows = cls._read_rows(
f,
nrows=partition_size,
quotechar=quotechar,
is_quoting=is_quoting,
encoding=encoding,
newline=newline,
)
result.append((start, f.tell()))
start = f.tell()
read_rows_counter += read_rows
# add outside_quotes
if is_quoting and not outside_quotes:
warnings.warn("File has mismatched quotes")
else:
partition_size = max(1, num_partitions, file_size // num_partitions)
while f.tell() < file_size:
outside_quotes = cls.offset(
f,
offset_size=partition_size,
quotechar=quotechar,
is_quoting=is_quoting,
encoding=encoding,
newline=newline,
)
result.append((start, f.tell()))
start = f.tell()
# add outside_quotes
if is_quoting and not outside_quotes:
warnings.warn("File has mismatched quotes")
return result
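# Usage sketch (assumes the surrounding read_csv/read_fwf flow; parameter values are
# illustrative only): the (start, end) byte ranges returned above are typically passed
# on to `_launch_tasks`, roughly as
# splits = cls.partitioned_file(f, num_partitions=4)
# partition_ids, index_ids, dtypes_ids = cls._launch_tasks(splits, **partition_kwargs)
# where `partition_kwargs` is expected to contain a "num_splits" entry used by the parser.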
@classmethod
def _read_rows(
cls,
f,
nrows: int,
quotechar: bytes = b'"',
is_quoting: bool = True,
outside_quotes: bool = True,
encoding: str = None,
newline: bytes = None,
):
"""
Move the file offset at the specified amount of rows.
Parameters
----------
f : file-like object
File handle that should be used for offset movement.
nrows : int
Number of rows to read.
quotechar : bytes, default: b'"'
Indicate quote in a file.
is_quoting : bool, default: True
Whether or not to consider quotes.
outside_quotes : bool, default: True
Whether the file pointer is within quotes or not at the time this function is called.
encoding : str, optional
Encoding of `f`.
newline : bytes, optional
Byte or sequence of bytes indicating line endings.
Returns
-------
bool
If file pointer reached the end of the file, but did not find closing quote
returns `False`. `True` in any other case.
int
Number of rows that were read.
"""
if nrows is not None and nrows <= 0:
return True, 0
rows_read = 0
if encoding and (
"utf" in encoding
and "8" not in encoding
or encoding == "unicode_escape"
or encoding.replace("-", "_") == "utf_8_sig"
):
iterator = CustomNewlineIterator(f, newline)
else:
iterator = f
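# Note: the CustomNewlineIterator branch above covers encodings (UTF-16/32, unicode_escape,
# utf-8-sig) for which plain byte-wise line iteration can split multi-byte characters or
# miss line endings, so a newline-aware iterator is used instead.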
for line in iterator:
if is_quoting and line.count(quotechar) % 2:
outside_quotes = not outside_quotes
if outside_quotes:
rows_read += 1
if rows_read >= nrows:
break
if isinstance(iterator, CustomNewlineIterator):
iterator.seek()
# case when EOF
if not outside_quotes:
rows_read += 1
return outside_quotes, rows_read
@classmethod
def compute_newline(cls, file_like, encoding, quotechar):
"""
Compute byte or sequence of bytes indicating line endings.
Parameters
----------
file_like : file-like object
File handle that should be used for line endings computing.
encoding : str
Encoding of `file_like`.
quotechar : str
Quotechar used for parsing `file-like`.
Returns
-------
bytes
line endings
"""
newline = None
if encoding is None:
return newline, quotechar.encode("UTF-8")
quotechar = quotechar.encode(encoding)
encoding = encoding.replace("-", "_")
if (
"utf" in encoding
and "8" not in encoding
or encoding == "unicode_escape"
or encoding == "utf_8_sig"
):
# trigger for computing f.newlines
file_like.readline()
# in bytes
newline = file_like.newlines.encode(encoding)
boms = ()
if encoding == "utf_8_sig":
boms = (codecs.BOM_UTF8,)
elif "16" in encoding:
boms = (codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE)
elif "32" in encoding:
boms = (codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE)
for bom in boms:
if newline.startswith(bom):
bom_len = len(bom)
newline = newline[bom_len:]
quotechar = quotechar[bom_len:]
break
return newline, quotechar
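# Illustrative example of the BOM handling above: with encoding="utf_16",
# "\n".encode("utf_16") is b"\xff\xfe\n\x00", so the BOM prefix b"\xff\xfe" is stripped
# from both `newline` and `quotechar` to keep later byte-level searches consistent.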
# _read helper functions
@classmethod
def rows_skipper_builder(
cls, f, quotechar, is_quoting, encoding=None, newline=None
):
"""
Build object for skipping passed number of lines.
Parameters
----------
f : file-like object
File handle that should be used for offset movement.
quotechar : bytes
Indicate quote in a file.
is_quoting : bool
Whether or not to consider quotes.
encoding : str, optional
Encoding of `f`.
newline : bytes, optional
Byte or sequence of bytes indicating line endings.
Returns
-------
object
skipper object.
"""
def skipper(n):
if n == 0 or n is None:
return 0
else:
return cls._read_rows(
f,
quotechar=quotechar,
is_quoting=is_quoting,
nrows=n,
encoding=encoding,
newline=newline,
)[1]
return skipper
@classmethod
def _define_header_size(
cls,
header: Union[int, Sequence[int], str, None] = "infer",
names: Optional[Sequence] = lib.no_default,
) -> int:
"""
Define the number of rows that are used by header.
Parameters
----------
header : int, list of int or str, default: "infer"
Original `header` parameter of `read_csv` function.
names : array-like, optional
Original names parameter of `read_csv` function.
Returns
-------
header_size : int
The number of rows that are used by header.
"""
header_size = 0
if header == "infer" and names in [lib.no_default, None]:
header_size += 1
elif isinstance(header, int):
header_size += header + 1
elif hasattr(header, "__iter__") and not isinstance(header, str):
header_size += max(header) + 1
return header_size
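# Worked example: header="infer" with no explicit names gives header_size=1 (a single
# header row); header=2 gives 3, since rows 0-2 are consumed; header=[0, 1, 3] gives 4,
# because every row up to and including the last header row is occupied by the header.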
@classmethod
def _define_metadata(
cls,
df: pandas.DataFrame,
column_names: ColumnNamesTypes,
) -> Tuple[list, int]:
"""
Define partitioning metadata.
Parameters
----------
df : pandas.DataFrame
The DataFrame to split.
column_names : ColumnNamesTypes
Column names of df.
Returns
-------
column_widths : list
Column width to use during new frame creation (number of
columns for each partition).
num_splits : int
The maximum number of splits to separate the DataFrame into.
"""
# This is the number of splits for the columns
num_splits = min(len(column_names) or 1, NPartitions.get())
column_chunksize = compute_chunksize(df.shape[1], num_splits)
if column_chunksize > len(column_names):
column_widths = [len(column_names)]
# This prevents us from unnecessarily serializing a bunch of empty
# objects.
num_splits = 1
else:
# split columns into chunks with maximal size column_chunksize, for example
# if num_splits == 4, len(column_names) == 80 and column_chunksize == 32,
# column_widths will be [32, 32, 16, 0]
column_widths = [
column_chunksize
if len(column_names) > (column_chunksize * (i + 1))
else 0
if len(column_names) < (column_chunksize * i)
else len(column_names) - (column_chunksize * i)
for i in range(num_splits)
]
return column_widths, num_splits
@classmethod
def _launch_tasks(cls, splits: list, **partition_kwargs) -> Tuple[list, list, list]:
"""
Launch tasks to read partitions.
Parameters
----------
splits : list
List of tuples with partitions data, which defines
parser task (start/end read bytes and etc.).
**partition_kwargs : dict
`kwargs` that should be passed to the parser function.
Returns
-------
partition_ids : list
array with references to the partitions data.
index_ids : list
array with references to the partitions index objects.
dtypes_ids : list
array with references to the partitions dtypes objects.
"""
partition_ids = [None] * len(splits)
index_ids = [None] * len(splits)
dtypes_ids = [None] * len(splits)
for idx, (start, end) in enumerate(splits):
partition_kwargs.update({"start": start, "end": end})
*partition_ids[idx], index_ids[idx], dtypes_ids[idx] = cls.deploy(
cls.parse,
num_returns=partition_kwargs.get("num_splits") + 2,
**partition_kwargs,
)
return partition_ids, index_ids, dtypes_ids
@classmethod
def check_parameters_support(
cls,
filepath_or_buffer,
read_kwargs: dict,
skiprows_md: Union[Sequence, callable, int],
header_size: int,
) -> bool:
"""
Check support of only general parameters of `read_*` function.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
`filepath_or_buffer` parameter of `read_*` function.
read_kwargs : dict
Parameters of `read_*` function.
skiprows_md : int, array or callable
`skiprows` parameter modified for easier handling by Modin.
header_size : int
Number of rows that are used by header.
Returns
-------
bool
Whether passed parameters are supported or not.
"""
skiprows = read_kwargs.get("skiprows")
if isinstance(filepath_or_buffer, str):
if not cls.file_exists(filepath_or_buffer):
return False
elif not cls.pathlib_or_pypath(filepath_or_buffer):
return False
if read_kwargs["chunksize"] is not None:
return False
skiprows_supported = True
if is_list_like(skiprows_md) and skiprows_md[0] < header_size:
skiprows_supported = False
elif callable(skiprows):
# check if `skiprows` callable gives True for any of header indices
is_intersection = any(
cls._get_skip_mask(pandas.RangeIndex(header_size), skiprows)
)
if is_intersection:
skiprows_supported = False
if not skiprows_supported:
ErrorMessage.single_warning(
"Values of `header` and `skiprows` parameters have intersections. "
+ "This case is unsupported by Modin, so pandas implementation will be used"
)
return False
return True
@classmethod
@_inherit_docstrings(pandas.io.parsers.base_parser.ParserBase._validate_usecols_arg)
def _validate_usecols_arg(cls, usecols):
msg = (
"'usecols' must either be list-like of all strings, all unicode, "
+ "all integers or a callable."
)
if usecols is not None:
if callable(usecols):
return usecols, None
if not is_list_like(usecols):
raise ValueError(msg)
usecols_dtype = lib.infer_dtype(usecols, skipna=False)
if usecols_dtype not in ("empty", "integer", "string"):
raise ValueError(msg)
usecols = set(usecols)
return usecols, usecols_dtype
return usecols, None
@classmethod
def _manage_skiprows_parameter(
cls,
skiprows: Union[int, Sequence[int], Callable, None] = None,
header_size: int = 0,
) -> Tuple[Union[int, Sequence, Callable], bool, int]:
"""
Manage `skiprows` parameter of read_csv and read_fwf functions.
Change the `skiprows` parameter so that Modin can process it more
optimally. `csv_dispatcher` and `fwf_dispatcher` have two mechanisms for skipping rows:
1) During file partitioning (setting the file limits that should be read
by each partition) exact rows can be excluded from the partitioning scope,
so they won't be read at all and can be considered skipped. This is
the most efficient way of skipping rows (since it doesn't require any
actual data reading or postprocessing), but in this case the `skiprows`
parameter can only be an integer. Whenever possible, Modin uses
this approach by setting the `skiprows_partitioning` return value.
2) Rows to skip can be dropped after the full dataset import. This is
the more expensive way since it requires extra IO work and postprocessing
afterwards, but the `skiprows` parameter can be of any non-integer type
supported by any pandas read function. These rows are
specified by setting the `skiprows_md` return value.
In some cases, if `skiprows` is a uniformly distributed array (e.g. [1,2,3]),
`skiprows` can be "squashed" and represented as an integer to enable a fastpath.
If there is a gap between the first row to skip and the last line of
the header (which will be skipped too), the first partition is assigned
to read this gap first (by setting the `pre_reading`
return value). See the `Examples` section for details.
Parameters
----------
skiprows : int, array or callable, optional
Original `skiprows` parameter of any pandas read function.
header_size : int, default: 0
Number of rows that are used by header.
Returns
-------
skiprows_md : int, array or callable
Updated skiprows parameter. If `skiprows` is an array, it
will be sorted. The parameter is also aligned to the
actual data in the `query_compiler` (which, for example,
doesn't contain header rows).
pre_reading : int
The number of rows that should be read before data file
splitting for further reading (the number of rows for
the first partition).
skiprows_partitioning : int
The number of rows that should be skipped virtually (skipped during
data file partitioning).
Examples
--------
Let's consider the case when `header`="infer" and `skiprows`=[3,4,5]. In
this specific case a fastpath can be taken since `skiprows` is a uniformly
distributed array, so we can "squash" it to an integer and set
`skiprows_partitioning`=3. But if nothing else is done,
these three rows will be skipped right after the header line, which corresponds
to `skiprows`=[1,2,3]. To avoid this discrepancy, we need to assign
the first partition to read the data between the header line and the first
row to skip by setting the `pre_reading` parameter, here
`pre_reading`=2. During data file partitioning, these lines will be assigned
to the first partition for reading, and then the file position will be set at
the beginning of the rows that should be skipped via `skiprows_partitioning`.
After skipping these rows, the remaining data will be divided between the
rest of the partitions, see the row assignment below:
0 - header line (skip during partitioning)
1 - pre_reading (assign to read by the first partition)
2 - pre_reading (assign to read by the first partition)
3 - skiprows_partitioning (skip during partitioning)
4 - skiprows_partitioning (skip during partitioning)
5 - skiprows_partitioning (skip during partitioning)
6 - data to partition (divide between the rest of partitions)
7 - data to partition (divide between the rest of partitions)
"""
pre_reading = skiprows_partitioning = skiprows_md = 0
if isinstance(skiprows, int):
skiprows_partitioning = skiprows
elif is_list_like(skiprows):
skiprows_md = np.sort(skiprows)
if np.all( | np.diff(skiprows_md) | numpy.diff |
import numpy as np
from numpy import array
import pandas as pd
import os
import glob
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, Activation, MaxPooling2D, Flatten
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
# enumerate pairs each sample with its index, e.g. [0, [22, 35, 42], ...]
# create_all_data reads the data, finds the row with the maximum length, then appends
# [0, 0, 0] triplets until every row has that maximum length (see the illustrative example after the function)
def create_all_data():
data = []
temp = []
max_len = 0
label_names = [f for f in os.listdir("HMP_Dataset")]
for label in label_names:
file_list = glob.glob(os.path.join(os.getcwd(), "HMP_Dataset/" + label, "*.txt"))
for file in file_list:
with open(file) as f:
for line in f:
line = line.split()
line = [int(i) for i in line]
temp.append(line)
data.append(temp)
temp = []
for row in data:
if len(row) > max_len:
max_len = len(row)
for index, row in enumerate(data):
while len(row) != max_len:
data[index].append([0, 0, 0])
return data
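# Illustrative example of the padding above: if the longest recording has 4 samples, a
# 2-sample recording [[22, 35, 42], [20, 34, 40]] is padded to
# [[22, 35, 42], [20, 34, 40], [0, 0, 0], [0, 0, 0]] so every row has the same length.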
def create_labels():
labels = []
label_names = [f for f in os.listdir("HMP_Dataset")]
for label in label_names:
file_list = glob.glob(os.path.join(os.getcwd(), "HMP_Dataset/" + label, "*.txt"))
for num in range(len(file_list)):
labels.append(label)
return labels
# data is a list of string labels; it is turned into an array called values
# LabelEncoder turns the string labels into integers between 0 and n-1, where n is the number of distinct labels
# fit_transform takes the array of strings and turns it into those integers
# the result is then reshaped so that there is one row per label
# OneHotEncoder.fit_transform then turns the integer in each row into a one-hot encoding (see the example after this function)
def create_onehot_labels(labels):
data = labels
values = array(data)
le = LabelEncoder()
num_labels = le.fit_transform(values)
num_labels = num_labels.reshape(len(num_labels), 1)
enc = OneHotEncoder(sparse=False, categories='auto')
onehot_labels = enc.fit_transform(num_labels)
return onehot_labels
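# Illustrative example: labels ["walk", "run", "walk"] are label-encoded to [1, 0, 1]
# (classes are ordered alphabetically), reshaped into a column vector, and one-hot
# encoded to [[0, 1], [1, 0], [0, 1]].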
def stratify(features, labels):
x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=0.2, shuffle=True, stratify=labels)
return x_train, x_test, y_train, y_test
def create_np_labels():
np_labels = create_onehot_labels(create_labels())
return np_labels
def create_np_data(one_d, two_d, three_d):
pd_data = pd.DataFrame(create_all_data()).values
np_data = np.zeros((one_d, two_d, three_d))
for i in range(one_d):
for j in range(two_d):
for k in range(three_d):
np_data[i, j, k] = pd_data[i, j][k]
np_data = np.reshape(np_data, (one_d, (two_d*three_d)))
return np_data
def create_np_csv(two_d):
# two_d max is 9318
np_data = create_np_data(850, two_d, 3)
np_labels = create_np_labels()
x_train, x_test, y_train, y_test = train_test_split(np_data, np_labels, test_size=0.2, shuffle=True, stratify=np_labels)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2, shuffle=True, stratify=y_train)
np.savetxt('x_train', x_train, delimiter=',', fmt='%0.0f')
| np.savetxt('x_test', x_test, delimiter=',', fmt='%0.0f') | numpy.savetxt |
# -*- coding: utf-8 -*-
import os
import torch
from torch.autograd import Variable
import numpy as np
import scipy
import matplotlib.pyplot as plt
import cv2
import scipy.ndimage
import shutil
import scipy.misc as misc
from PIL import Image
def mkdirs(folders, erase=False):
if type(folders) is not list:
folders = [folders]
for fold in folders:
if not os.path.exists(fold):
os.makedirs(fold)
else:
if erase:
shutil.rmtree(fold)
os.makedirs(fold)
def normalize_img(X):
min_, max_ = np.min(X), np.max(X)
X = (X - min_) / (max_ - min_ + 1e-9)
X = X * 255
return X.astype(np.uint8)
def imread(imgfile):
assert os.path.exists(imgfile), '{} does not exist!'.format(imgfile)
srcBGR = cv2.imread(imgfile)
destRGB = cv2.cvtColor(srcBGR, cv2.COLOR_BGR2RGB)
return destRGB
def writeImg(array, savepath):
scipy.misc.imsave(savepath, array)
def imresize(img, resizeratio=1):
'''Take care of cv2 reshape squeeze behavior'''
if resizeratio == 1:
return img
outshape = (int(img.shape[1] * resizeratio), int(img.shape[0] * resizeratio))
# temp = cv2.resize(img, outshape).astype(float)
temp = misc.imresize(img, size=outshape).astype(float)
if len(img.shape) == 3 and img.shape[2] == 1:
temp = np.reshape(temp, temp.shape + (1,))
return temp
def imresize_shape(img, outshape):
if len(img.shape) == 3:
if img.shape[0] == 1 or img.shape[0] == 3:
transpose_img = np.transpose(img, (1, 2, 0))
_img = imresize_shape(transpose_img, outshape)
return np.transpose(_img, (2, 0, 1))
if len(img.shape) == 4:
img_out = []
for this_img in img:
img_out.append(imresize_shape(this_img, outshape))
return np.stack(img_out, axis=0)
img = img.astype(np.float32)
outshape = (int(outshape[1]), int(outshape[0]))
# temp = cv2.resize(img, outshape).astype(float)
temp = misc.imresize(img, size=outshape, interp='bilinear').astype(float)
if len(img.shape) == 3 and img.shape[2] == 1:
temp = np.reshape(temp, temp.shape + (1,))
return temp
def imshow(img, size=None):
if size is not None:
plt.figure(figsize=size)
else:
plt.figure()
plt.imshow(img)
plt.show()
def Indexflow(Totalnum, batch_size, random=True):
numberofchunk = int(Totalnum + batch_size - 1) // int(batch_size)  # ceiling division: number of chunks needed to cover Totalnum
# Chunkfile = np.zeros((batch_size, row*col*channel))
totalIndx = np.arange(Totalnum).astype(int)
if random is True:
totalIndx = | np.random.permutation(totalIndx) | numpy.random.permutation |
# -*- coding: utf-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- <NAME>
For COPYING and LICENSE details, please refer to the LICENSE file
"""
from unittest import TestCase
import unittest
from pycmbs.data import *
from pycmbs.diagnostic import RegionalAnalysis
import scipy as sc
import numpy as np
import tempfile
class TestData(TestCase):
def setUp(self):
self.D = Data(None, None)
self.D._init_sample_object(nt=1000, ny=1, nx=1)
self._tmpdir = tempfile.mkdtemp()
def test_regional_analysis(self):
# generate two datasets
ny = 2
nx = 6
nt = 500
# regional mask looks like the following
#
# | 1 | 2 | 2 | 3 | 4 | 3 |
# | 1 | 2 | 2 | 4 | 3 | 4 |
m = | np.zeros((2,6)) | numpy.zeros |
import talib
import numpy as np
import jtrade.core.instrument.equity as Equity
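# Usage sketch (assumes `equity.hp` is an OHLCV price-history DataFrame indexed by date
# with 'open', 'high', 'low', 'close' and 'volume' columns, which is the convention every
# wrapper below relies on):
# upper, middle, lower = BBANDS(equity, start='2020-01-01', end='2020-12-31', timeperiod=20)
# momentum = RSI(equity, timeperiod=14)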
# ========== TECH OVERLAP INDICATORS **START** ==========
def BBANDS(equity, start=None, end=None, timeperiod=5, nbdevup=2, nbdevdn=2, matype=0):
"""Bollinger Bands
:param timeperiod:
:param nbdevup:
:param nbdevdn:
:param matype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
upperband, middleband, lowerband = talib.BBANDS(close, timeperiod=timeperiod, nbdevup=nbdevup, nbdevdn=nbdevdn, matype=matype)
return upperband, middleband, lowerband
def DEMA(equity, start=None, end=None, timeperiod=30):
"""Double Exponential Moving Average
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.DEMA(close, timeperiod=timeperiod)
return real
def EMA(equity, start=None, end=None, timeperiod=30):
"""Exponential Moving Average
NOTE: The EMA function has an unstable period.
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.EMA(close, timeperiod=timeperiod)
return real
def HT_TRENDLINE(equity, start=None, end=None):
"""Hilbert Transform - Instantaneous Trendline
NOTE: The HT_TRENDLINE function has an unstable period.
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.HT_TRENDLINE(close)
return real
def KAMA(equity, start=None, end=None, timeperiod=30):
"""Kaufman Adaptive Moving Average
NOTE: The KAMA function has an unstable period.
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.KAMA(close, timeperiod=timeperiod)
return real
def MA(equity, start=None, end=None, timeperiod=30, matype=0):
"""Moving average
:param timeperiod:
:param matype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.MA(close, timeperiod=timeperiod, matype=matype)
return real
def MAMA(equity, start=None, end=None, fastlimit=0, slowlimit=0):
"""MESA Adaptive Moving Average
NOTE: The MAMA function has an unstable period.
:param fastlimit:
:param slowlimit:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
mama, fama = talib.MAMA(close, fastlimit=fastlimit, slowlimit=slowlimit)
return mama, fama
def MAVP(equity, periods, start=None, end=None, minperiod=2, maxperiod=30, matype=0):
"""Moving average with variable period
:param periods:
:param minperiod:
:param maxperiod:
:param matype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.MAVP(close, periods, minperiod=minperiod, maxperiod=maxperiod, matype=matype)
return real
def MIDPOINT(equity, start=None, end=None, timeperiod=14):
"""MidPoint over period
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.MIDPOINT(close, timeperiod=timeperiod)
return real
def MIDPRICE(equity, start=None, end=None, timeperiod=14):
"""Midpoint Price over period
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.MIDPRICE(high, low, timeperiod=timeperiod)
return real
def SAR(equity, start=None, end=None, acceleration=0, maximum=0):
"""Parabolic SAR
:param acceleration:
:param maximum:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.SAR(high, low, acceleration=acceleration, maximum=maximum)
return real
def SAREXT(equity, start=None, end=None, startvalue=0, offsetonreverse=0, accelerationinitlong=0, accelerationlong=0,
accelerationmaxlong=0, accelerationinitshort=0, accelerationshort=0, accelerationmaxshort=0):
"""Parabolic SAR - Extended
:param startvalue:
:param offsetonreverse:
:param accelerationinitlong:
:param accelerationlong:
:param accelerationmaxlong:
:param accelerationinitshort:
:param accelerationshort:
:param accelerationmaxshort:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.SAREXT(high, low, startvalue=startvalue, offsetonreverse=offsetonreverse, accelerationinitlong=accelerationinitlong,
accelerationlong=accelerationlong, accelerationmaxlong=accelerationmaxlong, accelerationinitshort=accelerationinitshort,
accelerationshort=accelerationshort, accelerationmaxshort=accelerationmaxshort)
return real
def SMA(equity, start=None, end=None, timeperiod=30):
"""Simple Moving Average
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.SMA(close, timeperiod=timeperiod)
return real
def T3(equity, start=None, end=None, timeperiod=5, vfactor=0):
"""Triple Exponential Moving Average (T3)
NOTE: The T3 function has an unstable period.
:param timeperiod:
:param vfactor:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.T3(close, timeperiod=timeperiod, vfactor=vfactor)
return real
def TEMA(equity, start=None, end=None, timeperiod=30):
"""Triple Exponential Moving Average
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.TEMA(close, timeperiod=timeperiod)
return real
def TRIMA(equity, start=None, end=None, timeperiod=30):
"""Triangular Moving Average
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.TRIMA(close, timeperiod=timeperiod)
return real
def WMA(equity, start=None, end=None, timeperiod=30):
"""Weighted Moving Average
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.WMA(close, timeperiod=timeperiod)
return real
# ========== TECH OVERLAP INDICATORS **END** ==========
# ========== TECH MOMENTUM INDICATORS **START** ==========
def ADX(equity, start=None, end=None, timeperiod=14):
"""Average Directional Movement Index
NOTE: The ADX function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.ADX(high, low, close, timeperiod=timeperiod)
return real
def ADXR(equity, start=None, end=None, timeperiod=14):
"""Average Directional Movement Index Rating
NOTE: The ADXR function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.ADXR(high, low, close, timeperiod=timeperiod)
return real
def APO(equity, start=None, end=None, fastperiod=12, slowperiod=26, matype=0):
"""Absolute Price Oscillator
:param fastperiod:
:param slowperiod:
:param matype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.APO(close, fastperiod=fastperiod, slowperiod=slowperiod, matype=matype)
return real
def AROON(equity, start=None, end=None, timeperiod=14):
"""Aroon
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
aroondown, aroonup = talib.AROON(high, low, timeperiod=timeperiod)
return aroondown, aroonup
def AROONOSC(equity, start=None, end=None, timeperiod=14):
"""Aroon Oscillator
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.AROONOSC(high, low, timeperiod=timeperiod)
return real
def BOP(equity, start=None, end=None):
"""Balance Of Power
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.BOP(opn, high, low, close)
return real
def CCI(equity, start=None, end=None, timeperiod=14):
"""Commodity Channel Index
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.CCI(high, low, close, timeperiod=timeperiod)
return real
def CMO(equity, start=None, end=None, timeperiod=14):
"""Chande Momentum Oscillator
NOTE: The CMO function has an unstable period.
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.CMO(close, timeperiod=timeperiod)
return real
def DX(equity, start=None, end=None, timeperiod=14):
"""Directional Movement Index
NOTE: The DX function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.DX(high, low, close, timeperiod=timeperiod)
return real
def MACD(equity, start=None, end=None, fastperiod=12, slowperiod=26, signalperiod=9):
"""Moving Average Convergence/Divergence
:param fastperiod:
:param slowperiod:
:param signalperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
macd, macdsignal, macdhist = talib.MACD(close, fastperiod=fastperiod, slowperiod=slowperiod, signalperiod=signalperiod)
return macd, macdsignal, macdhist
def MACDEXT(equity, start=None, end=None, fastperiod=12, fastmatype=0, slowperiod=26, slowmatype=0, signalperiod=9, signalmatype=0):
"""MACD with controllable MA type
:param fastperiod:
:param fastmatype:
:param slowperiod:
:param slowmatype:
:param signalperiod:
:param signalmatype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
macd, macdsignal, macdhist = talib.MACDEXT(close, fastperiod=fastperiod, fastmatype=fastmatype, slowperiod=slowperiod,
slowmatype=slowmatype, signalperiod=signalperiod, signalmatype=signalmatype)
return macd, macdsignal, macdhist
def MACDFIX(equity, start=None, end=None, signalperiod=9):
"""Moving Average Convergence/Divergence Fix 12/26
:param signalperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
macd, macdsignal, macdhist = talib.MACDFIX(close, signalperiod=signalperiod)
return macd, macdsignal, macdhist
def MFI(equity, start=None, end=None, timeperiod=14):
"""Money Flow Index
NOTE: The MFI function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
volume = np.array(equity.hp.loc[start:end, 'volume'], dtype='f8')
real = talib.MFI(high, low, close, volume, timeperiod=timeperiod)
return real
def MINUS_DI(equity, start=None, end=None, timeperiod=14):
"""Minus Directional signal
NOTE: The MINUS_DI function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.MINUS_DI(high, low, close, timeperiod=timeperiod)
return real
def MINUS_DM(equity, start=None, end=None, timeperiod=14):
"""Minus Directional Movement
NOTE: The MINUS_DM function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.MINUS_DM(high, low, timeperiod=timeperiod)
return real
def MOM(equity, start=None, end=None, timeperiod=10):
"""Momentum
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.MOM(close, timeperiod=timeperiod)
return real
def PLUS_DI(equity, start=None, end=None, timeperiod=14):
"""Plus Directional signal
NOTE: The PLUS_DI function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.PLUS_DI(high, low, close, timeperiod=timeperiod)
return real
def PLUS_DM(equity, start=None, end=None, timeperiod=14):
"""Plus Directional Movement
NOTE: The PLUS_DM function has an unstable period.
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.PLUS_DM(high, low, timeperiod=timeperiod)
return real
def PPO(equity, start=None, end=None, fastperiod=12, slowperiod=26, matype=0):
"""Percentage Price Oscillator
:param fastperiod:
:param slowperiod:
:param matype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.PPO(close, fastperiod=fastperiod, slowperiod=slowperiod, matype=matype)
return real
def ROC(equity, start=None, end=None, timeperiod=10):
"""Rate of change : ((price/prevPrice)-1)*100
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.ROC(close, timeperiod=timeperiod)
return real
def ROCP(equity, start=None, end=None, timeperiod=10):
"""Rate of change Percentage: (price-prevPrice)/prevPrice
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.ROCP(close, timeperiod=timeperiod)
return real
def ROCR(equity, start=None, end=None, timeperiod=10):
"""Rate of change ratio: (price/prevPrice)
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.ROCR(close, timeperiod=timeperiod)
return real
def ROCR100(equity, start=None, end=None, timeperiod=10):
"""Rate of change ratio 100 scale: (price/prevPrice)*100
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.ROCR100(close, timeperiod=timeperiod)
return real
def RSI(equity, start=None, end=None, timeperiod=14):
"""Relative Strength Index
NOTE: The RSI function has an unstable period.
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.RSI(close, timeperiod=timeperiod)
return real
def STOCH(equity, start=None, end=None, fastk_period=5, slowk_period=3, slowk_matype=0, slowd_period=3, slowd_matype=0):
"""Stochastic
:param fastk_period:
:param slowk_period:
:param slowk_matype:
:param slowd_period:
:param slowd_matype:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
slowk, slowd = talib.STOCH(high, low, close, fastk_period=fastk_period, slowk_period=slowk_period,
slowk_matype=slowk_matype, slowd_period=slowd_period, slowd_matype=slowd_matype)
return slowk, slowd
def STOCHF(equity, start=None, end=None, fastk_period=5, fastd_period=3, fastd_matype=0):
"""Stochastic Fast
:param fastk_period:
:param fastd_period:
:param fastd_matype:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
fastk, fastd = talib.STOCHF(high, low, close, fastk_period=fastk_period, fastd_period=fastd_period,
fastd_matype=fastd_matype)
return fastk, fastd
def STOCHRSI(equity, start=None, end=None, timeperiod=14, fastk_period=5, fastd_period=3, fastd_matype=0):
"""Stochastic Relative Strength Index
NOTE: The STOCHRSI function has an unstable period.
:param timeperiod:
:param fastk_period:
:param fastd_period:
:param fastd_matype:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
fastk, fastd = talib.STOCHRSI(close, timeperiod=timeperiod, fastk_period=fastk_period,
fastd_period=fastd_period, fastd_matype=fastd_matype)
return fastk, fastd
def TRIX(equity, start=None, end=None, timeperiod=30):
"""1-day Rate-Of-Change (ROC) of a Triple Smooth EMA
:param timeperiod:
:return:
"""
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.TRIX(close, timeperiod=timeperiod)
return real
def ULTOSC(equity, start=None, end=None, timeperiod1=7, timeperiod2=14, timeperiod3=28):
"""Ultimate Oscillator
:param timeperiod1:
:param timeperiod2:
:param timeperiod3:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.ULTOSC(high, low, close, timeperiod1=timeperiod1, timeperiod2=timeperiod2, timeperiod3=timeperiod3)
return real
def WILLR(equity, start=None, end=None, timeperiod=14):
"""Williams' %R
:param timeperiod:
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.WILLR(high, low, close, timeperiod=timeperiod)
return real
# ========== TECH MOMENTUM INDICATORS **END** ==========
# ========== PRICE TRANSFORM FUNCTIONS **START** ==========
def AVGPRICE(equity, start=None, end=None):
"""Average Price
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.AVGPRICE(opn, high, low, close)
return real
def MEDPRICE(equity, start=None, end=None):
"""Median Price
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
real = talib.MEDPRICE(high, low)
return real
def TYPPRICE(equity, start=None, end=None):
"""Typical Price
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.TYPPRICE(high, low, close)
return real
def WCLPRICE(equity, start=None, end=None):
"""Weighted Close Price
:return:
"""
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
real = talib.WCLPRICE(high, low, close)
return real
# ========== PRICE TRANSFORM FUNCTIONS **END** ==========
# ========== PATTERN RECOGNITION FUNCTIONS **START** ==========
def CDL2CROWS(equity, start=None, end=None):
"""Two Crows
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDL2CROWS(opn, high, low, close)
return integer
def CDL3BLACKCROWS(equity, start=None, end=None):
"""Three Black Crows
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDL3BLACKCROWS(opn, high, low, close)
return integer
def CDL3INSIDE(equity, start=None, end=None):
"""Three Inside Up/Down
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDL3INSIDE(opn, high, low, close)
return integer
def CDL3LINESTRIKE(equity, start=None, end=None):
"""Three-Line Strike
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDL3LINESTRIKE(opn, high, low, close)
return integer
def CDL3OUTSIDE(equity, start=None, end=None):
"""Three Outside Up/Down
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDL3OUTSIDE(opn, high, low, close)
return integer
def CDL3STARSINSOUTH(equity, start=None, end=None):
"""Three Stars In The South
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDL3STARSINSOUTH(opn, high, low, close)
return integer
def CDL3WHITESOLDIERS(equity, start=None, end=None):
"""Three Advancing White Soldiers
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDL3WHITESOLDIERS(opn, high, low, close)
return integer
def CDLABANDONEDBABY(equity, start=None, end=None, penetration=0):
"""Abandoned Baby
:param penetration:
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLABANDONEDBABY(opn, high, low, close, penetration=penetration)
return integer
def CDLADVANCEBLOCK(equity, start=None, end=None):
"""Advance Block
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLADVANCEBLOCK(opn, high, low, close)
return integer
def CDLBELTHOLD(equity, start=None, end=None):
"""Belt-hold
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLBELTHOLD(opn, high, low, close)
return integer
def CDLBREAKAWAY(equity, start=None, end=None):
"""Breakaway
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLBREAKAWAY(opn, high, low, close)
return integer
def CDLCLOSINGMARUBOZU(equity, start=None, end=None):
"""Closing Marubozu
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLCLOSINGMARUBOZU(opn, high, low, close)
return integer
def CDLCONCEALBABYSWALL(equity, start=None, end=None):
"""Concealing Baby Swallow
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLCONCEALBABYSWALL(opn, high, low, close)
return integer
def CDLCOUNTERATTACK(equity, start=None, end=None):
"""Counterattack
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLCOUNTERATTACK(opn, high, low, close)
return integer
def CDLDARKCLOUDCOVER(equity, start=None, end=None, penetration=0):
"""Dark Cloud Cover
:param penetration:
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLDARKCLOUDCOVER(opn, high, low, close, penetration=penetration)
return integer
def CDLDOJI(equity, start=None, end=None):
"""Doji
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLDOJI(opn, high, low, close)
return integer
def CDLDOJISTAR(equity, start=None, end=None):
"""Doji Star
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLDOJISTAR(opn, high, low, close)
return integer
def CDLDRAGONFLYDOJI(equity, start=None, end=None):
"""Dragonfly Doji
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLDRAGONFLYDOJI(opn, high, low, close)
return integer
def CDLENGULFING(equity, start=None, end=None):
"""Engulfing Pattern
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLENGULFING(opn, high, low, close)
return integer
def CDLEVENINGDOJISTAR(equity, start=None, end=None, penetration=0):
"""Evening Doji Star
:param penetration:
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLEVENINGDOJISTAR(opn, high, low, close, penetration=penetration)
return integer
def CDLEVENINGSTAR(equity, start=None, end=None, penetration=0):
"""Evening Star
:param penetration:
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLEVENINGSTAR(opn, high, low, close, penetration=penetration)
return integer
def CDLGAPSIDESIDEWHITE(equity, start=None, end=None):
"""Up/Down-gap side-by-side white lines
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLGAPSIDESIDEWHITE(opn, high, low, close)
return integer
def CDLGRAVESTONEDOJI(equity, start=None, end=None):
"""Gravestone Doji
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLGRAVESTONEDOJI(opn, high, low, close)
return integer
def CDLHAMMER(equity, start=None, end=None):
"""Hammer
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLHAMMER(opn, high, low, close)
return integer
def CDLHANGINGMAN(equity, start=None, end=None):
"""Hanging Man
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLHANGINGMAN(opn, high, low, close)
return integer
def CDLHARAMI(equity, start=None, end=None):
"""Harami Pattern
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLHARAMI(opn, high, low, close)
return integer
def CDLHARAMICROSS(equity, start=None, end=None):
"""Harami Cross Pattern
:return:
"""
opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8')
high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8')
low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8')
close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8')
integer = talib.CDLHARAMICROSS(opn, high, low, close)
return integer
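# Hedged usage sketch (not part of the original module): these wrappers assume an
# equity object exposing a price-history DataFrame at equity.hp with
# 'open'/'high'/'low'/'close' columns indexed by date. The Equity class and the
# sample data below are illustrative assumptions, not confirmed by the source.
#   import pandas as pd
#   class Equity:
#       def __init__(self, hp):
#           self.hp = hp  # OHLC DataFrame indexed by date
#   hp = pd.DataFrame({'open': [10.0, 10.2], 'high': [10.5, 10.6],
#                      'low': [9.8, 10.0], 'close': [10.2, 10.4]},
#                     index=pd.to_datetime(['2020-01-01', '2020-01-02']))
#   signals = CDLHARAMICROSS(Equity(hp))  # nonzero entries mark pattern occurrences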
import sys
import os
import unittest
import numpy as np
import numpy.testing as npt
from oct2py import octave
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'build', 'debug'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'build', 'debug', 'swig'))
import yafft
class Test2PointFFT(unittest.TestCase):
def setUp(self):
octave.restart()
octave.addpath('./octave')
octave.addpath('./test/octave')
octave.eval('pkg load signal') # load signal package
# Test data
self.in1 = np.array([1, 0], dtype=np.complex64)
self.out1 = np.array([1, 1], dtype=np.complex64)
self.in2 = np.array([0, 1], dtype=np.complex64)
self.out2 = np.array([1, -1], dtype=np.complex64)
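# These input/output pairs are the analytic 2-point DFT results: the DFT of a
# unit impulse at index n is exp(-2j*pi*k*n/N), so in1 maps to all ones and in2
# maps to [1, -1]. A quick self-check (a sketch, not part of the original tests):
#   npt.assert_almost_equal(np.fft.fft(self.in1), self.out1)
#   npt.assert_almost_equal(np.fft.fft(self.in2), self.out2)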
def tearDown(self):
octave.exit()
def test_octave_2point_fft(self):
res = octave.my_fft(self.in1)
res = np.squeeze(res)
npt.assert_almost_equal(res, self.out1)
res = octave.my_fft(self.in2)
res = np.squeeze(res)
npt.assert_almost_equal(res, self.out2)
def test_dit_2point_fft(self):
data = self.in1
yafft.fft_radix2(data, yafft.DECIMATION_IN_TIME)
npt.assert_almost_equal(data, self.out1)
data = self.in2
yafft.fft_radix2(data, yafft.DECIMATION_IN_TIME)
npt.assert_almost_equal(data, self.out2)
def test_dif_2point_fft(self):
data = self.in1
yafft.fft_radix2(data, yafft.DECIMATION_IN_FREQUENCY)
npt.assert_almost_equal(data, self.out1)
data = self.in2
yafft.fft_radix2(data, yafft.DECIMATION_IN_FREQUENCY)
npt.assert_almost_equal(data, self.out2)
class Test4PointFFT(unittest.TestCase):
def setUp(self):
octave.restart()
octave.addpath('./octave')
octave.addpath('./test/octave')
octave.eval('pkg load signal') # load signal package
# Test data
self.in1 = np.array([1, 0, 0, 0], dtype=np.complex64)
self.out1 = np.array([1, 1, 1, 1], dtype=np.complex64)
self.in2 = np.array([0, 1, 0, 0], dtype=np.complex64)
self.out2 = np.array([1, -1j, -1, 1j], dtype=np.complex64)
self.in3 = np.array([0, 0, 1, 0], dtype=np.complex64)
self.out3 = np.array([1, -1, 1, -1], dtype=np.complex64)
# -*- coding: utf-8 -*-
"""
Author: <NAME> / <NAME>
This program was developed in the scope of WESE H2020 project
The FEMM model here developed is used to estimate the EMF's surrounding a
3-phase submarine power cable.
For more information on the femm functions used, please check the manual
available here -> https://www.femm.info/wiki/pyFEMM
"""
import os
import numpy as np
import scipy as sp
import femm
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
from matplotlib.ticker import FormatStrFormatter
dir = os.getcwd()
#==============================================================================
# Subsea Cable Parameters
#==============================================================================
# User Inputs -----------------------------------------------------------------
A_cond = 50 # Conductor cross sectional area (mm^2)
cond_screen = 0.2 # Conductor screen thickness (mm)
insulation = 3.4 # Conductor insulation thickness (mm)
insul_screen_NM = 3.0 # Conductor insulation screen - non metallic thickness (mm)
insul_screen_ME = 0.2 # Conductor insulation screen (sheath) - metallic thickness (mm)
bedding = 0.2 # Bedding thickness (mm)
r_armour = 2.00 # Armour radius (mm)
r_armour_2 = 0 # 2nd Armour layer radius (mm) -> set 0 if non-existant
over_sheath = 2.0 # Over Sheath thickness (mm)
AMPS_RMS = 0.057 # RMS Phase Current (A)
AMPS = sp.sqrt(2) * AMPS_RMS #Peak Phase Current (A)
freq = 50 #Hz grid frequency
burial_depth = 2000 # Burial depth [mm] -> set 0 if surface laid
# Support Variables -----------------------------------------------------------
r_cond = sp.sqrt( (A_cond/sp.pi) ) # Conductor radius (mm)
r_cond_screen = r_cond + cond_screen # Conductor screen radius
r_cond_insul = r_cond + cond_screen + insulation # Conductor insulation radius
r_cond_insu_screen_NM = r_cond + cond_screen + insulation + insul_screen_NM # Conductor insul. screen radius (non metallic)
r_cond_total = r_cond + cond_screen + insulation + insul_screen_NM + insul_screen_ME # Total conductor radius
dist1 = 0.01 # Distance to create a small gap between the conductors and the bedding (mm)
dist2 = 0.01 # Distance to create a small gap between the subsea cable and the core (mm)
cond_surf = 0.2 #distance between conductor surfaces
cond_center = 2*r_cond_total + cond_surf # distance between conductor centers
#==============================================================================
# Environment and Cable component characteristics
#==============================================================================
# copper conductors
copper_conductivity = 58000000 # [S/m]
copper_permeability = 1.0 # relative permeability [unitless]
# XLPE insulation
XLPE_conductivity = 0.0 # [S/m]
XLPE_permeability = 1.0 # relative permeability [unitless]
# Non metallic Insulation Screen (e.g. semi-conductive conductor screen, insulation screen,...)
NM_screen_conductivity = 1.0 # [S/m]
NM_screen_permeability = 1.0 # relative permeability [unitless]
# Metallic Insulation screen (e.g. lead -> check cable material)
M_screen_conductivity = 5000000 # [S/m]
M_screen_permeability = 1.0 # relative permeability [unitless]
# Inner Sheath (e.g. PVC)
inner_conductivity = 0.0 # [S/m]
inner_permeability = 1.0 # relative permeability [unitless]
# Bedding (e.g. PVC)
bedding_conductivity = 0.0 # [S/m]
bedding_permeability = 1.0 # relative permeability [unitless]
# Armour (e.g. Galvanised steel wire)
armour_conductivity = 1100000 # [S/m]
armour_permeability = 300 # relative permeability [unitless]
# Over Sheath
over_conductivity = 0.0 # [S/m]
over_permeability = 1.0 # relative permeability [unitless]
# Seawater
seawater_conductivity = 5.0 # [S/m]
seawater_permeability = 1.0 # relative permeability [unitless]
# Seabed (sand)
seabed_conductivity = 1.0 # [S/m]
seabed_permeability = 1.0 # relative permeability [unitless]
#==============================================================================
## Starting FEMM and creating a new FEMM-document
#==============================================================================
# Starting FEMM
femm.openfemm() # opens FEMM app, add (1) to hide the main window
# Creating new magnostatic problem
femm.newdocument(0) # Specify doctype to be 0 for a magnetics problem,
# 1 for an electrostatics problem,
# 2 for a heat flow problem,
# 3 for a current flow problem.
# Problem definition: Frequency, units, problemtype, precision, depth,
# minangle(Mesh), solver type: 0 for successive appr., 1 for Newton
femm.mi_probdef(freq, 'millimeters', 'planar', 1.e-8, 5000, 10, 0)
# Set display area ------------------------------------------------------------
# mi_zoom(x1,y1,x2,y2) sets the display area to be from the bottom left corner
# specified by (x1,y1) to the top right corner specified by (x2,y2).
femm.mi_zoom(-400, -400, 400, 400)
# Component Materials
femm.mi_addmaterial('copper_conductors', copper_permeability, copper_permeability,
0, 0, copper_conductivity*10**-6, 0, 0, 0, 0, 0, 0, 0, 0)
femm.mi_addmaterial('XLPE', XLPE_permeability, XLPE_permeability, 0, 0,
XLPE_conductivity*10**-6, 0, 0, 0, 0, 0, 0, 0, 0)
femm.mi_addmaterial('M_screen', M_screen_permeability, M_screen_permeability, 0, 0,
M_screen_conductivity*10**-6, 0, 0, 0, 0, 0, 0, 0, 0)
femm.mi_addmaterial('NM_screen', NM_screen_permeability, NM_screen_permeability,
0, 0, NM_screen_conductivity*10**-6, 0, 0, 0, 0, 0, 0, 0, 0)
femm.mi_addmaterial('inner', inner_permeability, inner_permeability, 0, 0,
inner_conductivity*10**-6, 0, 0, 0, 0, 0, 0, 0, 0)
femm.mi_addmaterial('bedding', bedding_permeability, bedding_permeability, 0, 0,
bedding_conductivity*10**-6, 0, 0, 0, 0, 0, 0, 0, 0)
femm.mi_addmaterial('armour', armour_permeability, armour_permeability, 0, 0,
armour_conductivity*10**-6, 0, 0, 0, 0, 0, 0, 0, 0)
femm.mi_addmaterial('over', over_permeability, over_permeability, 0, 0,
over_conductivity*10**-6, 0, 0, 0, 0, 0, 0, 0, 0)
femm.mi_addmaterial('seawater', seawater_permeability, seawater_permeability, 0, 0,
seawater_conductivity*10**-6, 0, 0, 0, 0, 0, 0, 0, 0)
femm.mi_addmaterial('seabed', seabed_permeability, seabed_permeability, 0, 0,
seabed_conductivity*10**-6, 0, 0, 0, 0, 0, 0, 0, 0)
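# Note on the 10**-6 factors above: femm.mi_addmaterial expects the electrical
# conductivity argument in MS/m, so the S/m values defined earlier are scaled by
# 1e-6 before being passed in.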
# General
GridSize = 0.3 * r_cond_total
# Geometry: Subsea Cable
# Apothem of an equilateral triangle
delta_y_triangle = (1 / 6) * sp.sqrt(3) * cond_center
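# The three conductor centres sit at the vertices of an equilateral triangle of
# side cond_center. Its apothem (centre-to-side distance) is
#   a = s / (2 * tan(60 deg)) = s * sqrt(3) / 6,
# which is the (1/6)*sqrt(3)*cond_center used above; subtracting it recentres the
# trefoil on the cable axis. Quick numeric check (a sketch, not original code):
#   assert abs(delta_y_triangle - cond_center / (2 * np.tan(np.radians(60)))) < 1e-9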
#==============================================================================
# Draw Environment Geometry
#==============================================================================
#Process:
# 1- Draw the lines/arcs segments
# 2- Add a "Block Label" marker into each enclosed section to define material
# properties and mesh size
# Constants that define the environment extents
limit_fine = 400 # in [mm]
limit_coarse = 3500 # in [mm]
# Draw a square to use as the outer boundary for the problem
femm.mi_addblocklabel(limit_coarse*4/5, limit_coarse*4/5)
femm.mi_drawline(limit_coarse, limit_coarse, limit_coarse, -limit_coarse)
femm.mi_drawline(limit_coarse, limit_coarse, -limit_coarse, limit_coarse)
femm.mi_drawline(-limit_coarse, -limit_coarse, limit_coarse, -limit_coarse)
femm.mi_drawline(-limit_coarse, -limit_coarse, -limit_coarse, limit_coarse)
# Draw a second square to use a finer mesh close to the cable
femm.mi_addblocklabel(limit_fine*4/5, limit_fine*4/5)
femm.mi_drawline(limit_fine, limit_fine, limit_fine, -limit_fine)
femm.mi_drawline(limit_fine, limit_fine, -limit_fine, limit_fine)
femm.mi_drawline(-limit_fine, -limit_fine, limit_fine, -limit_fine)
femm.mi_drawline(-limit_fine, -limit_fine, -limit_fine, limit_fine)
################
# Seabed
################
if burial_depth!=0 and burial_depth>limit_fine:
# Draw a square to use as the outer boundary for the problem
femm.mi_addblocklabel(limit_coarse*4/5, -limit_coarse*4/5)
femm.mi_drawline(-limit_coarse, burial_depth, limit_coarse, burial_depth)
################
#Conductor 1
################
#Constants
cos60 = sp.cos(np.radians(60))
sin60 = sp.sin(np.radians(60))
# Draw a circle corresponding to 'Conductor 1'
femm.mi_addblocklabel(cond_center*cos60, -delta_y_triangle),
femm.mi_drawarc(cond_center*cos60-r_cond, -delta_y_triangle, cond_center*cos60+r_cond, -delta_y_triangle, 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
femm.mi_drawarc(cond_center*cos60+r_cond, -delta_y_triangle, cond_center*cos60-r_cond, -delta_y_triangle, 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
# Draw a circle corresponding to the 'Conductor Screen 1',
femm.mi_addblocklabel(cond_center*cos60+r_cond_screen-0.5*cond_screen, -delta_y_triangle),
femm.mi_drawarc(cond_center*cos60-r_cond_screen, -delta_y_triangle, cond_center*cos60+r_cond_screen, -delta_y_triangle, 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
femm.mi_drawarc(cond_center*cos60+r_cond_screen, -delta_y_triangle, cond_center*cos60-r_cond_screen, -delta_y_triangle, 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
# Draw a circle corresponding to 'Insulation 1'
femm.mi_addblocklabel(cond_center*cos60+r_cond_insul-0.5*insulation, -delta_y_triangle),
femm.mi_drawarc(cond_center*cos60-r_cond_insul, -delta_y_triangle, cond_center*cos60+r_cond_insul, -delta_y_triangle, 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
femm.mi_drawarc(cond_center*cos60+r_cond_insul, -delta_y_triangle, cond_center*cos60-r_cond_insul, -delta_y_triangle, 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
# Draw a circle corresponding to 'Insulation Screen - Non metallic 1'
femm.mi_addblocklabel(cond_center*cos60+r_cond_insu_screen_NM-0.5*insul_screen_NM, -delta_y_triangle),
femm.mi_drawarc(cond_center*cos60-r_cond_insu_screen_NM, -delta_y_triangle, cond_center*cos60+r_cond_insu_screen_NM, -delta_y_triangle, 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
femm.mi_drawarc(cond_center*cos60+r_cond_insu_screen_NM, -delta_y_triangle, cond_center*cos60-r_cond_insu_screen_NM, -delta_y_triangle, 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
# Draw a circle corresponding to the 'Insulation Screen - Metallic 1',
femm.mi_addblocklabel(cond_center*cos60+r_cond_total-0.5*insul_screen_ME, -delta_y_triangle),
femm.mi_drawarc(cond_center*cos60-r_cond_total, -delta_y_triangle, cond_center*cos60+r_cond_total, -delta_y_triangle, 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
femm.mi_drawarc(cond_center*cos60+r_cond_total, -delta_y_triangle, cond_center*cos60-r_cond_total, -delta_y_triangle, 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
################
#Conductor 2
################
# Draw a circle corresponding to 'Conductor 2'
femm.mi_addblocklabel(0, cond_center*sin60-delta_y_triangle),
femm.mi_drawarc(0, cond_center*sin60-r_cond-delta_y_triangle, 0, cond_center*sin60+r_cond-delta_y_triangle, 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
femm.mi_drawarc(0, cond_center*sin60+r_cond-delta_y_triangle, 0, cond_center*sin60-r_cond-delta_y_triangle, 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
# Draw a circle corresponding to the 'Conductor Screen 2',
femm.mi_addblocklabel(0+r_cond_screen-0.5*cond_screen, cond_center*sin60-delta_y_triangle),
femm.mi_drawarc(0, cond_center*sin60-r_cond_screen-delta_y_triangle, 0, cond_center*sin60+r_cond_screen-delta_y_triangle, 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
femm.mi_drawarc(0, cond_center*sin60+r_cond_screen-delta_y_triangle, 0, cond_center*sin60-r_cond_screen-delta_y_triangle, 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
# Draw a circle corresponding to 'Insulation 2'
femm.mi_addblocklabel(0+r_cond_insul-0.5*insulation, cond_center*sin60-delta_y_triangle),
femm.mi_drawarc(0, cond_center*sin60-r_cond_insul-delta_y_triangle, 0, cond_center*sin60+r_cond_insul-delta_y_triangle, 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
femm.mi_drawarc(0, cond_center*sin60+r_cond_insul-delta_y_triangle, 0, cond_center*sin60-r_cond_insul-delta_y_triangle, 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
# Draw a circle corresponding to 'Insulation Screen - Non metallic 2'
femm.mi_addblocklabel(0+r_cond_insu_screen_NM-0.5*insul_screen_NM, cond_center*sin60-delta_y_triangle),
femm.mi_drawarc(0, cond_center*sin60-r_cond_insu_screen_NM-delta_y_triangle, 0, cond_center*sin60+r_cond_insu_screen_NM-delta_y_triangle, 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
femm.mi_drawarc(0, cond_center*sin60+r_cond_insu_screen_NM-delta_y_triangle, 0, cond_center*sin60-r_cond_insu_screen_NM-delta_y_triangle, 180, 3)
# Draw a circle corresponding to 'Insulation Screen - Metallic 2',
femm.mi_addblocklabel(0+r_cond_total-0.5*insul_screen_ME, cond_center*sin60-delta_y_triangle),
femm.mi_drawarc(0, cond_center*sin60-r_cond_total-delta_y_triangle, 0, cond_center*sin60+r_cond_total-delta_y_triangle, 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
femm.mi_drawarc(0, cond_center*sin60+r_cond_total-delta_y_triangle, 0, cond_center*sin60-r_cond_total-delta_y_triangle, 180, 3)
################
#Conductor 3
################
# Draw a circle corresponding to 'Conductor 3'
femm.mi_addblocklabel(-cond_center*cos60, -delta_y_triangle),
femm.mi_drawarc(-cond_center*cos60-r_cond, 0-delta_y_triangle, -cond_center*cos60+r_cond, 0-delta_y_triangle, 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
femm.mi_drawarc(-cond_center*cos60+r_cond, 0-delta_y_triangle, -cond_center*cos60-r_cond, 0-delta_y_triangle, 180, 3)
# Draw a circle corresponding to the 'Conductor Screen 3',
femm.mi_addblocklabel(-cond_center*cos60+r_cond_screen-0.5*cond_screen, -delta_y_triangle)
femm.mi_drawarc(-cond_center*cos60-r_cond_screen, 0-delta_y_triangle, -cond_center*cos60+r_cond_screen, 0-delta_y_triangle, 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
femm.mi_drawarc(-cond_center*cos60+r_cond_screen, 0-delta_y_triangle, -cond_center*cos60-r_cond_screen, 0-delta_y_triangle, 180, 3)
# Draw a circle corresponding to 'Insulation 3'
femm.mi_addblocklabel(-cond_center*cos60+r_cond_insul-0.5*insulation, -delta_y_triangle)
femm.mi_drawarc(-cond_center*cos60-r_cond_insul, 0-delta_y_triangle, -cond_center*cos60+r_cond_insul, 0-delta_y_triangle, 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
femm.mi_drawarc(-cond_center*cos60+r_cond_insul, 0-delta_y_triangle, -cond_center*cos60-r_cond_insul, 0-delta_y_triangle, 180, 3)
# Draw a circle corresponding to 'Insulation Screen - Non metallic 3'
femm.mi_addblocklabel(-cond_center*cos60+r_cond_insu_screen_NM-0.5*insul_screen_NM, -delta_y_triangle)
femm.mi_drawarc(-cond_center*cos60-r_cond_insu_screen_NM, 0-delta_y_triangle, -cond_center*cos60+r_cond_insu_screen_NM, 0-delta_y_triangle, 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
femm.mi_drawarc(-cond_center*cos60+r_cond_insu_screen_NM, 0-delta_y_triangle, -cond_center*cos60-r_cond_insu_screen_NM, 0-delta_y_triangle, 180, 3)
# Draw a circle corresponding to 'Insulation Screen - Metallic 3'
femm.mi_addblocklabel(-cond_center*cos60+r_cond_total-0.5*insul_screen_ME, -delta_y_triangle),
femm.mi_drawarc(-cond_center*cos60-r_cond_total, 0-delta_y_triangle, -cond_center*cos60+r_cond_total, 0-delta_y_triangle, 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
femm.mi_drawarc(-cond_center*cos60+r_cond_total, 0-delta_y_triangle, -cond_center*cos60-r_cond_total, 0-delta_y_triangle, 180, 3)
################
#Laying Up / Filling
################
femm.mi_addblocklabel(0,0),
################
#Bedding
################
# Draw a circle corresponding to the 'Bedding' - inner radius
femm.mi_addblocklabel(0,cond_center*sin60+r_cond_total-delta_y_triangle+0.5*bedding),
femm.mi_drawarc(0, cond_center*sin60+r_cond_total-delta_y_triangle+dist1, 0, -cond_center*sin60-r_cond_total+delta_y_triangle-dist1, 180, 3), # (x1, y1 , x2, y2, angle, maxseg)
femm.mi_drawarc(0, -cond_center*sin60-r_cond_total+delta_y_triangle-dist1, 0, cond_center*sin60+r_cond_total-delta_y_triangle+dist1, 180, 3),
# Draw a circle corresponding to the 'Bedding' - outer radius
femm.mi_drawarc(0, cond_center*sin60+r_cond_total-delta_y_triangle+bedding, 0, -cond_center*sin60-r_cond_total+delta_y_triangle-bedding, 180, 3), # (x1, y1 , x2, y2, angle, maxseg)
femm.mi_drawarc(0, -cond_center*sin60-r_cond_total+delta_y_triangle-bedding, 0, cond_center*sin60+r_cond_total-delta_y_triangle+bedding, 180, 3),
################
# Armour - 1st Layer
################
#Calculate the number of shielding elements in the cable
space = 0.05 #distance between armour elements
R_armour = (cond_center * sin60 - delta_y_triangle) + r_cond_total + bedding + r_armour
Armour_perimeter = 2 * sp.pi * (R_armour)
NbrOfArmourElem = int( sp.floor( Armour_perimeter/(2*r_armour + space)) )
theta = np.radians(360/NbrOfArmourElem)
# Draw a circle corresponding to the 1st shielding layer
for i in range(1,NbrOfArmourElem+1):
femm.mi_addblocklabel(R_armour*sp.sin((i-1)*theta) , R_armour*sp.cos((i-1)*theta) )
femm.mi_drawarc((R_armour-r_armour+dist1)*sp.sin((i-1)*theta), (R_armour-r_armour+dist1)*sp.cos((i-1)*theta), (R_armour+r_armour-dist1)*sp.sin((i-1)*theta), (R_armour+r_armour-dist1)*sp.cos((i-1)*theta), 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
femm.mi_drawarc((R_armour+r_armour-dist1)*sp.sin((i-1)*theta), (R_armour+r_armour-dist1)*sp.cos((i-1)*theta), (R_armour-r_armour+dist1)*sp.sin((i-1)*theta), (R_armour-r_armour+dist1)*sp.cos((i-1)*theta), 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
################
# Area between armouring
################
femm.mi_addblocklabel((R_armour-0.4*r_armour)*sp.sin((1-0.5)*theta), (R_armour-0.4*r_armour)*sp.cos((1-0.5)*theta));
if r_armour_2!=0: # check if cable has a 2nd armour layer
################
# Armour - 2nd Layer
################
#Calculate the number of shielding elements in the cable
space = 0.05 #distance between armour elements
R_armour_2 = R_armour + r_armour + space + r_armour_2
Armour_perimeter_2 = 2 * sp.pi * (R_armour_2)
NbrOfArmourElem2 = int( sp.floor( Armour_perimeter_2/(2*r_armour_2 + space)) )
theta_2 = np.radians(360/NbrOfArmourElem2)
# Draw a circle corresponding to the 2nd shielding layer
for i in range(1,NbrOfArmourElem2+1):
femm.mi_addblocklabel(R_armour_2*sp.sin((i-1)*theta_2) , R_armour_2*sp.cos((i-1)*theta_2) )
femm.mi_drawarc((R_armour_2-r_armour_2+dist1)*sp.sin((i-1)*theta_2), (R_armour_2-r_armour_2+dist1)*sp.cos((i-1)*theta_2), (R_armour_2+r_armour_2-dist1)*sp.sin((i-1)*theta_2), (R_armour_2+r_armour_2-dist1)*sp.cos((i-1)*theta_2), 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
femm.mi_drawarc((R_armour_2+r_armour_2-dist1)*sp.sin((i-1)*theta_2), (R_armour_2+r_armour_2-dist1)*sp.cos((i-1)*theta_2), (R_armour_2-r_armour_2+dist1)*sp.sin((i-1)*theta_2), (R_armour_2-r_armour_2+dist1)*sp.cos((i-1)*theta_2), 180, 3) # (x1, y1 , x2, y2, angle, maxseg)
################
#Over Sheath
################
# Draw a circle corresponding to the 'Over Sheath' - inner radius
femm.mi_addblocklabel(0,R_armour_2+r_armour_2+dist1+0.5*over_sheath)
femm.mi_drawarc(0, R_armour_2+r_armour_2+dist1, 0, -R_armour_2-r_armour_2-dist1, 180, 2) # ([x1 y1 ; x2, y2], angle, maxseg)
femm.mi_drawarc(0, -R_armour_2-r_armour_2-dist1, 0, R_armour_2+r_armour_2+dist1, 180, 2)
# Draw a circle corresponding to the 'Over Sheath' - outer radius
femm.mi_drawarc(0, R_armour_2+r_armour_2+over_sheath+2*dist1, 0, -R_armour_2-r_armour_2-over_sheath-2*dist1, 180, 2) # ([x1 y1 ; x2, y2], angle, maxseg)
femm.mi_drawarc(0, -R_armour_2-r_armour_2-over_sheath-2*dist1, 0, R_armour_2+r_armour_2+over_sheath+2*dist1, 180, 2);
#Overall Cable Radius
R_cable_total = R_armour_2 + r_armour_2 + over_sheath + dist1
#Display
print('Overall Cable Diameter :',2*R_cable_total)
else:
################
#Over Sheath
################
# Draw a circle corresponding to the 'Over Sheath' - inner radius
femm.mi_addblocklabel(0,R_armour+r_armour+dist1+0.5*over_sheath)
femm.mi_drawarc(0, R_armour+r_armour+dist1, 0, -R_armour-r_armour-dist1, 180, 2) # ([x1 y1 ; x2, y2], angle, maxseg)
femm.mi_drawarc(0, -R_armour-r_armour-dist1, 0, R_armour+r_armour+dist1, 180, 2)
# Draw a circle corresponding to the 'Over Sheath' - outer radius
femm.mi_drawarc(0, R_armour+r_armour+over_sheath+2*dist1, 0, -R_armour-r_armour-over_sheath-2*dist1, 180, 2) # ([x1 y1 ; x2, y2], angle, maxseg)
femm.mi_drawarc(0, -R_armour-r_armour-over_sheath-2*dist1, 0, R_armour+r_armour+over_sheath+2*dist1, 180, 2);
#Overall Cable Radius
R_cable_total = R_armour + r_armour + over_sheath + dist1
#Display
print('Overall Cable Diameter :',2*R_cable_total)
#Display
print('Objects geometries defined')
#==============================================================================
# Define Object Properties
#==============================================================================
# Note: Because femm.mi_clearselected() did not reliably deselect the labels,
# the simplest workaround found was to perform each selection twice via a
# for loop, odd but effective.
################
# Environment (rectangles):
################
# Apply the materials to the appropriate block labels
for i in range(2):
femm.mi_selectlabel(limit_coarse*4/5, limit_coarse*4/5) #outer rectangle
femm.mi_setblockprop('seawater', 0, 10*GridSize, '<None>', 0, 0, 0)
if burial_depth!=0 and burial_depth>limit_fine:
for i in range(2):
femm.mi_selectlabel(limit_fine*4/5, limit_fine*4/5) #inner rectangle
femm.mi_setblockprop('seabed', 0, 6*GridSize, '<None>', 0, 0, 0)
for i in range(2):
femm.mi_selectlabel(limit_coarse*4/5, -limit_coarse*4/5)
femm.mi_setblockprop('seabed', 0, 10*GridSize, '<None>', 0, 0, 0)
else:
for i in range(2):
femm.mi_selectlabel(limit_fine*4/5, limit_fine*4/5) #inner rectangle
femm.mi_setblockprop('seawater', 0, 6*GridSize, '<None>', 0, 0, 0)
################
#Armour - 1st Layer
################
# Apply the materials to the appropriate block labels
for i in range(1,2*NbrOfArmourElem+1):
#armour
femm.mi_selectlabel(R_armour*sp.sin((i-1)*theta), R_armour*sp.cos((i-1)*theta))
femm.mi_setblockprop('armour', 0, 0.5*GridSize, '<None>', 0, 0, 0)
femm.mi_clearselected()
################
# Area between armouring
################
for i in range(2):
femm.mi_selectlabel((R_armour-0.4*r_armour)*sp.sin((1-0.5)*theta), (R_armour-0.4*r_armour)*sp.cos((1-0.5)*theta))
femm.mi_setblockprop('seawater', 0, GridSize, '<None>', 0, 0, 0)
if r_armour_2!=0: # check if cable has a 2nd armour layer
################
#Armour - 2nd Layer
################
# Apply the materials to the appropriate block labels
for i in range(1,2*NbrOfArmourElem2+1):
#armour
femm.mi_selectlabel(R_armour_2*sp.sin((i-1)*theta_2), R_armour_2*sp.cos((i-1)*theta_2))
femm.mi_setblockprop('armour', 0, 0.5*GridSize, '<None>', 0, 0, 0)
femm.mi_clearselected()
################
#Over Sheath:
################
# Apply the materials to the appropriate block labels
for i in range(2):
femm.mi_selectlabel(0, R_armour_2+r_armour_2+dist1+0.5*over_sheath)
femm.mi_setblockprop('over', 0, GridSize, '<None>', 0, 0, 0)
else:
################
#Over Sheath:
################
# Apply the materials to the appropriate block labels
for i in range(2):
femm.mi_selectlabel(0, R_armour+r_armour+dist1+0.5*over_sheath)
femm.mi_setblockprop('over', 0, GridSize, '<None>', 0, 0, 0)
################
#Bedding
################
# Apply the materials to the appropriate block labels
for i in range(2):
femm.mi_selectlabel(0,cond_center*sin60+r_cond_total-delta_y_triangle+0.5*bedding)
femm.mi_setblockprop('bedding', 0, GridSize, '<None>', 0, 0, 0)
################
#Laying Up / Filling
################
for i in range(2):
femm.mi_selectlabel(0, 0)
femm.mi_setblockprop('bedding', 0, GridSize, '<None>', 0, 0, 0)
################
#Conductors
################
for i in range(2):
# Metallic Insulation Screen (e.g. lead)
femm.mi_selectlabel(cond_center*cos60+r_cond_total-0.5*insul_screen_ME, -delta_y_triangle)
# Sheath - Metallic 2
femm.mi_selectlabel(0+r_cond_total-0.5*insul_screen_ME, cond_center*sin60-delta_y_triangle)
# Sheath - Metallic 3
femm.mi_selectlabel(-cond_center*cos60+r_cond_total-0.5*insul_screen_ME, -delta_y_triangle)
# Apply the materials to the appropriate block labels
femm.mi_setblockprop('M_screen', 0, GridSize, '<None>', 0, 0, 0)
for i in range(2):
# Non Metallic Insulation Screen (semi-conductive tape)
femm.mi_selectlabel(cond_center*cos60+r_cond_insu_screen_NM-0.5*insul_screen_NM, -delta_y_triangle)
# Insulation Screen - Non-Metallic 2
femm.mi_selectlabel(0+r_cond_insu_screen_NM-0.5*insul_screen_NM, cond_center*sin60-delta_y_triangle)
# Insulation Screen - Non-Metallic 3
femm.mi_selectlabel(-cond_center*cos60+r_cond_insu_screen_NM-0.5*insul_screen_NM, -delta_y_triangle)
# Apply the materials to the appropriate block labels
femm.mi_setblockprop('NM_screen', 0, GridSize, '<None>', 0, 0, 0)
for i in range(2):
# Insulation 1
femm.mi_selectlabel(cond_center*cos60+r_cond_insul-0.5*insulation, -delta_y_triangle)
# Insulation 2
femm.mi_selectlabel(0+r_cond_insul-0.5*insulation, cond_center*sin60-delta_y_triangle)
# Insulation 3
femm.mi_selectlabel(-cond_center*cos60+r_cond_insul-0.5*insulation, -delta_y_triangle)
# Apply the materials to the appropriate block labels
femm.mi_setblockprop('XLPE', 0, GridSize, '<None>', 0, 0, 0)
for i in range(2):
# Conductor Screen 1
femm.mi_selectlabel(cond_center*cos60+r_cond_screen-0.5*cond_screen, -delta_y_triangle)
# Conductor Screen 2
femm.mi_selectlabel(0+r_cond_screen-0.5*cond_screen, cond_center*sin60-delta_y_triangle)
# Conductor Screen 3
femm.mi_selectlabel(-cond_center*cos60+r_cond_screen-0.5*cond_screen, -delta_y_triangle)
# Apply the materials to the appropriate block labels
femm.mi_setblockprop('NM_screen', 0, GridSize, '<None>', 0, 0, 0)
for i in range(2):
#Conductor 1
femm.mi_selectlabel(cond_center*cos60, -delta_y_triangle)
femm.mi_setblockprop('copper_conductors', 0, 0.5*GridSize, 'icoil1', 0, 0, 0)
femm.mi_clearselected()
#Conductor 2
femm.mi_selectlabel(0, cond_center*sin60-delta_y_triangle)
femm.mi_setblockprop('copper_conductors', 0, 0.5*GridSize, 'icoil2', 0, 0, 0)
femm.mi_clearselected()
#Conductor 3
femm.mi_selectlabel(-cond_center*cos60, -delta_y_triangle);
femm.mi_setblockprop('copper_conductors', 0, 0.5*GridSize, 'icoil3', 0, 0, 0)
# Add a "circuit property" so that we can calculate the properties of the
# coil as seen from the terminals.
femm.mi_addcircprop('icoil1', AMPS+0j, 1)
femm.mi_addcircprop('icoil2', -0.5*AMPS+AMPS*0.866j, 1)
femm.mi_addcircprop('icoil3', -0.5*AMPS-AMPS*0.866j, 1)
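# The three circuit currents above form a balanced three-phase set: equal
# magnitude AMPS, 120 degrees apart (0.866 is sin(60 deg) rounded). A sketch of
# the same phasors generated explicitly (not part of the original script):
#   phases = AMPS * np.exp(1j * np.radians([0, 120, -120]))
#   # phases is approximately [AMPS, -0.5*AMPS + 0.866j*AMPS, -0.5*AMPS - 0.866j*AMPS]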
#Display
print('Objects properties defined')
#==============================================================================
# Analysis and Reporting
#==============================================================================
# We have to give the geometry a name before we can analyze it.
femm.mi_saveas('SubseaCable_model.fem');
# Run the magnetic solver (including mesh generation)
femm.mi_analyze()
# Visualize Solution in Femm
femm.mi_loadsolution()
point_value = femm.mo_getpointvalues(0,30)
fig1, ax1 = plt.subplots()
fig2, ax2 = plt.subplots()
fig3, ax3 = plt.subplots()
fig4, ax4 = plt.subplots()
fig5, ax5 = plt.subplots()
fig6, ax6 = plt.subplots()
for p in range(1,4):
contour=[]
b_value=[]
e_value=[]
b_value_far=[]
e_value_far=[]
for n in range(-500,500,1):
b=femm.mo_getb(n,int(R_cable_total)+100*p) # output B [T]
e=femm.mo_getj(n,int(R_cable_total)+100*p) # output J [MA/m^2]
b_far=femm.mo_getb(n,int(R_cable_total)+1000*p)
e_far=femm.mo_getj(n,int(R_cable_total)+1000*p)
_b_= np.sqrt(np.abs(b[0])**2+np.abs(b[1])**2)*10**6
if burial_depth!=0 and (int(R_cable_total)+100*p)<=burial_depth:
_e_ = np.abs(e)
import numpy as np
from labrad.gpib import GPIBDeviceWrapper
from twisted.internet.defer import inlineCallbacks, returnValue
class RigolDSA800Wrapper(GPIBDeviceWrapper):
# SYSTEM
@inlineCallbacks
def reset(self):
yield self.write('*RST')
@inlineCallbacks
def clear_buffers(self):
yield self.write('*CLS')
# ATTENUATION
@inlineCallbacks
def preamplifier(self, status):
if status is not None:
yield self.write(':SENS:POW:RF:GAIN:STAT {:d}'.format(status))
resp = yield self.query(':SENS:POW:RF:GAIN:STAT?')
returnValue(bool(int(resp)))
@inlineCallbacks
def attenuation(self, att):
if att is not None:
if (att > 0) and (att < 30):
yield self.write(':SENS:POW:RF:ATT {:f}'.format(att))
else:
raise Exception('Error: RF attenuation must be in range: [0, 30].')
resp = yield self.query(':SENS:POW:RF:ATT?')
returnValue(float(resp))
# FREQUENCY RANGE
@inlineCallbacks
def frequencyStart(self, freq):
if freq is not None:
if (freq > 0) and (freq < 7.5e9):
yield self.write(':SENS:FREQ:STAR {:f}'.format(freq))
else:
raise Exception('Error: start frequency must be in range: [0, 7.5e9].')
resp = yield self.query(':SENS:FREQ:STAR?')
returnValue(float(resp))
@inlineCallbacks
def frequencyStop(self, freq):
if freq is not None:
if (freq > 0) and (freq < 7.5e9):
yield self.write(':SENS:FREQ:STOP {:f}'.format(freq))
else:
raise Exception('Error: stop frequency must be in range: [0, 7.5e9].')
resp = yield self.query(':SENS:FREQ:STOP?')
returnValue(float(resp))
@inlineCallbacks
def frequencyCenter(self, freq):
if freq is not None:
if (freq > 0) and (freq < 7.5e9):
yield self.write(':SENS:FREQ:CENT {:f}'.format(freq))
else:
raise Exception('Error: center frequency must be in range: [0, 7.5e9].')
resp = yield self.query(':SENS:FREQ:CENT?')
returnValue(float(resp))
# AMPLITUDE
@inlineCallbacks
def amplitudeReference(self, ampl):
if ampl is not None:
if (ampl > -100) and (ampl < 20):
yield self.write(':DISP:WIN:TRAC:Y:SCAL:RLEV {:f}'.format(ampl))
else:
raise Exception('Error: display reference value must be in range: [-100, 20].')
resp = yield self.query(':DISP:WIN:TRAC:Y:SCAL:RLEV?')
returnValue(float(resp))
@inlineCallbacks
def amplitudeOffset(self, ampl):
if ampl is not None:
if (ampl > -300) and (ampl < 300):
yield self.write(':DISP:WIN:TRAC:Y:SCAL:RLEV:OFFS {:f}'.format(ampl))
else:
raise Exception('Error: display offset must be in range: [-300, 300].')
resp = yield self.query(':DISP:WIN:TRAC:Y:SCAL:RLEV:OFFS?')
returnValue(float(resp))
@inlineCallbacks
def amplitudeScale(self, factor):
if factor is not None:
if (factor > 0.1) and (factor < 20):
yield self.write(':DISP:WIN:TRAC:Y:SCAL:PDIV {:f}'.format(factor))
else:
raise Exception('Error: display scale must be in range: [0.1, 20].')
resp = yield self.query(':DISP:WIN:TRAC:Y:SCAL:PDIV?')
returnValue(float(resp))
# MARKER SETUP
@inlineCallbacks
def markerToggle(self, channel, status):
if status is not None:
yield self.write(':CALC:MARK{:d}:STAT {:d}'.format(channel, status))
resp = yield self.query(':CALC:MARK{:d}:STAT?'.format(channel))
returnValue(bool(int(resp)))
@inlineCallbacks
def markerTrace(self, channel, trace):
if trace is not None:
yield self.write(':CALC:MARK{:d}:TRAC {:d}'.format(channel, trace))
resp = yield self.query(':CALC:MARK{:d}:TRAC?'.format(channel))
returnValue(int(resp))
@inlineCallbacks
def markerMode(self, channel, mode):
modeConvert = {0: 'POS', 1: 'DELT', 2: 'BAND', 3: 'SPAN'}
modeInvert = {val: key for key, val in modeConvert.items()}
if mode is not None:
mode = modeConvert[mode]
yield self.write(':CALC:MARK{:d}:MODE {:s}'.format(channel, mode))
resp = yield self.query(':CALC:MARK{:d}:MODE?'.format(channel))
returnValue(modeInvert[resp])
@inlineCallbacks
def markerReadoutMode(self, channel, mode):
modeConvert = {0: 'FREQ', 1: 'TIME', 2: 'ITIM', 3: 'PER'}
modeInvert = {val: key for key, val in modeConvert.items()}
if mode is not None:
mode = modeConvert[mode]
yield self.write(':CALC:MARK{:d}:X:READ {:s}'.format(channel, mode))
resp = yield self.query(':CALC:MARK{:d}:X:READ?'.format(channel))
returnValue(modeInvert[resp])
@inlineCallbacks
def markerTrack(self, channel, status):
if status is not None:
yield self.write(':CALC:MARK{:d}:TRAC:STAT {:d}'.format(channel, status))
resp = yield self.query(':CALC:MARK{:d}:TRAC:STAT?'.format(channel))
returnValue(bool(int(resp)))
# MARKER READOUT
@inlineCallbacks
def markerAmplitude(self, channel):
resp = yield self.query(':CALC:MARK{:d}:Y?'.format(channel))
returnValue(float(resp))
@inlineCallbacks
def markerFrequency(self, channel, freq):
if freq is not None:
if (freq > 0) and (freq < 1.5e9):
yield self.write(':CALC:MARK{:d}:X {:d}'.format(channel, freq))
else:
raise Exception('Error: marker frequency must be in range: [0, 1.5e9].')
resp = yield self.query(':CALC:MARK{:d}:X?'.format(channel))
returnValue(float(resp))
# PEAK
@inlineCallbacks
def peakSearch(self, status):
if status is not None:
yield self.write(':CALC:MARK:CPE:STAT {:d}'.format(status))
resp = yield self.query(':CALC:MARK:CPE:STAT?')
returnValue(bool(int(resp)))
@inlineCallbacks
def peakSet(self, status):
# todo:
pass
@inlineCallbacks
def peakNext(self, status):
# todo:
pass
# BANDWIDTH
@inlineCallbacks
def bandwidthSweepTime(self, time):
if time is not None:
if (time > 2e-6) and (time < 7500):
yield self.write(':SENS:SWE:TIME {:f}'.format(time))
else:
raise Exception('Error: sweep time must be in range: [2e-6, 7500].')
resp = yield self.query(':SENS:SWE:TIME?')
returnValue(float(resp))
@inlineCallbacks
def bandwidthResolution(self, bw):
if bw is not None:
if (bw > 10) and (bw < 1e7):
yield self.write(':SENS:BAND:RES {:f}'.format(bw))
else:
raise Exception('Error: resolution bandwidth must be in range: [10, 1e7].')
resp = yield self.query(':SENS:BAND:RES?')
returnValue(float(resp))
@inlineCallbacks
def bandwidthVideo(self, bw):
if bw is not None:
if (bw > 10) and (bw < 1e7):
yield self.write(':SENS:BAND:VID {:f}'.format(bw))
else:
raise Exception('Error: video bandwidth must be in range: [10, 1e7].')
resp = yield self.query(':SENS:BAND:VID?')
returnValue(float(resp))
# TRACE
@inlineCallbacks
def getTrace(self, channel, points=1200):
# todo: :TRAC:DATA, set format :FORM:TRAC:DATA, set endianness :FORM:BORD
# oscilloscope must be stopped to get trace
yield self.write(':STOP')
# set max points
max_points = yield self.query(':ACQ:MDEP?')
max_points = int(max_points)
if points > max_points:
points = max_points
# configure trace
yield self.write(':WAV:SOUR CHAN{:d}'.format(channel))
yield self.write(':WAV:MODE RAW')
yield self.write(':WAV:FORM BYTE')
yield self.write(':WAV:STAR 1')
yield self.write(':WAV:STOP {:d}'.format(points))
# transfer waveform preamble
preamble = yield self.query(':WAV:PRE?')
# get waveform data
data = yield self.query(':WAV:DATA?')
# start oscope back up
yield self.write(':RUN')
# parse waveform preamble
points, xincrement, xorigin, xreference, yincrement, yorigin, yreference = yield self._parsePreamble(preamble)
# parse data
trace = yield self._parseByteData(data)
# format data
xAxis = np.arange(points)
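# Hedged sketch of the remaining conversion, assuming the usual Rigol waveform
# preamble scaling; the formula and the return value below are assumptions, not
# confirmed by this file:
#   xAxis = xAxis * xincrement + xorigin
#   yAxis = (np.array(trace) - yorigin - yreference) * yincrement
#   returnValue((xAxis, yAxis))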
import numpy
import sys
from Algorithms.Logistic.Executor.logistic_executor import LogisticExecutor
from Algorithms.Logistic.Solver.logistic_solver import LogisticSolver
from Utils.DCA import sparse_optimization_dca, feasibility_detection_dca, dca_solver
from constants import GS_DCA, GS_SDR, PERFECT_AGGREGATION, DCA_ONLY, SDR_ONLY
home_dir = '../../../'
sys.path.append(home_dir)
# eta_list = 1 / (2 ** numpy.arange(0, 10))
eta_list = [10, 1, 0.1, 0.01, 0.001, 1e-4]
class FedGDLogisticSolver(LogisticSolver):
def fit(self, x_mat, y_vec, data_size_list):
"""
Partition x_mat and y_vec into self.m blocks according to data_size_list,
assigning one block (and one executor) per worker, with no overlap between blocks.
"""
n, self.d = x_mat.shape
# perm = numpy.random.permutation(n)
# self.x_mat = x_mat[perm, :]
# self.y_vec = y_vec[perm, :]
#
# self.s = int(numpy.floor(n / self.m))
# self.n = int(self.s * self.m)
#
# i_begin = 0
# for i in range(self.m):
# idx = range(i_begin, i_begin + self.s)
# i_begin += self.s
# x_blk = x_mat[idx, :]
# y_blk = y_vec[idx, :].reshape(self.s, 1)
#
# executor = LogisticExecutor(x_blk, y_blk)
# self.executor_list.append(executor)
self.data_size_list = data_size_list
i_begin = 0
for i in range(self.m):
idx = range(i_begin, i_begin + data_size_list[i])
i_begin += data_size_list[i]
x_blk = x_mat[idx, :]
y_blk = y_vec[idx, :].reshape(data_size_list[i], 1)
self.n += data_size_list[i]
executor = LogisticExecutor(x_blk, y_blk)
self.executor_list.append(executor)
self.s = int(numpy.floor(self.n / self.m))
def update(self, w, is_search, t):
p_vec_list = list()
grad = numpy.zeros((self.d, 1))
optimization_scaling_factor = 0
# local computation
for i in range(self.m):
data_size = self.executor_list[i].get_data_size()
local_grad = self.executor_list[i].compute_gradient()
self.executor_list[i].update_p(local_grad)
p_vec_list.append(data_size * local_grad)
optimization_scaling_factor = max(optimization_scaling_factor,
1e3 * (data_size / 1) * numpy.linalg.norm(
self.executor_list[i].get_p()))
self.h_mat[t, :, i] = self.h_mat[t, :, i] / (
(data_size / 1) * numpy.linalg.norm(self.executor_list[i].get_p()))
self.h_mat[t, :, :] = optimization_scaling_factor * self.h_mat[t, :, :]
# system optimization
theta = numpy.sqrt(10)
v = sparse_optimization_dca(self.h_mat[t], theta)
# print(v)
self.selected_set, self.a = feasibility_detection_dca(v, self.h_mat[t], theta)
# self.selected_set = range(self.m)
# self.a = dca_solver(self.selected_set, self.h_mat[t])
self.a = numpy.multiply(optimization_scaling_factor, self.a)
print(self.selected_set)
# aggregation
total_data_size = 0
for j in self.selected_set:
total_data_size += self.executor_list[j].get_data_size()
grad += p_vec_list[j]
grad /= total_data_size
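# Over-the-air style aggregation: only devices in selected_set contribute; their
# pre-scaled local gradients (data_size * local_grad) are summed and divided by
# the total data size of the selected devices, giving a data-size-weighted
# average of the selected local gradients.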
if self.opt_mode != PERFECT_AGGREGATION:
# noise = []
# for i in range(self.k):
# noise.append(numpy.random.normal(0, self.tau, (1, 2 * self.d)).view(numpy.complex128))
# noise = numpy.concatenate(noise)
#
# coefficient = numpy.sqrt(p_var / self.p) / len(self.selected_set)
# noise_vec = numpy.multiply(coefficient, numpy.dot(self.a.T, noise).T)
# p_vec = numpy.real(numpy.add(p_vec, noise_vec))
noise = numpy.sqrt(self.tau / 2)
# -*- coding: utf-8 -*-
import os
import shutil
import pathlib
import datetime
import tempfile
import unittest
import numpy as np
from numpy import testing as npt
from geomodels import GravityModel
from geomodels import get_default_data_path
from geomodels.tests.utils import dms_to_dec
class StaticMethodsTestCase(unittest.TestCase):
def test_default_gravity_path(self):
self.assertIsInstance(
GravityModel.default_gravity_path(), str)
self.assertEqual(
GravityModel.default_gravity_path(),
os.path.join(get_default_data_path(), 'gravity')
)
def test_default_gravity_name(self):
names = (
'egm84',
'egm96',
'egm2008',
'wgs84',
)
self.assertTrue(GravityModel.default_gravity_name() in names)
class InstantiationTestCase00(unittest.TestCase):
def test_no_args(self):
model = GravityModel()
self.assertIsInstance(model, GravityModel)
self.assertEqual(model.gravity_model_name(),
GravityModel.default_gravity_name())
class InstantiationTestCase(unittest.TestCase):
MODEL_NAME = 'egm96'
def test_name(self):
model = GravityModel(self.MODEL_NAME)
self.assertIsInstance(model, GravityModel)
self.assertEqual(model.gravity_model_name(), self.MODEL_NAME)
def test_default_path(self):
path = GravityModel.default_gravity_path()
model = GravityModel(self.MODEL_NAME, path)
self.assertEqual(model.gravity_model_name(), self.MODEL_NAME)
self.assertEqual(model.gravity_model_directory(), path)
def test_custom_path(self):
default_path = pathlib.Path(GravityModel.default_gravity_path())
with tempfile.TemporaryDirectory() as dirname:
gravity_model_path = pathlib.Path(dirname) / default_path.name
gravity_model_path.mkdir()
for filename in default_path.glob(f'{self.MODEL_NAME}*'):
shutil.copy(filename, gravity_model_path)
model = GravityModel(self.MODEL_NAME, gravity_model_path)
self.assertEqual(model.gravity_model_name(), self.MODEL_NAME)
self.assertEqual(
model.gravity_model_directory(), str(gravity_model_path))
def test_custom_path_from_env01(self):
default_path = pathlib.Path(GravityModel.default_gravity_path())
with tempfile.TemporaryDirectory() as dirname:
gravity_model_path = pathlib.Path(dirname) / default_path.name
gravity_model_path.mkdir()
for filename in default_path.glob(f'{self.MODEL_NAME}*'):
shutil.copy(filename, gravity_model_path)
old_env = os.environ.get('GEOGRAPHICLIB_DATA')
os.environ['GEOGRAPHICLIB_DATA'] = dirname
try:
model = GravityModel(self.MODEL_NAME)
self.assertEqual(model.gravity_model_name(), self.MODEL_NAME)
self.assertEqual(
model.gravity_model_directory(), str(gravity_model_path))
finally:
if old_env is None:
del os.environ['GEOGRAPHICLIB_DATA']
else:
os.environ['GEOGRAPHICLIB_DATA'] = old_env
def test_custom_path_from_env02(self):
default_path = pathlib.Path(GravityModel.default_gravity_path())
with tempfile.TemporaryDirectory() as dirname:
gravity_model_path = pathlib.Path(dirname) / default_path.name
gravity_model_path.mkdir()
for filename in default_path.glob(f'{self.MODEL_NAME}*'):
shutil.copy(filename, gravity_model_path)
old_env = os.environ.get('GEOGRAPHICLIB_GRAVITY_PATH')
os.environ['GEOGRAPHICLIB_GRAVITY_PATH'] = str(gravity_model_path)
try:
model = GravityModel(self.MODEL_NAME)
self.assertEqual(model.gravity_model_name(), self.MODEL_NAME)
self.assertEqual(
model.gravity_model_directory(), str(gravity_model_path))
finally:
if old_env is None:
del os.environ['GEOGRAPHICLIB_GRAVITY_PATH']
else:
os.environ['GEOGRAPHICLIB_GRAVITY_PATH'] = old_env
class InfoMethodsTestCase(unittest.TestCase):
def setUp(self) -> None:
self.name = GravityModel.default_gravity_name()
self.datapath = GravityModel.default_gravity_path()
self.model = GravityModel(self.name, self.datapath)
def test_description(self):
description = self.model.description()
self.assertIsInstance(description, str)
self.assertNotEqual(description, 'NONE')
def test_datetime(self):
datestr = self.model.datetime()
self.assertIsInstance(datestr, str)
self.assertNotEqual(datestr, 'UNKNOWN')
date = datetime.datetime.strptime(datestr, '%Y-%m-%d')
# date = datetime.datetime.strptime(datestr, '%Y-%m-%d %H:%M:%S')
self.assertLess(date, datetime.datetime.now())
def test_gravity_file(self):
filename = self.model.gravity_file()
self.assertIn(self.name, filename)
self.assertIn(self.datapath, filename)
def test_gravity_model_name(self):
name = self.model.gravity_model_name()
self.assertEqual(name, self.name)
def test_gravity_model_directory(self):
path = self.model.gravity_model_directory()
self.assertEqual(path, self.datapath)
def test_equator_radius(self):
self.assertIsInstance(self.model.equator_radius(), float)
self.assertGreater(self.model.equator_radius(), 5e6)
def test_flattening(self):
self.assertIsInstance(self.model.flattening(), float)
self.assertGreater(self.model.flattening(), 0)
self.assertLess(self.model.flattening(), 1)
def test_mass_constant(self):
self.assertIsInstance(self.model.mass_constant(), float)
self.assertGreater(self.model.mass_constant(), 0)
def test_reference_mass_constant(self):
self.assertIsInstance(self.model.reference_mass_constant(), float)
self.assertGreater(self.model.reference_mass_constant(), 0)
def test_angular_velocity(self):
self.assertIsInstance(self.model.angular_velocity(), float)
self.assertGreater(self.model.angular_velocity(), 0)
class ComputationTestCase(unittest.TestCase):
MODEL_NAME = 'egm96'
LAT = +dms_to_dec(27, 59, 17) # 27:59:17N
LON = +dms_to_dec(86, 55, 32) # 86:55:32E
HEIGHT = +8820.0 # [m]
X = 302714. # [m]
Y = 5636006. # [m]
Z = 2979476. # [m]
GX = -0.0002103214548 # [m/s**2]
GY = +0.0008380880427 # [m/s**2]
GZ = -9.7665319359240 # [m/s**2]
MGAL = 1.e-5 # [m/s**2]
DELTAX = -21.03214547840420 * MGAL # [m/s**2]
DELTAY = +89.75849905064960 * MGAL # [m/s**2]
DELTAZ = -199.4330543962868 * MGAL # [m/s**2]
GHEIGHT = -28.743736628353
ARCSEC = 1./3600.
DG01 = +208.0275697808106 * MGAL # [m/s**2]
XI = -18.8435137999035 * ARCSEC # [rad]
ETA = +4.4428027084385 * ARCSEC # [rad]
# $ echo 27:59:17N 86:55:32E 8820 | Gravity -n egm96 -p 13
# -0.0002103214548 0.0008380880427 -9.7665319359240
# $ echo 27.988055555555555 86.92555555555556 8820 | Gravity -n egm96 -p 13 -G
# -0.0002103214548 0.0008380880427 -9.7665319359240
# $ echo 27.988055555555555 86.92555555555556 8820 | Gravity -n egm96 -p 13 -D
# -21.0321454784042 89.7584990506496 -199.4330543962868
# echo 27.988055555555555 86.92555555555556 | Gravity -n egm96 -p 13 -H
# -28.743736628353
# $ echo 27.988055555555555 86.92555555555556 8820 | Gravity -n egm96 -p 13 -A
# 208.0275697808106 -18.8435137999035 4.4428027084385
def setUp(self) -> None:
self.model = GravityModel(self.MODEL_NAME)
def test_gravity_scalar(self):
w, gx, gy, gz = self.model.gravity(self.LAT, self.LON, self.HEIGHT)
npt.assert_allclose(gx, self.GX)
npt.assert_allclose(gy, self.GY)
npt.assert_allclose(gz, self.GZ)
def test_disturbance_scalar(self):
t, deltax, deltay, deltaz = self.model.disturbance(
self.LAT, self.LON, self.HEIGHT)
npt.assert_allclose(deltax, self.DELTAX)
npt.assert_allclose(deltay, self.DELTAY)
npt.assert_allclose(deltaz, self.DELTAZ)
def test_geoid_heigt_scalar(self):
gheight = self.model.geoid_height(self.LAT, self.LON)
npt.assert_allclose(gheight, self.GHEIGHT)
def test_spherical_anomaly_scalar(self):
dg01, xi, eta = self.model.spherical_anomaly(
self.LAT, self.LON, self.HEIGHT)
npt.assert_allclose(dg01, self.DG01)
npt.assert_allclose(xi, self.XI)
npt.assert_allclose(eta, self.ETA)
def test_w_scalar(self):
w, gx, gy, gz = self.model.w(self.X, self.Y, self.Z)
self.assertTrue(np.isscalar(w))
self.assertTrue(np.isscalar(gx))
self.assertTrue(np.isscalar(gy))
self.assertTrue(np.isscalar(gz))
def test_v_scalar(self):
v, gx, gy, gz = self.model.v(self.X, self.Y, self.Z)
self.assertTrue(np.isscalar(v))
self.assertTrue(np.isscalar(gx))
self.assertTrue(np.isscalar(gy))
self.assertTrue(np.isscalar(gz))
def test_t_components_scalar(self):
t, gx, gy, gz = self.model.t_components(self.X, self.Y, self.Z)
self.assertTrue(np.isscalar(t))
self.assertTrue(np.isscalar(gx))
self.assertTrue(np.isscalar(gy))
self.assertTrue(np.isscalar(gz))
def test_t_scalar(self):
t = self.model.t(self.X, self.Y, self.Z)
self.assertTrue(np.isscalar(t))
def test_u_scalar(self):
u, gammax, gammay, gammaz = self.model.u(self.X, self.Y, self.Z)
self.assertTrue(np.isscalar(u))
self.assertTrue(np.isscalar(gammax))
self.assertTrue(np.isscalar(gammay))
self.assertTrue(np.isscalar(gammaz))
def test_phi_scalar(self):
phi, fx, fy = self.model.phi(self.X, self.Y)
self.assertTrue(np.isscalar(phi))
self.assertTrue(np.isscalar(fx))
self.assertTrue(np.isscalar(fy))
class VectorComputationTestCase(unittest.TestCase):
MODEL_NAME = 'egm96'
LAT = np.asarray([
[+dms_to_dec(16, 46, 33), -dms_to_dec(16, 46, 43)],
[-dms_to_dec(16, 56, 33), +dms_to_dec(16, 56, 43)],
])
LON = np.asarray([
[-dms_to_dec(3, 0, 34), +dms_to_dec(3, 0, 44)],
[+dms_to_dec(3, 10, 34), -dms_to_dec(3, 10, 44)],
])
HEIGHT = np.asarray([
[+300, +400000],
[+400000, +300],
])
X = np.asarray([
[6100258., 6482309.],
[6475723., 6093852.],
])
Y = np.asarray([
[-320709., +341110.],
[+359341., -338447.],
])
Z = np.asarray([
[+1829182., -1944859.],
[-1963312., +1847129.],
])
GX = np.asarray([
[-0.0000185562383, +0.0000171252863],
[+0.0000179414111, +0.0000255265776],
])
GY = np.asarray([
[-0.0000022160340, +0.0016776569530],
[+0.0016913972644, -0.0000471425191],
])
GZ = np.asarray([
[-9.7839489045745, -8.6568801936947],
[-8.6569658422907, -9.7840427535630],
])
MGAL = 1.e-5 # [m/s**2]
DELTAX = np.asarray([
[-1.8556238327792, +1.7125286304307],
[+1.7941411100352, +2.5526577571442],
]) * MGAL
DELTAY = np.asarray([
[-0.0864975107851, -4.2925736778895],
[-4.3994466419252, -4.5779437005530],
]) * MGAL
DELTAZ = np.asarray([
[-24.6995051809167, -3.77044400298770],
[-3.89353944957160, -25.6012585484130],
]) * MGAL
GHEIGHT = np.asarray([
[+28.707423353273, +14.886288004977],
[+15.032497786636, +28.660865719440],
])
ARCSEC = 1./3600.
DG01 = np.asarray([
[+15.7255802520963, -0.28121454847100],
[-0.19222301397180, +16.6326172077381],
]) * MGAL
XI = np.asarray([
[+0.0278739163708, +1.0212161827314],
[+1.0466052916775, +0.9752134528843],
]) * ARCSEC
ETA = np.asarray([
[+0.3912117252501, -0.4080406679571],
[-0.4274821365431, -0.5381591725495],
]) * ARCSEC
def setUp(self) -> None:
self.model = GravityModel(self.MODEL_NAME)
def test_gravity_vector(self):
w, gx, gy, gz = self.model.gravity(
self.LAT.flatten(), self.LON.flatten(), self.HEIGHT.flatten())
npt.assert_allclose(gx, self.GX.flatten())
npt.assert_allclose(gy, self.GY.flatten())
npt.assert_allclose(gz, self.GZ.flatten())
def test_gravity_matrix(self):
w, gx, gy, gz = self.model.gravity(self.LAT, self.LON, self.HEIGHT)
npt.assert_allclose(gx, self.GX)
npt.assert_allclose(gy, self.GY)
npt.assert_allclose(gz, self.GZ)
def test_disturbance_vector(self):
t, deltax, deltay, deltaz = self.model.disturbance(
self.LAT.flatten(), self.LON.flatten(), self.HEIGHT.flatten())
npt.assert_allclose(deltax, self.DELTAX.flatten())
npt.assert_allclose(deltay, self.DELTAY.flatten())
npt.assert_allclose(deltaz, self.DELTAZ.flatten())
def test_disturbance_matrix(self):
t, deltax, deltay, deltaz = self.model.disturbance(
self.LAT, self.LON, self.HEIGHT)
npt.assert_allclose(deltax, self.DELTAX)
npt.assert_allclose(deltay, self.DELTAY)
npt.assert_allclose(deltaz, self.DELTAZ)
def test_geoid_heigt_vector(self):
gheight = self.model.geoid_height(
self.LAT.flatten(), self.LON.flatten())
npt.assert_allclose(gheight, self.GHEIGHT.flatten())
def test_geoid_heigt_matrix(self):
gheight = self.model.geoid_height(self.LAT, self.LON)
npt.assert_allclose(gheight, self.GHEIGHT)
def test_spherical_anomaly_vector(self):
dg01, xi, eta = self.model.spherical_anomaly(
self.LAT.flatten(), self.LON.flatten(), self.HEIGHT.flatten())
npt.assert_allclose(dg01, self.DG01.flatten())
npt.assert_allclose(xi, self.XI.flatten())
npt.assert_allclose(eta, self.ETA.flatten())
def test_spherical_anomaly_matrix(self):
dg01, xi, eta = self.model.spherical_anomaly(
self.LAT, self.LON, self.HEIGHT)
npt.assert_allclose(dg01, self.DG01)
npt.assert_allclose(xi, self.XI)
npt.assert_allclose(eta, self.ETA)
import sys
import os
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import cooler
from EnHiC.utils import operations
from EnHiC import model, fit
tf.keras.backend.set_floatx('float32')
# 'Dixon2012-H1hESC-HindIII-allreps-filtered.10kb.cool'
# data from ftp://cooler.csail.mit.edu/coolers/hg19/
def addAtPos(mat1, mat2, xypos = (0,0)):
pos_v, pos_h = xypos[0], xypos[1] # offset
v1 = slice(max(0, pos_v), max(min(pos_v + mat2.shape[0], mat1.shape[0]), 0))
h1 = slice(max(0, pos_h), max(min(pos_h + mat2.shape[1], mat1.shape[1]), 0))
v2 = slice(max(0, -pos_v), min(-pos_v + mat1.shape[0], mat2.shape[0]))
h2 = slice(max(0, -pos_h), min(-pos_h + mat1.shape[1], mat2.shape[1]))
mat1[v1, h1] += mat2[v2, h2]
return mat1
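# Hedged usage sketch for addAtPos (not part of the original script): paste a
# small block into a larger matrix, clipping whatever falls outside the borders.
#   canvas = np.zeros((4, 4))
#   patch = np.ones((2, 2))
#   addAtPos(canvas, patch, xypos=(3, 3))  # only the overlapping 1x1 corner is added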
def run(path='./data',
raw_path='raw',
raw_file='Rao2014-GM12878-DpnII-allreps-filtered.10kb.cool',
model_path=None,
sr_path='output',
chromosome='22',
scale=4,
len_size=200,
genomic_distance=2000000,
start=None, end=None, draw_out=False):
sr_file = raw_file.split('-')[0] + '_' + raw_file.split('-')[1] + \
'_' + raw_file.split('-')[2] + '_' + raw_file.split('.')[1]
directory_sr = os.path.join(path, sr_path, sr_file, 'SR', 'chr'+chromosome)
if not os.path.exists(directory_sr):
os.makedirs(directory_sr)
name = os.path.join(path, raw_path, raw_file)
c = cooler.Cooler(name)
resolution = c.binsize
mat = c.matrix(balance=True).fetch('chr'+chromosome)
[Mh, idx] = operations.remove_zeros(mat)
print('Shape HR: {}'.format(Mh.shape), end='\t')
if start is None:
start = 0
if end is None:
end = Mh.shape[0]
Mh = Mh[start:end, start:end]
print('MH: {}'.format(Mh.shape), end='\t')
Ml = operations.sampling_hic(Mh, scale**2, fix_seed=True)
print('ML: {}'.format(Ml.shape))
# Normalization
# the input should not be type of np.matrix!
Ml = np.asarray(Ml)
Mh = np.asarray(Mh)
Ml, Dl = operations.scn_normalization(Ml, max_iter=3000)
print('Dl shape:{}'.format(Dl.shape))
Mh, Dh = operations.scn_normalization(Mh, max_iter=3000)
print('Dh shape:{}'.format(Dh.shape))
if genomic_distance is None:
max_boundary = None
else:
max_boundary = np.ceil(genomic_distance/(resolution))
residual = Mh.shape[0] % int(len_size/2)
print('residual: {}'.format(residual))
hic_hr_front, index_1d_2d_front, index_2d_1d_front = operations.divide_pieces_hic(
Mh[0:-residual, 0:-residual], block_size=len_size, max_distance=max_boundary, save_file=False)
hic_hr_front = np.asarray(hic_hr_front, dtype=np.float32)
print('shape hic_hr front: ', hic_hr_front.shape)
true_hic_hr_front = hic_hr_front
print('shape true hic_hr: ', true_hic_hr_front.shape)
hic_hr_offset, index_1d_2d_offset, index_2d_1d_offset = operations.divide_pieces_hic(
Mh[residual:, residual:], block_size=len_size, max_distance=max_boundary, save_file=False)
hic_hr_offset = np.asarray(hic_hr_offset, dtype=np.float32)
print('shape hic_hr offset: ', hic_hr_offset.shape)
true_hic_hr_offset = hic_hr_offset
print('shape true hic_hr: ', true_hic_hr_offset.shape)
Ml_front = Ml[0:-residual, 0:-residual]
hic_lr_front, _, _ = operations.divide_pieces_hic(
Ml_front, block_size=len_size, max_distance=max_boundary, save_file=False)
hic_lr_front = np.asarray(hic_lr_front, dtype=np.float32)
print('shape hic_lr: ', hic_lr_front.shape)
hic_lr_ds = tf.data.Dataset.from_tensor_slices(
hic_lr_front[..., np.newaxis]).batch(9)
predict_hic_hr_front = fit.predict(model_path, len_size, scale, hic_lr_ds)
predict_hic_hr_front = np.squeeze(predict_hic_hr_front, axis=3)
print('Shape of prediction front: ', predict_hic_hr_front.shape)
file_path = os.path.join(directory_sr, sr_file+'_chr'+chromosome)
np.savez_compressed(file_path+'_front.npz', predict_hic=predict_hic_hr_front, true_hic=true_hic_hr_front,
index_1D_2D=index_1d_2d_front, index_2D_1D=index_2d_1d_front,
start_id=start, end_id=end, residual=0)
predict_hic_hr_merge_front = operations.merge_hic(
predict_hic_hr_front, index_1D_2D=index_1d_2d_front, max_distance=max_boundary)
print('Shape of merge predict hic HR front',
predict_hic_hr_merge_front.shape)
Ml_offset = Ml[residual:, residual:]
hic_lr_offset, _, _ = operations.divide_pieces_hic(
Ml_offset, block_size=len_size, max_distance=max_boundary, save_file=False)
hic_lr_offset = np.asarray(hic_lr_offset, dtype=np.float32)
print('Shape hic_lr_offset: ', hic_lr_offset.shape)
hic_lr_ds = tf.data.Dataset.from_tensor_slices(
hic_lr_offset[..., np.newaxis]).batch(9)
predict_hic_hr_offset = fit.predict(model_path, len_size, scale, hic_lr_ds)
predict_hic_hr_offset = np.squeeze(predict_hic_hr_offset, axis=3)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Deep Recurrent Reinforcement Learning: 1 LSTM layer and 4 Dense layers, tanh activation function, 12 episodes, 50 iterations
drnnLSTMtanhMakespan0=[799, 798, 799, 799, 805, 806, 799, 805, 805, 800, 798, 798]
drnnLSTMtanhMakespan1=[800, 798, 796, 800, 796, 794, 795, 798, 800, 798, 805, 798]
drnnLSTMtanhMakespan2=[796, 800, 798, 804, 800, 798, 798, 798, 800, 800, 802, 797]
drnnLSTMtanhMakespan3=[805, 800, 800, 803, 794, 802, 800, 798, 799, 804, 799, 806]
drnnLSTMtanhMakespan4=[796, 798, 795, 798, 796, 799, 800, 796, 796, 798, 806, 800]
drnnLSTMtanhMakespan5=[798, 798, 799, 800, 800, 808, 798, 798, 801, 796, 799, 798]
drnnLSTMtanhMakespan6=[800, 796, 805, 798, 798, 796, 799, 800, 803, 800, 798, 800]
drnnLSTMtanhMakespan7=[799, 805, 802, 805, 800, 799, 800, 799, 805, 800, 794, 796]
drnnLSTMtanhMakespan8=[799, 798, 800, 798, 798, 800, 800, 800, 804, 799, 800, 804]
drnnLSTMtanhMakespan9=[795, 800, 795, 796, 798, 796, 797, 800, 797, 798, 796, 795]
drnnLSTMtanhMakespan10=[804, 799, 805, 798, 798, 798, 805, 800, 796, 804, 796, 799]
drnnLSTMtanhMakespan11=[795, 803, 805, 798, 795, 801, 798, 798, 804, 803, 799, 804]
drnnLSTMtanhMakespan12=[798, 798, 799, 800, 798, 798, 799, 799, 801, 796, 799, 798]
drnnLSTMtanhMakespan13=[798, 798, 799, 797, 796, 796, 800, 797, 805, 800, 800, 794]
drnnLSTMtanhMakespan14=[800, 798, 798, 796, 800, 800, 798, 798, 802, 798, 802, 798]
drnnLSTMtanhMakespan15=[796, 796, 800, 801, 800, 800, 796, 794, 796, 800, 796, 798]
drnnLSTMtanhMakespan16=[798, 798, 795, 797, 795, 799, 800, 796, 795, 796, 800, 800]
drnnLSTMtanhMakespan17=[794, 795, 800, 798, 795, 796, 798, 796, 795, 794, 798, 796]
drnnLSTMtanhMakespan18=[797, 795, 794, 794, 800, 796, 796, 795, 798, 795, 798, 794]
drnnLSTMtanhMakespan19=[797, 795, 795, 796, 798, 799, 795, 799, 795, 794, 795, 795]
drnnLSTMtanhMakespan20=[796, 794, 798, 797, 798, 799, 795, 795, 797, 795, 795, 792]
drnnLSTMtanhMakespan21=[797, 795, 797, 793, 794, 794, 800, 794, 798, 795, 797, 795]
drnnLSTMtanhMakespan22=[794, 800, 798, 795, 795, 796, 796, 799, 795, 794, 795, 795]
drnnLSTMtanhMakespan23=[795, 795, 794, 795, 794, 794, 797, 799, 796, 794, 794, 795]
drnnLSTMtanhMakespan24=[798, 795, 795, 795, 792, 794, 795, 794, 794, 795, 795, 795]
drnnLSTMtanhMakespan25=[794, 792, 794, 795, 795, 794, 794, 794, 794, 795, 794, 793]
drnnLSTMtanhMakespan26=[794, 794, 795, 796, 798, 795, 794, 794, 794, 794, 795, 794]
drnnLSTMtanhMakespan27=[795, 794, 795, 795, 795, 794, 794, 794, 794, 794, 795, 795]
drnnLSTMtanhMakespan28=[795, 794, 794, 795, 794, 795, 795, 795, 795, 794, 795, 794]
drnnLSTMtanhMakespan29=[792, 794, 795, 794, 794, 795, 794, 793, 795, 794, 795, 792]
drnnLSTMtanhMakespan30=[795, 794, 795, 795, 794, 794, 794, 795, 794, 794, 794, 794]
drnnLSTMtanhMakespan31=[794, 794, 795, 794, 795, 793, 795, 795, 795, 792, 794, 794]
drnnLSTMtanhMakespan32=[795, 795, 794, 793, 795, 795, 795, 795, 794, 794, 795, 794]
drnnLSTMtanhMakespan33=[793, 794, 795, 793, 792, 795, 794, 794, 794, 794, 794, 795]
drnnLSTMtanhMakespan34=[794, 795, 795, 794, 794, 794, 794, 793, 794, 794, 794, 794]
drnnLSTMtanhMakespan35=[794, 794, 797, 793, 792, 794, 793, 794, 795, 794, 795, 792]
drnnLSTMtanhMakespan36=[794, 794, 793, 794, 795, 797, 795, 795, 794, 795, 793, 794]
drnnLSTMtanhMakespan37=[795, 793, 795, 794, 795, 798, 795, 794, 795, 793, 795, 794]
drnnLSTMtanhMakespan38=[794, 795, 793, 795, 794, 794, 794, 794, 794, 794, 797, 795]
drnnLSTMtanhMakespan39=[794, 794, 795, 794, 795, 795, 794, 795, 794, 795, 798, 797]
drnnLSTMtanhMakespan40=[795, 795, 794, 795, 794, 795, 795, 794, 794, 794, 795, 795]
drnnLSTMtanhMakespan41=[794, 795, 792, 794, 794, 798, 795, 794, 794, 794, 793, 795]
drnnLSTMtanhMakespan42=[793, 795, 794, 793, 794, 794, 792, 794, 795, 794, 794, 793]
drnnLSTMtanhMakespan43=[793, 792, 793, 794, 794, 795, 792, 794, 795, 794, 795, 794]
drnnLSTMtanhMakespan44=[793, 794, 795, 795, 794, 794, 795, 798, 794, 792, 795, 794]
drnnLSTMtanhMakespan45=[795, 794, 794, 794, 794, 792, 794, 795, 794, 796, 795, 794]
drnnLSTMtanhMakespan46=[794, 793, 793, 795, 795, 794, 794, 794, 794, 796, 794, 794]
drnnLSTMtanhMakespan47=[794, 794, 795, 794, 794, 795, 792, 795, 794, 795, 795, 794]
drnnLSTMtanhMakespan48=[794, 795, 794, 794, 794, 792, 794, 795, 796, 794, 794, 795]
drnnLSTMtanhMakespan49=[794, 794, 794, 794, 794, 794, 792, 794, 793, 794, 795, 794]
drnnLSTMtanhRewards0=[-0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17725973169122497, -0.1759911894273128, -0.177078750549934, -0.177078750549934, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnLSTMtanhRewards1=[-0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765]
drnnLSTMtanhRewards2=[-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.1768976897689769, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17653532907770195, -0.17562802996914942]
drnnLSTMtanhRewards3=[-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17671654929577466, -0.17508269018743108, -0.17653532907770195, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.1768976897689769, -0.1759911894273128, -0.17725973169122497]
drnnLSTMtanhRewards4=[-0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17725973169122497, -0.17617264919621228]
drnnLSTMtanhRewards5=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.1776214552648934, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drnnLSTMtanhRewards6=[-0.17617264919621228, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17671654929577466, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228]
drnnLSTMtanhRewards7=[-0.1759911894273128, -0.177078750549934, -0.17653532907770195, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387]
drnnLSTMtanhRewards8=[-0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1759911894273128, -0.17617264919621228, -0.1768976897689769]
drnnLSTMtanhRewards9=[-0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026]
drnnLSTMtanhRewards10=[-0.1768976897689769, -0.1759911894273128, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17544633017412387, -0.1759911894273128]
drnnLSTMtanhRewards11=[-0.17526455026455026, -0.17671654929577466, -0.177078750549934, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17580964970257765, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.1759911894273128, -0.1768976897689769]
drnnLSTMtanhRewards12=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1763540290620872, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drnnLSTMtanhRewards13=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108]
drnnLSTMtanhRewards14=[-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765]
drnnLSTMtanhRewards15=[-0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.1763540290620872, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drnnLSTMtanhRewards16=[-0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17617264919621228]
drnnLSTMtanhRewards17=[-0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387]
drnnLSTMtanhRewards18=[-0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108]
drnnLSTMtanhRewards19=[-0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards20=[-0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards21=[-0.17562802996914942, -0.17526455026455026, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026]
drnnLSTMtanhRewards22=[-0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards23=[-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.1759911894273128, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards24=[-0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards25=[-0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221]
drnnLSTMtanhRewards26=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards27=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards28=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards29=[-0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards30=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards31=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards32=[-0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards33=[-0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards34=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards35=[-0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards36=[-0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108]
drnnLSTMtanhRewards37=[-0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards38=[-0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drnnLSTMtanhRewards39=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942]
drnnLSTMtanhRewards40=[-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards41=[-0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026]
drnnLSTMtanhRewards42=[-0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221]
drnnLSTMtanhRewards43=[-0.1749007498897221, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards44=[-0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards45=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards46=[-0.17508269018743108, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards47=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards48=[-0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards49=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
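# Note (added; an inference, not stated in the original script): every makespan/reward pair
# listed above is consistent with reward = -makespan / (makespan + 3741), so the reward lists
# appear to be a fixed transform of the makespans rather than independent measurements.
# A quick consistency check against the first run:
assert all(abs(r + m / (m + 3741)) < 1e-9
           for m, r in zip(drnnLSTMtanhMakespan0, drnnLSTMtanhRewards0))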
# Deep Recurrent Reinforcement Learning: 1 LSTM layer and 4 Dense layers, relu activation function, 12 episodes, 50 iterations
drnnLSTMreluMakespan0=[805, 800, 800, 800, 794, 800, 798, 809, 795, 800, 798, 798]
drnnLSTMreluMakespan1=[798, 798, 796, 799, 800, 796, 796, 798, 798, 794, 798, 800]
drnnLSTMreluMakespan2=[805, 805, 798, 799, 806, 799, 806, 799, 800, 798, 805, 795]
drnnLSTMreluMakespan3=[800, 800, 800, 796, 800, 800, 799, 806, 808, 798, 797, 798]
drnnLSTMreluMakespan4=[805, 805, 795, 796, 799, 804, 798, 794, 798, 794, 796, 810]
drnnLSTMreluMakespan5=[798, 798, 798, 795, 800, 798, 796, 802, 800, 800, 805, 801]
drnnLSTMreluMakespan6=[800, 798, 798, 795, 800, 796, 800, 798, 799, 796, 805, 800]
drnnLSTMreluMakespan7=[800, 800, 800, 799, 798, 798, 800, 805, 800, 799, 800, 801]
drnnLSTMreluMakespan8=[799, 800, 800, 799, 795, 795, 805, 795, 798, 800, 798, 800]
drnnLSTMreluMakespan9=[800, 796, 805, 798, 798, 795, 805, 800, 799, 795, 800, 805]
drnnLSTMreluMakespan10=[805, 798, 805, 800, 801, 805, 799, 805, 798, 800, 800, 798]
drnnLSTMreluMakespan11=[798, 803, 800, 797, 795, 796, 794, 799, 800, 800, 800, 796]
drnnLSTMreluMakespan12=[799, 798, 799, 795, 798, 795, 798, 798, 798, 795, 798, 798]
drnnLSTMreluMakespan13=[798, 798, 799, 796, 798, 796, 800, 799, 796, 794, 796, 795]
drnnLSTMreluMakespan14=[796, 798, 806, 799, 804, 798, 805, 798, 800, 805, 794, 800]
drnnLSTMreluMakespan15=[806, 795, 800, 796, 798, 796, 810, 798, 799, 798, 800, 800]
drnnLSTMreluMakespan16=[799, 796, 798, 798, 798, 800, 798, 810, 796, 805, 800, 795]
drnnLSTMreluMakespan17=[798, 798, 798, 794, 798, 805, 801, 798, 800, 799, 798, 798]
drnnLSTMreluMakespan18=[795, 800, 794, 798, 797, 798, 794, 800, 797, 796, 794, 794]
drnnLSTMreluMakespan19=[798, 802, 794, 798, 799, 795, 797, 795, 800, 796, 797, 796]
drnnLSTMreluMakespan20=[794, 797, 795, 794, 799, 795, 795, 795, 800, 797, 794, 798]
drnnLSTMreluMakespan21=[799, 798, 796, 795, 794, 798, 795, 795, 798, 798, 795, 794]
drnnLSTMreluMakespan22=[794, 794, 795, 797, 795, 795, 795, 792, 794, 795, 794, 794]
drnnLSTMreluMakespan23=[794, 794, 794, 794, 795, 796, 793, 794, 795, 794, 797, 795]
drnnLSTMreluMakespan24=[794, 792, 792, 794, 796, 792, 794, 795, 794, 792, 796, 795]
drnnLSTMreluMakespan25=[794, 795, 795, 794, 794, 792, 795, 792, 795, 794, 794, 794]
drnnLSTMreluMakespan26=[795, 794, 794, 795, 794, 794, 793, 794, 797, 795, 794, 795]
drnnLSTMreluMakespan27=[794, 794, 795, 796, 795, 797, 794, 794, 795, 801, 794, 795]
drnnLSTMreluMakespan28=[795, 795, 795, 795, 794, 792, 794, 797, 794, 795, 795, 795]
drnnLSTMreluMakespan29=[794, 792, 798, 794, 797, 795, 793, 795, 795, 794, 795, 795]
drnnLSTMreluMakespan30=[795, 794, 798, 794, 794, 795, 792, 796, 794, 796, 794, 794]
drnnLSTMreluMakespan31=[794, 795, 795, 794, 795, 794, 795, 795, 794, 794, 795, 795]
drnnLSTMreluMakespan32=[798, 794, 794, 794, 798, 792, 795, 795, 795, 796, 794, 795]
drnnLSTMreluMakespan33=[794, 796, 794, 794, 794, 795, 794, 794, 797, 793, 793, 795]
drnnLSTMreluMakespan34=[794, 794, 795, 794, 794, 793, 794, 795, 793, 795, 795, 794]
drnnLSTMreluMakespan35=[798, 796, 795, 794, 795, 795, 795, 795, 794, 795, 797, 795]
drnnLSTMreluMakespan36=[794, 796, 794, 794, 794, 794, 795, 795, 797, 796, 795, 795]
drnnLSTMreluMakespan37=[795, 794, 796, 795, 795, 795, 795, 794, 792, 797, 794, 793]
drnnLSTMreluMakespan38=[794, 798, 794, 792, 794, 792, 795, 797, 793, 794, 794, 797]
drnnLSTMreluMakespan39=[792, 794, 794, 794, 792, 795, 795, 795, 794, 794, 795, 794]
drnnLSTMreluMakespan40=[792, 795, 795, 792, 795, 795, 794, 795, 794, 795, 794, 795]
drnnLSTMreluMakespan41=[794, 797, 795, 794, 795, 795, 798, 794, 795, 796, 796, 794]
drnnLSTMreluMakespan42=[794, 795, 795, 795, 794, 795, 795, 794, 794, 795, 793, 795]
drnnLSTMreluMakespan43=[795, 794, 795, 794, 795, 795, 792, 794, 794, 795, 794, 795]
drnnLSTMreluMakespan44=[795, 794, 792, 795, 794, 794, 795, 794, 796, 795, 796, 794]
drnnLSTMreluMakespan45=[795, 794, 793, 794, 793, 795, 794, 794, 795, 794, 795, 794]
drnnLSTMreluMakespan46=[794, 796, 793, 794, 794, 795, 799, 795, 794, 794, 794, 794]
drnnLSTMreluMakespan47=[794, 794, 794, 794, 795, 793, 795, 795, 794, 795, 795, 795]
drnnLSTMreluMakespan48=[794, 794, 795, 794, 795, 795, 795, 794, 794, 795, 795, 794]
drnnLSTMreluMakespan49=[795, 795, 795, 794, 795, 795, 794, 795, 793, 793, 792, 792]
drnnLSTMreluRewards0=[-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.1778021978021978, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards1=[-0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17617264919621228]
drnnLSTMreluRewards2=[-0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17725973169122497, -0.1759911894273128, -0.17725973169122497, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.177078750549934, -0.17526455026455026]
drnnLSTMreluRewards3=[-0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17725973169122497, -0.1776214552648934, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765]
drnnLSTMreluRewards4=[-0.177078750549934, -0.177078750549934, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.1768976897689769, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.17798286090969018]
drnnLSTMreluRewards5=[-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17653532907770195, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.1763540290620872]
drnnLSTMreluRewards6=[-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.177078750549934, -0.17617264919621228]
drnnLSTMreluRewards7=[-0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.1763540290620872]
drnnLSTMreluRewards8=[-0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.177078750549934, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228]
drnnLSTMreluRewards9=[-0.17617264919621228, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17526455026455026, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17617264919621228, -0.177078750549934]
drnnLSTMreluRewards10=[-0.177078750549934, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drnnLSTMreluRewards11=[-0.17580964970257765, -0.17671654929577466, -0.17617264919621228, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387]
drnnLSTMreluRewards12=[-0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards13=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.1759911894273128, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnLSTMreluRewards14=[-0.17544633017412387, -0.17580964970257765, -0.17725973169122497, -0.1759911894273128, -0.1768976897689769, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17508269018743108, -0.17617264919621228]
drnnLSTMreluRewards15=[-0.17725973169122497, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17798286090969018, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnLSTMreluRewards16=[-0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17798286090969018, -0.17544633017412387, -0.177078750549934, -0.17617264919621228, -0.17526455026455026]
drnnLSTMreluRewards17=[-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.177078750549934, -0.1763540290620872, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards18=[-0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17562802996914942, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards19=[-0.17580964970257765, -0.17653532907770195, -0.17508269018743108, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387]
drnnLSTMreluRewards20=[-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17562802996914942, -0.17508269018743108, -0.17580964970257765]
drnnLSTMreluRewards21=[-0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards22=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards23=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drnnLSTMreluRewards24=[-0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17508269018743108, -0.17544633017412387, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17544633017412387, -0.17526455026455026]
drnnLSTMreluRewards25=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards26=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards27=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1763540290620872, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards28=[-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards29=[-0.17508269018743108, -0.17471872931833224, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards30=[-0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards31=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards32=[-0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards33=[-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026]
drnnLSTMreluRewards34=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards35=[-0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026]
drnnLSTMreluRewards36=[-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards37=[-0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17562802996914942, -0.17508269018743108, -0.1749007498897221]
drnnLSTMreluRewards38=[-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942]
drnnLSTMreluRewards39=[-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards40=[-0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards41=[-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17508269018743108]
drnnLSTMreluRewards42=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026]
drnnLSTMreluRewards43=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards44=[-0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drnnLSTMreluRewards45=[-0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards46=[-0.17508269018743108, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards47=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards48=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards49=[-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.1749007498897221, -0.17471872931833224, -0.17471872931833224]
# Deep Recurrent Reinforcement Learning: 1 GRU layer and 4 Dense layers, tanh activation function, 12 episodes, 50 iterations
drnnGRUtanhMakespan0 = [798, 799, 798, 804, 805, 799, 801, 801, 801, 799, 798, 796]
drnnGRUtanhMakespan1 = [800, 798, 798, 798, 798, 798, 801, 798, 795, 796, 800, 796]
drnnGRUtanhMakespan2 = [795, 804, 805, 800, 800, 796, 804, 800, 795, 798, 798, 801]
drnnGRUtanhMakespan3 = [806, 796, 794, 797, 798, 800, 800, 808, 805, 798, 800, 809]
drnnGRUtanhMakespan4 = [805, 801, 795, 798, 798, 800, 796, 796, 805, 798, 799, 798]
drnnGRUtanhMakespan5 = [804, 799, 798, 804, 796, 799, 798, 805, 796, 805, 798, 800]
drnnGRUtanhMakespan6 = [800, 799, 794, 801, 799, 796, 800, 804, 797, 796, 800, 798]
drnnGRUtanhMakespan7 = [798, 800, 810, 810, 805, 800, 795, 798, 800, 805, 799, 800]
drnnGRUtanhMakespan8 = [798, 797, 800, 800, 804, 805, 798, 798, 801, 795, 798, 809]
drnnGRUtanhMakespan9 = [803, 800, 800, 805, 805, 798, 804, 803, 805, 801, 810, 801]
drnnGRUtanhMakespan10 = [798, 799, 798, 798, 805, 804, 805, 798, 799, 798, 800, 800]
drnnGRUtanhMakespan11 = [796, 795, 805, 800, 800, 798, 795, 804, 805, 798, 800, 800]
drnnGRUtanhMakespan12 = [799, 799, 809, 800, 799, 799, 797, 805, 799, 800, 798, 795]
drnnGRUtanhMakespan13 = [805, 800, 800, 805, 800, 799, 798, 801, 798, 797, 805, 800]
drnnGRUtanhMakespan14 = [800, 798, 800, 800, 800, 804, 804, 799, 799, 800, 798, 798]
drnnGRUtanhMakespan15 = [805, 800, 795, 800, 804, 795, 800, 798, 799, 798, 800, 796]
drnnGRUtanhMakespan16 = [806, 795, 801, 799, 799, 796, 796, 794, 802, 796, 800, 802]
drnnGRUtanhMakespan17 = [796, 800, 798, 800, 794, 800, 804, 805, 798, 810, 800, 798]
drnnGRUtanhMakespan18 = [798, 800, 794, 794, 797, 798, 800, 805, 798, 798, 804, 798]
drnnGRUtanhMakespan19 = [796, 800, 806, 799, 796, 800, 798, 805, 798, 799, 797, 805]
drnnGRUtanhMakespan20 = [805, 800, 799, 796, 805, 805, 805, 794, 809, 796, 800, 797]
drnnGRUtanhMakespan21 = [798, 800, 800, 800, 798, 801, 796, 801, 801, 801, 795, 799]
drnnGRUtanhMakespan22 = [798, 801, 797, 800, 799, 795, 799, 799, 800, 801, 800, 799]
drnnGRUtanhMakespan23 = [800, 798, 799, 805, 794, 800, 798, 796, 796, 804, 800, 794]
drnnGRUtanhMakespan24 = [800, 800, 798, 805, 804, 799, 798, 801, 800, 798, 798, 798]
drnnGRUtanhMakespan25 = [798, 798, 798, 795, 800, 803, 798, 798, 800, 799, 796, 798]
drnnGRUtanhMakespan26 = [796, 798, 798, 798, 805, 796, 798, 798, 805, 795, 801, 796]
drnnGRUtanhMakespan27 = [794, 796, 796, 800, 800, 798, 800, 798, 802, 798, 797, 798]
drnnGRUtanhMakespan28 = [799, 799, 800, 800, 798, 802, 799, 798, 795, 795, 794, 798]
drnnGRUtanhMakespan29 = [798, 796, 796, 797, 796, 798, 800, 800, 796, 798, 800, 795]
drnnGRUtanhMakespan30 = [799, 798, 795, 795, 800, 795, 798, 798, 799, 798, 805, 799]
drnnGRUtanhMakespan31 = [795, 799, 794, 794, 796, 795, 795, 794, 798, 797, 798, 795]
drnnGRUtanhMakespan32 = [797, 798, 795, 796, 798, 795, 797, 798, 795, 794, 795, 796]
drnnGRUtanhMakespan33 = [799, 795, 794, 794, 798, 795, 798, 797, 800, 796, 795, 794]
drnnGRUtanhMakespan34 = [798, 795, 798, 796, 798, 794, 796, 798, 798, 798, 796, 797]
drnnGRUtanhMakespan35 = [795, 798, 796, 798, 794, 801, 795, 800, 795, 800, 794, 800]
drnnGRUtanhMakespan36 = [798, 799, 796, 797, 795, 794, 800, 795, 795, 794, 795, 795]
drnnGRUtanhMakespan37 = [799, 798, 795, 795, 794, 795, 795, 796, 805, 795, 798, 796]
drnnGRUtanhMakespan38 = [798, 794, 795, 795, 795, 796, 795, 796, 800, 798, 797, 796]
drnnGRUtanhMakespan39 = [794, 795, 795, 797, 795, 795, 794, 794, 798, 795, 794, 798]
drnnGRUtanhMakespan40 = [795, 795, 795, 795, 795, 795, 794, 794, 793, 797, 794, 795]
drnnGRUtanhMakespan41 = [794, 794, 795, 793, 795, 795, 792, 794, 795, 794, 794, 794]
drnnGRUtanhMakespan42 = [795, 795, 795, 796, 794, 797, 795, 795, 792, 795, 796, 793]
drnnGRUtanhMakespan43 = [794, 795, 795, 794, 795, 794, 798, 794, 797, 795, 794, 794]
drnnGRUtanhMakespan44 = [795, 795, 793, 794, 795, 794, 795, 795, 794, 794, 795, 794]
drnnGRUtanhMakespan45 = [794, 794, 794, 794, 794, 794, 795, 794, 794, 794, 796, 795]
drnnGRUtanhMakespan46 = [795, 794, 795, 794, 794, 794, 793, 794, 795, 795, 794, 797]
drnnGRUtanhMakespan47 = [794, 794, 794, 794, 795, 794, 795, 792, 794, 795, 794, 794]
drnnGRUtanhMakespan48 = [795, 794, 794, 794, 795, 798, 794, 794, 794, 795, 794, 794]
drnnGRUtanhMakespan49 = [795, 795, 794, 795, 793, 795, 796, 794, 795, 794, 794, 797]
drnnGRUtanhRewards0 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.1768976897689769, -0.177078750549934, -0.1759911894273128, -0.1763540290620872, -0.1763540290620872, -0.1763540290620872, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387]
drnnGRUtanhRewards1 = [-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387]
drnnGRUtanhRewards2 = [-0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872]
drnnGRUtanhRewards3 = [-0.17725973169122497, -0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.1776214552648934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.1778021978021978]
drnnGRUtanhRewards4 = [-0.177078750549934, -0.1763540290620872, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUtanhRewards5 = [-0.1768976897689769, -0.1759911894273128, -0.17580964970257765, -0.1768976897689769, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17617264919621228]
drnnGRUtanhRewards6 = [-0.17617264919621228, -0.1759911894273128, -0.17508269018743108, -0.1763540290620872, -0.1759911894273128, -0.17544633017412387, -0.17617264919621228, -0.1768976897689769, -0.17562802996914942, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765]
drnnGRUtanhRewards7 = [-0.17580964970257765, -0.17617264919621228, -0.17798286090969018, -0.177078750549934, -0.17798286090969018, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17617264919621228]
drnnGRUtanhRewards8 = [-0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17580964970257765, -0.1778021978021978]
drnnGRUtanhRewards9 = [-0.17671654929577466, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.177078750549934, -0.1763540290620872, -0.17798286090969018, -0.1763540290620872]
drnnGRUtanhRewards10 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnGRUtanhRewards11 = [-0.17544633017412387, -0.17526455026455026, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnGRUtanhRewards12 = [-0.1759911894273128, -0.1759911894273128, -0.1778021978021978, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17562802996914942, -0.177078750549934, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026]
drnnGRUtanhRewards13 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17562802996914942, -0.177078750549934, -0.17617264919621228]
drnnGRUtanhRewards14 = [-0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1768976897689769, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnGRUtanhRewards15 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17526455026455026, -0.1768976897689769, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387]
drnnGRUtanhRewards16 = [-0.17725973169122497, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17508269018743108, -0.17653532907770195, -0.17544633017412387, -0.17617264919621228, -0.17653532907770195]
drnnGRUtanhRewards17 = [-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17798286090969018, -0.17617264919621228, -0.17580964970257765]
drnnGRUtanhRewards18 = [-0.17580964970257765, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.1768976897689769, -0.17580964970257765]
drnnGRUtanhRewards19 = [-0.17544633017412387, -0.17617264919621228, -0.17725973169122497, -0.1759911894273128, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17562802996914942, -0.1759911894273128, -0.177078750549934]
drnnGRUtanhRewards20 = [-0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17544633017412387, -0.177078750549934, -0.177078750549934, -0.177078750549934, -0.17508269018743108, -0.1778021978021978, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942]
drnnGRUtanhRewards21 = [-0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.17544633017412387, -0.1763540290620872, -0.1763540290620872, -0.1763540290620872, -0.17526455026455026, -0.1759911894273128]
drnnGRUtanhRewards22 = [-0.17580964970257765, -0.1763540290620872, -0.17562802996914942, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.1763540290620872, -0.17617264919621228, -0.1759911894273128]
drnnGRUtanhRewards23 = [-0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17508269018743108]
drnnGRUtanhRewards24 = [-0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.177078750549934, -0.1768976897689769, -0.17580964970257765, -0.1763540290620872, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765]
drnnGRUtanhRewards25 = [-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17671654929577466, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765]
drnnGRUtanhRewards26 = [-0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17526455026455026, -0.1763540290620872, -0.17544633017412387]
drnnGRUtanhRewards27 = [-0.17508269018743108, -0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765]
drnnGRUtanhRewards28 = [-0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drnnGRUtanhRewards29 = [-0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026]
drnnGRUtanhRewards30 = [-0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.1759911894273128]
drnnGRUtanhRewards31 = [-0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026]
drnnGRUtanhRewards32 = [-0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnGRUtanhRewards33 = [-0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drnnGRUtanhRewards34 = [-0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942]
drnnGRUtanhRewards35 = [-0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.1763540290620872, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228]
drnnGRUtanhRewards36 = [-0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnGRUtanhRewards37 = [-0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.177078750549934, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drnnGRUtanhRewards38 = [-0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17562802996914942, -0.17544633017412387]
drnnGRUtanhRewards39 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765]
drnnGRUtanhRewards40 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026]
drnnGRUtanhRewards41 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards42 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17544633017412387, -0.1749007498897221]
drnnGRUtanhRewards43 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards44 = [-0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnGRUtanhRewards45 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnGRUtanhRewards46 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942]
drnnGRUtanhRewards47 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards48 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards49 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942]
# Deep Recurrent Reinforcement Learning: 1 GRU layer and 4 Dense layers, relu activation function, 12 episodes, 50 iterations
drnnGRUreluMakespan0 = [800, 799, 798, 797, 798, 800, 800, 796, 800, 794, 800, 800]
drnnGRUreluMakespan1 = [798, 800, 805, 795, 799, 808, 795, 800, 796, 798, 799, 798]
drnnGRUreluMakespan2 = [799, 800, 806, 800, 800, 805, 805, 798, 799, 807, 800, 800]
drnnGRUreluMakespan3 = [798, 795, 799, 800, 800, 796, 798, 800, 800, 804, 805, 800]
drnnGRUreluMakespan4 = [811, 800, 799, 800, 805, 798, 798, 799, 796, 804, 805, 804]
drnnGRUreluMakespan5 = [799, 795, 797, 800, 798, 800, 800, 798, 800, 797, 800, 798]
drnnGRUreluMakespan6 = [798, 800, 798, 799, 797, 798, 800, 796, 801, 799, 795, 798]
drnnGRUreluMakespan7 = [800, 804, 795, 801, 796, 806, 805, 798, 800, 799, 799, 804]
drnnGRUreluMakespan8 = [800, 799, 799, 800, 805, 796, 800, 800, 810, 796, 800, 798]
drnnGRUreluMakespan9 = [794, 800, 799, 805, 800, 800, 798, 798, 796, 795, 798, 796]
drnnGRUreluMakespan10 = [798, 800, 798, 801, 795, 802, 796, 809, 800, 800, 798, 795]
drnnGRUreluMakespan11 = [804, 800, 799, 799, 798, 803, 798, 798, 805, 803, 800, 796]
drnnGRUreluMakespan12 = [800, 799, 805, 797, 798, 796, 799, 794, 799, 805, 799, 800]
drnnGRUreluMakespan13 = [796, 800, 798, 800, 795, 799, 800, 804, 800, 794, 805, 805]
drnnGRUreluMakespan14 = [800, 795, 796, 798, 798, 801, 805, 794, 800, 801, 801, 796]
drnnGRUreluMakespan15 = [798, 800, 796, 796, 798, 794, 797, 800, 796, 801, 795, 799]
drnnGRUreluMakespan16 = [800, 805, 794, 800, 799, 800, 805, 801, 798, 800, 801, 799]
drnnGRUreluMakespan17 = [797, 803, 801, 808, 794, 799, 799, 800, 805, 796, 801, 796]
drnnGRUreluMakespan18 = [805, 800, 800, 804, 799, 798, 800, 799, 804, 796, 800, 804]
drnnGRUreluMakespan19 = [804, 798, 800, 799, 799, 799, 805, 795, 801, 799, 799, 805]
drnnGRUreluMakespan20 = [799, 804, 796, 798, 796, 798, 800, 805, 799, 810, 800, 800]
drnnGRUreluMakespan21 = [798, 799, 799, 805, 798, 798, 805, 798, 794, 799, 798, 798]
drnnGRUreluMakespan22 = [799, 798, 798, 796, 798, 805, 799, 798, 798, 799, 796, 798]
drnnGRUreluMakespan23 = [798, 805, 808, 798, 798, 805, 810, 796, 804, 799, 800, 799]
drnnGRUreluMakespan24 = [798, 796, 798, 795, 800, 798, 799, 798, 797, 805, 798, 800]
drnnGRUreluMakespan25 = [799, 796, 799, 798, 805, 798, 798, 800, 796, 794, 810, 798]
drnnGRUreluMakespan26 = [799, 798, 805, 800, 802, 798, 799, 799, 799, 794, 802, 797]
drnnGRUreluMakespan27 = [798, 800, 805, 796, 798, 795, 802, 796, 798, 800, 798, 794]
drnnGRUreluMakespan28 = [796, 805, 798, 800, 800, 798, 810, 798, 798, 798, 796, 796]
drnnGRUreluMakespan29 = [800, 798, 798, 802, 794, 798, 796, 808, 800, 800, 798, 799]
drnnGRUreluMakespan30 = [798, 796, 798, 798, 794, 798, 794, 800, 796, 794, 800, 800]
drnnGRUreluMakespan31 = [794, 802, 797, 799, 798, 800, 799, 799, 796, 796, 798, 798]
drnnGRUreluMakespan32 = [799, 798, 794, 795, 798, 805, 804, 797, 795, 800, 796, 798]
drnnGRUreluMakespan33 = [803, 799, 805, 796, 794, 798, 797, 798, 798, 794, 794, 798]
drnnGRUreluMakespan34 = [810, 796, 795, 798, 799, 798, 796, 795, 795, 797, 798, 798]
drnnGRUreluMakespan35 = [799, 799, 799, 799, 795, 798, 795, 800, 796, 795, 795, 796]
drnnGRUreluMakespan36 = [795, 797, 798, 799, 799, 799, 800, 794, 796, 795, 798, 800]
drnnGRUreluMakespan37 = [800, 798, 799, 794, 800, 796, 798, 798, 797, 800, 794, 798]
drnnGRUreluMakespan38 = [800, 799, 794, 796, 795, 800, 796, 804, 800, 795, 800, 798]
drnnGRUreluMakespan39 = [794, 798, 795, 804, 805, 799, 798, 800, 796, 798, 795, 794]
drnnGRUreluMakespan40 = [799, 798, 796, 798, 798, 799, 800, 796, 798, 798, 799, 798]
drnnGRUreluMakespan41 = [796, 798, 800, 797, 799, 796, 797, 796, 799, 804, 805, 798]
drnnGRUreluMakespan42 = [798, 794, 795, 799, 799, 798, 797, 798, 798, 798, 798, 795]
drnnGRUreluMakespan43 = [799, 798, 794, 794, 795, 794, 795, 799, 799, 800, 799, 794]
drnnGRUreluMakespan44 = [795, 796, 795, 799, 794, 795, 794, 796, 795, 794, 795, 796]
drnnGRUreluMakespan45 = [794, 797, 794, 795, 796, 795, 794, 799, 795, 794, 798, 798]
drnnGRUreluMakespan46 = [795, 795, 794, 795, 794, 794, 792, 794, 795, 797, 794, 794]
drnnGRUreluMakespan47 = [798, 796, 797, 798, 794, 798, 794, 797, 794, 803, 798, 798]
drnnGRUreluMakespan48 = [795, 794, 796, 798, 795, 794, 796, 795, 796, 794, 796, 796]
drnnGRUreluMakespan49 = [798, 798, 796, 798, 798, 796, 796, 798, 798, 798, 796, 798]
drnnGRUreluRewards0 = [-0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards1 = [-0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17526455026455026, -0.1759911894273128, -0.1776214552648934, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUreluRewards2 = [-0.1759911894273128, -0.17617264919621228, -0.17725973169122497, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.1774406332453826, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards3 = [-0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.177078750549934, -0.17617264919621228]
drnnGRUreluRewards4 = [-0.1781634446397188, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.1768976897689769, -0.177078750549934, -0.1768976897689769]
drnnGRUreluRewards5 = [-0.1759911894273128, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards6 = [-0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765]
drnnGRUreluRewards7 = [-0.17617264919621228, -0.1768976897689769, -0.17526455026455026, -0.1763540290620872, -0.17544633017412387, -0.17725973169122497, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.1768976897689769]
drnnGRUreluRewards8 = [-0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17798286090969018, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards9 = [-0.17508269018743108, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drnnGRUreluRewards10 = [-0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.17526455026455026, -0.17653532907770195, -0.17544633017412387, -0.1778021978021978, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026]
drnnGRUreluRewards11 = [-0.1768976897689769, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17580964970257765, -0.17671654929577466, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17671654929577466, -0.17617264919621228, -0.17544633017412387]
drnnGRUreluRewards12 = [-0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17508269018743108, -0.1759911894273128, -0.177078750549934, -0.1759911894273128, -0.17617264919621228]
drnnGRUreluRewards13 = [-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.1768976897689769, -0.17617264919621228, -0.17508269018743108, -0.177078750549934, -0.177078750549934]
drnnGRUreluRewards14 = [-0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.1763540290620872, -0.1763540290620872, -0.17544633017412387]
drnnGRUreluRewards15 = [-0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.1763540290620872, -0.17526455026455026, -0.1759911894273128]
drnnGRUreluRewards16 = [-0.17617264919621228, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.1763540290620872, -0.17580964970257765, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128]
drnnGRUreluRewards17 = [-0.17562802996914942, -0.17671654929577466, -0.1763540290620872, -0.1776214552648934, -0.17508269018743108, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17544633017412387, -0.1763540290620872, -0.17544633017412387]
drnnGRUreluRewards18 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1768976897689769, -0.17544633017412387, -0.17617264919621228, -0.1768976897689769]
drnnGRUreluRewards19 = [-0.1768976897689769, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128, -0.1759911894273128, -0.177078750549934]
drnnGRUreluRewards20 = [-0.1759911894273128, -0.1768976897689769, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17798286090969018, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards21 = [-0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17508269018743108, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards22 = [-0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.177078750549934, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765]
drnnGRUreluRewards23 = [-0.17580964970257765, -0.177078750549934, -0.1776214552648934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17798286090969018, -0.17544633017412387, -0.1768976897689769, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128]
drnnGRUreluRewards24 = [-0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.177078750549934, -0.17580964970257765, -0.17617264919621228]
drnnGRUreluRewards25 = [-0.1759911894273128, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17798286090969018, -0.17580964970257765]
drnnGRUreluRewards26 = [-0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.17653532907770195, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17508269018743108, -0.17653532907770195, -0.17562802996914942]
drnnGRUreluRewards27 = [-0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17653532907770195, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17508269018743108]
drnnGRUreluRewards28 = [-0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17798286090969018, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387]
drnnGRUreluRewards29 = [-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17653532907770195, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.1776214552648934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128]
drnnGRUreluRewards30 = [-0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards31 = [-0.17508269018743108, -0.17653532907770195, -0.17562802996914942, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards32 = [-0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.1768976897689769, -0.177078750549934, -0.17562802996914942, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drnnGRUreluRewards33 = [-0.17671654929577466, -0.1759911894273128, -0.177078750549934, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765]
drnnGRUreluRewards34 = [-0.17798286090969018, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards35 = [-0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387]
drnnGRUreluRewards36 = [-0.17526455026455026, -0.17562802996914942, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228]
drnnGRUreluRewards37 = [-0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765]
drnnGRUreluRewards38 = [-0.17617264919621228, -0.1759911894273128, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards39 = [-0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108]
drnnGRUreluRewards40 = [-0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUreluRewards41 = [-0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.1759911894273128, -0.1768976897689769, -0.177078750549934, -0.17580964970257765]
drnnGRUreluRewards42 = [-0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026]
drnnGRUreluRewards43 = [-0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.17508269018743108]
drnnGRUreluRewards44 = [-0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387]
drnnGRUreluRewards45 = [-0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards46 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108]
drnnGRUreluRewards47 = [-0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17671654929577466, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards48 = [-0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387]
drnnGRUreluRewards49 = [-0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765]
# Deep Reinforcement Learning: 5 Dense layers, tanh activation function, 12 episodes, 50 iterations
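# A minimal, hypothetical sketch of the dense-only agent described in the comment
# above (5 Dense layers, tanh activation); the same builder called with
# activation="relu" would correspond to the relu variant recorded further below.
# Layer widths, the Keras framework, and the input/output sizes are illustrative
# assumptions only; the actual training script is not part of this file.
def _sketch_drl_dense(n_features, n_actions, activation="tanh"):
    from tensorflow import keras  # assumed framework, imported lazily
    return keras.Sequential([
        keras.layers.Dense(64, activation=activation, input_shape=(n_features,)),
        keras.layers.Dense(64, activation=activation),
        keras.layers.Dense(64, activation=activation),
        keras.layers.Dense(64, activation=activation),
        keras.layers.Dense(n_actions, activation="linear"),  # 5th Dense layer: one output per action
    ])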
drlTanhMakespan0 = [794, 794, 805, 799, 810, 800, 794, 810, 804, 806, 812, 808]
drlTanhMakespan1 = [796, 795, 795, 798, 799, 800, 800, 795, 797, 796, 797, 799]
drlTanhMakespan2 = [800, 797, 798, 801, 799, 800, 796, 795, 797, 796, 794, 798]
drlTanhMakespan3 = [800, 795, 799, 796, 799, 798, 795, 799, 795, 799, 798, 796]
drlTanhMakespan4 = [809, 795, 795, 800, 797, 795, 798, 798, 799, 799, 798, 798]
drlTanhMakespan5 = [795, 795, 795, 799, 795, 798, 795, 800, 795, 796, 795, 805]
drlTanhMakespan6 = [794, 800, 795, 793, 798, 795, 794, 798, 795, 799, 795, 796]
drlTanhMakespan7 = [795, 795, 795, 795, 798, 795, 797, 797, 795, 795, 798, 797]
drlTanhMakespan8 = [795, 795, 795, 794, 800, 800, 794, 795, 794, 794, 797, 795]
drlTanhMakespan9 = [793, 794, 796, 795, 796, 800, 794, 797, 793, 795, 798, 795]
drlTanhMakespan10 = [795, 795, 797, 794, 795, 798, 797, 795, 798, 794, 794, 794]
drlTanhMakespan11 = [795, 795, 795, 795, 797, 795, 795, 794, 795, 795, 795, 794]
drlTanhMakespan12 = [794, 798, 795, 794, 795, 795, 795, 797, 799, 795, 795, 795]
drlTanhMakespan13 = [795, 797, 795, 800, 796, 795, 796, 795, 795, 795, 798, 794]
drlTanhMakespan14 = [795, 795, 796, 794, 794, 794, 797, 795, 798, 795, 795, 793]
drlTanhMakespan15 = [799, 794, 795, 795, 795, 796, 801, 797, 795, 794, 795, 799]
drlTanhMakespan16 = [795, 795, 796, 798, 795, 795, 795, 795, 795, 798, 798, 796]
drlTanhMakespan17 = [800, 798, 795, 795, 798, 794, 795, 795, 797, 795, 796, 794]
drlTanhMakespan18 = [797, 800, 798, 797, 796, 794, 799, 797, 795, 796, 799, 798]
drlTanhMakespan19 = [797, 800, 795, 794, 794, 796, 795, 798, 796, 798, 797, 795]
drlTanhMakespan20 = [794, 795, 795, 799, 798, 797, 795, 795, 798, 795, 798, 795]
drlTanhMakespan21 = [796, 795, 795, 795, 795, 797, 798, 794, 797, 795, 796, 794]
drlTanhMakespan22 = [799, 796, 795, 795, 795, 795, 796, 795, 796, 798, 796, 795]
drlTanhMakespan23 = [799, 799, 795, 796, 796, 799, 796, 797, 794, 794, 798, 796]
drlTanhMakespan24 = [795, 795, 797, 800, 797, 795, 795, 796, 795, 795, 798, 799]
drlTanhMakespan25 = [795, 797, 795, 795, 795, 795, 800, 796, 795, 797, 795, 795]
drlTanhMakespan26 = [795, 795, 799, 794, 797, 794, 794, 798, 794, 796, 795, 798]
drlTanhMakespan27 = [796, 796, 795, 796, 798, 797, 794, 795, 794, 794, 794, 798]
drlTanhMakespan28 = [795, 795, 794, 798, 796, 796, 800, 797, 797, 796, 795, 794]
drlTanhMakespan29 = [795, 795, 798, 800, 797, 794, 796, 794, 792, 794, 794, 795]
drlTanhMakespan30 = [798, 797, 795, 799, 797, 800, 798, 799, 797, 800, 794, 796]
drlTanhMakespan31 = [794, 795, 800, 798, 800, 794, 800, 798, 799, 798, 798, 798]
drlTanhMakespan32 = [795, 795, 795, 794, 794, 794, 793, 795, 794, 793, 794, 795]
drlTanhMakespan33 = [794, 797, 792, 794, 795, 795, 797, 795, 795, 794, 792, 795]
drlTanhMakespan34 = [795, 794, 795, 798, 795, 796, 794, 795, 794, 794, 795, 794]
drlTanhMakespan35 = [796, 794, 797, 793, 794, 798, 795, 794, 793, 793, 795, 794]
drlTanhMakespan36 = [795, 795, 794, 795, 795, 795, 794, 795, 795, 793, 795, 794]
drlTanhMakespan37 = [794, 794, 798, 794, 794, 796, 795, 794, 793, 795, 795, 792]
drlTanhMakespan38 = [794, 796, 795, 794, 798, 798, 795, 795, 794, 794, 795, 794]
drlTanhMakespan39 = [794, 795, 795, 796, 792, 794, 795, 794, 795, 794, 794, 795]
drlTanhMakespan40 = [798, 795, 794, 795, 794, 794, 793, 795, 794, 794, 797, 794]
drlTanhMakespan41 = [795, 792, 795, 794, 794, 795, 794, 795, 792, 797, 795, 795]
drlTanhMakespan42 = [792, 794, 794, 795, 794, 794, 795, 794, 792, 794, 794, 794]
drlTanhMakespan43 = [794, 796, 794, 793, 795, 795, 793, 798, 794, 794, 798, 794]
drlTanhMakespan44 = [794, 794, 794, 794, 795, 794, 793, 794, 794, 795, 795, 794]
drlTanhMakespan45 = [790, 794, 793, 794, 793, 794, 795, 794, 791, 795, 795, 794]
drlTanhMakespan46 = [792, 794, 794, 794, 794, 794, 794, 793, 794, 794, 794, 794]
drlTanhMakespan47 = [794, 794, 794, 794, 794, 794, 794, 794, 792, 795, 793, 795]
drlTanhMakespan48 = [794, 794, 792, 792, 797, 794, 792, 794, 794, 795, 794, 795]
drlTanhMakespan49 = [795, 794, 794, 796, 794, 797, 794, 794, 794, 794, 794, 794]
drlTanhMakespan50 = [794, 792, 795, 794, 794, 794, 794, 794, 795, 794, 795, 794]
drlTanhMakespan51 = [794, 792, 796, 795, 794, 794, 795, 794, 795, 795, 795, 794]
drlTanhMakespan52 = [794, 794, 795, 792, 795, 795, 795, 792, 794, 793, 795, 794]
drlTanhMakespan53 = [794, 792, 794, 792, 794, 794, 794, 795, 795, 794, 794, 792]
drlTanhMakespan54 = [795, 793, 794, 794, 794, 792, 795, 794, 794, 792, 794, 796]
drlTanhMakespan55 = [795, 794, 794, 795, 795, 793, 794, 795, 794, 797, 795, 792]
drlTanhMakespan56 = [795, 795, 792, 795, 794, 795, 794, 794, 794, 795, 795, 795]
drlTanhMakespan57 = [795, 792, 795, 794, 795, 795, 792, 795, 794, 797, 792, 792]
drlTanhMakespan58 = [795, 795, 794, 795, 792, 794, 794, 794, 792, 792, 792, 793]
drlTanhMakespan59 = [795, 794, 792, 794, 794, 794, 792, 794, 794, 794, 793, 795]
drlTanhMakespan60 = [794, 795, 795, 795, 798, 794, 794, 794, 794, 794, 794, 792]
drlTanhMakespan61 = [792, 795, 794, 794, 795, 794, 792, 795, 795, 794, 794, 795]
drlTanhMakespan62 = [795, 794, 794, 794, 799, 794, 792, 794, 795, 795, 794, 793]
drlTanhMakespan63 = [791, 795, 792, 796, 794, 794, 792, 795, 793, 794, 792, 794]
drlTanhRewards0 = [-0.17508269018743108, -0.17508269018743108, -0.177078750549934, -0.1759911894273128, -0.17798286090969018, -0.17617264919621228, -0.17508269018743108, -0.17798286090969018, -0.1768976897689769, -0.17725973169122497, -0.17834394904458598, -0.1776214552648934]
drlTanhRewards1 = [-0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.17526455026455026, -0.17562802996914942, -0.17544633017412387, -0.17562802996914942, -0.1759911894273128]
drlTanhRewards2 = [-0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.1763540290620872, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17544633017412387, -0.17580964970257765]
drlTanhRewards3 = [-0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards4 = [-0.1778021978021978, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drlTanhRewards5 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.177078750549934]
drlTanhRewards6 = [-0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17544633017412387]
drlTanhRewards7 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942]
drlTanhRewards8 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drlTanhRewards9 = [-0.1749007498897221, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026]
drlTanhRewards10 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards11 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards12 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards13 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards14 = [-0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.1749007498897221]
drlTanhRewards15 = [-0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17562802996914942, -0.1763540290620872, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128]
drlTanhRewards16 = [-0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards17 = [-0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drlTanhRewards18 = [-0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.17562802996914942, -0.17544633017412387, -0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drlTanhRewards19 = [-0.17562802996914942, -0.17617264919621228, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026]
drlTanhRewards20 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026]
drlTanhRewards21 = [-0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drlTanhRewards22 = [-0.1759911894273128, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026]
drlTanhRewards23 = [-0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards24 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128]
drlTanhRewards25 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards26 = [-0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765]
drlTanhRewards27 = [-0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards28 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards29 = [-0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards30 = [-0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.1759911894273128, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387]
drlTanhRewards31 = [-0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drlTanhRewards32 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards33 = [-0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026]
drlTanhRewards34 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards35 = [-0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards36 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards37 = [-0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224]
drlTanhRewards38 = [-0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards39 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards40 = [-0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108]
drlTanhRewards41 = [-0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards42 = [-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards43 = [-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards44 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards45 = [-0.1749007498897221, -0.17435444714191128, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17453662842012357, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards46 = [-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards47 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026]
drlTanhRewards48 = [-0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards49 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards50 = [-0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards51 = [-0.17508269018743108, -0.17471872931833224, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards52 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards53 = [-0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224]
drlTanhRewards54 = [-0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17544633017412387]
drlTanhRewards55 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17471872931833224]
drlTanhRewards56 = [-0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards57 = [-0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17471872931833224]
drlTanhRewards58 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17471872931833224, -0.1749007498897221]
drlTanhRewards59 = [-0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026]
drlTanhRewards60 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224]
drlTanhRewards61 = [-0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards62 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1759911894273128, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221]
drlTanhRewards63 = [-0.17453662842012357, -0.17471872931833224, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108]
# Deep Reinforcement Learning: 5 Dense layers, relu activation function, 12 episodes, 50 iterations
drlReluMakespan0 = [796, 798, 809, 798, 796, 800, 798, 799, 800, 794, 800, 798]
drlReluMakespan1 = [800, 800, 801, 806, 804, 806, 808, 798, 796, 796, 798, 800]
drlReluMakespan2 = [805, 805, 798, 800, 800, 798, 801, 799, 800, 806, 800, 800]
drlReluMakespan3 = [798, 799, 798, 795, 798, 808, 803, 800, 798, 795, 799, 800]
drlReluMakespan4 = [805, 805, 799, 796, 798, 803, 799, 800, 800, 800, 795, 794]
drlReluMakespan5 = [799, 796, 795, 800, 801, 796, 800, 795, 803, 800, 800, 805]
drlReluMakespan6 = [799, 795, 798, 794, 805, 796, 795, 799, 798, 795, 804, 796]
drlReluMakespan7 = [795, 798, 799, 798, 798, 799, 795, 794, 796, 794, 795, 805]
drlReluMakespan8 = [805, 794, 794, 795, 798, 795, 798, 795, 799, 800, 796, 798]
drlReluMakespan9 = [797, 797, 797, 794, 795, 794, 794, 797, 796, 795, 801, 799]
drlReluMakespan10 = [799, 794, 797, 795, 794, 794, 795, 795, 795, 796, 797, 799]
drlReluMakespan11 = [796, 798, 800, 795, 805, 794, 798, 796, 795, 794, 798, 795]
drlReluMakespan12 = [800, 795, 794, 798, 800, 805, 800, 798, 804, 799, 794, 803]
drlReluMakespan13 = [796, 799, 798, 794, 800, 794, 795, 796, 798, 795, 794, 799]
drlReluMakespan14 = [795, 798, 798, 798, 805, 798, 798, 798, 795, 794, 800, 796]
drlReluMakespan15 = [795, 798, 795, 805, 798, 794, 795, 798, 796, 794, 795, 796]
drlReluMakespan16 = [798, 795, 796, 799, 796, 798, 798, 795, 795, 795, 795, 799]
drlReluMakespan17 = [794, 798, 796, 798, 795, 801, 794, 798, 797, 795, 796, 801]
drlReluMakespan18 = [798, 795, 798, 798, 801, 798, 795, 795, 797, 800, 794, 800]
drlReluMakespan19 = [795, 798, 794, 800, 796, 795, 798, 797, 795, 794, 796, 796]
drlReluMakespan20 = [794, 794, 795, 795, 795, 795, 796, 798, 799, 799, 799, 795]
drlReluMakespan21 = [802, 796, 794, 797, 797, 800, 794, 794, 804, 803, 798, 797]
drlReluMakespan22 = [794, 795, 795, 795, 798, 795, 794, 799, 794, 803, 795, 794]
drlReluMakespan23 = [794, 798, 799, 794, 795, 795, 799, 795, 796, 795, 797, 799]
drlReluMakespan24 = [795, 794, 797, 800, 794, 795, 795, 795, 795, 800, 800, 798]
drlReluMakespan25 = [795, 794, 797, 796, 798, 795, 795, 794, 799, 795, 794, 798]
drlReluMakespan26 = [801, 795, 800, 794, 794, 796, 800, 798, 798, 799, 794, 796]
drlReluMakespan27 = [796, 795, 796, 795, 796, 795, 795, 800, 794, 794, 794, 796]
drlReluMakespan28 = [794, 794, 795, 796, 794, 795, 795, 797, 794, 794, 796, 795]
drlReluMakespan29 = [793, 794, 795, 800, 795, 795, 794, 798, 798, 796, 795, 794]
drlReluMakespan30 = [802, 794, 794, 798, 794, 796, 805, 794, 800, 794, 796, 794]
drlReluMakespan31 = [797, 794, 794, 794, 800, 800, 794, 794, 798, 795, 794, 798]
drlReluMakespan32 = [794, 798, 794, 795, 794, 795, 798, 794, 794, 795, 794, 798]
drlReluMakespan33 = [798, 794, 798, 795, 794, 793, 797, 798, 794, 794, 801, 793]
drlReluMakespan34 = [794, 798, 794, 795, 794, 793, 798, 795, 794, 800, 794, 795]
drlReluMakespan35 = [794, 796, 794, 796, 806, 795, 795, 795, 796, 795, 795, 799]
drlReluMakespan36 = [795, 794, 794, 796, 796, 798, 794, 796, 794, 795, 794, 795]
drlReluMakespan37 = [795, 794, 795, 798, 794, 794, 794, 794, 794, 794, 795, 797]
drlReluMakespan38 = [794, 798, 794, 798, 797, 794, 794, 795, 795, 794, 795, 795]
drlReluMakespan39 = [797, 794, 795, 796, 796, 796, 798, 794, 794, 795, 794, 798]
drlReluMakespan40 = [798, 795, 795, 798, 792, 795, 795, 794, 795, 794, 798, 794]
drlReluMakespan41 = [795, 794, 794, 794, 794, 794, 798, 793, 794, 794, 794, 793]
drlReluMakespan42 = [794, 794, 794, 794, 799, 794, 795, 794, 796, 794, 794, 794]
drlReluMakespan43 = [794, 797, 795, 794, 795, 794, 794, 795, 794, 794, 793, 794]
drlReluMakespan44 = [794, 792, 793, 794, 794, 796, 794, 798, 795, 794, 794, 796]
drlReluMakespan45 = [795, 794, 799, 794, 794, 793, 794, 795, 795, 793, 796, 794]
drlReluMakespan46 = [794, 796, 794, 794, 794, 794, 794, 793, 799, 792, 794, 794]
drlReluMakespan47 = [795, 794, 793, 794, 796, 797, 794, 794, 795, 794, 794, 794]
drlReluMakespan48 = [794, 794, 794, 792, 794, 794, 795, 794, 794, 794, 794, 794]
drlReluMakespan49 = [794, 794, 795, 792, 797, 797, 794, 794, 792, 800, 795, 795]
drlReluRewards0 = [-0.17544633017412387, -0.17580964970257765, -0.1778021978021978, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drlReluRewards1 = [-0.17617264919621228, -0.17617264919621228, -0.1763540290620872, -0.17725973169122497, -0.1768976897689769, -0.17725973169122497, -0.1776214552648934, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228]
drlReluRewards2 = [-0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.1759911894273128, -0.17617264919621228, -0.17725973169122497, -0.17617264919621228, -0.17617264919621228]
drlReluRewards3 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.1776214552648934, -0.17671654929577466, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228]
drlReluRewards4 = [-0.177078750549934, -0.177078750549934, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17671654929577466, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17526455026455026, -0.17508269018743108]
drlReluRewards5 = [-0.1759911894273128, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.1763540290620872, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17671654929577466, -0.17617264919621228, -0.17617264919621228, -0.177078750549934]
drlReluRewards6 = [-0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.177078750549934, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.17544633017412387]
drlReluRewards7 = [-0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.177078750549934]
drlReluRewards8 = [-0.177078750549934, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drlReluRewards9 = [-0.17562802996914942, -0.17562802996914942, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128]
drlReluRewards10 = [-0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17562802996914942, -0.1759911894273128]
drlReluRewards11 = [-0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.177078750549934, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026]
drlReluRewards12 = [-0.17617264919621228, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.17580964970257765, -0.1768976897689769, -0.1759911894273128, -0.17508269018743108, -0.17671654929577466]
drlReluRewards13 = [-0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128]
drlReluRewards14 = [-0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387]
drlReluRewards15 = [-0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.177078750549934, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387]
drlReluRewards16 = [-0.17580964970257765, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128]
drlReluRewards17 = [-0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.1763540290620872]
drlReluRewards18 = [-0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228]
drlReluRewards19 = [-0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387]
drlReluRewards20 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17526455026455026]
drlReluRewards21 = [-0.17653532907770195, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.17562802996914942]
drlReluRewards22 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17671654929577466, -0.17526455026455026, -0.17508269018743108]
drlReluRewards23 = [-0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.1759911894273128]
drlReluRewards24 = [-0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drlReluRewards25 = [-0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards26 = [-0.1763540290620872, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17544633017412387]
drlReluRewards27 = [-0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387]
drlReluRewards28 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drlReluRewards29 = [-0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drlReluRewards30 = [-0.17653532907770195, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108]
drlReluRewards31 = [-0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765]
drlReluRewards32 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards33 = [-0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.1763540290620872, -0.1749007498897221]
drlReluRewards34 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026]
drlReluRewards35 = [-0.17508269018743108, -0.17544633017412387, -0.17725973169122497, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128]
drlReluRewards36 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlReluRewards37 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942]
drlReluRewards38 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drlReluRewards39 = [-0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards40 = [-0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlReluRewards41 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221]
drlReluRewards42 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards43 = [-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108]
drlReluRewards44 = [-0.17508269018743108, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387]
drlReluRewards45 = [-0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17544633017412387, -0.17508269018743108]
drlReluRewards46 = [-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.1759911894273128, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108]
drlReluRewards47 = [-0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards48 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards49 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17562802996914942, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026]
if __name__ == "__main__":
##############################################
##############################################
##############################################
# Deep Recurrent Reinforcement Learning with 1 GRU layer and 4 Dense layers
drnnGRUtanhMakespan = []
drnnGRUtanhRewards = []
drnnGRUtanhMakespanList = []
drnnGRUtanhRewardsList = []
drnnGRUtanhMakespanValues = []
drnnGRUtanhRewardsValues = []
    # Aggregate the 50 GRU-tanh runs: per-run mean makespan and reward, plus the raw per-episode series.
    for i in range(50):
        drnnGRUtanhMakespan.append(np.mean(globals()["drnnGRUtanhMakespan" + str(i)]))
        drnnGRUtanhRewards.append(np.mean(globals()["drnnGRUtanhRewards" + str(i)]))
        drnnGRUtanhMakespanList.append(globals()["drnnGRUtanhMakespan" + str(i)])
        drnnGRUtanhRewardsList.append(globals()["drnnGRUtanhRewards" + str(i)])
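    # Same aggregation for the GRU network with ReLU activation.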
drnnGRUreluMakespan = []
drnnGRUreluRewards = []
drnnGRUreluMakespanList = []
drnnGRUreluRewardsList = []
drnnGRUreluMakespanValues = []
drnnGRUreluRewardsValues = []
    # Aggregate the 50 GRU-ReLU runs in the same way.
    for i in range(50):
        drnnGRUreluMakespan.append(np.mean(globals()["drnnGRUreluMakespan" + str(i)]))
        drnnGRUreluRewards.append(np.mean(globals()["drnnGRUreluRewards" + str(i)]))
        drnnGRUreluMakespanList.append(globals()["drnnGRUreluMakespan" + str(i)])
        drnnGRUreluRewardsList.append(globals()["drnnGRUreluRewards" + str(i)])
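    # Flatten each run's per-episode values into one long sequence so the learning curves can be plotted end to end.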
for vector in drnnGRUtanhMakespanList:
for element in vector:
drnnGRUtanhMakespanValues.append(element)
for vector in drnnGRUtanhRewardsList:
for element in vector:
drnnGRUtanhRewardsValues.append(element)
##################
for vector in drnnGRUreluMakespanList:
for element in vector:
drnnGRUreluMakespanValues.append(element)
for vector in drnnGRUreluRewardsList:
for element in vector:
drnnGRUreluRewardsValues.append(element)
#####################
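    # Smooth every curve with a 12-point rolling mean before plotting.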
smoothGRUtanhMakespanValues = pd.Series(drnnGRUtanhMakespanValues).rolling(12).mean()
plt.plot(smoothGRUtanhMakespanValues)
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' con red neuronal profunda que incluye 1 capa GRU")
plt.show()
smoothGRUtanhRewardsValues = pd.Series(drnnGRUtanhRewardsValues).rolling(12).mean()
plt.plot(smoothGRUtanhRewardsValues)
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' con red neuronal profunda que incluye 1 capa GRU")
plt.show()
#####################
smoothGRUreluMakespanValues = pd.Series(drnnGRUreluMakespanValues).rolling(12).mean()
plt.plot(smoothGRUreluMakespanValues)
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' con red neuronal profunda que incluye 1 capa GRU y ReLU")
plt.show()
smoothGRUreluRewardsValues = pd.Series(drnnGRUreluRewardsValues).rolling(12).mean()
plt.plot(smoothGRUreluRewardsValues)
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' con red neuronal profunda que incluye 1 capa GRU y ReLU")
plt.show()
###################
plt.plot(smoothGRUtanhMakespanValues, color='blue', label='tanh')
plt.plot(smoothGRUreluMakespanValues, color='orange', label='relu')
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' con red neuronal profunda que incluye 1 capa GRU")
plt.legend()
plt.show()
###################
plt.plot(smoothGRUtanhRewardsValues, color='blue', label='tanh')
plt.plot(smoothGRUreluRewardsValues, color='orange', label='relu')
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' con red neuronal profunda que incluye 1 capa GRU")
plt.legend()
plt.show()
###################
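    # Same aggregation and plots for the deep recurrent network with 1 LSTM layer (tanh activation).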
drnnLSTMtanhMakespan = []
drnnLSTMtanhRewards = []
drnnLSTMtanhMakespanList = []
drnnLSTMtanhRewardsList = []
drnnLSTMtanhMakespanValues = []
drnnLSTMtanhRewardsValues = []
    # Aggregate the 50 LSTM-tanh runs.
    for i in range(50):
        drnnLSTMtanhMakespan.append(np.mean(globals()["drnnLSTMtanhMakespan" + str(i)]))
        drnnLSTMtanhRewards.append(np.mean(globals()["drnnLSTMtanhRewards" + str(i)]))
        drnnLSTMtanhMakespanList.append(globals()["drnnLSTMtanhMakespan" + str(i)])
        drnnLSTMtanhRewardsList.append(globals()["drnnLSTMtanhRewards" + str(i)])
for vector in drnnLSTMtanhMakespanList:
for element in vector:
drnnLSTMtanhMakespanValues.append(element)
for vector in drnnLSTMtanhRewardsList:
for element in vector:
drnnLSTMtanhRewardsValues.append(element)
smoothLSTMtanhMakespanValues = pd.Series(drnnLSTMtanhMakespanValues).rolling(12).mean()
plt.plot(smoothLSTMtanhMakespanValues)
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' utilizando LSTM con tanh")
plt.show()
smoothLSTMtanhRewardsValues = pd.Series(drnnLSTMtanhRewardsValues).rolling(12).mean()
plt.plot(smoothLSTMtanhRewardsValues)
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' utilizando LSTM con tanh")
plt.show()
####################
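    # Same aggregation for the LSTM network with relu activation.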
drnnLSTMreluMakespan = []
drnnLSTMreluRewards = []
drnnLSTMreluMakespanList = []
drnnLSTMreluRewardsList = []
drnnLSTMreluMakespanValues = []
drnnLSTMreluRewardsValues = []
    # Aggregate the 50 LSTM-relu runs.
    for i in range(50):
        drnnLSTMreluMakespan.append(np.mean(globals()["drnnLSTMreluMakespan" + str(i)]))
        drnnLSTMreluRewards.append(np.mean(globals()["drnnLSTMreluRewards" + str(i)]))
        drnnLSTMreluMakespanList.append(globals()["drnnLSTMreluMakespan" + str(i)])
        drnnLSTMreluRewardsList.append(globals()["drnnLSTMreluRewards" + str(i)])
for vector in drnnLSTMreluMakespanList:
for element in vector:
drnnLSTMreluMakespanValues.append(element)
for vector in drnnLSTMreluRewardsList:
for element in vector:
drnnLSTMreluRewardsValues.append(element)
smoothLSTMreluMakespanValues = pd.Series(drnnLSTMreluMakespanValues).rolling(12).mean()
plt.plot(smoothLSTMreluMakespanValues)
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' utilizando LSTM con relu")
plt.show()
smoothLSTMreluRewardsValues = pd.Series(drnnLSTMreluRewardsValues).rolling(12).mean()
plt.plot(smoothLSTMreluRewardsValues)
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' utilizando LSTM con relu")
plt.show()
##################
plt.plot(smoothLSTMtanhMakespanValues, color='blue', label='tanh')
plt.plot(smoothLSTMreluMakespanValues, color='orange', label='relu')
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' con red neuronal profunda que incluye 1 capa LSTM")
plt.legend()
plt.show()
##################
plt.plot(smoothLSTMtanhRewardsValues, color='blue', label='tanh')
plt.plot(smoothLSTMreluRewardsValues, color='orange', label='relu')
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' con red neuronal profunda que incluye 1 capa LSTM")
plt.legend()
plt.show()
##################
##################
##################
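    # Aggregate the drlTanh experiment runs (per-run mean makespan and reward).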
drlTanhMakespan = []
drlTanhRewards = []
drlTanhMakespanList = []
drlTanhRewardsList = []
drlTanhMakespanValues = []
drlTanhRewardsValues = []
drlTanhMakespan.append(np.mean(drlTanhMakespan0))
drlTanhMakespan.append(np.mean(drlTanhMakespan1))
drlTanhMakespan.append(np.mean(drlTanhMakespan2))
drlTanhMakespan.append(np.mean(drlTanhMakespan3))
drlTanhMakespan.append(np.mean(drlTanhMakespan4))
drlTanhMakespan.append(np.mean(drlTanhMakespan5))
drlTanhMakespan.append(np.mean(drlTanhMakespan6))
drlTanhMakespan.append(np.mean(drlTanhMakespan7))
drlTanhMakespan.append(np.mean(drlTanhMakespan8))
drlTanhMakespan.append(np.mean(drlTanhMakespan9))
drlTanhMakespan.append(np.mean(drlTanhMakespan10))
drlTanhMakespan.append(np.mean(drlTanhMakespan11))
drlTanhMakespan.append(np.mean(drlTanhMakespan12))
drlTanhMakespan.append(np.mean(drlTanhMakespan13))
drlTanhMakespan.append(np.mean(drlTanhMakespan14))
drlTanhMakespan.append(np.mean(drlTanhMakespan15))
drlTanhMakespan.append(np.mean(drlTanhMakespan16))
drlTanhMakespan.append(np.mean(drlTanhMakespan17))
drlTanhMakespan.append(np.mean(drlTanhMakespan18))
drlTanhMakespan.append(np.mean(drlTanhMakespan19))
drlTanhMakespan.append(np.mean(drlTanhMakespan20))
drlTanhMakespan.append(np.mean(drlTanhMakespan21))
drlTanhMakespan.append(np.mean(drlTanhMakespan22))
drlTanhMakespan.append(np.mean(drlTanhMakespan23))
drlTanhMakespan.append(np.mean(drlTanhMakespan24))
drlTanhMakespan.append(np.mean(drlTanhMakespan25))
drlTanhMakespan.append(np.mean(drlTanhMakespan26))
drlTanhMakespan.append(np.mean(drlTanhMakespan27))
drlTanhMakespan.append(np.mean(drlTanhMakespan28))
drlTanhMakespan.append(np.mean(drlTanhMakespan29))
drlTanhMakespan.append(np.mean(drlTanhMakespan30))
drlTanhMakespan.append(np.mean(drlTanhMakespan31))
drlTanhMakespan.append(np.mean(drlTanhMakespan32))
drlTanhMakespan.append(np.mean(drlTanhMakespan33))
drlTanhMakespan.append(np.mean(drlTanhMakespan34))
drlTanhMakespan.append(np.mean(drlTanhMakespan35))
drlTanhMakespan.append(np.mean(drlTanhMakespan36))
drlTanhMakespan.append(np.mean(drlTanhMakespan37))
drlTanhMakespan.append(np.mean(drlTanhMakespan38))
drlTanhMakespan.append(np.mean(drlTanhMakespan39))
drlTanhMakespan.append(np.mean(drlTanhMakespan40))
drlTanhMakespan.append(np.mean(drlTanhMakespan41))
drlTanhMakespan.append(np.mean(drlTanhMakespan42))
drlTanhMakespan.append(np.mean(drlTanhMakespan43))
drlTanhMakespan.append(np.mean(drlTanhMakespan44))
drlTanhMakespan.append(np.mean(drlTanhMakespan45))
drlTanhMakespan.append(np.mean(drlTanhMakespan46))
drlTanhMakespan.append(np.mean(drlTanhMakespan47))
drlTanhMakespan.append(np.mean(drlTanhMakespan48))
drlTanhMakespan.append(np.mean(drlTanhMakespan49))
drlTanhRewards.append(np.mean(drlTanhRewards0))
drlTanhRewards.append(np.mean(drlTanhRewards1))
drlTanhRewards.append(np.mean(drlTanhRewards2))
drlTanhRewards.append(np.mean(drlTanhRewards3))
drlTanhRewards.append(np.mean(drlTanhRewards4))
drlTanhRewards.append(np.mean(drlTanhRewards5))
drlTanhRewards.append(np.mean(drlTanhRewards6))
drlTanhRewards.append(np.mean(drlTanhRewards7))
drlTanhRewards.append(np.mean(drlTanhRewards8))
drlTanhRewards.append(np.mean(drlTanhRewards9))
drlTanhRewards.append(np.mean(drlTanhRewards10))
drlTanhRewards.append(np.mean(drlTanhRewards11))
    drlTanhRewards.append(np.mean(drlTanhRewards12))
import unittest
import numpy as np
from nptest import nptest
from scipy.interpolate import interp1d
class Test_NumericOperationsTests(unittest.TestCase):
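    # Exercises basic NumPy arithmetic, shift, bitwise and comparison operators on small
    # integer arrays, printing values, shapes and strides for inspection.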
def test_add_operations(self):
a = np.arange(0, 20, 1, dtype = np.int16)
a = a.reshape(5,-1)
print(a)
print(a.shape)
print(a.strides)
b = a + 8
print(b)
print(b.shape)
print(b.strides)
a = np.arange(0, 20, 1, dtype = np.int64)
a = a.reshape(5,-1)
print(a)
print(a.shape)
print(a.strides)
b = a + 2400
print(b)
print(b.shape)
print(b.strides)
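    # Broadcasting: a length-1 and a length-4 row are added to a (5, 4) array.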
def test_add_operations_2(self):
a = np.arange(0, 20, 1, dtype = np.int16)
a = a.reshape(5,-1)
print(a)
b = np.array([2], dtype = np.int16);
c = a + b;
print(c)
b = np.array([10,20,30,40], dtype = np.int16);
d = a + b;
print(d)
def test_subtract_operations(self):
a = np.arange(0, 20, 1, dtype = np.int16)
a = a.reshape(5,-1)
print(a)
print(a.shape)
print(a.strides)
b = a - 8
print(b)
print(b.shape)
print(b.strides)
a = np.arange(0, 20, 1, dtype = np.int64)
a = a.reshape(5,-1)
print(a)
print(a.shape)
print(a.strides)
b = a - 2400
print(b)
print(b.shape)
print(b.strides)
def test_subtract_operations_2(self):
a = np.arange(100, 102, 1, dtype = np.int16)
b = np.array([1,63], dtype = np.int16)
c = a-b
print(a)
print("****")
print(b)
print("****")
print(c)
print("****")
a = np.arange(0, 4, 1, dtype = np.int16).reshape((2,2))
b = np.array([65,78], dtype = np.int16).reshape(1,2)
c = a-b
print(a)
print("****")
print(b)
print("****")
print(c)
def test_multiply_operations(self):
a = np.arange(0, 20, 1, dtype = np.int16)
a = a.reshape(5,-1)
print(a)
print(a.shape)
print(a.strides)
b = a * 8
print(b)
print(b.shape)
print(b.strides)
a = np.arange(0, 20, 1, dtype = np.int64)
a = a.reshape(5,-1)
print(a)
print(a.shape)
print(a.strides)
b = a * 2400
print(b)
print(b.shape)
print(b.strides)
def test_division_operations(self):
a = np.arange(20000, 20020, 1, dtype = np.int16)
a = a.reshape(5,-1)
print(a)
print(a.shape)
print(a.strides)
b = a / 8
print(b)
print(b.shape)
print(b.strides)
a = np.arange(2000000, 2000020, 1, dtype = np.int64)
a = a.reshape(5,-1)
print(a)
print(a.shape)
print(a.strides)
b = a / 2400
print(b)
print(b.shape)
print(b.strides)
def test_leftshift_operations(self):
a = np.arange(0, 20, 1, dtype = np.int16)
a = a.reshape(5,-1)
print(a)
print(a.shape)
print(a.strides)
b = a << 8
print(b)
print(b.shape)
print(b.strides)
a = np.arange(0, 20, 1, dtype = np.int64)
a = a.reshape(5,-1)
print(a)
print(a.shape)
print(a.strides)
b = a << 24
print(b)
print(b.shape)
print(b.strides)
def test_leftshift_operations2(self):
a = np.arange(0, 20, 1, dtype = np.int8)
a = a.reshape(5,-1)
print(a)
print(a.shape)
print(a.strides)
b = a << 16
print(b)
print(b.shape)
print(b.strides)
a = np.arange(0, 20, 1, dtype = np.int64)
a = a.reshape(5,-1)
print(a)
print(a.shape)
print(a.strides)
b = a << 48
print(b)
print(b.shape)
print(b.strides)
def test_rightshift_operations(self):
a = np.arange(20000, 20020, 1, dtype = np.int16)
a = a.reshape(5,-1)
print(a)
print(a.shape)
print(a.strides)
b = a >> 8
print(b)
print(b.shape)
print(b.strides)
a = np.arange(2123450, 2123470, 1, dtype = np.int64)
a = a.reshape(5,-1)
print(a)
print(a.shape)
print(a.strides)
b = a >> 8
print(b)
print(b.shape)
print(b.strides)
def test_bitwiseand_operations(self):
a = np.arange(0, 32, 1, dtype = np.int16)
print(a)
b = a & 0x0f
print(b)
a = np.arange(2048, 2048+32, 1, dtype = np.int64)
print(a)
b = a & 0xFF
print(b)
def test_bitwiseor_operations(self):
a = np.arange(0, 32, 1, dtype = np.int16)
print(a)
b = a | 0x100
print(b)
a = np.arange(2048, 2048+32, 1, dtype = np.int64)
print(a)
b = a | 0x1000
print(b)
def test_bitwisexor_operations(self):
a = np.arange(0, 32, 1, dtype = np.int16)
print(a)
b = a ^ 0xAAA
print(b)
a = np.arange(2048, 2048+32, 1, dtype = np.int64)
print(a)
b = a ^ 0xAAAA
print(b)
def test_remainder_operations(self):
a = np.arange(0, 32, 1, dtype = np.int16)
print(a)
b = a % 6
print(b)
a = np.arange(2048, 2048+32, 1, dtype = np.int64)
print(a)
b = a % 6
print(b)
def test_negative_operations(self):
a = np.arange(0, 32, 1, dtype = np.int16)
print(a)
b = -a
print(b)
def test_invert_operations(self):
a = np.arange(-32, 32, 1, dtype = np.int16)
print(a)
b = ~a
print(b)
def test_LESS_operations(self):
a = np.arange(-5, 5, 1, dtype = np.int16)
print(a)
b = a < -2
print(b)
def test_LESSEQUAL_operations(self):
a = np.arange(-5, 5, 1, dtype = np.int16)
print(a)
b = a <= -2
print(b)
def test_EQUAL_operations(self):
a = np.arange(-5, 5, 1, dtype = np.int16)
print(a)
b = a == -2
print(b)
def test_NOTEQUAL_operations(self):
a = np.arange(-5, 5, 1, dtype = np.int16)
print(a)
b = a != -2
print(b)
def test_GREATER_operations(self):
a = np.arange(-5, 5, 1, dtype = np.int16)
print(a)
b = a > -2
print(b)
def test_GREATEREQUAL_operations(self):
a = np.arange(-5, 5, 1, dtype = np.int16)
print(a)
b = a >= -2
print(b)
def test_LOGICALOR_operations(self):
a = np.arange(-5, 5, 1, dtype = np.int16)
print(a)
b = np.logical_or(a == 0, False)
print(b)
def test_arrayarray_or(self):
a = np.arange(0, 32, 1, dtype = np.int16)
b = np.arange(33, 33+32, 1, dtype = np.int16)
c = a | b
print("A")
print(a)
print("B")
print(b)
print("C")
print(c)
def test_bitwise_and(self):
x = np.arange(1023, 1039, dtype= np.uint32).reshape(2, -1)
y = np.bitwise_and(x, 0x3FF);
z = x & 0x3FF;
print(x)
print(y)
print(z)
return
def test_bitwise_or(self):
x = np.arange(1023, 1039, dtype= np.uint32).reshape(2, -1)
y = np.bitwise_or(x, 0x10);
z = x | 0x10;
print(x)
print(y)
print(z)
return
def test_bitwise_xor(self):
a = np.bitwise_xor(13, 17)
print(a)
b = np.bitwise_xor(31, 5)
print(b)
c = np.bitwise_xor([31,3], 5)
print(c)
d = np.bitwise_xor([31,3], [5,6])
print(d)
e = np.bitwise_xor([True, True], [False, True])
print(e)
return
def test_bitwise_not(self):
a = np.bitwise_not(13)
print(a)
b = np.bitwise_not(31)
print(b)
c = np.bitwise_not([31,3])
print(c)
d = np.bitwise_not([31,3])
print(d)
e = np.bitwise_not([True, False])
print(e)
return
def test_invert(self):
a = np.invert(13)
print(a)
b = np.invert(31)
print(b)
c = np.invert([31,3])
print(c)
d = np.invert([31,3])
print(d)
e = np.invert([True, False])
print(e)
return
def test_right_shift(self):
x = np.arange(1023, 1039, dtype= np.uint32).reshape(2, -1)
y = np.right_shift(x, 2);
z = x >> 2;
print(x)
print(y)
print(z)
return
def test_right_shift_2(self):
a = np.right_shift([10], [1,2,3]);
print(a)
def test_left_shift(self):
x = np.arange(1023, 1039, dtype= np.uint32).reshape(2, -1)
y = np.left_shift(x, 2);
z = x << 2;
print(x)
print(y)
print(z)
return
def test_left_shift_2(self):
a = np.left_shift([10], [1,2,3]);
print(a)
def test_NAN(self):
x = np.arange(1023, 1039, dtype= np.float).reshape(2, -1)
x[:] = np.NaN
print(x)
return
def test_min(self):
x = np.array([2.5, -1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
y = np.min(x)
print(x)
print(y)
return
def test_max(self):
x = np.array([2.5, -1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
y = np.max(x)
print(x)
print(y)
return
def test_isnan(self):
x = np.array([-1.7, np.nan, np.nan, 0.2, 1.5, np.nan, 2.0], dtype=np.float)
y = np.isnan(x)
z = x == np.nan
print(x)
print(y)
print(z);
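        # NaN never compares equal to anything, including itself, so `x == np.nan`
        # yields all-False; np.isnan is the correct elementwise test.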
return
def test_setdiff1d(self):
a = np.array([1, 2, 3, 2, 4, 1])
b = np.array([3, 4, 5, 6])
c = np.setdiff1d(a, b)
print(a)
print(b)
print(c)
return
def test_setdiff1d_2(self):
a = np.arange(1, 39, dtype= np.uint32).reshape(2, -1)
b = np.array([3, 4, 5, 6])
c = np.setdiff1d(a, b)
print(a)
print(b)
print(c)
return
def test_interp1d(self):
x = np.arange(2, 12, 2)
#y = np.arange(1, 6, 1)
y = np.exp(-x/3.0)
#y = x/2.0
f = interp1d(x, y)
#xnew = np.arange(0,9, 1)
ynew = f(x);
print(x)
print(y)
print(ynew)
#plt.plot(x, y, 'o', xnew, ynew, '-')
#plt.show()
return
def test_interp1d_2(self):
x = np.arange(0, 10)
y = np.exp(-x/3.0)
f = interp1d(x, y)
xnew = np.arange(0, 9, 0.1)
ynew = f(xnew) # use interpolation function returned by `interp1d`
print(x)
print(y)
print(xnew)
print(ynew)
return
def test_rot90_1(self):
m = np.array([[1,2],[3,4]], int)
print(m)
print("************")
n = np.rot90(m)
print(n)
print("************")
n = np.rot90(m, 2)
print(n)
print("************")
m = np.arange(8).reshape((2,2,2))
n = np.rot90(m, 1, (1,2))
print(n)
def test_flip_1(self):
A = np.arange(8).reshape((2,2,2))
B = np.flip(A, 0)
print(A)
print("************")
print(B)
print("************")
C = np.flip(A, 1)
print(C)
print("************")
def test_iterable_1(self):
print(np.iterable([1, 2, 3]))
print(np.iterable(2))
def test_trim_zeros_1(self):
a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
print(np.trim_zeros(a))
print(np.trim_zeros(a, 'b'))
def test_logical_and_1(self):
a = np.logical_and(True, False)
print(a)
b = np.logical_and([True, False], [False, False])
print(b)
x = np.arange(5)
c = np.logical_and(x>1, x<4)
print(c)
y = np.arange(6).reshape(2,3)
d = np.logical_and(y>1, y<4)
print(d)
def test_logical_or_1(self):
a = np.logical_or(True, False)
print(a)
b = np.logical_or([True, False], [False, False])
print(b)
x = np.arange(5)
c = np.logical_or(x<1, x>3)
print(c)
y = np.arange(6).reshape(2,3)
d = np.logical_or(y<1, y>3)
print(d)
def test_logical_xor_1(self):
a = np.logical_xor(True, False)
print(a)
b = np.logical_xor([True, False], [False, False])
print(b)
x = np.arange(5)
c = np.logical_xor(x<1, x>3)
print(c)
y = np.arange(6).reshape(2,3)
d = np.logical_xor(y<1, y>3)
print(d)
e = np.logical_xor(0, np.eye(2))
print(e)
def test_logical_not_1(self):
a = np.logical_not(3)
print(a)
b = np.logical_not([0, -1, 0, 1])
print(b)
x = np.arange(5)
c = np.logical_not(x<3)
print(c)
def test_greater_1(self):
a = np.greater([4, 2, 1], [2, 2, 2])
print(a)
b = np.greater([4, 2, 1], 1)
print(b)
c = np.greater(2, [4, 2, 1])
print(c)
def test_greater_equal_1(self):
a = np.greater_equal([4, 2, 1], [2, 2, 2])
print(a)
b = np.greater_equal([4, 2, 1], 1)
print(b)
c = np.greater_equal(2, [4, 2, 1])
print(c)
def test_less_1(self):
a = np.less([4, 2, 1], [2, 2, 2])
print(a)
b = np.less([4, 2, 1], 1)
print(b)
c = np.less(2, [4, 2, 1])
print(c)
def test_less_equal_1(self):
a = np.less_equal([4, 2, 1], [2, 2, 2])
print(a)
b = np.less_equal([4, 2, 1], 1)
print(b)
c = np.less_equal(2, [4, 2, 1])
print(c)
def test_equal_1(self):
a = np.equal([4, 2, 1], [2, 2, 2])
print(a)
b = np.equal([4, 2, 1], 1)
print(b)
c = np.equal(2, [4, 2, 1])
print(c)
def test_not_equal_1(self):
a = np.not_equal([4, 2, 1], [2, 2, 2])
print(a)
b = np.not_equal([4, 2, 1], 1)
print(b)
c = np.not_equal(2, [4, 2, 1])
print(c)
def test_isfinite_1(self):
a = np.isfinite(1)
print(a)
b = np.isfinite(0)
print(b)
c = np.isfinite(np.nan)
print(c)
d = np.isfinite(np.inf)
print(d)
e = np.isfinite(np.NINF)
print(e)
f = np.isfinite([np.log(-1.),1.,np.log(0)])
print(f)
x = np.array([-np.inf, 0., np.inf, np.inf]).reshape(2,2)
y = np.array([2, 2, 2])
g = np.isfinite(x)
print(g)
print(y)
def test_isinf_1(self):
a = np.isinf(1)
print(a)
b = np.isinf(0)
print(b)
c = np.isinf(np.nan)
print(c)
d = np.isinf(np.inf)
print(d)
e = np.isinf(np.NINF)
print(e)
f = np.isinf([np.log(-1.),1.,np.log(0)])
print(f)
x = np.array([-np.inf, 0., np.inf, np.inf]).reshape(2,2)
y = np.array([2, 2, 2])
g = np.isinf(x)
print(g)
print(y)
def test_isneginf_1(self):
a = np.isneginf(1)
print(a)
b = np.isneginf(0)
print(b)
c = np.isneginf(np.nan)
print(c)
d = np.isneginf(np.inf)
print(d)
e = np.isneginf(np.NINF)
print(e)
f = np.isneginf([np.log(-1.),1.,np.log(0)])
print(f)
x = np.array([-np.inf, 0., np.inf, np.inf]).reshape(2,2)
y = np.array([2, 2, 2])
        g = np.isneginf(x)
import numpy as np
from scipy import signal, ndimage
from math import floor
import SBM_gauss
def ssim(img1, img2, cs_map=False):
"""Return the Structural Similarity Map corresponding to input images img1
and img2 (images are assumed to be uint8)
This function attempts to mimic precisely the functionality of ssim.m a
MATLAB provided by the author's of SSIM
https://ece.uwaterloo.ca/~z70wang/research/ssim/ssim_index.m
"""
img1 = img1.astype(float)
img2 = img2.astype(float)
size = min(img1.shape[0], 11)
sigma = 1.5
window = SBM_gauss.fspecial_gauss(size, sigma)
K1 = 0.01
K2 = 0.03
L = 255 #bitdepth of image
C1 = (K1 * L) ** 2
C2 = (K2 * L) ** 2
mu1 = signal.fftconvolve(img1, window, mode = 'valid')
mu2 = signal.fftconvolve(img2, window, mode = 'valid')
mu1_sq = mu1 * mu1
mu2_sq = mu2 * mu2
mu1_mu2 = mu1 * mu2
sigma1_sq = signal.fftconvolve(img1 * img1, window, mode = 'valid') - mu1_sq
sigma2_sq = signal.fftconvolve(img2 * img2, window, mode = 'valid') - mu2_sq
sigma12 = signal.fftconvolve(img1 * img2, window, mode = 'valid') - mu1_mu2
if cs_map:
return (((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)),
(2.0 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2))
else:
return ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
(sigma1_sq + sigma2_sq + C2))
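# Illustrative usage sketch (not part of the original module): `ref` and `dist` are
# assumed to be equally sized uint8 grayscale images already loaded as numpy arrays.
# The map comes from a 'valid' convolution, so it is smaller than the inputs.
#
#   ssim_map = ssim(ref, dist)
#   print('mean SSIM:', ssim_map.mean())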
def msssim(img1, img2):
"""This function implements Multi-Scale Structural Similarity (MSSSIM) Image
Quality Assessment according to Z. Wang's "Multi-scale structural similarity
for image quality assessment" Invited Paper, IEEE Asilomar Conference on
Signals, Systems and Computers, Nov. 2003
Author's MATLAB implementation:-
http://www.cns.nyu.edu/~lcv/ssim/msssim.zip
"""
level = 5
weight = np.array([0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
downsample_filter = np.ones((2, 2)) / 4.0
im1 = img1.astype(np.float64)
im2 = img2.astype(np.float64)
mssim = np.array([])
mcs = np.array([])
for l in range(level):
ssim_map, cs_map = ssim(im1, im2, cs_map = True)
mssim = np.append(mssim, ssim_map.mean())
mcs = np.append(mcs, cs_map.mean())
filtered_im1 = ndimage.filters.convolve(im1, downsample_filter,
mode = 'reflect')
filtered_im2 = ndimage.filters.convolve(im2, downsample_filter,
mode = 'reflect')
im1 = filtered_im1[: : 2, : : 2]
im2 = filtered_im2[: : 2, : : 2]
# Note: Remove the negative and add it later to avoid NaN in exponential.
sign_mcs = np.sign(mcs[0 : level - 1])
sign_mssim = np.sign(mssim[level - 1])
mcs_power = np.power(np.abs(mcs[0 : level - 1]), weight[0 : level - 1])
mssim_power = np.power(np.abs(mssim[level - 1]), weight[level - 1])
return np.prod(sign_mcs * mcs_power) * sign_mssim * mssim_power
#return (np.prod(mcs[0 : level - 1] ** weight[0 : level - 1]) * (mssim[level - 1] ** weight[level - 1]))
def PeakSignaltoNoiseRatio(origImg, distImg, max_value=255):
origImg = origImg.astype(float)
distImg = distImg.astype(float)
M, N = np.shape(origImg)
error = origImg - distImg
MSE = sum(sum(error * error)) / (M * N)
if MSE > 0:
PSNR = 10 * np.log10(max_value * max_value / MSE)
else:
PSNR = 99
return PSNR
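# Worked example for the formula above (illustrative): with 8-bit images and an MSE
# of 100, PSNR = 10 * log10(255**2 / 100) ~= 28.1 dB; identical images skip the
# division and return the fixed ceiling of 99.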
def cqm(orig_img, dist_img):
M, N, C = np.shape(orig_img)
if C != 3:
CQM = float("inf")
return CQM
Ro = orig_img[:, :, 0]
Go = orig_img[:, :, 1]
Bo = orig_img[:, :, 2]
Rd = dist_img[:, :, 0]
Gd = dist_img[:, :, 1]
Bd = dist_img[:, :, 2]
################################################
### Reversible YUV Transformation ###
################################################
YUV_img1 = np.zeros((M, N, 3))
YUV_img2 = np.zeros((M, N, 3))
for i in range(M):
for j in range(N):
### Original Image Trasnformation ###
# Y=(R+2*G+B)/4
YUV_img1[i, j, 0] = floor((Ro[i, j] + Go[i, j] * 2 + Bo[i, j]) / 4)
YUV_img2[i, j, 0] = floor((Rd[i, j] + Gd[i, j] * 2 + Bd[i, j]) / 4)
# U=R-G
YUV_img1[i, j, 1] = max(0, Ro[i, j] - Go[i, j])
YUV_img2[i, j, 1] = max(0, Rd[i, j] - Gd[i, j])
# V=B-G
YUV_img1[i, j, 2] = max(0, Bo[i, j] - Go[i, j])
YUV_img2[i, j, 2] = max(0, Bd[i, j] - Gd[i, j])
################################################
### CQM Calculation ###
################################################
Y_psnr = PeakSignaltoNoiseRatio(YUV_img1[:, :, 0], YUV_img2[:, :, 0]); # PSNR for Y channel
U_psnr = PeakSignaltoNoiseRatio(YUV_img1[:, :, 1], YUV_img2[:, :, 1]); # PSNR for U channel
V_psnr = PeakSignaltoNoiseRatio(YUV_img1[:, :, 2], YUV_img2[:, :, 2]); # PSNR for V channel
CQM = (Y_psnr * 0.9449) + (U_psnr + V_psnr) / 2 * 0.0551
return CQM
def Evaluate(GT, BC):
[M, N, C] = np.shape(GT)
dimension = M * N
GT = np.ndarray((M, N, 3), 'u1', GT.tostring()).astype(float)
BC = np.ndarray((M, N, 3), 'u1', BC.tostring()).astype(float)
if C == 3: # In case of color images, use luminance in YCbCr
R = GT[:, :, 0]
G = GT[:, :, 1]
B = GT[:, :, 2]
YGT = .299 * R + .587 * G + .114 * B
R = BC[:, :, 0]
G = BC[:, :, 1]
B = BC[:, :, 2]
YBC = .299 * R + .587 * G + .114 * B
else:
YGT = GT
YBC = BC
############################# AGE ########################################
Diff = abs(YGT - YBC).round().astype(np.uint8)
    AGE = np.mean(Diff)
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 3 11:33:53 2021
@author: <NAME>
"""
"""
Pseudo-experimental data generation program for glucose (component A)-fructose (component B) system
References
Multi-column chromatographic process development using simulated
moving bed superstructure and simultaneous optimization – Model
correction framework
<NAME>, <NAME>
Chemical Engineering Science 116 (2014) 428–441
Ha,Hb,ka,kb = 0.301,0.531,0.0047,0.0083
Synergistic effects in competitive adsorption of
carbohydrates on an ion-exchange resin
<NAME>, <NAME>, <NAME>,
<NAME>, <NAME>
Journal of Chromatography A, 1164 (2007) 224–234
ba,bb = 6.34e-4, 2.48e-4
"""
#Import the required libraries
import numpy as np #Numeric computation libraries (multi-dimensional arrays, statistical functions, etc.)
from scipy.integrate import odeint #Used to solve ordinary differential equations.
import matplotlib.pyplot as plt #used to draw graphs
import pandas as pd #Used to read and process data
import numba #Used for JIT compilation
#Load data from Experiment A
SampleFile1 = pd.read_csv('Pseudo_A.csv').dropna(how="all")
tData1 = SampleFile1["t[s]"].values
CaOutData1 = SampleFile1["C[g/L]"].values
#Load data from Experiment B
SampleFile2 = pd.read_csv("Pseudo_B.csv").dropna(how="all")
tData2 = SampleFile2["t[s]"].values
CbOutData2 = SampleFile2["C[g/L]"].values
#Load data from Experiment C
SampleFile3 = pd.read_csv("Pseudo_C.csv").dropna(how="all")
tData3 = SampleFile3["t[s]"].values
CTotalOutData3 = SampleFile3["C[g/L]"].values
#Load data for validation of experiment C
SampleFile4 = pd.read_csv("Pseudo_C_deconvoluted.csv").dropna(how="all")
tData4 = SampleFile4["t[s]"].values
CaOutData4 = SampleFile4["Ca[g/L]"].values
CbOutData4 = SampleFile4["Cb[g/L]"].values
tEnd = 2500.0
tData = np.arange(0.0, tEnd, tEnd/100)
from itertools import combinations
from joblib import Parallel, delayed
import numpy as np
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.metrics import calinski_harabasz_score, davies_bouldin_score
from sklearn.metrics.pairwise import pairwise_distances
from ..utils.os_utl import filter_kwargs
# ############################################### FUNCTIONS #################################################
def cluster_density(x, labels, metric='euclidean', n_pts=-1):
dens = list()
max_per_label = list()
for k in np.unique(labels):
pts = x[labels == k]
dist = pairwise_distances(pts, metric=metric)
mean_dist = dist.mean(axis=1) if n_pts < 0 else np.sort(dist, axis=1)[:, 1:n_pts+1].mean(axis=1)
dens.append(mean_dist.mean())
max_per_label.append(mean_dist.max())
dens = 1 - np.array(dens)/max(max_per_label)
return np.unique(labels), np.round(dens, 3)
def local_cluster_density(x, labels, metric='euclidean', n_pts=10):
return cluster_density(x, labels, metric, n_pts)
def gap_statistic(k, inertia, data_shape, n_refs=3, **kwargs):
# Generate a reference inertia to compare to and
# filter kwargs
kwargs = filter_kwargs(kwargs, MiniBatchKMeans.__init__)
if 'batch_size' not in kwargs:
kwargs['batch_size'] = data_shape[0] // 100 if data_shape[0] > 100000 else 1000
# For n references, generate random sample and perform clustering getting resulting dispersion of each loop
ref_disps = np.zeros(n_refs)
for i in range(n_refs):
# Create new random reference set
rand_ref = np.random.random_sample(size=data_shape)
# Cluster this data and save the inertias
ref_disps[i] = MiniBatchKMeans(k, **kwargs).fit(rand_ref).inertia_
# return the log difference to the original inertia
return np.log(np.mean(ref_disps)) - np.log(inertia)
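# Illustrative usage sketch (the feature matrix `x` is an assumption): choose k by
# maximizing the gap statistic over a candidate range.
#
#   gaps = {}
#   for k in range(2, 11):
#       km = MiniBatchKMeans(k).fit(x)
#       gaps[k] = gap_statistic(k, km.inertia_, x.shape, n_refs=3)
#   best_k = max(gaps, key=gaps.get)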
def silhouette_score_block(x, labels, metric='euclidean', sample_size=None,
random_state=None, n_jobs=1, **kwargs):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster distance (a) and the mean nearest-cluster
distance (b) for each sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a, b)``. To clarify,
b is the distance between a sample and the nearest cluster that b is not a part of. This function returns the
mean Silhouette Coefficient over all samples. To obtain the values for each sample, use silhouette_samples .The
best value is 1 and the worst value is -1. Values near 0 indicate overlapping clusters. Negative values generally
indicate that a sample has been assigned to the wrong cluster, as a different cluster is more similar.
:param numpy.ndarray x: feature array of shape (n_samples_a, n_features)
:param numpy.ndarray labels: label values for each sample as an array of shape (n_samples, )
:param string metric: default: 'euclidean'. The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options allowed by
metrics.pairwise.pairwise_distances. If X is the distance array itself, use "precomputed" as the metric.
:param int sample_size: The size of the sample to use when computing the Silhouette Coefficient. If sample_size
is None, no sampling is used.
:param int|numpy.RandomState random_state: Optional. The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random number generator.
:param int n_jobs: number of processing cores to use. -1 for all
:param kwargs: optional keyword parameters. Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still metric dependent. See the scipy docs for
usage examples.
:return float: silhouette. Mean Silhouette Coefficient for all samples.
References
----------
<NAME> (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65. doi:10.1016/0377-0427(87)90125-7.
http://en.wikipedia.org/wiki/Silhouette_(clustering)
"""
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(x.shape[0])[:sample_size]
if metric == "precomputed":
raise ValueError('Distance matrix cannot be precomputed')
else:
x, labels = x[indices], labels[indices]
return np.mean(silhouette_samples_block(
x, labels, metric=metric, n_jobs=n_jobs, **kwargs))
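# Illustrative usage sketch (`x` is an assumed feature matrix): score a MiniBatchKMeans
# clustering, subsampling to keep the pairwise-distance blocks tractable.
#
#   labels = MiniBatchKMeans(n_clusters=5).fit_predict(x)
#   score = silhouette_score_block(x, labels, metric='euclidean',
#                                  sample_size=10000, random_state=0, n_jobs=-1)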
def silhouette_samples_block(x, labels, metric='euclidean', n_jobs=1, **kwargs):
"""Compute the Silhouette Coefficient for each sample.
:param numpy.ndarray x: feature array of shape (n_samples_a, n_features)
:param numpy.ndarray labels: label values for each sample as an array of shape (n_samples, )
:param string metric: default: 'euclidean'. The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options allowed by
metrics.pairwise.pairwise_distances. If X is the distance array itself, use "precomputed" as the metric.
:param int n_jobs: number of processing cores to use. -1 for all
:param kwargs: optional keyword parameters. Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still metric dependent. See the scipy docs for
usage examples.
:return numpy.ndarray: silhouette values. Clusters of size 1 have silhouette 0
"""
a = _intra_cluster_distances_block(x, labels, metric, n_jobs=n_jobs,
**kwargs)
b = _nearest_cluster_distance_block(x, labels, metric, n_jobs=n_jobs,
**kwargs)
sil_samples = (b - a) / np.maximum(a, b)
# nan values are for clusters of size 1, and should be 0
return np.nan_to_num(sil_samples)
def _intra_cluster_distances_block_(sub_x, metric, **kwargs):
"""Calculates the intra cluster distances for each cluster
:param numpy.ndarray sub_x: subset of all the samples that have the same cluster. array of shape
(n_samples, n_features)
:param string metric: The metric to use when calculating distance between instances in a feature array. If metric
is a string, it must be one of the options allowed by metrics.pairwise.pairwise_distances. If X is the distance
array itself, use "precomputed" as the metric.
:param kwargs: optional keyword parameters. Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still metric dependent. See the scipy docs for
usage examples.
    :return numpy.ndarray: mean distance of each sample in sub_x to the other samples of its cluster
"""
distances = pairwise_distances(sub_x, metric=metric, **kwargs)
return distances.sum(axis=1) / (distances.shape[0] - 1)
# noinspection PyUnresolvedReferences
def _intra_cluster_distances_block(x, labels, metric, n_jobs=1, **kwargs):
"""Calculate the mean intra-cluster distance for sample i.
:param numpy.ndarray x: feature array of shape (n_samples_a, n_features)
:param numpy.ndarray labels: label values for each sample as an array of shape (n_samples, )
:param string metric: default: 'euclidean'. The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options allowed by
metrics.pairwise.pairwise_distances. If X is the distance array itself, use "precomputed" as the metric.
:param int n_jobs: number of processing cores to use. -1 for all
:param kwargs: optional keyword parameters. Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still metric dependent. See the scipy docs for
usage examples.
:return numpy.ndarray: shape (n_samples). Mean intra-cluster distance
"""
intra_dist = np.zeros(labels.size, dtype=float)
values = Parallel(n_jobs=n_jobs)(delayed(_intra_cluster_distances_block_)(
x[np.where(labels == label)[0]], metric, **kwargs) for label in np.unique(labels))
for label, values_ in zip(np.unique(labels), values):
intra_dist[np.where(labels == label)[0]] = values_
return intra_dist
def _nearest_cluster_distance_block_(sub_x_a, sub_x_b, metric, **kwargs):
"""Calculate the mean nearest-cluster distance for sample i.
:param numpy.ndarray sub_x_a: subset of all the samples that have the same cluster. array of shape
(n_samples, n_features)
:param numpy.ndarray sub_x_b: subset of all the samples that have the same cluster (different from the cluster
represented by sub_x_a). array of shape (n_samples, n_features)
:param string metric: The metric to use when calculating distance between instances in a feature array. If metric
is a string, it must be one of the options allowed by metrics.pairwise.pairwise_distances. If X is the distance
array itself, use "precomputed" as the metric.
:param kwargs: optional keyword parameters. Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still metric dependent. See the scipy docs for
usage examples.
    :return tuple: arrays of mean distances from each sample in sub_x_a to sub_x_b and from each sample in sub_x_b to sub_x_a
"""
dist = pairwise_distances(sub_x_a, sub_x_b, metric=metric, **kwargs)
dist_a = dist.mean(axis=1)
dist_b = dist.mean(axis=0)
return dist_a, dist_b
def _nearest_cluster_distance_block(x, labels, metric, n_jobs=1, **kwargs):
"""Calculate the mean nearest-cluster distance for sample i.
:param numpy.ndarray x: feature array of shape (n_samples_a, n_features)
:param numpy.ndarray labels: label values for each sample as an array of shape (n_samples, )
:param string metric: default: 'euclidean'. The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options allowed by
metrics.pairwise.pairwise_distances. If X is the distance array itself, use "precomputed" as the metric.
:param int n_jobs: number of processing cores to use. -1 for all
:param kwargs: optional keyword parameters. Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still metric dependent. See the scipy docs for
usage examples.
:return numpy.ndarray: shape (n_samples). Mean intra-cluster distance
"""
# noinspection PyUnresolvedReferences
inter_dist = np.empty(labels.size, dtype=float)
inter_dist.fill(np.inf)
# Compute cluster distance between pairs of clusters
unique_labels = np.unique(labels)
values = Parallel(n_jobs=n_jobs)(
delayed(_nearest_cluster_distance_block_)(
            x[np.where(labels == label_a)[0]],
            x[np.where(labels == label_b)[0]], metric, **kwargs)
        for label_a, label_b in combinations(unique_labels, 2))
    # Keep, per sample, the smallest mean distance to any other cluster seen so far
    for (label_a, label_b), (values_a, values_b) in zip(combinations(unique_labels, 2), values):
        indices_a = np.where(labels == label_a)[0]
        inter_dist[indices_a] = np.minimum(values_a, inter_dist[indices_a])
        indices_b = np.where(labels == label_b)[0]
        inter_dist[indices_b] = np.minimum(values_b, inter_dist[indices_b])
    return inter_dist
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# LMFIT for least square fitting
# https://lmfit.github.io/lmfit-py/intro.html
# pip install git+https://github.com/lmfit/lmfit-py.git --upgrade --user
#
import lmfit
import numpy as np
def init_params(): return lmfit.Parameters()
def unpack_params(params): return [ v.value for _, v in params.items() ]
def bisector(xyz1, xyz2, xyz3):
''' Khan method.
'''
v12 = xyz1 - xyz2
v32 = xyz3 - xyz2
return v12 + v32
def estimate_axis(xyzs):
''' Find an init axis.
Refer to DOI: 10.1016/0097-8485(89)85005-3 for details.
'''
# Remove np.nan
xyzs_nonan = xyzs[~np.isnan(xyzs).any(axis = 1)]
# Compute
nv = np.zeros(3) # Axis vector
for i in range(len(xyzs_nonan) - 3):
h1 = bisector(xyzs_nonan[i], xyzs_nonan[i+1], xyzs_nonan[i+2])
h2 = bisector(xyzs_nonan[i+1], xyzs_nonan[i+2], xyzs_nonan[i+3])
hv = np.cross(h1, h2)
nv += hv
nv /= np.linalg.norm(nv)
return nv
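# Illustrative usage sketch: `xyzs` is assumed to be an (N, 3) array of consecutive
# backbone coordinates along one helix; rows containing np.nan are skipped internally
# and the returned axis vector has unit length.
#
#   axis = estimate_axis(xyzs)
#   assert np.isclose(np.linalg.norm(axis), 1.0)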
def helixcore(parvals, num):
''' Return modeled coordinates (x, y, z).
The length of the helix is represented by the num of progress.
Angle related variables are all subject to the unit of radian.
'''
# Unpack parameters...
px, py, pz, nx, ny, nz, s, omega, r, phi, t = parvals
# Consider phase shift...
psi_list = np.array([ omega * i for i in range(num) ], dtype = np.float64)
# Form a orthonormal system...
# Direction cosine: http://www.geom.uiuc.edu/docs/reference/CRC-formulas/node52.html
n = np.array([nx, ny, nz], dtype = np.float64)
n = np.cos(n)
# Model it and save result in q...
p = np.array([px, py, pz], dtype = np.float64)
q = np.zeros((len(psi_list), 3))
q += p.reshape(1, -1)
q += n.reshape(1, -1) * s * psi_list.reshape(-1, 1) / (2 * np.pi)
q += n.reshape(1, -1) * t
return q
def helixmodel(parvals, num, pt0):
''' Return modeled coordinates (x, y, z).
The length of the helix is represented by the num of progress.
pt0 is the beginning position of the helix.
'''
# Unpack parameters...
px, py, pz, nx, ny, nz, s, omega, r, phi, t = parvals
# Consider phase shift...
psi_list = np.array([ omega * i for i in range(num) ], dtype = np.float64)
# Get vector connecting p and first atom...
pv = np.array([px, py, pz], dtype = np.float64)
pt0 = np.array(pt0)
c = pt0 - pv
# Form a orthonormal system...
# Direction cosine: http://www.geom.uiuc.edu/docs/reference/CRC-formulas/node52.html
n = np.array([nx, ny, nz], dtype = np.float64)
n = np.cos(n)
# Derive the v, w vector (third vector) to facilitate the construction of a helix...
v = np.cross(n, c)
w = np.cross(n, v)
v /= np.linalg.norm(v)
w /= np.linalg.norm(w)
# Model it and save result in q...
p = np.array([px, py, pz], dtype = np.float64)
q = np.zeros((len(psi_list), 3))
q += p.reshape(1, -1)
q += n.reshape(1, -1) * s * psi_list.reshape(-1, 1) / (2 * np.pi)
q += n.reshape(1, -1) * t
q += v.reshape(1, -1) * r * np.cos(psi_list.reshape(-1, 1) + phi) + \
w.reshape(1, -1) * r * np.sin(psi_list.reshape(-1, 1) + phi)
return q
def residual_purehelix(params, xyzs):
# Calculate size of the helix...
num = xyzs.shape[0]
# Avoid np.nan as the first valid point
xyzs_nonan = xyzs[~np.isnan(xyzs).any(axis = 1)]
# Unpack parameters...
parvals = unpack_params(params)
# Compute...
res = helixmodel(parvals, num, xyzs_nonan[0]) - xyzs
return res.reshape(-1)
def residual_helix(params, xyzs_dict, pa0, lam):
# Unpack parameters...
parvals = unpack_params(params)
px, py, pz, nx, ny, nz, s, omega = parvals[ :8]
rN, rCA, rC, rO = parvals[8:8+4]
phiN, phiCA, phiC, phiO = parvals[12:12+4]
tN, tCA, tC, tO = parvals[16:16+4]
# Construct paramters for each atom...
parval_dict = {}
parval_dict["N"] = px, py, pz, nx, ny, nz, s, omega, rN, phiN, tN
parval_dict["CA"] = px, py, pz, nx, ny, nz, s, omega, rCA, phiCA, tCA
parval_dict["C"] = px, py, pz, nx, ny, nz, s, omega, rC, phiC, tC
parval_dict["O"] = px, py, pz, nx, ny, nz, s, omega, rO, phiO, tO
# Consider residuals...
# Create dictionary to store values
num_dict = {}
xyzs_nonan_dict = {}
res_dict = {}
# Computation for each type of atom
for i in xyzs_dict.keys():
num_dict[i] = xyzs_dict[i].shape[0]
# Avoid np.nan as the first valid point
xyzs_nonan_dict[i] = xyzs_dict[i][~np.isnan(xyzs_dict[i]).any(axis = 1)]
# Compute
res = helixmodel(parval_dict[i], num_dict[i], xyzs_nonan_dict[i][0]) \
- xyzs_dict[i]
res_dict[i] = res
# Format results for minimization
num_coords = np.sum( [ v for _, v in num_dict.items() ] )
res_matrix = np.zeros( (num_coords, 3) )
# Assign values
idx = 0
for i, v in res_dict.items():
res_matrix[idx:idx + num_dict[i], :] = v
idx += num_dict[i]
# Consider regularization (penalty)...
# Facilitate regularization based on pa0
pv = np.array([px, py, pz], dtype = np.float64)
    pen_matrix = np.sqrt(lam[0])
# -*- coding: utf-8 -*-
#
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Utility functions for data loading and training of VGSL networks.
"""
import json
import regex
import torch
import traceback
import unicodedata
import numpy as np
import pkg_resources
import bidi.algorithm as bd
import shapely.geometry as geom
import torch.nn.functional as F
import torchvision.transforms.functional as tf
from os import path
from functools import partial
from shapely.ops import split, snap
from PIL import Image, ImageDraw
from itertools import groupby
from collections import Counter, defaultdict
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from typing import Dict, List, Tuple, Iterable, Sequence, Callable, Optional, Any, Union, cast
from skimage.draw import polygon
from kraken.lib.xml import parse_alto, parse_page, parse_xml
from kraken.lib.util import is_bitonal
from kraken.lib.codec import PytorchCodec
from kraken.lib.models import TorchSeqRecognizer
from kraken.lib.segmentation import extract_polygons, calculate_polygonal_environment
from kraken.lib.exceptions import KrakenInputException
from kraken.lib.lineest import CenterNormalizer, dewarp
from kraken.lib import functional_im_transforms as F_t
__all__ = ['BaselineSet', 'PolygonGTDataset', 'GroundTruthDataset', 'compute_error', 'generate_input_transforms', 'preparse_xml_data']
import logging
logger = logging.getLogger(__name__)
def generate_input_transforms(batch: int, height: int, width: int, channels: int, pad: int, valid_norm: bool = True, force_binarization=False) -> transforms.Compose:
"""
Generates a torchvision transformation converting a PIL.Image into a
tensor usable in a network forward pass.
Args:
batch (int): mini-batch size
height (int): height of input image in pixels
width (int): width of input image in pixels
channels (int): color channels of input
pad (int): Amount of padding on horizontal ends of image
valid_norm (bool): Enables/disables baseline normalization as a valid
preprocessing step. If disabled we will fall back to
standard scaling.
force_binarization (bool): Forces binarization of input images using
the nlbin algorithm.
Returns:
A torchvision transformation composition converting the input image to
the appropriate tensor.
"""
scale = (height, width) # type: Tuple[int, int]
center_norm = False
mode = 'RGB' if channels == 3 else 'L'
if height == 1 and width == 0 and channels > 3:
perm = (1, 0, 2)
scale = (channels, 0)
if valid_norm:
center_norm = True
mode = 'L'
elif height > 1 and width == 0 and channels in (1, 3):
perm = (0, 1, 2)
if valid_norm and channels == 1:
center_norm = True
elif height == 0 and width > 1 and channels in (1, 3):
perm = (0, 1, 2)
# fixed height and width image => bicubic scaling of the input image, disable padding
elif height > 0 and width > 0 and channels in (1, 3):
perm = (0, 1, 2)
pad = 0
elif height == 0 and width == 0 and channels in (1, 3):
perm = (0, 1, 2)
pad = 0
else:
raise KrakenInputException('Invalid input spec {}, {}, {}, {}, {}'.format(batch,
height,
width,
channels,
pad))
if mode != 'L' and force_binarization:
raise KrakenInputException('Invalid input spec {}, {}, {}, {} in'
' combination with forced binarization.'.format(batch,
height,
width,
channels,
pad))
out_transforms = []
out_transforms.append(transforms.Lambda(partial(F_t.pil_to_mode, mode=mode)))
if force_binarization:
out_transforms.append(transforms.Lambda(F_t.pil_to_bin))
# dummy transforms to ensure we can determine color mode of input material
# from first two transforms. It's stupid but it works.
out_transforms.append(transforms.Lambda(F_t.dummy))
if scale != (0, 0):
if center_norm:
lnorm = CenterNormalizer(scale[0])
out_transforms.append(transforms.Lambda(partial(F_t.pil_dewarp, lnorm=lnorm)))
out_transforms.append(transforms.Lambda(partial(F_t.pil_to_mode, mode=mode)))
else:
out_transforms.append(transforms.Lambda(partial(F_t.pil_fixed_resize, scale=scale)))
if pad:
out_transforms.append(transforms.Pad((pad, 0), fill=255))
out_transforms.append(transforms.ToTensor())
# invert
out_transforms.append(transforms.Lambda(F_t.tensor_invert))
out_transforms.append(transforms.Lambda(partial(F_t.tensor_permute, perm=perm)))
return transforms.Compose(out_transforms)
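# Illustrative usage sketch (spec values are assumptions, not library defaults):
# transforms for a greyscale recognizer that scales lines to 48 px height and pads
# 16 px on each side, applied to a PIL image loaded from a placeholder path.
#
#   xforms = generate_input_transforms(batch=1, height=48, width=0, channels=1, pad=16)
#   line_tensor = xforms(Image.open('line.png'))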
def _fast_levenshtein(seq1: Sequence[Any], seq2: Sequence[Any]) -> int:
oneago = None
thisrow = list(range(1, len(seq2) + 1)) + [0]
rows = [thisrow]
for x in range(len(seq1)):
oneago, thisrow = thisrow, [0] * len(seq2) + [x + 1]
for y in range(len(seq2)):
delcost = oneago[y] + 1
addcost = thisrow[y - 1] + 1
subcost = oneago[y - 1] + (seq1[x] != seq2[y])
thisrow[y] = min(delcost, addcost, subcost)
rows.append(thisrow)
return thisrow[len(seq2) - 1]
def global_align(seq1: Sequence[Any], seq2: Sequence[Any]) -> Tuple[int, List[str], List[str]]:
"""
Computes a global alignment of two strings.
Args:
seq1 (Sequence[Any]):
seq2 (Sequence[Any]):
Returns a tuple (distance, list(algn1), list(algn2))
"""
# calculate cost and direction matrix
cost = [[0] * (len(seq2) + 1) for x in range(len(seq1) + 1)]
for i in range(1, len(cost)):
cost[i][0] = i
for i in range(1, len(cost[0])):
cost[0][i] = i
direction = [[(0, 0)] * (len(seq2) + 1) for x in range(len(seq1) + 1)]
direction[0] = [(0, x) for x in range(-1, len(seq2))]
for i in range(-1, len(direction) - 1):
direction[i + 1][0] = (i, 0)
for i in range(1, len(cost)):
for j in range(1, len(cost[0])):
delcost = ((i - 1, j), cost[i - 1][j] + 1)
addcost = ((i, j - 1), cost[i][j - 1] + 1)
subcost = ((i - 1, j - 1), cost[i - 1][j - 1] + (seq1[i - 1] != seq2[j - 1]))
best = min(delcost, addcost, subcost, key=lambda x: x[1])
cost[i][j] = best[1]
direction[i][j] = best[0]
d = cost[-1][-1]
# backtrace
algn1: List[Any] = []
algn2: List[Any] = []
i = len(direction) - 1
j = len(direction[0]) - 1
while direction[i][j] != (-1, 0):
k, l = direction[i][j]
if k == i - 1 and l == j - 1:
algn1.insert(0, seq1[i - 1])
algn2.insert(0, seq2[j - 1])
elif k < i:
algn1.insert(0, seq1[i - 1])
algn2.insert(0, '')
elif l < j:
algn1.insert(0, '')
algn2.insert(0, seq2[j - 1])
i, j = k, l
return d, algn1, algn2
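# Illustrative usage sketch: aligning two short strings. The returned distance for
# 'kitten' vs. 'sitting' is 3; insertions and deletions appear as '' entries in the
# two aligned lists.
#
#   d, algn1, algn2 = global_align('kitten', 'sitting')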
def compute_confusions(algn1: Sequence[str], algn2: Sequence[str]):
"""
Compute confusion matrices from two globally aligned strings.
Args:
align1 (Sequence[str]): sequence 1
align2 (Sequence[str]): sequence 2
Returns:
A tuple (counts, scripts, ins, dels, subs) with `counts` being per-character
confusions, `scripts` per-script counts, `ins` a dict with per script
insertions, `del` an integer of the number of deletions, `subs` per
script substitutions.
"""
counts: Dict[Tuple[str, str], int] = Counter()
with pkg_resources.resource_stream(__name__, 'scripts.json') as fp:
script_map = json.load(fp)
def _get_script(c):
for s, e, n in script_map:
if ord(c) == s or (e and s <= ord(c) <= e):
return n
return 'Unknown'
scripts: Dict[Tuple[str, str], int] = Counter()
ins: Dict[Tuple[str, str], int] = Counter()
dels: int = 0
subs: Dict[Tuple[str, str], int] = Counter()
for u,v in zip(algn1, algn2):
counts[(u, v)] += 1
for k, v in counts.items():
if k[0] == '':
dels += v
else:
script = _get_script(k[0])
scripts[script] += v
if k[1] == '':
ins[script] += v
elif k[0] != k[1]:
subs[script] += v
return counts, scripts, ins, dels, subs
def compute_error(model: TorchSeqRecognizer, validation_set: Iterable[Dict[str, torch.Tensor]]) -> Tuple[int, int]:
"""
Computes error report from a model and a list of line image-text pairs.
Args:
model (kraken.lib.models.TorchSeqRecognizer): Model used for recognition
validation_set (list): List of tuples (image, text) for validation
Returns:
A tuple with total number of characters and edit distance across the
whole validation set.
"""
total_chars = 0
error = 0
for batch in validation_set:
preds = model.predict_string(batch['image'], batch['seq_lens'])
total_chars += batch['target_lens'].sum()
for pred, text in zip(preds, batch['target']):
error += _fast_levenshtein(pred, text)
return total_chars, error
def preparse_xml_data(filenames, format_type='xml', repolygonize=False):
"""
Loads training data from a set of xml files.
Extracts line information from Page/ALTO xml files for training of
recognition models.
Args:
filenames (list): List of XML files.
format_type (str): Either `page`, `alto` or `xml` for
autodetermination.
repolygonize (bool): (Re-)calculates polygon information using the
kraken algorithm.
Returns:
A list of dicts {'text': text, 'baseline': [[x0, y0], ...], 'boundary':
[[x0, y0], ...], 'image': PIL.Image}.
"""
training_pairs = []
if format_type == 'xml':
parse_fn = parse_xml
elif format_type == 'alto':
parse_fn = parse_alto
elif format_type == 'page':
parse_fn = parse_page
else:
raise Exception(f'invalid format {format_type} for preparse_xml_data')
for fn in filenames:
try:
data = parse_fn(fn)
except KrakenInputException as e:
logger.warning(e)
continue
try:
with open(data['image'], 'rb') as fp:
Image.open(fp)
except FileNotFoundError as e:
logger.warning(f'Could not open file {e.filename} in {fn}')
continue
if repolygonize:
logger.info('repolygonizing {} lines in {}'.format(len(data['lines']), data['image']))
data['lines'] = _repolygonize(data['image'], data['lines'])
for line in data['lines']:
training_pairs.append({'image': data['image'], **line})
return training_pairs
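# Illustrative usage sketch (paths are placeholders): extract line records from a set
# of XML files and feed them into the PolygonGTDataset defined below.
#
#   records = preparse_xml_data(['page_0001.xml', 'page_0002.xml'], format_type='xml')
#   ds = PolygonGTDataset(preload=False)
#   for rec in records:
#       ds.add(**rec)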
def _repolygonize(im: Image.Image, lines):
"""
Helper function taking an output of the lib.xml parse_* functions and
recalculating the contained polygonization.
Args:
im (Image.Image): Input image
lines (list): List of dicts [{'boundary': [[x0, y0], ...], 'baseline': [[x0, y0], ...], 'text': 'abcvsd'}, {...]
Returns:
A data structure `lines` with a changed polygonization.
"""
im = Image.open(im).convert('L')
polygons = calculate_polygonal_environment(im, [x['baseline'] for x in lines])
return [{'boundary': polygon, 'baseline': orig['baseline'], 'text': orig['text'], 'script': orig['script']} for orig, polygon in zip(lines, polygons)]
def collate_sequences(batch):
"""
Sorts and pads sequences.
"""
sorted_batch = sorted(batch, key=lambda x: x['image'].shape[2], reverse=True)
seqs = [x['image'] for x in sorted_batch]
seq_lens = torch.LongTensor([seq.shape[2] for seq in seqs])
max_len = seqs[0].shape[2]
seqs = torch.stack([F.pad(seq, pad=(0, max_len-seq.shape[2])) for seq in seqs])
if isinstance(sorted_batch[0]['target'], str):
labels = [x['target'] for x in sorted_batch]
else:
labels = torch.cat([x['target'] for x in sorted_batch]).long()
label_lens = torch.LongTensor([len(x['target']) for x in sorted_batch])
return {'image': seqs, 'target': labels, 'seq_lens': seq_lens, 'target_lens': label_lens}
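# Illustrative usage sketch (`dataset` is an assumption): pass the function above as
# `collate_fn` so variable-width line tensors can be padded into one batch.
#
#   loader = DataLoader(dataset, batch_size=16, shuffle=True, collate_fn=collate_sequences)
#   batch = next(iter(loader))   # keys: 'image', 'target', 'seq_lens', 'target_lens'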
class InfiniteDataLoader(DataLoader):
"""
Version of DataLoader that auto-reinitializes the iterator once it is
exhausted.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dataset_iter = super().__iter__()
def __iter__(self):
return self
def __next__(self):
try:
sample = next(self.dataset_iter)
except StopIteration:
self.dataset_iter = super().__iter__()
sample = next(self.dataset_iter)
return sample
class PolygonGTDataset(Dataset):
"""
Dataset for training a line recognition model from polygonal/baseline data.
"""
def __init__(self,
normalization: Optional[str] = None,
whitespace_normalization: bool = True,
reorder: bool = True,
im_transforms: Callable[[Any], torch.Tensor] = transforms.Compose([]),
preload: bool = True,
augmentation: bool = False) -> None:
self._images = [] # type: Union[List[Image], List[torch.Tensor]]
self._gt = [] # type: List[str]
self.alphabet = Counter() # type: Counter
self.text_transforms = [] # type: List[Callable[[str], str]]
# split image transforms into two. one part giving the final PIL image
# before conversion to a tensor and the actual tensor conversion part.
self.head_transforms = transforms.Compose(im_transforms.transforms[:2])
self.tail_transforms = transforms.Compose(im_transforms.transforms[2:])
self.transforms = im_transforms
self.preload = preload
self.aug = None
self.seg_type = 'baselines'
# built text transformations
if normalization:
self.text_transforms.append(partial(F_t.text_normalize, normalization=normalization))
if whitespace_normalization:
self.text_transforms.append(F_t.text_whitespace_normalize)
if reorder:
self.text_transforms.append(F_t.text_reorder)
if augmentation:
from albumentations import (
Compose, ToFloat, FromFloat, Flip, OneOf, MotionBlur, MedianBlur, Blur,
ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast,
)
self.aug = Compose([
ToFloat(),
OneOf([
MotionBlur(p=0.2),
MedianBlur(blur_limit=3, p=0.1),
Blur(blur_limit=3, p=0.1),
], p=0.2),
ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=3, p=0.2),
OneOf([
OpticalDistortion(p=0.3),
ElasticTransform(p=0.1),
], p=0.2),
], p=0.5)
self.im_mode = '1'
def add(self, *args, **kwargs):
"""
Adds a line to the dataset.
Args:
im (path): Path to the whole page image
text (str): Transcription of the line.
baseline (list): A list of coordinates [[x0, y0], ..., [xn, yn]].
boundary (list): A polygon mask for the line.
"""
if 'preparse' not in kwargs or not kwargs['preparse']:
            kwargs = self.parse(*args, **kwargs)
if kwargs['preload']:
self.im_mode = kwargs['im_mode']
self._images.append(kwargs['image'])
else:
self._images.append((kwargs['image'], kwargs['baseline'], kwargs['boundary']))
self._gt.append(kwargs['text'])
self.alphabet.update(kwargs['text'])
def parse(self, image: Union[str, Image.Image], text: str, baseline: List[Tuple[int, int]], boundary: List[Tuple[int, int]], *args, **kwargs):
"""
Parses a sample for the dataset and returns it.
        This function is mainly used for parallelized loading of training data.
Args:
im (path): Path to the whole page image
text (str): Transcription of the line.
baseline (list): A list of coordinates [[x0, y0], ..., [xn, yn]].
boundary (list): A polygon mask for the line.
"""
for func in self.text_transforms:
text = func(text)
if not text:
raise KrakenInputException('Text line is empty after transformations')
if not baseline:
raise KrakenInputException('No baseline given for line')
if not boundary:
raise KrakenInputException('No boundary given for line')
if self.preload:
            im = image
            if not isinstance(im, Image.Image):
                im = Image.open(im)
try:
im, _ = next(extract_polygons(im, {'type': 'baselines', 'lines': [{'baseline': baseline, 'boundary': boundary}]}))
except IndexError:
raise KrakenInputException('Patch extraction failed for baseline')
try:
im = self.head_transforms(im)
im = self.tail_transforms(im)
except ValueError:
raise KrakenInputException(f'Image transforms failed on {image}')
return {'text': text, 'image': im, 'baseline': baseline, 'boundary': boundary, 'im_mode': im.mode, 'preload': True, 'preparse': True}
else:
return {'text': text, 'image': image, 'baseline': baseline, 'boundary': boundary, 'preload': False, 'preparse': True}
def encode(self, codec: Optional[PytorchCodec] = None) -> None:
"""
Adds a codec to the dataset and encodes all text lines.
Has to be run before sampling from the dataset.
"""
if codec:
self.codec = codec
else:
self.codec = PytorchCodec(''.join(self.alphabet.keys()))
self.training_set = [] # type: List[Tuple[Union[Image, torch.Tensor], torch.Tensor]]
for im, gt in zip(self._images, self._gt):
self.training_set.append((im, self.codec.encode(gt)))
def no_encode(self) -> None:
"""
Creates an unencoded dataset.
"""
self.training_set = [] # type: List[Tuple[Union[Image, torch.Tensor], str]]
for im, gt in zip(self._images, self._gt):
self.training_set.append((im, gt))
def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
if self.preload:
x, y = self.training_set[index]
if self.aug:
x = x.permute((1, 2, 0)).numpy()
o = self.aug(image=x)
x = torch.tensor(o['image'].transpose(2, 0, 1))
return {'image': x, 'target': y}
else:
item = self.training_set[index]
try:
logger.debug(f'Attempting to load {item[0]}')
im = item[0][0]
if not isinstance(im, Image.Image):
im = Image.open(im)
im, _ = next(extract_polygons(im, {'type': 'baselines', 'lines': [{'baseline': item[0][1], 'boundary': item[0][2]}]}))
im = self.head_transforms(im)
if not is_bitonal(im):
self.im_mode = im.mode
im = self.tail_transforms(im)
if self.aug:
im = im.permute((1, 2, 0)).numpy()
o = self.aug(image=im)
im = torch.tensor(o['image'].transpose(2, 0, 1))
return {'image': im, 'target': item[1]}
except Exception:
idx = np.random.randint(0, len(self.training_set))
logger.debug(traceback.format_exc())
logger.info(f'Failed. Replacing with sample {idx}')
return self[np.random.randint(0, len(self.training_set))]
def __len__(self) -> int:
return len(self.training_set)
class GroundTruthDataset(Dataset):
"""
Dataset for training a line recognition model.
All data is cached in memory.
"""
def __init__(self, split: Callable[[str], str] = F_t.default_split,
suffix: str = '.gt.txt',
normalization: Optional[str] = None,
whitespace_normalization: bool = True,
reorder: bool = True,
im_transforms: Callable[[Any], torch.Tensor] = transforms.Compose([]),
preload: bool = True,
augmentation: bool = False) -> None:
"""
Reads a list of image-text pairs and creates a ground truth set.
Args:
split (func): Function for generating the base name without
extensions from paths
suffix (str): Suffix to attach to image base name for text
retrieval
mode (str): Image color space. Either RGB (color) or L
(grayscale/bw). Only L is compatible with vertical
scaling/dewarping.
scale (int, tuple): Target height or (width, height) of dewarped
line images. Vertical-only scaling is through
CenterLineNormalizer, resizing with Lanczos
interpolation. Set to 0 to disable.
normalization (str): Unicode normalization for gt
whitespace_normalization (str): Normalizes unicode whitespace and
strips whitespace.
reorder (bool): Whether to rearrange code points in "display"/LTR
order
im_transforms (func): Function taking an PIL.Image and returning a
tensor suitable for forward passes.
preload (bool): Enables preloading and preprocessing of image files.
"""
self.suffix = suffix
self.split = partial(F_t.suffix_split, split=split, suffix=suffix)
self._images = [] # type: Union[List[Image], List[torch.Tensor]]
self._gt = [] # type: List[str]
self.alphabet = Counter() # type: Counter
self.text_transforms = [] # type: List[Callable[[str], str]]
# split image transforms into two. one part giving the final PIL image
# before conversion to a tensor and the actual tensor conversion part.
self.head_transforms = transforms.Compose(im_transforms.transforms[:2])
self.tail_transforms = transforms.Compose(im_transforms.transforms[2:])
self.aug = None
self.preload = preload
self.seg_type = 'bbox'
# built text transformations
if normalization:
self.text_transforms.append(partial(F_t.text_normalize, normalization=normalization))
if whitespace_normalization:
self.text_transforms.append(F_t.text_whitespace_normalize)
if reorder:
self.text_transforms.append(F_t.text_reorder)
if augmentation:
from albumentations import (
Compose, ToFloat, FromFloat, Flip, OneOf, MotionBlur, MedianBlur, Blur,
ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast,
)
self.aug = Compose([
ToFloat(),
OneOf([
MotionBlur(p=0.2),
MedianBlur(blur_limit=3, p=0.1),
Blur(blur_limit=3, p=0.1),
], p=0.2),
ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
OneOf([
OpticalDistortion(p=0.3),
ElasticTransform(p=0.1),
], p=0.2),
], p=0.5)
self.im_mode = '1'
def add(self, *args, **kwargs) -> None:
"""
Adds a line-image-text pair to the dataset.
Args:
image (str): Input image path
"""
if 'preparse' not in kwargs or not kwargs['preparse']:
            kwargs = self.parse(*args, **kwargs)
if kwargs['preload']:
self.im_mode = kwargs['im_mode']
self._images.append(kwargs['image'])
self._gt.append(kwargs['text'])
self.alphabet.update(kwargs['text'])
def parse(self, image: Union[str, Image.Image], *args, **kwargs) -> Dict:
"""
Parses a sample for this dataset.
This is mostly used to parallelize populating the dataset.
Args:
image (str): Input image path
"""
with open(self.split(image), 'r', encoding='utf-8') as fp:
gt = fp.read().strip('\n\r')
for func in self.text_transforms:
gt = func(gt)
if not gt:
raise KrakenInputException(f'Text line is empty ({fp.name})')
if self.preload:
try:
im = Image.open(image)
im = self.head_transforms(im)
im = self.tail_transforms(im)
except ValueError:
raise KrakenInputException(f'Image transforms failed on {image}')
return {'image': im, 'text': gt, 'im_mode': im.mode, 'preload': True, 'preparse': True}
else:
return {'image': image, 'text': gt, 'preload': False, 'preparse': True}
def add_loaded(self, image: Image.Image, gt: str) -> None:
"""
Adds an already loaded line-image-text pair to the dataset.
Args:
image (PIL.Image.Image): Line image
gt (str): Text contained in the line image
"""
if self.preload:
try:
                im = self.head_transforms(image)
if not is_bitonal(im):
self.im_mode = im.mode
im = self.tail_transforms(im)
except ValueError:
raise KrakenInputException(f'Image transforms failed on {image}')
self._images.append(im)
else:
self._images.append(image)
for func in self.text_transforms:
gt = func(gt)
self._gt.append(gt)
self.alphabet.update(gt)
def encode(self, codec: Optional[PytorchCodec] = None) -> None:
"""
Adds a codec to the dataset and encodes all text lines.
Has to be run before sampling from the dataset.
"""
if codec:
self.codec = codec
else:
self.codec = PytorchCodec(''.join(self.alphabet.keys()))
self.training_set = [] # type: List[Tuple[Union[Image, torch.Tensor], torch.Tensor]]
for im, gt in zip(self._images, self._gt):
self.training_set.append((im, self.codec.encode(gt)))
def no_encode(self) -> None:
"""
Creates an unencoded dataset.
"""
self.training_set = [] # type: List[Tuple[Union[Image, torch.Tensor], str]]
for im, gt in zip(self._images, self._gt):
self.training_set.append((im, gt))
def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
if self.preload:
x, y = self.training_set[index]
if self.aug:
im = x.permute((1, 2, 0)).numpy()
o = self.aug(image=im)
im = torch.tensor(o['image'].transpose(2, 0, 1))
return {'image': im, 'target': y}
return {'image': x, 'target': y}
else:
item = self.training_set[index]
try:
logger.debug(f'Attempting to load {item[0]}')
im = item[0]
if not isinstance(im, Image.Image):
im = Image.open(im)
im = self.head_transforms(im)
if not is_bitonal(im):
self.im_mode = im.mode
im = self.tail_transforms(im)
if self.aug:
im = im.permute((1, 2, 0)).numpy()
o = self.aug(image=im)
im = torch.tensor(o['image'].transpose(2, 0, 1))
return {'image': im, 'target': item[1]}
except Exception:
idx = np.random.randint(0, len(self.training_set))
logger.debug(traceback.format_exc())
logger.info(f'Failed. Replacing with sample {idx}')
return self[np.random.randint(0, len(self.training_set))]
def __len__(self) -> int:
return len(self.training_set)
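# Illustrative usage sketch (paths and spec values are assumptions): build a bounding
# box style ground truth set from image/.gt.txt pairs and batch it with the collate
# function defined above.
#
#   xforms = generate_input_transforms(1, 48, 0, 1, 16)
#   ds = GroundTruthDataset(im_transforms=xforms, preload=True)
#   for img in ('line_0001.png', 'line_0002.png'):
#       ds.add(img)
#   ds.encode()
#   loader = DataLoader(ds, batch_size=8, collate_fn=collate_sequences)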
class BaselineSet(Dataset):
"""
Dataset for training a baseline/region segmentation model.
"""
def __init__(self, imgs: Sequence[str] = None,
suffix: str = '.path',
line_width: int = 4,
im_transforms: Callable[[Any], torch.Tensor] = transforms.Compose([]),
mode: str = 'path',
augmentation: bool = False,
valid_baselines: Sequence[str] = None,
merge_baselines: Dict[str, Sequence[str]] = None,
valid_regions: Sequence[str] = None,
merge_regions: Dict[str, Sequence[str]] = None):
"""
Reads a list of image-json pairs and creates a data set.
Args:
imgs (list):
suffix (int): Suffix to attach to image base name to load JSON
files from.
line_width (int): Height of the baseline in the scaled input.
target_size (tuple): Target size of the image as a (height, width) tuple.
mode (str): Either path, alto, page, xml, or None. In alto, page,
and xml mode the baseline paths and image data is
retrieved from an ALTO/PageXML file. In `None` mode
data is iteratively added through the `add` method.
augmentation (bool): Enable/disable augmentation.
valid_baselines (list): Sequence of valid baseline identifiers. If
`None` all are valid.
merge_baselines (dict): Sequence of baseline identifiers to merge.
Note that merging occurs after entities not
in valid_* have been discarded.
valid_regions (list): Sequence of valid region identifiers. If
`None` all are valid.
merge_regions (dict): Sequence of region identifiers to merge.
Note that merging occurs after entities not
in valid_* have been discarded.
"""
super().__init__()
self.mode = mode
self.im_mode = '1'
self.aug = None
self.targets = []
# n-th entry contains semantic of n-th class
self.class_mapping = {'aux': {'_start_separator': 0, '_end_separator': 1}, 'baselines': {}, 'regions': {}}
self.class_stats = {'baselines': defaultdict(int), 'regions': defaultdict(int)}
self.num_classes = 2
self.mbl_dict = merge_baselines if merge_baselines is not None else {}
self.mreg_dict = merge_regions if merge_regions is not None else {}
self.valid_baselines = valid_baselines
self.valid_regions = valid_regions
if mode in ['alto', 'page', 'xml']:
if mode == 'alto':
fn = parse_alto
elif mode == 'page':
fn = parse_page
elif mode == 'xml':
fn = parse_xml
im_paths = []
self.targets = []
for img in imgs:
try:
data = fn(img)
im_paths.append(data['image'])
lines = defaultdict(list)
for line in data['lines']:
if valid_baselines is None or line['script'] in valid_baselines:
lines[self.mbl_dict.get(line['script'], line['script'])].append(line['baseline'])
self.class_stats['baselines'][self.mbl_dict.get(line['script'], line['script'])] += 1
regions = defaultdict(list)
for k, v in data['regions'].items():
if valid_regions is None or k in valid_regions:
regions[self.mreg_dict.get(k, k)].extend(v)
self.class_stats['regions'][self.mreg_dict.get(k, k)] += len(v)
data['regions'] = regions
self.targets.append({'baselines': lines, 'regions': data['regions']})
except KrakenInputException as e:
logger.warning(e)
continue
# get line types
imgs = im_paths
# calculate class mapping
line_types = set()
region_types = set()
for page in self.targets:
for line_type in page['baselines'].keys():
line_types.add(line_type)
for reg_type in page['regions'].keys():
region_types.add(reg_type)
idx = -1
for idx, line_type in enumerate(line_types):
self.class_mapping['baselines'][line_type] = idx + self.num_classes
self.num_classes += idx + 1
idx = -1
for idx, reg_type in enumerate(region_types):
self.class_mapping['regions'][reg_type] = idx + self.num_classes
self.num_classes += idx + 1
elif mode == 'path':
pass
elif mode is None:
imgs = []
else:
raise Exception('invalid dataset mode')
if augmentation:
from albumentations import (
Compose, ToFloat, FromFloat, RandomRotate90, Flip, OneOf, MotionBlur, MedianBlur, Blur,
ShiftScaleRotate, OpticalDistortion, ElasticTransform, RandomBrightnessContrast,
HueSaturationValue,
)
self.aug = Compose([
ToFloat(),
RandomRotate90(),
Flip(),
OneOf([
MotionBlur(p=0.2),
MedianBlur(blur_limit=3, p=0.1),
Blur(blur_limit=3, p=0.1),
], p=0.2),
ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
OneOf([
OpticalDistortion(p=0.3),
ElasticTransform(p=0.1),
], p=0.2),
HueSaturationValue(hue_shift_limit=20, sat_shift_limit=0.1, val_shift_limit=0.1, p=0.3),
], p=0.5)
self.imgs = imgs
self.line_width = line_width
# split image transforms into two. one part giving the final PIL image
# before conversion to a tensor and the actual tensor conversion part.
self.head_transforms = transforms.Compose(im_transforms.transforms[:2])
self.tail_transforms = transforms.Compose(im_transforms.transforms[2:])
self.seg_type = None
def add(self,
image: Union[str, Image.Image],
baselines: List[List[List[Tuple[int, int]]]] = None,
regions: Dict[str, List[List[Tuple[int, int]]]] = None,
*args,
**kwargs):
"""
Adds a page to the dataset.
Args:
            image (str or PIL.Image.Image): Whole page image or path to it
            baselines (list): A list containing dicts with a list of coordinates
                              and script types [{'baseline': [[x0, y0], ...,
                              [xn, yn]], 'script': 'script_type'}, ...]
regions (dict): A dict containing list of lists of coordinates {'region_type_0': [[x0, y0], ..., [xn, yn]]], 'region_type_1': ...}.
"""
if self.mode:
raise Exception(f'The `add` method is incompatible with dataset mode {self.mode}')
baselines_ = defaultdict(list)
for line in baselines:
line_type = self.mbl_dict.get(line['script'], line['script'])
if self.valid_baselines is None or line['script'] in self.valid_baselines:
baselines_[line_type].append(line['baseline'])
self.class_stats['baselines'][line_type] += 1
if line_type not in self.class_mapping['baselines']:
self.num_classes += 1
self.class_mapping['baselines'][line_type] = self.num_classes - 1
regions_ = defaultdict(list)
for k, v in regions.items():
reg_type = self.mreg_dict.get(k, k)
if self.valid_regions is None or reg_type in self.valid_regions:
regions_[reg_type].extend(v)
                self.class_stats['regions'][reg_type] += len(v)
if reg_type not in self.class_mapping['regions']:
self.num_classes += 1
self.class_mapping['regions'][reg_type] = self.num_classes - 1
self.targets.append({'baselines': baselines_, 'regions': regions_})
self.imgs.append(image)
def __getitem__(self, idx):
im = self.imgs[idx]
if self.mode != 'path':
target = self.targets[idx]
else:
with open('{}.path'.format(path.splitext(im)[0]), 'r') as fp:
target = json.load(fp)
if not isinstance(im, Image.Image):
try:
logger.debug(f'Attempting to load {im}')
im = Image.open(im)
im, target = self.transform(im, target)
return {'image': im, 'target': target}
except Exception:
idx = np.random.randint(0, len(self.imgs))
logger.debug(traceback.format_exc())
logger.info(f'Failed. Replacing with sample {idx}')
                return self[idx]
im, target = self.transform(im, target)
return {'image': im, 'target': target}
@staticmethod
def _get_ortho_line(lineseg, point, line_width, offset):
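        # Returns a short segment orthogonal to the first edge of `lineseg`,
        # anchored at `point` shifted along the line by `line_width`
        # (towards the start for offset == 'l', towards the end otherwise).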
lineseg = np.array(lineseg)
norm_vec = lineseg[1,...] - lineseg[0,...]
norm_vec_len = np.sqrt(np.sum(norm_vec**2))
unit_vec = norm_vec / norm_vec_len
ortho_vec = unit_vec[::-1] * ((1,-1), (-1,1))
if offset == 'l':
point -= unit_vec * line_width
else:
point += unit_vec * line_width
return (ortho_vec * 10 + point).astype('int').tolist()
def transform(self, image, target):
orig_size = image.size
image = self.head_transforms(image)
if not is_bitonal(image):
self.im_mode = image.mode
image = self.tail_transforms(image)
scale = image.shape[2]/orig_size[0]
t = torch.zeros((self.num_classes,) + image.shape[1:])
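        # t is the per-pixel multi-label target: one channel per class
        # (auxiliary start/end separators, baseline classes, region classes).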
start_sep_cls = self.class_mapping['aux']['_start_separator']
end_sep_cls = self.class_mapping['aux']['_end_separator']
for key, lines in target['baselines'].items():
try:
cls_idx = self.class_mapping['baselines'][key]
except KeyError:
# skip lines of classes not present in the training set
continue
for line in lines:
# buffer out line to desired width
line = [k for k, g in groupby(line)]
line = np.array(line)*scale
shp_line = geom.LineString(line)
split_offset = min(5, shp_line.length/2)
line_pol = np.array(shp_line.buffer(-self.line_width, cap_style=2, single_sided=True).boundary, dtype=np.int)
rr, cc = polygon(line_pol[:,1], line_pol[:,0], shape=image.shape[1:])
t[cls_idx, rr, cc] = 1
split_pt = shp_line.interpolate(split_offset).buffer(0.001)
                # start separator
start_sep = np.array((split(shp_line, split_pt)[0].parallel_offset(0.5*self.line_width, side='right').buffer(1.5*self.line_width, cap_style=3).boundary), dtype=np.int)
rr_s, cc_s = polygon(start_sep[:,1], start_sep[:,0], shape=image.shape[1:])
t[start_sep_cls, rr_s, cc_s] = 1
t[start_sep_cls, rr, cc] = 0
split_pt = shp_line.interpolate(-split_offset).buffer(0.001)
                # end separator
end_sep = np.array((split(shp_line, split_pt)[-1].parallel_offset(0.5*self.line_width, side='right').buffer(1.5*self.line_width, cap_style=3).boundary), dtype=np.int)
rr_s, cc_s = polygon(end_sep[:,1], end_sep[:,0], shape=image.shape[1:])
t[end_sep_cls, rr_s, cc_s] = 1
t[end_sep_cls, rr, cc] = 0
for key, regions in target['regions'].items():
try:
cls_idx = self.class_mapping['regions'][key]
except KeyError:
# skip regions of classes not present in the training set
continue
for region in regions:
                region = np.array(region)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
Created on 8. jan. 2018
@author: ELP
'''
import sys
import numpy as np
from PyQt5.QtWidgets import (QMainWindow,QAction,qApp,QFileDialog,
QFrame,QSplitter,QLabel,QActionGroup,QSpinBox,QMessageBox,
QApplication,QTextEdit,QListWidget,QGroupBox,QAbstractItemView,
QLineEdit,QCheckBox,QGridLayout, QDockWidget,QComboBox,QVBoxLayout,QRadioButton)
from PyQt5.QtGui import QIcon,QKeySequence,QImageWriter,QPixmap,QFont
from PyQt5.QtCore import QSettings,QVariant,QSize,QPoint,QTimer,Qt
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import (
FigureCanvasQTAgg as FigureCanvas)
from netCDF4 import num2date
from tkinter.filedialog import askopenfilename,askdirectory
from matplotlib import gridspec
from matplotlib.ticker import MaxNLocator
import os
import readdata_qmain
#import path
from messages import Messages
__version__ = "1.0.0"
class Window(QMainWindow):
def __init__(self,parent = None):
super(Window,self).__init__(parent)
self.initUI()
'''def loadInitialFile(self):
settings = QSettings()
fname = (settings.value("LastFile").toString())
if fname and QFile.exists(fname):
self.loadFile(fname) '''
def initUI(self):
self.filename = None
self.figure = plt.figure(figsize=(7.69 , 6.27),
facecolor='None',edgecolor='None')
self.canvas = FigureCanvas(self.figure)
self.setCentralWidget(self.canvas)
logDockWidget = QDockWidget("List of Variables", self)
logDockWidget.setObjectName("LogDockWidget")
logDockWidget.setAllowedAreas(Qt.LeftDockWidgetArea|
Qt.RightDockWidgetArea)
self.listWidget = QListWidget()
self.listWidget.setFocusPolicy(Qt.ClickFocus)
self.listWidget.setSelectionMode(
QAbstractItemView.ExtendedSelection)
logDockWidget.setWidget(self.listWidget)
self.addDockWidget(Qt.LeftDockWidgetArea,
logDockWidget)
self.printer = None
self.sizeLabel = QLabel()
self.sizeLabel.setFrameStyle(
QFrame.StyledPanel|QFrame.Sunken)
status = self.statusBar()
status.setSizeGripEnabled(False)
status.addPermanentWidget(self.sizeLabel)
status.showMessage("Ready", 5000)
fileOpenAct = self.createAction("Open",
self.openFile,QKeySequence.Open,None,
"Open file")
fileSaveAct = self.createAction("SaveAs",
self.fileSaveAs,QKeySequence.Save,None,
"Save figure")
fileQuitAct = self.createAction("&Quit",
self.close,"Ctrl+Q", None,
"Close the application")
alltimeicon = r'img\icon.png'
pltAllTimeAct = self.createAction("Plot",
self.callPlot,icon = alltimeicon,
tip = "Click to Plot")
#editZoomAct = self.createAction("&Zoom...",
# self.editZoom,"Alt+Z","editzoom",
# tip = "Zoom the image")
#editCmapAct = self.createAction("Change Color map",
# self.changeCmap,
# tip = "Change Colormap")
#
self.SedInFileAct = self.createAction(
"Use sediment Subplot", #slot = self.plotTime,
tip = "Use sediment Subplot", checkable= True)
self.SedInFileAct.setChecked(True)
self.editCmapLimsAct = self.createAction(
"Use Manual Cmap limits",None, "Ctrl+M",
None,"Use manual cmap limits", True)
self.editCmapLimsAct.setChecked(False)
self.interpCmapLimsAct = self.createAction(
"Dont Interploate Cmap limits",None, "Ctrl+I",
None,"Don't Interpolate Cmap limits", True)
self.limsAllCols = self.createAction(
"All columns Cmap limits",None, None,
None,"Scale: all columns, all time", True)
self.showIceAct = self.createAction(
"Show Ice Thickness",None, None,
None,"Scale: all columns, all time", True)
self.yearLinesAct = self.createAction(
'Draw year lines',None, None, None,
'Draw year lines', True)
self.formatTimeAct = self.createAction(
'Format time axis',None, None, None,
'Show time axis in the format of date and time', True)
self.formatTimeAct.setChecked(True)
self.intepolateAct = self.createAction(
'Interpolate data',None, None, None,
'Interpolate data', True)
self.intepolateAct.setChecked(True)
menubar = self.menuBar()
filemenu = self.addMultipleAction(
'File',
[fileOpenAct,fileSaveAct,fileQuitAct],
menubar.addMenu)
#TODO:add edit menu later
'''editMenu = self.addOneAction(
'Edit',
#[editZoomAct],
menubar.addMenu)'''
proprtsMenu = self.addMultipleAction(
'Properties',
[self.editCmapLimsAct,self.SedInFileAct,self.interpCmapLimsAct,
self.limsAllCols,self.formatTimeAct,
self.intepolateAct,self.yearLinesAct],
menubar.addMenu)
self.toolbar_plot = self.addOneAction('Plot',pltAllTimeAct,self.addToolBar)
self.toolbar_cmap = self.addToolBar('Properties colormap')
self.toolbar_distance = self.addToolBar('Properties distance')
self.toolbar_time = self.addToolBar('Properties2')
self.toolbar_plottype = self.addToolBar('Properties3')
self.makeToolbar()
self.show()
def makeToolbar(self, time_prop = None):
self.createCmapLimitsGroup()
self.createDistGroup()
self.createTimeGroup()
self.RadioButtons_Plot_type()
self.toolbar_cmap.addWidget(self.cmap_groupBox)
self.toolbar_distance.addWidget(self.dist_groupBox)
self.toolbar_time.addWidget(self.time_groupBox)
self.toolbar_plottype.addWidget(self.radio_groupBox)
def updateToolbar(self,time_prop = None):
self.toolbar_cmap.clear()
self.toolbar_distance.clear()
self.toolbar_plottype.clear()
self.toolbar_time.clear()
self.makeToolbar()
#def updateCmap(self):
# #self.toolbar_cmap.clear()
# self.cmap_sed_box.setCurrentIndex(1)
# self.cmap_box.setCurrentIndex(2)
def updateTime_range(self,time_prop = None):
if time_prop == 'Manual time limits' and self.filename is not None:
self.toolbar_time.clear()
self.start = 0
self.stop = self.lentime
self.updateTimeGroup(time_prop)
self.toolbar_time.addWidget(self.time_groupBox)
elif time_prop == 'Last Year' and self.filename is not None:
self.toolbar_time.clear()
self.stop = self.lentime
self.start = self.stop-365
self.updateTimeGroup(time_prop)
self.toolbar_time.addWidget(self.time_groupBox)
else:
pass
def createCmapLimitsGroup(self):
self.cmap_groupBox = QGroupBox("Colour map limits ")
self.wat_label = QLabel('Water')
self.sed_label = QLabel('Sediment')
self.box_minwater = QLineEdit()
self.box_maxwater = QLineEdit()
self.box_minsed = QLineEdit()
self.box_maxsed = QLineEdit()
self.cmap_box = QComboBox()
self.cmap_box.addItems(sorted(m for m in plt.cm.datad))
self.cmap_box.setCurrentIndex(5)
self.cmap_sed_box = QComboBox()
self.cmap_sed_box.addItems(sorted(m for m in plt.cm.datad))
self.cmap_sed_box.setCurrentIndex(7)
#self.cmap_sed_box.currentTextChanged.connect(self.updateCmap)
grd = QGridLayout(self.cmap_groupBox)
grd.addWidget(self.wat_label,1,0,1,1)
grd.addWidget(self.sed_label,2,0,1,1)
grd.addWidget(QLabel('min:'),1,1,1,1)
grd.addWidget(QLabel('min:'),2,1,1,1)
grd.addWidget(self.box_minwater,1,2,1,1)
grd.addWidget(self.box_minsed,2,2,1,1)
grd.addWidget(QLabel('max:'),1,3,1,1)
grd.addWidget(QLabel('max:'),2,3,1,1)
grd.addWidget(self.box_maxwater,1,4,1,1)
grd.addWidget(self.box_maxsed,2,4,1,1)
grd.addWidget(self.cmap_box,1,5,1,1)
grd.addWidget(self.cmap_sed_box,2,5,1,1)
return self
def updateCmapLimitsGroup(self):
self.cmap_groupBox = QGroupBox("Colour map limits ")
self.wat_label = QLabel('Water')
self.sed_label = QLabel('Sediment')
self.box_minwater = QLineEdit()
self.box_maxwater = QLineEdit()
self.box_minsed = QLineEdit()
self.box_maxsed = QLineEdit()
self.cmap_box = QComboBox()
self.cmap_box.addItems(sorted(m for m in plt.cm.datad))
self.cmap_sed_box = QComboBox()
self.cmap_sed_box.addItems(sorted(m for m in plt.cm.datad))
grd = QGridLayout(self.cmap_groupBox)
grd.addWidget(self.wat_label,1,0,1,1)
grd.addWidget(self.sed_label,2,0,1,1)
grd.addWidget(QLabel('min:'),1,1,1,1)
grd.addWidget(QLabel('min:'),2,1,1,1)
grd.addWidget(self.box_minwater,1,2,1,1)
grd.addWidget(self.box_minsed,2,2,1,1)
grd.addWidget(QLabel('max:'),1,3,1,1)
grd.addWidget(QLabel('max:'),2,3,1,1)
grd.addWidget(self.box_maxwater,1,4,1,1)
grd.addWidget(self.box_maxsed,2,4,1,1)
grd.addWidget(self.cmap_box,1,5,1,1)
grd.addWidget(self.cmap_sed_box,2,5,1,1)
def createDistGroup(self):
self.dist_groupBox = QGroupBox("Distance axis")
dist_grid = QGridLayout(self.dist_groupBox)
self.col_label = QLabel('Column: ')
self.numcol_2d = QSpinBox()
self.maxcol_label = QLabel('Max (from 0): ')
try:
self.nmaxcol_label = QLabel(str(
self.max_num_col))
if self.max_num_col > 0:
self.numcol_2d.setRange(0,self.max_num_col)
except AttributeError:
self.nmaxcol_label = QLabel(' ')
dist_grid.addWidget(self.col_label,0,0,1,1)
dist_grid.addWidget(self.numcol_2d,1,0,1,1)
dist_grid.addWidget(self.maxcol_label,0,1,1,1)
dist_grid.addWidget(self.nmaxcol_label,1,1,1,1)
return self
def updateTimeGroup(self,time_prop):
self.time_groupBox = QGroupBox("Time axis")
self.time_properties = QComboBox()
self.time_properties.addItems(['All time','Last Year','Manual time limits'])
self.numday_start_label = QLabel('Start: ')
self.numday_stop_label = QLabel('Stop: ')
self.maxday_label = QLabel('Maxday: ')
try:
self.value_maxday_l = QLabel(str(self.lentime))
except AttributeError:
self.value_maxday_l = QLabel(' ')
self.numday_box = QSpinBox()
self.numday_stop_box = QSpinBox()
if time_prop is not None:
index = self.time_properties.findText(time_prop)
self.time_properties.setCurrentIndex(index)
self.time_properties.currentTextChanged.connect(self.updateTime_range)
self.numday_box.setRange(0,self.lentime-1)
self.numday_stop_box.setRange(0,self.lentime)
self.numday_box.setValue(self.start)
self.numday_stop_box.setValue(self.stop)
self.time_properties.currentTextChanged.connect(self.updateTime_range)
self.make_time_group_grid()
def createTimeGroup(self):
self.time_groupBox = QGroupBox("Time axis")
self.time_properties = QComboBox()
self.time_properties.addItems(['All time','Last Year','Manual time limits'])
self.time_properties.setCurrentIndex(0)
self.time_properties.currentTextChanged.connect(self.updateTime_range)
self.numday_start_label = QLabel('Start: ')
self.numday_stop_label = QLabel('Stop: ')
self.maxday_label = QLabel('Maxday: ')
try:
self.value_maxday_l = QLabel(str(self.lentime))
except AttributeError:
self.value_maxday_l = QLabel(' ')
self.numday_box = QSpinBox()
self.numday_stop_box = QSpinBox()
try:
self.numday_box.setRange(self.start,self.lentime-1)
self.numday_stop_box.setRange(self.start,self.lentime)
            self.numday_box.setValue(self.start)
self.numday_stop_box.setValue(self.stop)
except AttributeError:
pass
self.make_time_group_grid()
def make_time_group_grid(self):
time_grid = QGridLayout(self.time_groupBox)
#line 1
time_grid.addWidget(self.time_properties,0,0,1,3)
time_grid.addWidget(self.numday_start_label,1,0,1,1)
time_grid.addWidget( self.numday_stop_label,1,1,1,1)
time_grid.addWidget( self.maxday_label,1,2,1,1)
#line 2
time_grid.addWidget( self.numday_box,2,0,1,1)
time_grid.addWidget(self.numday_stop_box,2,1,1,1)
time_grid.addWidget( self.value_maxday_l,2,2,1,1)
def RadioButtons_Plot_type(self):
self.radio_groupBox = QGroupBox("Plot_type")
self.radio_timeseries = QRadioButton("&Timeseries")
self.radio_dist = QRadioButton("&Distance transect")
self.radio_1d = QRadioButton("&1D plot")
#TODO: check if it is 2d or 1d
self.radio_timeseries.setChecked(True)
vbox = QVBoxLayout()
vbox.addWidget(self.radio_timeseries)
vbox.addWidget(self.radio_dist)
vbox.addWidget(self.radio_1d)
vbox.addStretch(1)
self.radio_groupBox.setLayout(vbox)
def addOneAction(self,text,action,target):
item = target(text)
item.addAction(action)
return item
def addMultipleAction(self,text,actions,target):
item = target(text)
item.addActions(actions)
return item
def createAction(self, text, slot=None, shortcut=None,
icon=None,tip=None, checkable=False):
action = QAction(text, self)
if icon is not None:
ic = QIcon()
pixmap1 = QPixmap(icon).scaled(164, 164)
ic.addPixmap(pixmap1, QIcon.Normal, QIcon.Off)
action.setIcon(ic)
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
action.triggered.connect(slot)
if checkable:
action.setCheckable(True)
return action
#TODO: add Zoom
def editZoom(self):
pass
#TODO: add changeCMAP
def getCmap(self):
#TODO: make it changeable
cmap = plt.get_cmap(self.cmap_box.currentText())
cmap1 = plt.get_cmap(self.cmap_sed_box.currentText())
#except:
# self.cmap = plt.get_cmap('gist')
# self.cmap1 = plt.get_cmap('gist')
return cmap,cmap1
def changeCmap(self):
pass
def openFile(self):
import numpy as np
self.filename ,_ = (QFileDialog.getOpenFileName(
self,'Open netcdf ', os.getcwd(),
"netcdf (*.nc);; all (*)"))
self.array = readdata_qmain.ReadVar(self.filename)
var_list = self.array.get_variables_list()
self.listWidget.addItems(var_list)
self.max_num_col = self.array.max_numcol()
self.lentime = self.array.lentime()
self.start = 0
self.stop = self.lentime
self.updateToolbar()
def fileSaveAs(self):
if not (self.filename == None):
self.plotTime()
try:
formats = [r'bmp',r'png',r'pdf']
self.fname, _ = QFileDialog.getSaveFileName(self, "Save File",
"Figure.png", "All Files (*);; png (*png) ;; pdf (*pdf)")
plt.savefig(self.fname, dpi=150)
except:
pass
else:
Messages.Save()
#TODO: implement recent files
#sef.addRecentFile(fname)
def openFolder(self):
import tkinter as tk
root = tk.Tk()
root.withdraw()
self.directory = askdirectory()
def plotTransect(self):
pass
def add_sed_plot(self,array,z,x,xlen,y):
try:
y2max,ny2max = array.y_watmax()
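            # ny2max marks the boundary between water column and sediment strata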
y_wat = y[:ny2max-1]
y_sed = (array.depth_sed(y,y2max))[ny2max-1:]
assert len(y_sed) > 1
array.close()
X,Y_wat = np.meshgrid(x,y_wat)
X_sed,Y_sed = np.meshgrid(x,y_sed)
Z_wat = z[:ny2max-1,:]
Z_sed = z[ny2max-1:,:]
cmap,cmap1 = self.getCmap()
gs = gridspec.GridSpec(2, 1)
gs.update(left = 0.07,right = 0.85 )
self.cax1 = self.figure.add_axes([0.86, 0.11, 0.02, 0.35])
self.cax = self.figure.add_axes([0.86, 0.53, 0.02, 0.35])
ax0 = self.figure.add_subplot(gs[0])
ax1 = self.figure.add_subplot(gs[1])
if self.formatTimeAct.isChecked():
X = readdata_qmain.format_time_axis(self,ax0,xlen,X)
X_sed = readdata_qmain.format_time_axis(self,ax1,xlen,X_sed)
if self.editCmapLimsAct.isChecked():
#levels_wat = MaxNLocator(nbins=25).tick_values(0, 1000)
try:
min = float(self.box_minwater.text())
max = float(self.box_maxwater.text())
levels_wat = np.linspace(min,max,num = 50)
except:
Messages.no_limits("water column")
levels_wat = MaxNLocator(nbins=25).tick_values(
Z_wat.min(), Z_wat.max())
else:
levels_wat = MaxNLocator(nbins=25).tick_values(
Z_wat.min(), Z_wat.max())
if self.intepolateAct.isChecked():
if self.editCmapLimsAct.isChecked():
#levels_wat = MaxNLocator(nbins=25).tick_values(0, 1000)
try:
min = float(self.box_minsed.text())
max = float(self.box_maxsed.text())
                        levels_sed = np.linspace(min,max,num = 50)
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numpy import array, nan
from numpy.testing import assert_almost_equal
from pandas import DataFrame, Timestamp
from catalyst.assets import Equity, Future
from catalyst.data.dispatch_bar_reader import (
AssetDispatchMinuteBarReader,
AssetDispatchSessionBarReader,
)
from catalyst.data.resample import (
MinuteResampleSessionBarReader,
ReindexMinuteBarReader,
ReindexSessionBarReader,
)
from catalyst.testing.fixtures import (
WithBcolzEquityMinuteBarReader,
WithBcolzEquityDailyBarReader,
WithBcolzFutureMinuteBarReader,
WithTradingSessions,
ZiplineTestCase,
)
OHLC = ['open', 'high', 'low', 'close']
class AssetDispatchSessionBarTestCase(WithBcolzEquityDailyBarReader,
WithBcolzFutureMinuteBarReader,
WithTradingSessions,
ZiplineTestCase):
TRADING_CALENDAR_STRS = ('us_futures', 'NYSE')
TRADING_CALENDAR_PRIMARY_CAL = 'us_futures'
ASSET_FINDER_EQUITY_SIDS = 1, 2, 3
START_DATE = Timestamp('2016-08-22', tz='UTC')
END_DATE = Timestamp('2016-08-24', tz='UTC')
@classmethod
def make_future_minute_bar_data(cls):
m_opens = [
cls.trading_calendar.open_and_close_for_session(session)[0]
for session in cls.trading_sessions['us_futures']]
yield 10001, DataFrame({
'open': [10000.5, 10001.5, nan],
'high': [10000.9, 10001.9, nan],
'low': [10000.1, 10001.1, nan],
'close': [10000.3, 10001.3, nan],
'volume': [1000, 1001, 0],
}, index=m_opens)
yield 10002, DataFrame({
'open': [20000.5, nan, 20002.5],
'high': [20000.9, nan, 20002.9],
'low': [20000.1, nan, 20002.1],
'close': [20000.3, nan, 20002.3],
'volume': [2000, 0, 2002],
}, index=m_opens)
yield 10003, DataFrame({
'open': [nan, 30001.5, 30002.5],
'high': [nan, 30001.9, 30002.9],
'low': [nan, 30001.1, 30002.1],
'close': [nan, 30001.3, 30002.3],
'volume': [0, 3001, 3002],
}, index=m_opens)
@classmethod
def make_equity_daily_bar_data(cls):
sessions = cls.trading_sessions['NYSE']
yield 1, DataFrame({
'open': [100.5, 101.5, nan],
'high': [100.9, 101.9, nan],
'low': [100.1, 101.1, nan],
'close': [100.3, 101.3, nan],
'volume': [1000, 1001, 0],
}, index=sessions)
yield 2, DataFrame({
'open': [200.5, nan, 202.5],
'high': [200.9, nan, 202.9],
'low': [200.1, nan, 202.1],
'close': [200.3, nan, 202.3],
'volume': [2000, 0, 2002],
}, index=sessions)
yield 3, DataFrame({
'open': [301.5, 302.5, nan],
'high': [301.9, 302.9, nan],
'low': [301.1, 302.1, nan],
'close': [301.3, 302.3, nan],
'volume': [3001, 3002, 0],
}, index=sessions)
@classmethod
def make_futures_info(cls):
return DataFrame({
'sid': [10001, 10002, 10003],
'root_symbol': ['FOO', 'BAR', 'BAZ'],
'symbol': ['FOOA', 'BARA', 'BAZA'],
'start_date': [cls.START_DATE] * 3,
'end_date': [cls.END_DATE] * 3,
# TODO: Make separate from 'end_date'
'notice_date': [cls.END_DATE] * 3,
'expiration_date': [cls.END_DATE] * 3,
'multiplier': [500] * 3,
'exchange': ['CME'] * 3,
})
@classmethod
def init_class_fixtures(cls):
super(AssetDispatchSessionBarTestCase, cls).init_class_fixtures()
readers = {
Equity: ReindexSessionBarReader(
cls.trading_calendar,
cls.bcolz_equity_daily_bar_reader,
cls.START_DATE,
cls.END_DATE),
Future: MinuteResampleSessionBarReader(
cls.trading_calendar,
cls.bcolz_future_minute_bar_reader,
)
}
cls.dispatch_reader = AssetDispatchSessionBarReader(
cls.trading_calendar,
cls.asset_finder,
readers
)
def test_load_raw_arrays(self):
sessions = self.trading_calendar.sessions_in_range(
self.START_DATE, self.END_DATE)
results = self.dispatch_reader.load_raw_arrays(
['high', 'volume'],
sessions[0], sessions[2], [2, 10003, 1, 10001])
expected_per_sid = (
            (2, [array([200.9, nan, 202.9])
from pandapipes.component_models.abstract_models.start_therm_stor_euler import StratThermStor
import math
import numpy as np
import pandas as pd
from numpy.linalg import inv
class TestStratThermStor(StratThermStor):
def __init__(self, init_strata_temp_c, t_source_c, t_sink_c, mdot_source_max_kg_per_s, mdot_sink_max_kg_per_s,
delta_t_s, tank_height_mm=1700, tank_diameter_mm=810, wall_thickness_mm=160,
source_ind=(0, -1), load_ind=(-1, 0), tol=1e-6):
super().__init__(init_strata_temp_c, t_source_c, t_sink_c, mdot_source_max_kg_per_s, mdot_sink_max_kg_per_s,
delta_t_s, tank_height_mm, tank_diameter_mm, wall_thickness_mm,
source_ind, load_ind, tol)
self.a1, self.b1, self.c1, self.a2, self.b2, self.c2 = 0, 0, 0, 0, 0, 0
self.t1_np1, self.t2_np1 = 0, 0
def calculate_test_matrix(self):
deltap, deltam = self.get_delta(True), self.get_delta(False)
fac_t_m = self.delta_t_s / self.m_strat_kg
fac_al_zc = self.A_m2 * self.lambda_eff_w_per_m_k / self.z_m / self.c_p_w_s_per_kg_k
fac_kaext_c = self.k_w_per_m2_k * self.A_ext_m2 / self.c_p_w_s_per_kg_k
fac_kaexttop_c = self.k_w_per_m2_k * (self.A_ext_m2 + self.A_m2) / self.c_p_w_s_per_kg_k
self.a1 = 1 + fac_t_m * (fac_kaext_c - self.mdot_kg_per_ts * deltam + fac_al_zc + self.mdot_source_kg_per_ts)
self.a2 = - fac_t_m * (deltap * self.mdot_kg_per_ts + fac_al_zc)
self.b1 = fac_t_m * (deltam * self.mdot_kg_per_ts - fac_al_zc)
self.b2 = 1 + fac_t_m * (deltap * self.mdot_kg_per_ts + fac_kaext_c + fac_al_zc + self.mdot_sink_kg_per_ts)
self.c1 = self.t_i_k[0] + fac_t_m * (fac_kaext_c * self.t_amb_k + self.t_source_k * self.mdot_source_kg_per_ts)
self.c2 = self.t_i_k[1] + fac_t_m * (fac_kaext_c * self.t_amb_k + self.t_sink_k * self.mdot_sink_kg_per_ts)
def calculate_temperatures_from_test_matrix(self):
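        # Solves the coupled 2x2 system a1*t1 + b1*t2 = c1, a2*t1 + b2*t2 = c2
        # by elimination; on a zero pivot the strata are treated as decoupled.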
try:
self.t1_np1 = (self.c2 - self.c1 * self.b2 / self.b1) / (self.a2 - self.a1 * self.b2 / self.b1)
except ZeroDivisionError:
print("ZeroDivisionError")
self.t1_np1 = self.c1 / self.a1
try:
self.t2_np1 = (self.c2 - self.c1 * self.a2 / self.a1) / (self.b2 - self.b1 * self.a2 / self.a1)
except ZeroDivisionError:
print("ZeroDivisionError")
self.t2_np1 = self.c2 / self.b2
def iterate(self):
super().iterate()
self.calculate_test_matrix()
self.calculate_temperatures_from_test_matrix()
def test_two_strata():
    q_source_w, q_sink_w = np.array([20, 60, 40, 80, 80])
import numpy as np
from harmonic_equation import harmonic_equation
from equation import equation
import low_level_tools as llt
################################################################################
def eq_11_bc(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_exact = np.exp(x * y)
current[0][0, :] = u_exact[0, :]
current[0][-1, :] = u_exact[-1, :]
current[0][:, 0] = u_exact[:, 0]
current[0][:, -1] = u_exact[:, -1]
return current
def eq_11_exact(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_exact = np.exp(x * y)
return np.array([u_exact])
def eq_11_rhs(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_rhs = ((1 + np.exp(x * y)) * x ** 2 + (2 + np.cos(np.pi * x)) * y ** 2 + \
(1 + x * y) * np.exp(-x * y) + y * np.exp(x) + x * np.exp(y) + np.sin(np.pi * x * y)) * np.exp(x * y)
u_rhs[0, :] = 0
u_rhs[N - 1, :] = 0
u_rhs[:, N - 1] = 0
u_rhs[:, 0] = 0
rhs = np.array([u_rhs])
return rhs
def eq_11_coeff(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
###
a11 = 1 + np.exp(x * y)
b11 = 2 + np.cos(np.pi * x)
c11 = np.exp(-x * y)
d11 = np.exp(x)
e11 = np.exp(y)
f11 = np.sin(np.pi * x * y)
###
coeff1 = [a11, b11, c11, d11, e11, f11]
coeff = np.array([coeff1])
return coeff
################################################################################
def eq_red_fox_bc(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_exact = np.exp(x * y)
current[0][0, :] = u_exact[0, :]
current[0][-1, :] = u_exact[-1, :]
current[0][:, 0] = u_exact[:, 0]
current[0][:, -1] = u_exact[:, -1]
return current
def eq_red_fox_exact(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_exact = np.exp(x * y)
return np.array([u_exact])
def eq_red_fox_rhs(current, a=1):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_rhs = (x**2 + y**2 + a*y)*np.exp(x*y)
u_rhs[0, :] = 0
u_rhs[N - 1, :] = 0
u_rhs[:, N - 1] = 0
u_rhs[:, 0] = 0
rhs = np.array([u_rhs])
return rhs
def eq_red_fox_coeff(current, a=1):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
###
a11 = np.ones((N, N))
b11 = np.ones((N, N))
c11 = np.zeros((N, N))
d11 = a*np.ones((N, N))
e11 = np.zeros((N, N))
f11 = np.zeros((N, N))
###
coeff1 = [a11, b11, c11, d11, e11, f11]
coeff = np.array([coeff1])
return coeff
################################################################################
def eq_00_bc(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_exact = np.sin(np.pi * x) * np.sin(np.pi * y) / 2
current[0][0, :] = u_exact[0, :]
current[0][-1, :] = u_exact[-1, :]
current[0][:, 0] = u_exact[:, 0]
current[0][:, -1] = u_exact[:, -1]
return current
def eq_00_exact(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_exact = np.sin(np.pi * x) * np.sin(np.pi * y) / 2
return np.array([u_exact])
def eq_00_rhs(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_rhs = -np.pi ** 2 * np.sin(np.pi * x) * np.sin(np.pi * y) * (
4 + y * np.cos(x * np.pi) + 4 + x * np.exp(-x * y)) / 2 + \
np.pi ** 2 * np.cos(np.pi * x) * np.cos(np.pi * y) * np.exp(y * x) / 2 + \
np.pi * np.cos(np.pi * x) * np.sin(np.pi * y) * x * y ** 3 / 2 + \
np.pi * np.sin(np.pi * x) * np.cos(np.pi * y) * (y + x ** 2 + 0.2) / 2 + \
np.sinh(x + 3 * y) * np.sin(np.pi * x) * np.sin(np.pi * y) / 2
u_rhs[0, :] = 0;
u_rhs[N - 1, :] = 0;
u_rhs[:, N - 1] = 0;
u_rhs[:, 0] = 0
rhs = np.array([u_rhs])
return rhs
def eq_00_coeff(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
###
a11 = 4 + y * np.cos(x * np.pi)
b11 = 4 + x * np.exp(-x * y)
c11 = np.exp(y * x)
d11 = x * y ** 3
e11 = y + x ** 2 + 0.2
f11 = np.sinh(x + 3 * y)
###
coeff1 = [a11, b11, c11, d11, e11, f11]
coeff = np.array([coeff1])
return coeff
################################################################################
def eq_12_bc(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_exact = np.exp(x + y)
current[0][0, :] = u_exact[0, :]
current[0][-1, :] = u_exact[-1, :]
current[0][:, 0] = u_exact[:, 0]
current[0][:, -1] = u_exact[:, -1]
return current
def eq_12_exact(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_exact = np.exp(x + y)
return np.array([u_exact])
def eq_12_rhs(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_rhs = (4 + np.cos(2 * np.pi * x * y) + 2 + np.sin(np.pi * x * y) + np.exp(-x * y) \
+ np.exp(x) + np.exp(y) + np.sin(np.pi * x * y) + 2) * np.exp(x + y)
u_rhs[0, :] = 0;
u_rhs[N - 1, :] = 0;
u_rhs[:, N - 1] = 0;
u_rhs[:, 0] = 0
rhs = np.array([u_rhs])
return rhs
def eq_12_coeff(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
###
a11 = 4 + np.cos(2 * np.pi * x * y)
b11 = 2 + np.sin(np.pi * x * y)
c11 = np.exp(-x * y)
d11 = np.exp(x)
e11 = np.exp(y)
f11 = np.sin(np.pi * x * y) + 2
###
coeff1 = [a11, b11, c11, d11, e11, f11]
coeff = np.array([coeff1])
return coeff
################################################################################
def eq_13_bc(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_exact = y * np.exp(x)
current[0][0, :] = u_exact[0, :]
current[0][-1, :] = u_exact[-1, :]
current[0][:, 0] = u_exact[:, 0]
current[0][:, -1] = u_exact[:, -1]
return current
def eq_13_exact(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_exact = y * np.exp(x)
return np.array([u_exact])
def eq_13_rhs(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_rhs = (2 + x * np.exp(x * y) + 6 + np.sin(np.pi * x * y)) * y * np.exp(x) + \
x * np.exp(-x * y) * np.exp(x) + y ** 2 * np.exp(2 * x) + x * y ** 2 * np.exp(x) * np.exp(y)
u_rhs[0, :] = 0;
u_rhs[N - 1, :] = 0;
u_rhs[:, N - 1] = 0;
u_rhs[:, 0] = 0
rhs = np.array([u_rhs])
return rhs
def eq_13_coeff(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
###
a11 = 4 + y * np.exp(-x * y)
b11 = 2 + x * np.exp(x * y)
c11 = x * np.exp(-x * y)
d11 = y * np.exp(x)
e11 = x * y ** 2 * np.exp(y)
f11 = 6 + np.sin(np.pi * x * y)
###
coeff1 = [a11, b11, c11, d11, e11, f11]
coeff = np.array([coeff1])
return coeff
################################################################################
def eq_14_bc(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_exact = np.exp(x + y)
current[0][0, :] = u_exact[0, :]
current[0][-1, :] = u_exact[-1, :]
current[0][:, 0] = u_exact[:, 0]
current[0][:, -1] = u_exact[:, -1]
return current
def eq_14_exact(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_exact = np.exp(x + y)
return np.array([u_exact])
def eq_14_rhs(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
b = 4
a = 3
u_rhs = (b + np.exp(x * y) + a + np.exp(-x * y) +
np.cos(np.pi*(x + 2*y)) + np.sin(np.pi*(y + 2*x)))*np.exp(x + y)
u_rhs[0, :] = 0;
u_rhs[N - 1, :] = 0;
u_rhs[:, N - 1] = 0;
u_rhs[:, 0] = 0
rhs = np.array([u_rhs])
return rhs
def eq_14_coeff(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
###
b = 4
a = 3
a11 = b + np.exp(x * y)
b11 = a + np.exp(-x * y)
c11 = np.zeros((N, N))
d11 = np.cos(np.pi*(x + 2*y))
e11 = np.sin(np.pi*(y + 2*x))
f11 = np.zeros((N, N))
###
coeff1 = [a11, b11, c11, d11, e11, f11]
coeff = np.array([coeff1])
return coeff
################################################################################
def eq_21_bc(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_exact = np.exp(x * y)
v_exact = np.exp(2 * x * y)
###
current[0][0, :] = u_exact[0, :]
current[0][-1, :] = u_exact[-1, :]
current[0][:, 0] = u_exact[:, 0]
current[0][:, -1] = u_exact[:, -1]
###
current[1][0, :] = v_exact[0, :]
current[1][-1, :] = v_exact[-1, :]
current[1][:, 0] = v_exact[:, 0]
current[1][:, -1] = v_exact[:, -1]
###
return current
def eq_21_exact(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_exact = np.exp(x * y)
v_exact = np.exp(2 * x * y)
exact = np.array([u_exact, v_exact])
return exact
def eq_21_rhs(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_rhs = 20 * np.exp(2 * x * y) * x ** 2 - x - np.exp(-x * y) * y
v_rhs = np.exp(x * y) + 4 * (7 + (np.sin(np.pi * x * y)) ** 2) * np.exp(2 * x * y) * y ** 2 + 16 * np.exp(
3 * x * y) * x ** 2 - \
2 * x * np.exp(2 * x * y - x) - 2 * y * np.exp(2 * x * y - y) + (2 + 4 * x * y) * np.sin(
np.pi * x * y) * np.exp(2 * x * y)
v_rhs[0, :] = 0
v_rhs[N - 1, :] = 0
v_rhs[:, N - 1] = 0
v_rhs[:, 0] = 0
u_rhs[0, :] = 0
u_rhs[N - 1, :] = 0
u_rhs[:, N - 1] = 0
u_rhs[:, 0] = 0
rhs = np.array([u_rhs, v_rhs])
return rhs
def eq_21_coeff(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
###
a11 = 20 * np.exp(x * y)
b11 = 7 + (np.cos(np.pi * x * y)) ** 2
c11 = np.cos(np.pi * x * y)
d11 = -np.exp(-2 * x * y)
e11 = -np.exp(-x * y)
f11 = np.zeros((N, N))
###
a12 = np.zeros((N, N))
b12 = np.zeros((N, N))
c12 = np.zeros((N, N))
d12 = np.zeros((N, N))
e12 = np.zeros((N, N))
f12 = -((7 + (np.cos(np.pi * x * y)) ** 2) * y ** 2 + np.cos(np.pi * x * y) * (1 + x * y)) * np.exp(-x * y)
###
a22 = 4 * np.exp(x * y)
b22 = 7 + (np.sin(np.pi * x * y)) ** 2
c22 = np.sin(np.pi * x * y)
d22 = -np.exp(-y)
e22 = -np.exp(-x)
f22 = np.zeros((N, N))
###
a21 = np.zeros((N, N))
b21 = np.zeros((N, N))
c21 = np.zeros((N, N))
d21 = np.zeros((N, N))
e21 = np.zeros((N, N))
f21 = np.ones((N, N))
###
coeff1 = [a11, b11, c11, d11, e11, f11, a12, b12, c12, d12, e12, f12]
coeff2 = [a21, b21, c21, d21, e21, f21, a22, b22, c22, d22, e22, f22]
coeff = np.array([coeff1, coeff2])
return coeff
################################################################################
def eq_22_bc(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_exact = np.exp(x * y)
v_exact = np.exp(x + y)
###
current[0][0, :] = u_exact[0, :]
current[0][-1, :] = u_exact[-1, :]
current[0][:, 0] = u_exact[:, 0]
current[0][:, -1] = u_exact[:, -1]
###
current[1][0, :] = v_exact[0, :]
current[1][-1, :] = v_exact[-1, :]
current[1][:, 0] = v_exact[:, 0]
current[1][:, -1] = v_exact[:, -1]
###
return current
def eq_22_exact(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_exact = np.exp(x * y)
v_exact = np.exp(x + y)
exact = np.array([u_exact, v_exact])
return exact
def eq_22_rhs(current):
N, M = current[0].shape
z = np.linspace(0, 1, N)
x, y = np.meshgrid(z, z, indexing='ij')
u_rhs = ((1 + np.exp(x * y)) * x ** 2 + (2 + np.cos(np.pi * x)) * y ** 2 +
(1 + x * y) * np.exp(-x * y) + y * np.exp(x) + x * np.exp(y) + np.sin(np.pi * x * y)) * np.exp(x * y) + \
(4 + np.cos(2 * np.pi * x * y) + 2 + np.sin(np.pi * x * y) + np.exp(-x * y)
+ np.exp(x) + np.exp(y) + np.sin(np.pi * x * y) + 2) * np.exp(x + y)
    v_rhs = (2 + np.log(1 + x) + 4 + np.exp(2 * x * y + 3) / 200 + np.log(1 + x * y)
# Copyright (c) 2012-2014 <NAME>, <NAME>
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from scipy import stats, special
import scipy as sp
from . import link_functions
from scipy import stats, integrate
from scipy.special import gammaln, gamma
from .likelihood import Likelihood
from ..core.parameterization import Param
from paramz.transformations import Logexp
from scipy.special import psi as digamma
class StudentT(Likelihood):
"""
Student T likelihood
    For nomenclature see Bayesian Data Analysis 2003 p576
.. math::
p(y_{i}|\\lambda(f_{i})) = \\frac{\\Gamma\\left(\\frac{v+1}{2}\\right)}{\\Gamma\\left(\\frac{v}{2}\\right)\\sqrt{v\\pi\\sigma^{2}}}\\left(1 + \\frac{1}{v}\\left(\\frac{(y_{i} - f_{i})^{2}}{\\sigma^{2}}\\right)\\right)^{\\frac{-v+1}{2}}
"""
def __init__(self,gp_link=None, deg_free=5, sigma2=2):
if gp_link is None:
gp_link = link_functions.Identity()
super(StudentT, self).__init__(gp_link, name='Student_T')
# sigma2 is not a noise parameter, it is a squared scale.
self.sigma2 = Param('t_scale2', float(sigma2), Logexp())
self.v = Param('deg_free', float(deg_free), Logexp())
self.link_parameter(self.sigma2)
self.link_parameter(self.v)
#self.v.constrain_fixed()
self.log_concave = False
def update_gradients(self, grads):
"""
Pull out the gradients, be careful as the order must match the order
in which the parameters are added
"""
self.sigma2.gradient = grads[0]
self.v.gradient = grads[1]
def pdf_link(self, inv_link_f, y, Y_metadata=None):
"""
Likelihood function given link(f)
.. math::
p(y_{i}|\\lambda(f_{i})) = \\frac{\\Gamma\\left(\\frac{v+1}{2}\\right)}{\\Gamma\\left(\\frac{v}{2}\\right)\\sqrt{v\\pi\\sigma^{2}}}\\left(1 + \\frac{1}{v}\\left(\\frac{(y_{i} - \\lambda(f_{i}))^{2}}{\\sigma^{2}}\\right)\\right)^{\\frac{-v+1}{2}}
:param inv_link_f: latent variables link(f)
:type inv_link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in student t distribution
:returns: likelihood evaluated for this point
:rtype: float
"""
        assert np.atleast_1d(inv_link_f)
import argparse
import matplotlib
matplotlib.use("AGG")
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
import pandas as pd
from sklearn.decomposition import PCA
sns.set_context('poster')
sns.set_style('white')
pd.options.mode.chained_assignment = None # default='warn'
import hdbscan
from collections import Counter
from collections import defaultdict
from numpy import random
#_____________________________
def normalize(x, r):
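    # Divide x element-wise by r; M_norm is then built up in the loop below.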
M = np.divide(x, r)
M_norm = np.full_like(M, 0)
    for i in range(np.shape(M)
import numpy as np
__all__=['int2bitarray','bitarray2int','Bin2Gray','Gray2Bin','RGB2Bin','Bin2RGB']
def int2bitarray(N,k):
"""
Changes array's base from int (base 10) to binary (base 2)
Parameters:
N: int N
k: Width of the binary array you would like to change N into. N must not be greater than 2^k - 1.
>> Examples: int2bitarray(6,3) returns [1, 1, 0]
int2bitarray(6,5) returns [0, 0, 1, 1,0]
int2bitarray(255,8) returns [1, 1, 1, 1, 1, 1, 1, 1]
int2bitarray(255,10) returns [0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
"""
binary_string = bin(N)
length = len(binary_string)
bitarray = np.zeros(k, 'int')
for i in range(length-2):
bitarray[k-i-1] = int(binary_string[length-i-1])
return bitarray
def bitarray2int(bitarray):
""" Changes array's base from binary (base 2) to int (base 10).
Parameters:
bitarray: Binary Array.
>> Examples: bitarray2int([1, 1, 0]) returns 6
bitarray2int([0, 0, 1, 1,0]) returns 6
bitarray2int([1, 1, 1, 1, 1, 1, 1, 1]) returns 255
"""
bitstring = "".join([str(i) for i in bitarray])
return int(bitstring,2)
def Gray2Bin(img):
""" Puts a GrayScale Image on a binary form
Parameters:
img_array: 2-D array of a grayscale image (no 3rd dimension)
returns:
3-D img_array in a binary form, each pixel uint8 is transformed to an 8-bits array
>>> Example: the grayscale (2x2) image [[2, 127],
[255, 0]]
    will be converted to the (2x2x8) binary image: [[[0, 0, 0, 0, 0, 0, 1, 0],[0, 1, 1, 1, 1, 1, 1, 1]],
[[1, 1, 1, 1, 1, 1, 1, 1],[0, 0, 0, 0, 0, 0, 0, 0]]]
"""
if not len(img.shape)==2:
        raise ValueError("Input image must have 2 dimensions. Make sure it's a grayscale image.")
height,width = img.shape
img_bin = np.zeros(shape=(height,width,8),dtype=int)
for i in range(height):
for j in range(width):
img_bin[i,j,:] = int2bitarray(img[i,j],8)
return img_bin
def Bin2Gray(img_bin):
""" Puts a 8-bits binary Image to uint8
Parameters:
img_array: 3-D array (height, width, 8)
returns:
2-D img_array in grayscale
>>> Example: the (2x2x8) binary image: [[[0, 0, 0, 0, 0, 0, 1, 0],[0, 1, 1, 1, 1, 1, 1, 1]],
[[1, 1, 1, 1, 1, 1, 1, 1],[0, 0, 0, 0, 0, 0, 0, 0]]]
    will be converted to the (2x2) uint8 image [[2, 127],
[255, 0]]
"""
height,width,k = img_bin.shape
img_grayscale = np.zeros(shape=(height,width),dtype=np.uint8)
for i in range(height):
for j in range(width):
img_grayscale[i,j] = bitarray2int(img_bin[i,j,:])
return img_grayscale
def RGB2Bin(img):
""" Puts an RGB Image on a binary form
Parameters:
img_array: 3-D array of an RGB image ( 3rd dimension = 3)
returns:
3-D img_array in a binary form, each pixel is transformed to an 24-bits binary array.
>>> Example: the grayscale (2x1x3) image [[[2, 127,0]],
[[255, 0,1]]]
    will be converted to the (2x1x24) binary image:
[[[0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]],
[[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]]
"""
height,width,depth = img.shape
if not depth==3:
        raise ValueError("Input image's 3rd dimension must be equal to 3 (RGB). Make sure it's an RGB image.")
img_bin = np.zeros(shape=(height,width,24),dtype=int)
for i in range(height):
for j in range(width):
R = int2bitarray(img[i,j,0],8)
G = int2bitarray(img[i,j,1],8)
B = int2bitarray(img[i,j,2],8)
            img_bin[i,j,:] = np.concatenate((R,G,B))
"""
desispec.sky
============
Utility functions to compute a sky model and subtract it.
"""
import numpy as np
from desispec.resolution import Resolution
from desispec.linalg import cholesky_solve
from desispec.linalg import cholesky_invert
from desispec.linalg import spline_fit
from desiutil.log import get_logger
from desispec import util
from desiutil import stats as dustat
import scipy,scipy.sparse,scipy.stats,scipy.ndimage
import sys
def compute_sky(frame, nsig_clipping=4.,max_iterations=100,model_ivar=False,add_variance=True,angular_variation_deg=0,chromatic_variation_deg=0) :
"""Compute a sky model.
Input flux are expected to be flatfielded!
We don't check this in this routine.
Args:
frame : Frame object, which includes attributes
- wave : 1D wavelength grid in Angstroms
- flux : 2D flux[nspec, nwave] density
- ivar : 2D inverse variance of flux
- mask : 2D inverse mask flux (0=good)
- resolution_data : 3D[nspec, ndiag, nwave] (only sky fibers)
nsig_clipping : [optional] sigma clipping value for outlier rejection
Optional:
max_iterations : int , number of iterations
model_ivar : replace ivar by a model to avoid bias due to correlated flux and ivar. this has a negligible effect on sims.
add_variance : evaluate calibration error and add this to the sky model variance
angular_variation_deg : Degree of polynomial for sky flux variation with focal plane coordinates (default=0, i.e. no correction, a uniform sky)
chromatic_variation_deg : Wavelength degree for the chromatic x angular terms. If negative, use as many 2D polynomials of x and y as wavelength entries.
returns SkyModel object with attributes wave, flux, ivar, mask
"""
if angular_variation_deg == 0 :
return compute_uniform_sky(frame, nsig_clipping=nsig_clipping,max_iterations=max_iterations,model_ivar=model_ivar,add_variance=add_variance)
else :
if chromatic_variation_deg < 0 :
return compute_non_uniform_sky(frame, nsig_clipping=nsig_clipping,max_iterations=max_iterations,model_ivar=model_ivar,add_variance=add_variance,angular_variation_deg=angular_variation_deg)
else :
return compute_polynomial_times_sky(frame, nsig_clipping=nsig_clipping,max_iterations=max_iterations,model_ivar=model_ivar,add_variance=add_variance,angular_variation_deg=angular_variation_deg,chromatic_variation_deg=chromatic_variation_deg)
def _model_variance(frame,cskyflux,cskyivar,skyfibers) :
"""look at chi2 per wavelength and increase sky variance to reach chi2/ndf=1
"""
log = get_logger()
tivar = util.combine_ivar(frame.ivar[skyfibers], cskyivar[skyfibers])
    # the chi2 at a given wavelength can be large because of a cosmic
# and not a psf error or sky non uniformity
# so we need to consider only waves for which
# a reasonable sky model error can be computed
# mean sky
msky = np.mean(cskyflux,axis=0)
dwave = np.mean(np.gradient(frame.wave))
dskydw = np.zeros(msky.shape)
dskydw[1:-1]=(msky[2:]-msky[:-2])/(frame.wave[2:]-frame.wave[:-2])
dskydw = np.abs(dskydw)
# now we consider a worst possible sky model error (20% error on flat, 0.5A )
max_possible_var = 1./(tivar+(tivar==0)) + (0.2*msky)**2 + (0.5*dskydw)**2
# exclude residuals inconsistent with this max possible variance (at 3 sigma)
bad = (frame.flux[skyfibers]-cskyflux[skyfibers])**2 > 3**2*max_possible_var
tivar[bad]=0
ndata = np.sum(tivar>0,axis=0)
ok=np.where(ndata>1)[0]
chi2 = np.zeros(frame.wave.size)
chi2[ok] = np.sum(tivar*(frame.flux[skyfibers]-cskyflux[skyfibers])**2,axis=0)[ok]/(ndata[ok]-1)
chi2[ndata<=1] = 1. # default
# now we are going to evaluate a sky model error based on this chi2,
# but only around sky flux peaks (>0.1*max)
tmp = np.zeros(frame.wave.size)
tmp = (msky[1:-1]>msky[2:])*(msky[1:-1]>msky[:-2])*(msky[1:-1]>0.1*np.max(msky))
peaks = np.where(tmp)[0]+1
dpix = int(np.ceil(3/dwave)) # +- n Angstrom around each peak
skyvar = 1./(cskyivar+(cskyivar==0))
# loop on peaks
for peak in peaks :
b=peak-dpix
e=peak+dpix+1
mchi2 = np.mean(chi2[b:e]) # mean reduced chi2 around peak
mndata = np.mean(ndata[b:e]) # mean number of fibers contributing
# sky model variance = sigma_flat * msky + sigma_wave * dmskydw
sigma_flat=0.000 # the fiber flat error is already included in the flux ivar
sigma_wave=0.005 # A, minimum value
res2=(frame.flux[skyfibers,b:e]-cskyflux[skyfibers,b:e])**2
var=1./(tivar[:,b:e]+(tivar[:,b:e]==0))
nd=np.sum(tivar[:,b:e]>0)
while(sigma_wave<2) :
pivar=1./(var+(sigma_flat*msky[b:e])**2+(sigma_wave*dskydw[b:e])**2)
pchi2=np.sum(pivar*res2)/nd
if pchi2<=1 :
log.info("peak at {}A : sigma_wave={}".format(int(frame.wave[peak]),sigma_wave))
skyvar[:,b:e] += ( (sigma_flat*msky[b:e])**2 + (sigma_wave*dskydw[b:e])**2 )
break
sigma_wave += 0.005
return (cskyivar>0)/(skyvar+(skyvar==0))
def compute_uniform_sky(frame, nsig_clipping=4.,max_iterations=100,model_ivar=False,add_variance=True) :
"""Compute a sky model.
Sky[fiber,i] = R[fiber,i,j] Flux[j]
Input flux are expected to be flatfielded!
We don't check this in this routine.
Args:
frame : Frame object, which includes attributes
- wave : 1D wavelength grid in Angstroms
- flux : 2D flux[nspec, nwave] density
- ivar : 2D inverse variance of flux
- mask : 2D inverse mask flux (0=good)
- resolution_data : 3D[nspec, ndiag, nwave] (only sky fibers)
nsig_clipping : [optional] sigma clipping value for outlier rejection
Optional:
max_iterations : int , number of iterations
model_ivar : replace ivar by a model to avoid bias due to correlated flux and ivar. this has a negligible effect on sims.
add_variance : evaluate calibration error and add this to the sky model variance
returns SkyModel object with attributes wave, flux, ivar, mask
"""
log=get_logger()
log.info("starting")
# Grab sky fibers on this frame
skyfibers = np.where(frame.fibermap['OBJTYPE'] == 'SKY')[0]
assert np.max(skyfibers) < 500 #- indices, not fiber numbers
nwave=frame.nwave
nfibers=len(skyfibers)
current_ivar=frame.ivar[skyfibers].copy()*(frame.mask[skyfibers]==0)
flux = frame.flux[skyfibers]
Rsky = frame.R[skyfibers]
input_ivar=None
if model_ivar :
log.info("use a model of the inverse variance to remove bias due to correlated ivar and flux")
input_ivar=current_ivar.copy()
median_ivar_vs_wave = np.median(current_ivar,axis=0)
median_ivar_vs_fiber = np.median(current_ivar,axis=1)
median_median_ivar = np.median(median_ivar_vs_fiber)
for f in range(current_ivar.shape[0]) :
threshold=0.01
current_ivar[f] = median_ivar_vs_fiber[f]/median_median_ivar * median_ivar_vs_wave
# keep input ivar for very low weights
ii=(input_ivar[f]<=(threshold*median_ivar_vs_wave))
#log.info("fiber {} keep {}/{} original ivars".format(f,np.sum(ii),current_ivar.shape[1]))
current_ivar[f][ii] = input_ivar[f][ii]
sqrtw=np.sqrt(current_ivar)
sqrtwflux=sqrtw*flux
chi2=np.zeros(flux.shape)
nout_tot=0
for iteration in range(max_iterations) :
# the matrix A is 1/2 of the second derivative of the chi2 with respect to the parameters
# A_ij = 1/2 d2(chi2)/di/dj
# A_ij = sum_fiber sum_wave_w ivar[fiber,w] d(model)/di[fiber,w] * d(model)/dj[fiber,w]
# the vector B is 1/2 of the first derivative of the chi2 with respect to the parameters
# B_i = 1/2 d(chi2)/di
# B_i = sum_fiber sum_wave_w ivar[fiber,w] d(model)/di[fiber,w] * (flux[fiber,w]-model[fiber,w])
# the model is model[fiber]=R[fiber]*sky
# and the parameters are the unconvolved sky flux at the wavelength i
# so, d(model)/di[fiber,w] = R[fiber][w,i]
# this gives
# A_ij = sum_fiber sum_wave_w ivar[fiber,w] R[fiber][w,i] R[fiber][w,j]
# A = sum_fiber ( diag(sqrt(ivar))*R[fiber] ) ( diag(sqrt(ivar))* R[fiber] )^t
# A = sum_fiber sqrtwR[fiber] sqrtwR[fiber]^t
# and
# B = sum_fiber sum_wave_w ivar[fiber,w] R[fiber][w] * flux[fiber,w]
# B = sum_fiber sum_wave_w sqrt(ivar)[fiber,w]*flux[fiber,w] sqrtwR[fiber,wave]
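    # In matrix form, with W[fiber] = diag(ivar[fiber]) :
    #   A = sum_fiber R[fiber]^T W[fiber] R[fiber]
    #   B = sum_fiber R[fiber]^T W[fiber] flux[fiber]
    # and the deconvolved sky is the solution of A * sky = B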
#A=scipy.sparse.lil_matrix((nwave,nwave)).tocsr()
A=np.zeros((nwave,nwave))
B=np.zeros((nwave))
# diagonal sparse matrix with content = sqrt(ivar)*flat of a given fiber
SD=scipy.sparse.lil_matrix((nwave,nwave))
# loop on fiber to handle resolution
for fiber in range(nfibers) :
if fiber%10==0 :
log.info("iter %d sky fiber %d/%d"%(iteration,fiber,nfibers))
R = Rsky[fiber]
# diagonal sparse matrix with content = sqrt(ivar)
SD.setdiag(sqrtw[fiber])
sqrtwR = SD*R # each row r of R is multiplied by sqrtw[r]
A += (sqrtwR.T*sqrtwR).todense()
B += sqrtwR.T*sqrtwflux[fiber]
log.info("iter %d solving"%iteration)
w = A.diagonal()>0
A_pos_def = A[w,:]
A_pos_def = A_pos_def[:,w]
parameters = B*0
try:
parameters[w]=cholesky_solve(A_pos_def,B[w])
except:
log.info("cholesky failed, trying svd in iteration {}".format(iteration))
parameters[w]=np.linalg.lstsq(A_pos_def,B[w])[0]
log.info("iter %d compute chi2"%iteration)
for fiber in range(nfibers) :
        # the parameters are directly the unconvolved sky flux
# so we simply have to reconvolve it
fiber_convolved_sky_flux = Rsky[fiber].dot(parameters)
chi2[fiber]=current_ivar[fiber]*(flux[fiber]-fiber_convolved_sky_flux)**2
log.info("rejecting")
nout_iter=0
if iteration<1 :
# only remove worst outlier per wave
# apply rejection iteratively, only one entry per wave among fibers
# find waves with outlier (fastest way)
nout_per_wave=np.sum(chi2>nsig_clipping**2,axis=0)
selection=np.where(nout_per_wave>0)[0]
for i in selection :
worst_entry=np.argmax(chi2[:,i])
current_ivar[worst_entry,i]=0
sqrtw[worst_entry,i]=0
sqrtwflux[worst_entry,i]=0
nout_iter += 1
else :
# remove all of them at once
bad=(chi2>nsig_clipping**2)
current_ivar *= (bad==0)
sqrtw *= (bad==0)
sqrtwflux *= (bad==0)
nout_iter += np.sum(bad)
nout_tot += nout_iter
sum_chi2=float(np.sum(chi2))
ndf=int(np.sum(chi2>0)-nwave)
chi2pdf=0.
if ndf>0 :
chi2pdf=sum_chi2/ndf
log.info("iter #%d chi2=%f ndf=%d chi2pdf=%f nout=%d"%(iteration,sum_chi2,ndf,chi2pdf,nout_iter))
if nout_iter == 0 :
break
log.info("nout tot=%d"%nout_tot)
# we know have to compute the sky model for all fibers
# and propagate the uncertainties
# no need to restore the original ivar to compute the model errors when modeling ivar
# the sky inverse variances are very similar
log.info("compute the parameter covariance")
# we may have to use a different method to compute this
# covariance
try :
parameter_covar=cholesky_invert(A)
# the above is too slow
# maybe invert per block, sandwich by R
except np.linalg.linalg.LinAlgError :
log.warning("cholesky_solve_and_invert failed, switching to np.linalg.lstsq and np.linalg.pinv")
parameter_covar = np.linalg.pinv(A)
log.info("compute mean resolution")
# we make an approximation for the variance to save CPU time
# we use the average resolution of all fibers in the frame:
mean_res_data=np.mean(frame.resolution_data,axis=0)
Rmean = Resolution(mean_res_data)
log.info("compute convolved sky and ivar")
# The parameters are directly the unconvolved sky
# First convolve with average resolution :
convolved_sky_covar=Rmean.dot(parameter_covar).dot(Rmean.T.todense())
# and keep only the diagonal
    convolved_sky_var= np.diagonal(convolved_sky_covar)
modelPath="../model/01/01.h5"
testSetPath="../data/mytest"
import keras
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as pyplot
import numpy as np
import common
import preprocessing
import batch
model = load_model(modelPath)
test_generator = batch.batchGenerator.flow_from_directory(
directory=testSetPath,
target_size=(common.imageSize, common.imageSize),
color_mode='grayscale',
shuffle=True,
batch_size=1)
filenames = test_generator.filenames
nb_samples = len(filenames)
fig=pyplot.figure()
columns = 4
rows = 4
for i in range(1, columns*rows):
x_batch, y_batch = test_generator.next()
name = model.predict(x_batch)
name = np.argmax(name, axis=-1)
true_name = y_batch
true_name = | np.argmax(true_name, axis=-1) | numpy.argmax |
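# --- Hedged illustration (standalone; not from the original script) ---
# One way the 4x4 figure set up above could be filled in with matplotlib;
# the image shape and class indices below are stand-ins, not real data.
import numpy as np
import matplotlib.pyplot as plt
images = np.random.rand(16, 28, 28)          # stand-in for the x_batch images
pred = np.random.randint(0, 10, size=16)     # stand-in for argmax predictions
true = np.random.randint(0, 10, size=16)     # stand-in for argmax ground truth
fig = plt.figure()
for i in range(1, 17):
    fig.add_subplot(4, 4, i)
    plt.imshow(images[i - 1], cmap='gray')
    plt.title("pred {} / true {}".format(pred[i - 1], true[i - 1]), fontsize=8)
    plt.axis('off')
plt.show()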
#! /usr/bin/env python3
# Copyright(c) 2018 Senscape Corporation.
# License: Apache 2.0
import numpy, cv2, sys
sys.path.append('../../../')
import hsapi as hs
WEBCAM = False # Set to True to use a webcam
gender_list = ['Male', 'Female']
age_list = ['0-2','4-6','8-12','15-20','25-32','38-43','48-53','60-100']
fc_net = hs.FaceDetector(zoom = True, verbose = 2, thresh=0.55)
age_net = hs.AgeDetector(device=fc_net.device)
gender_net = hs.GenderDetector(device=fc_net.device)
if WEBCAM: video_capture = cv2.VideoCapture(0)
try:
while True:
if WEBCAM: _, img = video_capture.read()
else: img = None
result = fc_net.run(img)
img = fc_net.plot(result)
for i in range(len(result[1])):
image = result[0]
face = image[int(result[1][i][3]):int(result[1][i][5]), int(result[1][i][2]):int(result[1][i][4]), :]
out_age = age_net.run(face)[1]
age_cls = numpy.argmax(out_age)
age_out = age_list[age_cls]
cv2.putText(image, age_out, (int(result[1][i][2]), int(result[1][i][3])), cv2.FONT_HERSHEY_SIMPLEX,1.0, (0, 255, 255), 2)
            out_gender = gender_net.run(face)[1]
gender_cls = | numpy.argmax(out_gender) | numpy.argmax |
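# --- Hedged sketch (mirrors the age block above; not from the original) ---
# The gender overlay would typically continue like this; the text position
# (bottom-left corner of the detected face) is an assumption.
#             gender_out = gender_list[gender_cls]
#             cv2.putText(image, gender_out,
#                         (int(result[1][i][2]), int(result[1][i][5])),
#                         cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)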
import numpy as np
#Questions on NumPy Sorting and Searching
# How to get the indices of the sorted array using NumPy in Python?
#argsort Returns the indices that would sort an array.
np.argsort(np.array([3, 1, 2]))
# Finding the k smallest values of a NumPy array
#sort Return a sorted copy of an array.
k = 4
arr = np.array([23, 12, 1, 3, 4, 5, 6])
arr1 = np.sort(arr)
arr1[:k]
# How to get the n-largest values of an array using NumPy?
k = 4
arr = | np.array([23, 12, 1, 3, 4, 5, 6]) | numpy.array |
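# A possible continuation of the n-largest question above (illustrative):
# sort ascending and slice the last k values, or use argpartition to avoid
# a full sort.
arr = np.array([23, 12, 1, 3, 4, 5, 6])
k = 4
largest_sorted = np.sort(arr)[-k:]                       # k largest, ascending
largest_unordered = arr[np.argpartition(arr, -k)[-k:]]   # k largest, any order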
#!/usr/bin/python
# -*- coding: utf-8 -*-
# # PyKOALA: KOALA data processing and analysis
# by <NAME> and <NAME>
# Extra work by <NAME> (MQ PACE student)
# Plus Taylah and Matt (sky subtraction)
from __future__ import absolute_import, division, print_function
from past.utils import old_div
version = "Version 0.72 - 13th February 2020"
import copy
import os.path as pth
import sys
from astropy.convolution import Gaussian2DKernel, interpolate_replace_nans
from astropy.io import fits
from astropy.wcs import WCS
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
from scipy import interpolate
from scipy.ndimage.interpolation import shift
import scipy.signal as sig
from .constants import C, PARSEC as pc
from .utils.cube_alignment import offset_between_cubes, compare_cubes, align_n_cubes
from .utils.flux import search_peaks, fluxes, dfluxes, substract_given_gaussian
from .utils.io import read_table, save_rss_fits, save_fits_file
from .utils.moffat import fit_Moffat
from .utils.plots import (
plot_redshift_peaks, plot_weights_for_getting_smooth_spectrum,
plot_correction_in_fibre_p_fibre, plot_suspicious_fibres_graph, plot_skyline_5578,
plot_offset_between_cubes, plot_response, plot_telluric_correction, plot_plot
)
from .utils.sky_spectrum import scale_sky_spectrum, median_filter
from .utils.spectrum_tools import rebin_spec_shift, smooth_spectrum
from .utils.utils import (
FitsExt, FitsFibresIFUIndex, coord_range, median_absolute_deviation,
)
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
# -----------------------------------------------------------------------------
# Define constants
# -----------------------------------------------------------------------------
DATA_PATH = pth.join(pth.dirname(__file__), "data")
# -----------------------------------------------------------------------------
# Define COLOUR scales
# -----------------------------------------------------------------------------
fuego_color_map = colors.LinearSegmentedColormap.from_list(
"fuego",
(
(0.25, 0, 0),
(0.5, 0, 0),
(1, 0, 0),
(1, 0.5, 0),
(1, 0.75, 0),
(1, 1, 0),
(1, 1, 1),
),
N=256,
gamma=1.0,
)
fuego_color_map.set_bad("lightgray")
plt.register_cmap(cmap=fuego_color_map)
projo = [0.25, 0.5, 1, 1.0, 1.00, 1, 1]
pverde = [0.00, 0.0, 0, 0.5, 0.75, 1, 1]
pazul = [0.00, 0.0, 0, 0.0, 0.00, 0, 1]
# -----------------------------------------------------------------------------
# RSS CLASS
# -----------------------------------------------------------------------------
class RSS(object):
"""
Collection of row-stacked spectra (RSS).
Attributes
----------
wavelength: np.array(float)
Wavelength, in Angstroms.
intensity: np.array(float)
Intensity :math:`I_\lambda` per unit wavelength.
variance: np.array(float)
Variance :math:`\sigma^2_\lambda` per unit wavelength
(note the square in the definition of the variance).
"""
# -----------------------------------------------------------------------------
def __init__(self):
self.description = "Undefined row-stacked spectra (RSS)"
self.n_spectra = 0
self.n_wave = 0
self.wavelength = np.zeros((0))
self.intensity = np.zeros((0, 0))
self.intensity_corrected = self.intensity
self.variance = np.zeros_like(self.intensity)
self.RA_centre_deg = 0.0
self.DEC_centre_deg = 0.0
self.offset_RA_arcsec = np.zeros((0))
self.offset_DEC_arcsec = np.zeros_like(self.offset_RA_arcsec)
self.ALIGNED_RA_centre_deg = 0.0 # Added by ANGEL, 6 Sep
self.ALIGNED_DEC_centre_deg = 0.0 # Added by ANGEL, 6 Sep
self.relative_throughput = np.ones((0)) # Added by ANGEL, 16 Sep
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def compute_integrated_fibre(
self,
list_spectra="all",
valid_wave_min=0,
valid_wave_max=0,
min_value=0.1,
plot=False,
title=" - Integrated values",
warnings=True,
text="...",
correct_negative_sky=False,
):
"""
Compute the integrated flux of a fibre in a particular range, valid_wave_min to valid_wave_max.
Parameters
----------
list_spectra: float (default "all")
list with the number of fibres for computing integrated value
if using "all" it does all fibres
valid_wave_min, valid_wave_max : float
the integrated flux value will be computed in the range [valid_wave_min, valid_wave_max]
            (default = 0; if both are 0 we use [self.valid_wave_min, self.valid_wave_max])
        min_value: float (default 0.1)
For values lower than min_value, we set them as min_value
plot : Boolean (default = False)
Plot
title : string
Title for the plot
text: string
A bit of extra text
        warnings : Boolean (default = True)
Write warnings, e.g. when the integrated flux is negative
correct_negative_sky : Boolean (default = False)
Corrects negative values making 0 the integrated flux of the lowest fibre
Example
----------
integrated_fibre_6500_6600 = star1r.compute_integrated_fibre(valid_wave_min=6500, valid_wave_max=6600,
title = " - [6500,6600]", plot = True)
"""
print("\n Computing integrated fibre values {}".format(text))
if list_spectra == "all":
list_spectra = list(range(self.n_spectra))
if valid_wave_min == 0:
valid_wave_min = self.valid_wave_min
if valid_wave_max == 0:
valid_wave_max = self.valid_wave_max
self.integrated_fibre = np.zeros(self.n_spectra)
region = np.where(
(self.wavelength > valid_wave_min) & (self.wavelength < valid_wave_max)
)
waves_in_region = len(region[0])
n_negative_fibres = 0
negative_fibres = []
for i in range(self.n_spectra):
self.integrated_fibre[i] = np.nansum(self.intensity_corrected[i, region])
if self.integrated_fibre[i] < 0:
if warnings:
print(
" WARNING: The integrated flux in fibre {:4} is negative, flux/wave = {:10.2f}, (probably sky), CHECK !".format(
i, self.integrated_fibre[i]/waves_in_region
))
n_negative_fibres = n_negative_fibres + 1
# self.integrated_fibre[i] = min_value
negative_fibres.append(i)
if len(negative_fibres) != 0:
print("\n> Number of fibres with integrated flux < 0 : {:4}, that is the {:5.2f} % of the total !".format(
n_negative_fibres, n_negative_fibres * 100.0 / self.n_spectra
))
negative_fibres_sorted = []
integrated_intensity_sorted = np.argsort(
self.integrated_fibre/waves_in_region
)
for fibre_ in range(n_negative_fibres):
negative_fibres_sorted.append(integrated_intensity_sorted[fibre_])
# print "\n> Checking results using",n_negative_fibres,"fibres with the lowest integrated intensity"
# print " which are :",negative_fibres_sorted
if correct_negative_sky:
min_sky_value = self.integrated_fibre[negative_fibres_sorted[0]]
min_sky_value_per_wave = min_sky_value/waves_in_region
print(
"\n> Correcting negative values making 0 the integrated flux of the lowest fibre, which is {:4} with {:10.2f} counts/wave".format(
negative_fibres_sorted[0], min_sky_value_per_wave
))
# print self.integrated_fibre[negative_fibres_sorted[0]]
self.integrated_fibre = self.integrated_fibre - min_sky_value
for i in range(self.n_spectra):
self.intensity_corrected[i] = (
self.intensity_corrected[i] - min_sky_value_per_wave
)
else:
print(
"\n> Adopting integrated flux = {:5.2f} for all fibres with negative integrated flux (for presentation purposes)".format(
min_value
))
for i in negative_fibres_sorted:
self.integrated_fibre[i] = min_value
# for i in range(self.n_spectra):
# if self.integrated_fibre[i] < 0:
# if warnings: print " WARNING: The integrated flux in fibre {:4} STILL is negative, flux/wave = {:10.2f}, (probably sky), CHECK !".format(i,self.integrated_fibre[i]/waves_in_region)
if plot:
# print"\n Plotting map with integrated values:"
self.RSS_map(
self.integrated_fibre,
norm=colors.PowerNorm(gamma=1.0 / 4.0),
title=title,
)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def identify_el(
self,
high_fibres=10,
brightest_line="Ha",
cut=1.5,
fibre=0,
broad=1.0,
verbose=True,
plot=True,
):
"""
Identify fibres with highest intensity (high_fibres=10).
Add all in a single spectrum.
Identify emission features.
These emission features should be those expected in all the cube!
Also, choosing fibre=number, it identifies el in a particular fibre.
Parameters
----------
high_fibres: float (default 10)
use the high_fibres highest intensity fibres for identifying
brightest_line : string (default "Ha")
string name with the emission line that is expected to be the brightest in integrated spectrum
cut: float (default 1.5)
The peak has to have a cut higher than cut to be considered as emission line
fibre: integer (default 0)
If fibre is given, it identifies emission lines in the given fibre
broad: float (default 1.0)
Broad (FWHM) of the expected emission lines
verbose : boolean (default = True)
Write results
plot : boolean (default = False)
Plot results
Example
----------
self.el=self.identify_el(high_fibres=10, brightest_line = "Ha",
cut=2., verbose=True, plot=True, fibre=0, broad=1.5)
"""
if fibre == 0:
integrated_intensity_sorted = np.argsort(self.integrated_fibre)
region = []
for fibre in range(high_fibres):
region.append(integrated_intensity_sorted[-1 - fibre])
if verbose:
print("\n> Identifying emission lines using the {} fibres with the highest integrated intensity".format(high_fibres))
print(" which are : {}".format(region))
combined_high_spectrum = np.nansum(self.intensity_corrected[region], axis=0)
else:
combined_high_spectrum = self.intensity_corrected[fibre]
if verbose:
print("\n> Identifying emission lines in fibre {}".format(fibre))
# Search peaks
peaks, peaks_name, peaks_rest, continuum_limits = search_peaks(
self.wavelength,
combined_high_spectrum,
plot=plot,
cut=cut,
brightest_line=brightest_line,
verbose=False,
)
p_peaks_l = []
p_peaks_fwhm = []
# Do Gaussian fit and provide center & FWHM (flux could be also included, not at the moment as not abs. flux-cal done)
if verbose:
print("\n Emission lines identified:")
for eline in range(len(peaks)):
lowlow = continuum_limits[0][eline]
lowhigh = continuum_limits[1][eline]
highlow = continuum_limits[2][eline]
highhigh = continuum_limits[3][eline]
resultado = fluxes(
self.wavelength,
combined_high_spectrum,
peaks[eline],
verbose=False,
broad=broad,
lowlow=lowlow,
lowhigh=lowhigh,
highlow=highlow,
highhigh=highhigh,
plot=plot,
fcal=False,
)
p_peaks_l.append(resultado[1])
p_peaks_fwhm.append(resultado[5])
if verbose:
print(" {:3}. {:7s} {:8.2f} centered at {:8.2f} and FWHM = {:6.2f}".format(
eline + 1,
peaks_name[eline],
peaks_rest[eline],
p_peaks_l[eline],
p_peaks_fwhm[eline],
))
return [peaks_name, peaks_rest, p_peaks_l, p_peaks_fwhm]
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def correct_high_cosmics_and_defects(
self,
step=50,
correct_high_cosmics=False,
fibre_p=0,
remove_5578=False, # if fibre_p=fibre plots the corrections in that fibre
clip_high=100,
warnings=False,
plot=True,
plot_suspicious_fibres=True,
verbose=False,
fig_size=12,
):
"""
Task for correcting high cosmics and CCD defects using median values of nearby pixels.
2dFdr corrects for (the majority) of the cosmic rays, usually correct_high_cosmics = False.
ANGEL COMMENT: Check, probably can be improved using MATT median running + plotting outside
Parameters
----------
        correct_high_cosmics: boolean (default = False)
            If False, correct ONLY CCD defects
        fibre_p: integer (default = 0)
            Plots the corrections in fibre fibre_p
        remove_5578: boolean (default = False)
            Removes skyline 5578 (blue spectrum) using Gaussian fit
            CHECK: This also MODIFIES the throughput correction, correcting for flux_5578_medfilt/median_flux_5578_medfilt
step: integer (default = 50)
Number of points for calculating median value
clip_high : float (default = 100)
Minimum value of flux/median in a pixel to be consider as a cosmic
if s[wave] > clip_high*fit_median[wave] -> IT IS A COSMIC
verbose: boolean (default = False)
Write results
warnings: boolean (default = False)
Write warnings
plot: boolean (default = False)
Plot results
plot_suspicious_fibres: boolean (default = False)
Plots fibre(s) that could have a cosmic left (but it could be OK)
IF self.integrated_fibre[fibre]/median_running[fibre] > max_value -> SUSPICIOUS FIBRE
Example
----------
self.correct_high_cosmics_and_defects(correct_high_cosmics=False, step=40, remove_5578 = True,
clip_high=120, plot_suspicious_fibres=True, warnings=True, verbose=False, plot=True)
"""
print("\n> Correcting for high cosmics and CCD defects...")
wave_min = self.valid_wave_min # CHECK ALL OF THIS...
wave_max = self.valid_wave_max
wlm = self.wavelength
if correct_high_cosmics == False:
print(" Only CCD defects (nan and negative values) are considered.")
else:
print(" Using clip_high = {} for high cosmics".format(clip_high))
print(" IMPORTANT: Be sure that any emission or sky line is fainter than clip_high/continuum !! ")
flux_5578 = [] # For correcting sky line 5578 if requested
if wave_min < 5578 and remove_5578:
print(" Sky line 5578 will be removed using a Gaussian fit...")
integrated_fibre_uncorrected = self.integrated_fibre
print(" ")
output_every_few = np.sqrt(self.n_spectra) + 1
next_output = -1
max_ratio_list = []
for fibre in range(self.n_spectra):
if fibre > next_output:
sys.stdout.write("\b" * 30)
sys.stdout.write(
" Cleaning... {:5.2f}% completed".format(
fibre * 100.0 / self.n_spectra
)
)
sys.stdout.flush()
next_output = fibre + output_every_few
s = self.intensity_corrected[fibre]
running_wave = []
running_step_median = []
cuts = np.int(self.n_wave/step) # using np.int instead of // for improved readability
for cut in range(cuts):
if cut == 0:
next_wave = wave_min
else:
next_wave = np.nanmedian(
(wlm[np.int(cut * step)] + wlm[np.int((cut + 1) * step)])/2
)
if next_wave < wave_max:
running_wave.append(next_wave)
# print("SEARCHFORME1", step, running_wave[cut])
region = np.where(
(wlm > running_wave[cut] - np.int(step/2)) # step/2 doesn't need to be an int, but probably
& (wlm < running_wave[cut] + np.int(step/2)) # want it to be so the cuts are uniform.
)
# print('SEARCHFORME3', region)
running_step_median.append(
np.nanmedian(self.intensity_corrected[fibre, region])
)
running_wave.append(wave_max)
region = np.where((wlm > wave_max - step) & (wlm < wave_max))
running_step_median.append(
np.nanmedian(self.intensity_corrected[fibre, region])
)
for i in range(len(running_step_median)):
if np.isnan(running_step_median[i]) == True:
if i < 10:
running_step_median[i] = np.nanmedian(running_step_median[0:9])
if i > 10:
running_step_median[i] = np.nanmedian(
running_step_median[-9:-1]
)
a7x, a6x, a5x, a4x, a3x, a2x, a1x, a0x = np.polyfit(
running_wave, running_step_median, 7
)
fit_median = (
a0x
+ a1x * wlm
+ a2x * wlm ** 2
+ a3x * wlm ** 3
+ a4x * wlm ** 4
+ a5x * wlm ** 5
+ a6x * wlm ** 6
+ a7x * wlm ** 7
)
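            # (Equivalent, more compact form of the two steps above:
            #  fit_median = np.polyval(np.polyfit(running_wave,
            #                                     running_step_median, 7), wlm))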
if fibre == fibre_p:
espectro_old = copy.copy(self.intensity_corrected[fibre, :])
espectro_fit_median = fit_median
for wave in range(self.n_wave): # (1,self.n_wave-3):
if s[wave] < 0:
s[wave] = fit_median[wave] # Negative values for median values
if np.isnan(s[wave]) == True:
s[wave] = fit_median[wave] # nan for median value
if (
correct_high_cosmics and fit_median[wave] > 0
): # NEW 15 Feb 2019, v7.1 2dFdr takes well cosmic rays
if s[wave] > clip_high * fit_median[wave]:
if verbose:
print(" "
"CLIPPING HIGH = {} in fibre {} w = {} value= {} v/median= {}".format(clip_high, fibre, wlm[wave], s[wave], s[wave]/fit_median[wave])) # " median=",fit_median[wave]
s[wave] = fit_median[wave]
if fibre == fibre_p:
espectro_new = copy.copy(s)
max_ratio_list.append(np.nanmax(s/fit_median))
self.intensity_corrected[fibre, :] = s
# Removing Skyline 5578 using Gaussian fit if requested
if wave_min < 5578 and remove_5578:
resultado = fluxes(
wlm, s, 5578, plot=False, verbose=False
) # fmin=-5.0E-17, fmax=2.0E-16,
# resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, spectrum ]
self.intensity_corrected[fibre] = resultado[11]
flux_5578.append(resultado[3])
sys.stdout.write("\b" * 30)
sys.stdout.write(" Cleaning... 100.00 completed")
sys.stdout.flush()
max_ratio = np.nanmax(max_ratio_list)
print("\n Maximum value found of flux/continuum = {}".format(max_ratio))
if correct_high_cosmics:
print(" Recommended value for clip_high = {} , here we used {}".format(int(max_ratio + 1), clip_high))
# Plot correction in fibre p_fibre
if fibre_p > 0:
plot_correction_in_fibre_p_fibre(fig_size,
wlm,
espectro_old,
espectro_fit_median,
espectro_new,
fibre_p,
clip_high)
# print" "
if correct_high_cosmics == False:
text = "for spectra corrected for defects..."
title = " - Throughput + CCD defects corrected"
else:
text = "for spectra corrected for high cosmics and defects..."
title = " - Throughput + high-C & D corrected"
self.compute_integrated_fibre(
valid_wave_min=wave_min,
valid_wave_max=wave_max,
text=text,
plot=plot,
title=title,
)
if plot:
print(" Plotting integrated fibre values before and after correcting for high cosmics and CCD defects:\n")
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.plot(integrated_fibre_uncorrected, "r", label="Uncorrected", alpha=0.5)
plt.ylabel("Integrated Flux")
plt.xlabel("Fibre")
plt.ylim(
[np.nanmin(self.integrated_fibre), np.nanmax(self.integrated_fibre)]
)
plt.title(self.description)
# Check if integrated value is high
median_running = []
step_f = 10
max_value = 2.0 # For stars this is not accurate, as i/m might be between 5 and 100 in the fibres with the star
skip = 0
suspicious_fibres = []
for fibre in range(self.n_spectra):
if fibre < step_f:
median_value = np.nanmedian(
self.integrated_fibre[0: np.int(step_f)]
)
skip = 1
if fibre > self.n_spectra - step_f:
median_value = np.nanmedian(
self.integrated_fibre[-1 - np.int(step_f): -1]
)
skip = 1
if skip == 0:
median_value = np.nanmedian(
self.integrated_fibre[
                        fibre - np.int(step_f/2): fibre + np.int(step_f/2) # np.int is used instead of // for readability
]
)
median_running.append(median_value)
if self.integrated_fibre[fibre]/median_running[fibre] > max_value:
print(" Fibre {} has a integrated/median ratio of {} -> Might be a cosmic left!".format(fibre, self.integrated_fibre[fibre]/median_running[fibre]))
label = np.str(fibre)
plt.axvline(x=fibre, color="k", linestyle="--")
plt.text(fibre, self.integrated_fibre[fibre] / 2.0, label)
suspicious_fibres.append(fibre)
skip = 0
plt.plot(self.integrated_fibre, label="Corrected", alpha=0.6)
plt.plot(median_running, "k", label="Median", alpha=0.6)
plt.legend(frameon=False, loc=1, ncol=3)
plt.minorticks_on()
#plt.show()
#plt.close()
if plot_suspicious_fibres == True and len(suspicious_fibres) > 0:
# Plotting suspicious fibres..
figures = plot_suspicious_fibres_graph(
self,
suspicious_fibres,
fig_size,
wave_min,
wave_max,
intensity_corrected_fiber=self.intensity_corrected)
if remove_5578 and wave_min < 5578:
print(" Skyline 5578 has been removed. Checking throughput correction...")
flux_5578_medfilt = sig.medfilt(flux_5578, np.int(5))
median_flux_5578_medfilt = np.nanmedian(flux_5578_medfilt)
extra_throughput_correction = flux_5578_medfilt/median_flux_5578_medfilt
# plt.plot(extra_throughput_correction)
# plt.show()
# plt.close()
if plot:
fig = plot_skyline_5578(fig_size, flux_5578, flux_5578_medfilt)
print(" Variations in throughput between {} and {} ".format(
np.nanmin(extra_throughput_correction), np.nanmax(extra_throughput_correction)
))
print(" Applying this extra throughtput correction to all fibres...")
for i in range(self.n_spectra):
self.intensity_corrected[i, :] = (
self.intensity_corrected[i, :]/extra_throughput_correction[i]
)
self.relative_throughput = (
self.relative_throughput * extra_throughput_correction
)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def clean_sky_residuals(
self,
extra_w=1.3,
step=25,
dclip=3.0,
wave_min=0,
wave_max=0,
verbose=False,
plot=False,
fig_size=12,
fibre=0,
):
"""
        This task HAS TO BE USED WITH EXTREME CARE
        as it has not been properly tested!!!
        It CAN DELETE REAL (faint) ABSORPTION/EMISSION features in spectra!!!
        Use the "1dfit" option for getting a better sky subtraction
ANGEL is keeping this here just in case it is eventually useful...
Parameters
----------
extra_w
step
dclip
wave_min
wave_max
verbose
plot
fig_size
fibre
Returns
-------
"""
# verbose=True
wlm = self.wavelength
if wave_min == 0:
wave_min = self.valid_wave_min
if wave_max == 0:
wave_max = self.valid_wave_max
# Exclude ranges with emission lines if needed
exclude_ranges_low = []
exclude_ranges_high = []
exclude_ranges_low_ = []
exclude_ranges_high_ = []
if self.el[1][0] != 0:
# print " Emission lines identified in the combined spectrum:"
for el in range(len(self.el[0])):
# print " {:3}. - {:7s} {:8.2f} centered at {:8.2f} and FWHM = {:6.2f}".format(el+1,self.el[0][el],self.el[1][el],self.el[2][el],self.el[3][el])
if (
self.el[0][el] == "Ha" or self.el[1][el] == 6583.41
): # Extra extend for Ha and [N II] 6583
extra = extra_w * 1.6
else:
extra = extra_w
exclude_ranges_low_.append(
self.el[2][el] - self.el[3][el] * extra
) # center-1.3*FWHM/2
exclude_ranges_high_.append(
self.el[2][el] + self.el[3][el] * extra
) # center+1.3*FWHM/2
# print self.el[0][el],self.el[1][el],self.el[2][el],self.el[3][el],exclude_ranges_low[el],exclude_ranges_high[el],extra
# Check overlapping ranges
skip_next = 0
for i in range(len(exclude_ranges_low_) - 1):
if skip_next == 0:
if exclude_ranges_high_[i] > exclude_ranges_low_[i + 1]:
# Ranges overlap, now check if next range also overlaps
if i + 2 < len(exclude_ranges_low_):
if exclude_ranges_high_[i + 1] > exclude_ranges_low_[i + 2]:
exclude_ranges_low.append(exclude_ranges_low_[i])
exclude_ranges_high.append(exclude_ranges_high_[i + 2])
skip_next = 2
if verbose:
print("Double overlap {} {}".format(exclude_ranges_low[-1], exclude_ranges_high[-1]))
else:
exclude_ranges_low.append(exclude_ranges_low_[i])
exclude_ranges_high.append(exclude_ranges_high_[i + 1])
skip_next = 1
if verbose:
print("Overlap {} {}".format(exclude_ranges_low[-1], exclude_ranges_high[-1]))
else:
exclude_ranges_low.append(exclude_ranges_low_[i])
exclude_ranges_high.append(exclude_ranges_high_[i])
if verbose:
print("Overlap {} {}".format(exclude_ranges_low[-1], exclude_ranges_high[-1]))
else:
if skip_next == 1:
skip_next = 0
if skip_next == 2:
skip_next = 1
if verbose:
print(exclude_ranges_low_[i], exclude_ranges_high_[i], skip_next)
if skip_next == 0:
exclude_ranges_low.append(exclude_ranges_low_[-1])
exclude_ranges_high.append(exclude_ranges_high_[-1])
if verbose:
print(exclude_ranges_low_[-1], exclude_ranges_high_[-1], skip_next)
# print "\n> Cleaning sky residuals in range [",wave_min,",",wave_max,"] avoiding emission lines... "
print("\n> Cleaning sky residuals avoiding emission lines... ")
if verbose:
print(" Excluded ranges using emission line parameters:")
for i in range(len(exclude_ranges_low_)):
print(exclude_ranges_low_[i], exclude_ranges_high_[i])
print(" Excluded ranges considering overlaps: ")
for i in range(len(exclude_ranges_low)):
print(exclude_ranges_low[i], exclude_ranges_high[i])
print(" ")
else:
exclude_ranges_low.append(20000.0)
exclude_ranges_high.append(30000.0)
print("\n> Cleaning sky residuals...")
say_status = 0
if fibre != 0:
f_i = fibre
f_f = fibre + 1
print(" Checking fibre {} (only this fibre is corrected, use fibre = 0 for all)...".format(fibre))
plot = True
else:
f_i = 0
f_f = self.n_spectra
for fibre in range(f_i, f_f): # (self.n_spectra):
if fibre == say_status:
print(" Checking fibre {} ...".format(fibre))
say_status = say_status + 100
s = self.intensity_corrected[fibre]
fit_median = smooth_spectrum(
wlm,
s,
step=step,
wave_min=wave_min,
wave_max=wave_max,
weight_fit_median=1.0,
plot=False,
)
old = []
if plot:
for i in range(len(s)):
old.append(s[i])
disp = s - fit_median
dispersion = np.nanmedian(np.abs(disp))
rango = 0
imprimir = 1
for i in range(len(wlm) - 1):
            # if wlm[i] > wave_min and wlm[i] < wave_max : # CLEAN ONLY IN VALID WAVELENGTHS
if (
wlm[i] >= exclude_ranges_low[rango]
and wlm[i] <= exclude_ranges_high[rango]
):
if verbose == True and imprimir == 1:
print(" Excluding range [ {} , {} ] as it has an emission line".format(
exclude_ranges_low[rango], exclude_ranges_high[rango]))
if imprimir == 1:
imprimir = 0
# print " Checking ", wlm[i]," NOT CORRECTED ",s[i], s[i]-fit_median[i]
else:
if np.isnan(s[i]) == True:
s[i] = fit_median[i] # nan for median value
if (
disp[i] > dispersion * dclip
and disp[i + 1] < -dispersion * dclip
):
s[i] = fit_median[i]
                    s[i + 1] = fit_median[i + 1]  # P-Cygni-like structures
if verbose:
print(" Found P-Cygni-like feature in {}".format(wlm[i]))
if disp[i] > dispersion * dclip or disp[i] < -dispersion * dclip:
s[i] = fit_median[i]
if verbose:
print(" Clipping feature in {}".format(wlm[i]))
if wlm[i] > exclude_ranges_high[rango] and imprimir == 0:
if verbose:
print(" Checked {} End range {} {} {}".format(
wlm[i], rango,
exclude_ranges_low[rango],
exclude_ranges_high[rango]
)
)
rango = rango + 1
imprimir = 1
if rango == len(exclude_ranges_low):
rango = len(exclude_ranges_low) - 1
# print " Checking ", wlm[i]," CORRECTED IF NEEDED",s[i], s[i]-fit_median[i]
# if plot:
# for i in range(6):
# plt.figure(figsize=(fig_size, fig_size/2.5))
# plt.plot(wlm,old-fit_median, "r-", alpha=0.4)
# plt.plot(wlm,fit_median-fit_median,"g-", alpha=0.5)
# plt.axhline(y=dispersion*dclip, color="g", alpha=0.5)
# plt.axhline(y=-dispersion*dclip, color="g", alpha=0.5)
# plt.plot(wlm,s-fit_median, "b-", alpha=0.7)
#
# for exclude in range(len(exclude_ranges_low)):
# plt.axvspan(exclude_ranges_low[exclude], exclude_ranges_high[exclude], facecolor='g', alpha=0.15,zorder=3)
#
# plt.ylim(-100,200)
# if i == 0: plt.xlim(wlm[0]-10,wlm[-1]+10)
# if i == 1: plt.xlim(wlm[0],6500) # THIS IS FOR 1000R
# if i == 2: plt.xlim(6500,6700)
# if i == 3: plt.xlim(6700,7000)
# if i == 4: plt.xlim(7000,7300)
# if i == 5: plt.xlim(7300,wlm[-1])
# plt.minorticks_on()
# plt.xlabel("Wavelength [$\AA$]")
# plt.ylabel("Flux / continuum")
# plt.show()
# plt.close()
if plot:
for i in range(6):
plt.figure(figsize=(fig_size, fig_size / 2.5))
plt.plot(wlm, old, "r-", alpha=0.4)
plt.plot(wlm, fit_median, "g-", alpha=0.5)
# plt.axhline(y=dispersion*dclip, color="g", alpha=0.5)
# plt.axhline(y=-dispersion*dclip, color="g", alpha=0.5)
plt.plot(wlm, s, "b-", alpha=0.7)
for exclude in range(len(exclude_ranges_low)):
plt.axvspan(
exclude_ranges_low[exclude],
exclude_ranges_high[exclude],
facecolor="g",
alpha=0.15,
zorder=3,
)
plt.ylim(-300, 300)
if i == 0:
plt.xlim(wlm[0] - 10, wlm[-1] + 10)
if i == 1:
plt.xlim(wlm[0], 6500) # THIS IS FOR 1000R
if i == 2:
plt.xlim(6500, 6700)
if i == 3:
plt.xlim(6700, 7000)
if i == 4:
plt.xlim(7000, 7300)
if i == 5:
plt.xlim(7300, wlm[-1])
plt.minorticks_on()
plt.xlabel("Wavelength [$\AA$]")
plt.ylabel("Flux / continuum")
# plt.show()
# plt.close()
self.intensity_corrected[fibre, :] = s
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def fit_and_substract_sky_spectrum(
self,
sky,
w=1000,
spectra=1000,
        # If rebin == True, it fits all wavelengths to be at the same wavelengths as the SKY spectrum...
rebin=False,
brightest_line="Ha",
brightest_line_wavelength=6563.0,
maxima_sigma=3.0,
ymin=-50,
ymax=1000,
wmin=0,
wmax=0,
auto_scale_sky=False,
warnings=False,
verbose=False,
plot=False,
fig_size=12,
fibre=0,
):
"""
        Given a 1D sky spectrum, this task fits the
        sky lines of each spectrum individually and subtracts the sky.
        Needs the observed wavelength (brightest_line_wavelength) of the brightest emission line (brightest_line).
        w is the wavelength array,
        spectra is the 2D array of spectra.
Parameters
----------
sky
w
spectra
rebin
brightest_line
brightest_line_wavelength
maxima_sigma
ymin
ymax
wmin
wmax
auto_scale_sky
warnings
verbose
plot
fig_size
fibre
Returns
-------
"""
if brightest_line_wavelength == 6563:
print("\n\n> WARNING: This is going to FAIL as the wavelength of the brightest emission line has not been included !!!")
print(" USING brightest_line_wavelength = 6563 as default ...\n\n")
brightest_line_wavelength_rest = 6562.82
if brightest_line == "O3" or brightest_line == "O3b":
brightest_line_wavelength_rest = 5006.84
if brightest_line == "Hb" or brightest_line == "hb":
brightest_line_wavelength_rest = 4861.33
print(" Using {:3} at rest wavelength {:6.2f} identified by the user at {:6.2f} to avoid fitting emission lines...".format(
brightest_line, brightest_line_wavelength_rest, brightest_line_wavelength
))
redshift = brightest_line_wavelength/brightest_line_wavelength_rest - 1.0
if w == 1000:
w = self.wavelength
if spectra == 1000:
spectra = copy.deepcopy(self.intensity_corrected)
if wmin == 0:
wmin = w[0]
if wmax == 0:
wmax = w[-1]
# Read file with sky emission lines
sky_lines_file = "sky_lines.dat"
(
sl_center,
sl_name,
sl_fnl,
sl_lowlow,
sl_lowhigh,
sl_highlow,
sl_highhigh,
sl_lmin,
sl_lmax,
) = read_table(sky_lines_file, ["f", "s", "f", "f", "f", "f", "f", "f", "f"])
number_sl = len(sl_center)
# MOST IMPORTANT EMISSION LINES IN RED
# 6300.30 [OI] -0.263 30.0 15.0 20.0 40.0
# 6312.10 [SIII] -0.264 30.0 18.0 5.0 20.0
# 6363.78 [OI] -0.271 20.0 4.0 5.0 30.0
# 6548.03 [NII] -0.296 45.0 15.0 55.0 75.0
# 6562.82 Ha -0.298 50.0 25.0 35.0 60.0
# 6583.41 [NII] -0.300 62.0 42.0 7.0 35.0
# 6678.15 HeI -0.313 20.0 6.0 6.0 20.0
# 6716.47 [SII] -0.318 40.0 15.0 22.0 45.0
# 6730.85 [SII] -0.320 50.0 30.0 7.0 35.0
# 7065.28 HeI -0.364 30.0 7.0 7.0 30.0
# 7135.78 [ArIII] -0.374 25.0 6.0 6.0 25.0
# 7318.39 [OII] -0.398 30.0 6.0 20.0 45.0
# 7329.66 [OII] -0.400 40.0 16.0 10.0 35.0
# 7751.10 [ArIII] -0.455 30.0 15.0 15.0 30.0
# 9068.90 [S-III] -0.594 30.0 15.0 15.0 30.0
el_list_no_z = [
6300.3,
6312.10,
6363.78,
6548.03,
6562.82,
6583.41,
6678.15,
6716.47,
6730.85,
7065.28,
7135.78,
7318.39,
7329.66,
7751.1,
9068.9,
]
el_list = (redshift + 1) * np.array(el_list_no_z)
# [OI] [SIII] [OI] Ha+[NII] HeI [SII] HeI [ArIII] [OII] [ArIII] [SIII]
el_low_list_no_z = [
6296.3,
6308.1,
6359.8,
6544.0,
6674.2,
6712.5,
7061.3,
7131.8,
7314.4,
7747.1,
9063.9,
]
el_high_list_no_z = [
6304.3,
6316.1,
6367.8,
6590.0,
6682.2,
6736.9,
7069.3,
7139.8,
7333.7,
7755.1,
9073.9,
]
el_low_list = (redshift + 1) * np.array(el_low_list_no_z)
el_high_list = (redshift + 1) * np.array(el_high_list_no_z)
# Double Skylines
dsky1 = [
6257.82,
6465.34,
6828.22,
6969.70,
7239.41,
7295.81,
7711.50,
7750.56,
7853.391,
7913.57,
7773.00,
7870.05,
8280.94,
8344.613,
9152.2,
9092.7,
9216.5,
8827.112,
8761.2,
0,
] # 8760.6, 0]#
dsky2 = [
6265.50,
6470.91,
6832.70,
6978.45,
7244.43,
7303.92,
7715.50,
7759.89,
7860.662,
7921.02,
7780.43,
7879.96,
8288.34,
8352.78,
9160.9,
9102.8,
9224.8,
8836.27,
8767.7,
0,
] # 8767.2, 0] #
say_status = 0
# plot=True
# verbose = True
# warnings = True
self.wavelength_offset_per_fibre = []
self.sky_auto_scale = []
if fibre != 0:
f_i = fibre
f_f = fibre + 1
print(" Checking fibre {} (only this fibre is corrected, use fibre = 0 for all)...".format(fibre))
plot = True
verbose = True
warnings = True
else:
f_i = 0
f_f = self.n_spectra
for fibre in range(f_i, f_f): # (self.n_spectra):
if fibre == say_status:
print(" Checking fibre {:4} ... ({:6.2f} % completed) ...".format(
fibre,
fibre * 100.0 / self.n_spectra
)
)
say_status = say_status + 20
# Gaussian fits to the sky spectrum
sl_gaussian_flux = []
sl_gaussian_sigma = []
sl_gauss_center = []
skip_sl_fit = [] # True emission line, False no emission line
j_lines = 0
el_low = el_low_list[j_lines]
el_high = el_high_list[j_lines]
sky_sl_gaussian_fitted = copy.deepcopy(sky)
di = 0
if verbose:
print("\n> Performing Gaussian fitting to sky lines in sky spectrum...")
for i in range(number_sl):
if sl_center[i] > el_high:
while sl_center[i] > el_high:
j_lines = j_lines + 1
if j_lines < len(el_low_list) - 1:
el_low = el_low_list[j_lines]
el_high = el_high_list[j_lines]
# print "Change to range ",el_low,el_high
else:
el_low = w[-1] + 1
el_high = w[-1] + 2
if sl_fnl[i] == 0:
plot_fit = False
else:
plot_fit = True
if sl_center[i] == dsky1[di]:
warnings_ = False
if sl_fnl[i] == 1:
warnings_ = True
if verbose:
print(" Line {} blended with {}".format(sl_center[i], dsky2[di]))
resultado = dfluxes(
w,
sky_sl_gaussian_fitted,
sl_center[i],
dsky2[di],
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
fmin=0,
fmax=0,
broad1=2.1 * 2.355,
broad2=2.1 * 2.355,
plot=plot_fit,
verbose=False,
plot_sus=False,
fcal=False,
warnings=warnings_,
) # Broad is FWHM for Gaussian sigm a= 1,
di = di + 1
else:
resultado = fluxes(
w,
sky_sl_gaussian_fitted,
sl_center[i],
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
fmin=0,
fmax=0,
broad=2.1 * 2.355,
plot=plot_fit,
verbose=False,
plot_sus=False,
fcal=False,
warnings=warnings,
) # Broad is FWHM for Gaussian sigm a= 1,
sl_gaussian_flux.append(resultado[3])
sky_sl_gaussian_fitted = resultado[11]
sl_gauss_center.append(resultado[1])
sl_gaussian_sigma.append(resultado[5] / 2.355)
if el_low < sl_center[i] < el_high:
if verbose:
print(" SKY line {} in EMISSION LINE !".format(sl_center[i]))
skip_sl_fit.append(True)
else:
skip_sl_fit.append(False)
# print " Fitted wavelength for sky line ",sl_center[i]," : ",resultado[1]," ",resultado[5]
if plot_fit:
if verbose:
print(" Fitted wavelength for sky line {} : {} sigma = {}".format(
sl_center[i], sl_gauss_center[i], sl_gaussian_sigma[i])
)
wmin = sl_lmin[i]
wmax = sl_lmax[i]
# Gaussian fit to object spectrum
object_sl_gaussian_flux = []
object_sl_gaussian_sigma = []
ratio_object_sky_sl_gaussian = []
dif_center_obj_sky = []
spec = spectra[fibre]
object_sl_gaussian_fitted = copy.deepcopy(spec)
object_sl_gaussian_center = []
di = 0
if verbose:
print("\n> Performing Gaussian fitting to sky lines in fibre {} of object data...".format(fibre))
for i in range(number_sl):
if sl_fnl[i] == 0:
plot_fit = False
else:
plot_fit = True
if skip_sl_fit[i]:
if verbose:
print(" SKIPPING SKY LINE {} as located within the range of an emission line!".format(
sl_center[i]))
object_sl_gaussian_flux.append(
float("nan")
) # The value of the SKY SPECTRUM
object_sl_gaussian_center.append(float("nan"))
object_sl_gaussian_sigma.append(float("nan"))
dif_center_obj_sky.append(float("nan"))
else:
if sl_center[i] == dsky1[di]:
warnings_ = False
if sl_fnl[i] == 1:
warnings_ = True
if verbose:
print(" Line {} blended with {}".format(sl_center[i], dsky2[di]))
resultado = dfluxes(
w,
object_sl_gaussian_fitted,
sl_center[i],
dsky2[di],
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
fmin=0,
fmax=0,
broad1=sl_gaussian_sigma[i] * 2.355,
broad2=sl_gaussian_sigma[i] * 2.355,
plot=plot_fit,
verbose=False,
plot_sus=False,
fcal=False,
warnings=warnings_,
)
di = di + 1
if (
resultado[3] > 0
and resultado[5] / 2.355 < maxima_sigma
and resultado[13] > 0
and resultado[14] / 2.355 < maxima_sigma
): # and resultado[5] < maxima_sigma: # -100000.: #0:
use_sigma = resultado[5] / 2.355
object_sl_gaussian_flux.append(resultado[3])
object_sl_gaussian_fitted = resultado[11]
object_sl_gaussian_center.append(resultado[1])
object_sl_gaussian_sigma.append(use_sigma)
dif_center_obj_sky.append(
object_sl_gaussian_center[i] - sl_gauss_center[i]
)
else:
if verbose:
print(" Bad fit for {}! ignoring it...".format(sl_center[i]))
object_sl_gaussian_flux.append(float("nan"))
object_sl_gaussian_center.append(float("nan"))
object_sl_gaussian_sigma.append(float("nan"))
dif_center_obj_sky.append(float("nan"))
skip_sl_fit[i] = True # We don't substract this fit
else:
resultado = fluxes(
w,
object_sl_gaussian_fitted,
sl_center[i],
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
fmin=0,
fmax=0,
broad=sl_gaussian_sigma[i] * 2.355,
plot=plot_fit,
verbose=False,
plot_sus=False,
fcal=False,
warnings=warnings,
) # Broad is FWHM for Gaussian sigma= 1,
# print sl_center[i],sl_gaussian_sigma[i], resultado[5]/2.355, maxima_sigma
if (
resultado[3] > 0 and resultado[5] / 2.355 < maxima_sigma
): # and resultado[5] < maxima_sigma: # -100000.: #0:
object_sl_gaussian_flux.append(resultado[3])
object_sl_gaussian_fitted = resultado[11]
object_sl_gaussian_center.append(resultado[1])
object_sl_gaussian_sigma.append(resultado[5] / 2.355)
dif_center_obj_sky.append(
object_sl_gaussian_center[i] - sl_gauss_center[i]
)
else:
if verbose:
print(" Bad fit for {}! ignoring it...".format(sl_center[i]))
object_sl_gaussian_flux.append(float("nan"))
object_sl_gaussian_center.append(float("nan"))
object_sl_gaussian_sigma.append(float("nan"))
dif_center_obj_sky.append(float("nan"))
skip_sl_fit[i] = True # We don't substract this fit
ratio_object_sky_sl_gaussian.append(
old_div(object_sl_gaussian_flux[i], sl_gaussian_flux[i])
) # TODO: to remove once sky_line_fitting is active and we can do 1Dfit
# Scale sky lines that are located in emission lines or provided negative values in fit
# reference_sl = 1 # Position in the file! Position 1 is sky line 6363.4
# sl_ref_ratio = sl_gaussian_flux/sl_gaussian_flux[reference_sl]
if verbose:
print("\n> Correcting skylines for which we couldn't get a Gaussian fit...\n")
for i in range(number_sl):
if skip_sl_fit[i] == True:
# Use known center, sigma of the sky and peak
gauss_fix = sl_gaussian_sigma[i]
small_center_correction = 0.0
# Check if center of previous sky line has a small difference in wavelength
small_center_correction = np.nanmedian(dif_center_obj_sky[0:i])
if verbose:
print("- Small correction of center wavelength of sky line {} : {}".format(
sl_center[i], small_center_correction))
object_sl_gaussian_fitted = substract_given_gaussian(
w,
object_sl_gaussian_fitted,
sl_center[i] + small_center_correction,
peak=0,
sigma=gauss_fix,
flux=0,
search_peak=True,
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
plot=False,
verbose=verbose,
)
# Substract second Gaussian if needed !!!!!
for di in range(len(dsky1) - 1):
if sl_center[i] == dsky1[di]:
if verbose:
print(" This was a double sky line, also substracting {} at {}".format(
dsky2[di], np.array(dsky2[di]) + small_center_correction))
object_sl_gaussian_fitted = substract_given_gaussian(
w,
object_sl_gaussian_fitted,
np.array(dsky2[di]) + small_center_correction,
peak=0,
sigma=gauss_fix,
flux=0,
search_peak=True,
lowlow=sl_lowlow[i],
lowhigh=sl_lowhigh[i],
highlow=sl_highlow[i],
highhigh=sl_highhigh[i],
lmin=sl_lmin[i],
lmax=sl_lmax[i],
plot=False,
verbose=verbose,
)
# wmin,wmax = 6100,6500
# ymin,ymax= -100,400
#
# wmin,wmax = 6350,6700
# wmin,wmax = 7100,7700
# wmin,wmax = 7600,8200
# wmin,wmax = 8200,8500
# wmin,wmax = 7350,7500
# wmin,wmax=6100, 8500 #7800, 8000#6820, 6850 #6700,7000 #6300,6450#7500
# wmin,wmax = 8700,9300
# ymax=800
if plot:
plt.figure(figsize=(11, 4))
plt.plot(w, spec, "y", alpha=0.7, label="Object")
plt.plot(
w,
object_sl_gaussian_fitted,
"k",
alpha=0.5,
label="Obj - sky fitted",
)
plt.plot(w, sky_sl_gaussian_fitted, "r", alpha=0.5, label="Sky fitted")
plt.plot(w, spec - sky, "g", alpha=0.5, label="Obj - sky")
plt.plot(
w,
object_sl_gaussian_fitted - sky_sl_gaussian_fitted,
"b",
alpha=0.9,
label="Obj - sky fitted - rest sky",
)
plt.xlim(wmin, wmax)
plt.ylim(ymin, ymax)
ptitle = "Fibre " + np.str(fibre) # +" with rms = "+np.str(rms[i])
plt.title(ptitle)
plt.xlabel("Wavelength [$\AA$]")
plt.ylabel("Flux [counts]")
plt.legend(frameon=True, loc=2, ncol=5)
plt.minorticks_on()
for i in range(len(el_list)):
plt.axvline(x=el_list[i], color="k", linestyle="--", alpha=0.5)
for i in range(number_sl):
if sl_fnl[i] == 1:
plt.axvline(
x=sl_center[i], color="brown", linestyle="-", alpha=1
)
else:
plt.axvline(
x=sl_center[i], color="y", linestyle="--", alpha=0.6
)
for i in range(len(dsky2) - 1):
plt.axvline(x=dsky2[i], color="orange", linestyle="--", alpha=0.6)
# plt.show()
# plt.close()
offset = np.nanmedian(
np.array(object_sl_gaussian_center) - | np.array(sl_gauss_center) | numpy.array |
import numpy as np
import numpy.matlib as npm
import pyroms
from pyroms_toolbox.CGrid_GLORYS import CGrid_GLORYS
# def get_nc_CGrid_CMEMS_IBI(grdfile, name='IBI12', area='regional', xrange=(185, 340), yrange=(100, 210), ystart=245):
def get_nc_CGrid_CMEMS_IBI(grdfile, name='IBI', area='regional', xrange=(1, 290), yrange=(1, 362), ystart=245):
"""
    grd = get_nc_CGrid_CMEMS_IBI(grdfile)
    Load CGrid object for CMEMS IBI from netCDF file
"""
nc = pyroms.ipop.Dataset(grdfile)
# lon_t = nc.variables['nav_lon'][:] # nav_lon is 2D longitude, not available in file
# lat_t = nc.variables['nav_lat'][:] # nav_lat is 2D latitude, not available in file
lon = nc.variables['longitude'][:] # 289 length
lat = nc.variables['latitude'][:] # 361 length
# end result must be 2D array (289*361)
lon_t = npm.repmat(lon, lat.shape[0], 1)
lat_t = npm.repmat(lat, lon.shape[0], 1)
lat_t = np.transpose(lat_t)
# lambda is longitude
# phi is latitude
# lat_u = nc.variables['gphiu'][:]
# lon_u = nc.variables['glamu'][:]
lat_u = lat_t
lon_u = lon_t
# lat_v = nc.variables['gphiv'][:]
# lon_v = nc.variables['glamv'][:]
lat_v = lat_t
lon_v = lon_t
# depth = nc.variables['gdept_0'][:]
depth = nc.variables['depth'][:]
# depth_w = nc.variables['gdepw_0'][:]
depth_w = np.zeros_like(depth)
depth_w[1:] = (depth[1:] + depth[:-1])/2
depth_bnds = np.zeros(depth.shape[0] + 1)
depth_bnds[:-1] = depth_w[:]
depth_bnds[-1] = depth_bnds[-2] + 200.
nc_mask_t = nc.variables['mask_thetao']
# mask_t = np.array(~nc_mask_t[:].mask, dtype='int')
mask_t = np.array(nc_mask_t[:], dtype='int')
nc_mask_u = nc.variables['mask_uo']
# mask_u = np.array(~nc_mask_u[:].mask, dtype='int')
mask_u = np.array(nc_mask_u[:], dtype='int')
nc_mask_v = nc.variables['mask_vo']
# mask_v = np.array(~nc_mask_v[:].mask, dtype='int')
mask_v = | np.array(nc_mask_v[:], dtype='int') | numpy.array |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["test_log_like", "test_kernel"]
import numpy as np
def test_log_like(llfn):
np.random.seed(1234)
for N in [10, 50, 100, 500]:
r = np.random.randn(N)
x = np.sort(np.random.rand(N))
K = np.exp(-0.5 * ((x[:, None] - x[None, :])/0.1) ** 2)
K[np.diag_indices_from(K)] += 1e-3
assert np.allclose(llfn(r, K), _baseline_lnlike(r, K)), N
print(u"☺︎")
def _baseline_lnlike(r, K):
s, ld = | np.linalg.slogdet(K) | numpy.linalg.slogdet |
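# Hedged sketch of how the baseline Gaussian log-likelihood above is usually
# completed (the 2*pi constant-term convention is an assumption):
#     alpha = np.linalg.solve(K, r)
#     return -0.5 * (np.dot(r, alpha) + ld + len(r) * np.log(2 * np.pi))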
from abc import ABCMeta, abstractmethod
import numpy as np
from ..Solver import Solver
class BasicData(metaclass=ABCMeta):
def __init__(self, data: np.ndarray, electrode_num: int = 16, contain_excitation: bool = False):
"""
Data Initializer
Args:
data: data array
electrode_num: total electrode number
contain_excitation: if it contains excitation data
"""
self.data = data
self.data_mean = np.mean(self.data, axis=0)
# self.data_mean = self.data[0, :]
self.contain_excitation = contain_excitation
if contain_excitation:
self.data_mean = self.exclude_excitation(electrode_num)
self.delta_V = None
def exclude_excitation(self, electrode_num):
new_val = []
for i, val in enumerate(self.data_mean):
if i % electrode_num != 0:
new_val.append(val)
return | np.array(new_val) | numpy.array |
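# Hedged usage sketch (synthetic data; the concrete subclass is an assumption
# made only for illustration):
#     class RawData(BasicData):
#         pass
#     frames = np.random.rand(8, 256)   # 8 frames of 16x16 measurements
#     d = RawData(frames, electrode_num=16, contain_excitation=True)
#     # d.data_mean keeps 240 of the 256 averaged readings: every 16th
#     # (excitation) entry has been dropped by exclude_excitation().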
# Source 1: https://medium.com/technology-invention-and-more/how-to-build-a-simple-neural-network-in-9-lines-of-python-code-cc8f23647ca1
# Source 2: https://iamtrask.github.io/2015/07/12/basic-python-network/
# Source 3: https://www.youtube.com/watch?v=h3l4qz76JhQ&list=PL2-dafEMk2A5BoX3KyKu6ti5_Pytp91sk
'''
Description:
- simple three layer network (input, hidden, output)
'''
import numpy as np
# Seed the random number generator, so it generates the same numbers
# every time the program runs.
np.random.seed(1)
# S shaped function. We pass the weighted sum to normalize it between 0 and 1
def sigmoid(x, deriv=False):
if deriv:
return x * (1 - x)
return 1 / (1 + np.exp(-x))
training_set_inputs = np.array([
[0, 0, 1],
[1, 1, 1],
[1, 0, 1],
[0, 1, 1]
])
training_set_outputs = np.array([[0, 1, 1, 0]]).T # transpose matrix from horizontal to vertical
# synapses, weights between layers
# weights between input and hidden layer. 3 inputs to 4 hidden
syn0 = 2 * np.random.random((3, 4)) - 1
# weights between hidden and output layer. 4 hidden to 1 output
syn1 = 2 * np.random.random((4, 1)) - 1
# training interation. Aim to reduce error.
for j in range(60000):
l0 = training_set_inputs # input layer
# np.dot(l, syn) - sum of all weights and inputs
l1 = sigmoid(np.dot(l0, syn0)) # hidden layer
l2 = sigmoid(np.dot(l1, syn1)) # output layer
l2_error = training_set_outputs - l2
if j % 10000 == 0:
print("Error: {0}".format(np.mean( | np.abs(l2_error) | numpy.abs |
""" A Quadrotor Learned using the SymplectivODENet thing"""
import numpy as np
from environments import learned_params, simplequad
from symplectic.analysis import get_one_step_prediction, np_to_integratable_type_1D, get_model
from symplectic.au_functional import jacobian
from symplectic.utils import ObjectView
class LearnedQuadEnv(simplequad.SimpleQuadEnv):
def __init__(self, model_type='structure', verbose=False):
""" Inputs: model type. Either structure or naive
structure is the structured Sympode
naive is the naive baseline model.
"""
simplequad.SimpleQuadEnv.__init__(self)
# TODO(rebecca): rewrite these correctly with a dictionary or something instead of pulling from params
# convert goal to embedded states
self.goal = to_embed_state(self.goal)
def error(x): raise Exception(x) # TODO: remove lol
        self.states = error('Unimplemented') # TODO: fix, should be number of embedded states
self.state = np.zeros((self.states))
self.Q_r_ddp = np.zeros([self.states, self.states])
self.Q_f_ddp = error('Unimplemented')
self.Q_f_ddp = np.diag([100, 100, 1]) # TODO: fix to be the right number of states
# Set up the model arguments
EXPERIMENT_DIR = '/experiment_simple_quad'
# TODO: fill out args correctly
self.args = error('Need to go filll out args correctly in learned_params')
#self.args = ObjectView(learned_params.get_quad_args(EXPERIMENT_DIR, 'LearnedQuad-v0'))
self.device = learned_params.get_device(self.args.gpu)
# Fetch the model
self.model_type = model_type
if model_type == 'structure':
self.model, self.stats = get_model(self.args, baseline=False, structure=True, naive=False,
device=self.device, verbose=verbose)
elif model_type == 'naive':
self.model, self.stats = get_model(self.args, baseline=False, structure=False, naive=True,
device=self.device, verbose=verbose)
else:
raise RuntimeError('Model type %s not accepted' % model_type)
def step(self, u):
""" Do one step of simulation given an input u
# TODO(rebecca): make this a super class and use inheiritance
"""
# Assemble combined state-input vector
y0_u = | np.hstack([self.state, u]) | numpy.hstack |
import sys
import getopt
from naoth.log import Reader as LogReader
from naoth.log import Parser
import numpy
from PIL import Image
def parse_arguments(argv):
input_file = ''
try:
opts, args = getopt.getopt(argv, "hi:", ["ifile="])
except getopt.GetoptError:
print('patchReader.py -i <input file>')
sys.exit(2)
if not opts:
print('python patchReader.py -i <logfile>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('patchReader.py -i <input file>')
sys.exit()
elif opt in ("-i", "--ifile"):
input_file = arg
return input_file
def image_from_proto(message):
# read each channel of yuv422 separately
yuv422 = numpy.fromstring(message.data, dtype=numpy.uint8)
y = yuv422[0::2]
u = yuv422[1::4]
v = yuv422[3::4]
# convert from yuv422 to yuv888
yuv888 = | numpy.zeros(message.height * message.width * 3, dtype=numpy.uint8) | numpy.zeros |
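# Hedged sketch of one way the yuv422 -> yuv888 interleaving could continue
# (channel layout and the PIL conversion are assumptions, not from the file):
#     yuv888[0::3] = y
#     yuv888[1::3] = u.repeat(2)
#     yuv888[2::3] = v.repeat(2)
#     yuv888 = yuv888.reshape((message.height, message.width, 3))
#     img = Image.frombytes('YCbCr', (message.width, message.height),
#                           yuv888.tobytes()).convert('RGB')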
import numpy as np
import os
def vol(ftraj='atoms.traj'):
from ase.io import read
a=read(ftraj,':')
v=a[-1].get_volume()
with open('vol','w') as f:
f.write(str(v))
return v
# do eos fitting and Debye model for one functional and save results in files
def eos_gpaw(ev='e-v.dat',struc='rel.in',eos_only='False'):
from gthermo_open import debye
try:
with open(ev, 'r') as f:
c=f.readlines()
cc=[]
for i in c:
if not '#' in i and i!='\n' and i!=' \n':
cc.append(i.strip('\n'))
v=[1 for i in cc]
e=[0 for i in cc]
for i in range(len(cc)):
cc[i]=cc[i].split(' ')
for j in range(cc[i].count('')):
cc[i].remove('')
v[i]=float(cc[i][0])
e[i]=float(cc[i][1])
emin=min(e)
for i in range(len(e)):
if e[i]==emin:
imin=i
v=v[max(0,imin-2):min(len(v),imin+3)]
e=e[max(0,imin-2):min(len(e),imin+3)]
with open(ev,'w') as ff:
ff.write('\n')
for i in range(len(v)):
ff.write(str(v[i])+' '+str(e[i])+'\n')
D=debye(expt='noread',ev=ev,Tmax=1000,Tstep_write=10,struc=struc,show='F',write='F',eos_only=eos_only)
name=['T','G','VT','BT','S','H','TEC','Cp','Cv','BP']
name2=['V0','E0','B0','BP0','y','T_debye0']
if eos_only=='False':
for j in range(1,len(D)):
with open('beef/'+name[j-1],'a') as f:
f.write(str(D[j][0])[1:-1]+' ')
f.write('\n')
for j in range(0,len(D[0])):
with open('beef/'+name2[j],'a') as f3:
f3.write(str(D[0][j][0])+' ')
f3.write('\n')
with open('beef/'+'cunt','r') as fcunt:
ccunt=fcunt.readlines()
ncunt=len(ccunt)+1
with open('beef/'+'cunt','a') as f2:
f2.write(str(ncunt)+'\n')
except:
print('EOS fails!')
name=['T','G','VT','BT','S','H','TEC','Cp','Cv','BP']
name2=['V0','E0','B0','BP0','y','T_debye0']
if eos_only=='False':
for j in range(len(name)):
with open('beef/'+name[j],'a') as f:
f.write('\n')
for j in range(len(name2)):
with open('beef/'+name2[j],'a') as f3:
f3.write('\n')
with open('beef/'+'cunt','a') as f2:
f2.write('\n')
# calculate probability distribution function (PDF) from property ensembles by BEEF
def pdf(dir='beef/'):
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
name=['G','VT','BT','S','H','TEC','Cp','Cv','BP']
for j in range(len(name)):
if os.path.exists(dir+'/'+name[j]):
a=np.loadtxt(dir+'/'+name[j],delimiter=',')
if os.path.exists(dir+'/Cp'):
acp=np.loadtxt(dir+'/Cp',delimiter=',')
else:
acp=a
aa= | np.append(a[:,0],a[:,-1]) | numpy.append |
import os
from PIL import Image
from torch.utils.data import Dataset
import torch
import torchvision.transforms as transforms
import numpy as np
def get_transforms(cfg):
train_transform = transforms.Compose([
transforms.Resize((cfg.DATA.RESIZE_SIZE, cfg.DATA.RESIZE_SIZE)),
transforms.RandomCrop((cfg.DATA.CROP_SIZE, cfg.DATA.CROP_SIZE)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(cfg.DATA.IMAGE_MEAN, cfg.DATA.IMAGE_STD)
])
test_transform = transforms.Compose([
transforms.Resize((cfg.DATA.CROP_SIZE, cfg.DATA.CROP_SIZE)),
transforms.ToTensor(),
transforms.Normalize(cfg.DATA.IMAGE_MEAN, cfg.DATA.IMAGE_STD)
])
test_tencrops_transform = transforms.Compose([
transforms.Resize((cfg.DATA.RESIZE_SIZE, cfg.DATA.RESIZE_SIZE)),
transforms.TenCrop(cfg.DATA.CROP_SIZE),
transforms.Lambda(lambda crops: torch.stack(
[transforms.Normalize(cfg.DATA.IMAGE_MEAN, cfg.DATA.IMAGE_STD)
(transforms.ToTensor()(crop)) for crop in crops])),
])
return train_transform, test_transform, test_tencrops_transform
class ImageNetDataset(Dataset):
def __init__(self, root, cfg, is_train):
self.root = root
self.cfg = cfg
self.is_train = is_train
self.resize_size = cfg.DATA.RESIZE_SIZE
self.crop_size = cfg.DATA.CROP_SIZE
if self.is_train:
datalist = os.path.join(self.root, 'ILSVRC2012_list', 'train.txt')
self.image_dir = os.path.join(self.root, 'train')
else:
datalist = os.path.join(self.root, 'ILSVRC2012_list', 'val_folder_new.txt')
self.image_dir = os.path.join(self.root, 'val')
names = []
labels = []
bboxes = []
with open(datalist) as f:
for line in f:
info = line.strip().split()
names.append(info[0][:-5])
labels.append(int(info[1]))
if self.is_train is False:
bboxes.append(np.array(list(map(float, info[2:]))).reshape(-1,4))
# bboxes.append([float(info[i]) for i in range(2, 6)])
self.names = names
self.labels = labels
if self.is_train is False:
self.bboxes = bboxes
self.train_transform, self.onecrop_transform, self.tencrops_transform = get_transforms(cfg)
if cfg.TEST.TEN_CROPS:
self.test_transform = self.tencrops_transform
else:
self.test_transform = self.onecrop_transform
# def __getitem__(self, idx):
# name = self.names[idx]
# label = self.labels[idx]
# if self.is_train:
# image = Image.open(os.path.join(self.image_dir, name+'.JPEG')).convert('RGB')
# else:
# #image = Image.open(os.path.join(self.image_dir, name.split('/')[1]+'.JPEG')).convert('RGB')
# image = Image.open(os.path.join(self.image_dir, name + '.JPEG')).convert('RGB')
# image_size = list(image.size)
#
# if self.is_train:
# image = self.train_transform(image)
# return image, label
#
# else:
# image = self.test_transform(image)
#
# bbox = self.bboxes[idx]
# [x, y, x2, y2] = bbox
# bbox_width = x2-x
# bbox_height = y2-y
# # if self.is_train:
# # resize_size = self.resize_size
# # crop_size = self.crop_size
# # shift_size = (resize_size - crop_size) // 2
# resize_size = self.crop_size
# crop_size = self.crop_size
# shift_size = 0
# [image_width, image_height] = image_size
# left_bottom_x = int(max(x / image_width * resize_size - shift_size, 0))
# left_bottom_y = int(max(y / image_height * resize_size - shift_size, 0))
# right_top_x = int(min((x + bbox_width) / image_width * resize_size - shift_size, crop_size - 1))
# right_top_y = int(min((y + bbox_height) / image_height * resize_size - shift_size, crop_size - 1))
#
# gt_bbox = [left_bottom_x, left_bottom_y, right_top_x, right_top_y]
# gt_bbox = torch.tensor(gt_bbox)
#
# return image, label, gt_bbox, name+'.jpg'
# For multiple bbox
def __getitem__(self, idx):
name = self.names[idx]
label = self.labels[idx]
if self.is_train:
image = Image.open(os.path.join(self.image_dir, name+'.JPEG')).convert('RGB')
else:
#image = Image.open(os.path.join(self.image_dir, name.split('/')[1]+'.JPEG')).convert('RGB')
image = Image.open(os.path.join(self.image_dir, name + '.JPEG')).convert('RGB')
image_size = list(image.size)
if self.is_train:
image = self.train_transform(image)
return image, label
else:
image = self.test_transform(image)
bbox = self.bboxes[idx]
[x1, y1, x2, y2] = np.split(bbox, 4, 1)
resize_size = self.crop_size
crop_size = self.crop_size
shift_size = 0
[image_width, image_height] = image_size
left_bottom_x = np.maximum(x1 / image_width * resize_size - shift_size, 0).astype(int)
left_bottom_y = np.maximum(y1 / image_height * resize_size - shift_size, 0).astype(int)
right_top_x = np.minimum(x2 / image_width * resize_size - shift_size, crop_size - 1).astype(int)
right_top_y = np.minimum(y2 / image_height * resize_size - shift_size, crop_size - 1).astype(int)
gt_bbox = | np.concatenate((left_bottom_x, left_bottom_y, right_top_x, right_top_y),axis=1) | numpy.concatenate |
import numpy as np
import pandas as pd
from statsmodels.regression.dimred import (
SlicedInverseReg, SAVE, PHD, CORE)
from numpy.testing import (assert_equal, assert_allclose)
from statsmodels.tools.numdiff import approx_fprime
def test_poisson():
np.random.seed(43242)
# Generate a non-orthogonal design matrix
xmat = np.random.normal(size=(500, 5))
xmat[:, 1] = 0.5*xmat[:, 0] + np.sqrt(1 - 0.5**2) * xmat[:, 1]
xmat[:, 3] = 0.5*xmat[:, 2] + np.sqrt(1 - 0.5**2) * xmat[:, 3]
b = np.r_[0, 1, -1, 0, 0.5]
lpr = np.dot(xmat, b)
ev = np.exp(lpr)
y = np.random.poisson(ev)
for method in range(6):
if method == 0:
model = SlicedInverseReg(y, xmat)
rslt = model.fit()
elif method == 1:
model = SAVE(y, xmat)
rslt = model.fit(slice_n=100)
elif method == 2:
model = SAVE(y, xmat, bc=True)
rslt = model.fit(slice_n=100)
elif method == 3:
df = pd.DataFrame({"y": y,
"x0": xmat[:, 0],
"x1": xmat[:, 1],
"x2": xmat[:, 2],
"x3": xmat[:, 3],
"x4": xmat[:, 4]})
model = SlicedInverseReg.from_formula(
"y ~ 0 + x0 + x1 + x2 + x3 + x4", data=df)
rslt = model.fit()
elif method == 4:
model = PHD(y, xmat)
rslt = model.fit()
elif method == 5:
model = PHD(y, xmat)
rslt = model.fit(resid=True)
# Check for concentration in one direction (this is
# a single index model)
assert_equal(np.abs(rslt.eigs[0] / rslt.eigs[1]) > 5, True)
# Check that the estimated direction aligns with the true
# direction
params = np.asarray(rslt.params)
q = np.dot(params[:, 0], b)
q /= np.sqrt(np.sum(params[:, 0]**2))
q /= np.sqrt(np.sum(b**2))
assert_equal(np.abs(q) > 0.95, True)
def test_sir_regularized_numdiff():
# Use numeric gradients to check the analytic gradient
    # for the regularized SIR objective function.
np.random.seed(93482)
n = 1000
p = 10
xmat = np.random.normal(size=(n, p))
y1 = np.dot(xmat, np.linspace(-1, 1, p))
y2 = xmat.sum(1)
y = y2 / (1 + y1**2) + np.random.normal(size=n)
model = SlicedInverseReg(y, xmat)
_ = model.fit()
# Second difference penalty matrix.
fmat = np.zeros((p-2, p))
for i in range(p-2):
fmat[i, i:i+3] = [1, -2, 1]
_ = model.fit_regularized(2, 3*fmat)
# Compare the gradients to the numerical derivatives
for _ in range(5):
pa = np.random.normal(size=(p, 2))
pa, _, _ = np.linalg.svd(pa, 0)
gn = approx_fprime(pa.ravel(), model._regularized_objective, 1e-7)
gr = model._regularized_grad(pa.ravel())
assert_allclose(gn, gr, atol=1e-5, rtol=1e-4)
def test_sir_regularized_1d():
# Compare regularized SIR to traditional SIR, in a setting where the
# regularization is compatible with the true parameters (i.e. there
# is no regularization bias).
np.random.seed(93482)
n = 1000
p = 10
xmat = np.random.normal(size=(n, p))
y = np.dot(xmat[:, 0:4], np.r_[1, 1, -1, -1]) + np.random.normal(size=n)
model = SlicedInverseReg(y, xmat)
rslt = model.fit()
    # The penalty drives p[0] ~ p[1] and p[2] ~ p[3]
fmat = np.zeros((2, p))
fmat[0, 0:2] = [1, -1]
fmat[1, 2:4] = [1, -1]
rslt2 = model.fit_regularized(1, 3*fmat)
pa0 = np.zeros(p)
pa0[0:4] = [1, 1, -1, -1]
pa1 = rslt.params[:, 0]
pa2 = rslt2.params[:, 0:2]
# Compare two 1d subspaces
def sim(x, y):
x = x / np.sqrt(np.sum(x * x))
        y = y / np.sqrt(np.sum(y * y))
        # The source is truncated here; returning the cosine of the two normalized vectors is
        # the natural choice for this similarity helper (assumption).
        return np.dot(x, y)
"""
Tests for Series cumulative operations.
See also
--------
tests.frame.test_cumulative
"""
from itertools import product
import numpy as np
import pytest
import pandas as pd
from pandas import _is_numpy_dev
import pandas._testing as tm
def _check_accum_op(name, series, check_dtype=True):
func = getattr(np, name)
tm.assert_numpy_array_equal(
func(series).values, func(np.array(series)), check_dtype=check_dtype,
)
# with missing values
ts = series.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.dropna()))
tm.assert_numpy_array_equal(result.values, expected, check_dtype=False)
class TestSeriesCumulativeOps:
def test_cumsum(self, datetime_series):
_check_accum_op("cumsum", datetime_series)
def test_cumprod(self, datetime_series):
_check_accum_op("cumprod", datetime_series)
@pytest.mark.xfail(
_is_numpy_dev,
reason="https://github.com/pandas-dev/pandas/issues/31992",
strict=False,
)
def test_cummin(self, datetime_series):
tm.assert_numpy_array_equal(
datetime_series.cummin().values,
            np.minimum.accumulate(np.array(datetime_series)),
        )
import numpy as np
import matplotlib.pyplot as plt
import sympy
from sympy import *
import sys
sys.path.append(r'C:\Users\elira\Google Drive\butools2\Python')
sys.path.append('/home/d/dkrass/eliransc/Python')
from tqdm import tqdm
from butools.ph import *
from butools.map import *
from butools.queues import *
from butools.mam import *
from butools.dph import *
from scipy.linalg import expm, sinm, cosm
from sympy import *
from sympy import Symbol
from sympy.physics.quantum import TensorProduct
import pickle as pkl
import pandas as pd
from sympy import diff, sin, exp
from numpy.linalg import matrix_power
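# Interpretation (not a docstring from the source): the helpers below build Laplace-Stieltjes
# transforms for a two-class single-server queue -- busy(.) a busy-period transform, ser_lap(.)
# an exponential service transform, hyper(.) a hyperexponential mixture, w_lap(.) a
# Pollaczek-Khinchine-style waiting-time transform -- and the moment helpers differentiate
# tau(s) symbolically with sympy to extract moments at s = 0.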
def busy(s, lam2, mu2):
return ((lam2 + mu2 + s) - ((lam2 + mu2 + s) ** 2 - 4 * lam2 * mu2) ** 0.5) / (2 * lam2)
def ser_lap(s, mu):
return mu / (s + mu)
def hyper(s, lam1, lam2, mu1, mu2):
return ser_lap(s, mu1) * lam1 / (lam1 + lam2) + ser_lap(s, mu2) * lam2 / (lam1 + lam2)
def rho(lam1, lam2, mu1, mu2):
return (lam1 + lam2) * ((lam1 / ((lam1 + lam2) * mu1)) + (lam2 / ((lam1 + lam2) * mu2)))
def w_lap(s, lam1, lam2, mu1, mu2):
return ((1 - rho(lam1, lam2, mu1, mu2)) * s) / (s - (lam1 + lam2) * (1 - hyper(s, lam1, lam2, mu1, mu2)))
def F(s, lam1, lam2, mu1, mu2):
return w_lap(s, lam1, lam2, mu1, mu2) * ser_lap(s, mu1)
def A(s, lam1, lam2, mu2):
return (lam1 / (lam1 + lam2 - lam2 * (ser_lap(s, mu2))))
def beta(s, lam1, lam2, mu1, mu2):
return (lam1 / (lam1 + lam2 + s) + ((A(s, lam1, lam2, mu2) * lam2) / (lam1 + lam2 + s)) * (
ser_lap(s, mu2) - busy(s + lam1, lam2, mu2))) / (
1 - ((lam2 * busy(s + lam1, lam2, mu2)) / (lam1 + lam2 + s)))
def tau(s, lam1, lam2, mu1, mu2):
return ser_lap(s, mu1) * (A(s, lam1, lam2, mu2) * (
1 - F(lam1 + lam2 - lam2 * busy(s + lam1, lam2, mu2), lam1, lam2, mu1, mu2)) + F(
lam1 + lam2 - lam2 * busy(s + lam1, lam2, mu2), lam1, lam2, mu1, mu2) * beta(s, lam1, lam2, mu1, mu2))
def get_var(lam1, lam2, mu1, mu2):
s = Symbol('s')
y = tau(s, lam1, lam2, mu1, mu2)
dx = diff(y, s)
dxdx = diff(dx, s)
return dxdx.subs(s, 0) - (dx.subs(s, 0)) ** 2
def get_nth_moment(lam1, lam2, mu1, mu2, n):
s = Symbol('s')
y = tau(s, lam1, lam2, mu1, mu2)
for i in range(n):
if i == 0:
dx = diff(y, s)
else:
dx = diff(dx, s)
return dx.subs(s, 0)
def get_first_n_moments(parameters, n=5):
lam1, lam2, mu1, mu2 = parameters
moments = []
for n in range(1, n + 1):
moments.append(get_nth_moment(lam1, lam2, mu1, mu2, n) * (-1) ** n)
moments = np.array([moments], dtype='float')
return moments
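# Example (sketch; arrival/service rates are illustrative and chosen so the queue is stable):
#     moments = get_first_n_moments((0.5, 0.5, 2.0, 1.5), n=5)
# This evaluates the first five raw moments of the transform tau(s) by repeated symbolic
# differentiation at s = 0.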
def kroneker_sum(G, H):
size_g = G.shape[0]
size_h = H.shape[0]
return np.kron(G, np.identity(size_h)) + np.kron(np.identity(size_g), H)
def give_boundry_probs(R, A0, A1, A, B, C0, ro):
p00, p01, p02, p100, p110, p120, p101, p111, p121 = symbols('p00 p01 p02 p100 p110 p120 p101 p111 p121')
eqns = [np.dot(np.array([p00, p01, p02]), np.ones((A0.shape[0]))) - (1 - ro)]
eq3 = np.dot(np.array([p00, p01, p02]), A0) + np.dot(np.array([p100, p110, p120, p101, p111, p121]), A1)
    eq1 = np.dot(np.array([p00, p01, p02]), B)  # right operand is an assumption: the original line is truncated at this point in the source
# Copyright 2019 MilaGraph. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME>
"""
Dataset module of GraphVite
Graph
- :class:`BlogCatalog`
- :class:`Youtube`
- :class:`Flickr`
- :class:`Hyperlink2012`
- :class:`Friendster`
- :class:`Wikipedia`
Knowledge Graph
- :class:`Math`
- :class:`FB15k`
- :class:`FB15k237`
- :class:`WN18`
- :class:`WN18RR`
- :class:`Wikidata5m`
- :class:`Freebase`
Visualization
- :class:`MNIST`
- :class:`CIFAR10`
- :class:`ImageNet`
"""
from __future__ import absolute_import, division
import os
import glob
import shutil
import logging
import gzip, zipfile, tarfile
import multiprocessing
from collections import defaultdict
import numpy as np
from . import cfg
logger = logging.getLogger(__name__)
class Dataset(object):
"""
Graph dataset.
Parameters:
name (str): name of dataset
urls (dict, optional): url(s) for each split,
can be either str or list of str
members (dict, optional): zip member(s) for each split,
leave empty for default
Datasets contain several splits, such as train, valid and test.
For each split, there are one or more URLs, specifying the file to download.
You may also specify the zip member to extract.
When a split is accessed, it will be automatically downloaded and decompressed
if it is not present.
You can assign a preprocess for each split, by defining a function with name [split]_preprocess::
class MyDataset(Dataset):
def __init__(self):
super(MyDataset, self).__init__(
"my_dataset",
train="url/to/train/split",
test="url/to/test/split"
)
def train_preprocess(self, input_file, output_file):
with open(input_file, "r") as fin, open(output_file, "w") as fout:
fout.write(fin.read())
f = open(MyDataset().train)
If the preprocess returns a non-trivial value, then it is assigned to the split,
otherwise the file name is assigned.
By convention, only splits ending with ``_data`` have non-trivial return value.
See also:
Pre-defined preprocess functions
:func:`csv2txt`,
:func:`top_k_label`,
:func:`induced_graph`,
:func:`edge_split`,
:func:`link_prediction_split`,
:func:`image_feature_data`
"""
def __init__(self, name, urls=None, members=None):
self.name = name
self.urls = urls or {}
self.members = members or {}
for key in self.urls:
if isinstance(self.urls[key], str):
self.urls[key] = [self.urls[key]]
if key not in self.members:
self.members[key] = [None] * len(self.urls[key])
elif isinstance(self.members[key], str):
self.members[key] = [self.members[key]]
if len(self.urls[key]) != len(self.members[key]):
raise ValueError("Number of members is inconsistent with number of urls in `%s`" % key)
self.path = os.path.join(cfg.dataset_path, self.name)
def relpath(self, path):
return os.path.relpath(path, self.path)
def download(self, url):
from six.moves.urllib.request import urlretrieve
save_file = os.path.basename(url)
if "?" in save_file:
save_file = save_file[:save_file.find("?")]
save_file = os.path.join(self.path, save_file)
if save_file in self.local_files():
return save_file
logger.info("downloading %s to %s" % (url, self.relpath(save_file)))
urlretrieve(url, save_file)
return save_file
def extract(self, zip_file, member=None):
zip_name, extension = os.path.splitext(zip_file)
if zip_name.endswith(".tar"):
extension = ".tar" + extension
zip_name = zip_name[:-4]
if extension == ".txt":
return zip_file
elif member is None:
save_file = zip_name
else:
save_file = os.path.join(os.path.dirname(zip_name), os.path.basename(member))
if save_file in self.local_files():
return save_file
if extension == ".gz":
logger.info("extracting %s to %s" % (self.relpath(zip_file), self.relpath(save_file)))
with gzip.open(zip_file, "rb") as fin, open(save_file, "wb") as fout:
shutil.copyfileobj(fin, fout)
elif extension == ".tar.gz" or extension == ".tar":
if member is None:
logger.info("extracting %s to %s" % (self.relpath(zip_file), self.relpath(save_file)))
with tarfile.open(zip_file, "r") as fin:
fin.extractall(save_file)
else:
logger.info("extracting %s from %s to %s" % (member, self.relpath(zip_file), self.relpath(save_file)))
with tarfile.open(zip_file, "r").extractfile(member) as fin, open(save_file, "wb") as fout:
shutil.copyfileobj(fin, fout)
elif extension == ".zip":
if member is None:
logger.info("extracting %s to %s" % (self.relpath(zip_file), self.relpath(save_file)))
with zipfile.ZipFile(zip_file) as fin:
fin.extractall(save_file)
else:
logger.info("extracting %s from %s to %s" % (member, self.relpath(zip_file), self.relpath(save_file)))
with zipfile.ZipFile(zip_file).open(member, "r") as fin, open(save_file, "wb") as fout:
shutil.copyfileobj(fin, fout)
else:
raise ValueError("Unknown file extension `%s`" % extension)
return save_file
def get_file(self, key):
file_name = os.path.join(self.path, "%s_%s.txt" % (self.name, key))
if file_name in self.local_files():
return file_name
urls = self.urls[key]
members = self.members[key]
preprocess_name = key + "_preprocess"
preprocess = getattr(self, preprocess_name, None)
if len(urls) > 1 and preprocess is None:
raise AttributeError(
"There are non-trivial number of files, but function `%s` is not found" % preprocess_name)
extract_files = []
for url, member in zip(urls, members):
download_file = self.download(url)
extract_file = self.extract(download_file, member)
extract_files.append(extract_file)
if preprocess:
result = preprocess(*(extract_files + [file_name]))
if result is not None:
return result
elif os.path.isfile(extract_files[0]):
logger.info("renaming %s to %s" % (self.relpath(extract_files[0]), self.relpath(file_name)))
shutil.move(extract_files[0], file_name)
else:
raise AttributeError(
"There are non-trivial number of files, but function `%s` is not found" % preprocess_name)
return file_name
def local_files(self):
if not os.path.exists(self.path):
os.mkdir(self.path)
return set(glob.glob(os.path.join(self.path, "*")))
def __getattr__(self, key):
if key in self.__dict__:
return self.__dict__[key]
if key in self.urls:
return self.get_file(key)
raise AttributeError("Can't resolve split `%s`" % key)
def csv2txt(self, csv_file, txt_file):
"""
Convert ``csv`` to ``txt``.
Parameters:
csv_file: csv file
txt_file: txt file
"""
logger.info("converting %s to %s" % (self.relpath(csv_file), self.relpath(txt_file)))
with open(csv_file, "r") as fin, open(txt_file, "w") as fout:
for line in fin:
fout.write(line.replace(",", "\t"))
def top_k_label(self, label_file, save_file, k, format="node-label"):
"""
Extract top-k labels.
Parameters:
label_file (str): label file
save_file (str): save file
k (int): top-k labels will be extracted
format (str, optional): format of label file,
can be 'node-label' or '(label)-nodes':
- **node-label**: each line is [node] [label]
- **(label)-nodes**: each line is [node]..., no explicit label
"""
logger.info("extracting top-%d labels of %s to %s" % (k, self.relpath(label_file), self.relpath(save_file)))
if format == "node-label":
label2nodes = defaultdict(list)
with open(label_file, "r") as fin:
for line in fin:
node, label = line.split()
label2nodes[label].append(node)
elif format == "(label)-nodes":
label2nodes = {}
with open(label_file, "r") as fin:
for i, line in enumerate(fin):
label2nodes[i] = line.split()
else:
raise ValueError("Unknown file format `%s`" % format)
labels = sorted(label2nodes, key=lambda x: len(label2nodes[x]), reverse=True)[:k]
with open(save_file, "w") as fout:
for label in sorted(labels):
for node in sorted(label2nodes[label]):
fout.write("%s\t%s\n" % (node, label))
def induced_graph(self, graph_file, label_file, save_file):
"""
Induce a subgraph from labeled nodes. All edges in the induced graph have at least one labeled node.
Parameters:
graph_file (str): graph file
label_file (str): label file
save_file (str): save file
"""
logger.info("extracting subgraph of %s induced by %s to %s" %
(self.relpath(graph_file), self.relpath(label_file), self.relpath(save_file)))
nodes = set()
with open(label_file, "r") as fin:
for line in fin:
nodes.update(line.split())
with open(graph_file, "r") as fin, open(save_file, "w") as fout:
for line in fin:
if not line.startswith("#"):
u, v = line.split()
if u not in nodes or v not in nodes:
continue
fout.write("%s\t%s\n" % (u, v))
def edge_split(self, graph_file, files, portions):
"""
Divide a graph into several splits.
Parameters:
graph_file (str): graph file
files (list of str): file names
portions (list of float): split portions
"""
assert len(files) == len(portions)
logger.info("splitting graph %s into %s" %
(self.relpath(graph_file), ", ".join([self.relpath(file) for file in files])))
np.random.seed(1024)
portions = np.cumsum(portions, dtype=np.float32) / np.sum(portions)
files = [open(file, "w") for file in files]
with open(graph_file, "r") as fin:
for line in fin:
i = np.searchsorted(portions, np.random.rand())
files[i].write(line)
for file in files:
file.close()
def link_prediction_split(self, graph_file, files, portions):
"""
Divide a normal graph into a train split and several test splits for link prediction use.
Each test split contains half true and half false edges.
Parameters:
graph_file (str): graph file
files (list of str): file names,
the first file is treated as train file
portions (list of float): split portions
"""
assert len(files) == len(portions)
logger.info("splitting graph %s into %s" %
(self.relpath(graph_file), ", ".join([self.relpath(file) for file in files])))
np.random.seed(1024)
nodes = set()
edges = set()
portions = np.cumsum(portions, dtype=np.float32) / np.sum(portions)
files = [open(file, "w") for file in files]
num_edges = [0] * len(files)
with open(graph_file, "r") as fin:
for line in fin:
u, v = line.split()[:2]
nodes.update([u, v])
edges.add((u, v))
i = np.searchsorted(portions, np.random.rand())
if i == 0:
files[i].write(line)
else:
files[i].write("%s\t%s\t1\n" % (u, v))
num_edges[i] += 1
nodes = list(nodes)
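        # For every non-train split, sample as many random node pairs as it has positive edges,
        # rejecting self-pairs and pairs that already occur as an edge in either direction, and
        # write them with label 0 as negative examples.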
for file, num_edge in zip(files[1:], num_edges[1:]):
for _ in range(num_edge):
valid = False
while not valid:
u = nodes[int(np.random.rand() * len(nodes))]
v = nodes[int(np.random.rand() * len(nodes))]
valid = u != v and (u, v) not in edges and (v, u) not in edges
file.write("%s\t%s\t0\n" % (u, v))
for file in files:
file.close()
def image_feature_data(self, dataset, model="resnet50", batch_size=128):
"""
Compute feature vectors for an image dataset using a neural network.
Parameters:
dataset (torch.utils.data.Dataset): dataset
model (str or torch.nn.Module, optional): pretrained model.
                If it is a str, use the output of the last hidden layer of that pretrained model.
batch_size (int, optional): batch size
"""
import torch
import torchvision
from torch import nn
logger.info("computing %s feature" % model)
if isinstance(model, str):
full_model = getattr(torchvision.models, model)(pretrained=True)
model = nn.Sequential(*list(full_model.children())[:-1])
num_worker = multiprocessing.cpu_count()
data_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size, num_workers=num_worker, shuffle=False)
model = model.cuda()
model.eval()
features = []
with torch.no_grad():
for i, (batch_images, batch_labels) in enumerate(data_loader):
if i % 100 == 0:
logger.info("%g%%" % (100.0 * i * batch_size / len(dataset)))
batch_images = batch_images.cuda()
batch_features = model(batch_images).view(batch_images.size(0), -1).cpu().numpy()
features.append(batch_features)
features = np.concatenate(features)
return features
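# Example (sketch; dataset and transform names are illustrative): computing image features with a
# pretrained ResNet-50 backbone. `some_dataset` stands for any instance of the Dataset class above.
#     import torchvision
#     ds = torchvision.datasets.CIFAR10("data/", download=True,
#                                       transform=torchvision.transforms.ToTensor())
#     features = some_dataset.image_feature_data(ds, model="resnet50", batch_size=128)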
class BlogCatalog(Dataset):
"""
BlogCatalog social network dataset.
Splits:
graph, label, train, test
Train and test splits are used for link prediction purpose.
"""
def __init__(self):
super(BlogCatalog, self).__init__(
"blogcatalog",
urls={
"graph": "https://www.dropbox.com/s/cf21ouuzd563cqx/BlogCatalog-dataset.zip?dl=1",
"label": "https://www.dropbox.com/s/cf21ouuzd563cqx/BlogCatalog-dataset.zip?dl=1",
"train": [], # depends on `graph`
"valid": [], # depends on `graph`
"test": [] # depends on `graph`
},
members={
"graph": "BlogCatalog-dataset/data/edges.csv",
"label": "BlogCatalog-dataset/data/group-edges.csv"
}
)
def graph_preprocess(self, raw_file, save_file):
self.csv2txt(raw_file, save_file)
def label_preprocess(self, raw_file, save_file):
self.csv2txt(raw_file, save_file)
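    # The three preprocess hooks below each regenerate the full train/valid/test split; because
    # link_prediction_split reseeds numpy's RNG, the output files are identical no matter which
    # split is requested (and hence generated) first.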
def train_preprocess(self, train_file):
valid_file = train_file[:train_file.rfind("train.txt")] + "valid.txt"
test_file = train_file[:train_file.rfind("train.txt")] + "test.txt"
self.link_prediction_split(self.graph, [train_file, valid_file, test_file], portions=[100, 1, 1])
def valid_preprocess(self, valid_file):
train_file = valid_file[:valid_file.rfind("valid.txt")] + "train.txt"
test_file = valid_file[:valid_file.rfind("valid.txt")] + "test.txt"
self.link_prediction_split(self.graph, [train_file, valid_file, test_file], portions=[100, 1, 1])
def test_preprocess(self, test_file):
train_file = test_file[:test_file.rfind("test.txt")] + "train.txt"
valid_file = test_file[:test_file.rfind("test.txt")] + "valid.txt"
self.link_prediction_split(self.graph, [train_file, valid_file, test_file], portions=[100, 1, 1])
class Youtube(Dataset):
"""
Youtube social network dataset.
Splits:
graph, label
"""
def __init__(self):
super(Youtube, self).__init__(
"youtube",
urls={
"graph": "http://socialnetworks.mpi-sws.mpg.de/data/youtube-links.txt.gz",
"label": "http://socialnetworks.mpi-sws.mpg.de/data/youtube-groupmemberships.txt.gz"
}
)
def label_preprocess(self, raw_file, save_file):
self.top_k_label(raw_file, save_file, k=47)
class Flickr(Dataset):
"""
Flickr social network dataset.
Splits:
graph, label
"""
def __init__(self):
super(Flickr, self).__init__(
"flickr",
urls={
"graph": "http://socialnetworks.mpi-sws.mpg.de/data/flickr-links.txt.gz",
"label": "http://socialnetworks.mpi-sws.mpg.de/data/flickr-groupmemberships.txt.gz"
}
)
def label_preprocess(self, label_file, save_file):
self.top_k_label(label_file, save_file, k=5)
class Hyperlink2012(Dataset):
"""
Hyperlink 2012 graph dataset.
Splits:
pld_train, pld_test
"""
def __init__(self):
super(Hyperlink2012, self).__init__(
"hyperlink2012",
urls={
"pld_train": "http://data.dws.informatik.uni-mannheim.de/hyperlinkgraph/2012-08/pld-arc.gz",
"pld_valid": "http://data.dws.informatik.uni-mannheim.de/hyperlinkgraph/2012-08/pld-arc.gz",
"pld_test": "http://data.dws.informatik.uni-mannheim.de/hyperlinkgraph/2012-08/pld-arc.gz"
}
)
def pld_train_preprocess(self, graph_file, train_file):
valid_file = train_file[:train_file.rfind("pld_train.txt")] + "pld_valid.txt"
test_file = train_file[:train_file.rfind("pld_train.txt")] + "pld_test.txt"
self.link_prediction_split(graph_file, [train_file, valid_file, test_file], portions=[10000, 1, 1])
def pld_valid_preprocess(self, graph_file, valid_file):
train_file = valid_file[:valid_file.rfind("pld_valid.txt")] + "pld_train.txt"
test_file = valid_file[:valid_file.rfind("pld_valid.txt")] + "pld_test.txt"
self.link_prediction_split(graph_file, [train_file, valid_file, test_file], portions=[10000, 1, 1])
def pld_test_preprocess(self, graph_file, test_file):
train_file = test_file[:test_file.rfind("pld_test.txt")] + "pld_train.txt"
valid_file = test_file[:test_file.rfind("pld_test.txt")] + "pld_valid.txt"
self.link_prediction_split(graph_file, [train_file, valid_file, test_file], portions=[10000, 1, 1])
class Friendster(Dataset):
"""
Friendster social network dataset.
Splits:
graph, small_graph, label
"""
def __init__(self):
super(Friendster, self).__init__(
"friendster",
urls={
"graph": "https://snap.stanford.edu/data/bigdata/communities/com-friendster.ungraph.txt.gz",
"small_graph": ["https://snap.stanford.edu/data/bigdata/communities/com-friendster.ungraph.txt.gz",
"https://snap.stanford.edu/data/bigdata/communities/com-friendster.all.cmty.txt.gz"],
"label": "https://snap.stanford.edu/data/bigdata/communities/com-friendster.top5000.cmty.txt.gz"
}
)
def small_graph_preprocess(self, graph_file, label_file, save_file):
self.induced_graph(graph_file, label_file, save_file)
def label_preprocess(self, label_file, save_file):
self.top_k_label(label_file, save_file, k=100, format="(label)-nodes")
class Wikipedia(Dataset):
"""
Wikipedia dump for word embedding.
Splits:
graph
"""
def __init__(self):
super(Wikipedia, self).__init__(
"wikipedia",
urls={
"graph": "https://www.dropbox.com/s/q6w950e5f7g7ax8/enwiki-latest-pages-articles-sentences.txt.gz?dl=1"
}
)
class Math(Dataset):
"""
Synthetic math knowledge graph dataset.
Splits:
train, valid, test
"""
NUM_ENTITY = 1000
NUM_RELATION = 30
OPERATORS = [
("+", lambda x, y: (x + y) % Math.NUM_ENTITY),
("-", lambda x, y: (x - y) % Math.NUM_ENTITY),
("*", lambda x, y: (x * y) % Math.NUM_ENTITY),
("/", lambda x, y: x // y),
("%", lambda x, y: x % y)
]
def __init__(self):
super(Math, self).__init__(
"math",
urls={
"train": [],
"valid": [],
"test": []
}
)
def train_preprocess(self, save_file):
np.random.seed(1023)
self.generate_math(save_file, num_triplet=20000)
def valid_preprocess(self, save_file):
np.random.seed(1024)
self.generate_math(save_file, num_triplet=1000)
def test_preprocess(self, save_file):
np.random.seed(1025)
self.generate_math(save_file, num_triplet=1000)
def generate_math(self, save_file, num_triplet):
with open(save_file, "w") as fout:
for _ in range(num_triplet):
i = int(np.random.rand() * len(self.OPERATORS))
op, f = self.OPERATORS[i]
x = int(np.random.rand() * self.NUM_ENTITY)
y = int(np.random.rand() * self.NUM_RELATION) + 1
fout.write("%d\t%s%d\t%d\n" % (x, op, y, f(x, y)))
class FB15k(Dataset):
"""
FB15k knowledge graph dataset.
Splits:
train, valid, test
"""
def __init__(self):
super(FB15k, self).__init__(
"fb15k",
urls={
"train": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/FB15k/train.txt",
"valid": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/FB15k/valid.txt",
"test": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/FB15k/test.txt"
}
)
class FB15k237(Dataset):
"""
FB15k-237 knowledge graph dataset.
Splits:
train, valid, test
"""
def __init__(self):
super(FB15k237, self).__init__(
"fb15k-237",
urls={
"train": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/FB15k-237/train.txt",
"valid": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/FB15k-237/valid.txt",
"test": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/FB15k-237/test.txt"
}
)
class WN18(Dataset):
"""
WN18 knowledge graph dataset.
Splits:
train, valid, test
"""
def __init__(self):
super(WN18, self).__init__(
"wn18",
urls={
"train": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/wn18/train.txt",
"valid": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/wn18/valid.txt",
"test": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/wn18/test.txt"
}
)
class WN18RR(Dataset):
"""
WN18RR knowledge graph dataset.
Splits:
train, valid, test
"""
def __init__(self):
super(WN18RR, self).__init__(
"wn18rr",
urls={
"train": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/wn18rr/train.txt",
"valid": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/wn18rr/valid.txt",
"test": "https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding/raw/master/data/wn18rr/test.txt"
}
)
class Wikidata5m(Dataset):
"""
Wikidata5m knowledge graph dataset.
Splits:
train, valid, test
"""
def __init__(self):
super(Wikidata5m, self).__init__(
"wikidata5m",
urls={
"train": "https://www.dropbox.com/s/dty6ufe1gg6keuc/wikidata5m.txt.gz?dl=1",
"valid": "https://www.dropbox.com/s/dty6ufe1gg6keuc/wikidata5m.txt.gz?dl=1",
"test": "https://www.dropbox.com/s/dty6ufe1gg6keuc/wikidata5m.txt.gz?dl=1",
"entity": "https://www.dropbox.com/s/bgmgvk8brjwpc9w/entity.txt.gz?dl=1",
"relation": "https://www.dropbox.com/s/37jxki93gguv0pp/relation.txt.gz?dl=1",
"alias2entity": [], # depends on `entity`
"alias2relation": [] # depends on `relation`
}
)
def train_preprocess(self, graph_file, train_file):
valid_file = train_file[:train_file.rfind("train.txt")] + "valid.txt"
test_file = train_file[:train_file.rfind("train.txt")] + "test.txt"
self.edge_split(graph_file, [train_file, valid_file, test_file], portions=[4000, 1, 1])
def valid_preprocess(self, graph_file, valid_file):
train_file = valid_file[:valid_file.rfind("valid.txt")] + "train.txt"
test_file = valid_file[:valid_file.rfind("valid.txt")] + "test.txt"
self.edge_split(graph_file, [train_file, valid_file, test_file], portions=[4000, 1, 1])
def test_preprocess(self, graph_file, test_file):
train_file = test_file[:test_file.rfind("valid.txt")] + "train.txt"
valid_file = test_file[:test_file.rfind("train.txt")] + "valid.txt"
self.edge_split(graph_file, [train_file, valid_file, test_file], portions=[4000, 1, 1])
def load_alias(self, alias_file):
alias2object = {}
ambiguous = set()
with open(alias_file, "r") as fin:
for line in fin:
tokens = line.strip().split("\t")
object = tokens[0]
for alias in tokens[1:]:
if alias in alias2object and alias2object[alias] != object:
ambiguous.add(alias)
alias2object[alias] = object
for alias in ambiguous:
alias2object.pop(alias)
return alias2object
def alias2entity_preprocess(self, save_file):
return self.load_alias(self.entity)
def alias2relation_preprocess(self, save_file):
return self.load_alias(self.relation)
class Freebase(Dataset):
"""
Freebase knowledge graph dataset.
Splits:
train
"""
def __init__(self):
super(Freebase, self).__init__(
"freebase",
urls={
"train": "http://commondatastorage.googleapis.com/freebase-public/rdf/freebase-rdf-latest.gz"
}
)
class MNIST(Dataset):
"""
MNIST dataset for visualization.
Splits:
train_image_data, train_label_data, test_image_data, test_label_data, image_data, label_data
"""
def __init__(self):
super(MNIST, self).__init__(
"mnist",
urls={
"train_image_data": "http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz",
"train_label_data": "http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz",
"test_image_data": "http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz",
"test_label_data": "http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz",
"image_data": [], # depends on `train_image_data` & `test_image_data`
"label_data": [] # depends on `train_label_data` & `test_label_data`
}
)
def train_image_data_preprocess(self, raw_file, save_file):
images = np.fromfile(raw_file, dtype=np.uint8)
return images[16:].reshape(-1, 28*28)
def train_label_data_preprocess(self, raw_file, save_file):
labels = np.fromfile(raw_file, dtype=np.uint8)
return labels[8:]
test_image_data_preprocess = train_image_data_preprocess
test_label_data_preprocess = train_label_data_preprocess
def image_data_preprocess(self, save_file):
return np.concatenate([self.train_image_data, self.test_image_data])
def label_data_preprocess(self, save_file):
        return np.concatenate([self.train_label_data, self.test_label_data])
"""
Functions to estimate observed ACA magnitudes
"""
import sys
import traceback
import logging
import collections
import scipy.stats
import scipy.special
import numpy as np
import numba
from astropy.table import Table, vstack
from Chandra.Time import DateTime
from cheta import fetch
from Quaternion import Quat
import Ska.quatutil
from mica.archive import aca_l0
from mica.archive.aca_dark.dark_cal import get_dark_cal_image
from chandra_aca.transform import count_rate_to_mag, pixels_to_yagzag
from cxotime import CxoTime
from kadi import events
from . import star_obs_catalogs
from agasc import get_star
logger = logging.getLogger('agasc.supplement')
MAX_MAG = 15
MASK = {
'mouse_bit': np.array([[True, True, True, True, True, True, True, True],
[True, True, False, False, False, False, True, True],
[True, False, False, False, False, False, False, True],
[True, False, False, False, False, False, False, True],
[True, False, False, False, False, False, False, True],
[True, False, False, False, False, False, False, True],
[True, True, False, False, False, False, True, True],
[True, True, True, True, True, True, True, True]])
}
EXCEPTION_MSG = {
-1: 'Unknown',
0: 'OK',
1: 'No level 0 data',
2: 'No telemetry data',
3: 'Mismatch in telemetry between aca_l0 and cheta',
4: 'Time mismatch between cheta and level0',
5: 'Failed job',
6: 'Suspect observation'
}
EXCEPTION_CODES = collections.defaultdict(lambda: -1)
EXCEPTION_CODES.update({msg: code for code, msg in EXCEPTION_MSG.items() if code > 0})
class MagStatsException(Exception):
def __init__(self, msg='', agasc_id=None, obsid=None, timeline_id=None, mp_starcat_time=None,
**kwargs):
super().__init__(msg)
self.error_code = EXCEPTION_CODES[msg]
self.msg = msg
self.agasc_id = agasc_id
self.obsid = obsid[0] if type(obsid) is list and len(obsid) == 1 else obsid
self.timeline_id = timeline_id
self.mp_starcat_time = (mp_starcat_time[0] if type(mp_starcat_time) is list
and len(mp_starcat_time) == 1 else mp_starcat_time)
for k in kwargs:
setattr(self, k, kwargs[k])
def __str__(self):
return f'MagStatsException: {self.msg} (agasc_id: {self.agasc_id}, ' \
f'obsid: {self.obsid}, mp_starcat_time: {self.mp_starcat_time})'
def __iter__(self):
yield 'error_code', self.error_code
yield 'msg', self.msg
yield 'agasc_id', self.agasc_id
yield 'obsid', self.obsid
yield 'timeline_id', self.timeline_id
yield 'mp_starcat_time', self.mp_starcat_time
def _magnitude_correction(time, mag_aca):
"""
Get a time-dependent correction to AOACMAG (prior to dynamic background subtraction).
:param time: Chandra.Time.DateTime
:param mag_aca: np.array
:return: np.array
"""
params = {"t_ref": "2011-01-01 12:00:00.000",
"p": [0.005899340720522751,
0.12029019332761458,
-2.99386247406073e-10,
-6.9534637950633265,
0.7916261423307238]}
q = params['p']
t_ref = DateTime(params['t_ref'])
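    # Functional form: dmag = q0 + (q1 + q2 * t) * exp(q3 + q4 * mag_aca); the correction is
    # forced to zero for samples earlier than t_ref.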
dmag = (q[0] + (q[1] + q[2] * np.atleast_1d(time))
* np.exp(q[3] + q[4] * np.atleast_1d(mag_aca)))
dmag[np.atleast_1d(time) < t_ref.secs] = 0
return np.squeeze(dmag)
def get_responsivity(time):
"""
ACA magnitude response over time.
This was estimated with bright stars that were observed more than a hundred times during the
mission. More details in the `responsivity notebook`_:
.. _responsivity notebook: https://nbviewer.jupyter.org/urls/cxc.cfa.harvard.edu/mta/ASPECT/jgonzalez/mag_stats/notebooks/03-high_mag_responsivity-fit.ipynb # noqa
:param time: float
Time in CXC seconds
:return:
"""
a, b, c = [3.19776750e-02, 5.35201479e+08, 8.49670756e+07]
return - a * (1 + scipy.special.erf((time - b) / c)) / 2
def get_droop_systematic_shift(magnitude):
"""
Difference between the magnitude determined from DC-subtracted image telemetry and
the catalog ACA magnitude.
The magnitude shift is time-independent. It depends only on the catalog magnitude and is zero
for bright stars. More details in the `droop notebook`_:
.. _droop notebook: https://nbviewer.jupyter.org/urls/cxc.cfa.harvard.edu/mta/ASPECT/jgonzalez/mag_stats/notebooks/04-DroopAfterSubtractionAndResponsivity-fit.ipynb # noqa
:param magnitude: float
Catalog ACA magnitude
:return:
"""
a, b = [11.25572, 0.59486369]
return np.exp((magnitude - a) / b)
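# Illustrative arithmetic (not from the source): for a catalog magnitude of 10.3 the shift is
# exp((10.3 - 11.25572) / 0.59486369) ~= 0.20 mag, and it falls below ~0.05 mag for stars
# brighter than about 9.5 mag.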
def rolling_mean(t, f, window, selection=None):
"""
Calculate the rolling mean of the 'f' array, using a centered square window in time.
:param t: np.array
the time array.
:param f: np.array
the array to average.
:param window: float
the window size (in the same units as the time array).
:param selection: np.array
An optional array of bool.
:return: np.array
An array with the same type and shape as 'f'
"""
result = np.ones_like(f) * np.nan
if selection is None:
selection = np.ones_like(f, dtype=bool)
assert len(f) == len(t)
assert len(f) == len(selection)
assert len(selection.shape) == 1
_rolling_mean_(result, t, f, window, selection)
return result
@numba.jit(nopython=True)
def _rolling_mean_(result, t, f, window, selection):
i_min = 0
i_max = 0
n = 0
f_sum = 0
for i in range(len(f)):
if not selection[i]:
continue
while i_max < len(f) and t[i_max] < t[i] + window / 2:
if selection[i_max]:
f_sum += f[i_max]
n += 1
i_max += 1
while t[i_min] < t[i] - window / 2:
if selection[i_min]:
f_sum -= f[i_min]
n -= 1
i_min += 1
result[i] = f_sum / n
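# Example (sketch; synthetic inputs): smoothing a noisy series with a centered 100-second window.
#     t = np.arange(0.0, 1000.0, 1.025)
#     f = np.random.normal(size=t.shape)
#     smoothed = rolling_mean(t, f, window=100, selection=np.ones_like(f, dtype=bool))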
def get_star_position(star, telem):
"""
Residuals for a given AGASC record at a given slot/time.
:param star:
Table Row of one AGASC entry
:param telem: table
Table with columns AOATTQT1, AOATTQT2, AOATTQT3, AOATTQT4.
:return:
"""
aca_misalign = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
rad_to_arcsec = 206264.81
q = np.array([telem['AOATTQT1'],
telem['AOATTQT2'],
telem['AOATTQT3'],
telem['AOATTQT4']]).transpose()
norm = np.sum(q**2, axis=1, keepdims=True)
# I am just normalizing q, just in case.
n = np.squeeze(np.sqrt(norm))
q[n != 0] /= np.sqrt(norm)[n != 0] # prevent warning when dividing by zero (it happens)
q_att = Quat(q=q)
ts = q_att.transform
star_pos_eci = Ska.quatutil.radec2eci(star['RA_PMCORR'], star['DEC_PMCORR'])
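    # Rotate the star's ECI unit vector into the ACA frame for every attitude sample, then
    # convert the y/z components of the rotated vectors to angles in arcsec.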
d_aca = np.dot(np.dot(aca_misalign, ts.transpose(0, 2, 1)),
star_pos_eci).transpose()
yag = np.arctan2(d_aca[:, 1], d_aca[:, 0]) * rad_to_arcsec
zag = np.arctan2(d_aca[:, 2], d_aca[:, 0]) * rad_to_arcsec
logger.debug(f' star position. AGASC_ID={star["AGASC_ID"]}, '
f'{len(yag)} samples, ({yag[0]}, {zag[0]})...')
return {
'yang_star': yag,
'zang_star': zag,
}
# this is in case one has to return empty telemetry
_telem_dtype = [('times', 'float64'),
('IMGSIZE', 'int32'),
('IMGROW0', 'int16'),
('IMGCOL0', 'int16'),
('IMGRAW', 'float32'),
('AOACASEQ', '<U4'),
('AOPCADMD', '<U4'),
('AOATTQT1', 'float64'),
('AOATTQT2', 'float64'),
('AOATTQT3', 'float64'),
('AOATTQT4', 'float64'),
('AOACIIR', '<U3'),
('AOACISP', '<U3'),
('AOACYAN', 'float64'),
('AOACZAN', 'float64'),
('AOACMAG', 'float32'),
('AOACFCT', '<U4'),
('mags_img', 'float64'),
('yang_img', 'float64'),
('zang_img', 'float64'),
('yang_star', 'float64'),
('zang_star', 'float64'),
('mags', 'float64'),
('dy', 'float64'),
('dz', 'float64'),
('dr', 'float64')]
def get_telemetry(obs):
"""
Get all telemetry relevant for the magnitude estimation task.
This gets:
- AOACASEQ
- AOPCADMD
- AOACMAG (ACA estimated magnitude)
- AOACIIR (ACA ionizing radiation flag)
- AOACISP (ACA saturated pixel flag)
MSIDs are renamed to remove the slot number.
This assumes all MSIDs occur at the same times (they do)
:param obs: astropy.table.Row
It must have the following columns: 'agasc_id', 'mp_starcat_time', 'mag', 'slot'
:return: dict
"""
star_obs_catalogs.load()
dwell = star_obs_catalogs.DWELLS_NP[star_obs_catalogs.DWELLS_MAP[obs['mp_starcat_time']]]
star = get_star(obs['agasc_id'], date=dwell['tstart'], use_supplement=False)
start = dwell['tstart']
stop = dwell['tstop']
slot = obs['slot']
logger.debug(f' Getting telemetry for AGASC ID={obs["agasc_id"]}, OBSID={obs["obsid"]}, '
f'mp_starcat_time={obs["mp_starcat_time"]}')
# first we get slot data from mica and magnitudes from cheta and match them in time
# to match them in time, we assume they come in steps of 1.025 seconds, starting from the first
# time sample.
slot_data_cols = ['TIME', 'END_INTEG_TIME', 'IMGSIZE',
'IMGROW0', 'IMGCOL0', 'TEMPCCD', 'IMGRAW']
slot_data = aca_l0.get_slot_data(start, stop, slot=obs['slot'],
centered_8x8=True, columns=slot_data_cols)
names = ['AOACASEQ', 'AOPCADMD', 'CVCMJCTR', 'CVCMNCTR',
f'AOACIIR{slot}', f'AOACISP{slot}', f'AOACMAG{slot}', f'AOACFCT{slot}',
f'AOACZAN{slot}', f'AOACYAN{slot}'] + [f'AOATTQT{i}' for i in range(1, 5)]
msids = fetch.Msidset(names, start, stop)
if len(slot_data) == 0:
raise MagStatsException('No level 0 data',
agasc_id=obs["agasc_id"],
obsid=obs["obsid"],
mp_starcat_time=obs["mp_starcat_time"],
time_range=[start, stop],
slot=obs['slot'])
times = msids[f'AOACMAG{slot}'].times
tmin = np.min([np.min(slot_data['END_INTEG_TIME']), np.min(times)])
t1 = np.round((times - tmin) / 1.025)
t2 = np.round((slot_data['END_INTEG_TIME'].data - tmin) / 1.025)
_, i1, i2 = np.intersect1d(t1, t2, return_indices=True)
times = times[i1]
slot_data = slot_data[i2]
if len(times) == 0:
# the intersection was null.
raise MagStatsException('Either no telemetry or no matching times between cheta and level0',
agasc_id=obs["agasc_id"],
obsid=obs["obsid"],
mp_starcat_time=obs["mp_starcat_time"])
# Now that we have the times, we get the rest of the MSIDs
telem = {
'times': times
}
telem.update({k: slot_data[k] for k in slot_data_cols[2:]})
telem.update({
name: msids[name].vals[np.in1d(msids[name].times, times)]
for name in names
})
# get the normal sun and safe sun mode intervals, which will be removed
excluded_ranges = []
for event in [events.normal_suns, events.safe_suns]:
excluded_ranges += event.intervals(times[0] - 4, times[-1] + 4)
excluded_ranges = [(CxoTime(t[0]).cxcsec, CxoTime(t[1]).cxcsec) for t in excluded_ranges]
if excluded_ranges:
excluded = np.zeros_like(times, dtype=bool)
for excluded_range in excluded_ranges:
excluded |= ((times >= excluded_range[0]) & (times <= excluded_range[1]))
telem.update({k: telem[k][~excluded] for k in telem})
slot_data = slot_data[~excluded]
if len(slot_data) == 0:
        # nothing remains after removing the excluded ranges.
raise MagStatsException('Nothing left after removing excluded ranges',
agasc_id=obs["agasc_id"],
obsid=obs["obsid"],
mp_starcat_time=obs["mp_starcat_time"])
for name in ['AOACIIR', 'AOACISP', 'AOACYAN', 'AOACZAN', 'AOACMAG', 'AOACFCT']:
telem[name] = telem[f'{name}{slot}']
del telem[f'{name}{slot}']
for name in ['AOACIIR', 'AOACISP']:
telem[name] = np.char.rstrip(telem[name])
ok = (telem['AOACASEQ'] == 'KALM') & (telem['AOACIIR'] == 'OK') & \
(telem['AOPCADMD'] == 'NPNT') & (telem['AOACFCT'] == 'TRAK')
# etc...
logger.debug(' Adding magnitude estimates')
telem.update(get_mag_from_img(slot_data, start, ok))
logger.debug(' Adding star position')
telem.update(get_star_position(star=star, telem=telem))
logger.debug(' Correcting for droop')
droop_shift = get_droop_systematic_shift(star['MAG_ACA'])
logger.debug(' Correcting for responsivity')
responsivity = get_responsivity(start)
telem['mags'] = telem['mags_img'] - responsivity - droop_shift
telem['mags'][~ok] = 0.
telem['ok'] = ok
telem['dy'] = np.ones(len(ok)) * np.inf
telem['dz'] = np.ones(len(ok)) * np.inf
telem['dr'] = np.ones(len(ok)) * np.inf
yang = telem['yang_img'] - telem['yang_star']
zang = telem['zang_img'] - telem['zang_star']
rang = np.sqrt(yang**2 + zang**2)
if np.any(ok & (rang < 10)):
y25, y50, y75 = np.quantile(yang[ok & (rang < 10)], [0.25, 0.5, 0.75])
z25, z50, z75 = np.quantile(zang[ok & (rang < 10)], [0.25, 0.5, 0.75])
centroid_outlier = ((yang > y75 + 3 * (y75 - y25))
| (yang < y25 - 3 * (y75 - y25))
| (zang > z75 + 3 * (z75 - z25))
| (zang < z25 - 3 * (z75 - z25)))
telem['dy'] = yang - np.mean(yang[ok & ~centroid_outlier])
telem['dz'] = zang - np.mean(zang[ok & ~centroid_outlier])
telem['dr'] = (telem['dy'] ** 2 + telem['dz'] ** 2) ** .5
return telem
def get_telemetry_by_agasc_id(agasc_id, obsid=None, ignore_exceptions=False):
"""
Get all telemetry relevant for the magnitude estimation, given an AGASC ID.
This gets all observations of a given star, it gets the telemetry for each, and stacks them.
:param agasc_id: int
:param obsid: int (optional)
:param ignore_exceptions: bool
if True, any exception is ignored. Useful in some cases. Default is False.
:return: dict
"""
logger.debug(f' Getting telemetry for AGASC ID={agasc_id}')
star_obs_catalogs.load()
if obsid is None:
obs = star_obs_catalogs.STARS_OBS[
(star_obs_catalogs.STARS_OBS['agasc_id'] == agasc_id)]
else:
obs = star_obs_catalogs.STARS_OBS[(star_obs_catalogs.STARS_OBS['agasc_id'] == agasc_id)
& (star_obs_catalogs.STARS_OBS['obsid'] == obsid)]
if len(obs) > 1:
obs = obs.loc['mp_starcat_time', sorted(obs['mp_starcat_time'])]
telem = []
for i, o in enumerate(obs):
try:
t = Table(get_telemetry(o))
t['obsid'] = o['obsid']
t['agasc_id'] = agasc_id
telem.append(t)
except Exception:
if not ignore_exceptions:
logger.info(f'{agasc_id=}, obsid={o["obsid"]} failed')
exc_type, exc_value, exc_traceback = sys.exc_info()
trace = traceback.extract_tb(exc_traceback)
logger.info(f'{exc_type.__name__} {exc_value}')
for step in trace:
logger.info(f' in {step.filename}:{step.lineno}/{step.name}:')
logger.info(f' {step.line}')
raise
return vstack(telem)
def add_obs_info(telem, obs_stats):
"""
Add observation-specific information to a telemetry table (ok flag, and outlier flag).
This is done as part of get_agasc_id_stats. It is a convenience for writing reports.
:param telem: list of tables
One or more telemetry tables (potentially many observations)
:param obs_stats: table
The result of calc_obs_stats.
:return:
"""
logger.debug(' Adding observation info to telemetry...')
obs_stats['obs_ok'] = (
(obs_stats['n'] > 10)
& (obs_stats['f_track'] > 0.3)
& (obs_stats['lf_variability_100s'] < 1)
)
obs_stats['comments'] = np.zeros(len(obs_stats), dtype='<U80')
telem = vstack(telem)
telem['obs_ok'] = True
telem['obs_outlier'] = False
for s in obs_stats:
obsid = s['obsid']
o = (telem['obsid'] == obsid)
telem['obs_ok'][o] = np.ones(np.sum(o), dtype=bool) * s['obs_ok']
if (np.any(telem['ok'][o]) and s['f_track'] > 0
and np.isfinite(s['q75']) and np.isfinite(s['q25'])):
iqr = s['q75'] - s['q25']
telem['obs_outlier'][o] = (
telem[o]['ok'] & (iqr > 0)
& ((telem[o]['mags'] < s['q25'] - 1.5 * iqr)
| (telem[o]['mags'] > s['q75'] + 1.5 * iqr))
)
logger.debug(f' Adding observation info to telemetry {obsid=}')
return telem
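# staggered_aca_slice copies, for each sample i, the 8x8 window of `array_in` starting at
# (row[i], col[i]) into array_out[i]; windows that would run past the edge of the 1024x1024
# input are skipped, leaving that output slice untouched.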
@numba.jit(nopython=True)
def staggered_aca_slice(array_in, array_out, row, col):
for i in np.arange(len(row)):
if row[i] + 8 < 1024 and col[i] + 8 < 1024:
array_out[i] = array_in[row[i]:row[i] + 8, col[i]:col[i] + 8]
def get_mag_from_img(slot_data, t_start, ok=True):
"""
Vectorized estimate of the magnitude from mica archive image telemetry data.
:param slot_data: astropy.Table.
The data returned by mica.archive.aca_l0.get_slot_data
:param t_start:
The starting time of the observation (by convention, the starcat time)
:param ok: np.array.
        A boolean array with the same length as slot_data.
Only magnitudes for entries with ok=True are calculated. The rest are set to MAX_MAG.
:return:
"""
logger.debug(' magnitude from images...')
dark_cal = get_dark_cal_image(t_start, 'nearest',
t_ccd_ref=np.mean(slot_data['TEMPCCD'] - 273.16),
aca_image=False)
    # All images are treated as 8x8 with a centered mask; imgrow/imgcol always refer to the corner of the 8x8 window.
imgrow_8x8 = np.where(slot_data['IMGSIZE'] == 8,
slot_data['IMGROW0'],
slot_data['IMGROW0'] - 1
)
imgcol_8x8 = np.where(slot_data['IMGSIZE'] == 8,
slot_data['IMGCOL0'],
slot_data['IMGCOL0'] - 1
)
# subtract closest dark cal
dark = np.zeros([len(slot_data), 8, 8], dtype=np.float64)
staggered_aca_slice(dark_cal.astype(float), dark, 512 + imgrow_8x8, 512 + imgcol_8x8)
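    # Scale the dark-cal image to counts per readout before subtracting. The 1.696/5 factor is
    # read here as (integration time in seconds) / (gain in e-/DN); that unit interpretation is
    # an assumption, not something stated in this file.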
img_sub = slot_data['IMGRAW'] - dark * 1.696 / 5
img_sub.mask |= MASK['mouse_bit']
# calculate magnitude
mag = np.ones(len(slot_data)) * MAX_MAG
counts = np.ma.sum(np.ma.sum(img_sub, axis=1), axis=1)
m = ok & np.isfinite(counts) & (counts > 0)
mag[m] = count_rate_to_mag(counts[m] * 5 / 1.7)
mag[mag > MAX_MAG] = MAX_MAG
# this extra step is to investigate the background scale
dark = np.ma.array(dark * 1.696 / 5, mask=img_sub.mask)
img_raw = np.ma.array(slot_data['IMGRAW'], mask=img_sub.mask)
dark_count = np.ma.sum(np.ma.sum(dark, axis=1), axis=1)
img_count = np.ma.sum(np.ma.sum(img_raw, axis=1), axis=1)
# centroids
yag = np.zeros(len(slot_data))
zag = np.zeros(len(slot_data))
pixel_center = np.arange(8) + 0.5
projected_image = np.ma.sum(slot_data['IMGRAW'], axis=1)
col = np.ma.sum(pixel_center * projected_image, axis=1) / np.ma.sum(projected_image, axis=1)
projected_image = np.ma.sum(slot_data['IMGRAW'], axis=2)
row = np.ma.sum(pixel_center * projected_image, axis=1) / np.ma.sum(projected_image, axis=1)
y_pixel = row + imgrow_8x8
z_pixel = col + imgcol_8x8
yag[m], zag[m] = pixels_to_yagzag(y_pixel[m], z_pixel[m])
logger.debug(f' magnitude from images... {len(mag)} samples: {mag[0]:.2f}...')
return {
'mags_img': mag,
'yang_img': yag,
'zang_img': zag,
'counts_img': img_count,
'counts_dark': dark_count
}
OBS_STATS_INFO = {
'agasc_id': 'AGASC ID of the star',
'obsid': 'OBSID corresponding to the dwell when the observation is made',
'slot': 'Slot number',
'type': 'GUI/ACQ/BOT',
'mp_starcat_time':
'Timestamp (from kadi.commands) for starcat command '
'preceding the dwell of an observation.',
'timeline_id': 'starcat command timeline_id from kadi.commands.get_cmds',
'tstart': 'Dwell start time from kadi.events.manvrs',
'tstop': 'Dwell end time from kadi.events.manvrs',
'mag_correction': 'Overall correction applied to the magnitude estimate',
'responsivity': 'Responsivity correction applied to the magnitude estimate',
'droop_shift': 'Droop shift correction applied to the magnitude estimate',
'mag_aca': 'ACA star magnitude from the AGASC catalog',
'mag_aca_err': 'ACA star magnitude uncertainty from the AGASC catalog',
'row':
        'Expected row number, based on star location and yang/zang from mica.archive.starcheck DB',
'col':
        'Expected col number, based on star location and yang/zang from mica.archive.starcheck DB',
'mag_img': 'Magnitude estimate from image telemetry (uncorrected)',
'mag_obs': 'Estimated ACA star magnitude',
'mag_obs_err': 'Estimated ACA star magnitude uncertainty',
'aoacmag_mean': 'Mean of AOACMAG from telemetry',
'aoacmag_err': 'Standard deviation of AOACMAG from telemetry',
'aoacmag_q25': '1st quartile of AOACMAG from telemetry',
'aoacmag_median': 'Median of AOACMAG from telemetry',
'aoacmag_q75': '3rd quartile of AOACMAG from telemetry',
'counts_img': 'Raw counts from image telemetry, summed over the mouse-bit window',
'counts_dark': 'Expected counts from background, summed over the mouse-bit window',
'f_kalman':
'Fraction of all samples where AOACASEQ == "KALM" and AOPCADMD == "NPNT" (n_kalman/n)',
'f_track':
'Fraction of kalman samples with AOACIIR == "OK" and AOACFCT == "TRAK" (n_track/n_kalman)',
'f_dr5': 'Fraction of "track" samples with angle residual less than 5 arcsec (n_dr5/n_track)',
'f_dr3': 'Fraction of "track" samples with angle residual less than 3 arcsec (n_dr3/n_track)',
'f_ok': 'Fraction of all samples with (kalman & track & dr5) == True (n_ok/n)',
'q25': '1st quartile of estimated magnitude',
'median': 'Median of estimated magnitude',
    'q75': '3rd quartile of estimated magnitude',
'mean': 'Mean of estimated magnitude',
    'mean_err': 'Uncertainty in the mean of estimated magnitude',
'std': 'Standard deviation of estimated magnitude',
'skew': 'Skewness of estimated magnitude',
'kurt': 'Kurtosis of estimated magnitude',
't_mean': 'Mean of estimated magnitude after removing outliers',
't_mean_err': 'Uncertainty in the mean of estimated magnitude after removing outliers',
't_std': 'Standard deviation of estimated magnitude after removing outliers',
't_skew': 'Skewness of estimated magnitude after removing outliers',
't_kurt': 'Kurtosis of estimated magnitude after removing outliers',
'n': 'Number of samples',
'n_ok': 'Number of samples with (kalman & track & dr5) == True',
'outliers': 'Number of outliers (+- 3 IQR)',
'lf_variability_100s': 'Rolling mean of OK magnitudes with a 100 second window',
'lf_variability_500s': 'Rolling mean of OK magnitudes with a 500 second window',
'lf_variability_1000s': 'Rolling mean of OK magnitudes with a 1000 second window',
'tempccd': 'CCD temperature',
'dr_star': 'Angle residual',
'obs_ok': 'Boolean flag: everything OK with this observation',
'obs_suspect': 'Boolean flag: this observation is "suspect"',
'obs_fail': 'Boolean flag: a processing error when estimating magnitude for this observation',
'comments': '',
'w': 'Weight to be used on a weighted mean (1/std)',
'mean_corrected': 'Corrected mean used in weighted mean (t_mean + mag_correction)',
'weighted_mean': 'Mean weighted by inverse of standard deviation (mean/std)',
}
def get_obs_stats(obs, telem=None):
"""
Get summary magnitude statistics for an observation.
:param obs: astropy.table.Row
a "star observation" row. From the join of starcheck catalog and starcat commands
It must have the following columns: 'agasc_id', 'mp_starcat_time', 'mag', 'slot'
:param telem: dict
Dictionary with telemetry (output of get_telemetry)
:return: dict
dictionary with stats
"""
logger.debug(f' Getting OBS stats for AGASC ID {obs["agasc_id"]},'
                 f' OBSID {obs["obsid"]} at {obs["mp_starcat_time"]}')
star_obs_catalogs.load()
star = get_star(obs['agasc_id'], use_supplement=False)
dwell = star_obs_catalogs.DWELLS_NP[star_obs_catalogs.DWELLS_MAP[obs['mp_starcat_time']]]
start = dwell['tstart']
stop = dwell['tstop']
stats = {k: obs[k] for k in
['agasc_id', 'obsid', 'slot', 'type', 'mp_starcat_time', 'timeline_id']}
stats['mp_starcat_time'] = stats['mp_starcat_time']
droop_shift = get_droop_systematic_shift(star['MAG_ACA'])
responsivity = get_responsivity(start)
stats.update({'tstart': start,
'tstop': stop,
'mag_correction': - responsivity - droop_shift,
'responsivity': responsivity,
'droop_shift': droop_shift,
'mag_aca': star['MAG_ACA'],
'mag_aca_err': star['MAG_ACA_ERR'] / 100,
'row': obs['row'],
'col': obs['col'],
})
# other default values
stats.update({
'mag_img': np.inf,
'mag_obs': np.inf,
'mag_obs_err': np.inf,
'aoacmag_mean': np.inf,
'aoacmag_err': np.inf,
'aoacmag_q25': np.inf,
'aoacmag_median': np.inf,
'aoacmag_q75': np.inf,
'counts_img': np.inf,
'counts_dark': np.inf,
'f_kalman': 0.,
'f_track': 0.,
'f_dr5': 0.,
'f_dr3': 0.,
'f_ok': 0.,
'q25': np.inf,
'median': np.inf,
'q75': np.inf,
'mean': np.inf,
'mean_err': np.inf,
'std': np.inf,
'skew': np.inf,
'kurt': np.inf,
't_mean': np.inf,
't_mean_err': np.inf,
't_std': np.inf,
't_skew': np.inf,
't_kurt': np.inf,
'n': 0,
'n_ok': 0,
'outliers': -1,
'lf_variability_100s': np.inf,
'lf_variability_500s': np.inf,
'lf_variability_1000s': np.inf,
'tempccd': np.nan,
'dr_star': np.inf,
})
if telem is None:
telem = get_telemetry(obs)
if len(telem) > 0:
stats.update(calc_obs_stats(telem))
logger.debug(f' slot={stats["slot"]}, f_ok={stats["f_ok"]:.3f}, '
f'f_track={stats["f_track"]:.3f}, f_dr3={stats["f_dr3"]:.3f},'
f' mag={stats["mag_obs"]:.2f}')
return stats
def calc_obs_stats(telem):
"""
Get summary magnitude statistics for an observation.
:param telem: dict
Dictionary with telemetry (output of get_telemetry)
:return: dict
dictionary with stats
"""
times = telem['times']
kalman = (telem['AOACASEQ'] == 'KALM') & (telem['AOPCADMD'] == 'NPNT')
track = (telem['AOACIIR'] == 'OK') & (telem['AOACFCT'] == 'TRAK')
dr3 = (telem['dr'] < 3)
dr5 = (telem['dr'] < 5)
f_kalman = np.sum(kalman) / len(kalman)
n_kalman = np.sum(kalman)
f_track = np.sum(kalman & track) / n_kalman if n_kalman else 0
n_track = np.sum(kalman & track)
    f_3 = (np.sum(kalman & track & dr3) / n_track) if n_track else 0  # source is truncated after this call; denominator mirrors f_track above (assumption)
from abc import ABC, abstractmethod
from sklearn import metrics
import numpy as np
from nnrf.utils import one_hot, check_XY
from nnrf.utils._base import Base
def get_metrics(name):
"""
Lookup table of default metrics.
Parameters
----------
name : Metric, None, str
Metric to look up. Must be one of:
- 'accuracy' : Accuracy.
- 'precision' : Precision.
- 'recall' : Recall.
- 'f-score' : F1-Score.
- 'roc-auc' : ROC-AUC.
- Metric : A custom implementation.
- None : Return None.
Custom Metrics must implement `score` which
by default should return a single float value.
Returns
-------
metric : Metric or None
The metric.
"""
if name == 'accuracy' : return Accuracy()
elif name == 'precision' : return Precision()
elif name == 'recall' : return Recall()
elif name == 'f-score' : return FScore()
elif name == 'roc-auc' : return ROCAUC()
elif isinstance(name, (type(None), Metric)) : return name
else : raise ValueError("Invalid metric function")
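# Example (sketch): resolving a metric by name and scoring predictions.
#     metric = get_metrics('f-score')
#     score = metric.score(Y_hat, Y)  # Y_hat: per-class scores or one-hot predictions, Y: one-hot labels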
class Metric(Base, ABC):
"""
Base Metric class.
"""
def __init__(self):
super().__init__()
self.name = "metric"
@abstractmethod
def calculate(self, Y_hat, Y, average='micro', weights=None):
"""
Calculate metric of given labels, `Y_hat`,
compared to ground truth, `Y`. By default, gives
overall metric, or micro-average.
Parameters
----------
Y_hat : array-like, shape=(n_samples,)
Prediction labels.
Y : array-like, shape=(n_samples,)
Ground truth labels.
average : {'micro', 'macro', 'weighted', None}, default='micro'
Method to average metric. Must be one of:
- 'micro' : Overall metric.
- 'macro' : Unweighted mean metric across classes.
- 'weighted' : Weighted mean metric across classes.
- None : Metrics for each class.
weights : array-like, shape=(n_samples,), default=None
Sample weights.
Returns
-------
metric : dict, ndarray, float
The metric conducted with the given method.
Returns a dictionary if `average` is None,
an ndarray if `average` is `macro` or `weighted`,
or a float if `average` is `micro` or if there are
no samples.
"""
raise NotImplementedError("No calculate function implemented")
def score(self, Y_hat, Y, weights=None):
"""
Calculate overall score of given labels, `Y_hat`,
compared to ground truth, `Y`.
Parameters
----------
Y_hat : array-like, shape=(n_samples,)
Prediction labels.
Y : array-like, shape=(n_samples,)
Ground truth labels.
weights : array-like, shape=(n_samples,), default=None
Sample weights.
Returns
-------
score : float, range=[0,1]
The score.
"""
return self.calculate(Y_hat, Y, weights=weights)
class Accuracy(Metric):
"""
Accuracy Metric.
"""
def __init__(self):
super().__init__()
self.name = "accuracy"
def calculate(self, Y_hat, Y, weights=None):
Y_hat, Y = check_XY(X=Y_hat, Y=Y)
Y_hat, Y = np.argmax(Y_hat, axis=1), np.argmax(Y, axis=1)
return metrics.accuracy_score(Y, Y_hat, sample_weight=weights)
class Precision(Metric):
"""
Precision Metric.
"""
def __init__(self):
super().__init__()
self.name = "precision"
def calculate(self, Y_hat, Y, average='micro', weights=None):
Y_hat, Y = check_XY(X=Y_hat, Y=Y)
Y_hat, Y = np.argmax(Y_hat, axis=1), np.argmax(Y, axis=1)
return metrics.precision_score(Y, Y_hat, average=average,
sample_weight=weights)
class Recall(Metric):
"""
Recall Metric.
"""
def __init__(self):
super().__init__()
self.name = "recall"
def calculate(self, Y_hat, Y, average='micro', weights=None):
Y_hat, Y = check_XY(X=Y_hat, Y=Y)
Y_hat, Y = np.argmax(Y_hat, axis=1), np.argmax(Y, axis=1)
return metrics.recall_score(Y, Y_hat, average=average,
sample_weight=weights)
class FScore(Metric):
"""
F-Score Metric.
Parameters
----------
beta : float, default=1
Weight of recall in F-score.
"""
def __init__(self, beta=1):
super().__init__()
self.name = "f-score"
self.beta = beta
def calculate(self, Y_hat, Y, average='micro', weights=None):
Y_hat, Y = check_XY(X=Y_hat, Y=Y)
Y_hat, Y = np.argmax(Y_hat, axis=1), np.argmax(Y, axis=1)
return metrics.fbeta_score(Y, Y_hat, self.beta, average=average,
sample_weight=weights)
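# Hedged sketch (not part of the original nnrf module): FScore's `beta`
# weights recall relative to precision via
#   F_beta = (1 + beta^2) * P * R / (beta^2 * P + R),
# which the small helper below computes directly for illustration.
def _example_fbeta(precision, recall, beta=1.0):
	b2 = beta ** 2
	return (1.0 + b2) * precision * recall / (b2 * precision + recall)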
class ROCAUC(Metric):
"""
Area under the Receiver Operative Curve (ROC AUC) Metric.
"""
def __init__(self, multi_class='ovr'):
super().__init__()
self.name = 'roc-auc'
self.multi_class = multi_class
def calculate(self, Y_hat, Y, average='macro', weights=None):
Y_hat, Y = check_XY(X=Y_hat, Y=Y)
Y_hat, Y = np.argmax(Y_hat, axis=1), np.argmax(Y, axis=1)
"""
nuplandb models, schema version: 3.0, code generated by schema_gen.py.
DO NOT MODIFY THIS FILE UNLESS YOU KNOW WHAT YOU ARE DOING!
"""
from __future__ import annotations # postpone evaluation of annotations
import bisect
import logging
import os.path as osp
from typing import Any, BinaryIO, Dict, List, NamedTuple, Optional, Sequence, Set, Tuple
import cv2
import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np
import numpy.typing as npt
import PIL
from cachetools import LRUCache, cached
from cachetools.keys import hashkey
from matplotlib.axes import Axes
from nuplan.common.actor_state.agent import Agent, AgentType
from nuplan.common.actor_state.oriented_box import OrientedBox
from nuplan.common.actor_state.state_representation import StateSE2, StateVector2D
from nuplan.database.utils.label.label import Label
from nuplan.database.common import data_types, sql_types
from nuplan.database.common.db import Table
from nuplan.database.common.utils import default_color, default_color_np, simple_repr
from nuplan.database.maps_db.layer import MapLayer
from nuplan.database.maps_db.utils import build_lane_segments_from_blps, connect_blp_predecessor, connect_blp_successor
from nuplan.database.nuplan_db.frame import Frame
from nuplan.database.nuplan_db.utils import crop_rect, get_boxes, get_candidates, get_future_box_sequence, \
pack_future_boxes, render_on_map
from nuplan.database.utils.boxes.box3d import Box3D, BoxVisibility, box_in_image
from nuplan.database.utils.geometry import quaternion_yaw, view_points
from nuplan.database.utils.label.utils import local2agent_type, raw_mapping
from nuplan.database.utils.pointclouds.lidar import LidarPointCloud
from pyquaternion import Quaternion
from scipy import ndimage
from scipy.spatial.transform import Rotation as R
from sqlalchemy import Column, func, inspect
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy.schema import ForeignKey
from sqlalchemy.types import Boolean, Float, Integer, PickleType, String, Text
__all__ = ['Category', 'Log', 'Camera', 'Lidar', 'EgoPose', 'Image', 'LidarPc', 'Track', 'LidarBox',
'Scene', 'ScenarioTag', 'TrafficLightStatus']
Base = declarative_base()
MICROSECONDS_IN_A_SECOND = 1000000
LRU_CACHE_SIZE = 20480
logger = logging.getLogger()
class Category(Base): # type: ignore
"""
A category within our taxonomy. Includes both things (e.g. cars) and stuff (e.g. lanes, sidewalks).
Subcategories are delineated by a period.
"""
__tablename__ = "category"
token = Column(sql_types.UUID, primary_key=True) # type: str
name = Column(String(64)) # type: str
description = Column(Text) # type: str
tracks = relationship("Track", foreign_keys="Track.category_token",
back_populates="category") # type: List[Track]
@property
def table(self) -> Table[Category]:
"""
Get the category table.
self._table is injected at runtime:
db = self._table.db # get db instance
session = self._table.db.session # get session instance
:return: The category table.
"""
return self._table # type: ignore
def __repr__(self) -> str:
"""
Return the string representation.
:return: The string representation.
"""
desc: str = simple_repr(self)
return desc
@property
def color(self) -> Tuple[int, int, int]:
"""
Get category color.
:return: The category color tuple.
"""
c: Tuple[int, int, int] = default_color(self.name)
return c
@property
def color_np(self) -> npt.NDArray[np.float64]:
"""
Get category color in numpy.
:return: The category color in numpy.
"""
c: npt.NDArray[np.float64] = default_color_np(self.name)
return c
class Log(Base): # type: ignore
"""
Information about the log from which the data was extracted.
"""
__tablename__ = "log"
token = Column(sql_types.UUID, primary_key=True) # type: str
vehicle_name = Column(String(64)) # type: str
vehicle_type = Column(String(64)) # type: str
date = Column(String(64)) # type: str
timestamp = Column(Integer) # type: int
logfile = Column(String(64)) # type: str
location = Column(String(64)) # type: str
map_version = Column(String(64)) # type: str
cameras = relationship("Camera", foreign_keys="Camera.log_token", back_populates="log") # type: List[Camera]
ego_poses = relationship("EgoPose", foreign_keys="EgoPose.log_token", back_populates="log") # type: List[EgoPose]
lidars = relationship("Lidar", foreign_keys="Lidar.log_token", back_populates="log") # type: List[Lidar]
scenes = relationship("Scene", foreign_keys="Scene.log_token", back_populates="log") # type: List[Scene]
def map_layer(self, layer: str) -> MapLayer:
"""
Get map layer by name.
:param layer: The name of the map layer.
:return: Map layer.
"""
return self.table.db.maps_db.load_layer(self.map_version, layer) # type: ignore
def list_map_layers(self) -> None:
""" List the name of all map layers. """
logger.info(self.table.db.maps_db.layer_names(self.map_version)) # type: ignore
def map_vector_layer(self, layer: str) -> gpd.geodataframe:
"""
Get vector map layer by name.
:param layer: The name of the vector map layer.
:return: Vector map layer.
"""
# TODO: Remove temporary workaround once map_version is cleaned
map_version = self.map_version.replace('.gpkg', '')
return self.table.db.maps_db.load_vector_layer(map_version, layer) # type: ignore
def list_map_vector_layers(self) -> Sequence[str]:
"""
Get the name of all vector map layers.
:return: The name of all vector map layers.
"""
return self.table.db.maps_db.vector_layer_names(self.map_version) # type: ignore
@property
def table(self) -> Table[Log]:
"""
Get the log table.
self._table is injected at runtime:
db = self._table.db # get db instance
session = self._table.db.session # get session instance
:return: The log table.
"""
return self._table # type: ignore
@property
def images(self) -> List[Image]:
"""
Returns list of Images contained in the Log.
:return: The list of Images contained in the log.
"""
log_images = []
for camera in self.cameras:
log_images.extend(camera.images)
return log_images
@property
def lidar_pcs(self) -> List[LidarPc]:
"""
Returns list of Lidar PCs in the Log.
:return: The list of Lidar PCs in the log.
"""
log_lidar_pcs = []
for lidar in self.lidars:
log_lidar_pcs.extend(lidar.lidar_pcs)
return log_lidar_pcs
@property
def lidar_boxes(self) -> List[LidarBox]:
"""
Returns list of Lidar Boxes in the Log.
:return: The list of Lidar Boxes in the log.
"""
log_lidar_boxes = []
for lidar_pc in self.lidar_pcs:
log_lidar_boxes.extend(lidar_pc.lidar_boxes)
return log_lidar_boxes
def __repr__(self) -> str:
"""
Return the string representation.
:return: The string representation.
"""
desc: str = simple_repr(self)
return desc
class Camera(Base): # type: ignore
"""
Defines a calibrated camera used to record a particular log.
"""
__tablename__ = "camera"
token = Column(sql_types.UUID, primary_key=True) # type: str
log_token = Column(sql_types.UUID, ForeignKey("log.token"), nullable=False) # type: str
channel = Column(String(64)) # type: str
model = Column(String(64)) # type: str
translation = Column(sql_types.SqlTranslation) # type: data_types.Translation
rotation = Column(sql_types.SqlRotation) # type: data_types.Rotation
intrinsic = Column(sql_types.SqlCameraIntrinsic) # type: data_types.CameraIntrinsic
distortion = Column(PickleType)  # type: List[float]
width = Column(Integer) # type: int
height = Column(Integer) # type: int
log = relationship("Log", foreign_keys=[log_token], back_populates="cameras") # type: Log
images = relationship("Image", foreign_keys="Image.camera_token", back_populates="camera") # type: List[Image]
@property
def table(self) -> Table[Camera]:
"""
Get the camera table.
self._table is injected at runtime:
db = self._table.db # get db instance
session = self._table.db.session # get session instance
:return: The camera table.
"""
return self._table # type: ignore
def __repr__(self) -> str:
"""
Return the string representation.
:return: The string representation.
"""
desc: str = simple_repr(self)
return desc
@property
def intrinsic_np(self) -> npt.NDArray[np.float64]:
"""
Get the intrinsic in numpy format.
:return: <np.float: 3, 3> Camera intrinsic.
"""
return np.array(self.intrinsic)
@property
def distortion_np(self) -> npt.NDArray[np.float64]:
"""
Get the distortion in numpy format.
:return: <np.float: N> Camera distortion.
"""
return np.array(self.distortion)
@property
def translation_np(self) -> npt.NDArray[np.float64]:
"""
Get the translation in numpy format.
:return: <np.float: 3> Translation.
"""
return np.array(self.translation)
@property
def quaternion(self) -> Quaternion:
"""
Get the rotation in quaternion.
:return: Rotation in quaternion.
"""
return Quaternion(self.rotation)
@property
def trans_matrix(self) -> npt.NDArray[np.float64]:
"""
Get the transformation matrix.
:return: <np.float: 4, 4>. Transformation matrix.
"""
tm: npt.NDArray[np.float64] = self.quaternion.transformation_matrix
tm[:3, 3] = self.translation_np
return tm
@property
def trans_matrix_inv(self) -> npt.NDArray[np.float64]:
"""
Get the inverse transformation matrix.
:return: <np.float: 4, 4>. Inverse transformation matrix.
"""
tm: npt.NDArray[np.float64] = np.eye(4)
rot_inv = self.quaternion.rotation_matrix.T
tm[:3, :3] = rot_inv
tm[:3, 3] = rot_inv.dot(np.transpose(-self.translation_np))
return tm
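# Hedged illustration (not part of the nuplandb schema): `trans_matrix` maps
# homogeneous points from the sensor frame into the ego/vehicle frame and
# `trans_matrix_inv` is its inverse. The rotation, translation and test point
# below are made up purely to demonstrate the round trip.
def _example_sensor_transform_roundtrip() -> bool:
    q = Quaternion(axis=[0.0, 0.0, 1.0], angle=np.pi / 2)  # example rotation about z
    t = np.array([1.0, 2.0, 0.5])                          # example translation
    tm = q.transformation_matrix
    tm[:3, 3] = t
    tm_inv = np.eye(4)
    tm_inv[:3, :3] = q.rotation_matrix.T
    tm_inv[:3, 3] = q.rotation_matrix.T.dot(-t)
    point = np.array([3.0, -1.0, 0.2, 1.0])                # homogeneous test point
    return bool(np.allclose(tm_inv @ (tm @ point), point))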
class Lidar(Base): # type: ignore
"""
Defines a calibrated lidar used to record a particular log.
"""
__tablename__ = "lidar"
token = Column(sql_types.UUID, primary_key=True) # type: str
log_token = Column(sql_types.UUID, ForeignKey("log.token"), nullable=False) # type: str
channel = Column(String(64)) # type: str
model = Column(String(64)) # type: str
translation = Column(sql_types.SqlTranslation) # type: data_types.Translation
rotation = Column(sql_types.SqlRotation) # type: data_types.Rotation
max_nbr_points = Column(Integer) # type: int
log = relationship("Log", foreign_keys=[log_token], back_populates="lidars") # type: Log
lidar_pcs = relationship("LidarPc", foreign_keys="LidarPc.lidar_token",
back_populates="lidar") # type: List[LidarPc]
@property
def table(self) -> Table[Lidar]:
"""
Get the lidar table.
self._table is injected at runtime:
db = self._table.db # get db instance
session = self._table.db.session # get session instance
:return: The lidar table.
"""
return self._table # type: ignore
def __repr__(self) -> str:
"""
Return the string representation.
:return: The string representation.
"""
desc: str = simple_repr(self)
return desc
@property
def translation_np(self) -> npt.NDArray[np.float64]:
"""
Get the translation in numpy format.
:return: <np.float: 3> Translation.
"""
return np.array(self.translation)
@property
def quaternion(self) -> Quaternion:
"""
Get the rotation in quaternion.
:return: The rotation in quaternion.
"""
return Quaternion(self.rotation)
@property
def trans_matrix(self) -> npt.NDArray[np.float64]:
"""
Get the transformation matrix.
:return: <np.float: 4, 4>. Transformation matrix.
"""
tm: npt.NDArray[np.float64] = self.quaternion.transformation_matrix
tm[:3, 3] = self.translation_np
return tm
@property
def trans_matrix_inv(self) -> npt.NDArray[np.float64]:
"""
Get the inverse transformation matrix.
:return: <np.float: 4, 4>. Inverse transformation matrix.
"""
tm: npt.NDArray[np.float64] = np.eye(4)
rot_inv = self.quaternion.rotation_matrix.T
tm[:3, :3] = rot_inv
tm[:3, 3] = rot_inv.dot(np.transpose(-self.translation_np))
return tm
# Licensed under an MIT open source license - see LICENSE
from __future__ import print_function, absolute_import, division
import numpy as np
import statsmodels.api as sm
import warnings
import astropy.units as u
from numpy.fft import fftshift
from .lm_seg import Lm_Seg
from .psds import pspec, make_radial_freq_arrays
from .fitting_utils import clip_func, residual_bootstrap
from .elliptical_powerlaw import (fit_elliptical_powerlaw,
inverse_interval_transform,
inverse_interval_transform_stderr)
from .apodizing_kernels import *
from .rfft_to_fft import rfft_to_fft
class StatisticBase_PSpec2D(object):
"""
Common features shared by 2D power spectrum methods.
"""
@property
def ps2D(self):
'''
Two-dimensional power spectrum.
'''
return self._ps2D[::-1]
@property
def ps1D(self):
'''
One-dimensional power spectrum.
'''
return self._ps1D
@property
def ps1D_stddev(self):
'''
1-sigma standard deviation of the 1D power spectrum.
'''
return self._ps1D_stddev
@property
def freqs(self):
'''
Corresponding spatial frequencies of the 1D power spectrum.
'''
return self._freqs
@property
def wavenumbers(self):
return self._freqs * min(self._ps2D.shape)
def compute_beam_pspec(self):
'''
Compute the power spectrum of the beam element.
'''
if not hasattr(self, '_beam'):
raise AttributeError("Beam correction cannot be applied since"
" no beam object was given.")
beam_kern = self._beam.as_kernel(self._ang_size,
y_size=self._ps2D.shape[0],
x_size=self._ps2D.shape[1])
beam_fft = fftshift(rfft_to_fft(beam_kern.array))
self._beam_pow = np.abs(beam_fft**2)
# Avoid infs when dividing out by the beam power spectrum
self._beam_pow[self._beam_pow == 0.0] = np.NaN
def compute_radial_pspec(self, logspacing=False, max_bin=None, **kwargs):
'''
Computes the radially averaged power spectrum.
Parameters
----------
logspacing : bool, optional
Return logarithmically spaced bins for the lags.
max_bin : float, optional
Maximum spatial frequency to bin values at.
kwargs : passed to `~turbustat.statistics.psds.pspec`.
'''
# Check if azimuthal constraints are given
if kwargs.get("theta_0"):
azim_constraint_flag = True
else:
azim_constraint_flag = False
out = pspec(self.ps2D, return_stddev=True,
logspacing=logspacing, max_bin=max_bin, **kwargs)
self._azim_constraint_flag = azim_constraint_flag
if azim_constraint_flag:
self._freqs, self._ps1D, self._ps1D_stddev, self._azim_mask = out
else:
self._freqs, self._ps1D, self._ps1D_stddev = out
# Attach units to freqs
self._freqs = self.freqs / u.pix
def fit_pspec(self, brk=None, log_break=False, low_cut=None,
high_cut=None, min_fits_pts=10, weighted_fit=False,
bootstrap=False, bootstrap_kwargs={},
verbose=False):
'''
Fit the 1D Power spectrum using a segmented linear model. Note that
the current implementation allows for only 1 break point in the
model. If the break point is estimated via a spline, the breaks are
tested, starting from the largest, until the model finds a good fit.
Parameters
----------
brk : float or None, optional
Guesses for the break points. If given as a list, the length of
the list sets the number of break points to be fit. If a choice is
outside of the allowed range from the data, Lm_Seg will raise an
error. If None, a spline is used to estimate the breaks.
log_break : bool, optional
Sets whether the provided break estimates are log-ed (base 10)
values. This is disabled by default. When enabled, the brk must
be a unitless `~astropy.units.Quantity`
(`u.dimensionless_unscaled`).
low_cut : `~astropy.units.Quantity`, optional
Lowest frequency to consider in the fit.
high_cut : `~astropy.units.Quantity`, optional
Highest frequency to consider in the fit.
min_fits_pts : int, optional
Sets the minimum number of points needed to fit. If not met, the
break found is rejected.
weighted_fit : bool, optional
Fit using weighted least-squares. The weights are
the inverse-squared standard deviations in each radial bin.
bootstrap : bool, optional
Bootstrap using the model residuals to estimate the parameter
standard errors. This tends to give more realistic intervals than
the covariance matrix.
bootstrap_kwargs : dict, optional
Pass keyword arguments to `~turbustat.statistics.fitting_utils.residual_bootstrap`.
verbose : bool, optional
Enables verbose mode in Lm_Seg.
'''
self._bootstrap_flag = bootstrap
# Make the data to fit to
if low_cut is None:
# Default to the largest frequency, since this is just 1 pixel
# in the 2D PSpec.
self.low_cut = 1. / (0.5 * float(max(self.ps2D.shape)) * u.pix)
else:
self.low_cut = self._to_pixel_freq(low_cut)
if high_cut is None:
self.high_cut = self.freqs.max().value / u.pix
else:
self.high_cut = self._to_pixel_freq(high_cut)
x = np.log10(self.freqs[clip_func(self.freqs.value, self.low_cut.value,
self.high_cut.value)].value)
clipped_ps1D = self.ps1D[clip_func(self.freqs.value,
self.low_cut.value,
self.high_cut.value)]
y = np.log10(clipped_ps1D)
if weighted_fit:
clipped_stddev = self.ps1D_stddev[clip_func(self.freqs.value,
self.low_cut.value,
self.high_cut.value)]
clipped_stddev[clipped_stddev == 0.] = np.NaN
y_err = 0.434 * clipped_stddev / clipped_ps1D
if brk is not None:
# Try the fit with a break in it.
if not log_break:
brk = self._to_pixel_freq(brk).value
brk = np.log10(brk)
else:
# A value given in log shouldn't have dimensions
if hasattr(brk, "unit"):
assert brk.unit == u.dimensionless_unscaled
brk = brk.value
if weighted_fit:
weights = 1 / y_err**2
else:
weights = None
brk_fit = Lm_Seg(x, y, brk, weights=weights)
brk_fit.fit_model(verbose=verbose, cov_type='HC3')
if brk_fit.params.size == 5:
# Check to make sure this leaves enough to fit to.
if sum(x < brk_fit.brk) < min_fits_pts:
warnings.warn("Not enough points to fit to." +
" Ignoring break.")
self._brk = None
else:
good_pts = x.copy() < brk_fit.brk
x = x[good_pts]
y = y[good_pts]
self._brk = 10**brk_fit.brk / u.pix
self._slope = brk_fit.slopes
if bootstrap:
stderrs = residual_bootstrap(brk_fit.fit,
**bootstrap_kwargs)
self._slope_err = stderrs[1:-1]
self._brk_err = np.log(10) * self.brk.value * \
stderrs[-1] / u.pix
else:
self._slope_err = brk_fit.slope_errs
self._brk_err = np.log(10) * self.brk.value * \
brk_fit.brk_err / u.pix
self.fit = brk_fit.fit
self._model = brk_fit
else:
self._brk = None
# Break fit failed, revert to normal model
warnings.warn("Model with break failed, reverting to model\
without break.")
else:
self._brk = None
self._brk_err = None
if self.brk is None:
x = sm.add_constant(x)
if weighted_fit:
model = sm.WLS(y, x, missing='drop', weights=1 / y_err**2)
else:
model = sm.OLS(y, x, missing='drop')
self.fit = model.fit(cov_type='HC3')
self._slope = self.fit.params[1]
if bootstrap:
stderrs = residual_bootstrap(self.fit,
**bootstrap_kwargs)
self._slope_err = stderrs[1]
else:
self._slope_err = self.fit.bse[1]
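# Hedged usage sketch (not from the original turbustat source). `pspec` stands
# for any statistic object that mixes in this class (e.g. a power-spectrum
# instance); only methods and properties defined in this mixin are used:
#
#     pspec.compute_radial_pspec(logspacing=True)
#     pspec.fit_pspec(low_cut=0.02 / u.pix, high_cut=0.1 / u.pix,
#                     weighted_fit=True)
#     print(pspec.slope, pspec.slope_err)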
@property
def slope(self):
'''
Power spectrum slope(s).
'''
return self._slope
@property
def slope_err(self):
'''
1-sigma error on the power spectrum slope(s).
'''
return self._slope_err
@property
def brk(self):
'''
Fitted break point.
'''
return self._brk
@property
def brk_err(self):
'''
1-sigma on the break point.
'''
return self._brk_err
def apodizing_kernel(self, kernel_type="tukey", alpha=0.1, beta=0.0):
'''
Return an apodizing kernel to be applied to the image before taking the
Fourier transform.
Parameters
----------
kernel_type : {'tukey', 'splitcosinebell', 'hanning', 'cosinebell'}, optional
    Type of apodizing window to use.
alpha : float, optional
    Taper parameter passed to the chosen window.
beta : float, optional
    Second taper parameter; only used by 'splitcosinebell'.
Returns
-------
window : `~numpy.ndarray`
    Apodizing kernel
'''
if self.data is not None:
shape = self.data.shape
else:
# MVC doesn't have a data attribute set
shape = self.centroid.shape
# Assume first axis is velocity if >2 dimensions
if len(shape) > 2:
shape = shape[1:]
avail_types = ['splitcosinebell', 'hanning', 'tukey',
'cosinebell']
if kernel_type == "splitcosinebell":
return SplitCosineBellWindow(alpha, beta)(shape)
elif kernel_type == "hanning":
return HanningWindow()(shape)
elif kernel_type == "tukey":
return TukeyWindow(alpha)(shape)
elif kernel_type == 'cosinebell':
return CosineBellWindow(alpha)(shape)
else:
raise ValueError("kernel_type {0} is not one of the available "
"types: {1}".format(kernel_type, avail_types))
def fit_2Dpspec(self, fit_method='LevMarq', p0=(), low_cut=None,
high_cut=None, bootstrap=True, niters=100,
use_azimmask=False, radial_weighting=False,
fix_ellip_params=False):
'''
Model the 2D power-spectrum surface with an elliptical power-law model.
Parameters
----------
fit_method : str, optional
The algorithm fitting to use. Only 'LevMarq' is currently
available.
p0 : tuple, optional
Initial parameters for fitting. If no values are given, the initial
parameters start from the 1D fit parameters.
low_cut : `~astropy.units.Quantity`, optional
Lowest frequency to consider in the fit.
high_cut : `~astropy.units.Quantity`, optional
Highest frequency to consider in the fit.
bootstrap : bool, optional
Bootstrap using the model residuals to estimate the parameter
standard errors. This tends to give more realistic intervals than
the covariance matrix.
niters : int, optional
Number of bootstrap iterations.
use_azimmask : bool, optional
Use the azimuthal mask defined for the 1D spectrum, when azimuthal
limit have been given.
radial_weighting : bool, optional
To account for the increasing number of samples at greater radii,
the fit can be weighted by :math:`1/\\mathrm{radius}` to emphasize the
points at small radii. DO NOT enabled weighting when the field is
elliptical! This will bias the fit parameters! Default is False.
fix_ellip_params : bool, optional
If the field is expected to be isotropic, the ellipticity and theta
parameters can be fixed in the fit. This will help the fit since
the isotropic case sits at the edge of the ellipticity parameter
space and can be difficult to correctly converge to.
'''
# Make the data to fit to
if low_cut is None:
# Default to the largest frequency, since this is just 1 pixel
# in the 2D PSpec.
self.low_cut = 1. / (0.5 * float(max(self.ps2D.shape)) * u.pix)
else:
self.low_cut = self._to_pixel_freq(low_cut)
if high_cut is None:
self.high_cut = self.freqs.max().value / u.pix
else:
self.high_cut = self._to_pixel_freq(high_cut)
yy_freq, xx_freq = make_radial_freq_arrays(self.ps2D.shape)
freqs_dist = np.sqrt(yy_freq**2 + xx_freq**2)
mask = clip_func(freqs_dist, self.low_cut.value, self.high_cut.value)
if hasattr(self, "_azim_mask") and use_azimmask:
mask = np.logical_and(mask, self._azim_mask)
import numpy as np
import math
from sympy.physics.quantum.cg import CG
from scipy.special import sph_harm
def radial_function(x, J, alpha=1, beta=0.5):
return np.exp(alpha * np.linalg.norm(x) + beta + J * 0.001)
def get_Q_lk_transpose(k, l, J, m):
Q_lk_transpose = np.zeros((2 * l + 1, 2 * k + 1))
for i in range(2 * l + 1):
for j in range(2 * k + 1):
Q_lk_transpose[i, j] = CG(J, m, k, j - k, l, i - l).doit()
return Q_lk_transpose
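# Hedged sketch (not in the original file): for k = l = 1, J = 0, m = 0 the
# Clebsch-Gordan coefficient matrix built above has shape (2l+1, 2k+1) = (3, 3).
# Defined but not called, so importing this module is unaffected.
def _example_cg_matrix():
    Q = get_Q_lk_transpose(1, 1, 0, 0)
    print(Q.shape)  # (3, 3)
    return Q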
def cart2sph(x, y, z):
XsqPlusYsq = x ** 2 + y ** 2
r = math.sqrt(XsqPlusYsq + z ** 2) # r
elev = math.atan2(z, math.sqrt(XsqPlusYsq)) # theta
az = math.atan2(y, x) # phi
return r, elev, az
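# Hedged sketch (not in the original file): cart2sph returns (r, elevation,
# azimuth); the helper below inverts that mapping as a sanity check.
def _example_cart2sph_roundtrip():
    x, y, z = 1.0, 2.0, 2.0
    r, elev, az = cart2sph(x, y, z)
    x2 = r * math.cos(elev) * math.cos(az)
    y2 = r * math.cos(elev) * math.sin(az)
    z2 = r * math.sin(elev)
    return np.allclose([x, y, z], [x2, y2, z2])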
def get_real_spherical_harmonics(x, l, m):
_, po, az = cart2sph(x[0], x[1], x[2])
Y = sph_harm(abs(m), l, az, po)
if m < 0:
Y = np.sqrt(2) * (-1) ** m * Y.imag
elif m > 0:
Y = np.sqrt(2) * (-1) ** m * Y.real
#!/usr/bin/python
import numpy as np
from timer import Timer
from GenericLinkage import *
def testBasic0():
L1 = np.array([[0,0,0,0,0],[1,1,1,1,1]])
L2 = np.array([[1,1,1,1,1],[0,0,0,0,0]])
L1 = np.array(L1, dtype=bool)
L2 = np.array(L2, dtype=bool)
RF = np.array([0.05,0.05,0.05,0.05])
with Timer('generating 3 progenies'):
Y = cross2(L1,L2,RF,3)
print(Y)
def testBasic1():
# number of columns in a gene
n = 1400000
# number of progenies to produce
k = 30
# generate two random 2 by n matrices
# filled with random 0 and 1
L1 = np.random.randint(2, size=(2,n))
L1 = np.array(L1, dtype=bool)
L2 = np.random.randint(2, size=(2,n))
L2 = np.array(L2, dtype=bool)
# array of recombination frequencies of size (n-1)
RF = 0.1*np.random.random(n-1)
# generate progenies
timerName = 'generating ' + str(k) + ' progenies'
with Timer(timerName):
Y = cross2(L1,L2,RF,k)
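# Hedged sketch (not part of the original test script): cross2 is assumed to
# take two 2-by-n boolean parental genomes, an (n-1)-vector of recombination
# frequencies and a progeny count k; the helper below just reports the shape
# of whatever it returns for a tiny input.
def exampleShapes(n=10, k=4):
    L1 = np.array(np.random.randint(2, size=(2, n)), dtype=bool)
    L2 = np.array(np.random.randint(2, size=(2, n)), dtype=bool)
    RF = 0.1 * np.random.random(n - 1)
    Y = cross2(L1, L2, RF, k)
    print(np.shape(Y))
    return Y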
def testRotation0():
# up in column-based corresponds to left in row-based
# down in column-based corresponds to right in row-based
# conversion from column-based to row-based
# will be done by rotating the matrices
# by 270 degrees
L1 = np.array([[0,1],[0,1],[0,1],[0,1],[0,1]])
L2 = np.array([[1,0],[1,0],[1,0],[1,0],[1,0]])
RF = np.array([[0.05],[0.05],[0.05],[0.05]])
# convert
L1 = np.rot90(L1, 3)
L2 = np.rot90(L2, 3)
RF = RF[:,0]
with Timer('generating 3 progenies'):
Y = cross2(L1,L2,RF,3)
def testRotation1():
# up in column-based corresponds to left in row-based
# down in column-based corresponds to right in row-based
# conversion from column-based to row-based
# will be done by rotating the matrices
# by 270 degrees
# number of rows in a gene
n = 1400000
#n = 6480
# number of progenies to produce
k = 30
#k = 3240
# generate two random n by 2 matrices
# filled with 0 and 1
L1 = np.random.randint(2, size=(n,2))
L2 = np.random.randint(2, size=(n,2))
# generate (n-1)*1 matrix of random recombination frequencies
RF = 0.1*np.random.random((n-1, 1))
# generate progenies
# convert
L1 = np.rot90(L1, 3)
L2 = np.rot90(L2, 3)
RF = RF[:,0]
with Timer('generating 30 progenies'):
Y = cross2(L1,L2,RF,k)
def testMP0():
L1 = np.array([[0,0,0,0,0],[1,1,1,1,1]])
L2 = np.array([[1,1,1,1,1],[0,0,0,0,0]])
RF = np.array([0.05,0.05,0.05,0.05])
k = 35
timerName = 'generating ' + str(k) + ' progenies'
with Timer(timerName):
Y = cross2mp(L1,L2,RF,k)
def testMP1():
# number of rows in a gene
n = 1400000
# number of progenies to produce
k = 489
# generate two random 2 by n matrices
# filled with random 0 and 1
L1 = np.random.randint(2, size=(2,n))
L2 = np.random.randint(2, size=(2,n))
# array of recombination frequencies of size (n-1)
RF = 0.1*np.random.random(n-1)
# generate progenies
timerName = 'generating ' + str(k) + ' progenies'
with Timer(timerName):
Y = cross2mp(L1,L2,RF,k)
def testSP0():
L1 = np.array([[0,0,0,0,0],[1,1,1,1,1]])
L2 = np.array([[1,1,1,1,1],[0,0,0,0,0]])
import pdb
import re
import os
import json
import datetime
import time
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use("tkAgg")
import matplotlib.pyplot as plt
import seaborn as sns
import sleep_scorer.plottools as pt
import sleep_scorer.remtools as rt
class EEGPlotter(object):
"""dashboard-like plotting for EEG data
The panel view incorporates:
- raw signal timeseries
- features (power spectra)
- pca (or lowD) feature projections
- scores (human/model scores or other categoricals like
consensus/transition/conflict)
input:
------
std: (StagedTrialData object) (raw edr *and* features)
pca: (PCA object) pca avg/vecs/vals
scores : (ScoreBlock)
methods:
------
plot(fig, ti/tf): high level, just specify target fig and start/end times
TODO:
GENERAL
- general time series plotting (e.g. EMG power)
- general 2d/histogram plots (e.g. EMG power)
SPEED HAX
- fuse time series
- keep axes, clear data
- keep histograms
- scrolling w/pseudocache:
- plot some range beyond axes limits,
- adjust axes limits for small steps
- replot big chunks less often/ asneeded
PDF OVERLAY
- comet
- conflicts
- switch epochs
"""
def __init__(self, std=None, pca=None, scores=None, params={}):
self.std = std
self.pca = pca
self.scores = scores
self.df_prj = self.pca.project(std.features.data)
self.params = self.default_params()
self.params.update(params)
# FEATURES
self.X = self.std.features.data
# RAW features (normalize, stride)
# # raw features, look like shit w/current formatting
# self.df_feat = rt.SxxBundle.from_EDFData(self.std.edf).to_dataframe()
# stash (time consuming) computed values here for re-use
self.stash = {}
# initial viewstate
self.viewEpoch = 100
self.viewWidth = 20
# render the first frame
self.make_fig()
self.render()
@property
def viewrange(self):
ea = self.viewEpoch - self.viewWidth
eb = self.viewEpoch + self.viewWidth
return [ea, eb]
def default_params(self):
"""make default params"""
params = dict(
name='gallahad',
quest='grail'
)
return params
def about(self):
"""helpful information at a glance"""
print('------ EEGPlotter.about() ------')
print('params:', self.params)
self.scores.about()
self.pca.about()
self.std.about()
def make_fig(self):
"""create the figure w/event handling"""
aa = '707070'
#aa = 'a0a0a0'
b0 = 'b0b0b0'
gg = 'c0c0c0'
pp = {
'lines.linewidth':2,
'axes.facecolor':'k',
'axes.edgecolor': gg,
'axes.labelcolor': gg,
'figure.facecolor':'k',
'figure.edgecolor':'k',
'savefig.facecolor':'k',
'savefig.edgecolor':'k',
'xtick.color' : gg,
'ytick.color' : gg,
'grid.color' : aa,
'text.color' : gg
}
matplotlib.rcParams.update(pp)
self.fig = plt.figure(figsize=(16, 8), dpi=80)
self.fig.canvas.mpl_connect('key_press_event', self.kupdate)
self.fig.canvas.mpl_connect('button_press_event', self.mupdate)
self.fig.canvas.mpl_connect('scroll_event', self.mupdate)
# self.fig.set_facecolor('k')
def kupdate(self, event):
"""keypress updates"""
# step sizes
s = [1, 5, 10]
# print(event.key)
if event.key == 'left':
self.lstep(inc=s[0])
# if event.key == 'shift+left':
# self.lstep(inc=s[1])
if event.key == 'ctrl+left':
self.lstep(inc=s[2])
if event.key == 'right':
self.rstep(inc=s[0])
# if event.key == 'shift+right':
# self.lstep(inc=s[1])
if event.key == 'ctrl+right':
self.rstep(inc=s[2])
if event.key == 'up':
self.viewWidth = max(self.viewWidth-1, 3)
self.render()
if event.key == 'ctrl+up':
self.viewWidth = max(self.viewWidth-2, 3)
self.render()
if event.key == 'down':
self.viewWidth += 1
self.render()
if event.key == 'ctrl+down':
self.viewWidth += 2
self.render()
def mupdate(self, event):
"""update when mouse buttons pushed or wheel spun"""
# step sizes
s = [1, 5, 10]
# STEP LEFT (backward in time)
if event.button == 1:
if event.key is None:
self.lstep(inc=s[0])
elif event.key == 'shift':
self.lstep(inc=s[1])
elif event.key == 'control':
self.lstep(inc=s[2])
# STEP RIGHT (forward in time)
if event.button == 3:
if event.key is None:
self.rstep(inc=s[0])
elif event.key == 'shift':
self.rstep(inc=s[1])
elif event.key == 'control':
self.rstep(inc=s[2])
# zoom out
if event.button == 'down':
self.viewWidth += 1
self.render()
# zoom in
if event.button == 'up':
self.viewWidth = max(self.viewWidth-1, 3)
self.render()
def rstep(self, inc=1):
"""step right, next epoch"""
self.viewEpoch += inc
self.render()
def lstep(self, inc=1):
"""step left, prev epoch"""
self.viewEpoch -= inc
self.render()
def render(self, viewEpoch=None, viewWidth=None):
"""render the figure
render?! I hardly know 'er!
"""
t00 = time.time()
sig_labels_plot = ['EEG1', 'EEG2', 'EMG']
if viewEpoch is not None:
self.viewEpoch = viewEpoch
if viewWidth is not None:
self.viewWidth = viewWidth
[ia, ib] = self.viewrange
ie = self.viewEpoch
chunksize = ib-ia
edf = self.std.edf
dfmerge = self.df_prj
num_epochs = edf.num_epochs
epoch_duration = edf.epoch_duration
spectrograms = edf.spectrograms
signal_traces = edf.signal_traces
t05 = time.time()
figx = self.fig
t10 = time.time()
#== plot AXES
# if self.fig.axes == []:
# self.ax = [
# plt.subplot2grid((4,7),(0,0), rowspan=1, colspan=4),
# plt.subplot2grid((4,7),(1,0), rowspan=1, colspan=4),
# plt.subplot2grid((4,7),(2,0), rowspan=2, colspan=2),
# plt.subplot2grid((4,7),(2,2), rowspan=2, colspan=2),
# plt.subplot2grid((4,7),(0,4), rowspan=4, colspan=2)
# ]
# else:
# #pdb.set_trace()
# for axi in self.fig.axes:
# axi.clear()
self.ax = [
plt.subplot2grid((4,7),(0,0), rowspan=1, colspan=4),
plt.subplot2grid((4,7),(1,0), rowspan=1, colspan=4),
plt.subplot2grid((4,7),(2,0), rowspan=2, colspan=2),
plt.subplot2grid((4,7),(2,2), rowspan=2, colspan=2),
plt.subplot2grid((4,7),(0,4), rowspan=4, colspan=2)
]
axx = self.ax[0:4]
axb = [self.ax[-1]]
t15 = time.time()
# print(' --')
# print(' t assign: %4.2f' % (t05-t00))
# print(' t fig : %4.2f' % (t10-t05))
# print(' t ax : %4.2f' % (t15-t10))
#======================================================================
#======================================================================
#======== LHS (signals/pca)
#======================================================================
#======================================================================
t20 = time.time()
#==================================================
#== panel 0, RAW signal time series
raw_stride = 5
dy_raw = -300
tr000 = time.time()
xxx, yyy, lbl = [], [], []
for i, label in enumerate(sig_labels_plot):
st = signal_traces[label]
ndxi = int(st.samples_per_epoch*ia)
ndxf = int(st.samples_per_epoch*ib)
ti = ndxi/st.f
tf = ndxf/st.f
xx = np.linspace(ti, tf, int(st.samples_per_epoch)*chunksize)
yy = st.sig[ndxi:ndxf]+dy_raw*i
xxx.append(xx[::raw_stride])
yyy.append(yy[::raw_stride])
lbl.append(label)
tr001 = time.time()
tr002 = time.time()
xxx = np.asarray(xxx).T
yyy = np.asarray(yyy).T
lobj = axx[0].plot(xxx, yyy, lw=1)
# BOX BOX
ndxe = int(st.samples_per_epoch*ie)
te = ndxe/st.f
xbox = [te, te, te-10, te-10, te]
ybox = [-900, 300, 300, -900, -900]
axx[0].plot(xbox, ybox, 'c-', ms=0, lw=2)
tr003 = time.time()
axx[0].set_ylim([-400+2*dy_raw, 400])
axx[0].set_xlim([xx[0], xx[-1]])
#axx[0].set_xticks(np.linspace(ti, tf, chunksize+1))
#axx[0].set_xticklabels([])
axx[0].grid(True)
axx[0].set_ylabel('raw signals')
axx[0].set_xlabel('t [s]')
axx[0].spines['top'].set_visible(False)
axx[0].spines['right'].set_visible(False)
axx[0].spines['bottom'].set_visible(False)
leg = axx[0].legend(lobj, lbl, loc='upper right') #, ncol=len(lbl))
leg.get_frame().set_edgecolor('none')
tr004 = time.time()
# print('raw 1 : %3.0f' % ((tr001-tr000)*1000))
# print('raw 2 : %3.0f' % ((tr002-tr001)*1000))
# print('raw 3 : %3.0f' % ((tr003-tr002)*1000))
# print('raw 4 : %3.0f' % ((tr004-tr003)*1000))
# PCA histos and projections
t40 = time.time()
#==================================================
#== panel 1, PC time series
pcvec_cols = ['PC1', 'PC2', 'PC3']
for i, col in enumerate(pcvec_cols):
dy = -1
xx = np.arange(ia+1, ib+1)
import unittest
from pythran.tests import TestEnv
import numpy
import tempfile
import os
from pythran.typing import NDArray, List, Tuple
@TestEnv.module
class TestNumpyFunc0(TestEnv):
def test_extended_sum0(self):
self.run_test("def numpy_extended_sum0(a): import numpy ; return numpy.sum(a)",
numpy.arange(120).reshape((3,5,4,2)),
numpy_extended_sum0=[NDArray[int,:,:,:,:]])
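# Hedged note (assumption, not stated in the test sources): each test passes a
# snippet of Python source plus example arguments to self.run_test, together
# with a keyword whose name matches the snippet's function and whose value
# lists the argument types; run_test is assumed to compile the snippet with
# Pythran under those types and check that the compiled result matches plain
# CPython/NumPy execution.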
def test_extended_sum1(self):
self.run_test("def numpy_extended_sum1(a): import numpy ; return numpy.sum(a[1])",
numpy.arange(120).reshape((3,5,4,2)),
numpy_extended_sum1=[NDArray[int,:,:,:,:]])
def test_extended_sum2(self):
self.run_test("def numpy_extended_sum2(a): import numpy ; return numpy.sum(a[1,0])",
numpy.arange(120).reshape((3,5,4,2)),
numpy_extended_sum2=[NDArray[int,:,:,:,:]])
def test_extended_sum3(self):
self.run_test("def numpy_extended_sum3(a): import numpy ; return numpy.sum(a[1:-1])",
numpy.arange(120).reshape((3,5,4,2)),
numpy_extended_sum3=[NDArray[int,:,:,:,:]])
def test_extended_sum4(self):
self.run_test("def numpy_extended_sum4(a): import numpy ; return numpy.sum(a[1:-1,0])",
numpy.arange(120).reshape((3,5,4,2)),
numpy_extended_sum4=[NDArray[int,:,:,:,:]])
def test_extended_sum5(self):
self.run_test("def numpy_extended_sum5(a): import numpy ; return numpy.sum(a)",
numpy.arange(120).reshape((3,5,4,2)),
numpy_extended_sum5=[NDArray[int,:,:,:,:]])
def test_out_sum0(self):
self.run_test("def numpy_out_sum0(a, b): import numpy ; return numpy.sum(a, axis=0, out=b)",
numpy.arange(10).reshape((5,2)),
numpy.zeros(2, dtype=int),
numpy_out_sum0=[NDArray[int,:,:], NDArray[int,:]])
def test_out_sum1(self):
self.run_test("def numpy_out_sum1(a, b): import numpy ; return numpy.sum(a, axis=0, out=b)",
numpy.arange(10).reshape((5,2)),
numpy.ones(2, dtype=int),
numpy_out_sum1=[NDArray[int,:,:], NDArray[int,:]])
def test_out_sum2(self):
self.run_test("def numpy_out_sum2(a, b): import numpy ; return numpy.sum(a, axis=1, out=b)",
numpy.arange(10).reshape((5,2)),
numpy.zeros(5, dtype=int),
numpy_out_sum2=[NDArray[int,:,:], NDArray[int,:]])
def test_numpy_shape_as_function(self):
self.run_test("def numpy_shape_as_function(a): import numpy ; return numpy.shape(a)",
numpy.ones(3, numpy.int16),
numpy_shape_as_function=[NDArray[numpy.int16,:]])
def test_numpy_size_as_function(self):
self.run_test("def numpy_size_as_function(a): import numpy ; return numpy.size(a)",
numpy.ones(3, numpy.int16),
numpy_size_as_function=[NDArray[numpy.int16,:]])
def test_numpy_ndim_as_function(self):
self.run_test("def numpy_ndim_as_function(a): import numpy ; return numpy.ndim(a)",
numpy.ones(3, numpy.int16),
numpy_ndim_as_function=[NDArray[numpy.int16,:]])
def test_frexp0(self):
self.run_test("def np_frexp0(a): import numpy as np ; return np.frexp(a)", 1.5, np_frexp0=[float])
def test_frexp1(self):
self.run_test("def np_frexp1(a): import numpy as np ; return np.frexp(a)", numpy.array([1.1,2.2,3.3]), np_frexp1=[NDArray[float,:]])
def test_frexp2(self):
self.run_test("def np_frexp2(a): import numpy as np ; return np.frexp(a+a)", numpy.array([1.1,2.2,3.3]), np_frexp2=[NDArray[float,:]])
def test_ndindex0(self):
self.run_test("def np_ndindex0(): import numpy as np ; return [x for x in np.ndindex(5,6)]",
np_ndindex0=[])
def test_ndindex1(self):
self.run_test("def np_ndindex1(a): import numpy as np ; return [x for x in np.ndindex(a)]", 3, np_ndindex1=[int])
def test_ndindex2(self):
self.run_test("def np_ndindex2(n): import numpy as np ; return [x for x in np.ndindex((n,n))]", 3, np_ndindex2=[int])
def test_ndenumerate0(self):
self.run_test("def np_ndenumerate0(a): import numpy as np ; return [x for x in np.ndenumerate(a)]", numpy.array([[1, 2], [3, 4]]), np_ndenumerate0=[NDArray[int,:,:]])
def test_ndenumerate1(self):
self.run_test("def np_ndenumerate1(a): import numpy as np ; return [x for x in np.ndenumerate(a)]", numpy.array([1, 2, 3, 4]), np_ndenumerate1=[NDArray[int,:]])
def test_nansum0(self):
self.run_test("def np_nansum0(a): import numpy as np ; return np.nansum(a)" , numpy.array([[1, 2], [3, numpy.nan]]), np_nansum0=[NDArray[float,:,:]])
def test_nansum1(self):
self.run_test("def np_nansum1(a): import numpy as np ; return np.nansum(a)" , numpy.array([[1, 2], [numpy.NINF, numpy.nan]]), np_nansum1=[NDArray[float,:,:]])
def test_nansum2(self):
self.run_test("def np_nansum2(a): import numpy as np ; return np.nansum(a)", [1., numpy.nan], np_nansum2=[List[float]])
def test_nanmin0(self):
self.run_test("def np_nanmin0(a): import numpy as np ; return np.nanmin(a)" , numpy.array([[1, 2], [3, numpy.nan]]), np_nanmin0=[NDArray[float,:,:]])
def test_nanmin1(self):
self.run_test("def np_nanmin1(a): import numpy as np ; return np.nanmin(a)" , numpy.array([[1, 2], [numpy.NINF, numpy.nan]]), np_nanmin1=[NDArray[float,:,:]])
def test_nanmax0(self):
self.run_test("def np_nanmax0(a): import numpy as np ; return np.nanmax(a)" , numpy.array([[1, 2], [3, numpy.nan]]), np_nanmax0=[NDArray[float,:,:]])
def test_nanmax1(self):
self.run_test("def np_nanmax1(a): import numpy as np ; return np.nanmax(a)" , numpy.array([[1, 2], [numpy.inf, numpy.nan]]) , np_nanmax1=[NDArray[float,:,:]])
def test_np_residual(self):
self.run_test("""import numpy as np
def np_residual():
nx, ny, nz= 75, 75, 100
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
P = np.ones((nx, ny, nz), np.float64)
d2x = np.zeros_like(P)
d2y = np.zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y + 5*np.cosh(P).mean()**2
""", np_residual=[])
def test_np_func2(self):
self.run_test("""import numpy as np
def np_func2(x):
f = [x[0] * np.cos(x[1]) - 4,
x[1]*x[0] - x[1] - 5]
df = np.array([[np.cos(x[1]), -x[0] * np.sin(x[1])],
[x[1], x[0] - 1]])
return f, df
""", [1.0, 2.0, 3.0], np_func2=[List[float]])
def test_np_peval(self):
self.run_test("""import numpy
def np_peval(x, p):
return p[0]*numpy.sin(2*numpy.pi*p[1]*x+p[2])
""", 12., [1.0, 2.0, 3.0], np_peval=[float, List[float]])
def test_np_residuals(self):
self.run_test("""import numpy
def np_residuals():
x = numpy.arange(0,6e-2,6e-2/30)
A,k,theta = 10, 1.0/3e-2, numpy.pi/6
return A*numpy.sin(2*numpy.pi*k*x+theta)
""", np_residuals=[])
def test_np_func_deriv(self):
self.run_test("""import numpy
def np_func_deriv(x, sign=1.0):
dfdx0 = sign*(-2*x[0] + 2*x[1] + 2)
dfdx1 = sign*(2*x[0] - 4*x[1])
return numpy.array([ dfdx0, dfdx1 ])
""", [-1.0, 1.0], -1.0, np_func_deriv=[List[float], float])
def test_np_func(self):
self.run_test("""import numpy
def np_func(x, sign=1.0):
return sign*(2*x[0]*x[1] + 2*x[0] - x[0]**2 - 2*x[1]**2)
""", [-1.0, 1.0], -1.0, np_func=[List[float], float])
def test_rosen_hess_p(self):
self.run_test("""import numpy
def np_rosen_hess_p(x, p):
x = numpy.asarray(x)
Hp = numpy.zeros_like(x)
Hp[0] = (1200*x[0]**2 - 400*x[1] + 2)*p[0] - 400*x[0]*p[1]
Hp[1:-1] = -400*x[:-2]*p[:-2]+(202+1200*x[1:-1]**2-400*x[2:])*p[1:-1] \
-400*x[1:-1]*p[2:]
Hp[-1] = -400*x[-2]*p[-2] + 200*p[-1]
return Hp
""",
numpy.array([1.3, 0.7, 0.8, 1.9, 1.2]),
numpy.array([2.3, 1.7, 1.8, 2.9, 2.2]),
np_rosen_hess_p=[NDArray[float,:], NDArray[float,:]])
def test_rosen_hess(self):
self.run_test("""import numpy
def np_rosen_hess(x):
x = numpy.asarray(x)
H = numpy.diag(-400*x[:-1],1) - numpy.diag(400*x[:-1],-1)
diagonal = numpy.zeros_like(x)
diagonal[0] = 1200*x[0]**2-400*x[1]+2
diagonal[-1] = 200
diagonal[1:-1] = 202 + 1200*x[1:-1]**2 - 400*x[2:]
H = H + numpy.diag(diagonal)
return H
""",
numpy.array([1.3, 0.7, 0.8, 1.9, 1.2]),
np_rosen_hess=[NDArray[float,:]])
def test_rosen_der(self):
self.run_test("""import numpy
def np_rosen_der(x):
xm = x[1:-1]
xm_m1 = x[:-2]
xm_p1 = x[2:]
der = numpy.zeros_like(x)
der[1:-1] = 200*(xm-xm_m1**2) - 400*(xm_p1 - xm**2)*xm - 2*(1-xm)
der[0] = -400*x[0]*(x[1]-x[0]**2) - 2*(1-x[0])
der[-1] = 200*(x[-1]-x[-2]**2)
return der
""",
numpy.array([1.3, 0.7, 0.8, 1.9, 1.2]),
np_rosen_der=[NDArray[float,:]])
def test_rosen(self):
self.run_test("import numpy\ndef np_rosen(x): return sum(100.0*(x[1:]-x[:-1]**2.0)**2.0 + (1-x[:-1])**2.0)",
numpy.array([1.3, 0.7, 0.8, 1.9, 1.2]),
np_rosen=[NDArray[float,:]])
def test_nanargmax0(self):
self.run_test("def np_nanargmax0(a): from numpy import nanargmax; return nanargmax(a)", numpy.array([[numpy.nan, 4], [2, 3]]), np_nanargmax0=[NDArray[float,:,:]])
def test_nanargmin0(self):
self.run_test("def np_nanargmin0(a): from numpy import nanargmin ; return nanargmin(a)", numpy.array([[numpy.nan, 4], [2, 3]]), np_nanargmin0=[NDArray[float,:,:]])
def test_nan_to_num0(self):
self.run_test("def np_nan_to_num0(a): import numpy as np ; return np.nan_to_num(a)", numpy.array([numpy.inf, -numpy.inf, numpy.nan, -128, 128]), np_nan_to_num0=[NDArray[float,:]])
def test_median0(self):
self.run_test("def np_median0(a): from numpy import median ; return median(a)", numpy.array([[1, 2], [3, 4]]), np_median0=[NDArray[int,:,:]])
def test_median1(self):
self.run_test("def np_median1(a): from numpy import median ; return median(a)", numpy.array([1, 2, 3, 4,5]), np_median1=[NDArray[int,:]])
def test_median2(self):
self.run_test("def np_median2(a): from numpy import median ; return median(a, None)", numpy.array([1, 2, 3, 4,5]), np_median2=[NDArray[int,:]])
def test_median3(self):
self.run_test("def np_median3(a): from numpy import median ; return median(a, 0)", numpy.array([[1, 2, 3], [4,5,6]]), np_median3=[NDArray[int,:,:]])
def test_median4(self):
self.run_test("def np_median4(a): from numpy import median ; return median(a, 1)", numpy.array([[1, 2, 3], [4,5,6]]), np_median4=[NDArray[int,:,:]])
def test_median5(self):
self.run_test("def np_median5(a): from numpy import median ; return median(a, -1)", numpy.array([[[1], [2], [3]], [[4],[5],[6]]]), np_median5=[NDArray[int,:,:,:]])
def test_median6(self):
self.run_test("def np_median6(l): from numpy import median ; return l + median(l)", numpy.array([3, 1]), np_median6=[NDArray[int, :]])
def test_mean0(self):
self.run_test("def np_mean0(a): from numpy import mean ; return mean(a)", numpy.array([[1, 2], [3, 4]]), np_mean0=[NDArray[int,:,:]])
def test_mean1(self):
self.run_test("def np_mean1(a): from numpy import mean ; return mean(a, 1)", numpy.array([[1, 2], [3, 4.]]), np_mean1=[NDArray[float,:,:]])
def test_mean2(self):
self.run_test("def np_mean2(a): from numpy import mean ; return mean(a)", numpy.array([[[1, 2], [3, 4.]]]), np_mean2=[NDArray[float,:,:,:]])
def test_mean3(self):
self.run_test("def np_mean3(a): from numpy import mean ; return mean(a, 0)", numpy.array([[[1, 2], [3, 4.]]]), np_mean3=[NDArray[float,:,:,:]])
def test_mean4(self):
self.run_test("def np_mean4(a): from numpy import mean ; return mean(a, 1)", numpy.array([[[1, 2], [3, 4.]]]), np_mean4=[NDArray[float,:,:,:]])
def test_mean5(self):
self.run_test("def np_mean5(a): from numpy import mean ; return mean(a, 2)", numpy.array([[[1, 2], [3, 4.]]]), np_mean5=[NDArray[float,:,:,:]])
def test_var0(self):
self.run_test("def np_var0(a): return a.var()", numpy.array([[1, 2], [3, 4]], dtype=float), np_var0=[NDArray[float,:,:]])
def test_var1(self):
self.run_test("def np_var1(a): from numpy import var ; return var(a, 1)", numpy.array([[1, 2], [3, 4.]]), np_var1=[NDArray[float,:,:]])
def test_var2(self):
self.run_test("def np_var2(a): from numpy import var ; return var(a)", numpy.array([[[1, 2], [3, 4.]]]), np_var2=[NDArray[float,:,:,:]])
def test_var3(self):
self.run_test("def np_var3(a): from numpy import var ; return var(a, 0)", numpy.array([[[1, 2], [3, 4.]]]), np_var3=[NDArray[float,:,:,:]])
def test_var4(self):
self.run_test("def np_var4(a): from numpy import var ; return var(a, 1)", numpy.array([[[1, 2], [3, 4.]]]), np_var4=[NDArray[float,:,:,:]])
def test_var5(self):
self.run_test("def np_var5(a): from numpy import var ; return var(a, 2)", numpy.array([[[1, 2], [3, 4.]]]), np_var5=[NDArray[float,:,:,:]])
def test_var6(self):
self.run_test("def np_var6(a): from numpy import var ; return var(1j * a)", numpy.array([[[1, 2], [3, 4.]]]), np_var6=[NDArray[float,:,:,:]])
def test_var7(self):
self.run_test("def np_var7(a): from numpy import var ; return var(1j * a, 2)", numpy.array([[[1, 2], [3, 4.]]]), np_var7=[NDArray[float,:,:,:]])
def test_var8(self):
self.run_test("def np_var8(a): from numpy import var ; return var(1j * a, 2)", numpy.array([[[1, 2], [3, 4]]]), np_var8=[NDArray[int,:,:,:]])
def test_var9(self):
self.run_test("def np_var9(a): from numpy import var ; return var(1j * a)", numpy.array([[[1, 2], [3, 4]]]), np_var9=[NDArray[int,:,:,:]])
def test_std0(self):
self.run_test("def np_std0(a): from numpy import std ; return std(a)", numpy.array([[[1, 2], [3, 4]]]), np_std0=[NDArray[int, :, :, :]])
def test_std1(self):
self.run_test("def np_std1(a): from numpy import std ; return std(a, 0)", numpy.array([[[1, 2], [3, 4]]]), np_std1=[NDArray[int, :, :, :]])
def test_std2(self):
self.run_test("def np_std2(a): from numpy import std ; return std(a, 1)", numpy.array([[[1, 2], [3, 4]]]), np_std2=[NDArray[int, :, :, :]])
def test_std3(self):
self.run_test("def np_std3(a): from numpy import std ; return std(1j*a, 1)", numpy.array([[[1, 2], [3, 4]]]), np_std3=[NDArray[int, :, :, :]])
def test_logspace0(self):
self.run_test("def np_logspace0(start, stop): from numpy import logspace ; start, stop = 3., 4. ; return logspace(start, stop, 4)", 3., 4., np_logspace0=[float, float])
def test_logspace1(self):
self.run_test("def np_logspace1(start, stop): from numpy import logspace ; return logspace(start, stop, 4, False)", 3., 4., np_logspace1=[float, float])
def test_logspace2(self):
self.run_test("def np_logspace2(start, stop): from numpy import logspace ; return logspace(start, stop, 4, True, 2.0)", 3., 4., np_logspace2=[float, float])
def test_lexsort0(self):
self.run_test("def np_lexsort0(surnames): from numpy import lexsort ; first_names = ('Heinrich', 'Galileo', 'Gustav') ; return lexsort((first_names, surnames))", ('Hertz', 'Galilei', 'Hertz'), np_lexsort0=[Tuple[str, str, str]])
def test_lexsort1(self):
self.run_test("def np_lexsort1(a): from numpy import lexsort ; b = [1,5,1,4,3,4,4] ; return lexsort((a,b))", [9,4,0,4,0,2,1], np_lexsort1=[List[int]])
def test_lexsort2(self):
self.run_test("def np_lexsort2(a): from numpy import lexsort ; return lexsort((a+1,a-1))", numpy.array([1,5,1,4,3,4,4]), np_lexsort2=[NDArray[int,:]])
def test_issctype0(self):
self.run_test("def np_issctype0(): from numpy import issctype, int32 ; a = int32 ; return issctype(a)", np_issctype0=[])
def test_issctype1(self):
self.run_test("def np_issctype1(): from numpy import issctype ; a = list ; return issctype(a)", np_issctype1=[])
def test_issctype2(self):
self.run_test("def np_issctype2(a): from numpy import issctype ; return issctype(a)", 3.1, np_issctype2=[float])
def test_isscalar0(self):
self.run_test("def np_isscalar0(a): from numpy import isscalar ; return isscalar(a)", 3.1, np_isscalar0=[float])
def test_isscalar1(self):
self.run_test("def np_isscalar1(a): from numpy import isscalar ; return isscalar(a)", [3.1], np_isscalar1=[List[float]])
def test_isscalar2(self):
self.run_test("def np_isscalar2(a): from numpy import isscalar ; return isscalar(a)", '3.1', np_isscalar2=[str])
def test_isrealobj0(self):
self.run_test("def np_isrealobj0(a): from numpy import isrealobj ; return isrealobj(a)", numpy.array([1,2,3.]), np_isrealobj0=[NDArray[float,:]])
def test_isrealobj1(self):
self.run_test("def np_isrealobj1(a): from numpy import isrealobj ; return isrealobj(a)", numpy.array([1,2,3.,4 + 1j]).reshape((2,2)), np_isrealobj1=[NDArray[complex,:,:]])
def test_isreal0(self):
self.run_test("def np_isreal0(a): from numpy import isreal ; return isreal(a)", numpy.array([1,2,3.]), np_isreal0=[NDArray[float,:]])
def test_isreal1(self):
self.run_test("def np_isreal1(a): from numpy import isreal ; return isreal(a)", numpy.array([1,2,3.,4 + 1j]).reshape((2,2)), np_isreal1=[NDArray[complex,:,:]])
def test_iscomplex0(self):
self.run_test("def np_iscomplex0(a): from numpy import iscomplex ; return iscomplex(a)", numpy.array([1, 2, 3.]), np_iscomplex0=[NDArray[float,:]])
def test_iscomplex1(self):
self.run_test("def np_iscomplex1(a): from numpy import iscomplex ; return iscomplex(a)", numpy.array([1,2,3.,4 + 1j]).reshape((2,2)), np_iscomplex1=[NDArray[complex,:,:]])
def test_intersect1d0(self):
self.run_test("def np_intersect1d0(a): from numpy import intersect1d ; b = [3, 1, 2, 1] ; return intersect1d(a,b)", [1, 3, 4, 3], np_intersect1d0=[List[int]])
def test_insert0(self):
self.run_test("def np_insert0(a): from numpy import insert ; return insert(a, 1, 5)", numpy.array([[1, 1], [2, 2], [3, 3]]), np_insert0=[NDArray[int,:,:]])
def test_insert1(self):
self.run_test("def np_insert1(a): from numpy import insert ; return insert(a, [1,2], [5,6])", numpy.array([[1, 1], [2, 2], [3, 3]]), np_insert1=[NDArray[int,:,:]])
def test_insert2(self):
self.run_test("def np_insert2(a): from numpy import insert ; return insert(a, [1,1], [5.2,6])", numpy.array([[1, 1], [2, 2], [3, 3]]), np_insert2=[NDArray[int,:,:]])
def test_inner0(self):
self.run_test("def np_inner0(x): from numpy import inner ; y = 3 ; return inner(x,y)", 2, np_inner0=[int])
def test_inner1(self):
self.run_test("def np_inner1(x): from numpy import inner ; y = [2, 3] ; return inner(x,y)", [2, 3], np_inner1=[List[int]])
def test_indices0(self):
self.run_test("def np_indices0(s): from numpy import indices ; return indices(s)", (2, 3), np_indices0=[Tuple[int, int]])
def test_identity0(self):
self.run_test("def np_identity0(a): from numpy import identity ; return identity(a)", 3, np_identity0=[int])
def test_identity1(self):
self.run_test("def np_identity1(a): from numpy import identity ;return identity(a)", 4, np_identity1=[int])
def test_tofile0(self):
temp_name = tempfile.mkstemp()[1]
x = numpy.random.randint(0,2**8,1000).astype(numpy.uint8)
try:
self.run_test("def np_tofile0(x,file): import numpy ; x.tofile(file); return numpy.fromfile(file)", x, temp_name, np_tofile0=[NDArray[numpy.uint8,:], str])
finally:
os.remove(temp_name)
def test_tofile1(self):
temp_name = tempfile.mkstemp()[1]
x = numpy.random.randint(0,2**16,1000).astype(numpy.uint16)
try:
self.run_test("def np_tofile1(x,file): import numpy ; x.tofile(file); return numpy.fromfile(file)", x, temp_name, np_tofile1=[NDArray[numpy.uint16,:], str])
finally:
os.remove(temp_name)
def test_tofile2(self):
temp_name = tempfile.mkstemp()[1]
x = numpy.random.randint(0,2**32,1000).astype(numpy.uint32)
try:
self.run_test("def np_tofile2(x,file): import numpy ; x.tofile(file); return numpy.fromfile(file)", x, temp_name, np_tofile2=[NDArray[numpy.uint32,:], str])
finally:
os.remove(temp_name)
def test_tofile3(self):
temp_name = tempfile.mkstemp()[1]
x = numpy.random.random(1000).astype(numpy.float32)
try:
self.run_test("def np_tofile3(x,file): import numpy ; x.tofile(file); return numpy.fromfile(file)", x, temp_name, np_tofile3=[NDArray[numpy.float32,:], str])
finally:
os.remove(temp_name)
def test_tofile4(self):
temp_name = tempfile.mkstemp()[1]
x = numpy.random.random(1000).astype(numpy.float64)
try:
self.run_test("def np_tofile4(x,file): import numpy ; x.tofile(file); return numpy.fromfile(file)", x, temp_name, np_tofile4=[NDArray[numpy.float64,:], str])
finally:
os.remove(temp_name)
def test_fromfile0(self):
temp_name = tempfile.mkstemp()[1]
x = numpy.random.randint(0,2**8,1000).astype(numpy.uint8)
x.tofile(temp_name)
try:
self.run_test("def np_fromfile0(file): from numpy import fromfile, uint8 ; return fromfile(file, uint8)", temp_name, np_fromfile0=[str])
finally:
os.remove(temp_name)
def test_fromfile1(self):
temp_name = tempfile.mkstemp()[1]
x = numpy.random.randint(0,2**16,1000).astype(numpy.uint16)
x.tofile(temp_name)
try:
self.run_test("def np_fromfile1(file): from numpy import fromfile, uint16 ; return fromfile(file, uint16)", temp_name, np_fromfile1=[str])
finally:
os.remove(temp_name)
def test_fromfile2(self):
temp_name = tempfile.mkstemp()[1]
x = numpy.random.randint(0,2**32,1000).astype(numpy.uint32)
x.tofile(temp_name)
try:
self.run_test("def np_fromfile2(file): from numpy import fromfile, uint32 ; return fromfile(file, uint32)", temp_name, np_fromfile2=[str])
finally:
os.remove(temp_name)
def test_fromfile3(self):
temp_name = tempfile.mkstemp()[1]
x = numpy.random.random(1000).astype(numpy.float32)
x.tofile(temp_name)
try:
self.run_test("def np_fromfile3(file): from numpy import fromfile, float32 ; return fromfile(file, float32)", temp_name, np_fromfile3=[str])
finally:
os.remove(temp_name)
def test_fromfile4(self):
temp_name = tempfile.mkstemp()[1]
x = numpy.random.random(1000).astype(numpy.float64)
x.tofile(temp_name)
try:
self.run_test("def np_fromfile4(file): from numpy import fromfile, float64 ; return fromfile(file, float64)", temp_name, np_fromfile4=[str])
finally:
os.remove(temp_name)
def test_fromfile5(self):
temp_name = tempfile.mkstemp()[1]
x = numpy.random.random(1000).astype(numpy.float64)
x.tofile(temp_name)
try:
self.run_test("def np_fromfile5(file): from numpy import fromfile, float64 ; return fromfile(file, float64, 100)", temp_name, np_fromfile5=[str])
finally:
os.remove(temp_name)
def test_fromstring0(self):
self.run_test("def np_fromstring0(a): from numpy import fromstring, uint8 ; return fromstring(a, uint8)", '\x01\x02', np_fromstring0=[str])
def test_fromstring1(self):
self.run_test("def np_fromstring1(a): from numpy import fromstring, uint8 ; a = '\x01\x02\x03\x04' ; return fromstring(a, uint8,3)", '\x01\x02\x03\x04', np_fromstring1=[str])
def test_fromstring2(self):
self.run_test("def np_fromstring2(a): from numpy import fromstring, uint32 ; return fromstring(a, uint32,-1, ' ')", '1 2 3 4', np_fromstring2=[str])
def test_fromstring3(self):
self.run_test("def np_fromstring3(a): from numpy import fromstring, uint32 ; return fromstring(a, uint32,2, ',')", '1,2, 3, 4', np_fromstring3=[str])
def test_outer0(self):
self.run_test("def np_outer0(x): from numpy import outer ; return outer(x, x+2)", numpy.arange(6).reshape(2,3), np_outer0=[NDArray[int,:,:]])
def test_outer1(self):
self.run_test("def np_outer1(x): from numpy import outer; return outer(x, range(6))", numpy.arange(6).reshape((2,3)), np_outer1=[NDArray[int,:,:]])
def test_place0(self):
self.run_test("def np_place0(x): from numpy import place, ravel ; place(x, x>1, ravel(x**2)); return x", numpy.arange(6).reshape((2,3)), np_place0=[NDArray[int,:,:]])
def test_place1(self):
self.run_test("def np_place1(x): from numpy import place ; place(x, x>1, [57, 58]); return x", numpy.arange(6).reshape((2,3)), np_place1=[NDArray[int,:,:]])
def test_product(self):
self.run_test("def np_product(x):\n from numpy import product\n return product(x)", numpy.arange(1, 10), np_product=[NDArray[int,:]])
def test_ptp0(self):
self.run_test("def np_ptp0(x): return x.ptp()", numpy.arange(4).reshape((2,2)), np_ptp0=[NDArray[int,:,:]])
def test_ptp1(self):
self.run_test("def np_ptp1(x): from numpy import ptp ; return ptp(x,0)", numpy.arange(4).reshape((2,2)), np_ptp1=[NDArray[int,:,:]])
def test_ptp2(self):
self.run_test("def np_ptp2(x): from numpy import ptp ; return ptp(x,1)", numpy.arange(4).reshape((2,2)), np_ptp2=[NDArray[int,:,:]])
def test_put0(self):
self.run_test("def np_put0(x): x.put([0,2], [-44, -55]); return x", numpy.arange(5), np_put0=[NDArray[int,:]])
def test_put1(self):
self.run_test("def np_put1(x): from numpy import put ; put(x, [0,2,3], [57, 58]); return x", numpy.arange(6).reshape((2, 3)), np_put1=[NDArray[int,:,:]])
def test_put2(self):
self.run_test("def np_put2(x): from numpy import put ; put(x, 2, 57); return x", numpy.arange(6).reshape((2,3)), np_put2=[NDArray[int,:,:]])
def test_putmask0(self):
self.run_test("def np_putmask0(x): from numpy import putmask ; putmask(x, x>1, x**2); return x", numpy.arange(6).reshape((2,3)), np_putmask0=[NDArray[int,:,:]])
def test_putmask1(self):
self.run_test("def np_putmask1(x): from numpy import putmask; putmask(x, x>1, [57, 58]); return x", numpy.arange(6).reshape((2,3)), np_putmask1=[NDArray[int,:,:]])
def test_ravel0(self):
self.run_test("def np_ravel0(x): from numpy import ravel ; return ravel(x)", numpy.arange(6).reshape((2,3)), np_ravel0=[NDArray[int,:,:]])
def test_ravel1(self):
self.run_test("def np_ravel1(x): return x.ravel()", numpy.arange(6).reshape((2,3)), np_ravel1=[NDArray[int,:,:]])
def test_repeat0(self):
self.run_test("def np_repeat0(x): from numpy import repeat; return repeat(x, 3)", numpy.arange(3), np_repeat0=[NDArray[int,:]])
def test_repeat1(self):
self.run_test("def np_repeat1(x): return x.repeat(3)", numpy.arange(6).reshape(2,3), np_repeat1=[NDArray[int,:,:]])
def test_repeat2(self):
self.run_test("def np_repeat2(x): from numpy import repeat; return repeat(x, 4, axis=0)", numpy.arange(6).reshape(2,3), np_repeat2=[NDArray[int,:,:]])
def test_repeat3(self):
self.run_test("def np_repeat3(x): from numpy import repeat; return repeat(x, 4, axis=1)", numpy.arange(6).reshape(2,3), np_repeat3=[NDArray[int,:,:]])
def test_resize4(self):
self.run_test("def np_resize4(x): from numpy import resize ; return resize(x, (6,7))", numpy.arange(24).reshape((2,3,4)), np_resize4=[NDArray[int, :, :, :]])
def test_resize3(self):
self.run_test("def np_resize3(x): from numpy import resize; return resize(x, (6,6))", numpy.arange(24).reshape((2,3,4)), np_resize3=[NDArray[int, :, :, :]])
def test_resize2(self):
self.run_test("def np_resize2(x): from numpy import resize; return resize(x, (3,3))", numpy.arange(24).reshape((2,3,4)), np_resize2=[NDArray[int, :, :, :]])
def test_resize1(self):
self.run_test("def np_resize1(x): from numpy import resize; return resize(x, 32)", numpy.arange(24), np_resize1=[NDArray[int,:]])
def test_resize0(self):
self.run_test("def np_resize0(x): from numpy import resize; return resize(x, 12)", numpy.arange(24), np_resize0=[NDArray[int,:]])
def test_rollaxis3(self):
self.run_test("def np_rollaxis3(x): from numpy import rollaxis; return rollaxis(x, 0, 3)", numpy.arange(24).reshape((2,3,4)), np_rollaxis3=[NDArray[int, :, :, :]])
def test_rollaxis2(self):
self.run_test("def np_rollaxis2(x): from numpy import rollaxis; return rollaxis(x, 2)", numpy.arange(24).reshape((2,3,4)), np_rollaxis2=[NDArray[int, :, :, :]])
def test_rollaxis1(self):
self.run_test("def np_rollaxis1(x): from numpy import rollaxis; return rollaxis(x, 1, 2)", numpy.arange(24).reshape(2,3,4), np_rollaxis1=[NDArray[int, :, :, :]])
def test_rollaxis0(self):
self.run_test("def np_rollaxis0(x): from numpy import rollaxis; return rollaxis(x, 1)", numpy.arange(24).reshape(2,3,4), np_rollaxis0=[NDArray[int, :, :, :]])
def test_roll6(self):
self.run_test("def np_roll6(x): from numpy import roll; return roll(x[:,:,:-1], -1, 2)", numpy.arange(24).reshape(2,3,4), np_roll6=[NDArray[int, :, :, :]])
def test_roll5(self):
self.run_test("def np_roll5(x): from numpy import roll; return roll(x, -1, 2)", numpy.arange(24).reshape(2,3,4), np_roll5=[NDArray[int, :, :, :]])
def test_roll4(self):
self.run_test("def np_roll4(x): from numpy import roll; return roll(x, 1, 1)", numpy.arange(24).reshape(2,3,4), np_roll4=[NDArray[int, :, :, :]])
def test_roll3(self):
self.run_test("def np_roll3(x): from numpy import roll; return roll(x, -1, 0)", numpy.arange(24).reshape(2,3,4), np_roll3=[NDArray[int, :, :, :]])
def test_roll2(self):
self.run_test("def np_roll2(x): from numpy import roll; return roll(x, -1)", numpy.arange(24).reshape(2,3,4), np_roll2=[NDArray[int, :, :, :]])
def test_roll1(self):
self.run_test("def np_roll1(x): from numpy import roll; return roll(x, 10)", numpy.arange(24).reshape(2,3,4), np_roll1=[NDArray[int, :, :, :]])
def test_roll0(self):
self.run_test("def np_roll0(x): from numpy import roll; return roll(x, 3)", numpy.arange(24).reshape(2,3,4), np_roll0=[NDArray[int, :, :, :]])
def test_searchsorted3(self):
self.run_test("def np_searchsorted3(x): from numpy import searchsorted; return searchsorted(x, [[3,4],[1,87]])", numpy.arange(6), np_searchsorted3=[NDArray[int,:]])
def test_searchsorted2(self):
self.run_test("def np_searchsorted2(x): from numpy import searchsorted; return searchsorted(x, [[3,4],[1,87]], 'right')", numpy.arange(6), np_searchsorted2=[NDArray[int,:]])
def test_searchsorted1(self):
self.run_test("def np_searchsorted1(x): from numpy import searchsorted; return searchsorted(x, 3)", numpy.arange(6), np_searchsorted1=[NDArray[int,:]])
def test_searchsorted0(self):
self.run_test("def np_searchsorted0(x): from numpy import searchsorted; return searchsorted(x, 3, 'right')", numpy.arange(6), np_searchsorted0=[NDArray[int,:]])
def test_rot904(self):
self.run_test("def np_rot904(x): from numpy import rot90; return rot90(x, 4)", numpy.arange(24).reshape(2,3,4), np_rot904=[NDArray[int, :, :, :]])
def test_rot903(self):
self.run_test("def np_rot903(x): from numpy import rot90; return rot90(x, 2)", numpy.arange(24).reshape(2,3,4), np_rot903=[NDArray[int, :, :, :]])
def test_rot902(self):
self.run_test("def np_rot902(x): from numpy import rot90; return rot90(x, 3)", numpy.arange(24).reshape(2,3,4), np_rot902=[NDArray[int, :, :, :]])
def test_rot900(self):
self.run_test("def np_rot900(x): from numpy import rot90; return rot90(x)", numpy.arange(24).reshape(2,3,4), np_rot900=[NDArray[int, :, :, :]])
def test_rot901(self):
self.run_test("def np_rot901(x): from numpy import rot90; return rot90(x)", numpy.arange(4).reshape(2,2), np_rot901=[NDArray[int,:,:]])
def test_select2(self):
self.run_test("def np_select2(x): from numpy import select; condlist = [x<3, x>5]; choicelist = [x**3, x**2]; return select(condlist, choicelist)", numpy.arange(10).reshape(2,5), np_select2=[NDArray[int,:,:]])
def test_select1(self):
self.run_test("def np_select1(x): from numpy import select; condlist = [x<3, x>5]; choicelist = [x+3, x**2]; return select(condlist, choicelist)", numpy.arange(10), np_select1=[NDArray[int,:]])
def test_select0(self):
self.run_test("def np_select0(x): from numpy import select; condlist = [x<3, x>5]; choicelist = [x, x**2]; return select(condlist, choicelist)", numpy.arange(10), np_select0=[NDArray[int,:]])
def test_sometrue0(self):
self.run_test("def np_sometrue0(a): from numpy import sometrue ; return sometrue(a)", numpy.array([[True, False], [True, True]]), np_sometrue0=[NDArray[bool,:,:]])
def test_sometrue1(self):
self.run_test("def np_sometrue1(a): from numpy import sometrue ; return sometrue(a, 0)", numpy.array([[True, False], [False, False]]), np_sometrue1=[NDArray[bool,:,:]])
def test_sometrue2(self):
self.run_test("def np_sometrue2(a): from numpy import sometrue ; return sometrue(a)", [-1, 0, 5], np_sometrue2=[List[int]])
def test_sort0(self):
self.run_test("def np_sort0(a): from numpy import sort ; return sort(a)", numpy.array([[1,6],[7,5]]), np_sort0=[NDArray[int,:,:]])
def test_sort1(self):
self.run_test("def np_sort1(a): from numpy import sort ; return sort(a)", numpy.array([2, 1, 6, 3, 5]), np_sort1=[NDArray[int,:]])
def test_sort2(self):
self.run_test("def np_sort2(a): from numpy import sort ; return sort(a)", numpy.arange(2*3*4, 0, -1).reshape(2,3,4), np_sort2=[NDArray[int, :, :, :]])
def test_sort3(self):
self.run_test("def np_sort3(a): from numpy import sort ; return sort(a, 0)", numpy.arange(2*3*4, 0, -1).reshape(2,3,4), np_sort3=[NDArray[int, :, :, :]])
def test_sort4(self):
self.run_test("def np_sort4(a): from numpy import sort ; return sort(a, 1, kind='quicksort')", numpy.arange(2*3*4, 0, -1).reshape(2,3,4), np_sort4=[NDArray[int, :, :, :]])
def test_sort5(self):
self.run_test("def np_sort5(a): from numpy import sort ; return sort(a, 1, kind='heapsort')", numpy.arange(2*3*5, 0, -1).reshape(2,3,5), np_sort5=[NDArray[int, :, :, :]])
def test_sort6(self):
self.run_test("def np_sort6(a): from numpy import sort ; return sort(a, 0, kind='stable')", numpy.arange(2*3*6, 0, -1).reshape(2,3,6), np_sort6=[NDArray[int, :, :, :]])
def test_sort7(self):
self.run_test("def np_sort7(a): from numpy import sort ; return sort(a, 2, kind='mergesort')", numpy.arange(2*3*7, 0, -1).reshape(2,3,7), np_sort7=[NDArray[int, :, :, :]])
def test_sort8(self):
self.run_test("def np_sort8(a): from numpy import sort ; return sort(a, None)", numpy.arange(2*3*7, 0, -1).reshape(2,3,7), np_sort8=[NDArray[int, :, :, :]])
def test_sort9(self):
self.run_test("def np_sort9(a): from numpy import sort ; return sort(2 * a, None)", numpy.arange(2*3*7, 0, -1).reshape(2,3,7), np_sort9=[NDArray[int, :, :, :]])
def test_sort10(self):
self.run_test("def np_sort10(a): from numpy import sort ; return sort(3*a, 0)", | numpy.arange(2*3*4, 0, -1) | numpy.arange |
import sys
import random
import numpy as np
from collections import namedtuple
def log(*args, **kwargs):
print(*args, flush=True, file=sys.stderr, **kwargs)
class Delta(namedtuple("Recipe", ['array'])):
def __new__(cls, array):
return super().__new__(cls, np.array(array))
@staticmethod
def ceil(array, value=0):
    # Despite the name, this clips each entry from above at `value`
    # (an element-wise minimum): deficits are kept, surpluses are dropped.
    return np.array([x if x < value else value for x in array])
def uniform_distance(self, other):
delta = self.array + other.array
delta = self.ceil(delta)
return abs(sum(delta))
def weight_distance(self, other):
delta = self.array + other.array
delta = self.ceil(delta)
r = | np.dot(delta, [1, 1.5, 2.5, 3.5]) | numpy.dot |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 20 17:25:54 2018
@author: <NAME> <EMAIL>
"""
import numpy as np
import os
import svgwrite
# import json
from PIL import Image, ImageDraw
from rdp import rdp # pip install rdp
# from IPython.display import SVG, display
from svgpathtools import real, imag, svg2paths, wsvg # pip install git+https://github.com/mathandy/svgpathtools#egg=svgpathtools
def slerp(p0, p1, t):
"""Spherical interpolation."""
omega = np.arccos(np.dot(p0 / np.linalg.norm(p0), p1 / np.linalg.norm(p1)))
so = np.sin(omega)
if so < 1e-6:  # p0 and p1 are (nearly) collinear, fall back to p0
return p0
else:
return np.sin((1.0 - t) * omega) / so * p0 + np.sin(t * omega) / so * p1
def lerp(p0, p1, t):
"""Linear interpolation."""
return (1.0 - t) * p0 + t * p1
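# Illustrative sketch (added here, not part of the original file; the helper
# name _demo_interpolation is hypothetical): compare slerp and lerp between
# two 2-D points. slerp follows the arc between the two directions, lerp the
# straight chord.
def _demo_interpolation():
    p0 = np.array([1.0, 0.0])
    p1 = np.array([0.0, 1.0])
    for t in (0.0, 0.5, 1.0):
        print(t, slerp(p0, p1, t), lerp(p0, p1, t))
    # at t=0.5: slerp gives ~[0.707, 0.707], lerp gives [0.5, 0.5]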
def get_bounds(data, factor=1.0):
"""Return bounds of stroke-3 data."""
min_x = 0
max_x = 0
min_y = 0
max_y = 0
abs_x = 0
abs_y = 0
for i in range(len(data)):
x = float(data[i, 0]) / factor
y = float(data[i, 1]) / factor
abs_x += x
abs_y += y
min_x = min(min_x, abs_x)
min_y = min(min_y, abs_y)
max_x = max(max_x, abs_x)
max_y = max(max_y, abs_y)
return min_x, max_x, min_y, max_y
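# Minimal usage sketch (illustration only, not in the original file; the
# helper name _demo_get_bounds is hypothetical): get_bounds expects stroke-3
# style rows whose first two columns are x/y offsets and returns the bounding
# box of the accumulated absolute positions.
def _demo_get_bounds():
    data = np.array([[5, 0, 0], [0, 5, 0], [-10, -10, 1]], dtype=np.float32)
    print(get_bounds(data))  # -> (-5.0, 5.0, -5.0, 5.0)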
def lines_to_strokes(lines, omit_first_point=True):
"""
Convert polyline format to stroke-3 format.
lines: list of strokes, each stroke has format Nx2
"""
strokes = []
for line in lines:
linelen = len(line)
for i in range(linelen):
eos = 0 if i < linelen - 1 else 1
strokes.append([line[i][0], line[i][1], eos])
strokes = np.array(strokes)
strokes[1:, 0:2] -= strokes[:-1, 0:2]
return strokes[1:, :] if omit_first_point else strokes
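# Hedged example (added for illustration; _demo_lines_to_strokes is a
# hypothetical helper): two absolute polylines converted to stroke-3 offsets.
# With omit_first_point=True the first row is dropped, so each row encodes a
# pen movement relative to the previous point plus an end-of-stroke flag.
def _demo_lines_to_strokes():
    lines = [[[0, 0], [1, 0], [1, 1]], [[2, 2], [3, 2]]]
    print(lines_to_strokes(lines))
    # -> [[1 0 0] [0 1 1] [1 1 0] [1 0 1]]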
def strokes_to_lines(strokes, scale=1.0, start_from_origin=False):
"""
Convert stroke-3 to polyline format, i.e. absolute x-y coordinates.
Note: the resulting coordinates can be negative.
:param strokes: stroke3, Nx3
:param scale: scale factor applied on stroke3
:param start_from_origin: sketch starts from [0,0] if True
:return: list of strokes, each stroke has format Nx2
"""
x = 0
y = 0
lines = []
line = [[0, 0]] if start_from_origin else []
for i in range(len(strokes)):
x_, y_ = strokes[i, :2] * scale
x += x_
y += y_
line.append([x, y])
if strokes[i, 2] == 1:
line_array = np.array(line) + np.zeros((1, 2), dtype=np.uint8)
lines.append(line_array)
line = []
if not lines:  # no pen-up flag encountered: return the single accumulated line
line_array = np.array(line) + np.zeros((1, 2), dtype=np.uint8)
lines.append(line_array)
return lines
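# Round-trip sketch (illustration only; _demo_strokes_to_lines is a
# hypothetical helper): the stroke-3 offsets from the previous example
# converted back to absolute polylines. Each returned element is an Nx2 array
# ending where the pen-up flag (third column) was 1.
def _demo_strokes_to_lines():
    strokes = np.array([[1, 0, 0], [0, 1, 1], [1, 1, 0], [1, 0, 1]])
    for line in strokes_to_lines(strokes, start_from_origin=True):
        print(line)
    # -> [[0 0] [1 0] [1 1]] and [[2 2] [3 2]]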
def centralise_lines(lines, shape=None, jitter=False):
"""
put polyline in centre of a canvas specified by shape
:param lines: list of strokes each having format Nx2 (e.g. output of strokes_to_lines)
:param shape: shape of the canvas (x,y); None if you want the canvas auto fit the lines
:param jitter: if True, random offset within shape canvas
:return: lines after centred and offset
"""
# find line boundary
line_array = np.concatenate(lines, axis=0) # Nx2
min_x, min_y = line_array.min(axis=0)
max_x, max_y = line_array.max(axis=0)
if shape is None:
shape = (max_x - min_x, max_y - min_y)
if jitter:
dx = max(int((shape[0] - max_x + min_x)/2.0) - 2, 0)
dy = max(int((shape[1] - max_y + min_y)/2.0) - 2, 0)
offset = (np.random.randint(-dx, dx+1), np.random.randint(-dy, dy+1))
else:
offset = (0, 0)
sx = offset[0] + (shape[0] - max_x - min_x)/2 # total shift along x
sy = offset[1] + (shape[1] - max_y - min_y)/2  # total shift along y
sxy = np.array([sx, sy])[None, ...]
out = [line + sxy for line in lines]
return out
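# Illustration (not from the original source; _demo_centralise_lines is a
# hypothetical helper): centring a single short stroke on a 256x256 canvas.
# With jitter=False the stroke is shifted so its bounding box is centred;
# with jitter=True a random offset within the remaining margin is added.
def _demo_centralise_lines():
    lines = [np.array([[0.0, 0.0], [10.0, 20.0]])]
    centred = centralise_lines(lines, shape=(256, 256), jitter=False)
    print(centred[0])  # -> [[123. 118.] [133. 138.]]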
def normalise_strokes3(stroke3, max_bound=1.0):
"""
Normalise a sketch so that its larger dimension equals max_bound.
:param stroke3: stroke-3 format (N, 3)
:param max_bound: maximum extent of the sketch along the x or y dimension
:return: (N, 3)
"""
stroke = np.array(stroke3, dtype=np.float32)
min_x, max_x, min_y, max_y = get_bounds(stroke)
max_dim = max([max_x - min_x, max_y - min_y, 1])
stroke[:, :2] = stroke[:, :2] / max_dim * max_bound
return stroke
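# Small sketch (illustrative only; _demo_normalise_strokes3 is a hypothetical
# helper): rescale stroke-3 offsets so the larger of the sketch's width and
# height becomes max_bound. Only the x/y columns change; the pen-up column is
# left untouched.
def _demo_normalise_strokes3():
    strokes = np.array([[10.0, 0.0, 0.0], [0.0, 5.0, 1.0]])
    print(normalise_strokes3(strokes, max_bound=1.0))
    # -> [[1.  0.  0. ] [0.  0.5 1. ]]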
def aggregate(strokes, max_bound=1.0):
"""
Concatenate stroke-3 data into a single array, rescaled to max_bound.
Used to create an HDF5 database.
:param strokes: array of stroke-3, length N
:param max_bound: maximum bound of sketch along x and y dimension
:return: (concated, ids, N) where
concated: all data concatenated in a single array
ids: has size Nx2 showing start and end position in concated
N: number of datum
"""
N = len(strokes)
# get start and end position
dlen = [len(x) for x in strokes]
ids = np.repeat(np.cumsum(dlen), 2).tolist()
ids = [0, ] + ids[:-1]
ids = np.int64(ids)
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels doesn't matter which way copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names doesn't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format manage
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1), np.array([0, 1]).astype(dtype2)]
'''
Includes:
* Function to compute the IoU similarity for axis-aligned, rectangular, 2D bounding boxes
* Function for coordinate conversion for axis-aligned, rectangular, 2D bounding boxes
Copyright (C) 2018 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
def convert_coordinates(tensor, start_index, conversion, border_pixels='half'):
'''
Convert coordinates for axis-aligned 2D boxes between two coordinate formats.
Creates a copy of `tensor`, i.e. does not operate in place. Currently there are
three supported coordinate formats that can be converted from and to each other:
1) (xmin, xmax, ymin, ymax) - the 'minmax' format
2) (xmin, ymin, xmax, ymax) - the 'corners' format
3) (cx, cy, w, h) - the 'centroids' format
Arguments:
tensor (array): A Numpy nD array containing the four consecutive coordinates
to be converted somewhere in the last axis.
start_index (int): The index of the first coordinate in the last axis of `tensor`.
conversion (str, optional): The conversion direction. Can be 'minmax2centroids',
'centroids2minmax', 'corners2centroids', 'centroids2corners', 'minmax2corners',
or 'corners2minmax'.
border_pixels (str, optional): How to treat the border pixels of the bounding boxes.
Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong
to the boxes. If 'exclude', the border pixels do not belong to the boxes.
If 'half', then one of each of the two horizontal and vertical borders belong
to the boxes, but not the other.
Returns:
A Numpy nD array, a copy of the input tensor with the converted coordinates
in place of the original coordinates and the unaltered elements of the original
tensor elsewhere.
'''
if border_pixels == 'half':
d = 0
elif border_pixels == 'include':
d = 1
elif border_pixels == 'exclude':
d = -1
ind = start_index
tensor1 = np.copy(tensor).astype(np.float)
if conversion == 'minmax2centroids':
tensor1[..., ind] = (tensor[..., ind] + tensor[..., ind+1]) / 2.0 # Set cx
tensor1[..., ind+1] = (tensor[..., ind+2] + tensor[..., ind+3]) / 2.0 # Set cy
tensor1[..., ind+2] = tensor[..., ind+1] - tensor[..., ind] + d # Set w
tensor1[..., ind+3] = tensor[..., ind+3] - tensor[..., ind+2] + d # Set h
elif conversion == 'centroids2minmax':
tensor1[..., ind] = tensor[..., ind] - tensor[..., ind+2] / 2.0 # Set xmin
tensor1[..., ind+1] = tensor[..., ind] + tensor[..., ind+2] / 2.0 # Set xmax
tensor1[..., ind+2] = tensor[..., ind+1] - tensor[..., ind+3] / 2.0 # Set ymin
tensor1[..., ind+3] = tensor[..., ind+1] + tensor[..., ind+3] / 2.0 # Set ymax
elif conversion == 'corners2centroids':
tensor1[..., ind] = (tensor[..., ind] + tensor[..., ind+2]) / 2.0 # Set cx
tensor1[..., ind+1] = (tensor[..., ind+1] + tensor[..., ind+3]) / 2.0 # Set cy
tensor1[..., ind+2] = tensor[..., ind+2] - tensor[..., ind] + d # Set w
tensor1[..., ind+3] = tensor[..., ind+3] - tensor[..., ind+1] + d # Set h
elif conversion == 'centroids2corners':
tensor1[..., ind] = tensor[..., ind] - tensor[..., ind+2] / 2.0 # Set xmin
tensor1[..., ind+1] = tensor[..., ind+1] - tensor[..., ind+3] / 2.0 # Set ymin
tensor1[..., ind+2] = tensor[..., ind] + tensor[..., ind+2] / 2.0 # Set xmax
tensor1[..., ind+3] = tensor[..., ind+1] + tensor[..., ind+3] / 2.0 # Set ymax
elif (conversion == 'minmax2corners') or (conversion == 'corners2minmax'):
tensor1[..., ind+1] = tensor[..., ind+2]
tensor1[..., ind+2] = tensor[..., ind+1]
else:
raise ValueError("Unexpected conversion value. Supported values are 'minmax2centroids', 'centroids2minmax', 'corners2centroids', 'centroids2corners', 'minmax2corners', and 'corners2minmax'.")
return tensor1
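# Illustrative usage sketch (not part of the original module): converting a single
# 'minmax' box (xmin, xmax, ymin, ymax) to the 'centroids' format (cx, cy, w, h).
# With the default border_pixels='half' (d = 0), the box (10, 30, 20, 60) becomes (20, 40, 20, 40):
#
#   box = np.array([[10., 30., 20., 60.]])
#   convert_coordinates(box, start_index=0, conversion='minmax2centroids')
#   # -> array([[20., 40., 20., 40.]])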
def convert_coordinates2(tensor, start_index, conversion):
'''
A matrix multiplication implementation of `convert_coordinates()`.
Supports only conversion between the 'centroids' and 'minmax' formats.
This function is marginally slower on average than `convert_coordinates()`,
probably because it involves more (unnecessary) arithmetic operations (unnecessary
because the two matrices are sparse).
For details please refer to the documentation of `convert_coordinates()`.
'''
ind = start_index
tensor1 = np.copy(tensor).astype(np.float)
if conversion == 'minmax2centroids':
M = np.array([[0.5, 0. , -1., 0.],
[0.5, 0. , 1., 0.],
[0. , 0.5, 0., -1.],
[0. , 0.5, 0., 1.]])
tensor1[..., ind:ind+4] = np.dot(tensor1[..., ind:ind+4], M)
elif conversion == 'centroids2minmax':
M = np.array([[ 1. , 1. , 0. , 0. ],
[ 0. , 0. , 1. , 1. ],
[-0.5, 0.5, 0. , 0. ],
[ 0. , 0. , -0.5, 0.5]]) # The multiplicative inverse of the matrix above
tensor1[..., ind:ind+4] = np.dot(tensor1[..., ind:ind+4], M)
else:
raise ValueError("Unexpected conversion value. Supported values are 'minmax2centroids' and 'centroids2minmax'.")
return tensor1
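# Quick consistency sketch (illustrative, not from the original source): for the
# conversions both implementations support, their results should agree.
#
#   boxes = np.array([[20., 40., 20., 40.]])   # one box in 'centroids' format
#   np.allclose(convert_coordinates(boxes, 0, 'centroids2minmax'),
#               convert_coordinates2(boxes, 0, 'centroids2minmax'))
#   # -> True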
def intersection_area(boxes1, boxes2, coords='centroids', mode='outer_product', border_pixels='half'):
'''
Computes the intersection areas of two sets of axis-aligned 2D rectangular boxes.
Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively.
In 'outer_product' mode, returns an `(m,n)` matrix with the intersection areas for all possible
combinations of the boxes in `boxes1` and `boxes2`.
In 'element-wise' mode, `m` and `n` must be broadcast-compatible. Refer to the explanation
of the `mode` argument for details.
Arguments:
boxes1 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the
format specified by `coords` or a 2D Numpy array of shape `(m, 4)` containing the coordinates for `m` boxes.
If `mode` is set to 'element_wise', the shape must be broadcast-compatible with `boxes2`.
boxes2 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the
format specified by `coords` or a 2D Numpy array of shape `(n, 4)` containing the coordinates for `n` boxes.
If `mode` is set to 'element_wise', the shape must be broadcast-compatible with `boxes1`.
coords (str, optional): The coordinate format in the input arrays. Can be either 'centroids' for the format
`(cx, cy, w, h)`, 'minmax' for the format `(xmin, xmax, ymin, ymax)`, or 'corners' for the format
`(xmin, ymin, xmax, ymax)`.
mode (str, optional): Can be one of 'outer_product' and 'element-wise'. In 'outer_product' mode, returns an
`(m,n)` matrix with the intersection areas for all possible combinations of the `m` boxes in `boxes1` with the
`n` boxes in `boxes2`. In 'element-wise' mode, returns a 1D array and the shapes of `boxes1` and `boxes2`
must be broadcast-compatible. If both `boxes1` and `boxes2` have `m` boxes, then this returns an array of
length `m` where the i-th position contains the intersection area of `boxes1[i]` with `boxes2[i]`.
border_pixels (str, optional): How to treat the border pixels of the bounding boxes.
Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong
to the boxes. If 'exclude', the border pixels do not belong to the boxes.
If 'half', then one of each of the two horizontal and vertical borders belong
to the boxes, but not the other.
Returns:
A 1D or 2D Numpy array (refer to the `mode` argument for details) of dtype float containing values with
the intersection areas of the boxes in `boxes1` and `boxes2`.
'''
# Make sure the boxes have the right shapes.
if boxes1.ndim > 2: raise ValueError("boxes1 must have rank either 1 or 2, but has rank {}.".format(boxes1.ndim))
if boxes2.ndim > 2: raise ValueError("boxes2 must have rank either 1 or 2, but has rank {}.".format(boxes2.ndim))
if boxes1.ndim == 1: boxes1 = np.expand_dims(boxes1, axis=0)
if boxes2.ndim == 1: boxes2 = np.expand_dims(boxes2, axis=0)
if not (boxes1.shape[1] == boxes2.shape[1] == 4): raise ValueError("All boxes must consist of 4 coordinates, but the boxes in `boxes1` and `boxes2` have {} and {} coordinates, respectively.".format(boxes1.shape[1], boxes2.shape[1]))
if mode not in {'outer_product', 'element-wise'}: raise ValueError("`mode` must be one of 'outer_product' and 'element-wise', but got '{}'.".format(mode))
# Convert the coordinates if necessary.
if coords == 'centroids':
boxes1 = convert_coordinates(boxes1, start_index=0, conversion='centroids2corners')
boxes2 = convert_coordinates(boxes2, start_index=0, conversion='centroids2corners')
coords = 'corners'
elif not (coords in {'minmax', 'corners'}):
raise ValueError("Unexpected value for `coords`. Supported values are 'minmax', 'corners' and 'centroids'.")
m = boxes1.shape[0] # The number of boxes in `boxes1`
n = boxes2.shape[0] # The number of boxes in `boxes2`
# Set the correct coordinate indices for the respective formats.
if coords == 'corners':
xmin = 0
ymin = 1
xmax = 2
ymax = 3
elif coords == 'minmax':
xmin = 0
xmax = 1
ymin = 2
ymax = 3
if border_pixels == 'half':
d = 0
elif border_pixels == 'include':
d = 1 # If border pixels are supposed to belong to the bounding boxes, we have to add one pixel to any difference `xmax - xmin` or `ymax - ymin`.
elif border_pixels == 'exclude':
d = -1 # If border pixels are not supposed to belong to the bounding boxes, we have to subtract one pixel from any difference `xmax - xmin` or `ymax - ymin`.
# Compute the intersection areas.
if mode == 'outer_product':
# For all possible box combinations, get the greater xmin and ymin values.
# This is a tensor of shape (m,n,2).
min_xy = np.maximum(np.tile(np.expand_dims(boxes1[:,[xmin,ymin]], axis=1), reps=(1, n, 1)),
np.tile(np.expand_dims(boxes2[:,[xmin,ymin]], axis=0), reps=(m, 1, 1)))
# For all possible box combinations, get the smaller xmax and ymax values.
# This is a tensor of shape (m,n,2).
max_xy = np.minimum(np.tile(np.expand_dims(boxes1[:,[xmax,ymax]], axis=1), reps=(1, n, 1)),
np.tile(np.expand_dims(boxes2[:,[xmax,ymax]], axis=0), reps=(m, 1, 1)))
# Compute the side lengths of the intersection rectangles.
side_lengths = np.maximum(0, max_xy - min_xy + d)
import os
import numpy as np
import cv2
import pandas as pd
import matplotlib.pyplot as plt
import zipfile
from tqdm import tqdm
import torch
from lib.core.model.semodel.SeResnet import se_resnet50
HEIGHT = 137
WIDTH = 236
SIZE = 128
def bbox(img):
rows = np.any(img, axis=1)
cols = np.any(img, axis=0)
rmin, rmax = np.where(rows)[0][[0, -1]]
# Copyright 2021 The ParallelAccel Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import pytest
import jax
import numpy as np
import sympy
import linear_algebra
from linear_algebra.study import ParamResolver
from asic_la.parser import (
get_unitary,
get_autodiff_gradient,
get_finitediff_gradient,
parse,
assert_is_allowed_expression,
SIMPLE_BUILDING_BLOCKS,
)
from asic_la.testutils import build_random_acyclic_graph
jax.config.update("jax_enable_x64", True)
eigen_building_blocks = sorted(SIMPLE_BUILDING_BLOCKS, key=repr)
prob_basis_axis_building_blocks = [linear_algebra.flip_x_axis, linear_algebra.flip_y_axis, linear_algebra.flip_z_axis, linear_algebra.flip_pi_over_4_axis]
rotation_building_blocks = [linear_algebra.rotate_x_axis, linear_algebra.rotate_y_axis, linear_algebra.rotate_z_axis]
phased_building_blocks = [linear_algebra.x_axis_two_angles, linear_algebra.imaginary_swap_two_angles]
def test_assert_is_allowed_expression():
pi = sympy.pi
a, b = sympy.symbols("a b")
with pytest.raises(ValueError):
assert_is_allowed_expression(a ** 2)
with pytest.raises(ValueError):
assert_is_allowed_expression(a + b)
with pytest.raises(ValueError):
assert_is_allowed_expression(a / b)
with pytest.raises(ValueError):
assert_is_allowed_expression(2 / b)
with pytest.raises(ValueError):
assert_is_allowed_expression(pi / b)
assert_is_allowed_expression(a + a)
assert_is_allowed_expression(a * 2)
assert_is_allowed_expression(2 * a * 2)
assert_is_allowed_expression(pi * a * pi)
assert_is_allowed_expression(pi * a * pi ** 2)
assert_is_allowed_expression(pi ** 2 * a * pi)
assert_is_allowed_expression(2 * a)
assert_is_allowed_expression(a / 2)
assert_is_allowed_expression(a / pi)
@pytest.mark.parametrize("seed", np.arange(10))
@pytest.mark.parametrize("linear_algebra_building_block", rotation_building_blocks)
def test_get_unitary_rotation_regular(linear_algebra_building_block, seed):
np.random.seed(seed)
resolver = ParamResolver({})
phase = (np.random.rand(1) - 0.5) * 100
building_block = linear_algebra_building_block(phase)
actual = get_unitary(building_block, resolver)
expected = linear_algebra.unitary(building_block)
eps = np.finfo(actual.dtype).eps * 500
np.testing.assert_allclose(expected, actual, atol=eps, rtol=eps)
@pytest.mark.parametrize("seed", np.arange(10))
@pytest.mark.parametrize("linear_algebra_building_block", prob_basis_axis_building_blocks)
def test_get_unitary_prob_basis_axis_regular(linear_algebra_building_block, seed):
np.random.seed(seed)
resolver = ParamResolver({})
building_block = linear_algebra_building_block
actual = get_unitary(building_block, resolver)
expected = linear_algebra.unitary(building_block)
eps = np.finfo(actual.dtype).eps * 500
np.testing.assert_allclose(expected, actual, atol=eps, rtol=eps)
@pytest.mark.parametrize("linear_algebra_building_block", eigen_building_blocks)
@pytest.mark.parametrize("seed", np.arange(10))
def test_get_unitary_eigen(linear_algebra_building_block, seed):
t = sympy.Symbol("time")
np.random.seed(seed)
resolver = ParamResolver({"time": np.random.rand(1)[0] * 100.0})
building_block = linear_algebra_building_block(exponent=t)
actual = get_unitary(building_block, resolver)
expected = linear_algebra.unitary(linear_algebra.resolve_parameters(building_block, resolver))
eps = np.finfo(actual.dtype).eps * 500
np.testing.assert_allclose(expected, actual, atol=eps, rtol=eps)
@pytest.mark.parametrize("linear_algebra_building_block", phased_building_blocks)
@pytest.mark.parametrize("seed", np.random.randint(0, 100000, 102))
def test_get_unitary_phased(linear_algebra_building_block, seed):
t = sympy.Symbol("time")
p = sympy.Symbol("phase")
np.random.seed(seed)
resolver = ParamResolver(
{
"time": np.random.rand(1)[0] * 100.0,
"phase": np.random.rand(1)[0] * 100.0,
}
)
building_block = linear_algebra_building_block(phase_exponent=p, exponent=t)
actual = get_unitary(building_block, resolver)
expected = linear_algebra.unitary(linear_algebra.resolve_parameters(building_block, resolver))
eps = np.finfo(actual.dtype).eps * 500
np.testing.assert_allclose(expected, actual, atol=eps, rtol=eps)
@pytest.mark.parametrize("seed", np.arange(10))
def test_get_unitary_fsimbuilding_block(seed):
t = sympy.Symbol("theta")
p = sympy.Symbol("phi")
np.random.seed(seed)
resolver = ParamResolver(
{
"theta": np.random.rand(1)[0] * 100.0,
"phi": np.random.rand(1)[0] * 100.0,
}
)
building_block = linear_algebra.rotate_on_xy_plane(theta=t, phi=p)
actual = get_unitary(building_block, resolver)
expected = linear_algebra.unitary(linear_algebra.resolve_parameters(building_block, resolver))
eps = np.finfo(actual.dtype).eps * 500
np.testing.assert_allclose(expected, actual, atol=eps, rtol=eps)
@pytest.mark.parametrize("linear_algebra_building_block", prob_basis_axis_building_blocks)
@pytest.mark.parametrize("seed", np.random.randint(0, 100000, 2))
def test_autodiff_gradient_pauli(linear_algebra_building_block, seed):
t = sympy.Symbol("time")
np.random.seed(seed)
eps = 1e-7
tol = 1e-6
resolver = ParamResolver({"time": np.random.rand(1)[0] * 100.0})
building_block = linear_algebra_building_block ** t
finitediff = get_finitediff_gradient(building_block, resolver, eps)
grads = get_autodiff_gradient(building_block, resolver)
g = {k.name: v for k, v in grads.items()}
for k, expected in finitediff.items():
actual = g[k]
np.testing.assert_allclose(expected, actual, atol=tol, rtol=tol)
@pytest.mark.parametrize("linear_algebra_building_block", prob_basis_axis_building_blocks)
@pytest.mark.parametrize("seed", np.arange(10))
def test_autodiff_gradient_prob_basis_axis_regular(linear_algebra_building_block, seed):
np.random.seed(seed)
t = (np.random.randint(1) - 0.5) * 100
resolver = ParamResolver({"time": np.random.rand(1)[0] * 100.0})
# -*- coding: utf-8 -*-
__all__ = ["QuadPotentialDenseAdapt", "get_dense_nuts_step", "sample"]
import numpy as np
import pymc3 as pm
import theano
from pymc3.model import all_continuous, modelcontext
from pymc3.step_methods.hmc.quadpotential import QuadPotential
from pymc3.step_methods.step_sizes import DualAverageAdaptation
from scipy.linalg import LinAlgError, cholesky, solve_triangular
from .utils import logger
class QuadPotentialDenseAdapt(QuadPotential):
"""Adapt a dense mass matrix from the sample covariances."""
def __init__(
self,
n,
initial_mean=None,
initial_cov=None,
initial_weight=0,
adaptation_window=101,
doubling=True,
update_steps=None,
dtype="float64",
):
if initial_mean is None:
initial_mean = np.zeros(n, dtype=dtype)
if initial_cov is None:
initial_cov = np.eye(n, dtype=dtype)
initial_weight = 1
if initial_cov is not None and initial_cov.ndim != 2:
raise ValueError("Initial covariance must be two-dimensional.")
if initial_mean is not None and initial_mean.ndim != 1:
raise ValueError("Initial mean must be one-dimensional.")
if initial_cov is not None and initial_cov.shape != (n, n):
raise ValueError(
"Wrong shape for initial_cov: expected %s got %s"
% (n, initial_cov.shape)
)
if len(initial_mean) != n:
raise ValueError(
"Wrong shape for initial_mean: expected %s got %s"
% (n, len(initial_mean))
)
self.dtype = dtype
self._n = n
self._cov = np.array(initial_cov, dtype=self.dtype, copy=True)
self._cov_theano = theano.shared(self._cov)
self._chol = cholesky(self._cov, lower=True)
self._chol_error = None
self._foreground_cov = _WeightedCovariance(
self._n, initial_mean, initial_cov, initial_weight, self.dtype
)
self._background_cov = _WeightedCovariance(self._n, dtype=self.dtype)
self._n_samples = 0
# For backwards compatibility
self._doubling = doubling
self._adaptation_window = int(adaptation_window)
self._previous_update = 0
# New interface
if update_steps is None:
self._update_steps = None
else:
self._update_steps = np.atleast_1d(update_steps).astype(int)
def velocity(self, x, out=None):
return np.dot(self._cov, x, out=out)
def energy(self, x, velocity=None):
if velocity is None:
velocity = self.velocity(x)
return 0.5 * np.dot(x, velocity)
def velocity_energy(self, x, v_out):
self.velocity(x, out=v_out)
return self.energy(x, v_out)
def random(self):
vals = np.random.normal(size=self._n).astype(self.dtype)
return solve_triangular(self._chol.T, vals, overwrite_b=True)
def _update_from_weightvar(self, weightvar):
weightvar.current_covariance(out=self._cov)
try:
self._chol = cholesky(self._cov, lower=True)
except (LinAlgError, ValueError) as error:
self._chol_error = error
self._cov_theano.set_value(self._cov)
def update(self, sample, grad, tune):
if not tune:
return
self._foreground_cov.add_sample(sample, weight=1)
self._background_cov.add_sample(sample, weight=1)
self._update_from_weightvar(self._foreground_cov)
# Support the two methods for updating the mass matrix
delta = self._n_samples - self._previous_update
do_update = (
self._update_steps is not None
and self._n_samples in self._update_steps
) or (self._update_steps is None and delta >= self._adaptation_window)
if do_update:
self._foreground_cov = self._background_cov
self._background_cov = _WeightedCovariance(
self._n, dtype=self.dtype
)
if self._update_steps is None:
self._previous_update = self._n_samples
if self._doubling:
self._adaptation_window *= 2
self._n_samples += 1
def raise_ok(self, vmap):
if self._chol_error is not None:
raise ValueError("{0}".format(self._chol_error))
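# Minimal usage sketch (illustrative, assumes a 2-parameter model; not from the original source):
#
#   pot = QuadPotentialDenseAdapt(2)
#   x = np.array([1.0, -0.5])
#   v = pot.velocity(x)              # dot(mass matrix, x); the mass matrix starts as the identity, so v == x
#   e = pot.energy(x)                # 0.5 * dot(x, v) == 0.625 here
#   pot.update(x, None, tune=True)   # feed tuning samples to adapt the dense mass matrix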
class _WeightedCovariance:
"""Online algorithm for computing mean and covariance."""
def __init__(
self,
nelem,
initial_mean=None,
initial_covariance=None,
initial_weight=0,
dtype="float64",
):
self._dtype = dtype
self.n_samples = float(initial_weight)
if initial_mean is None:
self.mean = np.zeros(nelem, dtype=dtype)
else:
self.mean = np.array(initial_mean, dtype=dtype, copy=True)
if initial_covariance is None:
self.raw_cov = np.eye(nelem, dtype=dtype)
else:
self.raw_cov = np.array(initial_covariance, dtype=dtype, copy=True)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Built-in imports
from typing import Union
# 3rd party imports
import numpy as np
from scipy import interpolate
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2020-2021"
__license__ = "MIT"
__version__ = "2.3.7"
__status__ = "Prototype"
surface_materials = ['cluster', 'themis', 'cassini', 'aluminium', 'aquadag',
'gold', 'graphite', 'solar cells', '1eV', 'TiN',
'elgiloy']
j_zeros = {"cassini": 25e-6, "tin": 25e-6, "cluster": 25e-6,
"aluminium": 30e-6, "aquadag": 18e-6, "gold": 29e-6,
"graphite": 7.2e-6, "solar cells": 20e-6, "solar cell": 20e-6,
"elgiloy": 30e-6}
def photo_current(iluminated_area: float = None,
u: Union[float, np.ndarray] = None,
distance_sun: float = None,
flag: Union[str, float] = "cluster") -> Union[float,
np.ndarray]:
r"""Calculates the photo-current emitted by an arbitrary body.
Parameters
----------
iluminated_area : float
Cross section area [m^2].
u : float or numpy.ndarray
Potential [V].
distance_sun : float
Distance from the Sun [AU].
flag : str or float, Optional
Surface materials or surface photoemission in [A/m^2].
Default is "cluster".
Returns
-------
j_photo : float or numpy.ndarray
Photo-current emitted.
Notes
-----
Estimates are done for solar minimum conditions.
"""
assert isinstance(flag, (str, float))
if not iluminated_area and not u and not distance_sun:
for surf in surface_materials:
j0 = photo_current(1, 0, 1, surf)
print(f"{surf}: Io= {j0 * 1e6:3.2f} uA/m2")
return
# Assert than u is an array
u = np.atleast_1d(u)
if isinstance(flag, (float, int)):
photoemission = flag
# Initialize
j_photo = np.ones(u.shape)
# initialize to current valid for negative potentials
j_photo *= photoemission * iluminated_area / distance_sun ** 2
a_ = 5.0e-5 / 5.6e-5 * np.exp(- u[u >= 0.] / 2.74)
b_ = 1.2e-5 / 5.6e-5 * np.exp(- (u[u >= 0] + 10.0) / 14.427)
j_photo[u >= 0] *= a_ + b_
elif flag.lower() == "1ev":
j_photo = np.ones(u.shape)
# GIR - <NAME> and <NAME>
import numpy as np
import pandas as pd
import numexpr as ne
import scipy as sp
from pathlib import Path
from tqdm import tqdm
def return_empty_emissions(df_to_copy=False, start_year=1765, end_year=2500, timestep=1, scen_names=[0], gases_in = ['CO2','CH4','N2O'], help=False):
if help:
print('This function returns a dataframe of zeros in the correct format for use in GIR. Pass an existing emission/ concentration array to return a correspondingly formatted emissions array.')
if type(df_to_copy)==pd.core.frame.DataFrame:
df = pd.DataFrame(index = df_to_copy.index,columns=pd.MultiIndex.from_product([df_to_copy.columns.levels[0],gases_in],names=['Scenario','Gas'])).fillna(0).apply(pd.to_numeric)
else:
df = pd.DataFrame(index=np.arange(start_year,end_year+1,timestep)+(timestep!=1)*timestep/2,columns=pd.MultiIndex.from_product([scen_names,gases_in],names=['Scenario','Gas'])).fillna(0).apply(pd.to_numeric)
df.index.rename('Year',inplace=True)
return df
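# Example sketch (not part of the original source): an all-zero emissions template
# for two scenarios on annual steps from 2000 to 2010.
#
#   E = return_empty_emissions(start_year=2000, end_year=2010, scen_names=['ssp1', 'ssp2'])
#   E.shape   # -> (11, 6): one column per (Scenario, Gas) pair, default gases CO2/CH4/N2O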
def return_empty_forcing(df_to_copy=False, start_year=1765, end_year=2500, timestep=1, scen_names=[0], help=False):
if help:
print('This function returns a dataframe of zeros in the correct format for use in GIR. Pass an existing emission/ concentration array to return a corresponding forcing array.')
if type(df_to_copy)==pd.core.frame.DataFrame:
df = pd.DataFrame(index = df_to_copy.index,columns=pd.MultiIndex.from_product([df_to_copy.columns.levels[0],['forcing']],names=['Scenario','Variable'])).fillna(0).apply(pd.to_numeric)
else:
df = pd.DataFrame(index=np.arange(start_year,end_year+1,timestep)+(timestep!=1)*timestep/2,columns=pd.MultiIndex.from_product([scen_names,['forcing']],names=['Scenario','Gas'])).fillna(0).apply(pd.to_numeric)
df.index.rename('Year',inplace=True)
return df
def input_to_numpy(input_df):
# converts the dataframe input into a numpy array for calculation, dimension order = [name, gas, time/parameter]
return input_df.values.T.reshape(input_df.columns.levels[0].size, input_df.columns.levels[1].size, input_df.index.size)
def get_gas_parameter_defaults(choose_gases=pd.read_csv(Path(__file__).parent / "./Parameter_Sets/Complete_gas_cycle_params.csv",header=[0,1],index_col=0).columns.levels[-1],CH4_forc_feedbacks=False, help=False):
if help:
print('This function returns the GIR default parameter set for a gas set of your choice. You can choose from the following gas species:')
possible_gases = list(pd.read_csv(Path(__file__).parent / "./Parameter_Sets/Complete_gas_cycle_params.csv",header=[0,1],index_col=0).columns.levels[-1])
return possible_gases
CHOOSE_params = pd.read_csv(Path(__file__).parent / "./Parameter_Sets/Complete_gas_cycle_params.csv",header=[0,1],index_col=0).reindex(choose_gases,axis=1,level=1)
return CHOOSE_params
def get_thermal_parameter_defaults(TCR=1.77,RWF=0.55,F_2x=3.76):
# thermal_parameter_list = ['d','q']
# thermal_parameters = pd.DataFrame(columns=[1,2,3],index=thermal_parameter_list)
# d = np.array([283,9.88,0.85])
# q = np.array([0,0,0.242])
# k = 1-(d/70)*(1-np.exp(-70/d))
# q[:2] = ((TCR_ECS[0]/F_2x - k[2]*q[2]) - np.roll(k[:2],axis=0,shift=1)*(TCR_ECS[1]/F_2x - q[2]))/(k[:2] - np.roll(k[:2],axis=0,shift=1))
# thermal_parameters.loc['d'] = d
# thermal_parameters.loc['q'] = q
# thermal_parameters = pd.concat([thermal_parameters], keys = ['default'], axis = 1)
# thermal_parameters.index = thermal_parameters.index.rename('param_name')
# thermal_parameters.columns = thermal_parameters.columns.rename(['Thermal_param_set','Box'])
lnd1,lnd2,q1 = np.array([-0.20227299, 2.05243353, 0.20285564])
ln_d3_mean = 5.76338587
d1 = np.exp(lnd1)
d2 = np.exp(lnd2)
d3 = np.exp(ln_d3_mean)
ECS = TCR/RWF
v1 = (1-(d1/69.66) * (1-np.exp(-69.66/d1)) )
v2 = (1-(d2/69.66) * (1-np.exp(-69.66/d2)) )
v3 = (1-(d3/69.66) * (1-np.exp(-69.66/d3)) )
q3 = (((TCR/F_2x) - q1*(v1-v2) - (ECS/F_2x)*v2) / (v3-v2))
q2 = (ECS/F_2x - q1 - q3)
GIR_thermal_defaults = pd.DataFrame([[d1,d2,d3],[q1,q2,q3]],index=['d','q'],columns=pd.MultiIndex.from_product([['default'],[1,2,3]]))
return GIR_thermal_defaults.apply(pd.to_numeric)
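# Example sketch (illustrative): inspect the default three-box thermal response,
# derived here from TCR = 1.77 K, RWF = 0.55 and F_2x = 3.76 W m^-2.
#
#   therm = get_thermal_parameter_defaults()
#   therm.loc['d']   # response timescales of the three boxes (years)
#   therm.loc['q']   # corresponding response coefficients (K per W m^-2)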
def get_more_gas_cycle_params(N,choose_gases=['CO2','CH4','N2O'],CH4_forc_feedbacks=False, help=False):
param_defaults = get_gas_parameter_defaults(choose_gases=choose_gases,CH4_forc_feedbacks=CH4_forc_feedbacks)
param_uncert = pd.read_pickle(Path(__file__).parent / "./Parameter_Sets/Complete_parameter_uncertainty.p")
param_ensemble = pd.concat(N*[param_defaults['default']],keys=['gas'+str(x) for x in np.arange(N)],axis=1)
for gas in choose_gases:
for param in param_defaults.index:
select_param = param_uncert.loc[param,('default',gas)]
if select_param:
param_sample = select_param[0].rvs(*select_param[1],N)
param_ensemble.loc[param,(slice(None),gas)] = param_sample
return param_ensemble
def get_more_thermal_params(N=100,F_2x=3.84):
from copulas.multivariate import GaussianMultivariate
d1_d2_q1_copula = GaussianMultivariate.load(Path(__file__).parent / "./Parameter_Sets/d1_d2_q1_CMIP6_copula.pkl")
d1_d2_q1_df = d1_d2_q1_copula.sample(10*N)
while (d1_d2_q1_df<0).any(axis=1).sum() != 0:
d1_d2_q1_df.loc[(d1_d2_q1_df<0).any(axis=1)] = d1_d2_q1_copula.sample((d1_d2_q1_df<0).any(axis=1).sum()).values
d2_samples = d1_d2_q1_df['d2'].values
d3_samples = d1_d2_q1_df['d1'].values
q3_samples = d1_d2_q1_df['q1'].values
d1_samples = sp.stats.truncnorm(-2,2,loc=283,scale=116).rvs(10*N)
TCR_samples = np.random.lognormal(np.log(2.5)/2,np.log(2.5)/(2*1.645),10*N)
RWF_samples = sp.stats.truncnorm(-2.75,2.75,loc=0.582,scale=0.06).rvs(10*N)
ECS_samples = TCR_samples/RWF_samples
d = np.array([d1_samples,d2_samples,d3_samples])
k = 1-(d/70)*(1-np.exp(-70/d))
q = ((TCR_samples/F_2x - k[2]*q3_samples)[np.newaxis,:] - np.roll(k[:2],axis=0,shift=1)*(ECS_samples/F_2x - q3_samples)[np.newaxis,:])/(k[:2] - np.roll(k[:2],axis=0,shift=1))
sample_df = pd.DataFrame(index=['d','q'],columns = [1,2,3]).apply(pd.to_numeric)
df_list = []
i=0
j=0
while j<N:
curr_df = sample_df.copy()
curr_df.loc['d'] = d[:,i]
curr_df.loc['q',3] = q3_samples[i]
curr_df.loc['q',[1,2]] = q[:,i]
if curr_df.loc['q',2]<=0:
i+=1
continue
df_list += [curr_df]
j+=1
i+=1
thermal_params = pd.concat(df_list,axis=1,keys=['therm'+str(x) for x in np.arange(N)])
return thermal_params
def tcr_ecs_to_q(input_parameters=True , F_2x=3.76 , help=False):
# converts a 2-box tcr / ecs / d dataframe into a d / q dataframe for use in GIR
# F2x is the GIR default forcing parameter value
if help:
tcr_ecs_test = default_thermal_params()
tcr_ecs_test = pd.concat([tcr_ecs_test['default']]*2,keys=['default','1'],axis=1)
tcr_ecs_test.loc['tcr_ecs'] = [1.6,2.75,1.4,2.4]
tcr_ecs_test = tcr_ecs_test.loc[['d','tcr_ecs']]
print('Example input format:')
return tcr_ecs_test
if type(input_parameters.columns) != pd.core.indexes.multi.MultiIndex:
return 'input_parameters not in MultiIndex DataFrame. Set help=True for formatting of input.'
else:
output_params = input_parameters.copy()
param_arr = input_to_numpy(input_parameters)
k = 1.0 - (param_arr[:,:,0]/69.66)*(1.0 - np.exp(-69.66/param_arr[:,:,0]))
output_params.loc['q'] = ( ( param_arr[:,0,1][:,np.newaxis] - param_arr[:,1,1][:,np.newaxis] * np.roll(k,shift=1) )/( F_2x * ( k - np.roll(k,shift=1) ) ) ) .flatten()
return output_params.loc[['d','q']]
def q_to_tcr_ecs(input_parameters=True , F_2x=3.76 , help=False):
if help:
tcr_ecs_test = default_thermal_params()
tcr_ecs_test = pd.concat([tcr_ecs_test['default']]*2,keys=['default','1'],axis=1)
tcr_ecs_test.loc['q'] = [0.33,0.41,0.31,0.43]
tcr_ecs_test = tcr_ecs_test.loc[['d','q']]
print('Example input format:')
return tcr_ecs_test
if type(input_parameters.columns) != pd.core.indexes.multi.MultiIndex:
return 'input_parameters not in MultiIndex DataFrame. Set help=True for formatting of input.'
else:
output_params = pd.DataFrame(index = ['ECS','TCR'],columns = input_parameters.columns.levels[0])
for param_set in input_parameters.columns.levels[0]:
params = input_parameters.xs(param_set,level=0,axis=1)
ECS = F_2x * params.loc['q'].sum()
TCR = F_2x * ( params.loc['q'] * (1 - (params.loc['d']/69.66) * ( 1 - np.exp(-69.66/params.loc['d']) ) ) ).sum()
output_params.loc[:,param_set] = [ECS,TCR]
return output_params
def calculate_alpha(G,G_A,T,r,g0,g1,iirf100_max = False):
# iirf100_val = r[...,0] + r[...,1] * (G-G_A) + r[...,2] * T + r[...,3] * G_A
# iirf100_val = np.abs(iirf100_val)
# if iirf100_max:
# iirf100_val = (iirf100_val>iirf100_max) * iirf100_max + iirf100_val * (iirf100_val<iirf100_max)
# alpha_val = g0 * np.sinh(iirf100_val / g1)
iirf100_val = ne.evaluate("abs(r0 + rU * (G-G_A) + rT * T + rA * G_A)",{'r0':r[...,0],'rU':r[...,1],'rT':r[...,2],'rA':r[...,3],'G':G,'G_A':G_A,'T':T})
if iirf100_max:
iirf100_val = ne.evaluate("where(iirf100_val>iirf100_max,iirf100_max,iirf100_val)")
alpha_val = ne.evaluate("g0 * exp(iirf100_val / g1)")
return alpha_val
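# Worked sketch (hypothetical parameter values, not from the original source): with zero
# accumulated emissions, zero airborne fraction and zero warming, the iIRF100 reduces to r0,
# so alpha = g0 * exp(r0 / g1).
#
#   r = np.array([[35., 0.02, 4.5, 0.0]])   # hypothetical (r0, rU, rT, rA) for one gas
#   calculate_alpha(G=0., G_A=0., T=0., r=r, g0=0.01, g1=8.)
#   # -> 0.01 * exp(35 / 8) ~ 0.79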
def step_concentration(R_old,G_A_old,E,alpha,a,tau,PI_conc,emis2conc,dt=1):
# decay_rate = dt/(alpha*tau)
# decay_factor = np.exp( -decay_rate )
# R_new = E * a * 1/decay_rate * ( 1. - decay_factor ) + R_old * decay_factor
# G_A = np.sum(R_new,axis=-1)
# C = PI_conc + emis2conc * (G_A + G_A_old) / 2
decay_rate = ne.evaluate("dt/(alpha*tau)")
decay_factor = ne.evaluate("exp(-decay_rate)")
R_new = ne.evaluate("E * a / decay_rate * ( 1. - decay_factor ) + R_old * decay_factor") # there shouldn't be a dt in the first decay rate
G_A = ne.evaluate("sum(R_new,axis=4)")
C = ne.evaluate("PI_conc + emis2conc * (G_A + G_A_old) / 2")
return C,R_new,G_A
def unstep_concentration(R_old,G_A,alpha,a,tau,PI_conc,emis2conc,dt=1):
decay_rate = dt/(alpha*tau)
decay_factor = np.exp( -decay_rate )
E = (( G_A - np.sum(R_old*decay_factor,axis=-1) ) / np.sum( a / decay_rate * ( 1. - decay_factor ) ,axis=-1 ))
R_new = E[...,None] * a * 1/decay_rate * ( 1. - decay_factor ) + R_old * decay_factor
return E,R_new
def step_forcing(C,PI_conc,f):
# if the logarithmic/sqrt term is undefined (ie. C is zero or negative), this contributes zero to the overall forcing. An exception will appear, however.
# logforc = f[...,0] * np.log(C / PI_conc)
# linforc = f[...,1] * ( C - PI_conc )
# sqrtforc = f[...,2] * (np.sqrt(C) - np.sqrt(PI_conc))
# logforc[np.isnan(logforc)] = 0
# sqrtforc[np.isnan(sqrtforc)] = 0
logforc = ne.evaluate("f1 * where( (C/PI_conc) <= 0, 0, log(C/PI_conc) )",{'f1':f[...,0],'C':C,'PI_conc':PI_conc})
linforc = ne.evaluate("f2 * (C - PI_conc)",{'f2':f[...,1],'C':C,'PI_conc':PI_conc})
sqrtforc = ne.evaluate("f3 * ( (sqrt( where(C<0 ,0 ,C ) ) - sqrt(PI_conc)) )",{'f3':f[...,2],'C':C,'PI_conc':PI_conc})
RF = logforc + linforc + sqrtforc
return RF
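# Worked sketch (illustrative f coefficients, not the model defaults): a purely
# logarithmic forcing term for a doubling of concentration over its pre-industrial value.
#
#   f = np.array([[5.75, 0.0, 0.0]])
#   step_forcing(C=np.array([556.]), PI_conc=np.array([278.]), f=f)
#   # -> 5.75 * ln(2) ~ 3.99 (W m^-2)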
def step_temperature(S_old,F,q,d,dt=1):
# decay_factor = np.exp(-dt/d)
# S_new = q * F * ( 1 - decay_factor ) + S_old * decay_factor
# T = np.sum(S_old + S_new,axis=-1) / 2
decay_factor = ne.evaluate("exp(-dt/d)")
S_new = ne.evaluate("q * F * (1 - decay_factor) + S_old * decay_factor")
T = ne.evaluate("sum( (S_old + S_new)/2, axis=3 )")
return S_new,T
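# Worked sketch (illustrative parameter values): one scenario / gas-cycle / thermal
# parameter set with three boxes, stepped one year from equilibrium under 1 W m^-2.
# The hard-coded sum over axis 3 assumes the 4-D array shape used inside run_GIR.
#
#   d = np.array([283., 9.88, 0.85]).reshape(1, 1, 1, 3)
#   q = np.array([0.2, 0.3, 0.2]).reshape(1, 1, 1, 3)
#   S, T = step_temperature(S_old=np.zeros((1, 1, 1, 3)), F=1.0, q=q, d=d)
#   # per box, S = q * (1 - exp(-1/d)); T is the mid-step mean (S_old + S)/2 summed over boxes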
def run_GIR( emissions_in = False , concentrations_in = False , forcing_in = False , gas_parameters = get_gas_parameter_defaults() , thermal_parameters = get_thermal_parameter_defaults() , show_run_info = True , aer_concs_in = False ):
"""
Runs the development version of the FaIRv2.0 model, maintained by <NAME> and <NAME>.
Model description paper: https://doi.org/10.5194/gmd-2019-379
Parameters:
emissions_in (pandas.core.frame.DataFrame strictly with column index as pandas.core.indexes.multi.MultiIndex):
A pandas DataFrame containing emission data for the desired GHG and aerosol species. The columns most be a MultiIndex with [scenarios , species] as the levels. The species must be consistent between scenarios.
concentrations_in (pandas.core.frame.DataFrame strictly with column index as pandas.core.indexes.multi.MultiIndex):
A pandas DataFrame containing concentration data for the desired GHG and aerosol species. The columns most be a MultiIndex with [scenarios , species] as the levels. The species must be consistent between scenarios.
forcing_in (pandas.core.frame.DataFrame strictly with column index as pandas.core.indexes.multi.MultiIndex):
A pandas DataFrame containing data for aggregated external forcing. The columns most be a MultiIndex with [scenarios , forcing] as the levels. Note that the length of the inner column level dimension must be one (ie. forcings must be aggregated).
gas_parameters (pandas.core.frame.DataFrame strictly with column index as pandas.core.indexes.multi.MultiIndex):
A pandas DataFrame containing the gas cycle parameters for the desired GHG and aerosol species. The columns most be a MultiIndex with [parameter set , species] as the levels. The species must be consistent between parameter sets. 'Indirect' forcings can be specified by adding species with the syntax 'x|y': this means the gas cycle of species 'x' is used to compute an additional forcing based on the f parameters specified. 'y' designates the name of the indirect forcing, such as 'methane|strat_h2o'.
thermal_parameters (pandas.core.frame.DataFrame strictly with column index as pandas.core.indexes.multi.MultiIndex):
A pandas DataFrame containing the response parameters used for each box. The columns most be a MultiIndex with [parameter set , response box] as the levels. Any number of boxes can be specified by varying the number of timescales 'd' and coefficients 'q' supplied.
show_run_info (bool):
Specify whether to show information about the current run. Suggest setting to True for normal use, but False if optimising parameters or running recursively.
aer_concs_in (bool or list):
If list is passed, determines whether any gases in a concentration driven run are to be treated as emissions (aerosols). The Pre-industrial concentration of these gases is added to the input concentration before integration.
"""
# Determine the number of scenario runs , parameter sets , gases , integration period, timesteps
# There are 2 modes : emissions_driven , concentration_driven
# The model will assume if both are given then emissions take priority
if emissions_in is False: # check if concentration driven
concentration_driven = True
emissions_in = pd.DataFrame().reindex_like(concentrations_in)
time_index = concentrations_in.index
else: # otherwise emissions driven
concentration_driven=False
time_index = emissions_in.index
[(dim_scenario,scen_names),(dim_gas_param,gas_set_names),(dim_thermal_param,thermal_set_names)]=[(x.size,list(x)) for x in [emissions_in.columns.levels[0],gas_parameters.columns.levels[0],thermal_parameters.columns.levels[0]]]
gas_names = [x for x in gas_parameters.columns.levels[1] if '|' not in x]
n_gas = len(gas_names)
n_forc,forc_names = gas_parameters.columns.levels[1].size,list(gas_parameters.columns.levels[1])
n_year = time_index.size
## map the concentrations onto the forcings (ie. so the correct indirect forcing parameters read the correct concentration arrays)
gas_forc_map = [gas_names.index(forc_names[x].split('|')[0]) for x in np.arange(len(forc_names))]
## if there are aerosol "concentrations" input that need to be treated as emissions (ie. added to the PI_conc):
if concentration_driven and not aer_concs_in is False:
gas_aer_map = [gas_names.index(aer_concs_in[x]) for x in np.arange(len(aer_concs_in))]
names_list = [scen_names,gas_set_names,thermal_set_names,gas_names]
names_titles = ['Scenario','Gas cycle set','Thermal set','Gas name']
forc_names_list = [scen_names,gas_set_names,thermal_set_names,forc_names]
forc_names_titles = ['Scenario','Gas cycle set','Thermal set','Forcing component']
timestep = np.append(np.diff(time_index),np.diff(time_index)[-1])
# check if no dimensions are degenerate
if (set(scen_names) != set(gas_set_names))&(set(scen_names) != set(thermal_set_names))&(set(gas_set_names) != set(thermal_set_names)):
gas_shape, gas_slice = [1,dim_gas_param,1],gas_set_names
therm_shape, therm_slice = [1,1,dim_thermal_param],thermal_set_names
# check if all degenerate
elif (set(scen_names) == set(gas_set_names))&(set(scen_names) == set(thermal_set_names)):
gas_shape, gas_slice = [dim_scenario,1,1],scen_names
therm_shape, therm_slice = [dim_scenario,1,1],scen_names
dim_gas_param = 1
dim_thermal_param = 1
[x.pop(1) for x in [names_list,names_titles,forc_names_list,forc_names_titles]]
[x.pop(1) for x in [names_list,names_titles,forc_names_list,forc_names_titles]]
# check other possibilities
else:
if set(scen_names) == set(gas_set_names):
gas_shape, gas_slice = [dim_scenario,1,1],scen_names
therm_shape, therm_slice = [1,1,dim_thermal_param],thermal_set_names
dim_gas_param = 1
[x.pop(1) for x in [names_list,names_titles,forc_names_list,forc_names_titles]]
elif set(scen_names) == set(thermal_set_names):
gas_shape, gas_slice = [1,dim_gas_param,1],gas_set_names
therm_shape, therm_slice = [dim_scenario,1,1],scen_names
dim_thermal_param = 1
[x.pop(2) for x in [names_list,names_titles,forc_names_list,forc_names_titles]]
else:
gas_shape, gas_slice = [1,dim_gas_param,1],gas_set_names
therm_shape, therm_slice = [1,dim_gas_param,1],gas_set_names
dim_thermal_param = 1
[x.pop(2) for x in [names_list,names_titles,forc_names_list,forc_names_titles]]
## Reindex to align columns:
emissions = emissions_in.reindex(scen_names,axis=1,level=0).reindex(gas_names,axis=1,level=1).values.T.reshape(dim_scenario,1,1,n_gas,n_year)
if forcing_in is False:
ext_forcing = np.zeros((dim_scenario,1,1,1,n_year))
else:
forcing_in = forcing_in.reindex(scen_names,axis=1,level=0)
ext_forcing = forcing_in.loc[:,(scen_names,slice(None))].values.T.reshape(dim_scenario,1,1,1,n_year)
gas_cycle_parameters = gas_parameters.reindex(gas_slice,axis=1,level=0).reindex(gas_names,axis=1,level=1)
thermal_parameters = thermal_parameters.reindex(therm_slice,axis=1,level=0)
## get parameter arrays
a,tau,r,PI_conc,emis2conc=[gas_cycle_parameters.loc[x].values.T.reshape(gas_shape+[n_gas,-1]) for x in [['a1','a2','a3','a4'],['tau1','tau2','tau3','tau4'],['r0','rC','rT','rA'],'PI_conc','emis2conc']]
f = gas_parameters.reindex(gas_slice,axis=1,level=0).reindex(forc_names,axis=1,level=1).loc['f1':'f3'].values.T.reshape(gas_shape+[n_forc,-1])
d,q = [thermal_parameters.loc[x].values.T.reshape(therm_shape+[-1]) for x in ['d','q']]
if show_run_info:
print('Integrating ' + str(dim_scenario) + ' scenarios, ' + str(dim_gas_param) + ' gas cycle parameter sets, ' + str(dim_thermal_param) + ' thermal response parameter sets, over ' + str(forc_names) + ' forcing agents, between ' + str(time_index[0]) + ' and ' + str(time_index[-1]) + '...',flush=True)
# Dimensions : [scenario, gas params, thermal params, gas, time, (gas/thermal pools)]
g1 = np.sum( a * tau * ( 1. - ( 1. + 100/tau ) * np.exp(-100/tau) ), axis=-1 )
g0 = np.exp( -1 * np.sum( a * tau * ( 1. - np.exp(-100/tau) ) , axis=-1) / g1 )
# Create appropriate shape variable arrays / calculate RF if concentration driven
C = np.empty((dim_scenario,dim_gas_param,dim_thermal_param,n_gas,n_year))
RF = np.empty((dim_scenario,dim_gas_param,dim_thermal_param,n_forc,n_year))
T = np.empty((dim_scenario,dim_gas_param,dim_thermal_param,n_year))
alpha = np.empty((dim_scenario,dim_gas_param,dim_thermal_param,n_gas,n_year))
alpha[...,0] = calculate_alpha(G=0,G_A=0,T=0,r=r,g0=g0,g1=g1)
if concentration_driven:
diagnosed_emissions = np.zeros((dim_scenario,dim_gas_param,dim_thermal_param,n_gas,n_year))
C[:] = concentrations_in.reindex(scen_names,axis=1,level=0).reindex(gas_names,axis=1,level=1).values.T.reshape(dim_scenario,1,1,n_gas,n_year)
if not aer_concs_in is False:
C[...,gas_aer_map,:] += PI_conc[...,gas_aer_map,:]
G_A = np.zeros_like(C)
G_A[...,:-1] = concentrations_in.reindex(scen_names,axis=1,level=0).reindex(gas_names,axis=1,level=1).rolling(2).mean().dropna().values.T.reshape(dim_scenario,1,1,n_gas,n_year-1)
G_A[...,-1] = G_A[...,-2] + (C[...,-1]-C[...,-2])
G_A = (G_A-PI_conc)/emis2conc
RF[:] = step_forcing(C[...,gas_forc_map,:],PI_conc[...,gas_forc_map,:],f[...,np.newaxis,:])
diagnosed_emissions[...,0],R = unstep_concentration(R_old=0,G_A=G_A[...,0],alpha=alpha[...,0,np.newaxis],a=a,tau=tau,PI_conc=PI_conc[...,0],emis2conc=emis2conc[...,0],dt=timestep[0])
S,T[...,0] = step_temperature(S_old=0,F=np.sum(RF[...,0],axis=-1)[...,np.newaxis]+ext_forcing[...,0],q=q,d=d,dt=timestep[0])
for t in tqdm(np.arange(1,n_year),unit=' timestep'):
G = np.sum(diagnosed_emissions,axis=-1)
alpha[...,t] = calculate_alpha(G=G,G_A=G_A[...,t-1],T=np.sum(S,axis=-1)[...,np.newaxis],r=r,g0=g0,g1=g1)
diagnosed_emissions[...,t],R = unstep_concentration(R_old=R,G_A=G_A[...,t],alpha=alpha[...,t,np.newaxis],a=a,tau=tau,PI_conc=PI_conc[...,0],emis2conc=emis2conc[...,0],dt=timestep[t])
S,T[...,t] = step_temperature(S_old=S,F=np.sum(RF[...,t],axis=-1)[...,np.newaxis]+ext_forcing[...,t],q=q,d=d,dt=timestep[t])
C_out = concentrations_in
E_out = pd.DataFrame(np.moveaxis(diagnosed_emissions,-1,0).reshape(diagnosed_emissions.shape[-1],-1),index = time_index,columns=pd.MultiIndex.from_product(names_list,names=names_titles))
if not concentration_driven:
G = np.cumsum(emissions,axis=-1)
C[...,0],R,G_A = step_concentration(R_old = 0,G_A_old = 0,alpha=alpha[...,0,np.newaxis],E=emissions[...,0,np.newaxis],a=a,tau=tau,PI_conc=PI_conc[...,0],emis2conc=emis2conc[...,0],dt=timestep[0])
RF[...,0] = step_forcing(C=C[...,gas_forc_map,0],PI_conc=PI_conc[...,gas_forc_map,0],f=f)
S,T[...,0] = step_temperature(S_old=0,F=np.sum(RF[...,0],axis=-1)[...,np.newaxis]+ext_forcing[...,0],q=q,d=d,dt=timestep[0])
for t in tqdm(np.arange(1,n_year),unit=' timestep'):
alpha[...,t] = calculate_alpha(G=G[...,t-1],G_A=G_A,T=np.sum(S,axis=-1)[...,np.newaxis],r=r,g0=g0,g1=g1)
C[...,t],R,G_A = step_concentration(R_old = R,G_A_old=G_A,alpha=alpha[...,t,np.newaxis],E=emissions[...,t,np.newaxis],a=a,tau=tau,PI_conc=PI_conc[...,0],emis2conc=emis2conc[...,0],dt=timestep[t])
RF[...,t] = step_forcing(C=C[...,gas_forc_map,t],PI_conc=PI_conc[...,gas_forc_map,0],f=f)
S,T[...,t] = step_temperature(S_old=S,F=np.sum(RF[...,t],axis=-1)[...,np.newaxis]+ext_forcing[...,t],q=q,d=d,dt=timestep[t])
C_out = pd.DataFrame(np.moveaxis(C,-1,0).reshape(C.shape[-1],-1),index = time_index,columns=pd.MultiIndex.from_product(names_list,names=names_titles))
E_out = emissions_in
ext_forcing = np.zeros(np.sum(RF,axis=-2)[...,np.newaxis,:].shape) + ext_forcing
RF = np.concatenate((RF,ext_forcing),axis=-2)
RF = np.concatenate((RF,np.sum(RF,axis=-2)[...,np.newaxis,:]),axis=-2)
alpha_out = pd.DataFrame(np.moveaxis(alpha,-1,0).reshape(alpha.shape[-1],-1),index = time_index,columns=pd.MultiIndex.from_product(names_list,names=names_titles))
RF_out = pd.DataFrame(np.moveaxis(RF,-1,0).reshape(RF.shape[-1],-1),index = time_index,columns=pd.MultiIndex.from_product([x+['External','Total']*(x==forc_names_list[-1]) for x in forc_names_list],names=forc_names_titles))
T_out = pd.DataFrame(np.moveaxis(T,-1,0).reshape(T.shape[-1],-1),index = time_index,columns=pd.MultiIndex.from_product(names_list[:-1],names=names_titles[:-1]))
out_dict = {'C':C_out, \
'RF':RF_out, \
'T':T_out, \
'alpha':alpha_out, \
'Emissions':E_out , \
'gas_parameters':gas_parameters , \
'thermal parameters':thermal_parameters}
for axis in [x for x in list(out_dict.keys())[:-2] if type(x)==pd.core.frame.DataFrame]:
out_dict[axis].index = out_dict[axis].index.rename('Year')
return out_dict
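# Minimal end-to-end sketch (assumptions: the gas-parameter set is restricted to match
# the emission columns, and emission units follow the defaults of that parameter set):
#
#   emissions = return_empty_emissions(start_year=2000, end_year=2100)
#   emissions.loc[:, (0, 'CO2')] = 10.0   # constant hypothetical CO2 emissions
#   out = run_GIR(emissions_in=emissions,
#                 gas_parameters=get_gas_parameter_defaults(choose_gases=['CO2', 'CH4', 'N2O']))
#   out['T']   # temperature response per scenario / parameter-set combination, indexed by year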
############################### Advanced Tools #################################
def prescribed_temps_gas_cycle(emissions_in , gas_parameters , T):
# for running the gas cycle module only, with a prescribed temperature dataset. For fitting cycle parameters
dim_scenario = emissions_in.columns.levels[0].size
scen_names = list(emissions_in.columns.levels[0])
dim_gas_param = gas_parameters.columns.levels[0].size
gas_set_names = list(gas_parameters.columns.levels[0])
n_gas = emissions_in.columns.levels[1].size
gas_names = list(gas_parameters.columns.levels[1])
n_year = emissions_in.index.size
emissions = input_to_numpy(emissions_in)[:,np.newaxis,...]
timestep = np.append(np.diff(emissions_in.index)[0],np.diff(emissions_in.index))
T = T[np.newaxis,np.newaxis,:]
a = input_to_numpy(gas_parameters.loc['a1':'a4'])[np.newaxis,:,np.newaxis,...]
tau = input_to_numpy(gas_parameters.loc['tau1':'tau4'])[np.newaxis,:,np.newaxis,...]
r = input_to_numpy(gas_parameters.loc['r0':'rA'])[np.newaxis,:,np.newaxis,...]
emis2conc = gas_parameters.loc['emis2conc'].values.reshape(gas_parameters.loc['emis2conc'].index.levels[0].size,gas_parameters.loc['emis2conc'].index.levels[1].size)[np.newaxis,:,np.newaxis,...]
PI_conc = gas_parameters.loc['PI_conc'].values.reshape(gas_parameters.loc['PI_conc'].index.levels[0].size,gas_parameters.loc['PI_conc'].index.levels[1].size)[np.newaxis,:,np.newaxis,...]
f = input_to_numpy(gas_parameters.loc['f1':'f3'])[np.newaxis,:,np.newaxis,...]
G = np.cumsum(emissions,axis=-1)
C = np.zeros((dim_scenario,dim_gas_param,n_gas,n_year))
alpha = np.zeros((dim_scenario,dim_gas_param,n_gas,n_year))
g1 = np.sum( a * tau * ( 1. - ( 1. + 100/tau ) * np.exp(-100/tau) ), axis=-1 )
g0 = np.exp( -1 * np.sum( a * tau * ( 1. - np.exp(-100/tau) ) , axis=-1) / g1 )
alpha[...,0] = calculate_alpha(G=np.zeros(C[...,0].shape),G_A=np.zeros(C[...,0].shape),T=T[...,0,np.newaxis],r=r,g0=g0,g1=g1)
C[...,0],R,G_A = step_concentration(R = np.zeros(a.shape),alpha=alpha[...,0,np.newaxis],E=emissions[...,0,np.newaxis],\
a=a,tau=tau,PI_conc=PI_conc,emis2conc=emis2conc,dt=timestep[0])
for t in np.arange(1,emissions.shape[-1]):
alpha[...,t] = calculate_alpha(G=G[...,t-1],G_A=G_A,T=T[...,t-1,np.newaxis],r=r,g0=g0,g1=g1)
C[...,t],R,G_A = step_concentration(R = R,alpha=alpha[...,t,np.newaxis],E=emissions[...,t,np.newaxis],\
a=a,tau=tau,PI_conc=PI_conc,emis2conc=emis2conc,dt=timestep[t])
C_out = pd.DataFrame(C.T.swapaxes(1,-1).swapaxes(2,-2).reshape(n_year,n_gas*dim_scenario*dim_gas_param),index = emissions_in.index,columns=pd.MultiIndex.from_product([scen_names,gas_set_names,gas_names],names=['Scenario','Gas cycle set','Gas name']))
alpha_out = pd.DataFrame(alpha.T.swapaxes(1,-1).swapaxes(2,-2).reshape(n_year,n_gas*dim_scenario*dim_gas_param),index = emissions_in.index,columns=pd.MultiIndex.from_product([scen_names,gas_set_names,gas_names],names=['Scenario','Gas cycle set','Gas name']))
E_out = emissions_in
out_dict = { \
'C':C_out, \
'alpha':alpha_out, \
'Emissions':E_out , \
'gas_parameters':gas_parameters , \
}
for axis in [x for x in list(out_dict.keys())[:-2] if type(x)==pd.core.frame.DataFrame]:
out_dict[axis].index = out_dict[axis].index.rename('Year')
return out_dict
def invert_concentrations_prescribed_T( concentrations_in, gas_parameters , T ):
time_index = concentrations_in.index
[(dim_scenario,scen_names),(dim_gas_param,gas_set_names)]=[(x.size,list(x)) for x in [concentrations_in.columns.levels[0],gas_parameters.columns.levels[0]]]
gas_names = [x for x in gas_parameters.columns.levels[1] if '|' not in x]
n_gas = len(gas_names)
n_year = time_index.size
names_list = [scen_names,gas_set_names,gas_names]
names_titles = ['Scenario','Gas cycle set','Gas name']
timestep = np.append(np.diff(time_index),np.diff(time_index)[-1])
if set(scen_names) == set(gas_set_names):
gas_shape, gas_slice = [dim_scenario,1],scen_names
dim_gas_param = 1
[x.pop(1) for x in [names_list,names_titles]]
else:
gas_shape, gas_slice = [1,dim_gas_param],gas_set_names
a,tau,r,PI_conc,emis2conc=[gas_parameters.loc[x,(gas_slice,gas_names)].values.T.reshape(gas_shape+[n_gas,-1]) for x in [['a1','a2','a3','a4'],['tau1','tau2','tau3','tau4'],['r0','rC','rT','rA'],'PI_conc','emis2conc']]
# Dimensions : [scenario, gas params, gas, time, (gas/thermal pools)]
g1 = np.sum( a * tau * ( 1. - ( 1. + 100/tau ) * np.exp(-100/tau) ), axis=-1 )
g0 = np.exp( -1 * np.sum( a * tau * ( 1. - np.exp(-100/tau) ) , axis=-1) / g1 )
# Create appropriate shape variable arrays / calculate RF if concentration driven
C = np.zeros((dim_scenario,dim_gas_param,n_gas,n_year))
T = T.values.flatten().reshape(1,1,-1)
alpha = np.zeros((dim_scenario,dim_gas_param,n_gas,n_year))
alpha[...,0] = calculate_alpha(G=np.zeros(C[...,0].shape),G_A=np.zeros(C[...,0].shape),T=np.zeros(C[...,0].shape),r=r,g0=g0,g1=g1)
diagnosed_emissions = np.zeros((dim_scenario,dim_gas_param,n_gas,n_year))
C[:] = input_to_numpy(concentrations_in.reindex(scen_names,axis=1,level=0).reindex(gas_names,axis=1,level=1))[:,np.newaxis,...]
G_A = np.zeros_like(C)
G_A[...,:-1] = concentrations_in.reindex(scen_names,axis=1,level=0).reindex(gas_names,axis=1,level=1).rolling(2).mean().dropna().values.T.reshape(dim_scenario,1,1,n_gas,n_year-1)
G_A[...,-1] = G_A[...,-2] + (C[...,-1]-C[...,-2])
G_A = (G_A-PI_conc)/emis2conc
diagnosed_emissions[...,0],R = unstep_concentration(R_old=0,G_A=G_A[...,0],alpha=alpha[...,0,np.newaxis],a=a,tau=tau,PI_conc=PI_conc[...,0],emis2conc=emis2conc[...,0],dt=timestep[0])
for t in tqdm(np.arange(1,n_year),unit=' timestep'):
G = np.sum(diagnosed_emissions,axis=-1)
alpha[...,t] = calculate_alpha(G=G,G_A=G_A[...,t-1],T=T[...,t-1,np.newaxis],r=r,g0=g0,g1=g1)
diagnosed_emissions[...,t],R = unstep_concentration(R_old=R,G_A=G_A[...,t],alpha=alpha[...,t,np.newaxis],a=a,tau=tau,PI_conc=PI_conc[...,0],emis2conc=emis2conc[...,0],dt=timestep[t])
C_out = concentrations_in
E_out = pd.DataFrame(np.moveaxis(diagnosed_emissions,-1,0).reshape(diagnosed_emissions.shape[-1],-1),index = time_index,columns=pd.MultiIndex.from_product(names_list,names=names_titles))
alpha_out = pd.DataFrame(np.moveaxis(alpha,-1,0).reshape(alpha.shape[-1],-1),index = time_index,columns=pd.MultiIndex.from_product(names_list,names=names_titles))
out_dict = {'C':C_out, \
'alpha':alpha_out, \
'Emissions':E_out , \
'gas_parameters':gas_parameters , \
'T':T}
for axis in [x for x in list(out_dict.keys())[:-2] if type(x)==pd.core.frame.DataFrame]:
out_dict[axis].index = out_dict[axis].index.rename('Year')
return out_dict
def invert_carbon_cycle_prescribed_T(C,T,a,tau,r,PI_conc,emis2conc):
g1 = np.sum( a * tau * ( 1. - ( 1. + 100/tau ) * np.exp(-100/tau) ), axis=-1 )
g0 = np.exp( -1 * np.sum( a * tau * ( 1. - np.exp(-100/tau) ) , axis=-1) / g1 )
diagnosed_emissions = np.zeros(C.size)
alpha = np.zeros(C.size)
G_A = (np.array([np.mean(C[i:i+2]) for i in np.arange(C.size)])-PI_conc)/emis2conc
G_A[-1]=2*G_A[-1]-G_A[-2]
alpha[0] = calculate_alpha(G=0,G_A=0,T=0,r=r,g0=g0,g1=g1)
diagnosed_emissions[0],R = unstep_concentration(R_old=0,G_A=G_A[0],alpha=alpha[0,np.newaxis],a=a,tau=tau,PI_conc=PI_conc,emis2conc=emis2conc)
for t in np.arange(1,C.size):
G = np.sum(diagnosed_emissions)
alpha[t] = calculate_alpha(G=G,G_A=G_A[t-1],T=T[t-1],r=r,g0=g0,g1=g1)
diagnosed_emissions[t],R = unstep_concentration(R_old=R,G_A=G_A[t],alpha=alpha[t,np.newaxis],a=a,tau=tau,PI_conc=PI_conc,emis2conc=emis2conc)
return pd.Series(index=np.arange(C.size),data=diagnosed_emissions)
def unstep_forcing(forcing_in,gas_parameters=get_gas_parameter_defaults(),thermal_params=get_thermal_parameter_defaults()):
f = input_to_numpy(gas_parameters.loc['f1':'f3'])[np.newaxis,:,np.newaxis,...]
forcing_in = return_empty_emissions(forcing_in,gases_in=forcing_in.columns.levels[1]) + forcing_in.values
forcing = input_to_numpy(forcing_in)[:,np.newaxis,np.newaxis,...]
time_index = forcing_in.index
dim_scenario = forcing_in.columns.levels[0].size
scen_names = list(forcing_in.columns.levels[0])
dim_gas_param = gas_parameters.columns.levels[0].size
gas_set_names = list(gas_parameters.columns.levels[0])
gas_names = list(gas_parameters.columns.levels[1])
dim_thermal_param = thermal_params.columns.get_level_values(0).unique().size
thermal_set_names = list(thermal_params.columns.get_level_values(0).unique())
n_gas = forcing_in.columns.levels[1].size
n_year = time_index.size
f = input_to_numpy(gas_parameters.loc['f1':'f3'])[np.newaxis,:,np.newaxis,...]
PI_conc = gas_parameters.loc['PI_conc'].values.reshape(gas_parameters.loc['PI_conc'].index.levels[0].size,gas_parameters.loc['PI_conc'].index.levels[1].size)[np.newaxis,:,np.newaxis,...]
def root_function(C,PI_conc,f,forcing_target):
RF = f[...,0] * np.log( C/PI_conc ) + f[...,1] * ( C - PI_conc ) + f[...,2] * ( np.sqrt(C) - np.sqrt(PI_conc) )
return RF - forcing_target
concentrations = np.zeros(forcing.shape)
for scenario in np.arange(dim_scenario):
for gas_param in np.arange(dim_gas_param):
for thermal_param in np.arange(dim_thermal_param):
for gas in np.arange(n_gas):
concentrations[scenario,gas_param,thermal_param,gas,:]=sp.optimize.root(root_function,\
np.zeros(forcing[scenario,gas_param,thermal_param,gas,:].shape)+\
PI_conc[0,gas_param,0,gas],\
args=(PI_conc[0,gas_param,0,gas],\
f[0,gas_param,0,gas,:],\
forcing[scenario,gas_param,thermal_param,gas,:])).x.squeeze()
C_out = pd.DataFrame(concentrations.T.swapaxes(1,-1).swapaxes(2,-2).reshape(n_year,n_gas*dim_scenario*dim_gas_param*dim_thermal_param),index = time_index,columns=pd.MultiIndex.from_product([scen_names,gas_set_names,thermal_set_names,gas_names],names=['Scenario','Gas cycle set','Thermal set','Gas name']))
return C_out
## Fitting the r parameters from Emissions and Concentrations __ WIP ##
def OLSE_NORM(X,Y,add_intercept=True):
## computes a multiple OLS regression over a field against several indices. First dimension is time, second is features (X), or targets (Y)
if add_intercept:
X_1 = np.concatenate((np.ones(X.shape[0])[:,np.newaxis],X),axis=1)
else:
X_1 = X.copy()
B = np.dot( np.linalg.inv( np.dot( X_1.T , X_1 ) ) , np.dot( X_1.T , Y ) )
e = Y - np.dot(X_1,B)
SSE = np.sum(e**2,axis=0)
MSE_var = SSE / (X_1.shape[0] - X_1.shape[-1])
SE_B = np.sqrt( np.diag( np.linalg.inv( np.dot( X_1.T , X_1 ) ) )[:,np.newaxis] * MSE_var[np.newaxis,:] )
return {'coefs':B[1:],'coef_err':SE_B[1:],'res':e,'intercept':B[0],'intercept_err':SE_B[0]}
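# Illustrative usage sketch for OLSE_NORM on synthetic data (not part of the original
# workflow; shapes and values are assumptions). X is (n_time, n_features) and Y is
# (n_time, n_targets), so 'coefs' comes back as (n_features, n_targets):
# rng = np.random.default_rng(0)
# X_demo = rng.normal(size=(100, 2))
# Y_demo = 1.5 * X_demo[:, :1] - 0.5 * X_demo[:, 1:] + rng.normal(scale=0.1, size=(100, 2))
# fit = OLSE_NORM(X_demo, Y_demo)
# fit['coefs'].shape       # (2, 2)
# fit['intercept'].shape   # (2,)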
def alpha_root(alpha,R_old,C,E,a,tau,PI_conc,emis2conc,dt=1):
# residual function for solving alpha via root finding, given emissions and concentrations
return E - ( C - PI_conc - np.sum(R_old * np.exp( -dt/(alpha*tau) ) , axis=-1 ) ) / ( emis2conc * np.sum( a * alpha * ( tau / dt ) * ( 1. - np.exp( -dt / ( alpha * tau ) ) ) , axis=-1 ) )
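# Hedged sketch (illustrative only, not the routine used below): for a single gas and
# timestep, alpha could be recovered by bracketing a root of alpha_root, e.g.
# alpha_t = sp.optimize.brentq(alpha_root, 1e-4, 1e4,
#                              args=(R_old, C_t, E_t, a, tau, PI_conc, emis2conc, 1))
# where R_old holds the previous reservoir pools and C_t, E_t are the current
# concentration and emission; the bracket [1e-4, 1e4] is an assumption and may need
# widening if alpha_root does not change sign inside it.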
def get_alpha_from_E_C(C,E,a,tau,PI_conc,emis2conc,timestep=False):
# returns alpha from concentrations and emissions
if timestep is False:
timestep = np.ones_like(C)
C_end = np.zeros_like(C)
#!/usr/bin/env python
import argparse
import os
import sys
import csv
import h5py
import tensorflow.keras as keras
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import cv2
import SimpleITK as sitk
import time
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
tf.compat.v1.enable_v2_behavior()
if __name__ == "__main__" and __package__ is None:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
import fl_covid.bin # noqa: F401
__package__ = "fl_covid.bin"
# Change these to absolute imports if you copy this script outside the fl_covid package.
from ..utils.anchors import compute_overlap
from .. import models
from ..preprocessing.csv_generator import CSVGenerator
from ..utils.eval import _compute_ap, _get_annotations, _get_annotations_and_img_path
from ..utils.config import read_config_file, parse_anchor_parameters
from ..utils.keras_version import check_keras_version
from ..utils.visualization import draw_detections, draw_annotations
from ..utils.visualization import draw_box, label_color, draw_caption
from ..utils.image import preprocess_image, resize_image
from fl_covid.bin.train_fed import create_models
from fl_covid.bin.evaluate_overall import fp_reduce
def get_session():
""" Construct a modified tf session.
"""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
return tf.Session(config=config)
def draw_label_hit(image, box, caption):
""" Draws a caption above the box in an image.
# Arguments
image : The image to draw on.
box : A list of 4 elements (x1, y1, x2, y2).
caption : String containing the text to draw.
"""
b = np.array(box).astype(int)
cv2.putText(image, caption, (b[0]+5, b[3] - 5), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 2)
cv2.putText(image, caption, (b[0]+5, b[3] - 5), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
def draw_detections(image, boxes, scores, labels, color=None, label_to_name=None, slice_id=None, bbox_writer=None, score_threshold=0.4): # score_threshold used to be 0.5
""" Draws detections in an image.
# Arguments
image : The image to draw on.
boxes : A [N, 4] matrix (x1, y1, x2, y2).
scores : A list of N classification scores.
labels : A list of N labels.
color : The color of the boxes. By default the color from keras_retinanet.utils.colors.label_color will be used.
label_to_name : (optional) Functor for mapping a label to a name.
score_threshold : Threshold used for determining what detections to draw.
"""
selection = np.where(scores > score_threshold)[0]
for i in selection:
c = color if color is not None else label_color(labels[i])
if bbox_writer is not None and slice_id is not None:
tar_path = 'slice_{}.png'.format(slice_id)
b = np.array(boxes[i, :]).astype(int)
bbox_writer.writerow([tar_path]+ [b[0],b[1],b[2],b[3]]+['lesion'])
draw_box(image, boxes[i, :], color=c,thickness=1)
# draw labels
caption = (label_to_name(labels[i]) if label_to_name else str(labels[i])) + ': {0:.2f}'.format(scores[i])
draw_caption(image, boxes[i, :], caption)
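# Minimal usage sketch for draw_detections (illustrative; the arrays below are dummies):
# img = np.zeros((512, 512, 3), dtype=np.uint8)
# boxes = np.array([[50, 60, 120, 140]])
# scores = np.array([0.9])
# labels = np.array([0])
# draw_detections(img, boxes, scores, labels, color=[0, 255, 0], score_threshold=0.4)
# Boxes scoring below score_threshold are skipped; passing slice_id together with a
# csv.writer as bbox_writer additionally records each drawn box as a CSV row.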
def read_h5(img_path):
with h5py.File(img_path, "r") as hf:
arr = hf['arr'][:]
return arr
def draw_colorful_result(
args,
client_name,
patient_name,
iou_threshold=0.5,
score_threshold=0.05,
max_detections=100,
save_path=None
):
def _parse(value, function, fmt):
"""Parse a string into a value, and format a nice ValueError if it fails.
Returns `function(value)`.
Any `ValueError` raised is caught and a new `ValueError` is raised
with message `fmt.format(e)`, where `e` is the caught `ValueError`.
"""
try:
return function(value)
except ValueError as e:
raise ValueError(fmt.format(e))
if args.reduce_fp:
sign = 'fp_reduced_'
else:
sign=''
bbox_result_path = os.path.join(save_path,'{}_{}_score_thres_{}_bbox.csv'.format(client_name, patient_name, score_threshold))
anno_result_path = os.path.join(save_path,'{}_{}_score_thres_{}_anno.csv'.format(client_name, patient_name, score_threshold))
all_annotations_img_path = np.load(os.path.join(save_path, '{}_{}_annotations_img_path.npy'.format(client_name, patient_name)), allow_pickle=True)
# prepare annotation result
anno_result = {}
annos = open(anno_result_path, 'r')
classes = {'lesion': 0}
for line, row in enumerate(annos):
splits = row.split(',')
# print(splits)
# print(len(splits))
try:
img_file, x1, y1, x2, y2, class_name, hit_cnt = splits
hit_cnt = hit_cnt.replace('\n', '')
except ValueError:
raise ValueError(
'line {}: format should be \'img_file,x1,y1,x2,y2,class_name,hit_cnt\''.format(line))
if img_file not in anno_result:
anno_result[img_file] = []
x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
hit_cnt = _parse(hit_cnt, int, 'line {}: malformed hit count: {{}}'.format(line))
if x2 <= x1:
raise ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1))
if y2 <= y1:
raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1))
# check if the current class name is correctly present
if str(class_name) not in classes:
raise ValueError(
'line {}: unknown class name: \'{}\' (classes: {})'.format(line, class_name, classes))
anno_result[img_file].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name, 'hit_cnt':hit_cnt})
# prepare prediction bbox result
bbox_result = {}
bboxs = open(bbox_result_path, 'r')
classes = {'lesion': 0}
for line, row in enumerate(bboxs):
splits = row.split(',')
try:
img_file, x1, y1, x2, y2, class_name, score, box_type = splits
box_type = box_type.replace('\n', '')
except ValueError:
raise ValueError(
'line {}: format should be \'img_file,x1,y1,x2,y2,class_name,score,box_type\''.format(line))
if img_file not in bbox_result:
bbox_result[img_file] = []
x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
if x2 <= x1:
raise ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1))
if y2 <= y1:
raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1))
# check if the current class name is correctly present
if str(class_name) not in classes:
raise ValueError(
'line {}: unknown class name: \'{}\' (classes: {})'.format(line, class_name, classes))
bbox_result[img_file].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name, 'score':score, 'box_type': str(box_type)})
detection_out = np.zeros([len(all_annotations_img_path), 512, 512, 3])
for i in tqdm(range(len(all_annotations_img_path)), desc='Drawing colorful {} result on {} {}: '.format(sign, client_name, patient_name)):
img_path = all_annotations_img_path[i]
raw_img = read_h5(img_path)
# print(img_path)
image = raw_img.copy()
if keras.backend.image_data_format() == 'channels_first':
image = image.transpose((2, 0, 1))
if img_path in anno_result:
for anno_index in range(len(anno_result[img_path])):
# draw annotation
hit_cnt = anno_result[img_path][anno_index]['hit_cnt']
caption = '{}'.format(hit_cnt)
anno_box = [anno_result[img_path][anno_index]['x1'], anno_result[img_path][anno_index]['y1'], anno_result[img_path][anno_index]['x2'],anno_result[img_path][anno_index]['y2']]
draw_label_hit(image, anno_box , caption)
draw_box(image, anno_box, color=[0,255,0], thickness=1)
if img_path in bbox_result:
for bbox_index in range(len(bbox_result[img_path])):
pred_box = [bbox_result[img_path][bbox_index]['x1'], bbox_result[img_path][bbox_index]['y1'], bbox_result[img_path][bbox_index]['x2'],bbox_result[img_path][bbox_index]['y2']]
box_type = str(bbox_result[img_path][bbox_index]['box_type'])
score = float(bbox_result[img_path][bbox_index]['score'])
# print(box_type)
# print('assigned_gt')
# print(box_type=='assigned_gt')
if box_type == 'max_overlap':
box_color = [31, 0, 255]
elif box_type == 'assigned_pre':
box_color =[184, 0, 255]
elif box_type == 'assigned_gt':
box_color = [139, 69, 19]
elif box_type == 'fp':
box_color = [225, 0, 0]
else:
raise ValueError("Unknown box type :{}".format(box_type))
draw_box(image, pred_box, color=box_color, thickness=1)
caption = ('{0:.2f}'.format(score))
draw_caption(image, pred_box, caption)
detection_out[i, :, :] = image
print('Writing colorful results on {} {}...'.format(client_name, patient_name))
detection_out = sitk.GetImageFromArray(detection_out)
sitk.WriteImage(detection_out, os.path.join(save_path, '{}_{}_colorful_detection_{}result.nii.gz'.format(client_name, patient_name, sign)))
def create_generator(args):
""" Create generators for evaluation.
"""
if args.dataset_type == 'csv':
validation_generator = CSVGenerator(
args.annotations,
args.classes,
image_min_side=args.image_min_side,
image_max_side=args.image_max_side,
config=args.config,
shuffle_groups=False
)
else:
raise ValueError('Invalid data type received: {}'.format(args.dataset_type))
return validation_generator
def _seg_filter(bboxes,scores_sort,seg):
image_boxes = bboxes
inner = np.asarray([], dtype=bool)
flag = False
for i in range(image_boxes.shape[0]):
x1 = int(image_boxes[i][0])
y1 = int(image_boxes[i][1])
x2 = int(image_boxes[i][2])
y2 = int(image_boxes[i][3])
x1 = 511 if x1 > 511 else x1
y1 = 511 if y1 > 511 else y1
x2 = 511 if x2 > 511 else x2
y2 = 511 if y2 > 511 else y2
# print(scores_sort)
# print(scores_sort.shape)
if (seg[y1,x1,:] == 0).all() and (seg[y2,x2,:] == 0).all() and (seg[y1,x2,:] == 0).all() and (seg[y2,x1,:] == 0).all():
inner = np.append(inner,False)
flag=True
# scores_sort = np.delete(scores_sort,i,axis=0)
else:
inner = np.append(inner, True)
# print(inner)
# cnt = 1
# if flag:
# if cnt > 0:
# print("FP out of lung filtered")
# cnt -= 1
scores_sort = scores_sort[inner]
# print('scores_sort after filter')
# print(scores_sort.shape)
# print(scores_sort)
return scores_sort
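# Behaviour sketch for _seg_filter (illustrative): a detection index survives only if at
# least one of its four box corners lies on a non-zero pixel of the lung segmentation.
# With an all-zero mask every index is dropped:
# seg_demo = np.zeros((512, 512, 3), dtype=np.uint8)
# kept = _seg_filter(np.array([[10, 10, 30, 30]]), np.array([0]), seg_demo)
# kept.size  # 0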
def _print_detections_to_npy(args, generator, model, client_idx, client_name, patient_name, score_threshold=0.05, max_detections=100, save_path=None):
all_detections = [[None for i in range(generator.num_classes()) if generator.has_label(i)] for j in range(generator.size())]
detection_out = np.zeros([generator.size(),512,512,3])
# detection_out = np.zeros([generator.size(),512,512])
attention_out = np.zeros([generator.size(),512,512])
mask_out = np.zeros([generator.size(),512,512])
results = open(os.path.join(save_path, '{}_{}_output_bbox.csv'.format(client_name, patient_name)), 'w', newline='')
result_writer = csv.writer(results, delimiter=',')
for i in tqdm(range(generator.size()), desc='Running network on {}_{}: '.format(client_name, patient_name)):
raw_image = generator.load_image(i)
# image = np.expand_dims(raw_image.copy(), axis=-1)
# image = np.repeat(image, 3, axis=-1)
# image = generator.preprocess_image(image)
image = generator.preprocess_image(raw_image.copy())
image, scale = generator.resize_image(image)
if keras.backend.image_data_format() == 'channels_first':
image = image.transpose((2, 0, 1))
# run network
# boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))[:3]
boxes, scores, labels, masks, attention_map = model.predict_on_batch(np.expand_dims(image, axis=0))
# print('boxes:', boxes.shape)
# print('scores:', scores.shape)
# print('labels',labels.shape)
# correct boxes for image scale
boxes /= scale
# select indices which have a score above the threshold
indices = np.where(scores[0, :] > -1)[0]
# print('indices', indices)
# print(type(scores))
if type(scores) is not np.ndarray:
scores = scores.numpy()
boxes = boxes.numpy()
labels = labels.numpy()
masks = masks.numpy()
attention_map = attention_map.numpy()
# select those scores
scores = scores[0][indices]
# find the order with which to sort the scores
scores_sort = np.argsort(-scores)[:max_detections]
image_boxes = boxes[0, indices[scores_sort], :]
# print('seletec_boxes',image_boxes.shape)
# print(image_boxes)
# filter out of lung
if args.lung_filter:
client_paths = ['private_1', 'private_2', 'private_3']
# client_paths = ['private_4/B']
lung_filter_path = '/research/dept8/qdou/data/covid/{}/lung_seg_png/'.format(client_paths[client_idx])
# lungfilter = '/covid/private_2/lung_seg_png/
# print('---img path---')
img_path = generator.image_path(i)
patient = img_path.split('/')[-2]
slice_idx = img_path.split('/')[-1].replace('slice_', '').replace('.h5', '')
# print('patient:', patient)
# print('slice:', slice_idx)
seg_path = os.path.join(lung_filter_path,'{}_slice_{}.png').format(patient,slice_idx)
# print(seg_path)
seg = cv2.imread(seg_path)
filter_mask = np.zeros([1, 512, 512, 1])
filter_mask[0, np.where(seg == 255)[0], np.where(seg == 255)[1], 0] = masks[0, np.where(seg == 255)[0], np.where(seg == 255)[1], 0]
scores_sort = _seg_filter(image_boxes,scores_sort,seg)
image_boxes = boxes[0, indices[scores_sort], :]
image_scores = scores[scores_sort]
image_labels = labels[0, indices[scores_sort]]
image_detections = np.concatenate([image_boxes, np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)], axis=1)
# copy detections to all_detections
for label in range(generator.num_classes()):
if not generator.has_label(label):
continue
all_detections[i][label] = image_detections[image_detections[:, -1] == label, :-1]
if args.save_result == 1:
img_path = generator.image_path(i)
img_path = img_path.replace('h5_normalize', 'h5')
# print(img_path)
with h5py.File(img_path, "r") as hf:
h5_raw_image = hf['arr'][:]
draw_annotations(h5_raw_image, generator.load_annotations(i), label_to_name=generator.label_to_name)
# draw_detections(raw_image, image_boxes, image_scores, image_labels, score_threshold=args.score_threshold, label_to_name=generator.label_to_name)
draw_detections(h5_raw_image, image_boxes, image_scores, image_labels, slice_id=i, bbox_writer=result_writer, score_threshold=args.score_threshold)
# if args.lung_filter:
# slice_idx = generator.image_path(i).split('/')[-1].replace('slice', '').replace('.png', '')
# cv2.imwrite('../COVID/slice_{}.png'.format(slice_idx),raw_image)
# print("Shape of load Image")
# print(arr.shape)
detection_out[i, :, :] = h5_raw_image
attention_map[np.where(attention_map < args.attention_threshold)] = 0
# attention_out[i, :, :] = cv2.flip( cv2.resize(np.squeeze(np.uint8(attention_map * 255)), (origin_shape[1], origin_shape[0])), 0)
attention_out[i, :, :] = cv2.resize(np.squeeze(np.uint8(attention_map * 255)), (512, 512))
masks[masks < args.segmentation_threshold] = 0
filter_mask[filter_mask < args.segmentation_threshold] = 0
filter_mask = cv2.resize(np.squeeze(np.uint8(filter_mask * 255)), (512, 512))
masks = cv2.resize(np.squeeze(np.uint8(masks * 255)), (512, 512))
# mask_out[i, :, :] = masks
mask_out[i, :, :] = filter_mask
if save_path is not None and args.save_result == 1:
print('Writing Results...')
# detection_out = sitk.GetImageFromArray(detection_out)
# sitk.WriteImage(detection_out, os.path.join(save_path, '{}_{}_detection_result.nii.gz'.format(client_name, patient_name)))
# attention_out = sitk.GetImageFromArray(attention_out)
# sitk.WriteImage(attention_out, os.path.join(save_path, '{}_{}_attention_result.nii.gz'.format(client_name, patient_name)))
mask_out = sitk.GetImageFromArray(mask_out)
sitk.WriteImage(mask_out, os.path.join(save_path, '{}_{}_masks_result.nii.gz'.format(client_name, patient_name)))
np.save(os.path.join(save_path, '{}_{}_prediction.npy'.format(client_name, patient_name)), all_detections)
all_annotations, all_annotations_img_path = _get_annotations_and_img_path(generator)
np.save(os.path.join(save_path, '{}_{}_annotations.npy'.format(client_name, patient_name)), all_annotations)
np.save(os.path.join(save_path, '{}_{}_annotations_img_path.npy'.format(client_name, patient_name)), all_annotations_img_path)
return 0
def evaluate_from_npy(
args,
client_name,
patient_name,
iou_threshold=0.5,
score_threshold=0.05,
max_detections=100,
save_path=None,
verbose=1,
):
""" Evaluate a given dataset using a given model.
# Arguments
iou_threshold : The threshold used to consider when a detection is positive or negative.
score_threshold : The score confidence threshold to use for detections.
max_detections : The maximum number of detections to use per image.
save_path : The path to save images with visualized detections to.
# Returns
A tuple (average_precisions, old_res, new_res, num_annotations, num_slices); average_precisions maps each label to (AP, num_annotations).
"""
# gather all detections and annotations
if args.reduce_fp:
all_detections = np.load(os.path.join(save_path, '{}_{}_prediction_fp_reduced.npy'.format(client_name, patient_name)), allow_pickle=True)
else:
all_detections = np.load(os.path.join(save_path, '{}_{}_prediction.npy'.format(client_name, patient_name)), allow_pickle=True)
# all_detections = np.load(os.path.join(save_path, '{}_{}_prediction.npy'.format(client_name, patient_name)), allow_pickle=True)
all_annotations = np.load(os.path.join(save_path, '{}_{}_annotations.npy'.format(client_name, patient_name)), allow_pickle=True)
all_annotations_img_path = np.load(os.path.join(save_path, '{}_{}_annotations_img_path.npy'.format(client_name, patient_name)), allow_pickle=True)
all_fp_detections = [[] for j in range(all_annotations.shape[0])]
if not args.score_loop:
auc_ci_csv_path = os.path.join(save_path, '{}_{}_score_{}_AUC_CI_bbox.csv'.format(client_name, patient_name, score_threshold))
tar_bbox_csv_path = os.path.join(save_path, '{}_{}_score_thres_{}_bbox.csv'.format(client_name, patient_name, score_threshold))
tar_anno_csv_path = os.path.join(save_path, '{}_{}_score_thres_{}_anno.csv'.format(client_name, patient_name, score_threshold))
bbox_output = open(tar_bbox_csv_path, 'w', newline='')
bbox_writer = csv.writer(bbox_output, delimiter=',')
anno_output = open(tar_anno_csv_path, 'w', newline='')
anno_writer = csv.writer(anno_output, delimiter=',')
auc_output = open(auc_ci_csv_path, 'w', newline='')
auc_writer = csv.writer(auc_output, delimiter=',')
# print (all_detections[0][0].shape)
# print (all_detections[0])
# print (all_annotations.shape)
# print('all detections:', all_detections)
# print('all all_annotations:', all_annotations)
average_precisions = {}
for label in range(1):
false_positives = np.zeros((0,))
true_positives = np.zeros((0,))
false_negatives = np.zeros((0,))
scores = np.zeros((0,))
num_annotations = 0.0
fp_all = {}
tp_all = {}
hitter_all = {}
# print('---slices num---')
# print(all_annotations.shape[0])
for i in range(all_annotations.shape[0]):
detections = all_detections[i][label]
detections = detections[detections[:, -1] >= score_threshold]
annotations = all_annotations[i][label]
num_annotations += annotations.shape[0]
detected_annotations = []
wrote_annotations = []
# print('slice{}'.format(i))
# print(annotations)
slice_fp_detections = np.empty([0, 5], dtype=np.float32)
hitter = np.zeros(annotations.shape[0])
fp = 0
for d in detections:
# print('#############each detection##########')
if annotations.shape[0] == 0:
continue
ious, overlaps_pre_arr, overlaps_gt_arr = compute_overlap(np.expand_dims(d, axis=0), annotations)
# print('---ious--')
# print(ious)
# print('--overlaps_pre_arr--')
# print(overlaps_pre_arr)
# print('--overlaps_gt_arr--')
# print(overlaps_gt_arr)
assigned_annotation = np.argmax(ious, axis=1)
max_overlap = ious[0, assigned_annotation]
# print(assigned_annotation)
# print(max_overlap)
if max_overlap >= iou_threshold:
if hitter[assigned_annotation] == 0:
scores = np.append(scores, d[4])
false_positives = np.append(false_positives, 0)
true_positives = np.append(true_positives, 1)
if not args.score_loop:
auc_writer.writerow([all_annotations_img_path[i], int(d[0]), int(d[1]), int(d[2]), int(d[3])] + ['lesion'] + [float(d[4])] + ['max_overlap'])
hitter[assigned_annotation] += 1
if not args.score_loop:
bbox_writer.writerow([all_annotations_img_path[i], int(d[0]), int(d[1]), int(d[2]), int(d[3])] + ['lesion'] + [float(d[4])] + ['max_overlap'])
else:
assigned_annotation_pre = np.where(overlaps_pre_arr > iou_threshold)
# assigned_annotation_pre = np.where(overlaps_pre_arr > 0.6)
assigned_annotation_gt = np.where(overlaps_gt_arr > iou_threshold)
# assigned_annotation_gt = np.where(overlaps_gt_arr > 0.6)
# print('--assigned_annotation_pre--')
# print(assigned_annotation_pre)
# print(len(assigned_annotation_pre))
# print(len(assigned_annotation_pre[0]))
# print('--assigned_annotation_gt--')
# print(assigned_annotation_gt)
# print(len(assigned_annotation_gt))
# print(len(assigned_annotation_gt[0]))
if len(assigned_annotation_pre[0]) > 0:
if not args.score_loop:
bbox_writer.writerow([all_annotations_img_path[i], int(d[0]), int(d[1]), int(d[2]), int(d[3])] + ['lesion'] + [float(d[4])] + ['assigned_pre'])
for index in assigned_annotation_pre[1]:
if hitter[index] == 0:
scores = np.append(scores, d[4])
false_positives = np.append(false_positives, 0)
true_positives = np.append(true_positives, 1)
if not args.score_loop:
auc_writer.writerow([all_annotations_img_path[i], int(d[0]), int(d[1]), int(d[2]), int(d[3])] + ['lesion'] + [float(d[4])] + ['assigned_pre'])
hitter[index] += 1
if len(assigned_annotation_gt[0]) > 0:
if not args.score_loop:
bbox_writer.writerow([all_annotations_img_path[i], int(d[0]), int(d[1]), int(d[2]), int(d[3])] + ['lesion'] + [float(d[4])] + ['assigned_gt'])
for index in assigned_annotation_gt[1]:
if hitter[index] == 0:
scores = np.append(scores, d[4])
false_positives = np.append(false_positives, 0)
true_positives = np.append(true_positives, 1)
if not args.score_loop:
auc_writer.writerow([all_annotations_img_path[i], int(d[0]), int(d[1]), int(d[2]), int(d[3])] + ['lesion'] + [float(d[4])] + ['assigned_gt'])
hitter[index] += 1
if len(assigned_annotation_pre[0]) + len(assigned_annotation_gt[0]) == 0:
fp += 1
scores = np.append(scores, d[4])
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
slice_fp_detections = np.concatenate((slice_fp_detections, np.expand_dims(d, axis=0)), axis=0)
if not args.score_loop:
bbox_writer.writerow([all_annotations_img_path[i], int(d[0]), int(d[1]), int(d[2]), int(d[3])] + ['lesion'] + [float(d[4])] + ['fp'])
auc_writer.writerow([all_annotations_img_path[i], int(d[0]), int(d[1]), int(d[2]), int(d[3])] + ['lesion'] + [float(d[4])] + ['fp'])
# assigned_annotation = np.argmax(ious, axis=1)
# max_overlap = ious[0, assigned_annotation]
# if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
# false_positives = np.append(false_positives, 0)
# true_positives = np.append(true_positives, 1)
# detected_annotations.append(assigned_annotation)
# elif max_overlap < iou_threshold:
# false_positives = np.append(false_positives, 1)
# true_positives = np.append(true_positives, 0)
# else:
# false_positives = np.append(false_positives, 0)
# true_positives = np.append(true_positives, 0)
all_fp_detections[i] = slice_fp_detections
for each_anno in range(len(hitter)):
# print(len(annotations))
if len(annotations) > 0:
anno = annotations[each_anno]
if not args.score_loop:
anno_writer.writerow([all_annotations_img_path[i], int(anno[0]), int(anno[1]), int(anno[2]), int(anno[3])] + ['lesion'] + [int(hitter[each_anno])])
if hitter[each_anno] == 0:
if not args.score_loop:
auc_writer.writerow([all_annotations_img_path[i], int(anno[0]), int(anno[1]), int(anno[2]), int(anno[3])] + ['lesion'] + [0] + ['gt_not_hit'])
# print('--hitter--')
# print(hitter)
# print('--where hitter > 0--')
# print(np.where(hitter > 0))
hitter_all[i] = hitter
tp_all[i] = len(np.where(hitter > 0)[0])
fp_all[i] = fp
# no annotations -> AP for this class is 0 (is this correct?)
if num_annotations == 0:
average_precisions[label] = 0, 0
continue
if not args.score_loop:
bbox_output.flush()
bbox_output.close()
anno_output.flush()
anno_output.close()
auc_output.flush()
auc_output.close()
before_reduce_fp = 0
for i in range(len(all_fp_detections)):
before_reduce_fp += len(all_fp_detections[i])
# reduce fp in detections
deleted_all_fp_detections = fp_reduce(all_fp_detections)
deleted_fp_num = 0
# after_reduce_fp = 0
for i in range(len(deleted_all_fp_detections)):
# if len(deleted_all_fp_detections[i]) > 0:
# print('deleted in deleted_all_fp', deleted_all_fp_detections[i])
deleted_fp_num += len(deleted_all_fp_detections[i])
TP_ALL = 0
FP_ALL = 0
for key in tp_all.keys():
TP_ALL += tp_all[key]
for key in fp_all.keys():
FP_ALL += fp_all[key]
FP_ALL -= deleted_fp_num
new_TP_slice = TP_ALL / all_annotations.shape[0]
new_FP_slice = FP_ALL / all_annotations.shape[0]
new_Sensitivity = TP_ALL / num_annotations
new_Precision = TP_ALL / (TP_ALL + FP_ALL) if (TP_ALL + FP_ALL) > 0 else 1
# print('num_annotations',num_annotations)
# print('all_annotationsa',all_annotations.shape[0])
# print(num_annotations)
# print(all_detections[0][label].shape)
# print(all_detections[0][label])
new_res = [TP_ALL,FP_ALL, new_Sensitivity, new_Precision, new_FP_slice]
# sort by score
# print('before sort')
# print(sum(true_positives))
# print(sum(false_positives))
indices = np.argsort(-scores)
false_positives = false_positives[indices]
true_positives = true_positives[indices]
# print('length of old tp list and fp list')
# print(len(true_positives),"----" ,len(false_positives))
TP = sum(true_positives)
FP = sum(false_positives)
FN = sum(false_negatives)
# compute false positives and true positives
false_positives = np.cumsum(false_positives)
true_positives = np.cumsum(true_positives)
# print(false_positives)
# print(true_positives)
# compute recall and precision
recall = true_positives / num_annotations
precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
TP_slice = TP/all_annotations.shape[0]
FP_slice = FP/all_annotations.shape[0]
Sensitivity = TP / num_annotations
Precision = TP / (TP + FP) if (TP + FP) > 0 else 1
if verbose==1:
print('------{} {}------'.format(client_name, patient_name))
print(' # New_TP:{} New_FP:{} before_reduce:{}'.format(TP_ALL, FP_ALL, FP_ALL + deleted_fp_num))
print(' # FP/slice:{:.4f} Sensitivity:{:.5f} Precision:{:.5f}'.format(new_FP_slice, new_Sensitivity,
new_Precision))
# print('-------old--------')
# print(' TP:{} FP{}'.format(TP,FP))
# print(' FP/slice:{:.4f} Sensitivity:{:.5f} Precision:{:.5f}'.format(FP_slice, Sensitivity, Precision))
old_res = [TP,FP, recall, precision, FP_slice]
# compute average precision
average_precision = _compute_ap(recall, precision)
average_precisions[label] = average_precision, num_annotations
all_detections_reduce_fp = all_detections.copy()
for i in range(len(all_detections_reduce_fp)):
slice_detections = all_detections_reduce_fp[i][0]
deleted_slice_fp = deleted_all_fp_detections[i]
new_slice_detections = np.empty([0,5],dtype=np.float32)
if len(deleted_slice_fp) == 1:
fp_indices = np.where(slice_detections==deleted_slice_fp)
x,y = fp_indices
deleted_idx = set(x)
all_idx = set(list(range(len(slice_detections))))
remain_idx = all_idx.difference(deleted_idx)
remain_idx = list(remain_idx)
# for idx in range(len(new_slice_detections)):
for idx in remain_idx:
new_slice_detections = np.concatenate((new_slice_detections, np.expand_dims(slice_detections[idx],axis=0)),axis=0)
assert len(remain_idx) == len(new_slice_detections)
elif len(deleted_slice_fp) > 1:
# print(len(deleted_slice_fp))
all_deleted_idx = []
# print(slice_detections)
for each_deleted_slice_fp in deleted_slice_fp:
x,y = np.where(slice_detections==each_deleted_slice_fp)
deleted_idx = set(x)
# print(deleted_idx)
all_deleted_idx.append(list(deleted_idx)[0])
all_idx = set(list(range(len(slice_detections))))
all_deleted_idx = set(all_deleted_idx)
remain_idx = all_idx.difference(all_deleted_idx)
remain_idx = list(remain_idx)
for idx in remain_idx:
new_slice_detections = np.concatenate((new_slice_detections, np.expand_dims(slice_detections[idx], axis=0)), axis=0)
assert len(remain_idx) == len(new_slice_detections)
else:
continue
all_detections_reduce_fp[i][0] = new_slice_detections
if not args.reduce_fp:
np.save(os.path.join(save_path, '{}_{}_prediction_fp_reduced.npy'.format(client_name, patient_name)), all_detections_reduce_fp)
# return average_precisions, TP_ALL, FP_ALL, new_TP_slice, new_FP_slice, new_Sensitivity, new_Precision
# return tp fp recall precison on new,old and num_annotations and slices
return average_precisions, old_res, new_res, num_annotations, all_annotations.shape[0]
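# Illustrative call (client/patient names follow main() below; requires the .npy files
# written beforehand by _print_detections_to_npy):
# aps, old_res, new_res, n_gt, n_slices = evaluate_from_npy(
#     args, client_name='Dataset1', patient_name='P5',
#     iou_threshold=0.5, score_threshold=0.4, save_path=args.save_path)
# new_res is [TP, FP, sensitivity, precision, FP_per_slice] after FP-reduction bookkeeping.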
def parse_args(args):
""" Parse the arguments.
"""
parser = argparse.ArgumentParser(description='Evaluation script for a RetinaNet network.')
# subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')
# subparsers.required = True
# csv_parser = subparsers.add_parser('csv')
# csv_parser.add_argument('annotations', help='Path to CSV file containing annotations for evaluation.')
# csv_parser.add_argument('classes', help='Path to a CSV file containing class label mapping.')
parser.add_argument('--model', help='Path to RetinaNet model.', default=None)
parser.add_argument('--weights', help='only load weights.', default=None)
parser.add_argument('--nii', help='path to nii files.')
parser.add_argument('--convert-model', help='Convert the model to an inference model (ie. the input is a training model).', action='store_true')
parser.add_argument('--backbone', help='The backbone of the model.', default='vgg19')
parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).')
parser.add_argument('--multi-gpu', help='Number of GPUs to use for parallel processing.', type=int, default=0)
parser.add_argument('--score-threshold', help='Threshold on score to filter detections with (defaults to 0.4).', default=0.4, type=float)
parser.add_argument('--iou-threshold', help='IoU Threshold to count for a positive detection (defaults to 0.5).', default=0.5, type=float)
parser.add_argument('--max-detections', help='Max Detections per image (defaults to 100).', default=100, type=int)
parser.add_argument('--detection-threshold', help='Threshold used for determining what detections to draw.', default=0.4, type=float)
parser.add_argument('--segmentation-threshold', help='Threshold used to filter the segmentation map.', default=0.1, type=float)
parser.add_argument('--attention-threshold', help='Threshold used to filter the attention map.', default=0.8, type=float)
parser.add_argument('--save-path', help='Path for saving images with detections (doesn\'t work for COCO).',default=None)
parser.add_argument('--get_predicted_bbox', help='Save predicted bbox to csv.', action='store_true')
parser.add_argument('--save-result', help='Save result or not.', type=int, default=0)
parser.add_argument('--lung-filter', help='Filter detections using lung segmentation masks.', default=False, action='store_true')
parser.add_argument('--draw-colorful', help='draw difficult type of predict with color', default=False, action='store_true')
parser.add_argument('--reduce-fp', help='reduce fp, must use after completing first evaluation', default=False, action='store_true')
parser.add_argument('--score-loop', help='Loop over score thresholds and write precision-recall pairs, must use after completing first evaluation.', default=False, action='store_true')
parser.add_argument('--log', help='Path for saving log file', default=None)
parser.add_argument('--image-min-side', help='Rescale the image so the smallest side is min_side.', type=int, default=512)
parser.add_argument('--image-max-side', help='Rescale the image if the largest side is larger than max_side.', type=int, default=512)
parser.add_argument('--config', help='Path to a configuration parameters .ini file (only used with --convert-model).')
parser.add_argument('--weighted-average', help='Compute the mAP using the weighted average of precisions among classes.', action='store_true')
parser.add_argument('--dataset_type', help='Dataset type (currently only csv is supported).', default='csv')
parser.add_argument('--annotations', help='Path to CSV file containing annotations for evaluation.')
parser.add_argument('--classes', help='Path to a CSV file containing class label mapping.', default='mapping.csv')
return parser.parse_args(args)
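# Hedged example invocation (the module path and file locations are placeholders):
# python -m fl_covid.bin.<this_script> --model /path/to/model.h5 --save-path ./results \
#     --score-threshold 0.4 --iou-threshold 0.5 --lung-filter --get_predicted_bbox --save-result 1
# The first run with --get_predicted_bbox writes the per-patient prediction .npy files and
# exits; later runs (e.g. with --score-loop or --reduce-fp) evaluate from those files.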
def main(args=None):
# parse arguments
if args is None:
args = sys.argv[1:]
args = parse_args(args)
# make sure keras is the minimum required version
check_keras_version()
# optionally choose specific GPU
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
# keras.backend.tensorflow_backend.set_session(get_session())
# make save path if it doesn't exist
if args.save_path is not None and not os.path.exists(args.save_path):
os.makedirs(args.save_path)
# optionally load config parameters
if args.config:
args.config = read_config_file(args.config)
anno_base_dir = '/research/dept8/qdou/data/covid/'
args.classes = os.path.join(anno_base_dir, args.classes)
# create the generator
# print model summary
# print(model.summary())
# Internal
client_name = ['Dataset1', 'Dataset2', 'Dataset3']
# External-1
# client_name = ['Dataset4']
# External-3
# client_name = ['Dataset4']
data_path = ['private_1/h5_normalize', 'private_2/h5_normalize', 'private_3/h5_normalize']
# data_path = ['private_4/B/h5_normalize']
# test_data_list = ['test_public_4.csv', 'test_private_1_all.csv', 'test_private_2.csv', 'test_private_3.csv', 'test_mos_all.csv']
# test_data_list = ['test_public_4.csv', 'test_private_1_all_whole_vol.csv', 'test_private_2.csv', 'test_private_3.csv', 'test_mos_all.csv']
private_1 = ['P5_annotations_h5_whole_vol.csv']
private_2 = ['case1_annotations_h5_whole_vol.csv', 'case4_annotations_h5_whole_vol.csv']
private_3 = ['case19_annotations_h5_whole_vol.csv', 'case23_annotations_h5_whole_vol.csv', 'case40_annotations_h5_whole_vol.csv', 'case42_annotations_h5_whole_vol.csv',
'case46_annotations_h5_whole_vol.csv', 'case49_annotations_h5_whole_vol.csv', 'case51_annotations_h5_whole_vol.csv', 'case54_annotations_h5_whole_vol.csv',
'case58_annotations_h5_whole_vol.csv', 'case60_annotations_h5_whole_vol.csv', 'case61_annotations_h5_whole_vol.csv', 'case62_annotations_h5_whole_vol.csv']
public_4 = ['coronacases006_annotations_h5_whole_vol.csv', 'coronacases008_annotations_h5_whole_vol.csv']
# private_4 = ['001_annotations_h5_whole_vol.csv', '005_annotations_h5_whole_vol.csv', '006_annotations_h5_whole_vol.csv', '007_annotations_h5_whole_vol.csv',
# '008_annotations_h5_whole_vol.csv', '009_annotations_h5_whole_vol.csv', '010_annotations_h5_whole_vol.csv', '011_annotations_h5_whole_vol.csv',
# '012_annotations_h5_whole_vol.csv', '013_annotations_h5_whole_vol.csv']
private_4 = ['001_annotations_h5_whole_vol.csv', '005_annotations_h5_whole_vol.csv',
'006_annotations_h5_whole_vol.csv', '008_annotations_h5_whole_vol.csv',
'009_annotations_h5_whole_vol.csv', '010_annotations_h5_whole_vol.csv',
'011_annotations_h5_whole_vol.csv', '012_annotations_h5_whole_vol.csv',
'013_annotations_h5_whole_vol.csv','014_annotations_h5_whole_vol.csv']
# private_4 =['014_annotations_h5_whole_vol.csv']
# test_data_list = [private_1, private_2, private_3]
test_data_list = [private_1, private_2, private_3]
assert len(client_name) == len(data_path) == len(test_data_list)
# generate patient name based on csv
patient_names = {}
for i in range(len(client_name)):
for j in range(len(test_data_list[i])):
if client_name[i] not in patient_names:
patient_names[client_name[i]] = []
patient_names[client_name[i]].append(test_data_list[i][j].split('_')[0])
else:
patient_names[client_name[i]].append(test_data_list[i][j].split('_')[0])
# start evaluation
log_path = args.log if args.log else './evaluate_internal_patient_wise_3_June.txt'
logfile = open(log_path,'a')
# save prediction to npy
if args.get_predicted_bbox == 1:
logfile.write('*********************************\n')
logfile.write('*{}*\n'.format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
logfile.write('Save prediction of model:{} to .npy file\n'.format(args.model))
backbone = models.backbone(args.backbone)
# optionally load anchor parameters
anchor_params = None
if args.config and 'anchor_parameters' in args.config:
anchor_params = parse_anchor_parameters(args.config)
# load model
if args.model is not None:
print('Loading model, this may take a second...')
model = models.load_model(args.model, backbone_name=args.backbone)
elif args.weights is not None:
weights = args.weights
print('Creating model and Loading weights, this may take a second...')
model, training_model, prediction_model = create_models(
backbone_retinanet=backbone.retinanet,
# note : when mapping.csv only contains lesion,0, generator.num_classes() ==1
num_classes=1,
weights=weights,
multi_gpu=args.multi_gpu,
freeze_backbone=False,
config=args.config,
model_config={}
)
else:
raise ValueError("You have to specify a model")
# optionally convert the model
if args.convert_model:
model = models.convert_model(model, anchor_params=anchor_params)
# create generator
# generators = []
generators = {}
for i in range(len(client_name)):
for j in range(len(test_data_list[i])):
args.annotations = os.path.join(anno_base_dir, data_path[i], test_data_list[i][j])
print('---client {}---'.format(client_name[i]),flush=True)
print('validation csv {}'.format(args.annotations),flush=True)
generator = create_generator(args)
if client_name[i] not in generators:
generators[client_name[i]] = []
generators[client_name[i]].append(generator)
# patient_names[client_name[i]] = []
# patient_names[client_name[i]].append(test_data_list[i][j].split('_')[0])
else:
generators[client_name[i]].append(generator)
# patient_names[client_name[i]].append(test_data_list[i][j].split('_')[0])
if args.lung_filter:
print('do lung filter')
logfile.write('do lung filter\n')
else:
print('no lung filter')
for i in range(len(generators)):
print('------client {}-----'.format(client_name[i]), flush=True)
for j in range(len(generators[client_name[i]])):
logfile.write('Writing client {} patient {} prediction results to .npy... \n'.format(client_name[i], patient_names[client_name[i]][j]))
print('------patient {}-----'.format(patient_names[client_name[i]][j]), flush=True)
generator = generators[client_name[i]][j]
patient_name = patient_names[client_name[i]][j]
_print_detections_to_npy(
args,
generator,
model,
client_idx=i,
client_name=client_name[i],
patient_name = patient_name,
score_threshold=args.score_threshold,
max_detections=args.max_detections,
save_path=args.save_path,
)
logfile.write('Finish writing\n')
logfile.write('*********************************')
sys.exit(0)
# score loop and write precision-recall pairs
if args.score_loop:
recall_flag_patient_wise = 1000.
precision_flag_patient_wise = 0.
recall_flag_client_wise = 1000.
precision_flag_client_wise = 0.
roc_patient_avg = open(os.path.join(args.save_path, 'roc_data_iou_{}_internal_patient_wise.csv'.format(args.iou_threshold)), 'w', newline='')
roc_writer_patient_avg = csv.writer(roc_patient_avg, delimiter=',')
roc_writer_patient_avg.writerow(
['client_name', 'FP rate', 'Score threshold', 'Sensitivity', 'CI', 'Precision', 'CI', 'mAP',
'CI'])
roc_client_avg = open(os.path.join(args.save_path, 'roc_data_iou_{}_internal_client_wise.csv'.format(args.iou_threshold)), 'w', newline='')
roc_writer_client_avg = csv.writer(roc_client_avg, delimiter=',')
roc_writer_client_avg.writerow(
['client_name', 'FP rate', 'Score threshold', 'Sensitivity', 'CI', 'Precision', 'CI', 'mAP',
'CI'])
for score in range(0, 99):
score_threshold = score / 100
all_client_recall, all_client_precision, all_client_fpr, all_client_map = [], [], [], []
all_patient_recall, all_patient_precision, all_patient_fpr, all_patient_map = [], [], [], []
# print('client {}'.format(client_name[i]), flush=True)
# fp_rate = 1000.
print('Using score:{}'.format(score_threshold), flush=True)
for i in range(len(client_name)):
client_tps, client_fps, client_num_annotations, client_num_slices = 0., 0., 0., 0.
client_precision, client_recall, client_fp_slice = [], [], []
client_mAP = []
for j in range(len(test_data_list[i])):
patient_name = patient_names[client_name[i]][j]
if not os.path.exists(os.path.join(args.save_path, 'roc_data_iou_{}_{}_{}.csv'.format(args.iou_threshold, client_name[i], patient_name))):
roc_per_patient = open(os.path.join(args.save_path, 'roc_data_iou_{}_{}_{}.csv'.format(args.iou_threshold,client_name[i], patient_name)), 'w', newline='')
roc_writer_per_patient = csv.writer(roc_per_patient, delimiter=',')
roc_writer_per_patient.writerow(['patient_name', 'FP rate', 'Score threshold', 'Sensitivity', 'Precision','mAP'])
else:
roc_per_patient = open(
os.path.join(args.save_path, 'roc_data_iou_{}_{}_{}.csv'.format(args.iou_threshold, client_name[i], patient_name)),
'a', newline='')
roc_writer_per_patient = csv.writer(roc_per_patient, delimiter=',')
average_precisions, old, new, num_annotations, num_slices = evaluate_from_npy(
args,
client_name=client_name[i],
patient_name=patient_name,
iou_threshold=args.iou_threshold,
score_threshold=score_threshold,
max_detections=args.max_detections,
save_path=args.save_path,
verbose=0
)
# add together and calc (total)
patient_tp = new[0]
patient_fp = new[1]
client_tps += patient_tp
client_fps += patient_fp
client_num_annotations += num_annotations
client_num_slices += num_slices
# print evaluation
total_instances = []
precisions = []
for label, (average_precision, num_annotations) in average_precisions.items():
total_instances.append(num_annotations)
precisions.append(average_precision)
if sum(total_instances) == 0:
print('No test instances found.')
return
mAP = sum(precisions) / sum(x > 0 for x in total_instances)
# calc average
client_recall.append(new[2])
client_precision.append(new[3])
client_fp_slice.append(new[4])
client_mAP.append(mAP)
# collect all patient metrics
all_patient_recall.append(new[2])
all_patient_precision.append(new[3])
all_patient_fpr.append(new[4])
all_patient_map.append(mAP)
roc_writer_per_patient.writerow(
['internal_client_wise'] + [round(new[4], 4)] + [score_threshold] + [round(new[2], 5)] + [round(new[3], 5)] + [round(mAP, 5)])
roc_per_patient.flush()
roc_per_patient.close()
# client_total_recall = client_tps / client_num_annotations
# client_total_precision = client_tps / (client_tps + client_fps) if (client_tps + client_fps)>0 else 1
# clint_total_fp_slice = client_fps / client_num_slices
single_client_avg_recall = sum(client_recall) / len(test_data_list[i])
single_client_avg_precision = sum(client_precision) / len(test_data_list[i])
single_client_avg_fp_slice = sum(client_fp_slice) / len(test_data_list[i])
single_client_avg_mAP = sum(client_mAP) / len(test_data_list[i])
all_client_recall.append(single_client_avg_recall)
all_client_precision.append(single_client_avg_precision)
all_client_fpr.append(single_client_avg_fp_slice)
all_client_map.append(single_client_avg_mAP)
# calc confidence interval
client_mean_precision = np.mean(np.asanyarray(client_precision))
client_p_value_precision = 1.96*(np.std(np.asanyarray(client_precision))/np.sqrt(len(test_data_list[i])))
client_mean_recall= np.mean(np.asanyarray(client_recall))
client_p_value_recall= 1.96 *(np.std(np.asanyarray(client_recall)) / np.sqrt(len((test_data_list[i]))))
client_mean_map = np.mean(np.asanyarray(client_mAP))
client_p_value_map = 1.96 * (np.std(np.asanyarray(client_mAP)) / np.sqrt(len((test_data_list[i]))))
print('------{}------'.format(client_name[i]))
print(' average:')
print(' # TP:{} FP:{}'.format(client_tps, client_fps))
print(' # FP/slice:{:.4f} Sensitivity:{:.5f} Precision:{:.5f}'.format(single_client_avg_fp_slice,
single_client_avg_recall,
single_client_avg_precision))
print(' # mAP:{:.5f} {} {}'.format(single_client_avg_mAP, sum(client_mAP), len(test_data_list[i])))
print(' # Sensitivity:[{:.4f}+-{:.4f}] Precision:[{:.4f}+-{:.4f}] mAP:[{:.4f}+-{:.4f}]'.format(
client_mean_recall, client_p_value_recall, client_mean_precision, client_p_value_precision,
client_mean_map, client_p_value_map))
# Client wise average
all_client_avg_recall = np.mean(all_client_recall)
all_client_avg_precision = np.mean(all_client_precision)
all_client_avg_map = np.mean(all_client_map)
all_client_avg_fpr= np.mean(all_client_fpr)
assert len(all_client_recall) == len(all_client_precision) == len(all_client_map)
all_client_p_value_recall = 1.96 * (np.std(all_client_recall) / np.sqrt(len(all_client_recall)))
all_client_p_value_precision = 1.96 * (np.std(all_client_precision) / np.sqrt(len(all_client_precision)))
all_client_p_value_map = 1.96 * (np.std(all_client_map) / np.sqrt(len(all_client_map)))
if ((recall_flag_client_wise - all_client_avg_recall) < 0.0001 and (precision_flag_client_wise - all_client_avg_precision) < 0.0001) or (
recall_flag_client_wise < all_client_avg_recall or precision_flag_client_wise > all_client_avg_precision):
print('CONTINUE')
continue
else:
recall_flag_client_wise = all_client_avg_recall
precision_flag_client_wise = all_client_avg_precision
roc_writer_client_avg.writerow(
['internal_client_wise'] + [round(all_client_avg_fpr, 4)] + [score_threshold] +
[round(all_client_avg_recall, 5)] + ['[{}+-{}]'.format(all_client_avg_recall, all_client_p_value_recall)] +
[round(all_client_avg_precision, 5)] + ['[{}+-{}]'.format(all_client_avg_precision, all_client_p_value_precision)] +
[round(all_client_avg_map, 5)] + ['[{}+-{}]'.format(all_client_avg_map, all_client_p_value_map)])
# Patient wise average
all_patient_avg_recall = np.mean(all_patient_recall)
all_patient_avg_precision = np.mean(all_patient_precision)
all_patient_avg_map = np.mean(all_patient_map)
all_patient_avg_fpr = np.mean(all_patient_fpr)
###############################################################################
#
# Faster-RCNN is composed of 3 neural networks
# Feature Network
# - usually a well-known pre-trained image classifier such as VGG or ResNet50
# minus a few layers
# - to generate good features from the images
# Region Proposal Network (RPN)
# - usually a simple network with 3 convolutional layers
# - to generate a number of bounding boxes called Region of interests (ROIs)
# that has high probability of containing any object
# Detection Network (RCNN network)
# - takes input from both the feature network and RPN, and generates the
# final class and bounding box
#
#
# based on the work by yhenon (https://github.com/yhenon/keras-frcnn/) and
# RockyXu66 (https://github.com/RockyXu66/Faster_RCNN_for_Open_Images_Dataset_Keras),
# - converted to use tensorflow.keras
# - refactored to be used as a library, following tensorflow.keras Model API
###############################################################################
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Add, Input, InputSpec, Dense, Activation, Dropout
from tensorflow.keras.layers import Flatten, Conv2D
from tensorflow.keras.layers import AveragePooling2D, TimeDistributed
from tensorflow.keras import optimizers
from tensorflow.keras import initializers, regularizers
from tensorflow.keras.backend import categorical_crossentropy
import tensorflow.keras.utils as utils
import tensorflow.keras.backend as K
import numpy as np
import pandas as pd
import cv2
import time
import random
import math
import copy
import os
import sys
import imgaug.augmenters as iaa
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
import imgaug as ia
from matplotlib import pyplot as plt
from collections import Counter
from numba import jit
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# tf.config.optimizer.set_jit(True)
DEBUG = False
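# Hedged usage sketch of the three-network setup described in the header comment above
# (train_gen and class_mapping are placeholders; FRCNNGenerator/parseAnnotationFile from
# the surrounding library would normally provide them, and the paths are assumptions):
# frcnn = FRCNN(num_classes=4, base_net_type='vgg', num_rois=32)
# frcnn.compile()   # defaults: Adam(1e-5) for RPN and classifier, 'sgd' for the combined model
# frcnn.summary()
# frcnn.fit_generator(train_gen, steps_per_epoch=1000, epochs=10,
#                     class_mapping=class_mapping, target_size=300,
#                     model_path='./frcnn_vgg.hdf5', csv_path='./frcnn_vgg.csv')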
class FRCNN(): # class FRCNN(tf.keras.Model):
def __init__(
self,
base_net_type='resnet50', base_trainable=False,
num_classes=10, input_shape=(None, None, 3),
num_rois=256, num_anchors=9
):
# super(FRCNN, self).__init__(name='frcnn')
self.num_classes = num_classes
self.num_anchors = num_anchors
self.num_rois = num_rois
self.base_net_type = base_net_type
# Checking of inputs for Feature Network (Base Net),
# allow some flexibility in name of base_net
base_net_type = base_net_type.lower()
if ('resnet' in base_net_type):
base_net_type = 'resnet50'
elif ('vgg' in base_net_type):
base_net_type = 'vgg'
if (base_net_type != 'resnet50' and base_net_type != 'vgg'):
print("Only resnet50 and vgg are currently supported as base models")
raise ValueError
elif (base_net_type == 'resnet50'):
from tensorflow.keras.applications import ResNet50 as fn
elif (base_net_type == 'vgg'):
from tensorflow.keras.applications import VGG16 as fn
img_input = Input(shape=input_shape)
roi_input = Input(shape=(None, 4))
# Define Feature Network
# Assume we will always use pretrained weights for Feature Network for now
base_net = fn(weights='imagenet', include_top=False, input_tensor=img_input)
for layer in base_net.layers:
layer.trainable = base_trainable
layer._name = layer.name + "a" # prevent duplicate layer name
# For VGG, the last max pooling layer in VGGNet is also removed
if (base_net_type == 'vgg'):
# base_net.layers.pop() # does not work - https://github.com/tensorflow/tensorflow/issues/22479
feature_network = base_net.layers[-2].output
num_features = 512
# For Resnet50, the last stage (stage 5) is also removed
else:
# feature_network = base_net.outputs[0]
# lastStage = max([i if 'res5a_branch2a' in x.name else 0 for i,x in enumerate(base_net.layers)]) - 1
lastStage = 142
feature_network = base_net.layers[lastStage].output
num_features = 1024
self.feature_network = feature_network
# Define RPN, built upon the base layers
rpn = _rpn(feature_network, num_anchors)
classifier = _classifier(
feature_network, roi_input, num_rois, nb_classes=num_classes,
trainable=True, base_net_type=base_net_type)
self.model_rpn = Model(img_input, rpn[:2])
self.model_classifier = Model([img_input, roi_input], classifier)
# this will be the model that holds both the RPN and the classifier, used to load/save weights for the models
self.model_all = Model([img_input, roi_input], rpn[:2] + classifier)
# Create models that will be used for predictions
roi_input = Input(shape=(num_rois, 4))
feature_map_input = Input(shape=(None, None, num_features))
p_classifier = _classifier(
feature_map_input, roi_input, num_rois, nb_classes=num_classes,
trainable=True, base_net_type=base_net_type)
self.predict_rpn = Model(img_input, rpn)
self.predict_classifier = Model([feature_map_input, roi_input], p_classifier)
# return model_all
def summary(self, line_length=None, positions=None, print_fn=None):
"""Prints a string summary of the overall FRCNN network
Arguments:
line_length: Total length of printed lines
(e.g. set this to adapt the display to different
terminal window sizes).
positions: Relative or absolute positions of log elements
in each line. If not provided,
defaults to `[.33, .55, .67, 1.]`.
print_fn: Print function to use. Defaults to `print`.
It will be called on each line of the summary.
You can set it to a custom function
in order to capture the string summary.
Raises:
ValueError: if `summary()` is called before the model is built.
"""
return self.model_all.summary(line_length=line_length, positions=positions, print_fn=print_fn)
def compile(
self,
optimizer=None,
loss=None,
**kwargs):
"""Configures the model for training.
Arguments:
optimizer: Array of String (name of optimizer), array of optimizer instance,
String (name of optimizer) or optimizer instance
See `tf.keras.optimizers`. If it is not an array, the same optimizer will
be used for all submodels. Otherwise index 0-rpn, 1-classifier, 2-all.
Set to None to use defaults
Example: [optimizers.Adam(lr=1e-5), optimizers.Adam(lr=1e-5), 'sgd']
loss: Array of String (name of objective function), array of objective function,
String (name of objective function), objective function or
`tf.losses.Loss` instance. See `tf.losses`. If it is not an array,
the same loss will be used for all submodels. Otherwise, index
0-rpn, 1-classifier, 2-all.
If the model has multiple outputs, you can use a different loss on each output by
passing a dictionary or a list of losses. The loss value that will
be minimized by the model will then be the sum of all individual
losses.
Set to None to use defaults
**kwargs: Any additional arguments.
Raises:
ValueError: In case of invalid arguments for
`optimizer`, `loss`, `metrics` or `sample_weight_mode`.
"""
# Allow user to override defaults
if optimizer is not None:
# Ideally optimizer settings should be specified individually
if (isinstance(optimizer, list)):
if (len(optimizer) != 3):
print("Length of list for optimizer should be 3")
raise ValueError
else:
optimizer_rpn = optimizer[0]
optimizer_classifier = optimizer[1]
optimizer_all = optimizer[2]
# Use same optimizer for all
else:
optimizer_rpn = optimizer
optimizer_classifier = optimizer
optimizer_all = optimizer
# Use defaults for optimizers if not specified
else:
optimizer_rpn = optimizers.Adam(lr=1e-5)
optimizer_classifier = optimizers.Adam(lr=1e-5)
optimizer_all = 'sgd'
if loss is not None:
if (isinstance(loss, list)):
if (len(loss) != 3):
print("Length of list for loss should be 3")
raise ValueError
else:
loss_rpn = loss[0]
loss_classifier = loss[1]
loss_all = loss[2]
# Use same loss function for all
else:
loss_rpn = loss
loss_classifier = loss
# Use defaults for loss if not specified
else:
loss_rpn = [rpn_loss_cls(self.num_anchors), rpn_loss_regr(self.num_anchors)]
loss_classifier = [class_loss_cls, class_loss_regr(self.num_classes - 1)]
loss_all = 'mae'
self.model_rpn.compile(optimizer=optimizer_rpn, loss=loss_rpn)
self.model_classifier.compile(
optimizer=optimizer_classifier,
loss=loss_classifier, metrics={'dense_class_{}'.format(self.num_classes): 'accuracy'})
self.model_all.compile(optimizer=optimizer_all, loss=loss_all)
self.predict_rpn.compile(optimizer='sgd', loss='mse')
self.predict_classifier.compile(optimizer='sgd', loss='mse')
def fit_generator(
self,
generator,
steps_per_epoch=1000,
epochs=1,
verbose=1,
initial_epoch=-1,
class_mapping=None,
target_size=-1, # length of shorter size
anchor_box_scales=[128, 256, 512],
anchor_box_ratios=[[1, 1], [1./math.sqrt(2), 2./math.sqrt(2)], [2./math.sqrt(2), 1./math.sqrt(2)]],
std_scaling=4.0, # for scaling of standard deviation
classifier_regr_std=[8.0, 8.0, 4.0, 4.0], #
classifier_min_overlap=0.1, # repo values
classifier_max_overlap=0.5, # repo values
rpn_stride=16, # stride at the RPN (this depends on the network configuration)
model_path='./frcnn.hdf5',
csv_path="./frcnn.csv"
):
"""Fits the model on data yielded batch-by-batch by FRCNNGenerator.
Will automatically save model and csv to the specified paths
model_path and csv_path respectively.
If file at model_path exists, will automatically resume training
if initial_epoch is set to -1. Otherwise, will prompt user to resume
training
Arguments:
generator: Generator that was created via FRCNNGenerator
The generator is expected to loop over its data
indefinitely. An epoch finishes when `steps_per_epoch`
batches have been seen by the model.
steps_per_epoch: Total number of steps (batches of samples)
to yield from `generator` before declaring one epoch
finished and starting the next epoch.
epochs: Integer, total number of iterations on the data.
verbose: Verbosity mode. 0 = Silent, 1 = progress bar
initial_epoch: Integer. Epoch at which to start training
(useful for resuming a previous training run)
model_path: Path for saving model hdf5. Also used to resume training
csv_path: Path for saving training csv. Also used to resume training
class_mapping: Class mapping based on training set. This is the third output from parseAnnotationFile()
target_size: Integer. Shorter-side length. Used for image resizing based on the shorter length
Returns:
None
Raises:
ValueError: In case the generator yields data in an invalid format.
"""
epoch_length = steps_per_epoch
iter_num = 0
losses = np.zeros((epoch_length, 5))
rpn_accuracy_rpn_monitor = []
rpn_accuracy_for_epoch = []
best_loss = np.Inf
# input validation
if (class_mapping is None):
print("class_mapping should not be None")
raise ValueError
elif (target_size < 0):
print("target_size (shorter-side size) must be a positive integer")
raise ValueError
print()
# let's check if model file exists
if not os.path.isfile(model_path):
print('Starting training')
initial_epoch = 0
# Create the record.csv file to record losses, acc and mAP
record_df = pd.DataFrame(
columns=[
'mean_overlapping_bboxes', 'class_acc', 'loss_rpn_cls',
'loss_rpn_regr', 'loss_class_cls', 'loss_class_regr', 'curr_loss', 'elapsed_time', 'mAP'])
else:
# if setting is not to continue training and file exists, confirm with user again,
# before overwriting file, just in case
if (initial_epoch != -1):
ask = input('File %s exists. Continue training? [Y/N]' % (model_path))
if (ask.lower() in ['y', 'yes', 'ya']):
initial_epoch = -1
else:
print('Restarting training and overwriting %s and %s' % (model_path, csv_path))
if (initial_epoch == -1):
# If this is a continued training, load the trained model from before
print('Continuing training based on previous trained model')
print('Loading weights from {}'.format(model_path))
self.model_rpn.load_weights(model_path, by_name=True)
self.model_classifier.load_weights(model_path, by_name=True)
record_df = pd.read_csv(csv_path)
initial_epoch = len(record_df)
# for debugging
# r_mean_overlapping_bboxes = record_df['mean_overlapping_bboxes']
# r_class_acc = record_df['class_acc']
# r_loss_rpn_cls = record_df['loss_rpn_cls']
# r_loss_rpn_regr = record_df['loss_rpn_regr']
# r_loss_class_cls = record_df['loss_class_cls']
# r_loss_class_regr = record_df['loss_class_regr']
# r_elapsed_time = record_df['elapsed_time']
# r_mAP = record_df['mAP']
r_curr_loss = record_df['curr_loss']
best_loss = np.min(r_curr_loss)
if verbose:
print('Already trained for %d epochs' % (len(record_df)))
####
start_time = time.time()
total_epoch = initial_epoch + epochs # We might be resuming training, so we will start with initial_epoch
for epoch_num in range(epochs):
progbar = utils.Progbar(epoch_length)
print('Epoch {}/{}'.format(initial_epoch + 1 + epoch_num, total_epoch))
while True:
try:
if len(rpn_accuracy_rpn_monitor) == epoch_length and verbose:
mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor))/len(rpn_accuracy_rpn_monitor)
rpn_accuracy_rpn_monitor = []
if mean_overlapping_bboxes == 0:
print('RPN is not producing bounding boxes that overlap the ground truth boxes. Check RPN settings or keep training.')
# Generate X (x_img) and label Y ([y_rpn_cls, y_rpn_regr])
X, Y, img_data, debug_img, debug_num_pos = next(generator)
if DEBUG:
print("DEBUG", img_data['filepath'])
# Train rpn model and get loss value [_, loss_rpn_cls, loss_rpn_regr]
loss_rpn = self.model_rpn.train_on_batch(X, Y)
# Get predicted rpn from rpn model [rpn_cls, rpn_regr]
P_rpn = self.model_rpn.predict_on_batch(X)
# R: bboxes (shape=(300,4))
# Convert rpn layer to roi bboxes
R = rpn_to_roi(
P_rpn[0], P_rpn[1],
std_scaling, anchor_box_ratios, anchor_box_scales, rpn_stride,
use_regr=True, overlap_thresh=0.7, max_boxes=300)
# note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
# X2: bboxes that iou > C.classifier_min_overlap for all gt bboxes in 300 non_max_suppression bboxes
# Y1: one hot code for bboxes from above => x_roi (X)
# Y2: corresponding labels and corresponding gt bboxes
X2, Y1, Y2, IouS = calc_iou(
R, img_data, [classifier_min_overlap, classifier_max_overlap],
target_size, rpn_stride, class_mapping, classifier_regr_std)
if DEBUG:
print("DEBUG calc_iou (inputs)", classifier_min_overlap, classifier_max_overlap, target_size, rpn_stride, class_mapping, classifier_regr_std)
print("DEBUG calc_iou", X2, Y1, Y2, IouS)
# If X2 is None means there are no matching bboxes
if X2 is None:
rpn_accuracy_rpn_monitor.append(0)
rpn_accuracy_for_epoch.append(0)
continue
# Find out the positive anchors and negative anchors
neg_samples = np.where(Y1[0, :, -1] == 1)
pos_samples = np.where(Y1[0, :, -1] == 0)
if len(neg_samples) > 0:
neg_samples = neg_samples[0]
else:
neg_samples = []
if len(pos_samples) > 0:
pos_samples = pos_samples[0]
else:
pos_samples = []
rpn_accuracy_rpn_monitor.append(len(pos_samples))
rpn_accuracy_for_epoch.append((len(pos_samples)))
if self.num_rois > 1:
# If the number of positive anchors is at least num_rois//2, randomly choose num_rois//2 pos samples; otherwise keep them all
if len(pos_samples) < self.num_rois//2:
selected_pos_samples = pos_samples.tolist()
else:
selected_pos_samples = np.random.choice(pos_samples, self.num_rois//2, replace=False).tolist()
# Randomly choose (num_rois - num_pos) neg samples
try:
selected_neg_samples = np.random.choice(neg_samples, self.num_rois - len(selected_pos_samples), replace=False).tolist()
except ValueError:
try:
selected_neg_samples = np.random.choice(neg_samples, self.num_rois - len(selected_pos_samples), replace=True).tolist()
except Exception as e:
if DEBUG: print(e)
# The neg_samples is [[1 0 ]] only, therefore there's no negative sample
continue
# Save all the pos and neg samples in sel_samples
sel_samples = selected_pos_samples + selected_neg_samples
else:
# in the extreme case where num_rois = 1, we pick a random pos or neg sample
selected_pos_samples = pos_samples.tolist()
selected_neg_samples = neg_samples.tolist()
if np.random.randint(0, 2):
sel_samples = random.choice(neg_samples)
else:
sel_samples = random.choice(pos_samples)
# training_data: [X, X2[:, sel_samples, :]]
# labels: [Y1[:, sel_samples, :], Y2[:, sel_samples, :]]
# X => img_data resized image
# X2[:, sel_samples, :] => num_rois (4 in here) bboxes which contains selected neg and pos
# Y1[:, sel_samples, :] => one hot encode for num_rois bboxes which contains selected neg and pos
# Y2[:, sel_samples, :] => labels and gt bboxes for num_rois bboxes which contains selected neg and pos
loss_class = self.model_classifier.train_on_batch([X, X2[:, sel_samples, :]], [Y1[:, sel_samples, :], Y2[:, sel_samples, :]])
losses[iter_num, 0] = loss_rpn[1]
losses[iter_num, 1] = loss_rpn[2]
losses[iter_num, 2] = loss_class[1]
losses[iter_num, 3] = loss_class[2]
losses[iter_num, 4] = loss_class[3]
iter_num += 1
progbar.update(
iter_num, [
('rpn_cls', np.mean(losses[:iter_num, 0])), ('rpn_regr', np.mean(losses[:iter_num, 1])),
('final_cls', np.mean(losses[:iter_num, 2])), ('final_regr', np.mean(losses[:iter_num, 3]))
])
if iter_num == epoch_length:
loss_rpn_cls = np.mean(losses[:, 0])
loss_rpn_regr = np.mean(losses[:, 1])
loss_class_cls = np.mean(losses[:, 2])
loss_class_regr = np.mean(losses[:, 3])
class_acc = np.mean(losses[:, 4])
mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch)) / len(rpn_accuracy_for_epoch)
rpn_accuracy_for_epoch = []
if verbose:
print('Mean number of bounding boxes from RPN overlapping ground truth boxes: {}'.format(mean_overlapping_bboxes))
print('Classifier accuracy for bounding boxes from RPN: {}'.format(class_acc))
print('Loss RPN classifier: {}'.format(loss_rpn_cls))
print('Loss RPN regression: {}'.format(loss_rpn_regr))
print('Loss Detector classifier: {}'.format(loss_class_cls))
print('Loss Detector regression: {}'.format(loss_class_regr))
print('Total loss: {}'.format(loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr))
print('Elapsed time: {}'.format(time.time() - start_time))
elapsed_time = (time.time()-start_time)/60
curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr
iter_num = 0
start_time = time.time()
if curr_loss < best_loss:
if verbose:
print('Total loss decreased from {} to {}, saving weights'.format(best_loss, curr_loss))
best_loss = curr_loss
self.model_all.save_weights(model_path)
new_row = {
'mean_overlapping_bboxes': round(mean_overlapping_bboxes, 3),
'class_acc': round(class_acc, 3),
'loss_rpn_cls': round(loss_rpn_cls, 3),
'loss_rpn_regr': round(loss_rpn_regr, 3),
'loss_class_cls': round(loss_class_cls, 3),
'loss_class_regr': round(loss_class_regr, 3),
'curr_loss': round(curr_loss, 3),
'elapsed_time': round(elapsed_time, 3),
'mAP': 0}
record_df = record_df.append(new_row, ignore_index=True)
record_df.to_csv(csv_path, index=0)
break
except Exception as e:
print('Exception: {}'.format(e))
continue
print('-- Training complete, exiting.')
return None
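# Illustrative usage sketch (assumed names, not from the original source): the generator
# and class_mapping are expected to come from FRCNNGenerator / parseAnnotationFile, and
# target_size should match the shorter-side length used by the generator:
#     frcnn.fit_generator(
#         generator=train_gen, class_mapping=class_mapping, target_size=300,
#         steps_per_epoch=1000, epochs=20,
#         model_path='./frcnn.hdf5', csv_path='./frcnn.csv')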
def load_config(
self,
anchor_box_scales=[128, 256, 512],
anchor_box_ratios=[[1, 1], [1./math.sqrt(2), 2./math.sqrt(2)], [2./math.sqrt(2), 1./math.sqrt(2)]],
std_scaling=4.0,
rpn_stride=16, # stride at the RPN (this depends on the network configuration)
num_rois=32,
target_size=600,
img_channel_mean=[103.939, 116.779, 123.68],
img_scaling_factor=1,
classifier_regr_std=[8.0, 8.0, 4.0, 4.0],
):
"""Loads configuration settings for FRCNN model.
These will be used for predictions
Arguments:
anchor_box_scales: Anchor box scales array
anchor_box_ratios: Anchor box ratios array
std_scaling: For scaling of standard deviation
rpn_stride: RPN stride. This should be the same as what was passed into the generator
num_rois: number of regions of interest to be used
target_size: Integer. Shorter-side length. Used for image resizing based on the shorter length
img_channel_mean: image channel-wise (RGB) mean to subtract for standardisation
img_scaling_factor: scaling factor to divide by, for standardisation
classifier_regr_std: For scaling of standard deviation for classifier regression for x,y,w,h
Returns:
None
"""
self.anchor_box_scales = anchor_box_scales
self.anchor_box_ratios = anchor_box_ratios
self.std_scaling = std_scaling
self.rpn_stride = rpn_stride
self.im_size = target_size
self.img_channel_mean = img_channel_mean
self.img_scaling_factor = img_scaling_factor
# Also store num_rois and classifier_regr_std so that predict()/evaluate() use the configured values
self.num_rois = num_rois
self.classifier_regr_std = classifier_regr_std
return None
def load_weights(self, filepath):
"""Loads all layer weights, from an HDF5 file.
Weights are loaded with 'by_name' true, meaning that weights are loaded into
layers only if they share the same name. This assumes a single HDF5 file and
consistent layer names
If it is desired to load weights with 'by_name' is False, and load
weights based on the network's topology, please access the individual embedded
sub-models in this class. eg frcnn.model_rpn.load_weights(filepath, by_name=False)
Arguments:
filepath: String, path to the weights file to load. For weight files in
TensorFlow format, this is the file prefix (the same as was passed
to 'save_weights').
Returns:
None
"""
if (not os.path.isfile(filepath)):
raise FileNotFoundError('File does not exist: %s ' % filepath)
self.model_rpn.load_weights(filepath, by_name=True)
self.model_classifier.load_weights(filepath, by_name=True)
self.predict_rpn.load_weights(filepath, by_name=True)
self.predict_classifier.load_weights(filepath, by_name=True)
return None
def predict(
self,
x, #
verbose=2, #
class_mapping=None,
bbox_threshold=0.7,
overlap_thres=0.2
): #
"""Generates output predictions for the input samples.
Computation is done in batches.
Arguments:
x: Input samples. This should be a list of img data
or a list of dict containing groundtruth bounding
boxes (key=bboxes) and path of the image (key=filepath)
verbose: Verbosity mode.
0 = silent. 1 = print results. 2 = print results and show images
class_mapping: Class mapping based on training set
bbox_threshold: If box classification value is less than this, we will ignore that box
overlap_thres: Non-maximum suppression setting. If overlap > overlap_thres, we will remove the box
Returns:
Numpy array(s) of predictions.
"""
return self._loopSamplesAndPredictOrEvaluate(
x, class_mapping,
bbox_threshold, overlap_thresh=overlap_thres, verbose=verbose)
def evaluate(
self,
x=None,
verbose=2,
class_mapping=None,
overlap_thresh=0.3
):
"""Returns the mean average precision (mAP) for the model in test mode,
using metrics used by the VOC Pascal 2012 challenge.
Computation is done in batches.
Arguments:
x: Input samples. This should be a list of dict containing
groundtruth bounding boxes (key=bboxes) and path of the image (key=filepath)
verbose: Verbosity mode.
0 = silent. 1 = print results. 2 = print results and show images
class_mapping: Class mapping based on training set
overlap_thresh: Non-maximum suppression setting. If overlap > overlap_thresh, we will remove the box
Returns:
List of mAPs
Raises:
ValueError: in case of invalid arguments.
"""
return self._loopSamplesAndPredictOrEvaluate(
x, class_mapping, overlap_thresh=overlap_thresh, verbose=verbose, mode='evaluate')
# from profilehooks import profile
# @profile
def _loopSamplesAndPredictOrEvaluate(
self, samples, class_mapping, bbox_threshold=None,
overlap_thresh=0.5, verbose=1, mode='predict'
):
visualise = (verbose > 1)
my_bboxes = RPBoundingBoxes() # using rafaelpadilla/Object-Detection-Metrics
# from sklearn.metrics import average_precision_score
output = []
i = 1
isImgData = True
if isinstance(samples[0], dict):
isImgData = False
# For evaluation of mAP, we will need the ground-truth bboxes
if (mode == 'evaluate' and isImgData):
raise ValueError('For evaluate, please provide input as an array of dict containing bboxes and filepath')
elif (mode == 'evaluate' and visualise):
print('Green: Ground truth bounding boxes. Red: Detected Objects')
if (class_mapping is None):
print("class_mapping should not be None")
raise ValueError
# Switch key and value for class_mapping 'Person': 0 --> 0: 'Person'
class_mapping = {v: k for k, v in class_mapping.items()}
# Assign color to each
class_to_color = {class_mapping[v]: np.random.randint(0, 255, 3) for v in class_mapping}
def get_mAP_RP(pred, gt, f, i, imgSize, my_bboxes1):
imgName = '{:03d}'.format(i)
# add groundtruth bboxes
for bbox in gt:
my_bboxes1.addBoundingBox(RPBoundingBox(
imageName=imgName, classId=bbox['class'],
x=bbox['x1'], y=bbox['y1'],
w=bbox['x2'], h=bbox['y2'],
typeCoordinates=CoordinatesType.Absolute,
bbType=BBType.GroundTruth, format=BBFormat.XYX2Y2, imgSize=imgSize)
)
# add prediction bboxes
for bbox in pred:
my_bboxes1.addBoundingBox(RPBoundingBox(
imageName=imgName, classId=bbox['class'],
x=bbox['x1'], y=bbox['y1'],
w=bbox['x2'], h=bbox['y2'],
typeCoordinates=CoordinatesType.Absolute, classConfidence= bbox['prob'],
bbType=BBType.Detected, format=BBFormat.XYX2Y2, imgSize=imgSize)
)
if visualise:
my_bboxes1.drawAllBoundingBoxes(img_original, imgName)
return my_bboxes1
def plotBBox(real_x1, real_y1, real_x2, real_y2):
cv2.rectangle(
img_original, (real_x1, real_y1), (real_x2, real_y2),
(int(class_to_color[key][0]), int(class_to_color[key][1]), int(class_to_color[key][2])), 4)
textLabel = '{}: {}'.format(key, int(100*new_probs[jk]))
# (retval,baseLine) = cv2.getTextSize(textLabel,cv2.FONT_HERSHEY_COMPLEX,1,1)
# keep the label inside the image when the box touches the top edge
y = real_y1+10 if real_y1 < 10 else real_y1
textOrg = (real_x1, y)
cv2.putText(
img_original, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX,
0.5, (255, 255, 255), 2, lineType=cv2.LINE_AA)
cv2.putText(
img_original, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX,
0.5, (0, 0, 0), 1, lineType=cv2.LINE_AA)
def calcPredictOutput():
# Calculate real coordinates on original image and save coordinates, and (key and prob) separately
(real_x1, real_y1, real_x2, real_y2) = _get_real_coordinates(ratio, x1, y1, x2, y2)
all_pos.append((real_x1, real_y1, real_x2, real_y2))
all_dets.append((key, 100*new_probs[jk]))
if (visualise):
plotBBox(real_x1, real_y1, real_x2, real_y2)
def calcEvalOutput():
# Save coordinates, class and probability
det = {'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': key, 'prob': new_probs[jk]}
all_dets.append(det)
calcOutput = calcPredictOutput if mode == 'predict' else calcEvalOutput # check once, instead of every loop
overall_st = time.time()
for data in samples:
if verbose and not isImgData:
print('{}/{} - {}'.format(i, len(samples), data['filepath']))
i = i + 1
st = time.time()
# convert image
img_original = data if isImgData else cv2.imread(data['filepath'])
img, ratio, fx, fy = _format_img(img_original, self.img_channel_mean, self.img_scaling_factor, self.im_size)
img = np.transpose(img, (0, 2, 3, 1))
# get output layer Y1, Y2 from the RPN and the feature maps F
# Y1: y_rpn_cls, Y2: y_rpn_regr
[Y1, Y2, F] = self.predict_rpn.predict(img) #
# Get bboxes by applying NMS
# R.shape = (300, 4)
R = rpn_to_roi(
Y1, Y2, self.std_scaling, self.anchor_box_ratios, self.anchor_box_scales, self.rpn_stride,
use_regr=True, overlap_thresh=0.7)
# convert from (x1,y1,x2,y2) to (x,y,w,h)
R[:, 2] -= R[:, 0]
R[:, 3] -= R[:, 1]
# apply the spatial pyramid pooling to the proposed regions
bboxes = {}
probs = {}
for jk in range(R.shape[0]//self.num_rois + 1):
ROIs = np.expand_dims(R[self.num_rois*jk:self.num_rois*(jk+1), :], axis=0)
if ROIs.shape[1] == 0:
break
if jk == R.shape[0]//self.num_rois:
# pad R
curr_shape = ROIs.shape
target_shape = (curr_shape[0], self.num_rois, curr_shape[2])
ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype)
ROIs_padded[:, :curr_shape[1], :] = ROIs
ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]
ROIs = ROIs_padded
[P_cls, P_regr] = self.predict_classifier.predict([F, ROIs])
# Calculate bboxes coordinates on resized image
for ii in range(P_cls.shape[1]):
# Ignore 'bg' class
if ((bbox_threshold is not None and np.max(P_cls[0, ii, :]) < bbox_threshold) or np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1)):
continue
# Get class name
cls_name = class_mapping[np.argmax(P_cls[0, ii, :])]
if cls_name not in bboxes:
bboxes[cls_name] = []
probs[cls_name] = []
(x, y, w, h) = ROIs[0, ii, :]
cls_num = np.argmax(P_cls[0, ii, :])
try:
(tx, ty, tw, th) = P_regr[0, ii, 4*cls_num:4*(cls_num+1)]
tx /= self.classifier_regr_std[0]
ty /= self.classifier_regr_std[1]
tw /= self.classifier_regr_std[2]
th /= self.classifier_regr_std[3]
x, y, w, h = apply_regr(x, y, w, h, tx, ty, tw, th)
except Exception as e:
if DEBUG: print(e)
pass
bboxes[cls_name].append([self.rpn_stride*x, self.rpn_stride*y, self.rpn_stride*(x+w), self.rpn_stride*(y+h)])
probs[cls_name].append(np.max(P_cls[0, ii, :]))
all_dets = []
all_pos = []
for key in bboxes:
bbox = np.array(bboxes[key])
# Apply non-max-suppression on final bboxes to get the output bounding boxes
new_boxes, new_probs = non_max_suppression_fast(bbox, np.array(probs[key]), overlap_thresh=overlap_thresh)
for jk in range(new_boxes.shape[0]):
(x1, y1, x2, y2) = new_boxes[jk, :]
# Update all_dets and all_pos
calcOutput()
if verbose > 0:
print('Elapsed time = {}'.format(time.time() - st))
if mode == 'predict':
if verbose == -1 and len(all_dets)>0 and not isImgData:
print(data['filepath'])
print(all_dets)
if verbose > 0 :
print(all_dets)
output.append((all_dets, all_pos)) # store all predictions and their positions for each image
else:
my_bboxes = get_mAP_RP(all_dets, data['bboxes'], (fx, fy), i, img_original.shape, my_bboxes)
evaluator = Evaluator()
metricsPerClass = evaluator.GetPascalVOCMetrics(my_bboxes, IOUThreshold=0.2)
if verbose:
# Loop through classes to obtain their metrics
for mc in metricsPerClass:
# Get metric values per each class
c = mc['class']
precision = mc['precision']
recall = mc['recall']
average_precision = mc['AP']
ipre = mc['interpolated precision']
irec = mc['interpolated recall']
# Print AP per class
print('%s AP: %f' % (c, average_precision))
output.append(metricsPerClass)
# t, p = get_map(all_dets, data['bboxes'], (fx, fy))
# for key in t.keys():
# if key not in T:
# T[key] = []
# P[key] = []
# T[key].extend(t[key])
# P[key].extend(p[key])
# all_aps = []
# for key in T.keys():
# ap = average_precision_score(T[key], P[key])
# all_aps.append(ap)
# if verbose:
# print('{} AP: {}'.format(key, ap))
# if verbose:
# print('mAP = {}'.format(np.nanmean(np.array(all_aps))))
# print()
# output.append(np.nanmean(np.array(all_aps)))
if visualise:
# plt.figure(figsize=(10,10))
plt.figure()
plt.grid()
plt.imshow(cv2.cvtColor(img_original, cv2.COLOR_BGR2RGB))
plt.show()
if verbose:
print('Total elapsed time = {}'.format(time.time() - overall_st))
output = np.asarray(output)
return output
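# Illustrative usage sketch (assumed names): `test_data` is a list of dicts with
# 'filepath' and 'bboxes' keys, as produced by an annotation parser. predict() accepts
# either raw images or such dicts; evaluate() requires the dicts because it needs the
# ground-truth boxes:
#     preds = frcnn.predict(test_data, class_mapping=class_mapping, verbose=1)
#     maps = frcnn.evaluate(test_data, class_mapping=class_mapping, overlap_thresh=0.3)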
###############################################################################
def _get_real_coordinates(ratio, x1, y1, x2, y2):
real_x1 = int(round(x1 // ratio))
real_y1 = int(round(y1 // ratio))
real_x2 = int(round(x2 // ratio))
real_y2 = int(round(y2 // ratio))
return (real_x1, real_y1, real_x2, real_y2)
def _format_img(img, img_channel_mean, img_scaling_factor, target_size):
""" format image for prediction or mAP calculation. Resize original image to target_size
Arguments:
img: cv2 image
img_channel_mean: image channel-wise (RGB) mean to subtract for standardisation
img_scaling_factor: scaling factor to divide by, for standardisation
target_size: shorter-side length. Used for image resizing based on the shorter length
Returns:
img: Scaled and normalized image with expanding dimension
ratio: img_min_side / original min side eg img_min_side / width if width <= height
fx: ratio for width scaling (original width / new width)
fy: ratio for height scaling (original height/ new height)
"""
""" resize image based on config """
img_min_side = float(target_size)
(height, width, _) = img.shape
if width <= height:
ratio = img_min_side/width
new_height = int(ratio * height)
new_width = int(img_min_side)
else:
ratio = img_min_side/height
new_width = int(ratio * width)
new_height = int(img_min_side)
fx = width/float(new_width)
fy = height/float(new_height)
img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
""" format image channels based on config """
img = img[:, :, (2, 1, 0)]
img = img.astype(np.float32)
img[:, :, 0] -= img_channel_mean[0]
img[:, :, 1] -= img_channel_mean[1]
img[:, :, 2] -= img_channel_mean[2]
img /= img_scaling_factor
img = np.transpose(img, (2, 0, 1))
img = np.expand_dims(img, axis=0)
return img, ratio, fx, fy
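# Worked example (illustrative numbers only): for a 768x1024 (HxW) input and
# target_size=600, width > height so ratio = 600/768 = 0.78125, giving a resized image
# of 800x600 (WxH); fx = 1024/800 = 1.28 and fy = 768/600 = 1.28 are later used to map
# detections back to the original resolution.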
def _get_img_output_length(width, height, base_net_type='resnet50'):
b = base_net_type
def get_output_length(input_length, b):
if ('resnet' in b):
# zero_pad
input_length += 6
# apply 4 strided convolutions
filter_sizes = [7, 3, 1, 1]
stride = 2
for filter_size in filter_sizes:
input_length = (input_length - filter_size + stride) // stride
return input_length
else:
return input_length//16
return get_output_length(width, b), get_output_length(height, b)
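# Worked example (illustrative): for a 600-pixel side, the resnet50 branch gives
# 600 -> 606 (zero pad) -> 300 -> 149 -> 75 -> 38 feature-map cells, while the
# non-resnet branch simply returns 600 // 16 = 37.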
def _rpn(base_layers, num_anchors):
# common layer fed to 2 layers
# - x_class for classification (is object in bounding box?)
# - x_regr for bounding box regression (ROIs)
x = Conv2D(512, (3, 3), padding='same', activation='relu', kernel_initializer='normal', name='rpn_conv1')(base_layers)
x_class = Conv2D(num_anchors, (1, 1), activation='sigmoid', kernel_initializer='uniform', name='rpn_out_class')(x)
x_regr = Conv2D(num_anchors * 4, (1, 1), activation='linear', kernel_initializer='zero', name='rpn_out_regress')(x)
return [x_class, x_regr, base_layers]
def _classifier(base_layers, input_rois, num_rois, nb_classes=4, trainable=True, base_net_type='resnet50'):
if ('resnet' in base_net_type):
pooling_regions = 14
input_shape = (num_rois, pooling_regions, pooling_regions, 1024)
out_roi_pool = RoiPoolingConv(pooling_regions, num_rois, name='roi_pooling_conv')([base_layers, input_rois])
# out = _classifier_layers(out_roi_pool, input_shape=input_shape, trainable=True)
trainable = True
out = _conv_block_td(out_roi_pool, 3, [512, 512, 2048], stage=5, block='a', input_shape=input_shape, strides=(2, 2), trainable=trainable)
out = _identity_block_td(out, 3, [512, 512, 2048], stage=5, block='b', trainable=trainable)
out = _identity_block_td(out, 3, [512, 512, 2048], stage=5, block='c', trainable=trainable)
out = TimeDistributed(AveragePooling2D((7, 7)), name='avg_pool')(out)
out = TimeDistributed(Flatten())(out)
else:
pooling_regions = 7
input_shape = (num_rois, pooling_regions, pooling_regions, 512)
out_roi_pool = RoiPoolingConv(pooling_regions, num_rois, name='roi_pooling_conv')([base_layers, input_rois])
# flatten convolution layer and connect to 2 FC with dropout
# print(out_roi_pool.shape)
out = TimeDistributed(Flatten(name='flatten'), name='time_distributed')(out_roi_pool)
out = TimeDistributed(Dense(4096, activation='relu', name='fc1'), name='time_distributed_1')(out)
out = TimeDistributed(Dropout(rate=0.5), name='time_distributed_2')(out)
out = TimeDistributed(Dense(4096, activation='relu', name='fc2'), name='time_distributed_3')(out)
out = TimeDistributed(Dropout(rate=0.5), name='time_distributed_4')(out)
# There are two output layer
# out_class: softmax acivation function for classification of the class name of the object
# out_regr: linear activation function for bboxes coordinates regression
out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'), name='dense_class_{}'.format(nb_classes))(out)
# note: no regression target for bg class
out_regr = TimeDistributed(Dense(4 * (nb_classes-1), activation='linear', kernel_initializer='zero'), name='dense_regress_{}'.format(nb_classes))(out)
return [out_class, out_regr]
def _conv_block_td(input_tensor, kernel_size, filters, stage, block, input_shape, strides=(2, 2), trainable=True):
# conv block time distributed
nb_filter1, nb_filter2, nb_filter3 = filters
bn_axis = 3
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = TimeDistributed(Conv2D(nb_filter1, (1, 1), strides=strides, trainable=trainable, kernel_initializer='normal'), input_shape=input_shape, name=conv_name_base + '2a')(input_tensor)
x = TimeDistributed(FixedBatchNormalization(axis=bn_axis), name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same', trainable=trainable, kernel_initializer='normal'), name=conv_name_base + '2b')(x)
x = TimeDistributed(FixedBatchNormalization(axis=bn_axis), name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = TimeDistributed(Conv2D(nb_filter3, (1, 1), kernel_initializer='normal'), name=conv_name_base + '2c', trainable=trainable)(x)
x = TimeDistributed(FixedBatchNormalization(axis=bn_axis), name=bn_name_base + '2c')(x)
shortcut = TimeDistributed(Conv2D(nb_filter3, (1, 1), strides=strides, trainable=trainable, kernel_initializer='normal'), name=conv_name_base + '1')(input_tensor)
shortcut = TimeDistributed(FixedBatchNormalization(axis=bn_axis), name=bn_name_base + '1')(shortcut)
x = Add()([x, shortcut])
x = Activation('relu')(x)
return x
def _identity_block_td(input_tensor, kernel_size, filters, stage, block, trainable=True):
# identity block time distributed
nb_filter1, nb_filter2, nb_filter3 = filters
bn_axis = 3
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = TimeDistributed(Conv2D(nb_filter1, (1, 1), trainable=trainable, kernel_initializer='normal'), name=conv_name_base + '2a')(input_tensor)
x = TimeDistributed(FixedBatchNormalization(axis=bn_axis), name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), trainable=trainable, kernel_initializer='normal', padding='same'), name=conv_name_base + '2b')(x)
x = TimeDistributed(FixedBatchNormalization(axis=bn_axis), name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = TimeDistributed(Conv2D(nb_filter3, (1, 1), trainable=trainable, kernel_initializer='normal'), name=conv_name_base + '2c')(x)
x = TimeDistributed(FixedBatchNormalization(axis=bn_axis), name=bn_name_base + '2c')(x)
x = Add()([x, input_tensor])
x = Activation('relu')(x)
return x
###############################################################################
# Definition for custom layers
class RoiPoolingConv(tf.keras.layers.Layer):
'''ROI pooling layer for 2D inputs.
See Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition,
K. He, X. Zhang, S. Ren, J. Sun
# Arguments
pool_size: int
Size of pooling region to use. pool_size = 7 will result in a 7x7 region.
num_rois: number of regions of interest to be used
# Input shape
list of two 4D tensors [X_img,X_roi] with shape:
X_img:
`(1, rows, cols, channels)`
X_roi:
`(1,num_rois,4)` list of rois, with ordering (x,y,w,h)
# Output shape
3D tensor with shape:
`(1, num_rois, channels, pool_size, pool_size)`
'''
def __init__(self, pool_size, num_rois, **kwargs):
self.pool_size = pool_size
self.num_rois = num_rois
super(RoiPoolingConv, self).__init__(**kwargs)
def build(self, input_shape):
self.nb_channels = input_shape[0][3]
def compute_output_shape(self, input_shape):
return None, self.num_rois, self.pool_size, self.pool_size, self.nb_channels
def call(self, x, mask=None):
assert(len(x) == 2)
# x[0] is image with shape (rows, cols, channels)
img = x[0]
# x[1] is roi with shape (num_rois,4) with ordering (x,y,w,h)
rois = x[1]
# input_shape = K.shape(img)
outputs = []
for roi_idx in range(self.num_rois):
x = rois[0, roi_idx, 0]
y = rois[0, roi_idx, 1]
w = rois[0, roi_idx, 2]
h = rois[0, roi_idx, 3]
x = K.cast(x, 'int32')
y = K.cast(y, 'int32')
w = K.cast(w, 'int32')
h = K.cast(h, 'int32')
# Resized roi of the image to pooling size (7x7)
rs = tf.image.resize_images(img[:, y:y+h, x:x+w, :], (self.pool_size, self.pool_size))
outputs.append(rs)
final_output = K.concatenate(outputs, axis=0)
# Reshape to (1, num_rois, pool_size, pool_size, nb_channels)
# Might be (1, 4, 7, 7, 3)
final_output = K.reshape(final_output, (1, self.num_rois, self.pool_size, self.pool_size, self.nb_channels))
# permute_dimensions is similar to transpose (here the permutation (0, 1, 2, 3, 4) is an identity and leaves the tensor unchanged)
final_output = K.permute_dimensions(final_output, (0, 1, 2, 3, 4))
# print(final_output.shape)
return final_output
def get_config(self):
config = {'pool_size': self.pool_size,
'num_rois': self.num_rois}
base_config = super(RoiPoolingConv, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
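# Shape sketch (illustrative numbers): with pool_size=7, num_rois=32 and a feature map
# of shape (1, 38, 38, 1024), the layer crops and resizes each of the 32 ROIs to 7x7 and
# returns a tensor of shape (1, 32, 7, 7, 1024), matching the reshape in call() above.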
class FixedBatchNormalization(tf.keras.layers.Layer):
def __init__(self, epsilon=1e-3, axis=-1,
weights=None, beta_init='zero', gamma_init='one',
gamma_regularizer=None, beta_regularizer=None, **kwargs):
self.supports_masking = True
self.beta_init = initializers.get(beta_init)
self.gamma_init = initializers.get(gamma_init)
self.epsilon = epsilon
self.axis = axis
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.initial_weights = weights
super(FixedBatchNormalization, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
shape = (input_shape[self.axis],)
self.gamma = self.add_weight(shape=shape,
initializer=self.gamma_init,
regularizer=self.gamma_regularizer,
name='{}_gamma'.format(self.name),
trainable=False)
self.beta = self.add_weight(shape=shape,
initializer=self.beta_init,
regularizer=self.beta_regularizer,
name='{}_beta'.format(self.name),
trainable=False)
self.running_mean = self.add_weight(shape=shape, initializer='zero',
name='{}_running_mean'.format(self.name),
trainable=False)
self.running_std = self.add_weight(shape=shape, initializer='one',
name='{}_running_std'.format(self.name),
trainable=False)
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
self.built = True
def call(self, x, mask=None):
assert self.built, 'Layer must be built before being called'
input_shape = K.int_shape(x)
reduction_axes = list(range(len(input_shape)))
del reduction_axes[self.axis]
broadcast_shape = [1] * len(input_shape)
broadcast_shape[self.axis] = input_shape[self.axis]
if sorted(reduction_axes) == list(range(K.ndim(x)))[:-1]:
x_normed = K.batch_normalization(
x, self.running_mean, self.running_std,
self.beta, self.gamma,
epsilon=self.epsilon)
else:
# need broadcasting
broadcast_running_mean = K.reshape(self.running_mean, broadcast_shape)
broadcast_running_std = K.reshape(self.running_std, broadcast_shape)
broadcast_beta = K.reshape(self.beta, broadcast_shape)
broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
x_normed = K.batch_normalization(
x, broadcast_running_mean, broadcast_running_std,
broadcast_beta, broadcast_gamma,
epsilon=self.epsilon)
return x_normed
def get_config(self):
config = {'epsilon': self.epsilon,
'axis': self.axis,
'gamma_regularizer': self.gamma_regularizer.get_config() if self.gamma_regularizer else None,
'beta_regularizer': self.beta_regularizer.get_config() if self.beta_regularizer else None}
base_config = super(FixedBatchNormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
###############################################################################
# Definitions for losses
lambda_rpn_regr = 1.0
lambda_rpn_class = 1.0
lambda_cls_regr = 1.0
lambda_cls_class = 1.0
epsilon = 1e-4
def rpn_loss_regr(num_anchors):
"""Loss function for rpn regression
Args:
num_anchors: number of anchors (9 in here)
Returns:
Smooth L1 loss function
0.5*x*x (if x_abs < 1)
x_abs - 0.5 (otherwise)
"""
def rpn_loss_regr_fixed_num(y_true, y_pred):
# x is the difference between the true value and the predicted value
x = y_true[:, :, :, 4 * num_anchors:] - y_pred
# absolute value of x
x_abs = K.abs(x)
# If x_abs <= 1.0, x_bool = 1
x_bool = K.cast(K.less_equal(x_abs, 1.0), tf.float32)
return lambda_rpn_regr * K.sum(
y_true[:, :, :, :4 * num_anchors] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :, :4 * num_anchors])
return rpn_loss_regr_fixed_num
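# Quick numeric check of the smooth L1 shape used above (illustrative only):
#     |x| = 0.5 -> 0.5 * 0.5**2 = 0.125 (quadratic near zero)
#     |x| = 2.0 -> 2.0 - 0.5    = 1.5   (linear for large residuals)
# The sum is normalised by the number of valid anchor targets plus epsilon.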
def rpn_loss_cls(num_anchors):
"""Loss function for rpn classification
Args:
num_anchors: number of anchors (9 in here)
y_true[:, :, :, :9]: [0,1,0,0,0,0,0,1,0] means only the second and the eighth box is valid which contains pos or neg anchor => isValid
y_true[:, :, :, 9:]: [0,1,0,0,0,0,0,0,0] means the second box is pos and eighth box is negative
Returns:
lambda * sum((binary_crossentropy(isValid*y_pred,y_true))) / N
"""
def rpn_loss_cls_fixed_num(y_true, y_pred):
return lambda_rpn_class * K.sum(y_true[:, :, :, :num_anchors] * K.binary_crossentropy(y_pred[:, :, :, :], y_true[:, :, :, num_anchors:])) / K.sum(epsilon + y_true[:, :, :, :num_anchors])
return rpn_loss_cls_fixed_num
def class_loss_regr(num_classes):
"""Loss function for rpn regression
Args:
num_anchors: number of anchors (9 in here)
Returns:
Smooth L1 loss function
0.5*x*x (if x_abs < 1)
x_abx - 0.5 (otherwise)
"""
def class_loss_regr_fixed_num(y_true, y_pred):
x = y_true[:, :, 4*num_classes:] - y_pred
x_abs = K.abs(x)
x_bool = K.cast(K.less_equal(x_abs, 1.0), 'float32')
return lambda_cls_regr * K.sum(y_true[:, :, :4*num_classes] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :4*num_classes])
return class_loss_regr_fixed_num
def class_loss_cls(y_true, y_pred):
return lambda_cls_class * K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :]))
###############################################################################
# Definitions for roi related helpers
# @jit(nopython=True)
def calc_iou(R, img_data, classifier_overlap, im_size, rpn_stride, class_mapping, classifier_regr_std):
"""Converts from (x1,y1,x2,y2) to (x,y,w,h) format
"""
bboxes = img_data['bboxes']
(width, height) = (img_data['width'], img_data['height'])
# get image dimensions for resizing
(resized_width, resized_height) = get_new_img_size(width, height, im_size)
gta = np.zeros((len(bboxes), 4))
for bbox_num, bbox in enumerate(bboxes):
# get the GT box coordinates, and resize to account for image resizing
gta[bbox_num, 0] = int(round(bbox['x1'] * (resized_width / float(width))/rpn_stride))
gta[bbox_num, 1] = int(round(bbox['x2'] * (resized_width / float(width))/rpn_stride))
gta[bbox_num, 2] = int(round(bbox['y1'] * (resized_height / float(height))/rpn_stride))
gta[bbox_num, 3] = int(round(bbox['y2'] * (resized_height / float(height))/rpn_stride))
x_roi = []
y_class_num = []
y_class_regr_coords = []
y_class_regr_label = []
IoUs = [] # for debugging only
# R.shape[0]: number of bboxes (=300 from non_max_suppression)
for ix in range(R.shape[0]):
(x1, y1, x2, y2) = R[ix, :]
x1 = int(round(x1))
y1 = int(round(y1))
x2 = int(round(x2))
y2 = int(round(y2))
best_iou = 0.0
best_bbox = -1
# Iterate through all the ground-truth bboxes to calculate the iou
for bbox_num in range(len(bboxes)):
curr_iou = iou([gta[bbox_num, 0], gta[bbox_num, 2], gta[bbox_num, 1], gta[bbox_num, 3]], [x1, y1, x2, y2])
# Find the corresponding ground-truth bbox_num with the largest iou
if curr_iou > best_iou:
best_iou = curr_iou
best_bbox = bbox_num
if best_iou < classifier_overlap[0]:
continue
else:
w = x2 - x1
h = y2 - y1
x_roi.append([x1, y1, w, h])
IoUs.append(best_iou)
if classifier_overlap[0] <= best_iou < classifier_overlap[1]:
# hard negative example
cls_name = 'bg'
elif classifier_overlap[1] <= best_iou:
cls_name = bboxes[best_bbox]['class']
cxg = (gta[best_bbox, 0] + gta[best_bbox, 1]) / 2.0
cyg = (gta[best_bbox, 2] + gta[best_bbox, 3]) / 2.0
cx = x1 + w / 2.0
cy = y1 + h / 2.0
tx = (cxg - cx) / float(w)
ty = (cyg - cy) / float(h)
tw = np.log((gta[best_bbox, 1] - gta[best_bbox, 0]) / float(w))
th = np.log((gta[best_bbox, 3] - gta[best_bbox, 2]) / float(h))
else:
raise RuntimeError('Unexpected best_iou value: {}'.format(best_iou))
class_num = class_mapping[cls_name]
class_label = len(class_mapping) * [0]
class_label[class_num] = 1
y_class_num.append(copy.deepcopy(class_label))
coords = [0] * 4 * (len(class_mapping) - 1)
labels = [0] * 4 * (len(class_mapping) - 1)
if cls_name != 'bg':
label_pos = 4 * class_num
sx, sy, sw, sh = classifier_regr_std
coords[label_pos:4+label_pos] = [sx*tx, sy*ty, sw*tw, sh*th]
labels[label_pos:4+label_pos] = [1, 1, 1, 1]
y_class_regr_coords.append(copy.deepcopy(coords))
y_class_regr_label.append(copy.deepcopy(labels))
else:
y_class_regr_coords.append(copy.deepcopy(coords))
y_class_regr_label.append(copy.deepcopy(labels))
if len(x_roi) == 0:
return None, None, None, None
# bboxes that iou > C.classifier_min_overlap for all gt bboxes in 300 non_max_suppression bboxes
X = np.array(x_roi)
# one hot code for bboxes from above => x_roi (X)
Y1 = np.array(y_class_num)
Y2 = np.concatenate([np.array(y_class_regr_label), np.array(y_class_regr_coords)], axis=1)
return np.expand_dims(X, axis=0), np.expand_dims(Y1, axis=0), np.expand_dims(Y2, axis=0), IoUs
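# Output sketch (illustrative): for N kept proposals and a class_mapping of C classes
# (including 'bg'), X has shape (1, N, 4) in (x, y, w, h), Y1 has shape (1, N, C) with
# one-hot class labels, and Y2 has shape (1, N, 8*(C-1)) holding the regression validity
# mask concatenated with the scaled regression targets.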
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 5 15:23:15 2018
@author: MM
"""
import flopy
import numpy as np
import time
import pickle
import calendar
class model():
# Initializer / Instance attributes
def __init__(self, scenario, xll, yll, xur, yur, cellsize, strt_yr, end_yr, ACTIVE, THICKNESS, GEO, DEM, IH, MUN, PAR, exe_file = r'C:\WRDAPP\MF2005.1_12\bin\mf2005.exe'):
self.name = scenario # Assign name
self.xll = xll # X coordinate of the lower left corner
self.yll = yll # Y coordinate of the lower left corner
self.xur = xur # X coordinate of the upper right corner
self.yur = yur # Y coordinate of the upper right corner
self.cellsize = cellsize # Grid size
self.ncol = int((self.xur - self.xll) / self.cellsize) # Number of columns
self.nrow = int((self.yur - self.yll) / self.cellsize) # Number of rows
self.strt_yr = strt_yr
self.end_yr = end_yr
self.actv = [np.loadtxt(i,skiprows=6) for i in ACTIVE] # Extent of model layers 1 through n
self.thck = [np.loadtxt(i,skiprows=6) for i in THICKNESS] # Thickness of model layers 1 through n
self.geo = [np.loadtxt(i,skiprows=6) for i in GEO] # Geologic formations in layers 1 through n
self.dem = np.loadtxt(DEM,skiprows=6) # Digital elevation model of the basin (model top)
self.ih = np.loadtxt(IH,skiprows=6) # Initial hydraulic head in layer 1 and layer 2
self.mun = np.loadtxt(MUN,skiprows=6) # Geographic extent of each municipality
self.nlay = 2 # This model only accepts 2 layers
self.exe = exe_file
# Create adjustable parameter dictionary
self.params = {}
with open(PAR) as f:
pval = f.readlines()
for i in pval:
try:
self.params[i[:i.rfind('_')]].append(float(i[i.rfind(' '):]))
except:
try:
self.params[i[:i.rfind('_')]] = [float(i[i.rfind(' '):])]
except:
pass
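# Illustrative sketch of the expected PAR file format (assumed, based on the parsing
# above): each line ends with a space-separated numeric value, and the text before the
# last underscore is used as the parameter group key, e.g.
#     HK_1 1.0E-01
#     HK_2 5.0E-02
#     RCH_1 0.15
# would yield self.params == {'HK': [0.1, 0.05], 'RCH': [0.15]}.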
def initializeFM(self):
# modelname to set the file root
mf = flopy.modflow.Modflow(r'model_files\modflow\VM_' + self.name, exe_name=self.exe)
# Model domain and grid definition
botm = np.zeros((self.nlay, self.nrow, self.ncol))
sumthck = np.zeros((self.nrow, self.ncol))
for b, thickness in enumerate(self.thck):
sumthck = np.add(sumthck,thickness)
botm[b,:,:] = self.dem - sumthck # Current layer bottom elevation
self.botm = botm
# Time discretization
nper = (self.end_yr - self.strt_yr)*12 # Number of stress periods
nstp = []
for y in range(self.strt_yr,self.end_yr):
for m in range(1,13):
nstp.append(calendar.monthrange(y,m)[1])
nstp = np.array(nstp)
steady = np.zeros((nper),dtype=bool)
dis = flopy.modflow.ModflowDis(mf, nlay=self.nlay, nrow=self.nrow, ncol=self.ncol, nper=nper, delr=self.cellsize, delc=self.cellsize, top=self.dem, botm=botm, perlen=nstp, nstp=9, tsmult=1.3, steady=steady, start_datetime='01/01/1984')
# Model Boundaries & initial conditions
# Active areas
ibound = np.ones((self.nlay, self.nrow, self.ncol), dtype=np.int32)
for a, active in enumerate(self.actv):
ibound[a,:,:] = active # Current layer active area
# Variables for the BAS package
strt = np.array([self.ih]*2)
bas = flopy.modflow.ModflowBas(mf, ibound=ibound, strt=strt, ifrefm=True, ichflg=True, stoper=3)
# Layer properties
# Add LPF package to the MODFLOW model
# Create a dictionary of arrays with geologic characteristics: HK = Hydraulic conductivity, VANI = Vertical anisotropy (H:V) of hydraulic conductivity, SS = Specific storage, SY = Specific yield
geoarrays = {}
# Loop through the layers and formations for each layer to apply the geologic parameters to each array
for p in ['HK', 'VANI', 'SS', 'SY']:
geoarrays[p] = np.zeros((self.nlay,self.nrow,self.ncol))
for l in range(self.nlay):
for f, fval in enumerate(self.params[p]):
geoarrays[p][l,:,:] += (self.geo[l] == f+1) * fval
layvka = [1]*self.nlay # Indicates that VANI represents the ratio of H:V hydraulic conductivity
lpf = flopy.modflow.mflpf.ModflowLpf(mf, ipakcb=9, laytyp=[1,1], layvka=layvka, hk=geoarrays['HK'], vka=geoarrays['VANI'], ss=geoarrays['SS'], sy=geoarrays['SY'], laywet=[1,1])
# lpf = flopy.modflow.mflpf.ModflowLpf(mf, ipakcb=9, laytyp=[0,0], layvka=layvka, hk=geoarrays['HK'], vka=geoarrays['VANI'], ss=geoarrays['SS'], sy=geoarrays['SY'])
return mf, dis, bas, lpf
def addNewWells(self, New_WEL, LYR, WEL_Dict=0, INFO_Dict=0, WEL_mult=1, start=0, end=0, dateType='per', coordType='xy', pumpwell=False, changepumping=False):
'''
New_WEL is an np array of the following format: X (or C), Y (or R), Start Year, End Year, Flow (m3/d)
WEL_mult is a scalar multiplier to be applied to all wells in the data set New_WEL
WEL_Dict is a dictionary that contains dictionary for each stress period, each dictionary contains an entry for each well with the layer, row, column, and pumping rate
coordType is a marker that should be either 'xy' or 'rc' depending on the coordinate definition in the New_WEL array
dateType is a marker that should be either 'yr' or 'per' depending on whether a value of year or stress period is passed in the Start and End time columns
pumpwell
changepumping
'''
# Initialize dictionary
if WEL_Dict == 0:
WEL_Dict = {}
if INFO_Dict == 0:
INFO_Dict = {}
# Assign start period and end period if defined in input
if start > 0:
New_WEL[:,2] = np.ones((New_WEL.shape[0]))*start
if end > 0:
New_WEL[:,3] = np.ones((New_WEL.shape[0]))*end
# Convert X and Y to Column and Row
if coordType == 'xy':
cconvert = lambda x: int(np.floor((x - self.xll) / self.cellsize))
New_WEL[:,0] = np.array([cconvert(xi) for xi in New_WEL[:,0]])
rconvert = lambda y: int(np.floor((self.yur - y) / self.cellsize))
New_WEL[:,1] = np.array([rconvert(yi) for yi in New_WEL[:,1]])
if coordType == 'rc':
New_WEL[:,0] = np.array([int(xi) for xi in New_WEL[:,0] - 1])
New_WEL[:,1] = np.array([int(yi) for yi in New_WEL[:,1] - 1])
# Convert data in year format to stress period format (months)
if dateType == 'yr':
New_WEL[:,2] = (New_WEL[:,2] - self.strt_yr) * 12 + 1
New_WEL[:,3] = (New_WEL[:,3] - self.strt_yr) * 12 + 1
# Loop through all wells in the dataset to fill dictionary
for w in range(0,New_WEL.shape[0]):
r = New_WEL[w,1]
c = New_WEL[w,0]
wellmun = self.mun[int(r),int(c)]
# Reduce the pumping amount by a percentage by municipality
if changepumping:
P = float(self.altpump[np.where(self.altpump==wellmun)[0],1]) # the ratio of new pumping to old pumping
else:
P = 1
# Assign flow rate for each well to all stress periods indicated by start and end years
for per in range(int(New_WEL[w,2] - 1),int(New_WEL[w,3] - 1)):
if pumpwell:
R = self.ratiogn['PER'][per]
else:
R = 1
try:
WEL_Dict[per].append([LYR,r,c,New_WEL[w,4]*WEL_mult*P*R])
INFO_Dict[per].append([LYR,r,c,New_WEL[w,4]*WEL_mult*P*R,wellmun]) # layer, row, column, volume (m3/d), municipality, well type
except:
WEL_Dict[per] = [[LYR,r,c,New_WEL[w,4]*WEL_mult*P*R]]
INFO_Dict[per]= [[LYR,r,c,New_WEL[w,4]*WEL_mult*P*R,wellmun]]
return WEL_Dict,INFO_Dict
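# Illustrative usage sketch (assumed values; `m` is an instance of this model class):
# New_WEL rows are [X (or Col), Y (or Row), Start, End, Flow (m3/d)], with negative flow
# for extraction, so a single well pumping 500 m3/d from 1995 through 2000 could be
# added with:
#     wells = np.array([[455000.0, 2145000.0, 1995, 2000, -500.0]])
#     WEL_DICT, WEL_INFO = m.addNewWells(wells, LYR=1, dateType='yr')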
def addRecharge(self,LU_arrays, PRECIP, start=0, end=0, RCH_Dict=0, RCH_mult=[1,1,1], dateType='per'):
'''
Outputs a dictionary of recharge arrays based on land use multiplier, land use cover, and precipitation input
LU_arrays: dictionary with 3 entries, one for each land use type, each containing gridded percent cover for that land use type
PRECIP: dictionary with 361 entries, one for each stress period, each containing gridded precipitation
RCH_Dict: existing dictionary holding recharge data, or 0 if the dictionary must be initialized
RCH_mult: list of recharge multipliers for the URBAN, NATURAL and WATER land use types
dateType: the date format ('yr' or 'per') for the start and end variables
'''
# Initialize dictionary: if there is no existing dictionary, create a dictionary with no entries
if RCH_Dict == 0:
RCH_Dict = {}
# If the recharge is for the first time step, apply only to the first time step
if start == 0:
for l, landuse in enumerate(['URBAN','NATURAL','WATER']):
# If there is not already an entry for the selected stress period, create a new array
try:
RCH_Dict[0] += PRECIP[0] * LU_arrays[landuse] * RCH_mult[l]
except:
RCH_Dict[0] = PRECIP[0] * LU_arrays[landuse] * RCH_mult[l]
# Convert data in year format to stress period format (months)
if dateType == 'yr':
start = (start - self.strt_yr) * 12 + 1
end = (end - self.strt_yr) * 12 + 1
# Loop through all stress periods between S_YR and E_YR
else:
for per in range(int(start - 1), int(end - 1)):
# Apply recharge amounts for each land use type
for l, landuse in enumerate(['URBAN','NATURAL','WATER']):
# If there is not already an entry for the selected stress period, create a new array
try:
RCH_Dict[per] += PRECIP[per]*LU_arrays[landuse]*RCH_mult[l]
except:
RCH_Dict[per] = PRECIP[per]*LU_arrays[landuse]*RCH_mult[l]
return RCH_Dict
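# Illustrative usage sketch (assumed names; `m` is an instance of this model class):
# LU_arrays would be the 'ARRAY' entry of a land-use dataset and Precip_Dict the
# per-stress-period precipitation grids, as built in run_scenario_model below:
#     RCH_DICT = m.addRecharge(LU_arrays=LU['1990']['ARRAY'], PRECIP=Precip_Dict,
#                              start=1, end=133, RCH_Dict=0, RCH_mult=m.params['RCH'])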
def outputControl(self,mf):
'''
Generate Output Control and Solver packages
Add OC package to the MODFLOW model
'''
spd = {}
data2record = ['save head', 'save drawdown', 'save budget', 'print budget']
for y in range(0,30):
for m in range(1,13):
spd[y * 12 + m - 1, 8] = data2record.copy()
# spd[y * 12 + m - 1, calendar.monthrange(self.strt_yr + y, m)[1] - 1] = data2record.copy() # If time steps in month is equal to number of days
# for d in range(0,calendar.monthrange(self.strt_yr + y, m)[1]):
# spd[y * 12 + m - 1, d] = data2record.copy()
spd[26,8] = ['save head', 'save drawdown', 'save budget', 'print budget', 'ddreference']
oc = flopy.modflow.ModflowOc(mf, stress_period_data=spd, compact=True)
# Add PCG package to the MODFLOW model
pcg = flopy.modflow.ModflowPcg(mf,mxiter=20, iter1=20)
return oc, pcg
def run_scenario_model(self,num_WWTP,num_RCHBASIN,fixleak,seed=1):
'''
num_WWTP is the number of wastewater treatment plants to rehabilitate for wastewater injection into the aquifer
num_RCHBASIN is the number of infiltration basins that will recharge the aquifer using imported water
fixleak is the percent of fixed leaks to historical leaks, 0 indicates the same level as historical leaks and 100 indicates all leaks are fixed
'''
np.random.seed(seed)
timestart = time.time()
print('Processing data...')
# Phase starting stress period
PHASE_PER = [0, 132, 252, 360]
phases = len(PHASE_PER) - 1
S_per = PHASE_PER[0:len(PHASE_PER) - 1]
E_per = PHASE_PER[1:len(PHASE_PER)]
# Phase land use dataset year
LU_PAR = ['1990', '2000', '2010']
# Model internal variables
# drains = False
fixleak = fixleak/100 # convert from integer to decimal
cost = 0 # Initial cost
sec2day = 60*60*24 # Second to day conversion
LID_PAR = [1, 1, 1] # Phase LID increase multiplier
# Water supply data
hist_water_use = np.loadtxt(r'model_files\optimization_data\decisions\twu.csv', delimiter=',', skiprows=1, usecols=(1,2,3)) # Initial (original) all other supplies before alternatives, matrix of size municipalities by phases (m3/s)
total_water_use = hist_water_use*self.params['TWU']*sec2day # Multiply by total water use parameters (m3/d)
self.twateruse = total_water_use.sum(axis=0) # Total water use for each model phase (m3/d)
i_other = np.loadtxt(r'model_files\optimization_data\decisions\initial_supply.csv', delimiter=',', skiprows=1, usecols=(1,2,3)) # Initial (original) all other supplies before alternatives (m3/s)
i_other = i_other.sum(axis=0)*sec2day # Initial other water supply for each model phase (m3/d)
new_other = i_other.copy() # New other water supply for each model phase (m3/d)
# Alternatives changes to supply
# Import alternative pumping scheme (percentage changes in pumping), total groundwater pumping must be equal to original
self.altpump = np.loadtxt(r'model_files\optimization_data\decisions\altpump.csv', delimiter=',', skiprows=1)
# Calculate historical quantity of leaks in each municipality
LEAK_MUN = np.loadtxt(r'data_processed\leak\LEAK_TOT_MUN.csv',delimiter=',',skiprows=1) # Total recharge percent per municipality: equal to percent of total water use (1997 values) x percent leak (~35%) x recharge percent (15%)
leaks = np.zeros((LEAK_MUN.shape[0],phases+1))
leaks[:,0] = LEAK_MUN[:,0]
for i in range(phases):
leaks[:,i+1] = self.params['LK'][i]*LEAK_MUN[:,1]*total_water_use[:,i] # Total leak per municipality by model phase (m3/d)
new_other += leaks[:,1:].sum(axis=0) * fixleak # Add the leaks averted as an alternative supply
self.ratiogn = {}
self.ratiogn['PHASE'] = (self.twateruse - new_other)/(self.twateruse - i_other) # Create a ratio of groundwater use with new other supplies to groundwater use with initial other supplies to apply to all groundwater pumping (dimensionless)
self.ratiogn['PER'] = np.zeros(PHASE_PER[phases])
for i, LUset in enumerate(LU_PAR):
for p in range(PHASE_PER[i],PHASE_PER[i+1]):
self.ratiogn['PER'][p] = self.ratiogn['PHASE'][i]
# Initialize the modflow model with the boundary conditions input above
mf, dis, bas, lpf = self.initializeFM()
print('Basic, Discretization, and Layer packages generated in', str(time.time() - timestart), 'seconds')
'''
Land Use Type
Fill a land use dictionary with the ARRAYs that represent the % of each land use cover in each cell and the LISTs that contain all the cells and percentages of each land use type
'''
LU = {}
for i, LUset in enumerate(LU_PAR):
LU[LUset] = {'ARRAY':{},'LIST':{}}
for l, LUtype in enumerate(['URBAN','NATURAL','WATER']):
filename = r'data_processed\landuse\LU-' + LUset + '-' + LUtype + '.asc'
perarea = np.loadtxt(filename,skiprows=6)
LU[LUset]['ARRAY'][LUtype] = perarea
LU[LUset]['LIST'][LUtype] = np.zeros((perarea.shape[0]*perarea.shape[1],5))
l = 0
for row in range(0,perarea.shape[0]):
for col in range(0,perarea.shape[1]):
if perarea[row,col] > 0.001:
LU[LUset]['LIST'][LUtype][l,2] = perarea[row,col]
LU[LUset]['LIST'][LUtype][l,0] = col
LU[LUset]['LIST'][LUtype][l,1] = row
LU[LUset]['LIST'][LUtype][l,3] = 1 - self.geo[0][row,col] # 0 if clay layer, 1 if no clay layer
LU[LUset]['LIST'][LUtype][l,4] = self.mun[row,col]
l += 1
LU[LUset]['LIST'][LUtype] = LU[LUset]['LIST'][LUtype][LU[LUset]['LIST'][LUtype][:,2]>0,:]
# Save land use database for use in mounding objective
winfofile = r'model_files\optimization_data\objectives\LU_' + self.name + '.pickle'
with open(winfofile, 'wb') as handle:
pickle.dump(LU, handle, protocol=pickle.HIGHEST_PROTOCOL)
'''
Recharge
Create recharge dictionary for MODFLOW RCH package based on land use multipliers and interpolated precipitation rasters
'''
newtime = time.time()
RCH_DICT = {}
Precip_Dict = {}
for year in range(int(self.strt_yr),int(self.end_yr)):
for month in range(1,13):
per = (year - self.strt_yr) * 12 + month - 1
filename = r'data_processed\recharge\claymult\PrecipCM_' + str(year) + '_' + '{num:02d}'.format(num=month) + '.asc'
Precip_Dict[per] = np.loadtxt(filename,skiprows=6)
for i, LUset in enumerate(LU_PAR):
RCH_DICT = self.addRecharge(LU_arrays=LU[LUset]['ARRAY'], PRECIP=Precip_Dict, start=S_per[i]+1, end=E_per[i]+1, RCH_Dict=RCH_DICT, RCH_mult=self.params['RCH'])
# Create MODFLOW RCH package
rch = flopy.modflow.ModflowRch(mf, nrchop=3, ipakcb=9, rech=RCH_DICT)
print('RCH_Dict generated in', str(time.time() - newtime), 'seconds')
'''
Well objects: supply wells, distribution leaks, injection wells, wastewater reuse, recharge basins
'''
newtime = time.time()
WEL_DICT = {}
WEL_INFO = {}
# Add supply wells, includes the ratioGn multiplier to reduce pumping when new supplies are added
# Import CONAGUA and SACM pumping datasets
CAEM_array = np.loadtxt(r'data_processed\wells\PUMP_C.csv', delimiter=',', skiprows=1, usecols=[1,2,7,8,11]) # pumping in m3 per day
WEL_DICT, WEL_INFO = self.addNewWells(CAEM_array, LYR=1, WEL_Dict=WEL_DICT, INFO_Dict=WEL_INFO, pumpwell=True)
SACM_array = np.loadtxt(r'data_processed\wells\PUMP_S-ERRORS.csv',delimiter=',', skiprows=1, usecols=[1,2,7,8,11]) # pumping in m3 per day
WEL_DICT, WEL_INFO = self.addNewWells(SACM_array, LYR=1, WEL_Dict=WEL_DICT, INFO_Dict=WEL_INFO, pumpwell=True)
REPDA_array = np.loadtxt(r'data_processed\wells\PUMP_RC_Q-Repeats.csv', delimiter=',', skiprows=1, usecols=[1,2,4,5,11,16]) # pumping in m3 per day
total_urban_repda = REPDA_array[REPDA_array[:,5]==1,4].sum() * -1 # Pumping sum is negative
total_periurban_repda = REPDA_array[REPDA_array[:,5]==0,4].sum() * -1 # Pumping sum is negative
# Include only municipalities with urban land cover
mun = np.unique(self.mun)[1:].copy()
# Loop through model phases
for i, l in enumerate(LU_PAR):
# Calculate unaccounted for water supply by subtraction to determine pumping in REPDA dataset
total_mthly_pumping = self.twateruse[i] - new_other[i] # Monthly pumping is equal to the total water use minus other supply (m3/d)
LEAK_array = np.zeros((LU[l]['LIST']['URBAN'].shape[0] * (PHASE_PER[i + 1] - PHASE_PER[i]),5))
j = 0
# Generate monthly pumping datasets for REPDA data in single pumping value format
# Loop through monthly periods in each model phase
for p in range(S_per[i]+1,E_per[i]+1):
unknown_pumping = total_mthly_pumping - (-1 * np.sum(list(zip(*WEL_INFO[p-1]))[3])) # Unknown pumping is the total monthly pumping for each model period minus the known pumping from SACM and CAEM (which are negative)
# Urban wells
WEL_DICT, WEL_INFO = self.addNewWells(New_WEL=REPDA_array[REPDA_array[:,5]==1,:5], LYR=1, WEL_Dict=WEL_DICT, INFO_Dict=WEL_INFO, WEL_mult=self.params['Q'][i], start=p, end=(p + 1), pumpwell=True)
p_multiplier = (unknown_pumping - total_urban_repda*self.params['Q'][i]*self.ratiogn['PHASE'][i])/total_periurban_repda # Determine the monthly multiplier by dividing the estimated unknown pumping by the total pumping in the REPDA dataset
# Peri-urban wells
WEL_DICT, WEL_INFO = self.addNewWells(New_WEL=REPDA_array[REPDA_array[:,5]==0,:5], LYR=1, WEL_Dict=WEL_DICT, INFO_Dict=WEL_INFO, WEL_mult=p_multiplier, start=p, end=(p + 1), pumpwell=True)
'''
Leak Repair
Create a well dictionary for all the leak cells. The leak cells will be treated
as an injection well at each cell in which there is urban land cover. MODFLOW
distributes injection wells evenly across the area of the cell. The leak
percentage is based on the leak percentage determined by municipality. Then
the leak amount determined for each cell is multiplied by the percent of
urban land cover in that cell. Finally, leaks in cells that are located in the
lacustrine zone are reduced by 90% assuming that the low hydraulic
conductivity does not allow for high levels of infiltration and the sewer
provides a preferential flow path out of the basin
'''
for n,m in enumerate(mun):
# Create an array for all urban model cells in this municipality
tempLeak = LU[l]['LIST']['URBAN'][(LU[l]['LIST']['URBAN'][:,4]==m),:4]
if len(tempLeak) > 0:
u_cells = tempLeak[:, 2].sum() # Number of urban model cells in municipality m
# Use total pumping for each stress period to determine leak quantities
LperCell = float(leaks[np.where(leaks==m)[0],i+1]) * (1 - fixleak) * self.params['IN'][0] / u_cells # Reference the leaks in the municipality (m3/d) multiply by (1 - fixleak), multiply by infiltration rate, divide by the number of urban cells
tempLeak[:,2] *= LperCell
# apply 90% returns to sewer under clay layer (Geologic formation 1)
tempLeak[tempLeak[:,3]==1, 2] *= 0.1
# Get rows of all cells of urban land use type from list
LEAK_array[j:(j + tempLeak.shape[0]), 0] = tempLeak[:, 0]
# Get columns of all cells of urban land use type from list
LEAK_array[j:(j + tempLeak.shape[0]), 1] = tempLeak[:, 1]
# Set the period to the current stress period for all urban cells
LEAK_array[j:(j + tempLeak.shape[0]), 2] = p
# Set the end of the period to the next stress period
LEAK_array[j:(j + tempLeak.shape[0]), 3] = p + 1
# Set the multiplier to the percentage of urban land use type stored in list
LEAK_array[j:(j + tempLeak.shape[0]), 4] = tempLeak[:,2]
# Set the new index to the previous index plus the number of cells added
j += tempLeak.shape[0]
WEL_DICT, WEL_INFO = self.addNewWells(LEAK_array, LYR=1, WEL_Dict=WEL_DICT, INFO_Dict=WEL_INFO, WEL_mult=self.params['LK'][i], coordType='rc')
total_mthly_leak = np.zeros(PHASE_PER[3])
# -*- coding: utf-8 -*-
"""
Sky Subtract
"""
import matplotlib
matplotlib.use('agg')
import glob
import numpy as np
import os.path as op
import fitsio
from astropy.io import fits
from input_utils import setup_parser, set_daterange, setup_logging
from scipy.interpolate import splev, splrep
from astropy.stats import mad_std
def check_if_type(date, obsid, args):
''' Test if header has IMAGETYP '''
filenames = glob.glob(op.join(args.rootdir, date, args.instrument,
args.instrument + obsid, 'exp01',
args.instrument, 'multi_*_*_*_LL.fits'))
try:
kind = fits.open(filenames[0])[0].header['IMAGETYP']
except:
args.log.warn('No IMAGETYP in header for %s and observation %s'
% (date, obsid))
return False
if kind == args.type:
return True
else:
return False
def build_filenames(date, obsid, args):
'''
Build directory structure and search for all the files in a given
observation and exposure.
'''
if args.type == 'twi':
expstr = '01'
else:
expstr = '*'
filenames = glob.glob(op.join(args.rootdir, date, args.instrument,
args.instrument + obsid, 'exp%s' % expstr,
args.instrument, 'multi_*_*_*_LL.fits'))
ifuslot_list = [op.basename(fn).split('_')[2] for fn in filenames]
ifuslots = np.unique(ifuslot_list)
exposure_list = [op.basename(op.dirname(op.dirname(fn)))[3:]
for fn in filenames]
exposures = np.unique(exposure_list)
################################################################################
# Peach - Computational Intelligence for Python
# <NAME>
#
# This file: optm/optm.py
# Basic definitions and base class
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
Basic definitons and base class for optimizers
This sub-package exports some auxiliary functions to work with cost functions,
namely, a function to calculate gradient vectors and hessian matrices, which are
extremely important in optimization.
Also, a base class, ``Optimizer``, for all optimizers. Sub-class this class if
you want to create your own optmizer, and follow the interface. This will allow
easy configuration of your own scripts and comparison between methods.
"""
################################################################################
from numpy import array, zeros
################################################################################
# Auxiliary functions
################################################################################
def gradient(f, dx=1e-5):
'''
Creates a function that calculates the gradient vector of a scalar field.
This function takes as a parameter a scalar function and creates a new
function that is able to calculate the derivative (in case of single
variable functions) or the gradient vector (in case of multivariable
functions). Please note that this function takes as a parameter a
*function*, and returns as a result *another function*. Calling the returned
function on a point will give the gradient vector of the original function
at that point::
>>> def f(x):
return x**2
>>> df = gradient(f)
>>> df(1)
2
In the above example, ``df`` is a generated function which will return the
result of the expression ``2*x``, the derivative of the original function.
In the case ``f`` is a multivariable function, it is assumed that its
argument is a line vector.
:Parameters:
f
Any function, one- or multivariable. The function must be a scalar
function, though there is no checking at the moment the function is
created. If ``f`` is not an scalar function, an exception will be
raised at the moment the returned function is used.
dx
Optional argument that gives the precision of the calculation. It is
recommended that ``dx = sqrt(D)``, where ``D`` is the machine precision.
It defaults to ``1e-5``, which usually gives a good estimate.
:Returns:
A new function which, upon calling, gives the derivative or gradient
vector of the original function at the analysed point. The parameter of
the returned function is a real number or a line vector where the gradient
should be calculated.
'''
def _df(x):
try:
x = float(x)
return (f(x+dx) - f(x-dx)) / (2.*dx)
except TypeError:
n = x.size
df = zeros((n, ))
for i in range(n):
xl = array(x)
xl[i] = xl[i] - dx
xr = array(x)
xr[i] = xr[i] + dx
df[i] = (f(xr) - f(xl)) / (2.*dx)
return df
return _df
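# Minimal usage sketch (an illustration, not part of the original Peach module):
# the numerical gradient of f(x0, x1) = x0**2 + x1**2 should be close to the
# analytic gradient [2*x0, 2*x1]. The helper name _demo_gradient is made up.
def _demo_gradient():
    g = gradient(lambda v: (v ** 2).sum())
    return g(array([1.0, 3.0]))  # approximately array([2., 6.])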
def hessian(f, dx=1e-5):
'''
Creates a function that calculates the hessian matrix of a scalar field.
This function takes as a parameter a scalar function and creates a new
function that is able to calculate the second derivative (in case of single
variable functions) or the hessian matrix (in case of multivariable
functions). Please note that this function takes as a parameter a
*function*, and returns as a result *another function*. Calling the returned
function on a point will give the hessian matrix of the original function
at that point::
>>> def f(x):
return x**4
>>> ddf = hessian(f)
>>> ddf(1)
12
In the above example, ``ddf`` is a generated function which will return the
result of the expression ``12*x**2``, the second derivative of the original
function. In the case ``f`` is a multivariable function, it is assumed that
its argument is a line vector.
:Parameters:
f
Any function, one- or multivariable. The function must be a scalar
function, though there is no checking at the moment the function is
created. If ``f`` is not an scalar function, an exception will be
raised at the moment the returned function is used.
dx
Optional argument that gives the precision of the calculation. It is
recommended that ``dx = sqrt(D)``, where ``D`` is the machine precision.
It defaults to ``1e-5``, which usually gives a good estimate.
:Returns:
A new function which, upon calling, gives the second derivative or hessian
matrix of the original function at the analysed point. The parameter of
the returned function is a real number or a line vector where the hessian
should be calculated.
'''
def _hf(x):
try:
x = float(x)
return (f(x+dx) - 2*f(x) + f(x-dx)) / (dx*dx)
except TypeError:
n = x.size
hf = zeros((n, n))
for i in range(n):
for j in range(n):
xll = array(x)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Copyright (c) 2019, Eurecat / UPF
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# @file utils.py
# @author <NAME>
# @date 30/07/2019
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import csv
import numpy as np
from scipy.special import sph_harm
from masp.validate_data_types import _validate_int, _validate_ndarray_2D, _validate_string, _validate_ndarray_1D, \
_validate_ndarray
c = 343.
C = 3
def get_capsule_positions(mic_array_name):
"""
Retrieve the geometry of a selected set of microphone arrays.
Parameters
----------
mic_array_name : str
One of: 'eigenmike', 'ambeo'
Returns
-------
array_sigs : ndarray
Capsule positions, in spherical coordinates (radians). Dimension = (nMic, C)
Raises
-----
TypeError, ValueError: if method arguments mismatch in type, dimension or value.
"""
_validate_string('mic_array_name', mic_array_name, choices=['eigenmike', 'ambeo'])
capsule_positions = None
if mic_array_name == 'eigenmike':
mic_dirs_deg = np.array([[0, 32, 0, 328, 0, 45, 69, 45, 0, 315, 291, 315, 91, 90, 90, 89, 180, 212, 180, 148,
180, 225, 249, 225, 180, 135, 111, 135, 269, 270, 270, 271],
[21, 0, -21, 0, 58, 35, 0, -35, -58, -35, 0, 35, 69, 32, -31, -69, 21, 0, -21, 0, 58,
35, 0, -35, -58, -35, 0, 35, 69, 32, -32, -69]])
mic_dirs_rad = mic_dirs_deg * np.pi / 180.
r = 0.042
mic_dirs_rad = np.row_stack((mic_dirs_rad, r*np.ones(np.shape(mic_dirs_rad)[1])))
capsule_positions = mic_dirs_rad.T
elif mic_array_name == 'ambeo':
r = 0.015
capsule_positions = [[np.pi / 4, np.arcsin(1. / np.sqrt(3)), r], # FLU
[7 * np.pi / 4, -1 * np.arcsin(1. / np.sqrt(3)), r], # FRD
[3 * np.pi / 4, -1 * np.arcsin(1. / np.sqrt(3)), r], # BLD
[5 * np.pi / 4, np.arcsin(1. / np.sqrt(3)), r]] # BRU
return capsule_positions
def cart2sph(cart):
"""
Cartesian to spherical coordinates transformation, in matrix form.
Parameters
----------
cart : ndarray
Cartesian coordinates. Dimension = (nCoords, C)
Returns
-------
sph : ndarray
Spherical coordinates, in radians, aed. Dimension = (nCoords, C)
Raises
-----
TypeError, ValueError: if method arguments mismatch in type, dimension or value.
Notes
-----
As a dimensionality exception, in case the input matrix is 1D (just one point),
the output matrix will be as well 1D.
"""
arg = cart.copy()
_validate_ndarray('cart', cart)
if cart.ndim == 1:
_validate_ndarray_1D('cart', cart, size=C)
cart = cart[np.newaxis, :]
elif cart.ndim == 2:
_validate_ndarray_2D('cart', cart, shape1=C)
else:
raise ValueError('cart must be either 1D or 2D array')
sph = np.empty(cart.shape)
hypotxy = np.hypot(cart[:, 0], cart[:, 1])
sph[:, 2] = np.hypot(hypotxy, cart[:, 2])
sph[:, 1] = np.arctan2(cart[:, 2], hypotxy)
sph[:, 0] = np.arctan2(cart[:, 1], cart[:, 0])
if arg.ndim == 1:
sph = sph.squeeze()
return sph
def sph2cart(sph):
"""
Spherical to cartesian coordinates transformation, in matrix form.
Parameters
----------
sph : ndarray
Spherical coordinates, in radians, aed. Dimension = (nCoords, C)
Returns
-------
sph : ndarray
Cartesian coordinates. Dimension = (nCoords, C)
Raises
-----
TypeError, ValueError: if method arguments mismatch in type, dimension or value.
Notes
-----
As a dimensionality exception, in case the input matrix is 1D (just one point),
the output matrix will be as well 1D.
"""
arg = sph.copy()
_validate_ndarray('sph', sph)
if sph.ndim == 1:
_validate_ndarray_1D('sph', sph, size=C)
sph = sph[np.newaxis, :]
elif sph.ndim == 2:
_validate_ndarray_2D('sph', sph, shape1=C)
else:
raise ValueError('sph must be either 1D or 2D array')
cart = np.empty(sph.shape)
cart[:, 2] = sph[:, 2] * np.sin( sph[:, 1])
rcoselev = sph[:, 2] * np.cos( sph[:, 1])
cart[:, 0] = rcoselev * np.cos( sph[:, 0])
cart[:, 1] = rcoselev * np.sin( sph[:, 0])
if arg.ndim == 1:
cart = cart.squeeze()
return cart
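# Minimal round-trip sketch (an assumption, not part of the original utilities):
# converting a Cartesian point to spherical coordinates and back should recover
# the original point up to floating-point error.
def _demo_cart_sph_roundtrip():
    p = np.array([1.0, 1.0, 1.0])
    return np.allclose(sph2cart(cart2sph(p)), p)  # True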
def elev2incl(dirs):
"""
Spherical coordinates: elevation to inclination reference system
Parameters
----------
dirs : ndarray
Spherical coordinates, in radians, aed. Dimension = (nCoords, {2,3})
Returns
-------
incl : ndarray
Transformed coordinates. Dimension = (nCoords, {2,3})
Raises
-----
TypeError, ValueError: if method arguments mismatch in type, dimension or value.
Notes
-----
The input matrix might have dimension 1 = 2 ([azimuth, elevation]),
or dimension 1 = 3 ([azimuth, elevation, distance]).
The output matrix will propagate input dimensionality.
"""
_validate_ndarray_2D('dirs',dirs)
if dirs.shape[1] == 2:
incl = np.column_stack((dirs[:, 0], np.pi / 2 - dirs[:, 1]))
elif dirs.shape[1] == 3:
incl = np.column_stack((dirs[:, 0], np.pi / 2 - dirs[:, 1], dirs[:, 2]))
else:
raise ValueError('dirs must have dimension 1={2,3}')
return incl
def incl2elev(dirs):
"""
Spherical coordinates: inclination to elevation reference system
Parameters
----------
dirs : ndarray
Spherical coordinates, in radians, aid. Dimension = (nCoords, {2,3})
Returns
-------
elev : ndarray
Transformed coordinates. Dimension = (nCoords, {2,3})
Raises
-----
TypeError, ValueError: if method arguments mismatch in type, dimension or value.
Notes
-----
The input matrix might have dimension 1 = 2 ([azimuth, inclination]),
or dimension 1 = 3 ([azimuth, inclination, distance]).
The output matrix will propagate input dimensionality.
"""
return elev2incl(dirs)
def lagrange(N, delays):
"""
Design a fractional delay order-N filter matrix with polynomial interpolation.
Parameters
----------
N : int
Filter order.
delays : ndarray
Target fractional delays, in samples. Dimension = (1).
Returns
-------
h : ndarray
Target filter. Dimension = (N+1, len(delays))
Raises
-----
TypeError, ValueError: if method arguments mismatch in type, dimension or value.
Notes
-----
For best results, delay should be near N/2 +/- 1.
"""
_validate_int('N', N, positive=True)
_validate_ndarray_1D('delays', delays, positive=True)
n = np.arange(N+1)
h = np.ones((N+1, delays.size))
for l in range(delays.size):
for k in range(N+1):
idx = n[n != k]
h[idx, l] = h[idx, l] * (delays[l]-k) / (n[idx]-k)
return h
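# Usage sketch (hypothetical, not from the original file): a 3rd-order Lagrange
# fractional-delay filter for a delay of 1.5 samples; an interpolating FIR of
# this kind has taps that sum to 1, which is a quick sanity check.
def _demo_lagrange():
    h = lagrange(3, np.array([1.5]))
    return h.shape, float(np.sum(h))  # ((4, 1), approximately 1.0)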
def isLambda(v):
"""
Determine if a given argument is a lambda expression.
Parameters
----------
v : arg
Argument to test.
Returns
-------
isLambda : boolean
Result.
"""
LAMBDA = lambda:0
return isinstance(v, type(LAMBDA)) and v.__name__ == LAMBDA.__name__
def load_sph_grid(file_path):
"""
TODO: complete implementation; only 2D matrices are handled at the moment.
:param file_path:
:return:
"""
with open(file_path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=' ')
rows = list(csv_reader)
dim1 = int(rows[0][0])
dim0 = int(rows[1][0])
sph_grid = np.asarray(rows[2:], dtype='float')
assert np.shape(sph_grid) == (dim0, dim1)
return sph_grid
#############################################################################################
#
# Spherical Harmonic Transform library.
# TODO: separate package?
def get_sh(N, dirs, basisType):
"""
Get spherical harmonics up to order N evaluated at given angular positions.
Parameters
----------
N : int
Maximum spherical harmonic expansion order.
dirs : ndarray
Evaluation directions. Dimension = (nDirs, 2).
Directions are expected in radians, expressed in pairs [azimuth, inclination].
basisType : str
Type of spherical harmonics. Either 'complex' or 'real'.
Returns
-------
Y : ndarray
Spherical harmonics . Dimension = (nDirs, nHarm).
Raises
-----
TypeError, ValueError: if method arguments mismatch in type, dimension or value.
NotImplementedError: for basisType = 'complex'
Notes
-----
Ouput dimension is given by: nHarm = (N+1)^2
Inclination is defined as the angle from zenith: inclination = pi/2-elevation
TODO: implement complex basis?
"""
_validate_int('order', N, positive=True)
_validate_ndarray_2D('dirs', dirs, shape1=C-1)
_validate_string('basisType', basisType, choices=['complex', 'real'])
nDirs = dirs.shape[0]
nHarm = np.power(N+1, 2)
Y_N = np.zeros((nDirs, nHarm))
def delta_kronecker(q1, q2):
return 1 if q1 == q2 else 0
# TODO
# it looks like the output of shs is N3d (max 1, sqrt(3)!3)
# so it needs to be scaled as * np.sqrt(4*np.pi) * [1, 1./np.sqrt(3), 1./np.sqrt(3), 1./np.sqrt(3)]
def norm(m):
"""
TODO
SN3D FACTOR, REMOVE CONDON-SHORTLEY PHASE
IT MUST BE MULTIPLIED BY sqrt(4PI) to be normalized to 1
:param m:
:return:
"""
return np.power(-1, np.abs(m)) * np.sqrt(2 - delta_kronecker(0, np.abs(m)))
if basisType == 'complex':
# TODO
raise NotImplementedError
elif basisType == 'real':
harm_idx = 0
for l in range(N + 1):
for m in range(-l,0):
Y_N[:, harm_idx] = np.imag(sph_harm(np.abs(m), l, dirs[:, 0], dirs[:, 1])) * norm(m)
harm_idx += 1
for m in range(l + 1):
Y_N[:, harm_idx] = np.real(sph_harm(m, l, dirs[:, 0], dirs[:, 1])) * norm(m)
harm_idx += 1
return Y_N
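# Small usage sketch (an assumption, not part of the original library): real
# spherical harmonics up to order N = 1 evaluated at a single
# [azimuth, inclination] direction give (N+1)**2 = 4 coefficients per direction.
def _demo_get_sh():
    dirs = np.array([[0.0, np.pi / 2]])  # one direction on the horizontal plane
    return get_sh(1, dirs, 'real').shape  # (1, 4)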
def check_cond_number_sht(N, dirs, basisType, W=None):
"""
Computes the condition number for a least-squares SHT.
Parameters
----------
N : int
Maximum order to be tested for the given set of points.
dirs : ndarray
Evaluation directions. Dimension = (nDirs, 2).
Directions are expected in radians, expressed in pairs [azimuth, inclination].
basisType : str
Type of spherical harmonics. Either 'complex' or 'real'.
W : ndarray, optional.
Weights for each measurement point to condition the inversion,
in case of a weighted least-squares. Dimension = (nDirs)
Returns
-------
cond_N : ndarray
Condition number for each order. Dimension = (N+1)
Raises
-----
TypeError, ValueError: if method arguments mismatch in type, dimension or value.
Notes
-----
Inclination is defined as the angle from zenith: inclination = pi/2-elevation
TODO: implement complex basis?
TODO: implement W
"""
_validate_int('N', N, positive=True)
_validate_ndarray_2D('dirs', dirs, shape1=C-1)
_validate_string('basisType', basisType, choices=['complex', 'real'])
if W is not None:
_validate_ndarray_1D('W', W, size=dirs.shape[0])
# Compute the harmonic coefficients
Y_N = get_sh(N, dirs, basisType)
# Compute condition number for progressively increasing order up to N
cond_N = np.zeros(N + 1)
import argparse
import numpy as np
import time
import torch
import json
import torch.nn as nn
import torch.nn.functional as FN
import cv2
import random
from tqdm import tqdm
from solver import Solver
from removalmodels.models import Generator, Discriminator
from removalmodels.models import GeneratorDiff, GeneratorDiffWithInp, GeneratorDiffAndMask, GeneratorDiffAndMask_V2, VGGLoss
from os.path import basename, exists, join, splitext
from os import makedirs
from torch.autograd import Variable
from utils.data_loader_stargan import get_dataset
from torch.backends import cudnn
from utils.utils import show
from skimage.measure import compare_ssim, compare_psnr
class ParamObject(object):
def __init__(self, adict):
"""Convert a dictionary to a class
@param :adict Dictionary
"""
self.__dict__.update(adict)
for k, v in list(adict.items()):
if isinstance(v, dict):
self.__dict__[k] = ParamObject(v)
def __getitem__(self,key):
return self.__dict__[key]
def values(self):
return list(self.__dict__.values())
def itemsAsDict(self):
return dict(list(self.__dict__.items()))
def VOCap(rec,prec):
nc = rec.shape[1]
mrec=np.concatenate([np.zeros((1,rec.shape[1])),rec,np.ones((1,rec.shape[1]))],axis=0)
mprec=np.concatenate([np.zeros((1,rec.shape[1])),prec,np.zeros((1,rec.shape[1]))],axis=0)
for i in reversed(np.arange(mprec.shape[0]-1)):
mprec[i,:]=np.maximum(mprec[i,:],mprec[i+1,:])
#-------------------------------------------------------
# Now do the step wise integration
# Original matlab code is
#-------------------------------------------------------
# i=find(mrec(2:end)~=mrec(1:end-1))+1;
# ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
# Here we use boolean indexing of numpy instead of find
steps = (mrec[1:,:] != mrec[:-1,:])
ap = np.zeros(nc)
for i in range(nc):
ap[i]=sum((mrec[1:,:][steps[:,i], i] - mrec[:-1,:][steps[:,i], i])*mprec[1:,][steps[:,i],i])
return ap
def computeAP(allSc, allLb):
si = (-allSc).argsort(axis=0)
cid = np.arange(20)
tp = allLb[si[:,cid],cid] > 0.
fp = allLb[si[:,cid],cid] == 0.
tp = tp.cumsum(axis=0).astype(np.float32)
fp = fp.cumsum(axis=0).astype(np.float32)
rec = tp/(allLb>0.).sum(axis=0).astype(np.float32)
prec = tp/ (tp+ fp)
ap = VOCap(rec,prec)
return ap
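# Hypothetical sanity check (not part of the original evaluation script): with
# random scores and random labels over the 20 classes that computeAP assumes,
# each average precision should land near the positive-label frequency (~0.5).
def _demo_compute_ap():
    rng = np.random.RandomState(0)
    scores = rng.rand(200, 20)
    labels = (rng.rand(200, 20) > 0.5).astype(np.float32)
    return computeAP(scores, labels)  # 20 values, each roughly 0.5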
def get_sk_image(img):
img = img[:,[0,0,0], ::] if img.shape[1] == 1 else img
img = np.clip(img.data.cpu().numpy().transpose(0, 2, 3, 1),-1,1)
img = 255*((img[0,::] + 1) / 2)
return img
def gen_samples(params):
# For fast training
#cudnn.benchmark = True
gpu_id = 0
use_cuda = params['cuda']
b_sz = params['batch_size']
solvers = []
configs = []
for i, mfile in enumerate(params['model']):
model = torch.load(mfile)
configs.append(model['arch'])
configs[-1]['pretrained_model'] = mfile
configs[-1]['load_encoder'] = 1
configs[-1]['load_discriminator'] = 0
configs[-1]['image_size'] = params['image_size']
if i==0:
configs[i]['onlypretrained_discr'] = params['evaluating_discr']
else:
configs[i]['onlypretrained_discr'] = None
if params['withExtMask'] and params['mask_size']!= 32:
configs[-1]['lowres_mask'] = 0
configs[-1]['load_encoder'] = 0
solvers.append(Solver(None, None, ParamObject(configs[-1]), mode='test' if i > 0 else 'eval', pretrainedcv=model))
solvers[-1].G.eval()
if configs[-1]['train_boxreconst'] >0:
solvers[-1].E.eval()
solvers[0].D.eval()
solvers[0].D_cls.eval()
dataset = get_dataset('', '', params['image_size'], params['image_size'], params['dataset'], params['split'],
select_attrs=configs[0]['selected_attrs'], datafile=params['datafile'], bboxLoader=1,
bbox_size = params['box_size'], randomrotate = params['randomrotate'],
randomscale=params['randomscale'], max_object_size=params['max_object_size'],
use_gt_mask = 0, n_boxes = params['n_boxes'])#configs[0]['use_gtmask_inp'])#, imagenet_norm=(configs[0]['use_imagenet_pretrained'] is not None))
#gt_mask_data = get_dataset('','', params['mask_size'], params['mask_size'], params['dataset'], params['split'],
# select_attrs=configs[0]['selected_attrs'], bboxLoader=0, loadMasks = True)
#data_iter = DataLoader(targ_split, batch_size=b_sz, shuffle=True, num_workers=8)
targ_split = dataset #train if params['split'] == 'train' else valid if params['split'] == 'val' else test
data_iter = np.random.permutation(len(targ_split) if params['nImages'] == -1 else params['nImages'])
if params['withExtMask'] or params['computeSegAccuracy']:
gt_mask_data = get_dataset('','', params['mask_size'], params['mask_size'],
params['dataset'] if params['extMask_source']=='gt' else params['extMask_source'],
params['split'], select_attrs=configs[0]['selected_attrs'], bboxLoader=0, loadMasks = True)
commonIds = set(gt_mask_data.valid_ids).intersection(set(dataset.valid_ids))
commonIndexes = [i for i in range(len(dataset.valid_ids)) if dataset.valid_ids[i] in commonIds]
data_iter = commonIndexes if params['nImages'] == -1 else commonIndexes[:params['nImages']]
print('-----------------------------------------')
print(('%s'%(' | '.join(targ_split.selected_attrs))))
print('-----------------------------------------')
flatten = lambda l: [item for sublist in l for item in sublist]
selected_attrs = configs[0]['selected_attrs']
if params['showreconst'] and len(params['names'])>0:
params['names'] = flatten([[nm,nm+'-R'] for nm in params['names']])
#discriminator.load_state_dict(cv['discriminator_state_dict'])
c_idx = 0
np.set_printoptions(precision=2)
padimg = np.zeros((params['image_size'],5,3),dtype=np.uint8)
padimg[:,:,:] = 128
vggLoss = VGGLoss(network='squeeze')
cimg_cnt = 0
mask_bin_size = 0.1
n_bins = int(1.0/mask_bin_size)
vLTotal = np.zeros((n_bins,))
pSNRTotal = np.zeros((n_bins,))
ssimTotal = np.zeros((n_bins,))
import numpy as np
from perf import perf_timed
from glove import glove
def exact_nearest_neighbors(row, matrix, n=100):
""" nth nearest neighbors as array
with indices of nearest neighbors"""
token_vect = matrix[row]
if exact_nearest_neighbors.normed is None:
exact_nearest_neighbors.normed = np.linalg.norm(matrix, axis=1)
dotted = np.dot(matrix, token_vect)
nn = np.divide(dotted, exact_nearest_neighbors.normed)
top_n = np.argpartition(-nn, n)
import numpy as np
import torch
from dataclasses import dataclass
from typing import List
from jiant.tasks.core import (
BaseExample,
BaseTokenizedExample,
BaseDataRow,
BatchMixin,
Task,
TaskTypes,
)
from jiant.tasks.lib.templates.shared import (
labels_to_bimap,
add_cls_token,
create_input_set_from_tokens_and_segments,
)
from jiant.tasks.utils import truncate_sequences
from jiant.utils.python.io import read_json_lines
@dataclass
class Example(BaseExample):
guid: str
input_obs1: str
input_hyp1: str
input_hyp2: str
input_obs2: str
label: str
def tokenize(self, tokenizer):
return TokenizedExample(
guid=self.guid,
input_obs1=tokenizer.tokenize(self.input_obs1),
input_hyp1=tokenizer.tokenize(self.input_hyp1),
input_hyp2=tokenizer.tokenize(self.input_hyp2),
input_obs2=tokenizer.tokenize(self.input_obs2),
label_id=AbductiveNliTask.LABEL_TO_ID[self.label],
)
@dataclass
class TokenizedExample(BaseTokenizedExample):
guid: str
input_obs1: List
input_hyp1: List
input_hyp2: List
input_obs2: List
label_id: int
def featurize(self, tokenizer, feat_spec):
if feat_spec.sep_token_extra:
maybe_extra_sep = [tokenizer.sep_token]
maybe_extra_sep_segment_id = [feat_spec.sequence_a_segment_id]
special_tokens_count = 6 # CLS, SEP-SEP, SEP-SEP, SEP
else:
maybe_extra_sep = []
maybe_extra_sep_segment_id = []
special_tokens_count = 4 # CLS, SEP, SEP, SEP
input_obs1_a, input_hyp1_a, input_obs2_a = truncate_sequences(
tokens_ls=[self.input_obs1, self.input_hyp1, self.input_obs2],
max_length=feat_spec.max_seq_length - special_tokens_count - 1,
# -1 for self.question
)
input_obs1_b, input_hyp2_b, input_obs2_b = truncate_sequences(
tokens_ls=[self.input_obs1, self.input_hyp2, self.input_obs2],
max_length=feat_spec.max_seq_length - special_tokens_count - 1,
# -1 for self.question
)
unpadded_inputs_1 = add_cls_token(
unpadded_tokens=(
input_obs1_a
+ [tokenizer.sep_token]
+ maybe_extra_sep
+ input_hyp1_a
+ [tokenizer.sep_token]
+ maybe_extra_sep
+ input_obs2_a
+ [tokenizer.sep_token]
),
unpadded_segment_ids=(
# question + sep(s)
[feat_spec.sequence_a_segment_id] * (len(input_obs1_a) + 1)
+ maybe_extra_sep_segment_id
# premise + sep(s)
+ [feat_spec.sequence_a_segment_id] * (len(input_hyp1_a) + 1)
+ maybe_extra_sep_segment_id
# choice + sep
+ [feat_spec.sequence_b_segment_id] * (len(input_obs2_a) + 1)
),
tokenizer=tokenizer,
feat_spec=feat_spec,
)
unpadded_inputs_2 = add_cls_token(
unpadded_tokens=(
input_obs1_b
+ [tokenizer.sep_token]
+ maybe_extra_sep
+ input_hyp2_b
+ [tokenizer.sep_token]
+ maybe_extra_sep
+ input_obs2_b
+ [tokenizer.sep_token]
),
unpadded_segment_ids=(
# question + sep(s)
[feat_spec.sequence_a_segment_id] * (len(input_obs1_b) + 1)
+ maybe_extra_sep_segment_id
# premise + sep(s)
+ [feat_spec.sequence_a_segment_id] * (len(input_hyp2_b) + 1)
+ maybe_extra_sep_segment_id
# choice + sep
+ [feat_spec.sequence_b_segment_id] * (len(input_obs2_b) + 1)
),
tokenizer=tokenizer,
feat_spec=feat_spec,
)
input_set1 = create_input_set_from_tokens_and_segments(
unpadded_tokens=unpadded_inputs_1.unpadded_tokens,
unpadded_segment_ids=unpadded_inputs_1.unpadded_segment_ids,
tokenizer=tokenizer,
feat_spec=feat_spec,
)
input_set2 = create_input_set_from_tokens_and_segments(
unpadded_tokens=unpadded_inputs_2.unpadded_tokens,
unpadded_segment_ids=unpadded_inputs_2.unpadded_segment_ids,
tokenizer=tokenizer,
feat_spec=feat_spec,
)
return DataRow(
guid=self.guid,
input_ids=np.stack([input_set1.input_ids, input_set2.input_ids]),
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from prostate_cancer_nomograms.statistical_analysis.base.base_performance_evaluation import BasePerformanceEvaluation
from .decision_curve_analysis import DecisionCurveAnalysis
class DCA(BasePerformanceEvaluation):
def __init__(self, dataframe: pd.DataFrame, nomogram: str):
super().__init__(dataframe, nomogram)
@property
def binary_outcome_array(self):
binary_outcome = pd.DataFrame(data=np.zeros(len(self.y.index)), index=self.y.index)
positive_value_idx = self.y.index[self.y == self.positive_label].tolist()
binary_outcome.loc[positive_value_idx] = 1
binary_outcome_array = np.array(binary_outcome)
import numpy as np
import pkg_resources
from seekr import kmer_counts
class TestBasicCounter:
def _create_basic_counter_with_data(self, **kwargs):
infasta = 'tests/data/example.fa'
infasta = pkg_resources.resource_filename('seekr', infasta)
counter = kmer_counts.BasicCounter(infasta=infasta,
silent=True,
log2=kmer_counts.Log2.post,
**kwargs)
return counter
def test_counter_init(self):
counter = self._create_basic_counter_with_data()
assert len(counter.seqs) == 5
assert counter.seqs[0] == 'AAAAAA'
def test_occurrences_k1(self):
counter = self._create_basic_counter_with_data(k=1)
row = np.zeros(4)
expected = row.copy()
expected[0] = 1000
row = counter.occurrences(row, counter.seqs[0])
assert np.allclose(row, expected)
row = np.zeros(4)
expected=row.copy()
expected[1] = 500
expected[2] = 500
row = counter.occurrences(row, counter.seqs[1])
assert np.allclose(row, expected)
def test_occurrences_k2(self):
counter = self._create_basic_counter_with_data(k=2)
row = np.zeros(16)
expected=row.copy()
expected[5] = 454.545
expected[9] = 90.909
expected[10] = 454.545
row = counter.occurrences(row, counter.seqs[1])
assert np.allclose(row, expected)
def test_center_true(self):
counter = self._create_basic_counter_with_data(k=1)
counts = np.array([[1,2,3,4], [1, -2, 5, 10]], dtype=np.float32)
counter.counts = counts
expected = np.array([[0, 2, -1, -3], [0, -2, 1, 3]], dtype=np.float32)
counter.center()
assert np.allclose(counter.counts, expected)
"""
Train an MNIST image recognition model.
"""
import keras
import numpy as np
from keras.datasets import mnist
from keras.layers import (Conv2D, Dense, Dropout, Flatten, Input, MaxPooling1D,
MaxPooling2D, BatchNormalization)
from keras.models import Model, Sequential
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
# training data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = np.expand_dims(x_train / np.max(x_train), -1)
x_test = np.expand_dims(x_test / np.max(x_test), -1)
import pickle
import gzip
import numpy as np
import tensorflow as tf
def load_batch_gcnn(sample_files):
"""
Loads and concatenates a bunch of samples into one mini-batch.
"""
c_features = []
e_indices = []
e_features = []
v_features = []
candss = []
cand_choices = []
cand_scoress = []
# load samples
for filename_tensor in sample_files:
with gzip.open(str(filename_tensor.numpy(), 'utf-8'), 'rb') as f:
sample = pickle.load(f)
sample_state, _, sample_action, sample_cands, cand_scores = sample['data']
sample_cands = np.array(sample_cands)
cand_choice = np.where(sample_cands == sample_action)[0][0] # action index relative to candidates
c, e, v = sample_state
c_features.append(c['values']) # c['values'].shape=(4196, 5)
e_indices.append(e['indices']) # e['indices'].shape=(2, 32941)
e_features.append(e['values']) # e['values'].shape=(32941, 1)
v_features.append(v['values']) # v['values'].shape=(10100, 19)
candss.append(sample_cands) # (23,)
cand_choices.append(cand_choice) # 17
cand_scoress.append(cand_scores) # list, len(...)=23
n_cs_per_sample = [c.shape[0] for c in c_features]
n_vs_per_sample = [v.shape[0] for v in v_features]
n_cands_per_sample = [cds.shape[0] for cds in candss]
# concatenate samples in one big graph
c_features = np.concatenate(c_features, axis=0) # n x d
v_features = np.concatenate(v_features, axis=0) # K x e
e_features = np.concatenate(e_features, axis=0) # m x c
# edge indices have to be adjusted accordingly
# cv_shift has two rows matching the two rows of e_indices: the first row is for
# constraints, the second row for variables.
# If n_cs_per_sample is [n1, n2, n3], then after the statement below
# cv_shift[0, :] equals [0, n1, n1+n2]. What remains is to add cv_shift[0, :] to
# each block of the first row of e_indices, and cv_shift[1, :] to each block of
# the second row; the blocks of e_indices have widths K1, K2, K3, ...
# Note that "adding" the Python lists here simply concatenates them.
cv_shift = np.cumsum([
[0] + n_cs_per_sample[:-1],
[0] + n_vs_per_sample[:-1]
], axis=1)
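# Example with hypothetical sizes: n_cs_per_sample = [3, 2] and
# n_vs_per_sample = [4, 5] give cv_shift == [[0, 3], [0, 4]], i.e. the second
# sample's edge indices are offset by the 3 constraints and 4 variables of the
# first sample.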
# Use j:(j+1) instead of j so the sliced dimension is kept and the shapes broadcast.
# Adding numpy arrays: an (n, m) array plus an (n, 1) array broadcasts, adding the
# value in each row of the second array to the corresponding row of the first.
e_indices = np.concatenate([e_ind + cv_shift[:, j:(j+1)]
for j, e_ind in enumerate(e_indices)], axis=1)
# candidate indices as well
candss = np.concatenate([cands + shift
for cands, shift in zip(candss, cv_shift[1])]) # cands are the candidate variables, so the shift added here is the number of variables in the preceding samples, i.e. cv_shift[1]
cand_choices = np.array(cand_choices)
'''
Arm
dynamic
@version 0.1
@author <NAME> <<EMAIL>>
Research Group of Robots and Intelligent Machines
Date: August/2017
'''
import GRMI_MUR.Arm.arm_parts as Arm_parts
import GRMI_MUR.Arm.directkine as kine
import numpy as np
from math import cos, sin, pi
DH=Arm_parts.GetDH()
num_dof=Arm_parts.GetDOF()
l=Arm_parts.Getlength()
l1=l[0]
l2=l[1]
l3=l[2]
l4=l[3]
m=Arm_parts.GetMass()
m1=m[0]
m2=m[1]
m3=m[2]
m4=m[3]
m5=m[4]
def torques(q,qp,qpp,fe):
q1=q[0];
q2=q[1];
q3=q[2];
q4=q[3];
q5=q[4];
qp1=qp[0];
qp2=qp[1];
qp3=qp[2];
qp4=qp[3];
qp5=qp[4];
qpp1=qpp[0];
qpp2=qpp[1];
qpp3=qpp[2];
qpp4=qpp[3];
qpp5=qpp[4];
fx=fe[0];
fy=fe[1];
fz=fe[2];
nx=fe[3];
ny=fe[4];
nz=fe[5];
## N-E 1
A01=np.matrix([[cos(q1), 0, -sin(q1), l1*cos(q1)], [sin(q1), 0, cos(q1), l1*sin(q1)], [0, -1, 0, 0], [0, 0, 0, 1]]);
A12=np.matrix([[cos(q2), -sin(q2), 0, l2*cos(q2)], [sin(q2), cos(q2), 0, l2*sin(q2)], [0, 0, 1, 0], [0, 0, 0, 1]]);
A23=np.matrix([[cos(q3), -sin(q3), 0, l2*cos(q3)], [sin(q3), cos(q3), 0, l2*sin(q3)], [0, 0, 1, 0], [0, 0, 0, 1]]);
A34=np.matrix([[cos(q4), 0, -sin(q4), 0], [sin(q4), 0, cos(q4), 0], [0, -1, 0, -l4], [0, 0, 0, 1]]);
A45=np.matrix([[cos(q5), -sin(q5), 0, 0], [sin(q5), cos(q5), 0, 0], [0, 0, 1, l3], [0, 0, 0, 1]]);
## N-E 2
wx0=0;
wy0=0;
wz0=0;
wpx0=0;
wpy0=0;
wpz0=0;
vx0=0;
vy0=0;
vz0=0;
vpx0=0;
vpy0=0;
g=9.81;
# Base Velocity
w00=np.matrix([[wx0],[wy0],[wz0]]);
wp00=np.matrix([[wpx0],[wpy0],[wpz0]]);
v00=np.matrix([[vx0],[vy0],[vz0]]);
vp00=np.matrix([[vpx0],[vpy0],[-g]]);
# Forces and Torques Vectors
f66=np.matrix([[fx],[fy],[fz]]);
n66=np.matrix([[nx],[ny],[nz]]);
# Set Positions and inertials
z0=np.matrix([[0],[0],[1]]);
p11=np.matrix([[l1],[0],[0]]);
p22=np.matrix([[l2],[0],[0]]);
p33=np.matrix([[l2],[0],[0]]);
p44=np.matrix([[0],[-l4*sin(-pi/2)],[-l4*cos(-pi/2)]]);
p55=np.matrix([[0],[l3*sin(0)],[l3*cos(0)]]);
# Centers distance
s11x=0.041007; s11y=-0.017564; s11z=0.015976;
s22x=0.036678; s22y=-0.018167; s22z=-0.016524;
s33x=0.036678; s33y=0.018167; s33z=0.016524;
s44x=0.027035; s44y=-0.019879; s44z=0.011106;
s55x=0.014492; s55y=-0.024296; s55z=0.05929;
s11=np.matrix([[s11x],[s11y],[s11z]]);
s22=np.matrix([[s22x],[s22y],[s22z]]);
s33=np.matrix([[s33x],[s33y],[s33z]]);
s44=np.matrix([[s44x],[s44y],[s44z]]);
s55=np.matrix([[s55x],[s55y],[s55z]]);
I=np.identity(3)
## N-E 3
A01=kine.directkine_A01(q1)
A12=kine.directkine_A12(q2)
A23=kine.directkine_A23(q3)
A34=kine.directkine_A34(q4)
A45=kine.directkine_A45(q5)
R01 = A01[0:3, 0:3]
R12 = A12[0:3, 0:3]
R23 = A23[0:3, 0:3]
R34 = A34[0:3, 0:3]
R45 = A45[0:3, 0:3]
R05 = np.round(R01*R12*R23*R34*R45, 4)
import numpy as np
def invert_rankings(rankings, dtype=None):
'''
Invert indices in a matrix of rankings, ranking per row.
'''
if dtype is None:
inverted = np.zeros(rankings.shape)
else:
inverted = np.zeros(rankings.shape, dtype=dtype)
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib import cm, colors
from astropy.modeling import models, fitting
# Reading in all data files at once
import glob
path_normal ='/projects/p30137/ageller/testing/EBLSST/add_m5/output_files'
allFiles_normal = glob.glob(path_normal + "/*.csv")
path_fast = '/projects/p30137/ageller/testing/EBLSST/add_m5/fast/old/output_files'
allFiles_fast = glob.glob(path_fast + "/*.csv")
path_obsDist = '/projects/p30137/ageller/testing/EBLSST/add_m5/fast/old/obsDist/output_files'
allFiles_obsDist = glob.glob(path_obsDist + "/*.csv")
N_totalnormal_array = []
N_totalobservablenormal_array = []
N_totalrecoverablenormal_array = []
N_totalnormal_array_03 = []
N_totalobservablenormal_array_03 = []
N_totalrecoverablenormal_array_03 = []
N_totalnormal_array_1 = []
N_totalobservablenormal_array_1 = []
N_totalrecoverablenormal_array_1 = []
N_totalnormal_array_10 = []
N_totalobservablenormal_array_10 = []
N_totalrecoverablenormal_array_10 = []
N_totalnormal_array_30 = []
N_totalobservablenormal_array_30 = []
N_totalrecoverablenormal_array_30 = []
N_totalnormal_array_100 = []
N_totalobservablenormal_array_100 = []
N_totalrecoverablenormal_array_100 = []
N_totalnormal_array_1000 = []
N_totalobservablenormal_array_1000 = []
N_totalrecoverablenormal_array_1000 = []
N_totalnormal22_array = []
N_totalobservablenormal22_array = []
N_totalrecoverablenormal22_array = []
N_totalnormal22_array_03 = []
N_totalobservablenormal22_array_03 = []
N_totalrecoverablenormal22_array_03 = []
N_totalnormal22_array_1 = []
N_totalobservablenormal22_array_1 = []
N_totalrecoverablenormal22_array_1 = []
N_totalnormal22_array_10 = []
N_totalobservablenormal22_array_10 = []
N_totalrecoverablenormal22_array_10 = []
N_totalnormal22_array_30 = []
N_totalobservablenormal22_array_30 = []
N_totalrecoverablenormal22_array_30 = []
N_totalnormal22_array_100 = []
N_totalobservablenormal22_array_100 = []
N_totalrecoverablenormal22_array_100 = []
N_totalnormal22_array_1000 = []
N_totalobservablenormal22_array_1000 = []
N_totalrecoverablenormal22_array_1000 = []
N_totalnormal195_array = []
N_totalobservablenormal195_array = []
N_totalrecoverablenormal195_array = []
N_totalnormal195_array_03 = []
N_totalobservablenormal195_array_03 = []
N_totalrecoverablenormal195_array_03 = []
N_totalnormal195_array_1 = []
N_totalobservablenormal195_array_1 = []
N_totalrecoverablenormal195_array_1 = []
N_totalnormal195_array_10 = []
N_totalobservablenormal195_array_10 = []
N_totalrecoverablenormal195_array_10 = []
N_totalnormal195_array_30 = []
N_totalobservablenormal195_array_30 = []
N_totalrecoverablenormal195_array_30 = []
N_totalnormal195_array_100 = []
N_totalobservablenormal195_array_100 = []
N_totalrecoverablenormal195_array_100 = []
N_totalnormal195_array_1000 = []
N_totalobservablenormal195_array_1000 = []
N_totalrecoverablenormal195_array_1000 = []
N_totalfast_array = []
N_totalobservablefast_array = []
N_totalrecoverablefast_array = []
N_totalfast_array_03 = []
N_totalobservablefast_array_03 = []
N_totalrecoverablefast_array_03 = []
N_totalfast_array_1 = []
N_totalobservablefast_array_1 = []
N_totalrecoverablefast_array_1 = []
N_totalfast_array_10 = []
N_totalobservablefast_array_10 = []
N_totalrecoverablefast_array_10 = []
N_totalfast_array_30 = []
N_totalobservablefast_array_30 = []
N_totalrecoverablefast_array_30 = []
N_totalfast_array_100 = []
N_totalobservablefast_array_100 = []
N_totalrecoverablefast_array_100 = []
N_totalfast_array_1000 = []
N_totalobservablefast_array_1000 = []
N_totalrecoverablefast_array_1000 = []
N_totalfast22_array = []
N_totalobservablefast22_array = []
N_totalrecoverablefast22_array = []
N_totalfast22_array_03 = []
N_totalobservablefast22_array_03 = []
N_totalrecoverablefast22_array_03 = []
N_totalfast22_array_1 = []
N_totalobservablefast22_array_1 = []
N_totalrecoverablefast22_array_1 = []
N_totalfast22_array_10 = []
N_totalobservablefast22_array_10 = []
N_totalrecoverablefast22_array_10 = []
N_totalfast22_array_30 = []
N_totalobservablefast22_array_30 = []
N_totalrecoverablefast22_array_30 = []
N_totalfast22_array_100 = []
N_totalobservablefast22_array_100 = []
N_totalrecoverablefast22_array_100 = []
N_totalfast22_array_1000 = []
N_totalobservablefast22_array_1000 = []
N_totalrecoverablefast22_array_1000 = []
N_totalfast195_array = []
N_totalobservablefast195_array = []
N_totalrecoverablefast195_array = []
N_totalfast195_array_03 = []
N_totalobservablefast195_array_03 = []
N_totalrecoverablefast195_array_03 = []
N_totalfast195_array_1 = []
N_totalobservablefast195_array_1 = []
N_totalrecoverablefast195_array_1 = []
N_totalfast195_array_10 = []
N_totalobservablefast195_array_10 = []
N_totalrecoverablefast195_array_10 = []
N_totalfast195_array_30 = []
N_totalobservablefast195_array_30 = []
N_totalrecoverablefast195_array_30 = []
N_totalfast195_array_100 = []
N_totalobservablefast195_array_100 = []
N_totalrecoverablefast195_array_100 = []
N_totalfast195_array_1000 = []
N_totalobservablefast195_array_1000 = []
N_totalrecoverablefast195_array_1000 = []
N_totalobsDist_array = []
N_totalobservableobsDist_array = []
N_totalrecoverableobsDist_array = []
N_totalobsDist_array_03 = []
N_totalobservableobsDist_array_03 = []
N_totalrecoverableobsDist_array_03 = []
N_totalobsDist_array_1 = []
N_totalobservableobsDist_array_1 = []
N_totalrecoverableobsDist_array_1 = []
N_totalobsDist_array_10 = []
N_totalobservableobsDist_array_10 = []
N_totalrecoverableobsDist_array_10 = []
N_totalobsDist_array_30 = []
N_totalobservableobsDist_array_30 = []
N_totalrecoverableobsDist_array_30 = []
N_totalobsDist_array_100 = []
N_totalobservableobsDist_array_100 = []
N_totalrecoverableobsDist_array_100 = []
N_totalobsDist_array_1000 = []
N_totalobservableobsDist_array_1000 = []
N_totalrecoverableobsDist_array_1000 = []
N_totalobsDist22_array = []
N_totalobservableobsDist22_array = []
N_totalrecoverableobsDist22_array = []
N_totalobsDist22_array_03 = []
N_totalobservableobsDist22_array_03 = []
N_totalrecoverableobsDist22_array_03 = []
N_totalobsDist22_array_1 = []
N_totalobservableobsDist22_array_1 = []
N_totalrecoverableobsDist22_array_1 = []
N_totalobsDist22_array_10 = []
N_totalobservableobsDist22_array_10 = []
N_totalrecoverableobsDist22_array_10 = []
N_totalobsDist22_array_30 = []
N_totalobservableobsDist22_array_30 = []
N_totalrecoverableobsDist22_array_30 = []
N_totalobsDist22_array_100 = []
N_totalobservableobsDist22_array_100 = []
N_totalrecoverableobsDist22_array_100 = []
N_totalobsDist22_array_1000 = []
N_totalobservableobsDist22_array_1000 = []
N_totalrecoverableobsDist22_array_1000 = []
N_totalobsDist195_array = []
N_totalobservableobsDist195_array = []
N_totalrecoverableobsDist195_array = []
N_totalobsDist195_array_03 = []
N_totalobservableobsDist195_array_03 = []
N_totalrecoverableobsDist195_array_03 = []
N_totalobsDist195_array_1 = []
N_totalobservableobsDist195_array_1 = []
N_totalrecoverableobsDist195_array_1 = []
N_totalobsDist195_array_10 = []
N_totalobservableobsDist195_array_10 = []
N_totalrecoverableobsDist195_array_10 = []
N_totalobsDist195_array_30 = []
N_totalobservableobsDist195_array_30 = []
N_totalrecoverableobsDist195_array_30 = []
N_totalobsDist195_array_100 = []
N_totalobservableobsDist195_array_100 = []
N_totalrecoverableobsDist195_array_100 = []
N_totalobsDist195_array_1000 = []
N_totalobservableobsDist195_array_1000 = []
N_totalrecoverableobsDist195_array_1000 = []
def fitRagfb():
x = [0.05, 0.1, 1, 8, 15] #estimates of midpoints in bins, and using this: https:/sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html
y = [0.20, 0.35, 0.50, 0.70, 0.75]
init = models.PowerLaw1D(amplitude=0.5, x_0=1, alpha=-1.)
fitter = fitting.LevMarLSQFitter()
fit = fitter(init, x, y)
return fit
fbFit= fitRagfb()
mbins = np.arange(0,10, 0.1, dtype='float')
cutP = 0.10 #condition on recoverability/tolerance
for filenormal_ in sorted(allFiles_normal):
filename = filenormal_[60:]
fileid = filename.strip('output_file.csv')
print ("I'm starting " + fileid)
datnormal = pd.read_csv(filenormal_, sep = ',', header=2)
PeriodIn = datnormal['p'] # input period -- 'p' in data file
##########################################################
datnormal1 = pd.read_csv(filenormal_, sep = ',', header=0, nrows=1)
N_tri = datnormal1["NstarsTRILEGAL"][0]
#print("N_tri = ", N_tri)
Nall = len(PeriodIn)
m1hAll0, m1b = np.histogram(datnormal["m1"], bins=mbins)
dm1 = np.diff(m1b)
m1val = m1b[:-1] + dm1/2.
fb = np.sum(m1hAll0/Nall*fbFit(m1val))
N_mult = N_tri*fb
##########################################################
if len(PeriodIn) == 0.:
continue
if N_tri == 0:
continue
else:
PeriodOut = datnormal['LSM_PERIOD'] #LSM_PERIOD in data file
appMagMean = datnormal['appMagMean'] #apparent magnitude, will use to make cuts for 24 (default), 22, and then Kepler's range (?? -- brighter than LSST can manage-- to 19) OR 19.5 (SNR = 10)
observable = datnormal.loc[PeriodOut != -999].index
observable_03 = datnormal.loc[(PeriodIn <= 0.3) & (PeriodOut != -999)].index
observable_1 = datnormal.loc[(PeriodIn <= 1) & (PeriodOut != -999)].index
observable_10 = datnormal.loc[(PeriodIn <= 10) & (PeriodOut != -999)].index
observable_30 = datnormal.loc[(PeriodIn <= 30) & (PeriodOut != -999)].index
observable_100 = datnormal.loc[(PeriodIn <= 100) & (PeriodOut != -999)].index
observable_1000 = datnormal.loc[(PeriodIn <= 1000) & (PeriodOut != -999)].index
observable_22 = datnormal.loc[(PeriodOut != -999) & (appMagMean <= 22.)].index
observable_03_22 = datnormal.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_1_22 = datnormal.loc[(PeriodIn <= 1) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_10_22 = datnormal.loc[(PeriodIn <= 10) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_30_22 = datnormal.loc[(PeriodIn <= 30) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_100_22 = datnormal.loc[(PeriodIn <= 100) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_1000_22 = datnormal.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & (appMagMean <= 22.)].index
observable_195 = datnormal.loc[(PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_03_195 = datnormal.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_1_195 = datnormal.loc[(PeriodIn <= 1) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_10_195 = datnormal.loc[(PeriodIn <= 10) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_30_195 = datnormal.loc[(PeriodIn <= 30) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_100_195 = datnormal.loc[(PeriodIn <= 100) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
observable_1000_195 = datnormal.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & (appMagMean <= 19.5)].index
fullP = abs(PeriodOut - PeriodIn)/PeriodIn
halfP = abs(PeriodOut - 0.5*PeriodIn)/(0.5*PeriodIn)
twiceP = abs(PeriodOut - 2*PeriodIn)/(2*PeriodIn)
recoverable = datnormal.loc[(PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_03 = datnormal.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_1 = datnormal.loc[(PeriodIn <= 1) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_10 = datnormal.loc[(PeriodIn <= 10) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_30 = datnormal.loc[(PeriodIn <= 30) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_100 = datnormal.loc[(PeriodIn <= 100) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_1000 = datnormal.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP))].index
recoverable_22 = datnormal.loc[(PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_03_22 = datnormal.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_1_22 = datnormal.loc[(PeriodIn <= 1) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_10_22 = datnormal.loc[(PeriodIn <= 10) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_30_22 = datnormal.loc[(PeriodIn <= 30) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_100_22 = datnormal.loc[(PeriodIn <= 100) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_1000_22 = datnormal.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 22.)].index
recoverable_195 = datnormal.loc[(PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_03_195 = datnormal.loc[(PeriodIn <= 0.3) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_1_195 = datnormal.loc[(PeriodIn <= 1) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_10_195 = datnormal.loc[(PeriodIn <= 10) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_30_195 = datnormal.loc[(PeriodIn <= 30) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_100_195 = datnormal.loc[(PeriodIn <= 100) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
recoverable_1000_195 = datnormal.loc[(PeriodIn <= 1000) & (PeriodOut != -999) & ((fullP < cutP) | (halfP < cutP) | (twiceP < cutP)) & (appMagMean <= 19.5)].index
P03 = datnormal.loc[PeriodIn <= 0.3].index
P1 = datnormal.loc[PeriodIn <= 1].index
P10 = datnormal.loc[PeriodIn <= 10].index
P30 = datnormal.loc[PeriodIn <= 30].index
P100 = datnormal.loc[PeriodIn <= 100].index
P1000 = datnormal.loc[PeriodIn <= 1000].index
P_22 = datnormal.loc[appMagMean <= 22.].index
P03_22 = datnormal.loc[(PeriodIn <= 0.3) & (appMagMean <= 22.)].index
P1_22 = datnormal.loc[(PeriodIn <= 1) & (appMagMean <= 22.)].index
P10_22 = datnormal.loc[(PeriodIn <= 10) & (appMagMean <= 22.)].index
P30_22 = datnormal.loc[(PeriodIn <= 30) & (appMagMean <= 22.)].index
P100_22 = datnormal.loc[(PeriodIn <= 100) & (appMagMean <= 22.)].index
P1000_22 = datnormal.loc[(PeriodIn <= 1000) & (appMagMean <= 22.)].index
P_195 = datnormal.loc[appMagMean <= 19.5].index
P03_195 = datnormal.loc[(PeriodIn <= 0.3) & (appMagMean <= 19.5)].index
P1_195 = datnormal.loc[(PeriodIn <= 1) & (appMagMean <= 19.5)].index
P10_195 = datnormal.loc[(PeriodIn <= 10) & (appMagMean <= 19.5)].index
P30_195 = datnormal.loc[(PeriodIn <= 30) & (appMagMean <= 19.5)].index
P100_195 = datnormal.loc[(PeriodIn <= 100) & (appMagMean <= 19.5)].index
P1000_195 = datnormal.loc[(PeriodIn <= 1000) & (appMagMean <= 19.5)].index
N_all = (len(PeriodIn)/len(PeriodIn))*N_mult
N_all03 = (len(P03)/len(PeriodIn))*N_mult
N_all1 = (len(P1)/len(PeriodIn))*N_mult
N_all10 = (len(P10)/len(PeriodIn))*N_mult
N_all30 = (len(P30)/len(PeriodIn))*N_mult
N_all100 = (len(P100)/len(PeriodIn))*N_mult
N_all1000 = (len(P1000)/len(PeriodIn))*N_mult
N_all_22 = (len(P_22)/len(PeriodIn))*N_mult
N_all03_22 = (len(P03_22)/len(PeriodIn))*N_mult
N_all1_22 = (len(P1_22)/len(PeriodIn))*N_mult
N_all10_22 = (len(P10_22)/len(PeriodIn))*N_mult
N_all30_22 = (len(P30_22)/len(PeriodIn))*N_mult
N_all100_22 = (len(P100_22)/len(PeriodIn))*N_mult
N_all1000_22 = (len(P1000_22)/len(PeriodIn))*N_mult
N_all_195 = (len(P_195)/len(PeriodIn))*N_mult
N_all03_195 = (len(P03_195)/len(PeriodIn))*N_mult
N_all1_195 = (len(P1_195)/len(PeriodIn))*N_mult
N_all10_195 = (len(P10_195)/len(PeriodIn))*N_mult
N_all30_195 = (len(P30_195)/len(PeriodIn))*N_mult
N_all100_195 = (len(P100_195)/len(PeriodIn))*N_mult
N_all1000_195 = (len(P1000_195)/len(PeriodIn))*N_mult
N_obs = (len(observable)/len(PeriodIn))*N_mult
N_obs03 = (len(observable_03)/len(PeriodIn))*N_mult
N_obs1 = (len(observable_1)/len(PeriodIn))*N_mult
N_obs10 = (len(observable_10)/len(PeriodIn))*N_mult
N_obs30 = (len(observable_30)/len(PeriodIn))*N_mult
N_obs100 = (len(observable_100)/len(PeriodIn))*N_mult
N_obs1000 = (len(observable_1000)/len(PeriodIn))*N_mult
N_obs_22 = (len(observable_22)/len(PeriodIn))*N_mult
N_obs03_22 = (len(observable_03_22)/len(PeriodIn))*N_mult
N_obs1_22 = (len(observable_1_22)/len(PeriodIn))*N_mult
N_obs10_22 = (len(observable_10_22)/len(PeriodIn))*N_mult
N_obs30_22 = (len(observable_30_22)/len(PeriodIn))*N_mult
N_obs100_22 = (len(observable_100_22)/len(PeriodIn))*N_mult
N_obs1000_22 = (len(observable_1000_22)/len(PeriodIn))*N_mult
N_obs_195 = (len(observable_195)/len(PeriodIn))*N_mult
N_obs03_195 = (len(observable_03_195)/len(PeriodIn))*N_mult
N_obs1_195 = (len(observable_1_195)/len(PeriodIn))*N_mult
N_obs10_195 = (len(observable_10_195)/len(PeriodIn))*N_mult
N_obs30_195 = (len(observable_30_195)/len(PeriodIn))*N_mult
N_obs100_195 = (len(observable_100_195)/len(PeriodIn))*N_mult
N_obs1000_195 = (len(observable_1000_195)/len(PeriodIn))*N_mult
N_rec = (len(recoverable)/len(PeriodIn))*N_mult
N_rec03 = (len(recoverable_03)/len(PeriodIn))*N_mult
N_rec1 = (len(recoverable_1)/len(PeriodIn))*N_mult
N_rec10 = (len(recoverable_10)/len(PeriodIn))*N_mult
N_rec30 = (len(recoverable_30)/len(PeriodIn))*N_mult
N_rec100 = (len(recoverable_100)/len(PeriodIn))*N_mult
N_rec1000 = (len(recoverable_1000)/len(PeriodIn))*N_mult
N_rec_22 = (len(recoverable_22)/len(PeriodIn))*N_mult
N_rec03_22 = (len(recoverable_03_22)/len(PeriodIn))*N_mult
N_rec1_22 = (len(recoverable_1_22)/len(PeriodIn))*N_mult
N_rec10_22 = (len(recoverable_10_22)/len(PeriodIn))*N_mult
N_rec30_22 = (len(recoverable_30_22)/len(PeriodIn))*N_mult
N_rec100_22 = (len(recoverable_100_22)/len(PeriodIn))*N_mult
N_rec1000_22 = (len(recoverable_1000_22)/len(PeriodIn))*N_mult
N_rec_195 = (len(recoverable_195)/len(PeriodIn))*N_mult
N_rec03_195 = (len(recoverable_03_195)/len(PeriodIn))*N_mult
N_rec1_195 = (len(recoverable_1_195)/len(PeriodIn))*N_mult
N_rec10_195 = (len(recoverable_10_195)/len(PeriodIn))*N_mult
N_rec30_195 = (len(recoverable_30_195)/len(PeriodIn))*N_mult
N_rec100_195 = (len(recoverable_100_195)/len(PeriodIn))*N_mult
N_rec1000_195 = (len(recoverable_1000_195)/len(PeriodIn))*N_mult
N_totalnormal_array.append(float(N_all))
N_totalobservablenormal_array.append(float(N_obs))
N_totalrecoverablenormal_array.append(float(N_rec))
N_totalnormal_array_03.append(float(N_all03))
N_totalobservablenormal_array_03.append(float(N_obs03))
N_totalrecoverablenormal_array_03.append(float(N_rec03))
N_totalnormal_array_1.append(float(N_all1))
N_totalobservablenormal_array_1.append(float(N_obs1))
N_totalrecoverablenormal_array_1.append(float(N_rec1))
N_totalnormal_array_10.append(float(N_all10))
N_totalobservablenormal_array_10.append(float(N_obs10))
N_totalrecoverablenormal_array_10.append(float(N_rec10))
N_totalnormal_array_30.append(float(N_all30))
N_totalobservablenormal_array_30.append(float(N_obs30))
N_totalrecoverablenormal_array_30.append(float(N_rec30))
N_totalnormal_array_100.append(float(N_all100))
N_totalobservablenormal_array_100.append(float(N_obs100))
N_totalrecoverablenormal_array_100.append(float(N_rec100))
N_totalnormal_array_1000.append(float(N_all1000))
N_totalobservablenormal_array_1000.append(float(N_obs1000))
N_totalrecoverablenormal_array_1000.append(float(N_rec1000))
N_totalnormal22_array.append(float(N_all_22))
N_totalobservablenormal22_array.append(float(N_obs_22))
N_totalrecoverablenormal22_array.append(float(N_rec_22))
N_totalnormal22_array_03.append(float(N_all03_22))
N_totalobservablenormal22_array_03.append(float(N_obs03_22))
N_totalrecoverablenormal22_array_03.append(float(N_rec03_22))
N_totalnormal22_array_1.append(float(N_all1_22))
N_totalobservablenormal22_array_1.append(float(N_obs1_22))
N_totalrecoverablenormal22_array_1.append(float(N_rec1_22))
N_totalnormal22_array_10.append(float(N_all10_22))
N_totalobservablenormal22_array_10.append(float(N_obs10_22))
N_totalrecoverablenormal22_array_10.append(float(N_rec10_22))
N_totalnormal22_array_30.append(float(N_all30_22))
N_totalobservablenormal22_array_30.append(float(N_obs30_22))
N_totalrecoverablenormal22_array_30.append(float(N_rec30_22))
N_totalnormal22_array_100.append(float(N_all100_22))
N_totalobservablenormal22_array_100.append(float(N_obs100_22))
N_totalrecoverablenormal22_array_100.append(float(N_rec100_22))
N_totalnormal22_array_1000.append(float(N_all1000_22))
N_totalobservablenormal22_array_1000.append(float(N_obs1000_22))
N_totalrecoverablenormal22_array_1000.append(float(N_rec1000_22))
N_totalnormal195_array.append(float(N_all_195))
N_totalobservablenormal195_array.append(float(N_obs_195))
N_totalrecoverablenormal195_array.append(float(N_rec_195))
N_totalnormal195_array_03.append(float(N_all03_195))
N_totalobservablenormal195_array_03.append(float(N_obs03_195))
N_totalrecoverablenormal195_array_03.append(float(N_rec03_195))
N_totalnormal195_array_1.append(float(N_all1_195))
N_totalobservablenormal195_array_1.append(float(N_obs1_195))
N_totalrecoverablenormal195_array_1.append(float(N_rec1_195))
N_totalnormal195_array_10.append(float(N_all10_195))
N_totalobservablenormal195_array_10.append(float(N_obs10_195))
N_totalrecoverablenormal195_array_10.append(float(N_rec10_195))
N_totalnormal195_array_30.append(float(N_all30_195))
N_totalobservablenormal195_array_30.append(float(N_obs30_195))
N_totalrecoverablenormal195_array_30.append(float(N_rec30_195))
N_totalnormal195_array_100.append(float(N_all100_195))
N_totalobservablenormal195_array_100.append(float(N_obs100_195))
N_totalrecoverablenormal195_array_100.append(float(N_rec100_195))
N_totalnormal195_array_1000.append(float(N_all1000_195))
N_totalobservablenormal195_array_1000.append(float(N_obs1000_195))
N_totalrecoverablenormal195_array_1000.append(float(N_rec1000_195))
N_totalnormal = np.sum(N_totalnormal_array)
N_totalnormal_03 = np.sum(N_totalnormal_array_03)
N_totalnormal_1 = np.sum(N_totalnormal_array_1)
N_totalnormal_10 = np.sum(N_totalnormal_array_10)
N_totalnormal_30 = np.sum(N_totalnormal_array_30)
N_totalnormal_100 = np.sum(N_totalnormal_array_100)
N_totalnormal_1000 = np.sum(N_totalnormal_array_1000)
N_totalobservablenormal = np.sum(N_totalobservablenormal_array)
N_totalobservablenormal_03 = np.sum(N_totalobservablenormal_array_03)
N_totalobservablenormal_1 = np.sum(N_totalobservablenormal_array_1)
N_totalobservablenormal_10 = np.sum(N_totalobservablenormal_array_10)
N_totalobservablenormal_30 = np.sum(N_totalobservablenormal_array_30)
N_totalobservablenormal_100 = np.sum(N_totalobservablenormal_array_100)
N_totalobservablenormal_1000 = np.sum(N_totalobservablenormal_array_1000)
N_totalrecoverablenormal = np.sum(N_totalrecoverablenormal_array)
N_totalrecoverablenormal_03 = np.sum(N_totalrecoverablenormal_array_03)
N_totalrecoverablenormal_1 = np.sum(N_totalrecoverablenormal_array_1)
N_totalrecoverablenormal_10 = np.sum(N_totalrecoverablenormal_array_10)
N_totalrecoverablenormal_30 = np.sum(N_totalrecoverablenormal_array_30)
N_totalrecoverablenormal_100 = np.sum(N_totalrecoverablenormal_array_100)
N_totalrecoverablenormal_1000 = np.sum(N_totalrecoverablenormal_array_1000)
N_totalnormal22 = np.sum(N_totalnormal22_array)
N_totalnormal22_03 = np.sum(N_totalnormal22_array_03)
N_totalnormal22_1 = np.sum(N_totalnormal22_array_1)
N_totalnormal22_10 = np.sum(N_totalnormal22_array_10)
N_totalnormal22_30 = np.sum(N_totalnormal22_array_30)
N_totalnormal22_100 = np.sum(N_totalnormal22_array_100)
N_totalnormal22_1000 = np.sum(N_totalnormal22_array_1000)
N_totalobservablenormal22 = np.sum(N_totalobservablenormal22_array)
N_totalobservablenormal22_03 = np.sum(N_totalobservablenormal22_array_03)
N_totalobservablenormal22_1 = np.sum(N_totalobservablenormal22_array_1)
N_totalobservablenormal22_10 = np.sum(N_totalobservablenormal22_array_10)
N_totalobservablenormal22_30 = np.sum(N_totalobservablenormal22_array_30)
N_totalobservablenormal22_100 = np.sum(N_totalobservablenormal22_array_100)
N_totalobservablenormal22_1000 = np.sum(N_totalobservablenormal22_array_1000)
N_totalrecoverablenormal22 = np.sum(N_totalrecoverablenormal22_array)
N_totalrecoverablenormal22_03 = np.sum(N_totalrecoverablenormal22_array_03)
N_totalrecoverablenormal22_1 = np.sum(N_totalrecoverablenormal22_array_1)
N_totalrecoverablenormal22_10 = np.sum(N_totalrecoverablenormal22_array_10)
N_totalrecoverablenormal22_30 = np.sum(N_totalrecoverablenormal22_array_30)
N_totalrecoverablenormal22_100 = np.sum(N_totalrecoverablenormal22_array_100)
N_totalrecoverablenormal22_1000 = np.sum(N_totalrecoverablenormal22_array_1000)
N_totalnormal195 = np.sum(N_totalnormal195_array)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# drs_file.py
# DRS file definitions and input/output handling (DrsInputFile, DrsFitsFile)
Created on 2019-01-19 at 12:03
@author: cook
"""
import numpy as np
from astropy import version as av
from astropy.table import Table
import os
from collections import OrderedDict
import copy
import warnings
from typing import Type
from apero.core.core import drs_log
from apero.core import constants
from apero.core import math as mp
from apero.core.instruments.default import output_filenames as outf
from apero import lang
from apero.io import drs_text
from apero.io import drs_fits
from apero.io import drs_path
from apero.io import drs_strings
# =============================================================================
# Define variables
# =============================================================================
__NAME__ = 'drs_file.py'
__INSTRUMENT__ = 'None'
# Get constants
Constants = constants.load(__INSTRUMENT__)
# Get version and author
__version__ = Constants['DRS_VERSION']
__author__ = Constants['AUTHORS']
__date__ = Constants['DRS_DATE']
__release__ = Constants['DRS_RELEASE']
# Get function string
display_func = drs_log.display_func
# Get Logging function
WLOG = drs_log.wlog
# Get the text types
TextEntry = lang.drs_text.TextEntry
TextDict = lang.drs_text.TextDict
HelpText = lang.drs_text.HelpDict
# TODO: This should be changed for astropy -> 2.0.1
# hdu.scale has a bug before astropy version 2.0.1
if av.major < 2 or (av.major == 2 and av.minor < 1):
SCALEARGS = dict(bscale=(1.0 + 1.0e-8), bzero=1.0e-8)
else:
SCALEARGS = dict(bscale=1, bzero=0)
# -----------------------------------------------------------------------------
# =============================================================================
# Define File classes
# =============================================================================
class DrsInputFile:
def __init__(self, name, **kwargs):
"""
Create a DRS Input File object
:param name: string, the name of the DRS input file
:param ext: string, the extension for the DRS input file (without
the '.'), i.e. for A.txt use ext='txt'
- Parent class for Drs Fits File object (DrsFitsFile)
"""
# set function name
_ = display_func(None, '__init__', __NAME__, 'DrsInputFile')
# define a name
self.name = name
# define the extension
self.filetype = kwargs.get('filetype', '')
self.suffix = kwargs.get('suffix', '')
self.remove_insuffix = kwargs.get('remove_insuffix', False)
self.prefix = kwargs.get('prefix', '')
self.filename = None
self.intype = None
# get fiber type (if set)
self.fibers = kwargs.get('fibers', None)
self.fiber = kwargs.get('fiber', None)
# allow instance to be associated with a recipe
self.recipe = kwargs.get('recipe', None)
# set empty file attributes
self.filename = kwargs.get('filename', None)
self.path = kwargs.get('path', None)
self.basename = kwargs.get('basename', None)
self.inputdir = kwargs.get('inputdir', None)
self.directory = kwargs.get('directory', None)
self.data = kwargs.get('data', None)
self.header = kwargs.get('header', None)
self.fileset = kwargs.get('fileset', [])
self.filesetnames = kwargs.get('filesetnames', [])
self.indextable = kwargs.get('index', None)
self.outfunc = kwargs.get('outfunc', None)
# allow instance to be associated with a filename
self.set_filename(kwargs.get('filename', None))
def set_filename(self, filename):
"""
Set the filename, basename and directory name from an absolute path
:param filename: string, absolute path to the file
:return None:
"""
# set function name
_ = display_func(None, 'set_filename', __NAME__, 'DrsInputFile')
# skip if filename is None
if filename is None:
return True
# set filename, basename and directory name
self.filename = str(os.path.abspath(filename))
self.basename = os.path.basename(filename)
self.path = os.path.dirname(filename)
def check_filename(self):
# set function name
_ = display_func(None, 'check_filename', __NAME__, 'DrsInputFile')
# check that filename isn't None
if self.filename is None:
func = self.__repr__()
eargs = [func, func + '.set_filename()']
self.__error__(TextEntry('00-001-00002', args=eargs))
def file_exists(self):
# set function name
_ = display_func(None, 'file_exists', __NAME__, 'DrsInputFile')
# assume file does not exist
found = False
# if filename is set check filename exists
if self.filename is not None:
# if filename is string check filename exists
if isinstance(self.filename, str):
# update found
found = os.path.exists(self.filename)
# return whether file was found
return found
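# Illustrative usage of the filename helpers (path and name are hypothetical):
#   infile = DrsInputFile('MY_RAW_FILE')
#   infile.set_filename('/data/raw/2019-01-19/file_a.fits')
#   infile.basename       # -> 'file_a.fits'
#   infile.file_exists()  # -> True only if that path exists on disk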
def set_recipe(self, recipe):
"""
Set the associated recipe for the file (i.e. gives access to
drs_parameters etc
:param recipe: DrsRecipe instance, the recipe object to associate to
this file
:return:
"""
# set function name
_ = display_func(None, 'set_recipe', __NAME__, 'DrsInputFile')
# set the recipe
self.recipe = recipe
def newcopy(self, **kwargs):
# set function name
_ = display_func(None, 'newcopy', __NAME__, 'DrsInputFile')
# copy this instances values (if not overwritten)
name = kwargs.get('name', self.name)
kwargs['filetype'] = kwargs.get('filetype', self.filetype)
kwargs['suffix'] = kwargs.get('suffix', self.suffix)
kwargs['remove_insuffix'] = kwargs.get('remove_insuffix',
self.remove_insuffix)
kwargs['prefix'] = kwargs.get('prefix', self.prefix)
kwargs['filename'] = kwargs.get('filename', self.filename)
kwargs['intype'] = kwargs.get('infile', self.intype)
kwargs['fiber'] = kwargs.get('fiber', self.fiber)
kwargs['fibers'] = kwargs.get('fibers', self.fibers)
kwargs['recipe'] = kwargs.get('recipe', self.recipe)
kwargs['filename'] = kwargs.get('filename', self.filename)
kwargs['path'] = kwargs.get('path', self.path)
kwargs['basename'] = kwargs.get('basename', self.basename)
kwargs['inputdir'] = kwargs.get('inputdir', self.inputdir)
kwargs['directory'] = kwargs.get('directory', self.directory)
kwargs['data'] = kwargs.get('data', self.data)
kwargs['header'] = kwargs.get('header', self.header)
kwargs['fileset'] = kwargs.get('fileset', self.fileset)
kwargs['filesetnames'] = kwargs.get('filesetnames', self.filesetnames)
kwargs['indextable'] = kwargs.get('indextable', self.indextable)
kwargs['outfunc'] = kwargs.get('outfunc', self.outfunc)
# return new instance
return DrsInputFile(name, **kwargs)
def check_recipe(self):
# set function name
_ = display_func(None, 'check_recipe', __NAME__, 'DrsInputFile')
# ---------------------------------------------------------------------
# check that recipe isn't None
if self.recipe is None:
func = self.__repr__()
eargs = [func, self.filename, func + '.set_filename()']
self.__error__(TextEntry('00-001-00003', args=eargs))
def __str__(self):
"""
Defines the str(DrsInputFile) return for DrsInputFile
:return str: the string representation of DrsInputFile
i.e. DrsInputFile[name]
"""
# set function name
_ = display_func(None, '__str__', __NAME__, 'DrsInputFile')
# return the string representation of DrsInputFile
return 'DrsInputFile[{0}]'.format(self.name)
def __repr__(self):
"""
Defines the print(DrsInputFile) return for DrsInputFile
:return str: the string representation of DrsInputFile
i.e. DrsInputFile[name]
"""
# set function name
_ = display_func(None, '__repr__', __NAME__, 'DrsInputFile')
# return the string representation of DrsInputFile
return 'DrsInputFile[{0}]'.format(self.name)
def __error__(self, messages):
# set function name
_ = display_func(None, '__error__', __NAME__, 'DrsInputFile')
# run the log method: error mode
self.__log__(messages, 'error')
def __warning__(self, messages):
# set function name
_ = display_func(None, '__warning__', __NAME__, 'DrsInputFile')
# run the log method: warning mode
self.__log__(messages, 'warning')
def __message__(self, messages):
# set function name
_ = display_func(None, '__message__', __NAME__, 'DrsInputFile')
# get log_opt
if self.recipe is not None:
params = self.recipe.drs_params
else:
params = None
# print and log via wlogger
WLOG(params, '', messages)
def __log__(self, messages, kind):
# set function name
_ = display_func(None, '__log__', __NAME__, 'DrsInputFile')
# format initial error message
m0args = [kind.capitalize(), self.__repr__()]
message0 = TextEntry('{0}: {1}'.format(*m0args))
# append initial error message to messages
messages = message0 + messages
# get log_opt
if self.recipe is not None:
params = self.recipe.drs_params
else:
params = None
# print and log via wlogger
WLOG(params, kind, messages)
def addset(self, drsfile):
"""
For generic Input files only
Add to a list of associated drs files
:param drsfile: DrsInputFile (or DrsFitsFile), the file to add to this set
:return: None
"""
# set function name
_ = display_func(None, 'addset', __NAME__, 'DrsInputFile')
# append drs file to file set
self.fileset.append(drsfile)
# append drs file name to file set name list
self.filesetnames.append(drsfile.name)
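# Illustrative usage of addset (file names are hypothetical): building a
# generic file set so that filesetnames can later be used for argument checks
#   generic = DrsInputFile('DRS_INPUT')
#   generic.addset(DrsInputFile('DARK_DARK'))
#   generic.addset(DrsInputFile('FLAT_FLAT'))
#   generic.filesetnames  # -> ['DARK_DARK', 'FLAT_FLAT']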
def copyother(self, drsfile, **kwargs):
# set function name
_ = display_func(None, 'copyother', __NAME__, 'DrsInputFile')
# check recipe has been set
if 'recipe' not in kwargs:
self.check_recipe()
else:
self.recipe = kwargs['recipe']
# get parameters
params = self.recipe.drs_params
# set empty file attributes
nkwargs = dict()
nkwargs['name'] = kwargs.get('name', self.name)
nkwargs['recipe'] = kwargs.get('recipe', self.recipe)
nkwargs['filename'] = kwargs.get('filename', drsfile.filename)
nkwargs['intype'] = kwargs.get('infile', drsfile.intype)
nkwargs['path'] = kwargs.get('path', drsfile.path)
nkwargs['basename'] = kwargs.get('basename', drsfile.basename)
nkwargs['inputdir'] = kwargs.get('inputdir', drsfile.inputdir)
nkwargs['directory'] = kwargs.get('directory', drsfile.directory)
nkwargs['data'] = kwargs.get('data', drsfile.data)
nkwargs['header'] = kwargs.get('header', drsfile.header)
nkwargs['fileset'] = kwargs.get('fileset', self.fileset)
nkwargs['filesetnames'] = kwargs.get('filesetnames', self.filesetnames)
# return new instance of DrsFitsFile
return DrsInputFile(**nkwargs)
def completecopy(self, drsfile):
# set function name
_ = display_func(None, 'completecopy', __NAME__, 'DrsInputFile')
# set empty file attributes
nkwargs = dict()
nkwargs['name'] = copy.deepcopy(drsfile.name)
nkwargs['filetype'] = copy.deepcopy(drsfile.filetype)
nkwargs['suffix'] = copy.deepcopy(drsfile.suffix)
nkwargs['remove_insuffix'] = bool(drsfile.remove_insuffix)
nkwargs['prefix'] = copy.deepcopy(drsfile.prefix)
nkwargs['fiber'] = copy.deepcopy(drsfile.fiber)
nkwargs['fibers'] = copy.deepcopy(drsfile.fibers)
nkwargs['recipe'] = copy.deepcopy(drsfile.recipe)
nkwargs['filename'] = copy.deepcopy(drsfile.filename)
nkwargs['intype'] = drsfile.intype
nkwargs['path'] = copy.deepcopy(drsfile.path)
nkwargs['basename'] = copy.deepcopy(drsfile.basename)
nkwargs['inputdir'] = copy.deepcopy(drsfile.inputdir)
nkwargs['directory'] = copy.deepcopy(drsfile.directory)
nkwargs['data'] = copy.deepcopy(drsfile.data)
nkwargs['header'] = copy.deepcopy(drsfile.header)
# ------------------------------------------------------------------
if drsfile.fileset is None:
nkwargs['fileset'] = None
elif isinstance(drsfile.fileset, list):
# set up new file set storage
newfileset = []
# loop around file sets
for fileseti in drsfile.fileset:
newfileset.append(fileseti.completecopy(fileseti))
# append to nkwargs
nkwargs['fileset'] = newfileset
else:
nkwargs['fileset'] = drsfile.fileset
nkwargs['filesetnames'] = drsfile.filesetnames
# ------------------------------------------------------------------
nkwargs['indextable'] = copy.deepcopy(drsfile.indextable)
nkwargs['outfunc'] = drsfile.outfunc
# return new instance
return DrsInputFile(**nkwargs)
# -------------------------------------------------------------------------
# file checking
# -------------------------------------------------------------------------
def check_another_file(self, input_file):
"""
Checks that another file is consistent with this file type
:param input_file: DrsInputFile
:returns: True or False and the reason why (if False)
"""
# set function name
_ = display_func(None, 'check_another_file', __NAME__, 'DrsInputFile')
# 1. check extension
cond1, msg1 = self.has_correct_extension(filetype=input_file.filetype)
if not cond1:
return False, msg1
# 2. check file header keys exist
cond2, msg2 = self.hkeys_exist(None)
if not cond2:
return False, msg2
# 3. check file header keys are correct
cond3, msg3 = self.has_correct_hkeys(None)
if not cond3:
return False, msg3
# if 1, 2 and 3 pass return True
return True, None
def check_file(self):
"""
Checks that this file is correct
:returns: True or False and the reason why (if False)
"""
# set function name
_ = display_func(None, 'check_file', __NAME__, 'DrsInputFile')
# 1. check extension
cond1, msg1 = self.has_correct_extension()
if not cond1:
return False, msg1
# 2. check file header keys exist
cond2, msg2 = self.hkeys_exist()
if not cond2:
return False, msg2
# 3. check file header keys are correct
cond3, msg3 = self.has_correct_hkeys()
if not cond3:
return False, msg3
# if 1, 2 and 3 pass return True
return True, None
def has_correct_extension(self, filename=None, filetype=None, argname=None):
# set function name
_ = display_func(None, 'has_correct_extension', __NAME__,
'DrsInputFile')
# always return True and None (abstract placeholder)
return True, None
def hkeys_exist(self, header=None, filename=None, argname=None):
# set function name
_ = display_func(None, 'hkeys_exist', __NAME__,
'DrsInputFile')
# always return True and None (abstract placeholder)
return True, None
def has_correct_hkeys(self, header=None, argname=None, log=True):
# set function name
_ = display_func(None, 'has_correct_hkeys', __NAME__, 'DrsInputFile')
# always return True and None (abstract placeholder)
return True, None
# -------------------------------------------------------------------------
# file checking (old)
# -------------------------------------------------------------------------
def check_file_exists(self, quiet=False):
# set function name
_ = display_func(None, 'check_file_exists', __NAME__, 'DrsInputFile')
# check that filename exists
cond = os.path.exists(self.filename)
# if it does add to log that file is found
if cond:
eargs = [self.basename, self.path]
emsg = TextEntry('09-000-00001', args=eargs)
# if it does not add to log that file is not found
else:
eargs = [self.basename, self.path]
emsg = TextEntry('09-000-00002', args=eargs)
# deal with printout and return
# if we failed and aren't in quiet mode --> display error
if (not cond) and (not quiet):
self.__error__(emsg)
# if we didn't fail and aren't in quiet mode --> display message
elif not quiet:
self.__message__(emsg)
# return the condition and (error) message
return cond, emsg
def check_file_extension(self, quiet=False):
# set function name
_ = display_func(None, 'check_file_extension', __NAME__, 'DrsInputFile')
# if filetype is None add message and set pass condition
# to True
if self.filetype is None:
msg = TextEntry('09-000-00003', args=[self.basename])
cond = True
# if filetype is set and is correct add message and set pass condition
# to True
elif self.filename.endswith(self.filetype):
msg = TextEntry('09-000-00004', args=[self.basename, self.filetype])
cond = True
# if it has failed set error message and set pass condition to False
else:
msg = TextEntry('09-000-00005', args=[self.basename, self.filetype])
cond = False
# deal with printout and return
if (not cond) and (not quiet):
self.__error__(msg)
elif not quiet:
self.__message__(msg)
# return condition and message
return cond, msg
def check_file_header(self, quiet=False):
# set function name
_ = display_func(None, 'check_file_header', __NAME__, 'DrsInputFile')
# there is no header for non-fits input files --> return True and
# a blank message
return True, TextEntry('')
# -------------------------------------------------------------------------
# read/write methods
# -------------------------------------------------------------------------
def read_file(self, ext=None, check=False, params=None):
# set function name
_ = display_func(None, 'read_file', __NAME__, 'DrsInputFile')
# do nothing else (no current read option for generic input files)
def write_file(self, params=None):
# set function name
_ = display_func(None, 'write_file', __NAME__, 'DrsInputFile')
# do nothing else (no current write option for generic input files)
# -------------------------------------------------------------------------
# user functions
# -------------------------------------------------------------------------
def construct_filename(self, params, infile=None, check=True, **kwargs):
"""
Constructs the filename from the parameters defined at instance
definition and using the infile (if required). If check is True, checks
the infile type against "intype". Uses "outfunc" in instance definition
to set the suffices/prefixes/fiber etc
:param params: Param Dict
:param infile: Drsfile, the input DrsFile
:param check: bool, whether to check infile.name against self.intype
:param kwargs: keyword arguments passed to self.outfunc
:return: Sets self.filename and self.basename to the correct values
"""
# set function name
func_name = display_func(params, 'construct_filename', __NAME__,
'DrsInputFile')
# set outfile from self
kwargs['outfile'] = self
kwargs['func'] = func_name
kwargs['infile'] = infile
# if we have a function use it
if self.outfunc is not None:
abspath = self.outfunc(params, **kwargs)
self.filename = abspath
self.basename = os.path.basename(abspath)
# else raise an error
else:
eargs = [self.__repr__(), func_name]
WLOG(params, 'error', TextEntry('00-008-00004', args=eargs))
# check that we are allowed to use infile (if set)
if infile is not None and check:
if self.intype is not None:
# get required names
reqfiles = self.generate_reqfiles()
reqstr = ' or '.join(reqfiles)
# see if infile is in reqfiles
if infile.name not in reqfiles:
eargs = [infile.name, reqstr, self.filename, func_name]
WLOG(params, 'error', TextEntry('00-008-00017', args=eargs))
def generate_reqfiles(self):
"""
Takes "intype" and works out all the combinations of file names that
are valid for this "intype" (i.e. if we have a fileset in one of the
"intypes" we should add all files from this set)
:return: list of DrsInputFile names (drsfile.name) to know which names
are valid
:rtype list:
"""
# set function name
_ = display_func(None, 'generate_reqfiles', __NAME__, 'DrsInputFile')
# deal with intype being unset
if self.intype is None:
return []
# set out list storage
required_names = []
# deal with having a list of files
if isinstance(self.intype, list):
# loop around intypes
for intype in self.intype:
# deal with intype having fileset (set of files associated
# with this file)
if len(intype.filesetnames) != 0:
required_names += list(intype.filesetnames)
required_names.append(intype.name)
else:
required_names.append(intype.name)
else:
intype = self.intype
# deal with intype having fileset (set of files associated
# with this file)
if len(intype.filesetnames) != 0:
required_names += list(intype.filesetnames)
required_names.append(intype.name)
else:
required_names.append(intype.name)
# clean up required name list by only keeping unique names
required_names = list(np.unique(required_names))
# return required names
return required_names
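# Illustrative example (hypothetical names): if self.intype is a single file
# whose filesetnames are ['DARK_DARK', 'FLAT_FLAT'], generate_reqfiles()
# returns the unique list ['DARK_DARK', 'FLAT_FLAT', intype.name], so any of
# those names is accepted as a valid input when constructing the filename.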
def reconstruct_filename(self, params, outext, prefix=None, suffix=None,
inext=None, fiber=None):
currentpath = os.path.dirname(self.filename)
currentfile = self.basename
# ----------------------------------------------------------------------
# deal with non set value
if prefix is None:
prefix = self.prefix
if suffix is None:
suffix = self.suffix
if inext is None:
inext = self.filetype
if fiber is None:
fiber = self.fiber
# ----------------------------------------------------------------------
# create infilename
# ----------------------------------------------------------------------
# remove inext
if self.inext is not None and currentfile.endswith(self.inext):
currentfile = currentfile[:-len(self.inext)]
# remove fiber
if self.fiber is not None and currentfile.endswith('_' + self.fiber):
currentfile = currentfile[:-len('_' + self.fiber)]
# remove prefix
if self.prefix is not None and currentfile.startswith(self.prefix):
currentfile = currentfile[len(self.prefix):]
# remove suffix
if self.suffix is not None and currentfile.endswith(self.suffix):
currentfile = currentfile[:-len(self.suffix)]
# add back the inext
if inext is not None and not currentfile.endswith(inext):
currentfile = currentfile + inext
# ----------------------------------------------------------------------
# get re-constructed out file name
outfilename = outf.get_outfilename(params, currentfile, prefix, suffix,
inext, outext, fiber)
# ----------------------------------------------------------------------
# update self
self.prefix = prefix
self.suffix = suffix
self.inext = inext
self.fiber = fiber
self.filename = os.path.join(currentpath, outfilename)
self.basename = outfilename
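# Illustrative sketch of the copy semantics above (names are hypothetical):
#   base = DrsInputFile('TEMPLATE', filetype='.txt', suffix='_out')
#   a = base.newcopy(filename='/tmp/a.txt')   # keyword overrides win
#   b = base.completecopy(base)               # deep copy of all attributes
#   c = base.copyother(a)                     # definition from base, file info from a
# newcopy keeps unspecified attributes, completecopy deep-copies everything,
# and copyother mixes this instance's definition with another file's data.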
class DrsFitsFile(DrsInputFile):
def __init__(self, name, **kwargs):
"""
Create a DRS Fits File Input object
:param name: string, the name of the DRS input file
:param kwargs: currently allowed kwargs are:
- ext: string or None, the extension for the DRS input file
(without the '.' i.e. A.txt ext='txt'). This will be
checked if used in a DrsArgument and is not None.
- fiber: string or None, the fiber of the Fits File.
- KW_{str}: string, any keywordstore variable name currently
defined in spirouKeywords.py. If used in DrsArgument
the HEADER of this fits file must have the value
of this KW_{str} to be a valid argument.
- KW_OUTPUT: this will set the output type for this file (i.e.
file.outtag)
"""
# set function name
_ = display_func(None, '__init__', __NAME__, 'DrsFitsFile')
# define a name
self.name = name
# get super init
DrsInputFile.__init__(self, name, **kwargs)
# if ext in kwargs then we have a file extension to check
self.filetype = kwargs.get('filetype', '.fits')
# set the input extension type
self.inext = kwargs.get('inext', '.fits')
# get the input type (another DrsFitsFile that was or will be used
# to create this one i.e. for pp intype is a raw drs fits file,
# for out intype is most likely a pp drs fits file)
self.intype = kwargs.get('intype', None)
# get fiber types allowed for this drs fits file
self.fibers = kwargs.get('fibers', None)
# get the specific fiber linked to this drs fits file
self.fiber = kwargs.get('fiber', None)
# get the function used for writing output file
self.outfunc = kwargs.get('outfunc', None)
# get the out tag # TODO: Is this used still?
self.outtag = kwargs.get('KW_OUTPUT', 'UNKNOWN')
# get the database name (only set if intended to go into a database)
self.dbname = kwargs.get('dbname', None)
# get the raw database key name (only set if intended to go into a
# database) - this is the one set in constants
self.raw_dbkey = str(kwargs.get('dbkey', None))
# get the current database key -- this can change i.e. adding of a
# fiber to the end -- for the default set key see raw_dbkey
self.dbkey = str(kwargs.get('dbkey', None))
# add required header keys storage
self.required_header_keys = kwargs.get('rkeys', dict())
# if we don't have any required keys pushed in we get these using
# the get_header_keys method (kwargs is passed to allow setting
# individual keys when drs fits instance is constructed)
if len(self.required_header_keys) == 0:
self.get_header_keys(kwargs)
# get the fits data array (or set it to None)
self.data = kwargs.get('data', None)
# get the fits header (or set it to None)
self.header = kwargs.get('header', None)
# update fiber parameter from header
if self.header is not None:
self.fiber = self.get_key('KW_FIBER', dtype=str, required=False)
# get the index table related to this file # TODO: Is this used still?
self.indextable = kwargs.get('index', None)
# get the number of files associated with this drs fits file
self.numfiles = kwargs.get('numfiles', 0)
# get the shape of the fits data array (self.data)
self.shape = kwargs.get('shape', None)
# get the hdict (header dictionary storage) that will be passed to
# self.header - if not passed this is set as an empty header
# kept separate from self.header until all keys are added
# (to allow clean up + checking to occur only once)
self.hdict = kwargs.get('hdict', drs_fits.Header())
# get the storage dictionary for output parameters
self.output_dict = kwargs.get('output_dict', OrderedDict())
# get the data type for this drs fits file (either image or table)
self.datatype = kwargs.get('datatype', 'image')
# get the dtype internally for fits image files (i.e. float or int)
self.dtype = kwargs.get('dtype', None)
# get the data array (for multi-extension fits)
self.data_array = None
# get the header array (for multi-extension fits)
self.header_array = None
# flag whether file is a combined file
self.is_combined = kwargs.get('is_combined', False)
self.combined_list = kwargs.get('combined_list', [])
# TODO: IS this used?? What is it used for?
self.s1d = kwargs.get('s1d', [])
# update database key based on whether we have the fiber
if self.fiber is not None:
self.get_dbkey()
def get_header_keys(self, kwargs):
# set function name
_ = display_func(None, 'get_header_keys', __NAME__, 'DrsFitsFile')
# add values to the header
for kwarg in kwargs:
if 'KW_' in kwarg.upper():
self.required_header_keys[kwarg] = kwargs[kwarg]
# TODO: Merge/combine with completecopy and copyother
def newcopy(self, **kwargs):
"""
Make a new copy of this class (using all default parameters
set when constructed) - does not copy data/header related information
:param kwargs:
- name: string, the name of the DRS input file
- ext: string or None, the extension for the DRS input file
(without the '.' i.e. A.txt ext='txt'). This will be
checked if used in a DrsArgument and is not None.
- fiber: string or None, the fiber of the Fits File.
- KW_{str}: string, any keywordstore variable name currently
defined in spirouKeywords.py. If used in DrsArgument
the HEADER of this fits file must have the value
of this KW_{str} to be a valid argument.
- KW_OUTPUT: this will set the output type for this file (i.e.
file.outtag)
:return:
"""
# set function name
_ = display_func(None, 'newcopy', __NAME__, 'DrsFitsFile')
# copy this instances values (if not overwritten)
name = kwargs.get('name', self.name)
kwargs['filetype'] = kwargs.get('filetype', self.filetype)
kwargs['suffix'] = kwargs.get('suffix', self.suffix)
kwargs['remove_insuffix'] = kwargs.get('remove_insuffix',
self.remove_insuffix)
kwargs['prefix'] = kwargs.get('prefix', self.prefix)
kwargs['recipe'] = kwargs.get('recipe', self.recipe)
kwargs['filename'] = kwargs.get('filename', self.filename)
kwargs['path'] = kwargs.get('path', self.path)
kwargs['basename'] = kwargs.get('basename', self.basename)
kwargs['inputdir'] = kwargs.get('inputdir', self.inputdir)
kwargs['intype'] = kwargs.get('intype', self.intype)
kwargs['directory'] = kwargs.get('directory', self.directory)
kwargs['data'] = kwargs.get('data', self.data)
kwargs['header'] = kwargs.get('header', self.header)
kwargs['fileset'] = kwargs.get('fileset', self.fileset)
kwargs['filesetnames'] = kwargs.get('filesetnames', self.filesetnames)
kwargs['check_ext'] = kwargs.get('check_ext', self.filetype)
kwargs['fiber'] = kwargs.get('fiber', self.fiber)
kwargs['fibers'] = kwargs.get('fibers', self.fibers)
kwargs['outtag'] = kwargs.get('KW_OUTPUT', self.outtag)
kwargs['outfunc'] = kwargs.get('outfunc', self.outfunc)
kwargs['dbname'] = kwargs.get('dbname', self.dbname)
kwargs['dbkey'] = kwargs.get('dbkey', self.dbkey)
kwargs['datatype'] = kwargs.get('datatype', self.datatype)
kwargs['dtype'] = kwargs.get('dtype', self.dtype)
kwargs['s1d'] = kwargs.get('s1d', self.s1d)
kwargs['shape'] = kwargs.get('shape', self.shape)
kwargs['numfiles'] = kwargs.get('numfiles', self.numfiles)
for key in self.required_header_keys:
kwargs[key] = self.required_header_keys[key]
newfile = DrsFitsFile(name, **kwargs)
newfile.get_header_keys(kwargs)
# return new instance
return newfile
def string_output(self):
"""
String output for DrsFitsFile. If fiber is not None this also
contains the fiber type
i.e. DrsFitsFile[{name}_{fiber}] or DrsFitsFile[{name}]
:return string: str, the string to print
"""
# set function name
_ = display_func(None, 'string_output', __NAME__, 'DrsFitsFile')
# if we don't have the fiber print the drs fits file string
if self.fiber is None:
return 'DrsFitsFile[{0}]'.format(self.name)
# if we have the fiber add it and print the drs fits file string
else:
return 'DrsFitsFile[{0}_{1}]'.format(self.name, self.fiber)
def set_required_key(self, key, value):
# set function name
_ = display_func(None, 'set_required_key', __NAME__, 'DrsFitsFile')
# if we have a keyword (prefix 'KW_')
if 'KW_' in key:
# set required header keys
self.required_header_keys[key] = value
def __str__(self):
"""
Defines the str(DrsFitsFile) return for DrsFitsFile
:return str: the string representation of DrsFitsFile
i.e. DrsFitsFile[name] or DrsFitsFile[name_fiber]
"""
# set function name
_ = display_func(None, '__str__', __NAME__, 'DrsFitsFile')
# return the string output
return self.string_output()
def __repr__(self):
"""
Defines the print(DrsFitsFile) return for DrsFitsFile
:return str: the string representation of DrsFitsFile
i.e. DrsFitsFile[name] or DrsFitsFile[name_fiber]
"""
# set function name
_ = display_func(None, '__repr__', __NAME__, 'DrsFitsFile')
# return the string output
return self.string_output()
# TODO: Merge/combine with newcopy and copyother
def completecopy(self, drsfile):
"""
Copies all attributes from one drsfile to another
:param drsfile: DrsFitsFile, the file whose attributes are copied
:return: DrsFitsFile, a new instance with all attributes deep-copied
"""
# set function name
_ = display_func(None, 'completecopy', __NAME__, 'DrsFitsFile')
# set empty file attributes
nkwargs = dict()
nkwargs['name'] = copy.deepcopy(drsfile.name)
nkwargs['filetype'] = copy.deepcopy(drsfile.filetype)
nkwargs['suffix'] = copy.deepcopy(drsfile.suffix)
nkwargs['remove_insuffix'] = bool(drsfile.remove_insuffix)
nkwargs['prefix'] = copy.deepcopy(drsfile.prefix)
nkwargs['recipe'] = drsfile.recipe
nkwargs['fiber'] = copy.deepcopy(drsfile.fiber)
nkwargs['fibers'] = copy.deepcopy(drsfile.fibers)
nkwargs['rkeys'] = copy.deepcopy(drsfile.required_header_keys)
nkwargs['filename'] = copy.deepcopy(drsfile.filename)
nkwargs['path'] = copy.deepcopy(drsfile.path)
nkwargs['basename'] = copy.deepcopy(drsfile.basename)
nkwargs['inputdir'] = copy.deepcopy(drsfile.inputdir)
nkwargs['intype'] = drsfile.intype
nkwargs['directory'] = copy.deepcopy(drsfile.directory)
nkwargs['data'] = copy.deepcopy(drsfile.data)
nkwargs['header'] = copy.deepcopy(drsfile.header)
nkwargs['shape'] = copy.deepcopy(drsfile.shape)
nkwargs['hdict'] = copy.deepcopy(drsfile.hdict)
nkwargs['output_dict'] = copy.deepcopy(drsfile.output_dict)
nkwargs['is_combined'] = bool(drsfile.is_combined)
nkwargs['combined_list'] = copy.deepcopy(drsfile.combined_list)
# ------------------------------------------------------------------
if drsfile.fileset is None:
nkwargs['fileset'] = None
elif isinstance(drsfile.fileset, list):
# set up new file set storage
newfileset = []
# loop around file sets
for fileseti in drsfile.fileset:
newfileset.append(fileseti.completecopy(fileseti))
# append to nkwargs
nkwargs['fileset'] = newfileset
else:
nkwargs['fileset'] = drsfile.fileset
nkwargs['filesetnames'] = list(drsfile.filesetnames)
# ------------------------------------------------------------------
nkwargs['outfunc'] = drsfile.outfunc
nkwargs['dbname'] = copy.deepcopy(drsfile.dbname)
nkwargs['dbkey'] = copy.deepcopy(drsfile.dbkey)
nkwargs['datatype'] = copy.deepcopy(drsfile.datatype)
nkwargs['dtype'] = copy.deepcopy(drsfile.dtype)
nkwargs['shape'] = copy.deepcopy(drsfile.shape)
nkwargs['numfiles'] = copy.deepcopy(drsfile.numfiles)
nkwargs['s1d'] = copy.deepcopy(drsfile.s1d)
for key in drsfile.required_header_keys:
nkwargs[key] = drsfile.required_header_keys[key]
newfile = DrsFitsFile(**nkwargs)
newfile.get_header_keys(nkwargs)
# return new instance of DrsFitsFile
return newfile
# TODO: Merge/combine with completecopy and copyother
def copyother(self, drsfile, **kwargs):
# check recipe has been set
if 'recipe' not in kwargs:
self.check_recipe()
else:
self.recipe = kwargs['recipe']
# must check recipe of drsfile
if drsfile.recipe is None:
drsfile.recipe = self.recipe
# set function name
_ = display_func(None, 'copyother', __NAME__, 'DrsFitsFile')
# set empty file attributes
nkwargs = dict()
nkwargs['name'] = kwargs.get('name', self.name)
nkwargs['filetype'] = kwargs.get('filetype', self.filetype)
nkwargs['suffix'] = kwargs.get('suffix', self.suffix)
nkwargs['remove_insuffix'] = kwargs.get('remove_insuffix',
self.remove_insuffix)
nkwargs['prefix'] = kwargs.get('prefix', self.prefix)
nkwargs['recipe'] = kwargs.get('recipe', self.recipe)
nkwargs['fiber'] = kwargs.get('fiber', self.fiber)
nkwargs['fibers'] = kwargs.get('fibers', self.fibers)
nkwargs['rkeys'] = kwargs.get('rkeys', self.required_header_keys)
nkwargs['filename'] = kwargs.get('filename', drsfile.filename)
nkwargs['path'] = kwargs.get('path', drsfile.path)
nkwargs['basename'] = kwargs.get('basename', drsfile.basename)
nkwargs['inputdir'] = kwargs.get('inputdir', drsfile.inputdir)
nkwargs['intype'] = kwargs.get('intype', drsfile.intype)
nkwargs['directory'] = kwargs.get('directory', drsfile.directory)
nkwargs['data'] = kwargs.get('data', drsfile.data)
nkwargs['header'] = kwargs.get('header', drsfile.header)
nkwargs['shape'] = kwargs.get('shape', drsfile.shape)
nkwargs['hdict'] = kwargs.get('hdict', drsfile.hdict)
nkwargs['output_dict'] = kwargs.get('output_dict', drsfile.output_dict)
nkwargs['fileset'] = kwargs.get('fileset', self.fileset)
nkwargs['filesetnames'] = kwargs.get('filesetnames', self.filesetnames)
nkwargs['outfunc'] = kwargs.get('outfunc', self.outfunc)
nkwargs['dbname'] = kwargs.get('dbname', self.dbname)
nkwargs['dbkey'] = kwargs.get('dbkey', self.dbkey)
nkwargs['datatype'] = kwargs.get('datatype', self.datatype)
nkwargs['dtype'] = kwargs.get('dtype', drsfile.dtype)
nkwargs['shape'] = kwargs.get('shape', drsfile.shape)
nkwargs['numfiles'] = kwargs.get('numfiles', drsfile.numfiles)
nkwargs['s1d'] = kwargs.get('s1d', drsfile.s1d)
# return new instance of DrsFitsFile
return DrsFitsFile(**nkwargs)
# -------------------------------------------------------------------------
# file checking
# -------------------------------------------------------------------------
def check_file(self):
"""
Checks that this file is correct
:returns: True or False and the reason why (if False)
"""
# set function name
_ = display_func(None, 'check_file', __NAME__, 'DrsFitsFile')
# 1. check extension
cond1, msg1 = self.has_correct_extension()
if not cond1:
return False, msg1
# 2. check file header keys exist
cond2, msg2 = self.hkeys_exist()
if not cond2:
return False, msg2
# 3. check file header keys are correct
cond3, msg3 = self.has_correct_hkeys()
if not cond3:
return False, msg3
# 4. check if we have a fiber defined
self.has_fiber()
# if 1, 2 and 3 pass return True
return True, None
def has_correct_extension(self, filename=None, filetype=None, argname=None):
# set function name
_ = display_func(None, 'has_correct_extension', __NAME__, 'DrsFitsFile')
# deal with no input extension
if filetype is None:
filetype = self.filetype
# deal with no input filename
if filename is None:
filename = self.filename
basename = self.basename
else:
basename = os.path.basename(filename)
# -----------------------------------------------------------------
# deal with no argument name
if argname is None:
argname = TextEntry('40-001-00018')
# -----------------------------------------------------------------
# check recipe has been set
self.check_recipe()
# get recipe and parameters
params = self.recipe.drs_params
# -----------------------------------------------------------------
# check extension
if filetype is None:
msg = TextEntry('09-000-00003', args=[basename])
cond = True
elif filename.endswith(filetype):
msg = TextEntry('09-000-00004', args=[basename, filetype])
cond = True
else:
msg = TextEntry('09-000-00005', args=[basename, filetype])
cond = False
# if valid return True and no error
if cond:
dargs = [argname, os.path.basename(filename)]
WLOG(params, 'debug', TextEntry('90-001-00009', args=dargs),
wrap=False)
return True, msg
# if False generate error and return it
else:
emsg = TextEntry('09-001-00006', args=[argname, filetype])
return False, emsg
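# Illustrative return values for has_correct_extension (file is hypothetical):
#   filename ends with self.filetype (e.g. '.fits') -> (True, TextEntry message)
#   filename ends with anything else                -> (False, TextEntry error)
# so callers such as check_file() can short-circuit on the boolean.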
def hkeys_exist(self, header=None, filename=None, argname=None):
# set function name
func_name = display_func(None, 'hkeys_exist', __NAME__, 'DrsFitsFile')
# deal with no input header
if header is None:
# check file has been read
self.check_read(header_only=True, load=True)
# get header
header = self.header
# deal with no input filename
if filename is None:
basename = self.basename
else:
basename = os.path.basename(filename)
# -----------------------------------------------------------------
# check recipe has been set
self.check_recipe()
# get recipe and parameters
params = self.recipe.drs_params
rkeys = self.required_header_keys
# -----------------------------------------------------------------
# deal with no argument name
if argname is None:
argname = TextEntry('40-001-00018')
# -----------------------------------------------------------------
# Check that required keys are in header
for drskey in rkeys:
# check whether header key is in param dict (i.e. from a
# keywordstore) or whether we have to use the key as is
if drskey in params:
key = params[drskey][0]
source = params.sources[drskey]
else:
key = drskey
source = func_name
# deal with empty key
if (key is None) or key == '':
eargs = [key, drskey, source]
WLOG(params, 'error', TextEntry('00-006-00011', args=eargs))
# check if key is in header
if key not in header:
eargs = [argname, key]
emsg = TextEntry('09-001-00007', args=eargs)
WLOG(params, 'debug', emsg)
return False, emsg
else:
dargs = [argname, key, basename]
WLOG(params, 'debug', TextEntry('90-001-00010', args=dargs),
wrap=False)
# if we have got to this point return True (success) and no error
# messages
return True, None
def has_correct_hkeys(self, header=None, argname=None, log=True,
filename=None):
# set function name
_ = display_func(None, 'has_correct_hkeys', __NAME__, 'DrsFitsFile')
# -----------------------------------------------------------------
# check recipe has been set
self.check_recipe()
# get recipe and parameters
params = self.recipe.drs_params
# -----------------------------------------------------------------
# set function name
_ = display_func(params, 'has_correct_hkeys', __NAME__, 'DrsFitsFile')
# deal with no input header
if header is None:
# check file has been read
self.check_read(header_only=True, load=True)
# get header
header = self.header
# get file
filename = self.filename
# get short hand to required header keys
rkeys = self.required_header_keys
# -----------------------------------------------------------------
# deal with no argument name
if argname is None:
argname = TextEntry('40-001-00018')
# -----------------------------------------------------------------
# search for correct value for each header key
found = True
# storage
errors = dict()
# -----------------------------------------------------------------
# loop around required keys
for drskey in rkeys:
# check whether header key is in param dict (i.e. from a
# keywordstore) or whether we have to use the key as is
if drskey in params:
key = params[drskey][0]
else:
key = drskey
# check that key is in header
if key not in header:
ekwargs = dict(level='error', key=key, filename=filename)
raise lang.drs_exceptions.DrsHeaderError('Key not found',
**ekwargs)
# get value and required value
value = header[key].strip()
rvalue = rkeys[drskey].strip()
# check if key is valid
if rvalue != value:
dargs = [argname, key, rvalue]
if log:
WLOG(params, 'debug', TextEntry('90-001-00011', args=dargs),
wrap=False)
found = False
else:
dargs = [argname, key, rvalue]
if log:
WLOG(params, 'debug', TextEntry('90-001-00012', args=dargs),
wrap=False)
# store info
errors[key] = (found, argname, rvalue, value)
# return found (bool) and errors
return found, errors
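# Illustrative shape of the return from has_correct_hkeys (values hypothetical):
#   found  -> False if any required header value differed from rkeys
#   errors -> {'SOME_KEY': (found, argname, required_value, header_value), ...}
# i.e. one tuple per checked key, so the caller can report every mismatch.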
def has_fiber(self, header=None):
# set function name
_ = display_func(None, 'has_fiber', __NAME__, 'DrsFitsFile')
# -----------------------------------------------------------------
# check whether fiber already set (in which case ignore)
if self.fiber is not None:
return
# -----------------------------------------------------------------
# check recipe has been set
self.check_recipe()
# deal with no input header
if header is None:
# check file has been read
self.check_read(header_only=True, load=True)
# get header
header = self.header
# get recipe and parameters
params = self.recipe.drs_params
# -----------------------------------------------------------------
kw_fiber = params['KW_FIBER'][0]
# -----------------------------------------------------------------
# deal with fiber
if kw_fiber in self.header:
fiber = header[kw_fiber]
# TODO: remove elif when fiber is always in header if file
# TODO: has a fiber
# TODO: START OF REMOVE ------------------------------------------------
elif 'AB' in self.basename.split('_')[-1]:
fiber = 'AB'
elif 'A' in self.basename.split('_')[-1]:
fiber = 'A'
elif 'B' in self.basename.split('_')[-1]:
fiber = 'B'
elif 'C' in self.basename.split('_')[-1]:
fiber = 'C'
# TODO: END OF REMOVE --------------------------------------------------
else:
fiber = None
# update fiber value
if fiber is not None:
self.fiber = fiber
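# Note on the fallback above (filename is hypothetical): because the test uses
# "in" on the last underscore-separated chunk of the basename, 'AB' must be
# checked before 'A' and 'B', e.g. 'file_pp_e2ds_AB.fits' -> fiber 'AB', not 'A'.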
# -------------------------------------------------------------------------
# table checking
# -------------------------------------------------------------------------
def get_infile_outfilename(self, params, recipe, infilename,
allowedfibers=None, ext='.fits'):
# set function name
_ = display_func(None, 'get_infile_outfilename', __NAME__,
'DrsFitsFile')
# ------------------------------------------------------------------
# 1. need to assign an input type for our raw file
if self.intype is not None:
# deal with in type being list
if isinstance(self.intype, list):
intype = self.intype[0]
else:
intype = self.intype
# get new copy
infile = intype.newcopy(recipe=recipe)
else:
infile = DrsFitsFile('DRS_RAW_TEMP')
# ------------------------------------------------------------------
# storage of files
chain_files = []
# need to go back through the file history and update filename
cintype = self.completecopy(infile)
# loop until we have no intype (raw file)
while cintype is not None:
# add to chain
chain_files.append(self.completecopy(cintype))
if hasattr(cintype, 'intype'):
# deal with in type being list
if isinstance(cintype.intype, list):
cintype = cintype.intype[0]
else:
cintype = cintype.intype
else:
break
# ------------------------------------------------------------------
# set the file name to the infilename
filename = str(infilename)
bottomfile = chain_files[-1]
# now we have chain we can project file (assuming last element in the
# chain is the raw file)
for cintype in chain_files[::-1][1:]:
bottomfile.filename = filename
bottomfile.basename = os.path.basename(filename)
# check whether we need fiber
if bottomfile.fibers is not None:
fiber = allowedfibers
else:
fiber = None
# get out file name
out = cintype.check_table_filename(params, recipe, bottomfile,
fullpath=True,
allowedfibers=fiber)
valid, outfilename = out
# set the filename to the outfilename
filename = outfilename
bottomfile = cintype
# ------------------------------------------------------------------
# add values to infile
infile.filename = filename
infile.basename = os.path.basename(filename)
infile.filetype = ext
# ------------------------------------------------------------------
# get outfilename (final)
valid, outfilename = self.check_table_filename(params, recipe, infile,
allowedfibers)
# ------------------------------------------------------------------
# return infile
return infile, valid, outfilename
def check_table_filename(self, params, recipe, infile, allowedfibers=None,
fullpath=False):
"""
Checks whether raw "filename" belongs to this DrsFile
:param params: Param Dict
:param recipe: DrsRecipe, the recipe instance calling this check
:param infile: DrsFitsFile, the input file whose (projected) name is checked
:param allowedfibers: string, list of strings or None, the fiber(s) allowed
:param fullpath: bool, if True return the full output path, else the basename
:return: tuple, (valid, outfilename) - whether the name matches this DrsFile
    and the constructed output filename
"""
# set function name
func_name = display_func(None, 'check_table_filename', __NAME__,
'DrsFitsFile')
# ------------------------------------------------------------------
# deal with fibers
if allowedfibers is not None:
if isinstance(allowedfibers, str):
fibers = [allowedfibers]
else:
fibers = list(allowedfibers)
elif self.fibers is None:
fibers = [None]
else:
fibers = self.fibers
# set initial value of out filename
outfilename = infile.filename
# loop around fibers
for fiber in fibers:
# 2. need to assign an output filename for out file
if self.outfunc is not None:
outfilename = self.outfunc(params, infile=infile, outfile=self,
fiber=fiber)
else:
eargs = [self.name, recipe.name, func_name]
WLOG(params, 'error', TextEntry('09-503-00009', args=eargs))
outfilename = None
# ------------------------------------------------------------------
# assume file is valid
valid = True
# ------------------------------------------------------------------
# check extension
if outfilename.endswith(self.filetype):
# remove extension to test suffix
filename = outfilename[:-len(self.filetype)]
else:
filename = None
valid = False
# debug log that extension was incorrect
dargs = [self.filetype, filename]
WLOG(params, 'debug', TextEntry('90-008-00004', args=dargs))
# ------------------------------------------------------------------
# check suffix (after extension removed)
if (self.suffix is not None) and valid:
# if we have no fibers file should end with suffix
if fibers == [None]:
if not filename.endswith(self.suffix):
valid = False
# debug log that extension was incorrect
dargs = [self.suffix, filename]
WLOG(params, 'debug', TextEntry('90-008-00005', args=dargs))
# ------------------------------------------------------------------
# if we have fibers then file should end with one of them and
# the suffix
elif len(fibers) > 0:
# have to set up a new valid that should be True if any
# fiber is present
valid1 = False
# loop around fibers
for fiber in fibers:
if filename.endswith('{0}_{1}'.format(self.suffix, fiber)):
valid1 |= True
# if valid1 is False debug log that fibers were not found
if not valid1:
dargs = [', '.join(fibers), filename]
WLOG(params, 'debug', TextEntry('90-008-00006', args=dargs))
# put valid1 back into valid
valid &= valid1
# ------------------------------------------------------------------
# return valid (True if filename is valid False otherwise)
if fullpath:
return valid, outfilename
else:
return valid, os.path.basename(outfilename)
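# Illustrative outcome (hypothetical names): with filetype '.fits', suffix
# '_e2dsff' and fibers ['AB', 'A', 'B', 'C'], an outfilename ending in
# '_e2dsff_AB.fits' passes both the extension and the suffix+fiber checks, so
# check_table_filename returns (True, basename or full path of that file).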
def check_table_keys(self, params, filedict, rkeys=None):
"""
Checks whether a dictionary contains the required key/value pairs
to belong to this DrsFile
:param params: Param Dict
:param filedict: dict, the key/value pairs (e.g. one row of an index table)
:param rkeys: dict or None, the required key/value pairs; if None the
    instance's required_header_keys are used
:return: bool, True if filedict satisfies all required key/value pairs
"""
# set function name
_ = display_func(None, 'check_table_keys', __NAME__, 'DrsFitsFile')
# ------------------------------------------------------------------
# get required keys
if rkeys is None:
rkeys = self.required_header_keys
# assume file is valid
valid = True
# loop around required keys
for key in rkeys:
# key needs to be in table
if key in filedict:
# get rvalue
rvalues = rkeys[key]
# check if rvalue is list
if isinstance(rvalues, str):
rvalues = [rvalues]
# set up aux valid
valid1 = False
# loop around
for rvalue in rvalues:
# get this value
filedictvalue = filedict[key]
# deal with null values
if filedictvalue in [None, 'None', '']:
valid1 |= True
continue
# make sure there are no white spaces and all upper case
if isinstance(filedictvalue, str):
filedictvalue = filedictvalue.strip().upper()
# also make sure there are no end white spaces and all
# upper case for the required value
rvalueclean = rvalue.strip().upper()
# if key is in file dictionary then we should check it
if filedictvalue == rvalueclean:
valid1 |= True
# modify valid value
valid &= valid1
dargs = [key, valid, filedict['OUT'], rvalues]
WLOG(params, 'debug', TextEntry('90-008-00003', args=dargs))
# if we haven't found a key the we can stop here
if not valid:
return False
else:
# Log that key was not found
dargs = [key, filedict['OUT'], ', '.join(list(filedict.keys()))]
WLOG(params, 'debug', TextEntry('90-008-00002', args=dargs))
# return valid
return valid
# -------------------------------------------------------------------------
# fits file checking (OLD)
# -------------------------------------------------------------------------
def check_file_header(self, quiet=False, argname=None):
"""
Check file header has all required header keys
:param quiet:
:param argname: string, the name of the argument we are checking
(for error and debug messages)
:return:
"""
# set function name
func_name = display_func(None, 'check_file_header', __NAME__,
'DrsFitsFile')
# -----------------------------------------------------------------
# check file has been read
self.read_file()
# check recipe has been set
self.check_recipe()
params = self.recipe.drs_params
rkeys = self.required_header_keys
# -----------------------------------------------------------------
# deal with no argument name
if argname is None:
argname = TextEntry('40-001-00018')
# -----------------------------------------------------------------
# Step 1: Check that required keys are in header
for drskey in rkeys:
# check whether header key is in param dict (i.e. from a
# keywordstore) or whether we have to use the key as is
if drskey in params:
key = params[drskey][0]
source = params.sources[drskey]
else:
key = drskey
source = func_name
# deal with empty key
if (key is None) or key == '':
eargs = [key, drskey, source]
WLOG(params, 'error', TextEntry('00-006-00011', args=eargs))
# check if key is in header
if key not in self.header:
eargs = [argname, key]
emsg = TextEntry('09-001-00007', args=eargs)
return [False, True], None, [emsg, None]
else:
dargs = [argname, key, os.path.basename(self.filename)]
WLOG(params, 'debug', TextEntry('90-001-00010', args=dargs),
wrap=False)
# -----------------------------------------------------------------
# Step 2: search for correct value for each header key
found = True
# storage
errors = dict()
# loop around required keys
for drskey in rkeys:
# check whether header key is in param dict (i.e. from a
# keywordstore) or whether we have to use the key as is
if drskey in params:
key = params[drskey][0]
else:
key = drskey
# get value and required value
value = self.header[key].strip()
rvalue = rkeys[drskey].strip()
# write error message
# emsg1 = '{0} Header key {1} value is incorrect'
# emsg2 = '\tvalue = {2} required = {3}'
# eargs = [argstring, key, value, rvalue, self.filename]
# emsgs = [emsg1.format(*eargs), emsg2.format(*eargs)]
# check if key is valid
if rvalue != value:
dargs = [argname, key, rvalue]
WLOG(params, 'debug', TextEntry('90-001-00011', args=dargs),
wrap=False)
found = False
else:
dargs = [argname, key, rvalue]
WLOG(params, 'debug', TextEntry('90-001-00012', args=dargs),
wrap=False)
# store info
errors[key] = (found, argname, rvalue, value)
# return:
# [valid cond1, valid cond2], self, [errors1, errors2]
if found:
return [True, True], self, [None, errors]
else:
return [True, False], self, [None, errors]
# -------------------------------------------------------------------------
# fits file methods
# -------------------------------------------------------------------------
def read_file(self, ext=None, check=False, params=None, copy=False):
"""
Read this fits file data and header
:param ext: int or None, the data extension to open
:param check: bool, if True checks if data is already read and does
not read again, to overwrite/re-read set "check" to False
:param params: Parameter Dict (not used --in overridden definition)
:return None:
"""
# set function name
_ = display_func(None, 'read_file', __NAME__, 'DrsFitsFile')
# check if we have data set
if check:
cond1 = self.data is not None
cond2 = self.header is not None
if cond1 and cond2:
return True
# deal with no extension
if (ext is None) and (self.datatype == 'image'):
ext = 0
elif ext is None:
ext = 1
# get params
params = self.recipe.drs_params
# check that filename is set
self.check_filename()
# get data format
if self.datatype == 'image':
fmt = 'fits-image'
elif self.datatype == 'table':
fmt = 'fits-table'
else:
fmt = None
out = drs_fits.readfits(params, self.filename, getdata=True,
gethdr=True, fmt=fmt, ext=ext)
# deal with copying
if copy:
self.data = np.array(out[0])
else:
self.data = out[0]
self.header = drs_fits.Header.from_fits_header(out[1])
# update fiber parameter from header
if self.header is not None:
self.fiber = self.get_key('KW_FIBER', dtype=str, required=False)
# set number of data sets to 1
self.numfiles = 1
# set the shape
if (self.data is not None) and (self.datatype == 'image'):
self.shape = self.data.shape
elif self.data is not None:
self.shape = [len(self.data)]
def read_data(self, ext=0, log=True, copy=False):
# set function name
_ = display_func(None, 'read_data', __NAME__, 'DrsFitsFile')
# check that filename is set
self.check_filename()
# get params
params = self.recipe.drs_params
# get data
data = drs_fits.readfits(params, self.filename, ext=ext, log=log)
# set number of data sets to 1
self.numfiles = 1
# assign to object
if copy:
self.data = np.array(data)
else:
self.data = data
def read_header(self, ext=None, log=True, copy=False):
# set function name
_ = display_func(None, 'read_header', __NAME__, 'DrsFitsFile')
# check that filename is set
self.check_filename()
# deal with no extension
if (ext is None) and (self.datatype == 'image'):
ext = 0
elif ext is None:
ext = 1
# get params
params = self.recipe.drs_params
# get header
header = drs_fits.read_header(params, self.filename, ext=ext, log=log)
# assign to object
if copy:
self.header = drs_fits.Header(header)
else:
self.header = header
# update fiber parameter from header
if self.header is not None:
self.fiber = self.get_key('KW_FIBER', dtype=str, required=False)
# TODO: sort out header_only and load_data -- ambiguous
def check_read(self, header_only=False, load=False, load_data=True):
# set function name
_ = display_func(None, 'check_read', __NAME__, 'DrsFitsFile')
# deal with only wanting to check if header is read
if header_only:
if self.header is None:
if not load_data:
return self.read_header()
if load:
return self.read_file()
func = self.__repr__()
eargs = [func, func + '.read_file()']
self.__error__(TextEntry('00-001-00004', args=eargs))
else:
return 1
# check that data/header/comments is not None
if self.header is None and not load_data:
return self.read_header()
if self.data is None:
if load:
return self.read_file()
func = self.__repr__()
eargs = [func, func + '.read_file()']
self.__error__(TextEntry('00-001-00004', args=eargs))
def read_multi(self, ext=None, check=True):
# set function name
_ = display_func(None, 'read_multi', __NAME__, 'DrsFitsFile')
# check if we have data set
if check:
cond1 = self.data is not None
cond2 = self.header is not None
if cond1 and cond2:
return True
# get params
params = self.recipe.drs_params
# check that filename is set
self.check_filename()
# get data format
if ext is not None:
out = drs_fits.readfits(params, self.filename, getdata=True,
ext=ext, gethdr=True,
fmt='fits-image')
else:
out = drs_fits.readfits(params, self.filename, getdata=True,
gethdr=True, fmt='fits-multi')
self.data = out[0][0]
self.header = drs_fits.Header.from_fits_header(out[1][0])
# update fiber parameter from header
if self.header is not None:
self.fiber = self.get_key('KW_FIBER', dtype=str, required=False)
self.data_array = out[0]
# set number of data sets to 1
self.numfiles = 1
# append headers (as copy)
self.header_array = []
for header in out[1]:
self.header_array.append(drs_fits.Header.from_fits_header(header))
# set the shape
if self.data is not None:
self.shape = self.data.shape
def update_header_with_hdict(self):
# set function name
_ = display_func(None, 'update_header_with_hdict', __NAME__,
'DrsFitsFile')
# deal with unset header
if self.header is None:
if isinstance(self.hdict, drs_fits.fits.Header):
self.header = self.hdict.copy()
return
self.header = drs_fits.Header()
# add keys from hdict
for key in self.hdict:
self.header[key] = (self.hdict[key], self.hdict.comments[key])
def write_file(self, params=None):
# set function name
func_name = display_func(None, 'write_file', __NAME__, 'DrsFitsFile')
# get params
params = self.recipe.drs_params
# ---------------------------------------------------------------------
# check that filename is set
self.check_filename()
# copy keys from hdict into header
self.update_header_with_hdict()
# write to file
drs_fits.writefits(params, self.filename, self.data, self.header,
self.datatype, self.dtype, func=func_name)
# ---------------------------------------------------------------------
# write output dictionary
self.output_dictionary()
def write_multi(self, data_list, header_list=None, datatype_list=None,
dtype_list=None):
# set function name
func_name = display_func(None, 'write_multi', __NAME__, 'DrsFitsFile')
# get params
params = self.recipe.drs_params
# ---------------------------------------------------------------------
# check that filename is set
self.check_filename()
# copy keys from hdict into header
self.update_header_with_hdict()
# ---------------------------------------------------------------------
# deal with header list being empty
if header_list is None:
header_list = []
for it in range(len(data_list)):
if self.header is not None:
header_list.append(self.header.to_fits_header())
else:
header_list.append(None)
# deal with datatype_list being empty
if datatype_list is None:
datatype_list = []
for it in range(len(data_list)):
if isinstance(data_list[it], Table):
datatype_list.append('table')
else:
datatype_list.append('image')
# deal with dtype being empty
if dtype_list is None:
dtype_list = [None] * len(data_list)
# ---------------------------------------------------------------------
# get data and header lists
data_list = [self.data] + data_list
header_list = [self.header] + header_list
datatype_list = [self.datatype] + datatype_list
dtype_list = [self.dtype] + dtype_list
# writefits to file
drs_fits.writefits(params, self.filename, data_list, header_list,
datatype_list, dtype_list, func=func_name)
# ---------------------------------------------------------------------
# write output dictionary
self.output_dictionary()
def get_fiber(self, header=None):
# set function name
_ = display_func(None, 'get_fiber', __NAME__, 'DrsFitsFile')
# get params
params = self.recipe.drs_params
# must have fibers defined to be able to get a fiber
if self.fibers is None:
return None
# get fiber header key
key = params['KW_FIBER'][0]
# deal with case where no header was given
if header is None:
if self.header is not None:
if key in self.header:
return str(self.header[key])
else:
if key in header:
return str(header[key])
# if we still don't have fiber search in file name for fiber
for fiber in self.fibers:
if '_{0}'.format(fiber) in self.basename:
return fiber
# if we still don't have fiber then return None
return None
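    # Illustrative note (not from the original source): get_fiber falls back to
    # the filename when the KW_FIBER header key is absent, e.g. a hypothetical
    # basename 'file_pp_e2ds_AB.fits' would match fiber 'AB' through the
    # '_{0}'.format(fiber) substring test above.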
def output_dictionary(self):
"""
Generate the output dictionary (for use while writing)
Uses OUTPUT_FILE_HEADER_KEYS and DrsFile.hdict to generate an
output dictionary for this file (for use in indexing)
Requires DrsFile.filename and DrsFile.recipe to be set
:return None:
"""
# set function name
_ = display_func(None, 'output_dictionary', __NAME__, 'DrsFitsFile')
# check that recipe is set
self.check_recipe()
params = self.recipe.drs_params
pconstant = self.recipe.drs_pconstant
# get output dictionary
output_hdr_keys = pconstant.OUTPUT_FILE_HEADER_KEYS()
# loop around the keys and find them in hdict (or add null character if
# not found)
for key in output_hdr_keys:
# deal with header key stores
if key in params:
dkey = params[key][0]
else:
dkey = str(key)
if dkey in self.hdict:
self.output_dict[key] = str(self.hdict[dkey])
elif dkey in self.header:
self.output_dict[key] = str(self.header[dkey])
else:
self.output_dict[key] = '--'
def combine(self, infiles, math='sum', same_type=True, path=None):
# set function name
func_name = display_func(None, 'combine', __NAME__, 'DrsFitsFile')
# define usable math
        available_math = ['sum', 'add', '+', 'average', 'mean', 'median',
                          'med', 'subtract', '-', 'divide', '/', 'multiply',
                          'times', '*']
# --------------------------------------------------------------------
# check that recipe is set
self.check_recipe()
params = self.recipe.drs_params
# check that data is read
self.check_read()
# set new data to this files data
data = np.array(self.data)
# --------------------------------------------------------------------
# cube
datacube = [data]
basenames = [self.basename]
# combine data into cube
for infile in infiles:
# check data is read for infile
infile.check_read()
# check that infile matches in name to self
if (self.name != infile.name) and same_type:
eargs = [func_name]
WLOG(params, 'error', TextEntry('00-001-00021', args=eargs))
# add to cube
datacube.append(infile.data)
basenames.append(infile.basename)
        # make datacube a numpy array
datacube = np.array(datacube)
# --------------------------------------------------------------------
# deal with math
# --------------------------------------------------------------------
# log what we are doing
WLOG(params, '', TextEntry('40-999-00004', args=[math]))
# if we want to sum the data
if math in ['sum', 'add', '+']:
with warnings.catch_warnings(record=True) as _:
data = mp.nansum(datacube, axis=0)
# else if we want to subtract the data
elif math in ['subtract', '-']:
for im in range(1, len(datacube)):
data -= datacube[im]
# else if we want to divide the data
elif math in ['divide', '/']:
for im in range(1, len(datacube)):
data /= datacube[im]
        # else if we want to multiply the data
        elif math in ['multiply', 'multiple', 'times', '*']:
            for im in range(1, len(datacube)):
                data *= datacube[im]
        # else if we want the mean/average of the data
        elif math in ['average', 'mean']:
            with warnings.catch_warnings(record=True) as _:
                data = mp.nanmean(datacube, axis=0)
        # else if we want the median of the data
        elif math in ['median', 'med']:
            with warnings.catch_warnings(record=True) as _:
                data = mp.nanmedian(datacube, axis=0)
# else we have an error in math
else:
eargs = [math, ', '.join(available_math), func_name]
WLOG(params, 'error', TextEntry('00-001-00042', args=eargs))
# --------------------------------------------------------------------
# Need to setup a new filename
# get common prefix
prefix = drs_text.common_text(basenames, 'prefix')
suffix = drs_text.common_text(basenames, 'suffix')
basename = drs_text.combine_uncommon_text(basenames, prefix, suffix)
# update path and filename
if path is None:
path = self.path
filename = os.path.join(path, basename)
# --------------------------------------------------------------------
# construct keys for new DrsFitsFile
# set empty file attributes
nkwargs = dict()
nkwargs['name'] = self.name
nkwargs['filetype'] = self.filetype
nkwargs['suffix'] = self.suffix
nkwargs['remove_insuffix'] = self.remove_insuffix
nkwargs['prefix'] = self.prefix
nkwargs['recipe'] = self.recipe
nkwargs['fiber'] = self.fiber
nkwargs['fibers'] = self.fibers
nkwargs['rkeys'] = self.required_header_keys
nkwargs['filename'] = filename
nkwargs['path'] = path
nkwargs['basename'] = basename
nkwargs['inputdir'] = self.inputdir
nkwargs['directory'] = self.directory
nkwargs['data'] = data
nkwargs['header'] = self.header
nkwargs['shape'] = data.shape
nkwargs['hdict'] = self.hdict
nkwargs['output_dict'] = self.output_dict
nkwargs['fileset'] = self.fileset
nkwargs['filesetnames'] = self.filesetnames
nkwargs['outfunc'] = self.outfunc
nkwargs['is_combined'] = True
nkwargs['combined_list'] = list(basenames)
# return new instance of DrsFitsFile
return DrsFitsFile(**nkwargs)
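    # Illustrative note (not from the original source): a typical (hypothetical)
    # call would be combined = files[0].combine(files[1:], math='median'), which
    # stacks the data arrays into a cube, takes a NaN-safe median along axis 0
    # and returns a new DrsFitsFile whose basename is built from the common
    # prefix/suffix of the input basenames.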
# -------------------------------------------------------------------------
# fits file header methods
# -------------------------------------------------------------------------
def get_key(self, key, has_default=False, default=None, required=True,
dtype: Type = float, listtype=None):
# set function name
_ = display_func(None, 'get_key', __NAME__, 'DrsFitsFile')
# run read_header_key method
return self.read_header_key(key, has_default, default, required, dtype,
listtype=listtype)
def read_header_key(self, key, has_default=False, default=None,
required=True, dtype: Type = float, listtype=None):
"""
Looks for a key in DrsFile.header, if has_default is
True sets value of key to 'default' if not found else if "required"
logs an error
        :param key: string, key to find; if present in the parameter dict it is
                    treated as a keyword store and translated, otherwise it is
                    looked up in DrsFile.header directly
        :param has_default: bool, if True uses "default" as the value if key
                            not found
        :param default: object, value of the key if not found and
                        has_default is True
        :param required: bool, if True the key is required and an error is
                         raised if it is missing; if False and the key is not
                         found, None is returned
        :param dtype: type, the data type for output
        :param listtype: type or None, if dtype is list, the type to cast each
                         list element to
        :return: the value from the header for key
"""
# set function name
func_name = display_func(None, 'read_header_key', __NAME__,
'DrsFitsFile')
# check that recipe is set
self.check_recipe()
# check that data is read
self.check_read(header_only=True, load=True)
# check key is valid
drskey = self._check_key(key)
# if we have a default key try to get key else use default value
if has_default:
value = self.header.get(drskey, default)
# else we must look for the value manually and handle the exception
else:
try:
value = self.header[drskey]
except KeyError:
# if we do not require this keyword don't generate an error
# just return None
if not required:
return None
# else generate an error
else:
if key == drskey:
eargs = [drskey, self.filename, func_name]
emsg = TextEntry('09-000-00006', args=eargs)
else:
eargs = [drskey, self.filename, key, func_name]
emsg = TextEntry('09-000-00006', args=eargs)
self.__error__(emsg)
value = None
# deal with booleans
if isinstance(value, str):
# if dtype is a bool try to push to a boolean
if dtype == bool or dtype == 'bool':
if value.upper() in ['1', 'TRUE', 'T']:
value = True
else:
value = False
# deal with input lists
if isinstance(value, str):
# if dtype is a list
if dtype == list:
# try to split the value as a list
value = value.split(',')
value = list(np.char.array(value).strip())
# cast to required list type
if listtype is not None and isinstance(listtype, type):
value = list(map(lambda x: listtype(x), value))
# try to convert to dtype else just return as string
try:
value = dtype(value)
except ValueError:
value = str(value)
except TypeError:
value = str(value)
# return value
return value
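    # Illustrative note (not from the original source): with dtype=bool a header
    # value of '1', 'TRUE' or 'T' becomes True and anything else False; with
    # dtype=list a value such as 'A, B, C' becomes ['A', 'B', 'C'], each element
    # being cast to listtype when listtype is given.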
def read_header_keys(self, keys, has_default=False, defaults=None):
"""
Looks for a set of keys in DrsFile.header, if has_default is
True sets value of key to 'default' if not found else if "required"
logs an error
:param keys: string, key in the dictionary to find
:param has_default: bool, if True uses "default" as the value if key
not found
:param defaults: object, value of the key if not found and
has_default is True
:return value: object, value of DrsFile.header[key] or default (if
has_default=True)
"""
# set function name
func_name = display_func(None, 'read_header_keys', __NAME__,
'DrsFitsFile')
# check that recipe is set
self.check_recipe()
# check that data is read
self.check_read(header_only=True, load=True)
# make sure keys is a list
try:
keys = list(keys)
except TypeError:
self.__error__(TextEntry('00-001-00005', args=[func_name]))
# if defaults is None --> list of Nones else make sure defaults
# is a list
if defaults is None:
defaults = list(np.repeat([None], len(keys)))
else:
try:
defaults = list(defaults)
if len(defaults) != len(keys):
self.__error__(TextEntry('00-001-00006', args=[func_name]))
except TypeError:
self.__error__(TextEntry('00-001-00007', args=[func_name]))
# loop around keys and look up each key
values = []
for k_it, key in enumerate(keys):
# get the value for key
v = self.read_header_key(key, has_default, default=defaults[k_it])
# append value to values list
values.append(v)
# return values
return values
def read_header_key_1d_list(self, key, dim1=None, dtype=float,
start=0, excludes=None, includes=None,
elogic='AND', ilogic='AND'):
"""
Read a set of header keys that were created from a 1D list
:param key: string, prefix of HEADER key to construct 1D list from
key[row number]
:param dim1: int, the number of elements in dimension 1
(number of rows) if unset tries to guess number of rows
:param dtype: type, the type to force the data to be (i.e. float, int)
        :param start: int, the start index for the 1d list (normally 0)
        :param excludes: str or list or None, filter(s) used to reject
                         wildcard-matched keys (only used when dim1 is None)
        :param includes: str or list or None, filter(s) used to keep
                         wildcard-matched keys (only used when dim1 is None)
        :param elogic: str, 'AND' or 'OR', logic used to combine the excludes
        :param ilogic: str, 'AND' or 'OR', logic used to combine the includes
        :type key: str
        :type dim1: Union[int, None]
        :type dtype: type
        :type start: int
        :return values: numpy array (1D), the values forced to type = dtype
"""
# set function name
func_name = display_func(None, 'read_header_key_1d_list', __NAME__,
'DrsFitsFile')
# check that data is read
self.check_read(header_only=True, load=True)
# check key is valid
drskey = self._check_key(key)
# ------------------------------------------------------------------
# deal with no dim1 key
if dim1 is None:
# use wild card to try to find keys
wildkey = drskey.split('{')[0] + '*' + drskey.split('}')[-1]
# use wild card in header
rawwildvalues = list(self.header[wildkey].keys())
# deal with includes/excludes
ieargs = [rawwildvalues, includes, excludes, ilogic, elogic]
wildvalues = drs_strings.include_exclude(*ieargs)
# deal with no wild card values found
if wildvalues is None:
eargs = [wildkey, dim1, self.basename, func_name]
self.__error__(TextEntry('09-000-00008', args=eargs))
# else get the length of dim1
else:
dim1 = len(wildvalues)
# ------------------------------------------------------------------
# create 1d list
values = []
# loop around the 2D array
for it in range(start, dim1 + start):
# construct the key name
keyname = test_for_formatting(drskey, it)
# try to get the values
try:
# set the value
values.append(dtype(self.header[keyname]))
except KeyError:
eargs = [keyname, dim1, self.basename, func_name]
self.__error__(TextEntry('09-000-00008', args=eargs))
values = None
# return values
return np.array(values)
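    # Illustrative note (not from the original source): for a hypothetical
    # header containing KEY0, KEY1 and KEY2, read_header_key_1d_list with
    # dim1=3 returns np.array([header['KEY0'], header['KEY1'], header['KEY2']])
    # cast to dtype; with dim1=None the length is inferred from a wildcard
    # search of the header, optionally filtered by includes/excludes.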
def read_header_key_2d_list(self, key, dim1, dim2, dtype=float):
"""
Read a set of header keys that were created from a 2D list
:param key: string, prefix of HEADER key to construct 2D list from
key[number]
where number = (row number * number of columns) + column number
where column number = dim2 and row number = range(0, dim1)
:param dim1: int, the number of elements in dimension 1
(number of rows)
:param dim2: int, the number of columns in dimension 2
(number of columns)
:param dtype: type, the type to force the data to be (i.e. float, int)
        :return values: numpy array (2D), the values forced to type = dtype
"""
# set function name
func_name = display_func(None, 'read_header_key_2d_list', __NAME__,
'DrsFitsFile')
# check that data is read
self.check_read(header_only=True, load=True)
# check key is valid
drskey = self._check_key(key)
# create 2d list
        values = np.zeros((dim1, dim2), dtype=dtype)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import argparse
from collections import OrderedDict
import numpy as np
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['svg.fonttype'] = 'none'
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import matplotlib.gridspec as gridspec
from matplotlib import ticker
import copy
import sys
import plotly.offline as py
import plotly.graph_objs as go
# own modules
from deeptools import cm # noqa: F401
from deeptools import parserCommon
from deeptools import heatmapper
from deeptools.heatmapper_utilities import plot_single, plotly_single
from deeptools.utilities import convertCmap
from deeptools.computeMatrixOperations import filterHeatmapValues
debug = 0
old_settings = np.seterr(all='ignore')
plt.ioff()
def parse_arguments(args=None):
parser = argparse.ArgumentParser(
parents=[parserCommon.heatmapperMatrixArgs(),
parserCommon.heatmapperOutputArgs(mode='heatmap'),
parserCommon.heatmapperOptionalArgs(mode='heatmap')],
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='This tool creates a heatmap for '
'scores associated with genomic regions. '
'The program requires a matrix file '
'generated by the tool ``computeMatrix``.',
epilog='An example usage is: plotHeatmap -m <matrix file>',
add_help=False)
return parser
def process_args(args=None):
args = parse_arguments().parse_args(args)
args.heatmapHeight = args.heatmapHeight if args.heatmapHeight > 3 and args.heatmapHeight <= 100 else 10
if not matplotlib.colors.is_color_like(args.missingDataColor):
exit("The value {0} for --missingDataColor is not valid".format(args.missingDataColor))
args.boxAroundHeatmaps = True if args.boxAroundHeatmaps == 'yes' else False
return args
def prepare_layout(hm_matrix, heatmapsize, showSummaryPlot, showColorbar, perGroup, colorbar_position):
"""
    prepare the plot layout
    as a grid having as many columns
    as samples (+1 for the colorbar)
    and as many rows as groups (or clusters) (+1 for the profile plot)
"""
heatmapwidth, heatmapheight = heatmapsize
numcols = hm_matrix.get_num_samples()
numrows = hm_matrix.get_num_groups()
if perGroup:
numcols, numrows = numrows, numcols
    # the rows have different sizes depending on the number of
    # regions contained in the heatmap
    if perGroup:
        height_ratio = np.array([np.amax(np.diff(hm_matrix.group_boundaries))] * numrows)
        # scale ratio to sum = heatmapheight
        height_ratio = heatmapheight * (height_ratio.astype(float) / height_ratio.sum())
    else:
        height_ratio = np.diff(hm_matrix.group_boundaries)
        # scale ratio to sum = heatmapheight
        height_ratio = heatmapheight * (height_ratio.astype(float) / height_ratio.sum())
# convert the height_ratio from numpy array back to list
height_ratio = height_ratio.tolist()
# the width ratio is equal for all heatmaps
width_ratio = [heatmapwidth] * numcols
if showColorbar:
if colorbar_position == 'below':
numrows += 2 # a spacer needs to be added to avoid overlaps
height_ratio += [4 / 2.54] # spacer
height_ratio += [1 / 2.54]
else:
numcols += 1
width_ratio += [1 / 2.54]
if showSummaryPlot:
numrows += 2 # plus 2 because a spacer is added
# make height of summary plot
# proportional to the width of heatmap
sumplot_height = heatmapwidth
spacer_height = heatmapwidth / 8
# scale height_ratios to convert from row
# numbers to heatmapheigt fractions
height_ratio = np.concatenate([[sumplot_height, spacer_height], height_ratio])
grids = gridspec.GridSpec(numrows, numcols, height_ratios=height_ratio, width_ratios=width_ratio)
return grids
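def _example_layout_grid():
    # Hypothetical sketch (not part of plotHeatmap): a minimal GridSpec built in
    # the same spirit as prepare_layout, with one summary-plot row, one spacer
    # row and one heatmap row per group; heights and widths are given as ratios.
    height_ratio = [7.5, 7.5 / 8, 10.0, 5.0]  # summary, spacer, two groups
    width_ratio = [7.5, 7.5]                  # two samples
    return gridspec.GridSpec(len(height_ratio), len(width_ratio),
                             height_ratios=height_ratio,
                             width_ratios=width_ratio)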
def addProfilePlot(hm, plt, fig, grids, iterNum, iterNum2, perGroup, averageType, plot_type, yAxisLabel, color_list, yMin, yMax, wspace, hspace, colorbar_position, label_rotation=0.0):
"""
A function to add profile plots to the given figure, possibly in a custom grid subplot which mimics a tight layout (if wspace and hspace are not None)
"""
if wspace is not None and hspace is not None:
if colorbar_position == 'side':
gridsSub = gridspec.GridSpecFromSubplotSpec(1, iterNum, subplot_spec=grids[0, :-1], wspace=wspace, hspace=hspace)
else:
gridsSub = gridspec.GridSpecFromSubplotSpec(1, iterNum, subplot_spec=grids[0, :], wspace=wspace, hspace=hspace)
ax_list = []
globalYmin = np.inf
globalYmax = -np.inf
for sample_id in range(iterNum):
if perGroup:
title = hm.matrix.group_labels[sample_id]
tickIdx = sample_id % hm.matrix.get_num_samples()
else:
title = hm.matrix.sample_labels[sample_id]
tickIdx = sample_id
if sample_id > 0 and len(yMin) == 1 and len(yMax) == 1:
ax_profile = fig.add_subplot(grids[0, sample_id])
else:
if wspace is not None and hspace is not None:
ax_profile = fig.add_subplot(gridsSub[0, sample_id])
else:
ax_profile = fig.add_subplot(grids[0, sample_id])
ax_profile.set_title(title)
for group in range(iterNum2):
if perGroup:
sub_matrix = hm.matrix.get_matrix(sample_id, group)
line_label = sub_matrix['sample']
else:
sub_matrix = hm.matrix.get_matrix(group, sample_id)
line_label = sub_matrix['group']
plot_single(ax_profile, sub_matrix['matrix'],
averageType,
color_list[group],
line_label,
plot_type=plot_type)
if sample_id > 0 and len(yMin) == 1 and len(yMax) == 1:
plt.setp(ax_profile.get_yticklabels(), visible=False)
if sample_id == 0 and yAxisLabel != '':
ax_profile.set_ylabel(yAxisLabel)
xticks, xtickslabel = hm.getTicks(tickIdx)
if np.ceil(max(xticks)) != float(sub_matrix['matrix'].shape[1] - 1):
tickscale = float(sub_matrix['matrix'].shape[1] - 1) / max(xticks)
xticks_use = [x * tickscale for x in xticks]
ax_profile.axes.set_xticks(xticks_use)
else:
ax_profile.axes.set_xticks(xticks)
ax_profile.axes.set_xticklabels(xtickslabel, rotation=label_rotation)
ax_list.append(ax_profile)
# align the first and last label
# such that they don't fall off
# the heatmap sides
ticks = ax_profile.xaxis.get_major_ticks()
ticks[0].label1.set_horizontalalignment('left')
ticks[-1].label1.set_horizontalalignment('right')
globalYmin = min(np.float64(globalYmin), ax_profile.get_ylim()[0])
globalYmax = max(globalYmax, ax_profile.get_ylim()[1])
# It turns out that set_ylim only takes np.float64s
for sample_id, subplot in enumerate(ax_list):
localYMin = yMin[sample_id % len(yMin)]
localYMax = yMax[sample_id % len(yMax)]
lims = [globalYmin, globalYmax]
if localYMin:
if localYMax:
lims = (np.float64(localYMin), np.float64(localYMax))
else:
lims = (np.float64(localYMin), lims[1])
elif localYMax:
lims = (lims[0], np.float64(localYMax))
if lims[0] >= lims[1]:
lims = (lims[0], lims[0] + 1)
ax_list[sample_id].set_ylim(lims)
return ax_list
def plotlyMatrix(hm,
outFilename,
yMin=[None], yMax=[None],
zMin=[None], zMax=[None],
showSummaryPlot=False,
cmap=None, colorList=None, colorBarPosition='side',
perGroup=False,
averageType='median', yAxisLabel='', xAxisLabel='',
plotTitle='',
showColorbar=False,
label_rotation=0.0):
label_rotation *= -1.0
if colorBarPosition != 'side':
        sys.stderr.write("Warning: It is not currently possible to have multiple colorbars with plotly!\n")
nRows = hm.matrix.get_num_groups()
nCols = hm.matrix.get_num_samples()
if perGroup:
nRows, nCols = nCols, nRows
profileHeight = 0.0
profileBottomBuffer = 0.0
if showSummaryPlot:
profileHeight = 0.2
profileBottomBuffer = 0.05
profileSideBuffer = 0.
profileWidth = 1. / nCols
if nCols > 1:
profileSideBuffer = 0.1 / (nCols - 1)
profileWidth = 0.9 / nCols
dataSummary = []
annos = []
fig = go.Figure()
fig['layout'].update(title=plotTitle)
xAxisN = 1
yAxisN = 1
# Summary plots at the top (if appropriate)
if showSummaryPlot:
yMinLocal = np.inf
yMaxLocal = -np.inf
for i in range(nCols):
xanchor = 'x{}'.format(xAxisN)
yanchor = 'y{}'.format(yAxisN)
xBase = i * (profileSideBuffer + profileWidth)
yBase = 1 - profileHeight
xDomain = [xBase, xBase + profileWidth]
yDomain = [yBase, 1.0]
for j in range(nRows):
if perGroup:
mat = hm.matrix.get_matrix(i, j)
xTicks, xTicksLabels = hm.getTicks(i)
label = mat['sample']
else:
mat = hm.matrix.get_matrix(j, i)
xTicks, xTicksLabels = hm.getTicks(j)
label = mat['group']
if j == 0:
fig['layout']['xaxis{}'.format(xAxisN)] = dict(domain=xDomain, anchor=yanchor, range=[0, mat['matrix'].shape[1]], tickmode='array', tickvals=xTicks, ticktext=xTicksLabels, tickangle=label_rotation)
fig['layout']['yaxis{}'.format(yAxisN)] = dict(anchor=xanchor, domain=yDomain)
trace = plotly_single(mat['matrix'], averageType, colorList[j], label)[0]
trace.update(xaxis=xanchor, yaxis=yanchor, legendgroup=label)
if min(trace['y']) < yMinLocal:
yMinLocal = min(trace['y'])
if max(trace['y']) > yMaxLocal:
yMaxLocal = max(trace['y'])
if i == 0:
trace.update(showlegend=True)
dataSummary.append(trace)
# Add the column label
if perGroup:
title = hm.matrix.group_labels[i]
else:
title = hm.matrix.sample_labels[i]
titleX = xBase + 0.5 * profileWidth
annos.append({'yanchor': 'bottom', 'xref': 'paper', 'xanchor': 'center', 'yref': 'paper', 'text': title, 'y': 1.0, 'x': titleX, 'font': {'size': 16}, 'showarrow': False})
xAxisN += 1
yAxisN += 1
# Adjust y-bounds as appropriate:
for i in range(1, yAxisN):
yMinUse = yMinLocal
if yMin[(i - 1) % len(yMin)] is not None:
yMinUse = yMin[(i - 1) % len(yMin)]
yMaxUse = yMaxLocal
if yMax[(i - 1) % len(yMax)] is not None:
yMaxUse = yMax[(i - 1) % len(yMax)]
fig['layout']['yaxis{}'.format(i)].update(range=[yMinUse, yMaxUse])
fig['layout']['yaxis1'].update(title=yAxisLabel)
# Add the heatmap
dataHeatmap = []
zMinLocal = np.inf
zMaxLocal = -np.inf
heatmapWidth = 1. / nCols
heatmapSideBuffer = 0.0
if nCols > 1:
heatmapWidth = .9 / nCols
heatmapSideBuffer = 0.1 / (nCols - 1)
heatmapHeight = 1.0 - profileHeight - profileBottomBuffer
for i in range(nCols):
xanchor = 'x{}'.format(xAxisN)
xBase = i * (heatmapSideBuffer + heatmapWidth)
# Determine the height of each heatmap, they have no buffer
lengths = [0.0]
for j in range(nRows):
if perGroup:
mat = hm.matrix.get_matrix(i, j)
else:
mat = hm.matrix.get_matrix(j, i)
lengths.append(mat['matrix'].shape[0])
fractionalHeights = heatmapHeight * np.cumsum(lengths).astype(float) / np.sum(lengths).astype(float)
xDomain = [xBase, xBase + heatmapWidth]
fig['layout']['xaxis{}'.format(xAxisN)] = dict(domain=xDomain, anchor='free', position=0.0, range=[0, mat['matrix'].shape[1]], tickmode='array', tickvals=xTicks, ticktext=xTicksLabels, title=xAxisLabel)
# Start adding the heatmaps
for j in range(nRows):
if perGroup:
mat = hm.matrix.get_matrix(i, j)
label = mat['sample']
start = hm.matrix.group_boundaries[i]
end = hm.matrix.group_boundaries[i + 1]
else:
mat = hm.matrix.get_matrix(j, i)
label = mat['group']
start = hm.matrix.group_boundaries[j]
end = hm.matrix.group_boundaries[j + 1]
regs = hm.matrix.regions[start:end]
regs = [x[2] for x in regs]
yanchor = 'y{}'.format(yAxisN)
yDomain = [heatmapHeight - fractionalHeights[j + 1], heatmapHeight - fractionalHeights[j]]
visible = False
if i == 0:
visible = True
fig['layout']['yaxis{}'.format(yAxisN)] = dict(domain=yDomain, anchor=xanchor, visible=visible, title=label, tickmode='array', tickvals=[], ticktext=[])
if np.min(mat['matrix']) < zMinLocal:
zMinLocal = np.min(mat['matrix'])
            if np.max(mat['matrix']) > zMaxLocal:
                zMaxLocal = np.max(mat['matrix'])
trace = go.Heatmap(z=np.flipud(mat['matrix']),
y=regs[::-1],
xaxis=xanchor,
yaxis=yanchor,
showlegend=False,
name=label,
showscale=False)
dataHeatmap.append(trace)
yAxisN += 1
xAxisN += 1
if showColorbar:
dataHeatmap[-1].update(showscale=True)
dataHeatmap[-1]['colorbar'].update(len=heatmapHeight, y=0, yanchor='bottom', ypad=0.0)
# Adjust z bounds and colorscale
for trace in dataHeatmap:
zMinUse = zMinLocal
zMaxUse = zMaxLocal
if zMin[0] is not None:
zMinUse = zMin[0]
if zMax[0] is not None:
zMaxUse = zMax[0]
trace.update(zmin=zMinUse, zmax=zMaxUse, colorscale=convertCmap(cmap[0], vmin=zMinUse, vmax=zMaxUse))
dataSummary.extend(dataHeatmap)
fig.add_traces(dataSummary)
fig['layout']['annotations'] = annos
py.plot(fig, filename=outFilename, auto_open=False)
def plotMatrix(hm, outFileName,
colorMapDict={'colorMap': ['binary'], 'missingDataColor': 'black', 'alpha': 1.0},
plotTitle='',
xAxisLabel='', yAxisLabel='', regionsLabel='',
zMin=None, zMax=None,
yMin=None, yMax=None,
averageType='median',
reference_point_label=None,
startLabel='TSS', endLabel="TES",
heatmapHeight=25,
heatmapWidth=7.5,
perGroup=False, whatToShow='plot, heatmap and colorbar',
plot_type='lines',
linesAtTickMarks=False,
image_format=None,
legend_location='upper-left',
box_around_heatmaps=True,
label_rotation=0.0,
dpi=200,
interpolation_method='auto'):
hm.reference_point_label = hm.parameters['ref point']
if reference_point_label is not None:
hm.reference_point_label = [reference_point_label] * hm.matrix.get_num_samples()
hm.startLabel = startLabel
hm.endLabel = endLabel
matrix_flatten = None
if zMin is None:
matrix_flatten = hm.matrix.flatten()
# try to avoid outliers by using np.percentile
zMin = np.percentile(matrix_flatten, 1.0)
if np.isnan(zMin):
zMin = [None]
else:
zMin = [zMin] # convert to list to support multiple entries
elif 'auto' in zMin:
matrix_flatten = hm.matrix.flatten()
auto_min = np.percentile(matrix_flatten, 1.0)
if np.isnan(auto_min):
auto_min = None
new_mins = [float(x) if x != 'auto' else auto_min for x in zMin]
zMin = new_mins
else:
new_mins = [float(x) for x in zMin]
zMin = new_mins
if zMax is None:
if matrix_flatten is None:
matrix_flatten = hm.matrix.flatten()
# try to avoid outliers by using np.percentile
zMax = np.percentile(matrix_flatten, 98.0)
if np.isnan(zMax) or zMax <= zMin[0]:
zMax = [None]
else:
zMax = [zMax]
elif 'auto' in zMax:
matrix_flatten = hm.matrix.flatten()
auto_max = np.percentile(matrix_flatten, 98.0)
if np.isnan(auto_max):
auto_max = None
new_maxs = [float(x) if x != 'auto' else auto_max for x in zMax]
zMax = new_maxs
else:
new_maxs = [float(x) for x in zMax]
zMax = new_maxs
if (len(zMin) > 1) & (len(zMax) > 1):
for index, value in enumerate(zMax):
if value <= zMin[index]:
sys.stderr.write("Warnirng: In bigwig {}, the given zmin ({}) is larger than "
"or equal to the given zmax ({}). Thus, it has been set "
"to None. \n".format(index + 1, zMin[index], value))
zMin[index] = None
if yMin is None:
yMin = [None]
if yMax is None:
yMax = [None]
if not isinstance(yMin, list):
yMin = [yMin]
if not isinstance(yMax, list):
yMax = [yMax]
plt.rcParams['font.size'] = 8.0
fontP = FontProperties()
showSummaryPlot = False
showColorbar = False
if whatToShow == 'plot and heatmap':
showSummaryPlot = True
elif whatToShow == 'heatmap and colorbar':
showColorbar = True
elif whatToShow == 'plot, heatmap and colorbar':
showSummaryPlot = True
showColorbar = True
# colormap for the heatmap
if colorMapDict['colorMap']:
cmap = []
for color_map in colorMapDict['colorMap']:
copy_cmp = copy.copy(plt.get_cmap(color_map))
cmap.append(copy_cmp)
cmap[-1].set_bad(colorMapDict['missingDataColor']) # nans are printed using this color
if colorMapDict['colorList'] and len(colorMapDict['colorList']) > 0:
# make a cmap for each color list given
cmap = []
for color_list in colorMapDict['colorList']:
cmap.append(matplotlib.colors.LinearSegmentedColormap.from_list(
'my_cmap', color_list.replace(' ', '').split(","), N=colorMapDict['colorNumber']))
cmap[-1].set_bad(colorMapDict['missingDataColor']) # nans are printed using this color
if len(cmap) > 1 or len(zMin) > 1 or len(zMax) > 1:
# position color bar below heatmap when more than one
# heatmap color is given
colorbar_position = 'below'
else:
colorbar_position = 'side'
grids = prepare_layout(hm.matrix, (heatmapWidth, heatmapHeight),
showSummaryPlot, showColorbar, perGroup, colorbar_position)
# figsize: w,h tuple in inches
figwidth = heatmapWidth / 2.54
figheight = heatmapHeight / 2.54
if showSummaryPlot:
        # the summary plot occupies a height
# equal to the fig width
figheight += figwidth
numsamples = hm.matrix.get_num_samples()
if perGroup:
num_cols = hm.matrix.get_num_groups()
else:
num_cols = numsamples
total_figwidth = figwidth * num_cols
if showColorbar:
if colorbar_position == 'below':
figheight += 1 / 2.54
else:
total_figwidth += 1 / 2.54
fig = plt.figure(figsize=(total_figwidth, figheight))
fig.suptitle(plotTitle, y=1 - (0.06 / figheight))
# color map for the summary plot (profile) on top of the heatmap
cmap_plot = plt.get_cmap('jet')
numgroups = hm.matrix.get_num_groups()
if perGroup:
color_list = cmap_plot(np.arange(hm.matrix.get_num_samples()) / hm.matrix.get_num_samples())
else:
        color_list = cmap_plot(np.arange(numgroups) / numgroups)
"""
Particle Filter helper functions
"""
import configparser
import json
import math
import os
from collections import defaultdict
from io import BytesIO
from itertools import permutations
from itertools import product
from pathlib import Path
import imageio
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
from PIL import Image
from scipy.ndimage.filters import gaussian_filter
from .definitions import RUN_DIR
def permute_particle(particle):
return np.hstack((particle[4:], particle[:4]))
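def _example_permute_particle():
    # Hypothetical illustration (not part of the original module): shows how
    # permute_particle swaps the two 4-element target blocks of an 8-element
    # particle state; it assumes exactly two targets with 4 state dims each.
    particle = np.arange(8)  # indices 0-3 -> target 1, indices 4-7 -> target 2
    swapped = permute_particle(particle)
    # swapped is array([4, 5, 6, 7, 0, 1, 2, 3])
    return swapped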
def particle_swap(env):
# 2000 x 8
particles = np.copy(env.pf.particles)
n_targets = env.state.n_targets
state_dim = 4
# convert particles to cartesian
for i in range(n_targets):
x, y = pol2cart(particles[:, state_dim*i],
np.radians(particles[:, (state_dim*i)+1]))
particles[:, state_dim*i] = x
particles[:, (state_dim*i)+1] = y
swapped = True
k = 0
while swapped and k < 10:
k += 1
swapped = False
for i in range(len(particles)):
original_particle = np.copy(particles[i])
            # per-target centroid (x, y) of the particle cloud
            target_centroids = [
                np.mean(particles[:, state_dim*t:(state_dim*t)+2], axis=0)
                for t in range(n_targets)]
distance = 0
for t in range(n_targets):
dif = particles[i, state_dim *
t:(state_dim*t)+2] - target_centroids[t]
distance += np.dot(dif, dif)
permuted_particle = permute_particle(particles[i])
particles[i] = permuted_particle
            permuted_target_centroids = [
                np.mean(particles[:, state_dim*t:(state_dim*t)+2], axis=0)
                for t in range(n_targets)]
permuted_distance = 0
for t in range(n_targets):
dif = particles[i, state_dim *
t:(state_dim*t)+2] - permuted_target_centroids[t]
permuted_distance += np.dot(dif, dif)
if distance < permuted_distance:
particles[i] = original_particle
else:
swapped = True
# convert particles to polar
for i in range(n_targets):
rho, phi = cart2pol(
particles[:, state_dim*i], particles[:, (state_dim*i)+1])
particles[:, state_dim*i] = rho
particles[:, (state_dim*i)+1] = np.degrees(phi)
env.pf.particles = particles
def pol2cart(rho, phi):
"""
Transform polar to cartesian
"""
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return(x, y)
def cart2pol(x, y):
"""
Transform cartesian to polar
"""
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return rho, phi
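def _example_polar_cartesian_roundtrip():
    # Hypothetical illustration (not part of the original module): pol2cart and
    # cart2pol are inverses of each other up to floating point error; phi is
    # expressed in radians in both directions.
    x, y = pol2cart(5.0, np.radians(30.0))
    rho, phi = cart2pol(x, y)
    return rho, np.degrees(phi)  # approximately (5.0, 30.0)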
def get_distance(coord1, coord2):
"""
Get the distance between two coordinates
"""
if (coord1 is None) or (coord2 is None):
return None
lat1, long1 = coord1
lat2, long2 = coord2
# approximate radius of earth in km
R = 6373.0
lat1 = np.radians(lat1)
long1 = np.radians(long1)
lat2 = np.radians(lat2)
long2 = np.radians(long2)
dlon = long2 - long1
dlat = lat2 - lat1
a = np.sin(dlat / 2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2)**2
c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
distance = R * c
return distance*(1e3)
def get_bearing(coord1, coord2):
"""
Get the bearing of two coordinates
"""
if (coord1 is None) or (coord2 is None):
return None
lat1, long1 = coord1
lat2, long2 = coord2
dLon = (long2 - long1)
x = np.cos(np.radians(lat2)) * np.sin(np.radians(dLon))
y = np.cos(np.radians(lat1)) * np.sin(np.radians(lat2)) - \
np.sin(np.radians(lat1)) * np.cos(np.radians(lat2)) * \
np.cos(np.radians(dLon))
brng = np.arctan2(x, y)
brng = np.degrees(brng)
return -brng + 90
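def _example_distance_and_bearing():
    # Hypothetical illustration (not part of the original module): great-circle
    # distance in metres and bearing (in the math convention returned above,
    # i.e. 0 deg = east, counter-clockwise positive) between two made-up
    # coordinates; the coordinate values are for illustration only.
    coord1 = (45.5231, -122.6765)
    coord2 = (45.5240, -122.6750)
    return get_distance(coord1, coord2), get_bearing(coord1, coord2)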
def is_float(element):
"""
Check if an element is a float or not
"""
try:
float(element)
return True
except (ValueError, TypeError):
return False
class GPSVis:
"""
modified from:
https://github.com/tisljaricleo/GPS-visualization-Python
MIT License
Copyright (c) 2021 <NAME>
Class for GPS data visualization using pre-downloaded OSM map in image format.
"""
def __init__(self, position=None, map_path=None, bounds=None):
"""
        :param position: (lat, lon) tuple; when no pre-downloaded map is given,
                         OSM tiles around this position are downloaded instead.
:param map_path: Path to pre-downloaded OSM map in image format.
:param bounds: Upper-left, and lower-right GPS points of the map (lat1, lon1, lat2, lon2).
"""
self.position = position
self.map_path = map_path
self.bounds = bounds
if self.map_path is not None and self.bounds is not None:
self.img = self.create_image_from_map()
elif self.position is not None:
self.zoom = 17
self.TILE_SIZE = 256
distance = 100
coord = self.position
lat_dist = distance/111111
lon_dist = distance / (111111 * np.cos(np.radians(coord[0])))
top, bot = coord[0] + lat_dist, coord[0] - lat_dist
lef, rgt = coord[1] - lon_dist, coord[1] + lon_dist
self.bounds = [top, lef, bot, rgt]
self.img = self.create_image_from_position()
self.get_ticks()
self.cell_size = 1
self.xedges = np.arange(0, self.width_meters +
self.cell_size, self.cell_size)
self.yedges = np.arange(0, self.height_meters +
self.cell_size, self.cell_size)
def plot_map(self, axis1=None, output=None, save_as='resultMap.png'):
"""
Method for plotting the map. You can choose to save it in file or to plot it.
:param output: Type 'plot' to show the map or 'save' to save it. Default None
:param save_as: Name and type of the resulting image.
:return:
"""
# create Fig and Axis if doesn't exist
if axis1 is None:
fig, axis1 = plt.subplots(figsize=(10, 13))
# Plot background map
axis1.imshow(np.flipud(self.img), alpha=0.7, origin='lower')
# Set axis dimensions, labels and tick marks
axis1.set_xlim(0, int(self.width_meters))
axis1.set_ylim(0, int(self.height_meters))
axis1.set_xlabel('Longitude')
axis1.set_ylabel('Latitude')
axis1.set_xticks(np.linspace(0, int(self.width_meters), num=8))
axis1.set_xticklabels(self.x_ticks, rotation=30, ha='center')
axis1.set_yticks(np.linspace(0, int(self.height_meters), num=8))
axis1.set_yticklabels(self.y_ticks)
axis1.grid()
# Save or display
if output == 'save':
plt.savefig(save_as)
elif output == 'plot':
plt.show()
def point_to_pixels(self, lat, lon, zoom):
"""convert gps coordinates to web mercator"""
r = math.pow(2, zoom) * self.TILE_SIZE
lat = math.radians(lat)
x = int((lon + 180.0) / 360.0 * r)
y = int(
(1.0 - math.log(math.tan(lat) + (1.0 / math.cos(lat))) / math.pi) / 2.0 * r)
return x, y
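    # Illustrative note (not from the original source): at zoom level z the web
    # mercator world is 2**z * TILE_SIZE pixels wide, so point_to_pixels maps a
    # (lat, lon) pair to global pixel coordinates; dividing by TILE_SIZE (as
    # done below) gives the OSM tile indices covering the bounding box.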
def create_image_from_position(self):
URL = 'https://tile.openstreetmap.org/{z}/{x}/{y}.png'.format
top, lef, bot, rgt = self.bounds
x0, y0 = self.point_to_pixels(top, lef, self.zoom)
x1, y1 = self.point_to_pixels(bot, rgt, self.zoom)
x0_tile, y0_tile = int(x0 / self.TILE_SIZE), int(y0 / self.TILE_SIZE)
x1_tile, y1_tile = math.ceil(
x1 / self.TILE_SIZE), math.ceil(y1 / self.TILE_SIZE)
assert (x1_tile - x0_tile) * (y1_tile -
y0_tile) < 50, "That's too many tiles!"
# full size image we'll add tiles to
img = Image.new('RGB', (
(x1_tile - x0_tile) * self.TILE_SIZE,
(y1_tile - y0_tile) * self.TILE_SIZE))
# loop through every tile inside our bounded box
for x_tile, y_tile in product(range(x0_tile, x1_tile), range(y0_tile, y1_tile)):
with requests.get(URL(x=x_tile, y=y_tile, z=self.zoom)) as resp:
tile_img = Image.open(BytesIO(resp.content))
# add each tile to the full size image
img.paste(
im=tile_img,
box=((x_tile - x0_tile) * self.TILE_SIZE, (y_tile - y0_tile) * self.TILE_SIZE))
x, y = x0_tile * self.TILE_SIZE, y0_tile * self.TILE_SIZE
img = img.crop((
int(x0-x), # left
int(y0-y), # top
int(x1-x), # right
int(y1-y))) # bottom
self.width_meters = get_distance(
(self.bounds[0], self.bounds[1]), (self.bounds[0], self.bounds[3]))
self.height_meters = get_distance(
(self.bounds[0], self.bounds[1]), (self.bounds[2], self.bounds[1]))
img = img.resize((int(self.width_meters), int(self.height_meters)))
return img
def create_image_from_map(self):
"""
        Create the image from the pre-downloaded map, resized so that one
        pixel corresponds to approximately one metre on the ground.
        :return: PIL Image of the resized map
"""
img = Image.open(self.map_path, 'r')
self.width_meters = get_distance(
(self.bounds[0], self.bounds[1]), (self.bounds[0], self.bounds[3]))
self.height_meters = get_distance(
(self.bounds[0], self.bounds[1]), (self.bounds[2], self.bounds[1]))
img = img.resize((int(self.width_meters), int(self.height_meters)))
print('background image size (pixels) = ', img.size)
return img
def scale_to_img(self, lat_lon, w_h):
"""
Conversion from latitude and longitude to the image pixels.
It is used for drawing the GPS records on the map image.
:param lat_lon: GPS record to draw (lat1, lon1).
:param w_h: Size of the map image (w, h).
:return: Tuple containing x and y coordinates to draw on map image.
"""
# https://gamedev.stackexchange.com/questions/33441/how-to-convert-a-number-from-one-min-max-set-to-another-min-max-set/33445
lat_old = (self.bounds[2], self.bounds[0])
new = (0, w_h[1])
y = ((lat_lon[0] - lat_old[0]) * (new[1] - new[0]) /
(lat_old[1] - lat_old[0])) + new[0]
lon_old = (self.bounds[1], self.bounds[3])
new = (0, w_h[0])
x = ((lat_lon[1] - lon_old[0]) * (new[1] - new[0]) /
(lon_old[1] - lon_old[0])) + new[0]
# y must be reversed because the orientation of the image in the matplotlib.
# image - (0, 0) in upper left corner; coordinate system - (0, 0) in lower left corner
return int(x), int(y) # w_h[1] - int(y)
def set_origin(self, lat_lon):
self.origin = self.scale_to_img(
lat_lon, (int(self.width_meters), int(self.height_meters)))
def get_ticks(self):
"""
Generates custom ticks based on the GPS coordinates of the map for the matplotlib output.
:return:
"""
self.x_ticks = map(
lambda x: round(x, 4),
np.linspace(self.bounds[1], self.bounds[3], num=8))
self.y_ticks = map(
lambda x: round(x, 4),
np.linspace(self.bounds[2], self.bounds[0], num=8))
# Ticks must be reversed because the orientation of the image in the matplotlib.
# image - (0, 0) in upper left corner; coordinate system - (0, 0) in lower left corner
self.y_ticks = list(self.y_ticks) # sorted(y_ticks, reverse=True)
self.x_ticks = list(self.x_ticks)
class Results:
'''
Results class for saving run results
to file with common format.
'''
def __init__(self, method_name='', global_start_time='', num_iters=0, plotting=False, config={}):
self.num_iters = num_iters
self.method_name = method_name
self.global_start_time = global_start_time
self.plotting = plotting
if not isinstance(self.plotting, bool):
if self.plotting in ('true', 'True'):
self.plotting = True
else:
self.plotting = False
self.native_plot = config.get('native_plot', 'false').lower()
self.plot_every_n = int(config.get('plot_every_n', 1))
self.make_gif = config.get('make_gif', 'false').lower()
self.namefile = f'{RUN_DIR}/{method_name}/{global_start_time}_data.csv'
self.plot_dir = config.get(
'plot_dir', f'{RUN_DIR}/{method_name}/{global_start_time}')
self.logdir = f'{RUN_DIR}/{method_name}/{global_start_time}_logs/'
if self.make_gif == 'true':
Path(self.plot_dir+'/png/').mkdir(parents=True, exist_ok=True)
Path(self.plot_dir+'/gif/').mkdir(parents=True, exist_ok=True)
Path(self.logdir).mkdir(parents=True, exist_ok=True)
self.col_names = ['time', 'run_time', 'target_state', 'sensor_state',
'action', 'observation', 'reward', 'collisions', 'lost',
'r_err', 'theta_err', 'heading_err', 'centroid_err', 'rmse', 'mae', 'inference_times', 'pf_cov']
self.pf_stats = defaultdict(list)
self.abs_target_hist = []
self.abs_sensor_hist = []
self.target_hist = []
self.sensor_hist = []
self.sensor_gps_hist = []
self.history_length = 50
self.time_step = 0
self.texts = []
self.openstreetmap = None
self.transform = None
self.expected_target_rssi = None
if config:
write_header_log(config, self.method_name, self.global_start_time)
def write_dataframe(self, run_data):
"""
Save dataframe to CSV file
"""
if os.path.isfile(self.namefile):
print('Updating file {}'.format(self.namefile))
else:
print('Saving file to {}'.format(self.namefile))
df = pd.DataFrame(run_data, columns=self.col_names)
df.to_csv(self.namefile)
def save_gif(self, run, sub_run=None):
filename = run if sub_run is None else '{}_{}'.format(run, sub_run)
# Build GIF
with imageio.get_writer('{}/gif/{}.gif'.format(self.plot_dir, filename), mode='I', fps=5) as writer:
for png_filename in sorted(os.listdir(self.plot_dir+'/png/'), key=lambda x: (len(x), x)):
image = imageio.imread(self.plot_dir+'/png/'+png_filename)
writer.append_data(image)
def live_plot(self, env, time_step=None, fig=None, ax=None, data=None):
"""
Create a live plot
"""
if self.openstreetmap is None and data.get('position', None) is not None and data.get('bearing', None) is not None:
self.openstreetmap = GPSVis(
position=data['position']
# map_path='map_delta_park.png', # Path to map downloaded from the OSM.
# bounds=(45.60311,-122.68450, 45.59494, -122.67505) # upper left, lower right
)
self.openstreetmap.set_origin(data['position'])
self.transform = np.array(
[self.openstreetmap.origin[0], self.openstreetmap.origin[1]])
self.time_step = time_step
self.pf_stats['mean_hypothesis'].append(
env.pf.mean_hypothesis if hasattr(env.pf, 'mean_hypothesis') else [None])
self.pf_stats['map_hypothesis'].append(
env.pf.map_hypothesis if hasattr(env.pf, 'map_hypothesis') else [None])
self.pf_stats['mean_state'].append(
env.pf.mean_state if hasattr(env.pf, 'mean_state') else [None])
self.pf_stats['map_state'].append(
env.pf.map_state if hasattr(env.pf, 'map_state') else [None])
abs_sensor = env.state.sensor_state
abs_particles = env.get_absolute_particles()
self.sensor_hist.append(abs_sensor)
target_bearing = None
target_relative_bearing = None
if data.get('position', None) is not None and data.get('drone_position', None) is not None and data.get('bearing', None) is not None:
target_bearing = get_bearing(
data['position'], data['drone_position'])
target_relative_bearing = target_bearing - data['bearing']
target_distance = get_distance(
data['position'], data['drone_position'])
self.expected_target_rssi = env.sensor.observation(
[[target_distance, target_relative_bearing, None, None]])[0]
ax.clear()
if self.openstreetmap is not None:
self.openstreetmap.plot_map(axis1=ax)
# TODO get variables
ax.set_title('Time = {}, Frequency = {}, Bandwidth = {}, Gain = {}'.format(
time_step, None, None, None))
color_array = [['salmon', 'darkred', 'red'],
['lightskyblue', 'darkblue', 'blue']]
lines = [] # https://matplotlib.org/3.5.0/api/_as_gen/matplotlib.pyplot.legend.html
# Plot Particles
for t in range(env.state.n_targets):
particles_x, particles_y = pol2cart(
abs_particles[:, t, 0], np.radians(abs_particles[:, t, 1]))
if self.transform is not None:
particles_x += self.transform[0]
particles_y += self.transform[1]
line1, = ax.plot(particles_x, particles_y, 'o',
color=color_array[t][0], markersize=4, markeredgecolor='black', label='particles', alpha=0.3, zorder=1)
if self.openstreetmap:
heatmap, xedges, yedges = np.histogram2d(particles_x, particles_y, bins=(
self.openstreetmap.xedges, self.openstreetmap.yedges))
heatmap = gaussian_filter(heatmap, sigma=8)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
im = ax.imshow(heatmap.T, extent=extent, origin='lower',
cmap='jet', interpolation='nearest', alpha=0.2)
# plt.colorbar(im)
centroid_x = np.mean(particles_x)
centroid_y = np.mean(particles_y)
line2, = ax.plot(centroid_x, centroid_y, '*', color='magenta',
markeredgecolor='black', label='centroid', markersize=12, zorder=2)
if t == 0:
lines.extend([line1, line2])
else:
lines.extend([])
# Plot Sensor
sensor_x, sensor_y = pol2cart(np.array(self.sensor_hist)[
:, 0], np.radians(np.array(self.sensor_hist)[:, 1]))
if self.transform is not None:
sensor_x += self.transform[0]
sensor_y += self.transform[1]
if len(self.sensor_hist) > 1:
ax.arrow(sensor_x[-2], sensor_y[-2], 4*(sensor_x[-1]-sensor_x[-2]),
4*(sensor_y[-1]-sensor_y[-2]), width=1.5, color='blue', zorder=4)
ax.plot(sensor_x[:-1], sensor_y[:-1], linewidth=3.0,
color='blue', markeredgecolor='black', markersize=4, zorder=4)
line4, = ax.plot(sensor_x[-1], sensor_y[-1], 'H',
color='blue', label='sensor', markersize=10, zorder=4)
lines.extend([line4])
if self.openstreetmap and data.get('drone_position', None) is not None:
self.target_hist.append(self.openstreetmap.scale_to_img(
data['drone_position'], (self.openstreetmap.width_meters, self.openstreetmap.height_meters)))
target_np = np.array(self.target_hist)
if len(self.target_hist) > 1:
ax.plot(target_np[:, 0], target_np[:, 1], linewidth=3.0,
color='maroon', zorder=3, markersize=4)
line5, = ax.plot(target_np[-1, 0], target_np[-1, 1], 'o', color='maroon',
markeredgecolor='black', label='target', markersize=10, zorder=3)
lines.extend([line5])
# Legend
ax.legend(handles=lines, loc='upper left', bbox_to_anchor=(
1.04, 1.0), fancybox=True, shadow=True, ncol=1)
# X/Y Limits
if self.openstreetmap is None:
map_width = 600
min_map = -1*int(map_width/2)
max_map = int(map_width/2)
ax.set_xlim(min_map, max_map)
ax.set_ylim(min_map, max_map)
# Sidebar Text
# actual_str = r'$\bf{Actual}$''\n' # prettier format but adds ~0.04 seconds ???
actual_str = 'Actual\n'
actual_str += 'Bearing = {:.0f} deg\n'.format(data.get(
'bearing', None)) if data.get('bearing', None) else 'Bearing = unknown\n'
actual_str += 'Speed = {:.2f} m/s'.format(data.get('action_taken', None)[
1]) if data.get('action_taken', None) else 'Speed = unknown\n'
proposal_str = 'Proposed\n'
proposal_str += 'Bearing = {:.0f} deg\n'.format(data.get('action_proposal', None)[
0]) if None not in data.get('action_proposal', (None, None)) else 'Bearing = unknown\n'
proposal_str += 'Speed = {:.2f} m/s'.format(data.get('action_proposal', None)[
1]) if None not in data.get('action_proposal', (None, None)) else 'Speed = unknown\n'
last_mean_hyp = self.pf_stats['mean_hypothesis'][-1][0]
last_map_hyp = self.pf_stats['map_hypothesis'][-1][0]
rssi_str = 'RSSI\n'
rssi_str += 'Observed = {:.1f} dB\n'.format(
env.last_observation) if env.last_observation else 'Observed = unknown\n'
rssi_str += 'Expected = {:.1f} dB\n'.format(
self.expected_target_rssi) if self.expected_target_rssi else 'Expected = unknown\n'
rssi_str += 'Difference = {:.1f} dB\n'.format(env.last_observation - self.expected_target_rssi) if (
env.last_observation and self.expected_target_rssi) else ''
#rssi_str += 'Target bearing = {} \n'.format(target_bearing) if target_bearing else ''
#rssi_str += 'Target relative bearing = {} \n'.format(target_relative_bearing) if target_relative_bearing else ''
rssi_str += 'MLE estimate = {:.1f} dB\n'.format(
last_mean_hyp) if last_mean_hyp else 'MLE estimate = unknown'
rssi_str += 'MAP estimate = {:.1f} dB'.format(
last_map_hyp) if last_map_hyp else 'MAP estimate = unknown'
if len(fig.texts) == 0:
props = dict(boxstyle='round', facecolor='palegreen', alpha=0.5)
text = fig.text(1.04, 0.75, actual_str, transform=ax.transAxes,
fontsize=14, verticalalignment='top', bbox=props)
props = dict(boxstyle='round',
facecolor='paleturquoise', alpha=0.5)
text = fig.text(1.04, 0.5, proposal_str, transform=ax.transAxes,
fontsize=14, verticalalignment='top', bbox=props)
props = dict(boxstyle='round', facecolor='khaki', alpha=0.5)
text = fig.text(1.04, 0.25, rssi_str, transform=ax.transAxes,
fontsize=14, verticalalignment='top', bbox=props)
else:
fig.texts[0].set_text(actual_str)
fig.texts[1].set_text(proposal_str)
fig.texts[2].set_text(rssi_str)
self.native_plot = 'true' if time_step % self.plot_every_n == 0 else 'false'
if self.native_plot == 'true':
plt.draw()
plt.pause(0.001)
if self.make_gif == 'true':
png_filename = '{}/png/{}.png'.format(self.plot_dir, time_step)
print('saving plots in {}'.format(png_filename))
plt.savefig(png_filename, bbox_inches='tight')
def build_multitarget_plots(self, env, time_step=None, fig=None, axs=None, centroid_distance_error=None, selected_plots=[1, 2, 3, 4, 5], simulated=True, textstr=None):
xp = env.state.target_state
belief = env.pf.particles.reshape(
len(env.pf.particles), env.state.n_targets, 4)
#print('sensor state = ',env.state.sensor_state)
abs_sensor = env.state.sensor_state
abs_particles = env.get_absolute_particles()
if simulated:
abs_target = np.array(env.get_absolute_target())
else:
abs_target = None
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
if len(self.abs_target_hist) < self.history_length:
self.abs_target_hist = [abs_target] * self.history_length
self.abs_sensor_hist = [abs_sensor] * self.history_length
else:
self.abs_target_hist.pop(0)
self.abs_target_hist.append(abs_target)
self.abs_sensor_hist.pop(0)
self.abs_sensor_hist.append(abs_sensor)
if len(self.target_hist) == 150:
self.target_hist = []
self.sensor_hist = []
self.rel_sensor_hist = []
self.target_hist.append(abs_target)
self.sensor_hist.append(abs_sensor)
plt.tight_layout()
# Put space between plots
plt.subplots_adjust(wspace=0.7, hspace=0.2)
color_array = [['salmon', 'darkred', 'red'],
['lightskyblue', 'darkblue', 'blue']]
plot_count = 0
if axs is None:
axs = {}
map_width = 600
min_map = -1*int(map_width/2)
max_map = int(map_width/2)
cell_size = int((max_map - min_map)/max_map)
cell_size = 2
xedges = np.arange(min_map, max_map+cell_size, cell_size)
yedges = np.arange(min_map, max_map+cell_size, cell_size)
if 1 in selected_plots:
# Plot 1: Particle Plot (Polar)
plot_count += 1
if 1 not in axs:
axs[1] = fig.add_subplot(
1, len(selected_plots), plot_count, polar=True)
ax = axs[1]
ax.clear()
for t in range(env.state.n_targets):
# plot particles
plot_theta = np.radians(belief[:, t, 1])
plot_r = belief[:, t, 0] # [row[0] for row in belief]
ax.plot(plot_theta, plot_r, 'o', color=color_array[t][0], markersize=4,
markeredgecolor='black', label='particles', alpha=0.3, zorder=1)
# plot targets
plot_x_theta = np.radians(xp[t, 1])
plot_x_r = xp[t, 0]
ax.set_ylim(0, 300)
if 2 in selected_plots:
# Plot 2: Particle Plot (Polar) with Interpolation
plot_count += 1
if 2 not in axs:
axs[2] = fig.add_subplot(
1, len(selected_plots), plot_count, polar=True)
ax = axs[2]
for t in range(env.state.n_targets):
# Create grid values first via histogram.
nbins = 10
plot_theta = np.radians(belief[:, t, 1])
plot_r = belief[:, t, 0] # [row[0] for row in belief]
counts, xbins, ybins = np.histogram2d(
plot_theta, plot_r, bins=nbins)
# Make a meshgrid for theta, r values
tm, rm = np.meshgrid(xbins[:-1], ybins[:-1])
# Build contour plot
ax.contourf(tm, rm, counts)
# True position
plot_x_theta = np.radians(xp[t, 1])
plot_x_r = xp[t, 0]
ax.plot(plot_x_theta, plot_x_r, 'X')
ax.set_ylim(0, 300)
if 3 in selected_plots:
# Plot 3: Heatmap Plot (Cartesian)
plot_count += 1
if 3 not in axs:
axs[3] = fig.add_subplot(1, len(selected_plots), plot_count)
ax = axs[3]
# COMBINED; UNCOMMENT AFTER PAPER PLOT
all_particles_x, all_particles_y = [], []
for t in range(env.state.n_targets):
cart = np.array(
list(map(pol2cart, belief[:, t, 0], np.radians(belief[:, t, 1]))))
x = cart[:, 0]
y = cart[:, 1]
all_particles_x.extend(x)
all_particles_y.extend(y)
heatmap, xedges, yedges = np.histogram2d(
all_particles_x, all_particles_y, bins=(xedges, yedges))
heatmap = gaussian_filter(heatmap, sigma=8)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
im = ax.imshow(heatmap.T, extent=extent, origin='lower',
cmap='jet', interpolation='nearest')
plt.colorbar(im)
ax.set_xlim(min_map, max_map)
ax.set_ylim(min_map, max_map)
if 4 in selected_plots:
# Plot 4: Absolute Polar coordinates
plot_count += 1
if 4 not in axs:
axs[4] = fig.add_subplot(
1, len(selected_plots), plot_count, polar=True)
ax = axs[4]
ax.clear()
lines = [] # https://matplotlib.org/3.5.0/api/_as_gen/matplotlib.pyplot.legend.html
for t in range(env.state.n_targets):
particles_x, particles_y = pol2cart(
abs_particles[:, t, 0], np.radians(abs_particles[:, t, 1]))
centroid_x = np.mean(particles_x)
centroid_y = np.mean(particles_y)
centroid_r, centroid_theta = cart2pol(centroid_x, centroid_y)
target_r, target_theta, target_x, target_y = [], [], [], []
for i in range(5):
target_r.append(
self.abs_target_hist[10*(i+1)-1][env.state.n_targets-1-t][0])
target_theta.append(np.radians(
self.abs_target_hist[10*(i+1)-1][env.state.n_targets-1-t][1]))
target_x, target_y = pol2cart(target_r, target_theta)
if len(self.target_hist) > 1:
ax.plot(np.radians(np.array(self.target_hist)[:-1, t, 1]), np.array(self.target_hist)[
:-1, t, 0], linewidth=4.0, color='limegreen', zorder=3, markersize=12)
line0, = ax.plot(target_theta[4], target_r[4], 'X', color='limegreen',
markeredgecolor='black', label='targets', markersize=20, zorder=4)
line1, = ax.plot(np.radians(abs_particles[:, t, 1]), abs_particles[:, t, 0], 'o', color=color_array[t]
[0], markersize=4, markeredgecolor='black', label='particles', alpha=0.3, zorder=1)
if t == 0:
lines.extend([line0, line1])
else:
lines.extend([line0])
if len(self.sensor_hist) > 1:
ax.plot(np.radians(np.array(self.sensor_hist)[:-1, 1]), np.array(self.sensor_hist)[
:-1, 0], linewidth=4.0, color='mediumorchid', zorder=3, markersize=12)
line4, = ax.plot(np.radians(self.sensor_hist[-1][1]), self.sensor_hist[-1][0], 'H',
color='mediumorchid', markeredgecolor='black', label='sensor', markersize=20, zorder=3)
lines.extend([line4])
ax.legend(handles=lines, loc='center left', bbox_to_anchor=(
1.08, 0.5), fancybox=True, shadow=True,)
ax.set_ylim(0, 250)
if 5 in selected_plots:
# Plot 5: Absolute Cartesian coordinates
plot_count += 1
if 5 not in axs:
axs[5] = fig.add_subplot(1, len(selected_plots), plot_count)
ax = axs[5]
xedges = np.arange(min_map, max_map, cell_size)
yedges = np.arange(min_map, max_map, cell_size)
heatmap_combined = None
all_particles_x, all_particles_y = [], []
for t in range(env.state.n_targets):
particles_x, particles_y = pol2cart(
abs_particles[:, t, 0], np.radians(abs_particles[:, t, 1]))
all_particles_x.extend(particles_x)
all_particles_y.extend(particles_y)
centroid_x = np.mean(particles_x)
centroid_y = np.mean(particles_y)
centroid_r, centroid_theta = cart2pol(centroid_x, centroid_y)
target_r, target_theta, target_x, target_y = [], [], [], []
for i in range(5):
target_r.append(self.abs_target_hist[10*(i+1)-1][t][0])
target_theta.append(np.radians(
self.abs_target_hist[10*(i+1)-1][t][1]))
target_x, target_y = pol2cart(target_r, target_theta)
ax.plot(centroid_x, centroid_y, '*',
label='centroid', markersize=12)
ax.plot(target_x[4], target_y[4], 'X',
label='target', markersize=12)
sensor_r, sensor_theta, sensor_x, sensor_y = [], [], [], []
for i in range(5):
sensor_r.append(self.abs_sensor_hist[10*(i+1)-1][0])
sensor_theta.append(np.radians(
self.abs_sensor_hist[10*(i+1)-1][1]))
sensor_x, sensor_y = pol2cart(sensor_r, sensor_theta)
ax.plot(sensor_x[4], sensor_y[4], 'p',
label='sensor', markersize=12)
heatmap, xedges, yedges = np.histogram2d(
all_particles_x, all_particles_y, bins=(xedges, yedges))
heatmap = gaussian_filter(heatmap, sigma=8)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
im = ax.imshow(heatmap.T, extent=extent, origin='lower',
cmap='jet', interpolation='nearest')
plt.colorbar(im)
ax.legend(loc='center left', bbox_to_anchor=(
1.2, 0.5), fancybox=True, shadow=True,)
ax.set_xlim(min_map, max_map)
ax.set_ylim(min_map, max_map)
if 6 in selected_plots:
# Plot 1: Particle Plot (Polar)
plot_count += 1
if 6 not in axs:
axs[6] = fig.add_subplot(1, len(selected_plots), plot_count)
ax = axs[6]
ax.clear()
for t in range(env.state.n_targets):
# plot particles
plot_theta = np.radians(belief[:, t, 1])
plot_r = belief[:, t, 0]
particles_x, particles_y = pol2cart(
belief[:, t, 0], np.radians(belief[:, t, 1]))
ax.plot(particles_x, particles_y, 'o',
color=color_array[t][0], markersize=4, markeredgecolor='black', label='particles', alpha=0.3, zorder=1)
# plot targets
plot_x_theta = np.radians(xp[t, 1])
plot_x_r = xp[t, 0]
ax.set_xlim(min_map, max_map)
ax.set_ylim(min_map, max_map)
sensor_x, sensor_y = pol2cart(
self.sensor_hist[-1][0], np.radians(self.sensor_hist[-1][1]))
if 7 in selected_plots:
plot_count += 1
if 7 not in axs:
axs[7] = fig.add_subplot(1, len(selected_plots), plot_count)
ax = axs[7]
ax.clear()
lines = [] # https://matplotlib.org/3.5.0/api/_as_gen/matplotlib.pyplot.legend.html
for t in range(env.state.n_targets):
particles_x, particles_y = pol2cart(
abs_particles[:, t, 0], np.radians(abs_particles[:, t, 1]))
centroid_x = np.mean(particles_x)
centroid_y = np.mean(particles_y)
centroid_r, centroid_theta = cart2pol(centroid_x, centroid_y)
target_r, target_theta, target_x, target_y = [], [], [], []
for i in range(5):
target_r.append(
self.abs_target_hist[10*(i+1)-1][env.state.n_targets-1-t][0])
target_theta.append(np.radians(
self.abs_target_hist[10*(i+1)-1][env.state.n_targets-1-t][1]))
target_x, target_y = pol2cart(target_r, target_theta)
target_x, target_y = pol2cart(np.array(self.target_hist)[
:, t, 0], np.radians(np.array(self.target_hist)[:, t, 1]))
if len(self.target_hist) > 1:
ax.plot(target_x[:-1], target_y[:-1], linewidth=4.0,
color='limegreen', zorder=3, markersize=12)
line0, = ax.plot(target_x[-1], target_y[-1], 'X', color='limegreen',
markeredgecolor='black', label='targets', markersize=20, zorder=4)
line1, = ax.plot(particles_x, particles_y, 'o',
color=color_array[t][0], markersize=4, markeredgecolor='black', label='particles', alpha=0.3, zorder=1)
#ax.plot(centroid_theta, centroid_r, '*', color=color_array[t][1],markeredgecolor='white', label='centroid', markersize=12, zorder=2)
if t == 0:
lines.extend([line0, line1])
else:
lines.extend([line0])
sensor_x, sensor_y = pol2cart(np.array(self.sensor_hist)[
:, 0], np.radians(np.array(self.sensor_hist)[:, 1]))
if len(self.sensor_hist) > 1:
ax.plot(sensor_x[:-1], sensor_y[:-1], linewidth=4.0,
color='mediumorchid', zorder=3, markersize=12)
line4, = ax.plot(sensor_x[-1], sensor_y[-1], 'H', color='mediumorchid',
markeredgecolor='black', label='sensor', markersize=20, zorder=3)
lines.extend([line4])
ax.legend(handles=lines, loc='center left', bbox_to_anchor=(
1.08, 0.5), fancybox=True, shadow=True,)
ax.set_xlim(min_map, max_map)
ax.set_ylim(min_map, max_map)
if 8 in selected_plots:
plot_count += 1
if 8 not in axs:
axs[8] = fig.add_subplot(1, len(selected_plots), plot_count)
ax = axs[8]
ax.clear()
lines = [] # https://matplotlib.org/3.5.0/api/_as_gen/matplotlib.pyplot.legend.html
for t in range(env.state.n_targets):
particles_x, particles_y = pol2cart(
abs_particles[:, t, 0], np.radians(abs_particles[:, t, 1]))
centroid_x = np.mean(particles_x)
centroid_y = np.mean(particles_y)
centroid_r, centroid_theta = cart2pol(centroid_x, centroid_y)
line1, = ax.plot(particles_x, particles_y, 'o',
color=color_array[t][0], markersize=4, markeredgecolor='black', label='particles', alpha=0.3, zorder=1)
if t == 0:
lines.extend([line1])
else:
lines.extend([])
sensor_x, sensor_y = pol2cart(np.array(self.sensor_hist)[
:, 0], np.radians(np.array(self.sensor_hist)[:, 1]))
if len(self.sensor_hist) > 1:
ax.plot(sensor_x[:-1], sensor_y[:-1], linewidth=4.0,
color='mediumorchid', zorder=3, markersize=12)
line4, = ax.plot(sensor_x[-1], sensor_y[-1], 'H', color='mediumorchid',
markeredgecolor='black', label='sensor', markersize=20, zorder=3)
lines.extend([line4])
ax.legend(handles=lines, loc='upper center', bbox_to_anchor=(
0.5, -0.05), fancybox=True, shadow=True, ncol=2)
ax.set_xlim(min_map, max_map)
ax.set_ylim(min_map, max_map)
if textstr:
props = dict(boxstyle='round',
facecolor='palegreen', alpha=0.5)
ax.text(1.04, 0.75, textstr[0], transform=ax.transAxes,
fontsize=14, verticalalignment='top', bbox=props)
props = dict(boxstyle='round',
facecolor='paleturquoise', alpha=0.5)
ax.text(1.04, 0.5, textstr[1], transform=ax.transAxes,
fontsize=14, verticalalignment='top', bbox=props)
png_filename = '{}/png/{}.png'.format(self.plot_dir, time_step)
return axs
def build_plots(self, xp=[], belief=[], abs_sensor=None, abs_target=None, abs_particles=None, time_step=None, fig=None, ax=None):
print(belief.shape)
if len(self.abs_target_hist) < self.history_length:
self.abs_target_hist = [abs_target] * self.history_length
self.abs_sensor_hist = [abs_sensor] * self.history_length
else:
self.abs_target_hist.pop(0)
self.abs_target_hist.append(abs_target)
self.abs_sensor_hist.pop(0)
self.abs_sensor_hist.append(abs_sensor)
fig = plt.figure(figsize=(30, 6))
plt.tight_layout()
# Put space between plots
plt.subplots_adjust(wspace=0.2, hspace=0.2)
# Plot 1: Particle Plot (Polar)
ax = fig.add_subplot(1, 5, 1, polar=True)
grid_r, grid_theta = [], []
plot_r = [row[0] for row in belief]
plot_theta = np.radians(np.array([row[1] for row in belief]))
plot_x_theta = np.radians(xp[1])
plot_x_r = xp[0]
ax.plot(plot_theta, plot_r, 'ro')
ax.plot(plot_x_theta, plot_x_r, 'bo')
ax.set_ylim(-150, 150)
ax.set_title('iteration {}'.format(time_step), fontsize=16)
# Plot 2: Particle Plot (Polar) with Interpolation
ax = fig.add_subplot(1, 5, 2, polar=True)
# Create grid values first via histogram.
nbins = 10
counts, xbins, ybins = np.histogram2d(plot_theta, plot_r, bins=nbins)
# Make a meshgrid for theta, r values
tm, rm = np.meshgrid(xbins[:-1], ybins[:-1])
# Build contour plot
ax.contourf(tm, rm, counts)
# True position
ax.plot(plot_x_theta, plot_x_r, 'bo')
ax.set_ylim(-150, 150)
    ax.set_title('Interpolated Belief', fontsize=16)
# Plot 3: Heatmap Plot (Cartesian)
ax = fig.add_subplot(1, 5, 3)
cart = np.array(
list(map(pol2cart, belief[:, 0], np.radians(belief[:, 1]))))
x = cart[:, 0]
y = cart[:, 1]
xedges = np.arange(-150, 153, 3)
yedges = np.arange(-150, 153, 3)
heatmap, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
heatmap = gaussian_filter(heatmap, sigma=5)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
im = ax.imshow(heatmap.T, extent=extent,
origin='lower', cmap='coolwarm')
plt.colorbar(im)
ax.set_xlim(-200, 200)
ax.set_ylim(-200, 200)
ax.set_title('Particle heatmap (relative to sensor)')
# Plots 4 & 5: Absolute Particle/Sensor/Target Plot
# particles/centroid coordinates
particles_x, particles_y = pol2cart(
abs_particles[:, 0], np.radians(abs_particles[:, 1]))
centroid_x = np.mean(particles_x)
centroid_y = np.mean(particles_y)
centroid_r, centroid_theta = cart2pol(centroid_x, centroid_y)
sensor_r, sensor_theta, sensor_x, sensor_y = [], [], [], []
target_r, target_theta, target_x, target_y = [], [], [], []
for i in range(5):
sensor_r.append(self.abs_sensor_hist[10*(i+1)-1][0])
sensor_theta.append(np.radians(
self.abs_sensor_hist[10*(i+1)-1][1]))
target_r.append(self.abs_target_hist[10*(i+1)-1][0])
target_theta.append(np.radians(
self.abs_target_hist[10*(i+1)-1][1]))
    # convert the accumulated sensor/target history to cartesian once, after the loop
    sensor_x, sensor_y = pol2cart(sensor_r, sensor_theta)
    target_x, target_y = pol2cart(target_r, target_theta)
# Plot 4: Absolute Polar coordinates
ax = fig.add_subplot(1, 5, 4, polar=True)
ax.plot(np.radians(
abs_particles[:, 1]), abs_particles[:, 0], 'ro', label='particles', alpha=0.5)
ax.plot(centroid_theta, centroid_r, 'c*',
label='centroid', markersize=12)
ax.plot(sensor_theta[4], sensor_r[4], 'gp',
label='sensor', markersize=12)
ax.plot(target_theta[4], target_r[4], 'bX',
label='target', markersize=12)
for i in range(4):
ax.plot(sensor_theta[i], sensor_r[i],
'gp', markersize=6, alpha=0.75)
ax.plot(target_theta[i], target_r[i],
'bX', markersize=6, alpha=0.75)
ax.legend()
    ax.set_title('Absolute positions (polar)', fontsize=16)
# Plot 5: Absolute Cartesian coordinates
ax = fig.add_subplot(1, 5, 5)
xedges = np.arange(-100, 103, 3)
yedges = np.arange(-100, 103, 3)
heatmap, xedges, yedges = np.histogram2d(
particles_x, particles_y, bins=(xedges, yedges))
heatmap = gaussian_filter(heatmap, sigma=2)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
im = ax.imshow(heatmap.T, extent=extent,
origin='lower', cmap='coolwarm')
plt.colorbar(im)
ax.plot(centroid_x, centroid_y, 'c*', label='centroid', markersize=12)
ax.plot(sensor_x[4], sensor_y[4], 'gp', label='sensor', markersize=12)
ax.plot(target_x[4], target_y[4], 'bX', label='target', markersize=12)
for i in range(4):
ax.plot(sensor_x[i], sensor_y[i], 'gp', markersize=6, alpha=0.55)
ax.plot(target_x[i], target_y[i], 'bX', markersize=6, alpha=0.55)
ax.legend()
ax.set_xlim(-150, 150)
ax.set_ylim(-150, 150)
    ax.set_title('Absolute positions (cartesian)', fontsize=16)
r_error, theta_error, heading_error, centroid_distance_error, rmse, mae = tracking_error(
abs_target, abs_particles)
png_filename = '{}/png/{}.png'.format(self.plot_dir, time_step)
print('saving plots in {}'.format(png_filename))
plt.savefig(png_filename)
plt.close(fig)
##################################################################
# Logging
##################################################################
def write_header_log(config, method, global_start_time):
if type(config) == configparser.ConfigParser:
config2log = {section: dict(config[section])
for section in config.sections()}
else:
config2log = dict(config)
# write output header
if not os.path.isdir(f'{RUN_DIR}/{method}/'):
os.makedirs(f'{RUN_DIR}/{method}/')
header_filename = f'{RUN_DIR}/{method}/{global_start_time}_header.txt'
with open(header_filename, 'w', encoding='UTF-8') as f:
f.write(json.dumps(config2log))
def read_header_log(filename):
with open(filename, 'r', encoding='UTF-8') as f:
config = json.load(f)
return config
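# Illustrative round trip for the two helpers above (names are examples only;
# assumes RUN_DIR, a parsed `config`, and a `global_start_time` string exist):
#
#   write_header_log(config, 'baseline', global_start_time)
#   restored = read_header_log(f'{RUN_DIR}/baseline/{global_start_time}_header.txt')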
def particles_mean_belief(particles):
particles_r = particles[:, 0]
particles_theta = np.radians(particles[:, 1])
particles_x, particles_y = pol2cart(particles_r, particles_theta)
# centroid of particles x,y
mean_x = np.mean(particles_x)
mean_y = np.mean(particles_y)
# centroid of particles r,theta
mean_r, mean_theta = cart2pol(mean_x, mean_y)
particles_heading = particles[:, 2]
particles_heading_rad = np.radians(particles_heading)
mean_heading_rad = np.arctan2(
np.mean(np.sin(particles_heading_rad)), np.mean(np.cos(particles_heading_rad)))
    mean_heading = np.degrees(mean_heading_rad)
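    # Note: the component-wise (circular) mean above is what makes wrapped
    # headings average correctly: e.g. headings of 350 and 10 degrees give
    # ~0 degrees here, whereas a naive arithmetic mean would give 180 degrees.
    # The return below is an assumed completion; the original function is
    # truncated at this point in the source.
    return mean_x, mean_y, mean_r, mean_theta, mean_heading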
"""
Links up the various cards in the BDF.
For example, with cross referencing...
.. code-block:: python
>>> model = BDF()
>>> model.read_bdf(bdf_filename, xref=True)
>>> nid1 = 1
>>> node1 = model.nodes[nid1]
>>> node.nid
1
>>> node.xyz
[1., 2., 3.]
>>> node.Cid()
3
>>> node.cid
3
>>> node.cid_ref
CORD2S, 3, 1, 0., 0., 0., 0., 0., 1.,
1., 0., 0.
# get the position in the global frame
>>> node.get_position()
[4., 5., 6.]
# get the position with respect to another frame
>>> node.get_position_wrt(model, cid=2)
[4., 5., 6.]
Without cross referencing...
.. code-block:: python
>>> model = BDF()
    >>> model.read_bdf(bdf_filename, xref=False)
>>> nid1 = 1
>>> node1 = model.nodes[nid1]
>>> node.nid
1
>>> node.xyz
[1., 2., 3.]
>>> node.Cid()
3
>>> node.cid
3
>>> node.cid_ref
None
# get the position in the global frame
>>> node.get_position()
Error!
Cross-referencing allows you to easily jump across cards and also helps
with calculating things like position, area, and mass. The BDF is designed
around the idea of cross-referencing, so it's recommended that you use it.
"""
# pylint: disable=R0902,R0904,R0914
from collections import defaultdict
import traceback
from typing import List, Dict, Any
from numpy import zeros, argsort, arange, array_equal, array
from pyNastran.bdf.bdf_interface.attributes import BDFAttributes
class XrefMesh(BDFAttributes):
"""Links up the various cards in the BDF."""
def __init__(self) -> None:
"""The main BDF class defines all the parameters that are used."""
BDFAttributes.__init__(self)
self._nxref_errors = 100
self._stop_on_xref_error = True
# def geom_check(self):
# """
# Performs various geometry checks
# 1. nodal uniqueness on elements
# """
# for elem in model.elements:
# elem.check_unique_nodes()
def cross_reference(self,
xref: bool=True,
xref_nodes: bool=True,
xref_elements: bool=True,
xref_nodes_with_elements: bool=False,
xref_properties: bool=True,
xref_masses: bool=True,
xref_materials: bool=True,
xref_loads: bool=True,
xref_constraints: bool=True,
xref_aero: bool=True,
xref_sets: bool=True,
xref_optimization: bool=True,
word: str='') -> None:
"""
Links up all the cards to the cards they reference
Parameters
----------
xref : bool; default=True
cross references the model
xref_nodes : bool; default=True
set cross referencing of nodes/coords
        xref_elements : bool; default=True
set cross referencing of elements
xref_properties : bool; default=True
set cross referencing of properties
xref_masses : bool; default=True
set cross referencing of CMASS/PMASS
xref_materials : bool; default=True
set cross referencing of materials
xref_loads : bool; default=True
set cross referencing of loads
xref_constraints : bool; default=True
set cross referencing of constraints
xref_aero : bool; default=True
set cross referencing of CAERO/SPLINEs
xref_sets : bool; default=True
set cross referencing of SETx
word : str; default=''
model flag
To only cross-reference nodes:
.. code-block:: python
model = BDF()
model.read_bdf(bdf_filename, xref=False)
model.cross_reference(xref=True, xref_loads=False, xref_constraints=False,
xref_materials=False, xref_properties=False,
xref_aero=False, xref_masses=False,
xref_sets=False)
.. warning:: be careful if you call this method with False values
"""
if not xref:
return
self.log.debug("Cross Referencing%s..." % word)
if xref_nodes:
self._cross_reference_nodes()
self._cross_reference_coordinates()
if xref_elements:
self._cross_reference_elements()
if xref_properties:
self._cross_reference_properties()
if xref_masses:
self._cross_reference_masses()
if xref_materials:
self._cross_reference_materials()
if xref_aero:
self._cross_reference_aero()
if xref_constraints:
self._cross_reference_constraints()
if xref_loads:
self._cross_reference_loads()
if xref_sets:
self._cross_reference_sets()
if xref_optimization:
self._cross_reference_optimization()
if xref_nodes_with_elements:
self._cross_reference_nodes_with_elements()
self._cross_reference_contact()
self._cross_reference_superelements()
#self.case_control_deck.cross_reference(self)
self.pop_xref_errors()
for super_id, superelement in sorted(self.superelement_models.items()):
superelement.cross_reference(
xref=xref, xref_nodes=xref_nodes, xref_elements=xref_elements,
xref_nodes_with_elements=xref_nodes_with_elements,
xref_properties=xref_properties, xref_masses=xref_masses,
xref_materials=xref_materials, xref_loads=xref_loads,
xref_constraints=xref_constraints, xref_aero=xref_aero,
xref_sets=xref_sets, xref_optimization=xref_optimization,
word=' (Superelement %i)' % super_id)
def _cross_reference_constraints(self) -> None:
"""
Links the SPCADD, SPC, SPCAX, SPCD, MPCADD, MPC, SUPORT,
SUPORT1, SESUPORT cards.
"""
for spcadds in self.spcadds.values():
for spcadd in spcadds:
spcadd.cross_reference(self)
for spcs in self.spcs.values():
for spc in spcs:
spc.cross_reference(self)
for spcoffs in self.spcoffs.values():
for spcoff in spcoffs:
spcoff.cross_reference(self)
for mpcadds in self.mpcadds.values():
for mpcadd in mpcadds:
mpcadd.cross_reference(self)
for mpcs in self.mpcs.values():
for mpc in mpcs:
mpc.cross_reference(self)
for suport in self.suport:
suport.cross_reference(self)
for unused_suport1_id, suport1 in self.suport1.items():
suport1.cross_reference(self)
for se_suport in self.se_suport:
se_suport.cross_reference(self)
def _cross_reference_coordinates(self) -> None:
"""
Links up all the coordinate cards to other coordinate cards and nodes
- CORD1R, CORD1C, CORD1S
- CORD2R, CORD2C, CORD2S
"""
# CORD2x: links the rid to coordinate systems
# CORD1x: links g1,g2,g3 to grid points
for coord in self.coords.values():
coord.cross_reference(self)
for coord in self.coords.values():
coord.setup()
def _cross_reference_aero(self, check_caero_element_ids: bool=False) -> None:
"""
Links up all the aero cards
- CAEROx, PAEROx, SPLINEx, AECOMP, AELIST, AEPARAM, AESTAT, AESURF, AESURFS
"""
self.zona.cross_reference()
for caero in self.caeros.values():
caero.cross_reference(self)
for paero in self.paeros.values():
paero.cross_reference(self)
for trim in self.trims.values():
trim.cross_reference(self)
for csschd in self.csschds.values():
csschd.cross_reference(self)
for spline in self.splines.values():
spline.cross_reference(self)
for aecomp in self.aecomps.values():
aecomp.cross_reference(self)
for aelist in self.aelists.values():
aelist.cross_reference(self)
for aeparam in self.aeparams.values():
aeparam.cross_reference(self)
        #for aestat in self.aestats.values():
#aestat.cross_reference(self)
for aesurf in self.aesurf.values():
aesurf.cross_reference(self)
for aesurfs in self.aesurfs.values():
aesurfs.cross_reference(self)
for flutter in self.flutters.values():
flutter.cross_reference(self)
for monitor_point in self.monitor_points:
monitor_point.cross_reference(self)
if self.aero:
self.aero.cross_reference(self)
if self.aeros:
self.aeros.cross_reference(self)
if check_caero_element_ids: # only support CAERO1
ncaeros = len(self.caeros)
if ncaeros > 1:
# we don't need to check the ncaeros=1 case
i = 0
min_maxs = zeros((ncaeros, 2), dtype='int32')
for unused_eid, caero in sorted(self.caeros.items()):
min_maxs[i, :] = caero.min_max_eid
i += 1
isort = argsort(min_maxs.ravel())
expected = arange(ncaeros * 2, dtype='int32')
                if not array_equal(isort, expected):
                    # NOTE: the original error-handling body is truncated in this
                    # source; the raise below is an assumed placeholder only.
                    raise RuntimeError('CAERO element ids are inconsistent or overlap')
import sys
import numpy as np
from matplotlib import pyplot
sys.path.append('..')
from submission import SubmissionBase
def displayData(X, example_width=None, figsize=(10, 10)):
"""
Displays 2D data stored in X in a nice grid.
"""
# Compute rows, cols
if X.ndim == 2:
m, n = X.shape
elif X.ndim == 1:
n = X.size
m = 1
X = X[None] # Promote to a 2 dimensional array
else:
raise IndexError('Input X should be 1 or 2 dimensional.')
example_width = example_width or int(np.round(np.sqrt(n)))
example_height = n / example_width
# Compute number of items to display
display_rows = int(np.floor(np.sqrt(m)))
display_cols = int(np.ceil(m / display_rows))
fig, ax_array = pyplot.subplots(display_rows, display_cols, figsize=figsize)
fig.subplots_adjust(wspace=0.025, hspace=0.025)
ax_array = [ax_array] if m == 1 else ax_array.ravel()
for i, ax in enumerate(ax_array):
ax.imshow(X[i].reshape(example_width, example_width, order='F'),
cmap='Greys', extent=[0, 1, 0, 1])
ax.axis('off')
def sigmoid(z):
"""
Computes the sigmoid of z.
"""
return 1.0 / (1.0 + np.exp(-z))
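# Quick sanity check (illustrative): sigmoid(0) == 0.5, and the output saturates
# toward 0/1 for large-magnitude inputs, e.g.
#   sigmoid(np.array([-10., 0., 10.]))  ->  approx. [4.54e-05, 0.5, 1 - 4.54e-05]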
class Grader(SubmissionBase):
# Random Test Cases
X = np.stack([np.ones(20),
np.exp(1) * np.sin(np.arange(1, 21)),
                  # NOTE: this statement is truncated in the source; the cosine
                  # term and closing ], axis=1) below are an assumed completion.
                  np.exp(0.5) * np.cos(np.arange(1, 21))], axis=1)
#!/usr/bin/env python
import json
import pickle
import numpy as np
from gryffin import Gryffin
from gemini import GeminiOpt as Gemini
from gemini.plotter import PlotterOpt
import olympus
from olympus.surfaces import Surface
#===============================================================================
#===============================================================================
'''
Run Gryffin with "external" predictive model.
Predictive model training takes place OUTSIDE of the Gryffin source code,
and a callable predictive model object must then be passed to the recommend
method of gryffin.py
The callable makes predictions about the objective function given parameter
proposals.
This callable must have the following methods and attributes to be compatible with
use in Gryffin
This example uses 2d trigonometric functions for the surfaces
'''
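# ------------------------------------------------------------------------------
# Minimal sketch of a stand-in predictive model (illustrative only).
# The exact methods/attributes Gryffin expects from `predictive_model` are not
# listed in this file; the trainable/queryable interface below is an assumption
# for illustration, not Gryffin's documented API. In this script the role is
# played by Gemini (see gemini.train(...) and recommend(..., predictive_model=gemini)).
# ------------------------------------------------------------------------------
class ConstantPredictiveModel:
    """Toy stand-in: predicts the mean training target for any proposal."""
    def __init__(self):
        self.mean_target = 0.0
    def train(self, features, targets, proxy_features, proxy_targets,
              num_folds=1, user_hyperparams=None):
        # ignore the proxy (cheap) observations and just remember the mean target
        self.mean_target = float(np.mean(targets))
    def predict(self, features):
        # one prediction per parameter proposal (assumed query interface)
        return np.full(len(features), self.mean_target)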
# define params ----------------------------------------------------------------
PLOT = True
BUDGET_EXP = 30
BUDGET_CHEAP = 1000
RATIO = 10
CONFIG_FILE = 'config.json'
RANDOM_SEED = 100700
TYPE = 'gemini'
# helper function
def normalize(x, minmax=None):
    if not isinstance(minmax, np.ndarray):
z = (x - np.amin(x))/(np.amax(x)-np.amin(x))
else:
z = (x - np.amin(minmax))/(np.amax(minmax)-np.amin(minmax))
return z
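# Example behaviour (illustrative): with no `minmax` the data's own range is used,
# e.g. normalize(np.array([2., 4., 6.])) -> [0. , 0.5, 1. ]; with explicit bounds,
# e.g. minmax=np.array([0., 8.]), the same data maps to [0.25, 0.5, 0.75].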
# initialize plotter
if PLOT:
plotter = PlotterOpt()
# initialize Gryffin
with open(CONFIG_FILE, 'r') as content:
CONFIG_DICT = json.load(content)
CONFIG_DICT['general']['random_seed'] = RANDOM_SEED
gryffin = Gryffin(config_dict=CONFIG_DICT)
# initialize Gemini
gemini = Gemini()
# define the surfaces ----------------------------------------------------------
def cheap_func(X, Y):
return np.sin(X+Y)
def exp_func(X, Y):
return np.cos(X+Y)
# generate the data
RESOLUTION = 50
x = np.linspace(-3, 3, RESOLUTION)
y = np.linspace(-3, 3, RESOLUTION)
X, Y = np.meshgrid(x, y)
cheap_Z = cheap_func(X, Y)
exp_Z = exp_func(X, Y)
features = np.dstack([X, Y]).reshape(-1, 2)
cheap_z = cheap_Z.reshape((x.shape[0], y.shape[0]))
exp_z = exp_Z.reshape((x.shape[0], y.shape[0]))
# START THE CAMPAIGN -----------------------------------------------------------
iteration = 0
observations_exp = []
observations_cheap = []
while len(observations_exp) < BUDGET_EXP:
    # check to see if we have sufficient observations to commence training
if len(observations_exp) >= 2 and len(observations_cheap) >= 2:
print('@@\tTRAINING PREDICTIVE MODEL\n')
training_set = gryffin.construct_training_set(observations_exp, observations_cheap)
gemini.train(training_set['train_features'], training_set['train_targets'],
training_set['proxy_train_features'], training_set['proxy_train_targets'],
num_folds=1, user_hyperparams={'max_epochs': 10000})
#-----------------------------------------------------------------------
# EXPENSIVE EXPERIMENT
#-----------------------------------------------------------------------
print('@@\tEXPENSIVE EXPERIMENT\n')
# get a new sample
samples_exp = gryffin.recommend(observations=observations_exp,
predictive_model=gemini)
# get measurements for samples
new_observations = []
for sample in samples_exp:
param = np.array([sample['param_0'][0], sample['param_1'][0]])
grid = np.meshgrid(param[0], param[1])
measurement = exp_func(grid[0], grid[1])
sample['obj'] = measurement[0][0]
new_observations.append(sample)
# add measurements to cache
observations_exp.extend(new_observations)
#-----------------------------------------------------------------------
# CHEAP EXPERIMENT
#-----------------------------------------------------------------------
print('@@\tCHEAP EXPERIMENT\n')
# add same observations as the proxy observations
samples_cheap = np.random.uniform(-3, 3, size=(2*RATIO, 2))
new_observations = []
for sample in samples_cheap:
grid = np.meshgrid(sample[0], sample[1])
measurement = cheap_func(grid[0], grid[1])
new_observations.append({'param_0': np.array([sample[0]]),
'param_1': np.array([sample[1]]),
'obj': measurement[0][0]})
# add measurements to cache
observations_cheap.extend(new_observations)
# plotting stuff------------------------------------------------------------
if PLOT:
# unpack the observations
params_exp = np.array([[o['param_0'][0], o['param_1'][0]] for o in observations_exp])
        objs_exp = np.array([o['obj'] for o in observations_exp])
#!/usr/bin/python3
from tools import *
from sys import argv
from os.path import join
import h5py
import matplotlib.pylab as plt
import numpy as np
from time import sleep
if len(argv) > 1:
pathToSimFolder = argv[1]
else:
pathToSimFolder = "../data/"
parameters, electrodes = readParameters(pathToSimFolder)
fileOpenTries = 0
while fileOpenTries < 50:
fileOpenTries += 1
try:
with h5py.File(join(pathToSimFolder, "data.hdf5"), "r") as dataFile:
voltages = np.array(dataFile["/voltages"][:])
optEnergy = np.array(dataFile["/optEnergy"][:])
while True:
try:
generations = np.array(dataFile["/generation"][:])
mode = "genetic"
break
except KeyError:
pass
try:
basinAccepted = np.array(dataFile["/basinAccepted"][:], dtype=int)
accepted = basinAccepted.astype(bool)
notAccepted = np.invert(accepted)
mode = "basinHop"
break
except KeyError:
pass
mode = "MC"
try:
accepted = np.array(dataFile["/accepted"][:], dtype=bool)
notAccepted = np.invert(accepted)
except KeyError:
accepted = np.ones(
optEnergy.shape, dtype=bool
) # support for deprecated version
notAccepted = np.invert(accepted)
break
break
except OSError as e:
if "No such file" in repr(e):
raise e
else:
print(f"could not open file. try number {fileOpenTries}")
sleep(1)
cotrolElectrodeIndices = list(range(0, len(electrodes)))
cotrolElectrodeIndices.remove(parameters["outputElectrode"])
cotrolElectrodeIndices.remove(parameters["inputElectrode1"])
cotrolElectrodeIndices.remove(parameters["inputElectrode2"])
controlVoltages = voltages[:, cotrolElectrodeIndices]
if mode == "MC":
distance = 0
meanRange = 1000
displace = []
for i in range(int(distance + meanRange / 2), controlVoltages.shape[0]):
mean = np.mean(
controlVoltages[
int(i - distance - meanRange / 2) : int(i - distance + meanRange / 2), :
],
axis=0,
)
# displace.append(np.sqrt(np.sum((controlVoltages[i])**2)))
displace.append(np.sqrt(np.sum((mean - controlVoltages[i]) ** 2)))
MSD = np.sum((controlVoltages[0] - controlVoltages[:]) ** 2, axis=1)
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(range(len(MSD)), MSD, "r-", label="MSD")
ax2 = ax.twinx()
ax2.plot(
range(int(distance + meanRange / 2), controlVoltages.shape[0]),
displace,
"k-",
label="displacement",
)
ax.legend()
ax2.legend()
ax.set_xlabel("step")
ax.set_ylabel("displacement")
plt.savefig(join(pathToSimFolder, "displacement.png"), bbox_inches="tight", dpi=300)
# plt.show()
plt.close(fig)
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(np.maximum.accumulate(optEnergy), color="darkorange", label="best")
ax.plot(
np.arange(optEnergy.shape[0])[notAccepted[:, 0]],
optEnergy[notAccepted],
".",
ms=1,
color="darkred",
label="not accepted",
zorder=10,
)
ax.plot(
np.arange(optEnergy.shape[0])[accepted[:, 0]],
optEnergy[accepted],
".",
ms=1,
color="darkgreen",
label="accepted",
zorder=10,
)
# ax.set_xlim(-0.15,0.65)
ax.set_ylim(0.15, 1.05)
ax.set_xlabel("iteration")
ax.set_ylabel(r"$\mathcal{F}$")
ax.legend()
plt.savefig(join(pathToSimFolder, "convergence.png"), bbox_inches="tight", dpi=300)
# plt.show()
plt.close(fig)
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(np.maximum.accumulate(optEnergy), color="darkorange", label="best")
ax.plot(
np.arange(optEnergy.shape[0])[notAccepted[:, 0]],
optEnergy[notAccepted],
".",
ms=1,
color="darkred",
label="not accepted",
zorder=10,
)
ax.plot(
np.arange(optEnergy.shape[0])[accepted[:, 0]],
optEnergy[accepted],
".",
ms=1,
color="darkgreen",
label="accepted",
zorder=10,
)
ax2 = ax.twinx()
ax.set_zorder(ax2.get_zorder() + 1)
ax.patch.set_visible(False)
ax2.plot(
range(int(distance + meanRange / 2), controlVoltages.shape[0]),
displace,
"k-",
label="displacement",
)
ax.set_ylim(0.15, 1.05)
ax.set_xlabel("iteration")
ax.set_ylabel(r"$\mathcal{F}$")
ax2.set_ylabel("displacement")
# ax.legend([line],[line.get_label()])
# ax2.legend()
plt.savefig(
join(pathToSimFolder, "convergence_displacement.png"),
bbox_inches="tight",
dpi=300,
)
# plt.show()
plt.close(fig)
###############################
if mode == "genetic":
distance = 0
meanRange = 1000
displace = []
for i in range(int(distance + meanRange / 2), controlVoltages.shape[0]):
mean = np.mean(
controlVoltages[
int(i - distance - meanRange / 2) : int(i - distance + meanRange / 2), :
],
axis=0,
)
# displace.append(np.sqrt(np.sum((controlVoltages[i])**2)))
displace.append(np.sqrt(np.sum((mean - controlVoltages[i]) ** 2)))
MSD = np.sum((controlVoltages[0] - controlVoltages[:]) ** 2, axis=1)
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(range(len(MSD)), MSD, "r-", label="MSD")
ax2 = ax.twinx()
ax2.plot(
range(int(distance + meanRange / 2), controlVoltages.shape[0]),
displace,
"k-",
label="displacement",
)
ax.legend()
ax2.legend()
ax.set_xlabel("step")
ax.set_ylabel("displacement")
plt.savefig(join(pathToSimFolder, "displacement.png"), bbox_inches="tight", dpi=300)
# plt.show()
plt.close(fig)
genBest = np.empty(optEnergy.shape)
for i in range(int(optEnergy.shape[0] / 25)):
genBest[i * 25 : (i + 1) * 25] = max(optEnergy[i * 25 : (i + 1) * 25])
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(np.maximum.accumulate(optEnergy), color="darkorange", label="best")
ax.plot(optEnergy, ".", ms=1, color="darkgreen", label="all")
ax.plot(genBest, color="darkblue", label="gen best")
# ax.set_xlim(-0.15,0.65)
ax.set_ylim(0.15, 1.05)
ax.set_xlabel("iteration")
ax.set_ylabel(r"$\mathcal{F}$")
ax.legend()
plt.savefig(join(pathToSimFolder, "convergence.png"), bbox_inches="tight", dpi=300)
# plt.show()
plt.close(fig)
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(np.maximum.accumulate(optEnergy), color="darkorange", label="best")
ax.plot(optEnergy, ".", ms=1, color="darkgreen", label="all")
ax.plot(genBest, color="darkblue", label="gen best")
ax2 = ax.twinx()
ax.set_zorder(ax2.get_zorder() + 1)
ax.patch.set_visible(False)
ax2.plot(
range(int(distance + meanRange / 2), controlVoltages.shape[0]),
displace,
"k-",
label="displacement",
)
ax.set_ylim(0.15, 1.05)
ax.set_xlabel("iteration")
ax.set_ylabel(r"$\mathcal{F}$")
ax2.set_ylabel("displacement")
# ax.legend([line],[line.get_label()])
# ax2.legend()
plt.savefig(
join(pathToSimFolder, "convergence_displacement.png"),
bbox_inches="tight",
dpi=300,
)
# plt.show()
plt.close(fig)
###############################
if mode == "basinHop":
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(np.maximum.accumulate(optEnergy), color="darkorange", label="best")
ax.plot(
np.arange(optEnergy.shape[0])[notAccepted[:, 0]],
optEnergy[notAccepted],
".",
ms=1,
color="darkred",
label="not accepted",
zorder=10,
)
ax.plot(
np.arange(optEnergy.shape[0])[accepted[:, 0]],
optEnergy[accepted],
".",
ms=1,
color="darkgreen",
label="accepted",
zorder=10,
)
buff = np.where(basinAccepted[:, 0] == 2)[0]
basinChanges = np.array([buff, np.zeros(buff.shape)], dtype=int)
    # the original statement is truncated here; the trailing [0] mirrors the '== 2' case above
    buff = np.where(basinAccepted[:, 0] == 3)[0]
import gym
import model
import torch
import torch.nn as nn
import torch.nn.functional as F
import wandb
import os
import argparse
import numpy as np
p1 = np.array([[0.9, 0.1], [0.2, 0.8]])
p2 = np.array([[0.3, 0.7], [0.6, 0.4]])
p3 = np.array([[0.5, 0.5], [0.1, 0.9]])
cost_mat = -1*np.array([[50, 200, 10], [3, 500, 0]])
theta_init = np.array([[0.2, 0.6, 0.2], [0.4, 0.4, 0.2]])
NUM_BATCHES = 10000
BATCH_SIZE = 10000
NUM_STATES = 2
NUM_ACTIONS = 3
state = 0
reward_ls_bef = []
reward_ls_af = []
state1 = None
state2 = None
C = 0
for k in range(NUM_BATCHES):
for i in range(BATCH_SIZE):
if i < 500:
action_chose = np.random.choice([0, 1, 2], p=theta_init[state])
reward_ls_bef.append(cost_mat[state, action_chose])
if action_chose == 0:
state = np.random.choice([0, 1], p=p1[state])
elif action_chose == 1:
state = np.random.choice([0, 1], p=p2[state])
elif action_chose == 2:
state = np.random.choice([0, 1], p=p3[state])
elif i == 500:
action_chose = np.random.choice([0, 1, 2], p=theta_init[state])
if action_chose == 0:
state1 = np.random.choice([0, 1], p=p1[state])
elif action_chose == 1:
state1 = np.random.choice([0, 1], p=p2[state])
elif action_chose == 2:
state1 = np.random.choice([0, 1], p=p3[state])
state2 = np.random.choice([0, 1], p=[0.5, 0.5])
else:
action_chose1 = np.random.choice([0, 1, 2], p=theta_init[state1])
action_chose2 = np.random.choice([0, 1, 2], p=theta_init[state2])
reward_ls_af.append(
(cost_mat[state1, action_chose1], cost_mat[state2, action_chose2]))
if action_chose1 == 0:
state1 = np.random.choice([0, 1], p=p1[state1])
elif action_chose1 == 1:
state1 = np.random.choice([0, 1], p=p2[state1])
elif action_chose1 == 2:
state1 = np.random.choice([0, 1], p=p3[state1])
if action_chose2 == 0:
state2 = np.random.choice([0, 1], p=p1[state2])
elif action_chose2 == 1:
                state2 = np.random.choice([0, 1], p=p2[state2])
            elif action_chose2 == 2:
                # assumed continuation mirroring the state1 branch above; the
                # original file is truncated at this point
                state2 = np.random.choice([0, 1], p=p3[state2])
###########################################################################
######################## fierClass example script #########################
###########################################################################
### Authors: <NAME> (<EMAIL>) ######################
############ <NAME> (<EMAIL>) ##############
############ <NAME> (<EMAIL>) ## Date: 2019/05/03 ##
########################### (<EMAIL>) ####################
###########################################################################
### Note: this script is an introductory example for using the functions ##
### designed according to the fierClass algorithmin. ######################
### In the present example, the algorithm tries to classify data coming ###
### from an Inertial Measurement Unit (IMU), comparing the stream of data #
### with respect to a set of examples provided. The classification is #####
### performed considering two scenarios: ##################################
####### 1) when (example) instances are grouped toghether through #########
####### subclasses; #######################################################
####### 2) when each instance represents an individual operating mode #####
####### that needs to be classified separately with respect to the ########
####### others. ###########################################################
###########################################################################
### Further details on how to use the training and classifying functions ##
### are available through the help. #######################################
###########################################################################
### If you are going to use fierClass in your research project, please ####
### cite its reference article - <NAME>, S. Formentin, et al. #########
### "fierClass: A multi-signal, cepstrum-based, time series classifier," ##
### Engineering Applications of Artificial Intelligence, Volume 87, 2020 ##
### https://doi.org/10.1016/j.engappai.2019.103262. #######################
###########################################################################
### Copyright and license: © <NAME>, <NAME> #############
### <NAME>, Politecnico di Milano #################################
###########################################################################
### Licensed under the [MIT License](LICENSE). ############################
###########################################################################
########## In case of help, feel free to contact the author ###############
###########################################################################
###########################################################################
# Libraries #
###########################################################################
import numpy as np
from custom_lib.visualization import *
from custom_lib.fierClass import *
from custom_lib.functions import*
import warnings
warnings.filterwarnings("ignore")
###########################################################################
# Simulation Params #
###########################################################################
timespan=100 #[s]
fs=100 #[Hz]
samples=timespan*fs #[-] recorded samples
np.random.seed(1)
e = np.random.randn(samples, 1)