import itertools
from typing import List, Optional, Sequence, Tuple
import numpy as np
import xarray as xr
from starfish.core.types import Coordinates, Features, Number, OverlapStrategy
class Area:
"""
Small class that defines rectangular area of physical space by
its bottom left and top right coordinates.
"""
def __init__(self, min_x: Number, max_x: Number, min_y: Number, max_y: Number):
self.min_x = min_x
self.max_x = max_x
self.min_y = min_y
self.max_y = max_y
def __eq__(self, other) -> bool:
return (self.min_x == other.min_x
and self.min_y == other.min_y
and self.max_x == other.max_x
and self.max_y == other.max_y)
@staticmethod
def _overlap(area1: "Area", area2: "Area") -> bool:
"""Return True if two rectangles overlap"""
if (area1.max_x < area2.min_x) or (area1.min_x > area2.max_x):
return False
if (area1.max_y < area2.min_y) or (area1.min_y > area2.max_y):
return False
return True
@staticmethod
def find_intersection(area1: "Area", area2: "Area") -> Optional["Area"]:
"""
Find the overlap area of two rectangles and return as new Area object.
If the two rectangles do not overlap, return None.
"""
if Area._overlap(area1, area2):
return Area(max(area1.min_x, area2.min_x),
min(area1.max_x, area2.max_x),
max(area1.min_y, area2.min_y),
min(area1.max_y, area2.max_y))
return None
def find_overlaps_of_xarrays(xarrays: Sequence[xr.DataArray]) -> Sequence[Tuple[int, int]]:
"""
Find all the overlap areas within a list of xarrays.
Parameters
----------
xarrays : List[xr.DataArray]
The list of xarrays to find overlaps in.
Returns
-------
List[Tuple[int, int]] :
A list of tuples containing the indices of two overlapping
IntensityTables.
"""
all_overlaps: List[Tuple[int, int]] = list()
for idx1, idx2 in itertools.combinations(range(len(xarrays)), 2):
xr1 = xarrays[idx1]
xr2 = xarrays[idx2]
        area1 = Area(np.min(xr1[Coordinates.X.value]),
                     np.max(xr1[Coordinates.X.value]),
                     np.min(xr1[Coordinates.Y.value]),
                     np.max(xr1[Coordinates.Y.value]))
        area2 = Area(np.min(xr2[Coordinates.X.value]),
                     np.max(xr2[Coordinates.X.value]),
                     np.min(xr2[Coordinates.Y.value]),
                     np.max(xr2[Coordinates.Y.value]))
        if Area._overlap(area1, area2):
            all_overlaps.append((idx1, idx2))
    return all_overlaps
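# Example (illustrative sketch, not part of the starfish source): intersecting
# two overlapping rectangles with Area.find_intersection; the coordinate
# values below are arbitrary.
_rect_a = Area(min_x=0, max_x=2, min_y=0, max_y=2)
_rect_b = Area(min_x=1, max_x=3, min_y=1, max_y=3)
assert Area.find_intersection(_rect_a, _rect_b) == Area(1, 2, 1, 2)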
"""
Numerical python functions written for compatibility with MATLAB
commands with the same names. Most numerical python functions can be found in
the `numpy` and `scipy` libraries. What remains here is code for performing
spectral computations.
Spectral functions
------------------
`cohere`
Coherence (normalized cross spectral density)
`csd`
Cross spectral density using Welch's average periodogram
`detrend`
Remove the mean or best fit line from an array
`psd`
Power spectral density using Welch's average periodogram
`specgram`
Spectrogram (spectrum over segments of time)
`complex_spectrum`
Return the complex-valued frequency spectrum of a signal
`magnitude_spectrum`
Return the magnitude of the frequency spectrum of a signal
`angle_spectrum`
Return the angle (wrapped phase) of the frequency spectrum of a signal
`phase_spectrum`
Return the phase (unwrapped angle) of the frequency spectrum of a signal
`detrend_mean`
Remove the mean from a line.
`detrend_linear`
Remove the best fit line from a line.
`detrend_none`
Return the original line.
`stride_windows`
Get all windows in an array in a memory-efficient manner
"""
import functools
from numbers import Number
import numpy as np
from matplotlib import _api
import matplotlib.cbook as cbook
from matplotlib import docstring
def window_hanning(x):
"""
Return x times the hanning window of len(x).
See Also
--------
window_none : Another window algorithm.
"""
return np.hanning(len(x))*x
def window_none(x):
"""
No window function; simply return x.
See Also
--------
window_hanning : Another window algorithm.
"""
return x
def detrend(x, key=None, axis=None):
"""
Return x with its trend removed.
Parameters
----------
x : array or sequence
Array or sequence containing the data.
key : {'default', 'constant', 'mean', 'linear', 'none'} or function
The detrending algorithm to use. 'default', 'mean', and 'constant' are
the same as `detrend_mean`. 'linear' is the same as `detrend_linear`.
'none' is the same as `detrend_none`. The default is 'mean'. See the
corresponding functions for more details regarding the algorithms. Can
also be a function that carries out the detrend operation.
axis : int
The axis along which to do the detrending.
See Also
--------
detrend_mean : Implementation of the 'mean' algorithm.
detrend_linear : Implementation of the 'linear' algorithm.
detrend_none : Implementation of the 'none' algorithm.
"""
if key is None or key in ['constant', 'mean', 'default']:
return detrend(x, key=detrend_mean, axis=axis)
elif key == 'linear':
return detrend(x, key=detrend_linear, axis=axis)
elif key == 'none':
return detrend(x, key=detrend_none, axis=axis)
elif callable(key):
x = np.asarray(x)
if axis is not None and axis + 1 > x.ndim:
raise ValueError(f'axis(={axis}) out of bounds')
if (axis is None and x.ndim == 0) or (not axis and x.ndim == 1):
return key(x)
# try to use the 'axis' argument if the function supports it,
# otherwise use apply_along_axis to do it
try:
return key(x, axis=axis)
except TypeError:
return np.apply_along_axis(key, axis=axis, arr=x)
else:
raise ValueError(
f"Unknown value for key: {key!r}, must be one of: 'default', "
f"'constant', 'mean', 'linear', or a function")
def detrend_mean(x, axis=None):
"""
Return x minus the mean(x).
Parameters
----------
x : array or sequence
Array or sequence containing the data
Can have any dimensionality
axis : int
The axis along which to take the mean. See numpy.mean for a
description of this argument.
See Also
--------
detrend_linear : Another detrend algorithm.
detrend_none : Another detrend algorithm.
detrend : A wrapper around all the detrend algorithms.
"""
x = np.asarray(x)
if axis is not None and axis+1 > x.ndim:
raise ValueError('axis(=%s) out of bounds' % axis)
return x - x.mean(axis, keepdims=True)
def detrend_none(x, axis=None):
"""
Return x: no detrending.
Parameters
----------
x : any object
An object containing the data
axis : int
This parameter is ignored.
It is included for compatibility with detrend_mean
See Also
--------
detrend_mean : Another detrend algorithm.
detrend_linear : Another detrend algorithm.
detrend : A wrapper around all the detrend algorithms.
"""
return x
def detrend_linear(y):
"""
    Return y minus its best-fit line; 'linear' detrending.
    Parameters
    ----------
    y : 0-D or 1-D array or sequence
        Array or sequence containing the data.
See Also
--------
detrend_mean : Another detrend algorithm.
detrend_none : Another detrend algorithm.
detrend : A wrapper around all the detrend algorithms.
"""
# This is faster than an algorithm based on linalg.lstsq.
y = np.asarray(y)
if y.ndim > 1:
raise ValueError('y cannot have ndim > 1')
# short-circuit 0-D array.
if not y.ndim:
return np.array(0., dtype=y.dtype)
x = np.arange(y.size, dtype=float)
C = np.cov(x, y, bias=1)
b = C[0, 1]/C[0, 0]
a = y.mean() - b*x.mean()
return y - (b*x + a)
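# Example (illustrative sketch): the string keys accepted by detrend()
# dispatch to the detrend_* helpers above; a perfect ramp is reduced to
# (numerical) zero by 'linear'.  The ramp values are arbitrary.
_ramp = 2.0 * np.arange(8, dtype=float) + 5.0
assert np.allclose(detrend(_ramp, key='mean'), _ramp - _ramp.mean())
assert np.allclose(detrend(_ramp, key='linear'), 0.0)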
def stride_windows(x, n, noverlap=None, axis=0):
"""
Get all windows of x with length n as a single array,
using strides to avoid data duplication.
.. warning::
It is not safe to write to the output array. Multiple
elements may point to the same piece of memory,
so modifying one value may change others.
Parameters
----------
x : 1D array or sequence
Array or sequence containing the data.
n : int
The number of data points in each window.
noverlap : int, default: 0 (no overlap)
The overlap between adjacent windows.
axis : int
The axis along which the windows will run.
References
----------
`stackoverflow: Rolling window for 1D arrays in Numpy?
<http://stackoverflow.com/a/6811241>`_
`stackoverflow: Using strides for an efficient moving average filter
<http://stackoverflow.com/a/4947453>`_
"""
if noverlap is None:
noverlap = 0
if noverlap >= n:
raise ValueError('noverlap must be less than n')
if n < 1:
raise ValueError('n cannot be less than 1')
x = np.asarray(x)
if x.ndim != 1:
raise ValueError('only 1-dimensional arrays can be used')
if n == 1 and noverlap == 0:
if axis == 0:
return x[np.newaxis]
else:
return x[np.newaxis].transpose()
if n > x.size:
raise ValueError('n cannot be greater than the length of x')
# np.lib.stride_tricks.as_strided easily leads to memory corruption for
# non integer shape and strides, i.e. noverlap or n. See #3845.
noverlap = int(noverlap)
n = int(n)
step = n - noverlap
if axis == 0:
shape = (n, (x.shape[-1]-noverlap)//step)
strides = (x.strides[0], step*x.strides[0])
else:
shape = ((x.shape[-1]-noverlap)//step, n)
strides = (step*x.strides[0], x.strides[0])
return np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
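# Example (illustrative sketch): windows of length 4 with an overlap of 2
# over a short ramp; each column of the result is one window and adjacent
# windows share `noverlap` samples.  The input values are arbitrary.
_seq = np.arange(6)
_wins = stride_windows(_seq, n=4, noverlap=2)
assert _wins.shape == (4, 2)
assert np.array_equal(_wins[:, 1], np.array([2, 3, 4, 5]))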
def _spectral_helper(x, y=None, NFFT=None, Fs=None, detrend_func=None,
window=None, noverlap=None, pad_to=None,
sides=None, scale_by_freq=None, mode=None):
"""
Private helper implementing the common parts between the psd, csd,
spectrogram and complex, magnitude, angle, and phase spectrums.
"""
if y is None:
# if y is None use x for y
same_data = True
else:
# The checks for if y is x are so that we can use the same function to
# implement the core of psd(), csd(), and spectrogram() without doing
# extra calculations. We return the unaveraged Pxy, freqs, and t.
same_data = y is x
if Fs is None:
Fs = 2
if noverlap is None:
noverlap = 0
if detrend_func is None:
detrend_func = detrend_none
if window is None:
window = window_hanning
# if NFFT is set to None use the whole signal
if NFFT is None:
NFFT = 256
if mode is None or mode == 'default':
mode = 'psd'
_api.check_in_list(
['default', 'psd', 'complex', 'magnitude', 'angle', 'phase'],
mode=mode)
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is not 'psd'")
# Make sure we're dealing with a numpy array. If y and x were the same
# object to start with, keep them that way
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
if sides is None or sides == 'default':
if np.iscomplexobj(x):
sides = 'twosided'
else:
sides = 'onesided'
_api.check_in_list(['default', 'onesided', 'twosided'], sides=sides)
# zero pad x and y up to NFFT if they are shorter than NFFT
if len(x) < NFFT:
n = len(x)
x = np.resize(x, NFFT)
x[n:] = 0
if not same_data and len(y) < NFFT:
n = len(y)
y = np.resize(y, NFFT)
y[n:] = 0
if pad_to is None:
pad_to = NFFT
if mode != 'psd':
scale_by_freq = False
elif scale_by_freq is None:
scale_by_freq = True
# For real x, ignore the negative frequencies unless told otherwise
if sides == 'twosided':
numFreqs = pad_to
if pad_to % 2:
freqcenter = (pad_to - 1)//2 + 1
else:
freqcenter = pad_to//2
scaling_factor = 1.
elif sides == 'onesided':
if pad_to % 2:
numFreqs = (pad_to + 1)//2
else:
numFreqs = pad_to//2 + 1
scaling_factor = 2.
if not np.iterable(window):
window = window(np.ones(NFFT, x.dtype))
if len(window) != NFFT:
raise ValueError(
"The window length must match the data's first dimension")
result = stride_windows(x, NFFT, noverlap, axis=0)
result = detrend(result, detrend_func, axis=0)
result = result * window.reshape((-1, 1))
result = np.fft.fft(result, n=pad_to, axis=0)[:numFreqs, :]
freqs = np.fft.fftfreq(pad_to, 1/Fs)[:numFreqs]
if not same_data:
# if same_data is False, mode must be 'psd'
resultY = stride_windows(y, NFFT, noverlap)
resultY = detrend(resultY, detrend_func, axis=0)
resultY = resultY * window.reshape((-1, 1))
resultY = np.fft.fft(resultY, n=pad_to, axis=0)[:numFreqs, :]
result = np.conj(result) * resultY
elif mode == 'psd':
result = np.conj(result) * result
elif mode == 'magnitude':
result = np.abs(result) / np.abs(window).sum()
elif mode == 'angle' or mode == 'phase':
# we unwrap the phase later to handle the onesided vs. twosided case
result = np.angle(result)
elif mode == 'complex':
result /= np.abs(window).sum()
if mode == 'psd':
# Also include scaling factors for one-sided densities and dividing by
# the sampling frequency, if desired. Scale everything, except the DC
# component and the NFFT/2 component:
# if we have a even number of frequencies, don't scale NFFT/2
if not NFFT % 2:
slc = slice(1, -1, None)
# if we have an odd number, just don't scale DC
else:
slc = slice(1, None, None)
result[slc] *= scaling_factor
# MATLAB divides by the sampling frequency so that density function
# has units of dB/Hz and can be integrated by the plotted frequency
# values. Perform the same scaling here.
if scale_by_freq:
result /= Fs
# Scale the spectrum by the norm of the window to compensate for
# windowing loss; see Bendat & Piersol Sec 11.5.2.
result /= (np.abs(window)**2).sum()
else:
# In this case, preserve power in the segment, not amplitude
result /= np.abs(window).sum()**2
t = np.arange(NFFT/2, len(x) - NFFT/2 + 1, NFFT - noverlap)/Fs
if sides == 'twosided':
# center the frequency range at zero
freqs = np.roll(freqs, -freqcenter, axis=0)
result = np.roll(result, -freqcenter, axis=0)
elif not pad_to % 2:
# get the last value correctly, it is negative otherwise
freqs[-1] *= -1
# we unwrap the phase here to handle the onesided vs. twosided case
if mode == 'phase':
result = np.unwrap(result, axis=0)
return result, freqs, t
def _single_spectrum_helper(
mode, x, Fs=None, window=None, pad_to=None, sides=None):
"""
Private helper implementing the commonality between the complex, magnitude,
angle, and phase spectrums.
"""
_api.check_in_list(['complex', 'magnitude', 'angle', 'phase'], mode=mode)
if pad_to is None:
pad_to = len(x)
spec, freqs, _ = _spectral_helper(x=x, y=None, NFFT=len(x), Fs=Fs,
detrend_func=detrend_none, window=window,
noverlap=0, pad_to=pad_to,
sides=sides,
scale_by_freq=False,
mode=mode)
if mode != 'complex':
spec = spec.real
if spec.ndim == 2 and spec.shape[1] == 1:
spec = spec[:, 0]
return spec, freqs
# Split out these keyword docs so that they can be used elsewhere
docstring.interpd.update(
Spectral="""\
Fs : float, default: 2
The sampling frequency (samples per time unit). It is used to calculate
the Fourier frequencies, *freqs*, in cycles per time unit.
window : callable or ndarray, default: `.window_hanning`
A function or a vector of length *NFFT*. To create window vectors see
`.window_hanning`, `.window_none`, `numpy.blackman`, `numpy.hamming`,
`numpy.bartlett`, `scipy.signal`, `scipy.signal.get_window`, etc. If a
function is passed as the argument, it must take a data segment as an
argument and return the windowed version of the segment.
sides : {'default', 'onesided', 'twosided'}, optional
Which sides of the spectrum to return. 'default' is one-sided for real
data and two-sided for complex data. 'onesided' forces the return of a
one-sided spectrum, while 'twosided' forces two-sided.""",
Single_Spectrum="""\
pad_to : int, optional
The number of points to which the data segment is padded when performing
the FFT. While not increasing the actual resolution of the spectrum (the
minimum distance between resolvable peaks), this can give more points in
the plot, allowing for more detail. This corresponds to the *n* parameter
in the call to fft(). The default is None, which sets *pad_to* equal to
the length of the input signal (i.e. no padding).""",
PSD="""\
pad_to : int, optional
The number of points to which the data segment is padded when performing
the FFT. This can be different from *NFFT*, which specifies the number
of data points used. While not increasing the actual resolution of the
spectrum (the minimum distance between resolvable peaks), this can give
more points in the plot, allowing for more detail. This corresponds to
the *n* parameter in the call to fft(). The default is None, which sets
*pad_to* equal to *NFFT*
NFFT : int, default: 256
    The number of data points used in each block for the FFT. A power of 2 is
most efficient. This should *NOT* be used to get zero padding, or the
scaling of the result will be incorrect; use *pad_to* for this instead.
detrend : {'none', 'mean', 'linear'} or callable, default: 'none'
The function applied to each segment before fft-ing, designed to remove
the mean or linear trend. Unlike in MATLAB, where the *detrend* parameter
    is a vector, in Matplotlib it is a function. The :mod:`~matplotlib.mlab`
module defines `.detrend_none`, `.detrend_mean`, and `.detrend_linear`,
but you can use a custom function as well. You can also use a string to
choose one of the functions: 'none' calls `.detrend_none`. 'mean' calls
`.detrend_mean`. 'linear' calls `.detrend_linear`.
scale_by_freq : bool, default: True
Whether the resulting density values should be scaled by the scaling
frequency, which gives density in units of Hz^-1. This allows for
integration over the returned frequency values. The default is True for
MATLAB compatibility.""")
@docstring.dedent_interpd
def psd(x, NFFT=None, Fs=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None):
r"""
Compute the power spectral density.
The power spectral density :math:`P_{xx}` by Welch's average
periodogram method. The vector *x* is divided into *NFFT* length
segments. Each segment is detrended by function *detrend* and
windowed by function *window*. *noverlap* gives the length of
the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
of each segment :math:`i` are averaged to compute :math:`P_{xx}`.
If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
Parameters
----------
x : 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(PSD)s
noverlap : int, default: 0 (no overlap)
The number of points of overlap between segments.
Returns
-------
Pxx : 1-D array
The values for the power spectrum :math:`P_{xx}` (real valued)
freqs : 1-D array
The frequencies corresponding to the elements in *Pxx*
References
----------
    Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John
Wiley & Sons (1986)
See Also
--------
specgram
`specgram` differs in the default overlap; in not returning the mean of
the segment periodograms; and in returning the times of the segments.
magnitude_spectrum : returns the magnitude spectrum.
csd : returns the spectral density between two signals.
"""
Pxx, freqs = csd(x=x, y=None, NFFT=NFFT, Fs=Fs, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq)
return Pxx.real, freqs
@docstring.dedent_interpd
def csd(x, y, NFFT=None, Fs=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None):
"""
Compute the cross-spectral density.
The cross spectral density :math:`P_{xy}` by Welch's average
periodogram method. The vectors *x* and *y* are divided into
*NFFT* length segments. Each segment is detrended by function
*detrend* and windowed by function *window*. *noverlap* gives
the length of the overlap between segments. The product of
the direct FFTs of *x* and *y* are averaged over each segment
to compute :math:`P_{xy}`, with a scaling to correct for power
loss due to windowing.
If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
padded to *NFFT*.
Parameters
----------
x, y : 1-D arrays or sequences
Arrays or sequences containing the data
%(Spectral)s
%(PSD)s
noverlap : int, default: 0 (no overlap)
The number of points of overlap between segments.
Returns
-------
Pxy : 1-D array
The values for the cross spectrum :math:`P_{xy}` before scaling (real
valued)
freqs : 1-D array
The frequencies corresponding to the elements in *Pxy*
References
----------
Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John
Wiley & Sons (1986)
See Also
--------
psd : equivalent to setting ``y = x``.
"""
if NFFT is None:
NFFT = 256
Pxy, freqs, _ = _spectral_helper(x=x, y=y, NFFT=NFFT, Fs=Fs,
detrend_func=detrend, window=window,
noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq,
mode='psd')
if Pxy.ndim == 2:
if Pxy.shape[1] > 1:
Pxy = Pxy.mean(axis=1)
else:
Pxy = Pxy[:, 0]
return Pxy, freqs
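# Example (illustrative sketch): power spectral density of a pure tone via
# psd(), which delegates to csd() with y = x.  The sampling frequency and
# tone frequency below are arbitrary illustrative choices.
_fs = 100.0
_tt = np.arange(0, 10, 1 / _fs)
_tone = np.sin(2 * np.pi * 10.0 * _tt)
_Pxx, _freqs = psd(_tone, NFFT=256, Fs=_fs, noverlap=128)
# The strongest bin should sit within one frequency bin of the 10 Hz tone.
assert abs(_freqs[np.argmax(_Pxx)] - 10.0) < _fs / 256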
_single_spectrum_docs = """\
Compute the {quantity} of *x*.
Data is padded to a length of *pad_to* and the windowing function *window* is
applied to the signal.
Parameters
----------
x : 1-D array or sequence
Array or sequence containing the data
{Spectral}
{Single_Spectrum}
Returns
-------
spectrum : 1-D array
The {quantity}.
freqs : 1-D array
The frequencies corresponding to the elements in *spectrum*.
See Also
--------
psd
Returns the power spectral density.
complex_spectrum
Returns the complex-valued frequency spectrum.
magnitude_spectrum
Returns the absolute value of the `complex_spectrum`.
angle_spectrum
Returns the angle of the `complex_spectrum`.
phase_spectrum
Returns the phase (unwrapped angle) of the `complex_spectrum`.
specgram
Can return the complex spectrum of segments within the signal.
"""
complex_spectrum = functools.partial(_single_spectrum_helper, "complex")
complex_spectrum.__doc__ = _single_spectrum_docs.format(
quantity="complex-valued frequency spectrum",
**docstring.interpd.params)
magnitude_spectrum = functools.partial(_single_spectrum_helper, "magnitude")
magnitude_spectrum.__doc__ = _single_spectrum_docs.format(
quantity="magnitude (absolute value) of the frequency spectrum",
**docstring.interpd.params)
angle_spectrum = functools.partial(_single_spectrum_helper, "angle")
angle_spectrum.__doc__ = _single_spectrum_docs.format(
quantity="angle of the frequency spectrum (wrapped phase spectrum)",
**docstring.interpd.params)
phase_spectrum = functools.partial(_single_spectrum_helper, "phase")
phase_spectrum.__doc__ = _single_spectrum_docs.format(
quantity="phase of the frequency spectrum (unwrapped phase spectrum)",
**docstring.interpd.params)
@docstring.dedent_interpd
def specgram(x, NFFT=None, Fs=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None,
mode=None):
"""
Compute a spectrogram.
Compute and plot a spectrogram of data in x. Data are split into
NFFT length segments and the spectrum of each section is
computed. The windowing function window is applied to each
segment, and the amount of overlap of each segment is
specified with noverlap.
Parameters
----------
x : array-like
1-D array or sequence.
%(Spectral)s
%(PSD)s
noverlap : int, default: 128
The number of points of overlap between blocks.
mode : str, default: 'psd'
What sort of spectrum to use:
'psd'
Returns the power spectral density.
'complex'
Returns the complex-valued frequency spectrum.
'magnitude'
Returns the magnitude spectrum.
'angle'
Returns the phase spectrum without unwrapping.
'phase'
Returns the phase spectrum with unwrapping.
Returns
-------
spectrum : array-like
2D array, columns are the periodograms of successive segments.
freqs : array-like
1-D array, frequencies corresponding to the rows in *spectrum*.
t : array-like
1-D array, the times corresponding to midpoints of segments
(i.e the columns in *spectrum*).
See Also
--------
psd : differs in the overlap and in the return values.
complex_spectrum : similar, but with complex valued frequencies.
    magnitude_spectrum : similar to a single segment when mode is 'magnitude'.
    angle_spectrum : similar to a single segment when mode is 'angle'.
    phase_spectrum : similar to a single segment when mode is 'phase'.
Notes
-----
detrend and scale_by_freq only apply when *mode* is set to 'psd'.
"""
if noverlap is None:
noverlap = 128 # default in _spectral_helper() is noverlap = 0
if NFFT is None:
NFFT = 256 # same default as in _spectral_helper()
if len(x) <= NFFT:
_api.warn_external("Only one segment is calculated since parameter "
f"NFFT (={NFFT}) >= signal length (={len(x)}).")
spec, freqs, t = _spectral_helper(x=x, y=None, NFFT=NFFT, Fs=Fs,
detrend_func=detrend, window=window,
noverlap=noverlap, pad_to=pad_to,
sides=sides,
scale_by_freq=scale_by_freq,
mode=mode)
if mode != 'complex':
spec = spec.real # Needed since helper implements generically
return spec, freqs, t
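# Example (illustrative sketch): spectrogram of a tone; each column of the
# returned array is the periodogram of one segment.  The frequencies and
# durations below are arbitrary illustrative choices.
_fs2 = 200.0
_tt2 = np.arange(0, 5, 1 / _fs2)
_sig2 = np.sin(2 * np.pi * 25.0 * _tt2)
_spec, _sfreqs, _times = specgram(_sig2, NFFT=256, Fs=_fs2, noverlap=128)
# One row per (one-sided) frequency bin, one column per segment midpoint.
assert _spec.shape == (len(_sfreqs), len(_times))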
@docstring.dedent_interpd
def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
r"""
The coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \frac{|P_{xy}|^2}{P_{xx}P_{yy}}
Parameters
----------
x, y
Array or sequence containing the data
%(Spectral)s
%(PSD)s
noverlap : int, default: 0 (no overlap)
The number of points of overlap between segments.
Returns
-------
Cxy : 1-D array
The coherence vector.
freqs : 1-D array
The frequencies for the elements in *Cxy*.
See Also
--------
:func:`psd`, :func:`csd` :
For information about the methods used to compute :math:`P_{xy}`,
:math:`P_{xx}` and :math:`P_{yy}`.
"""
if len(x) < 2 * NFFT:
raise ValueError(
"Coherence is calculated by averaging over *NFFT* length "
"segments. Your signal is too short for your choice of *NFFT*.")
Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Cxy = np.abs(Pxy) ** 2 / (Pxx * Pyy)
return Cxy, f
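# Example (illustrative sketch): the coherence between a signal and a scaled
# copy of itself is unity at every frequency.  The random seed, length, and
# scale factor are arbitrary illustrative choices.
np.random.seed(0)
_xsig = np.random.randn(2048)
_ysig = 0.5 * _xsig
_Cxy, _fcoh = cohere(_xsig, _ysig, NFFT=256, noverlap=128)
assert np.allclose(_Cxy, 1.0)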
class GaussianKDE:
"""
Representation of a kernel-density estimate using Gaussian kernels.
Parameters
----------
dataset : array-like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a
callable, it should take a `GaussianKDE` instance as only
parameter and return a scalar. If None (default), 'scott' is used.
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
dim : int
Number of dimensions.
num_dp : int
Number of datapoints.
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of *dataset*, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of *covariance*.
Methods
-------
kde.evaluate(points) : ndarray
Evaluate the estimated pdf on a provided set of points.
kde(points) : ndarray
Same as kde.evaluate(points)
"""
# This implementation with minor modification was too good to pass up.
# from scipy: https://github.com/scipy/scipy/blob/master/scipy/stats/kde.py
def __init__(self, dataset, bw_method=None):
self.dataset = np.atleast_2d(dataset)
if not np.array(self.dataset).size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.dim, self.num_dp = np.array(self.dataset).shape
if bw_method is None:
pass
elif cbook._str_equal(bw_method, 'scott'):
self.covariance_factor = self.scotts_factor
elif cbook._str_equal(bw_method, 'silverman'):
self.covariance_factor = self.silverman_factor
elif isinstance(bw_method, Number):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
raise ValueError("`bw_method` should be 'scott', 'silverman', a "
"scalar or a callable")
# Computes the covariance matrix for each Gaussian kernel using
# covariance_factor().
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self.data_covariance = np.atleast_2d(
np.cov(
self.dataset,
rowvar=1,
bias=False))
self.data_inv_cov = np.linalg.inv(self.data_covariance)
self.covariance = self.data_covariance * self.factor ** 2
self.inv_cov = self.data_inv_cov / self.factor ** 2
self.norm_factor = (np.sqrt(np.linalg.det(2 * np.pi * self.covariance))
* self.num_dp)
def scotts_factor(self):
        return np.power(self.num_dp, -1. / (self.dim + 4))
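# Example (illustrative sketch): with bw_method='scott' the bandwidth factor
# follows Scott's rule, n ** (-1 / (d + 4)).  The sample size and seed are
# arbitrary illustrative choices.
np.random.seed(1)
_kde = GaussianKDE(np.random.randn(100), bw_method='scott')
assert np.isclose(_kde.factor, 100 ** (-1.0 / 5.0))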
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
        for i in range(400):
            r = requests.get(check_complete)
            bar.update(1)
            elapsed = (i * 3) / 60  # minutes spent polling so far (3 s sleep per check)
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
def M2M_Files(data, tag=''):
"""
    Use a regex tag combined with the results of the M2M data request to collect the list of NetCDF files from the THREDDS catalog.
    :param data: JSON object returned from M2M data request with details on where the data is to be found for download
    :param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
    :return: list of the NetCDF files in the THREDDS catalog that match the tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
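# Example (illustrative sketch): structtype grows itself when the next index
# is first accessed, imitating a MATLAB struct array of var() records.
_demo = structtype()
_demo[0].name = 'time'
_demo[0].units = 'seconds since 1900-01-01'
_demo[1].name = 'sea_surface_temperature'
_demo[1].units = 'degC'
assert len(_demo) == 2 and _demo[1].units == 'degC'
# A typical (hypothetical) end-to-end request would chain the helpers roughly
# as sketched below, with uframe_dataset_name and var_list built by M2M_URLs()
# defined next; the dates and regex tag here are placeholders, not values
# taken from this script:
#   data = M2M_Call(uframe_dataset_name, '2019-01-01T00:00:00.000Z',
#                   '2019-02-01T00:00:00.000Z')
#   files = M2M_Files(data, tag='.*\\.nc$')
#   variables, time_converted = M2M_Data(files, var_list)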
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
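# Bio-acoustic sonar; only the time coordinate is requested from these streams.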
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
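# Surface wave statistics from the buoy-mounted wave sensor (wave heights, periods, direction, spread).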
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
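# Single-point velocity meters on the buoys and NSIFs: point velocities plus heading/pitch/roll,
# temperature, and pressure in the instrument's raw decimal units.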
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
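# Seawater pCO2 sensors: thermistor temperature and pCO2 of seawater.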
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
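# Seawater pH sensors: thermistor temperature and pH (unitless).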
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
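# Spectral irradiance sensors: downwelling irradiance vector in uW cm-2 nm-1.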
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
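# Seafloor pressure sensors on the MFNs: absolute seafloor pressure and seawater temperature
# from the tide-measurement streams.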
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
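# Pumped CTDs: temperature, practical salinity, density, pressure, and conductivity.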
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
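# 3-D single-point velocity meters on the MFNs: turbulent velocity components and pressure.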
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
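# Profiler-mounted 3-D velocity meter on CE09OSPM: velocities, attitude, and co-located CTD pressure.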
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
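#CTDPF (wire-following profiler CTD on CE09OSPM: temperature, salinity, density, pressure, conductivity)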
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = | np.array([]) | numpy.array |
import numpy as np
import sys, os
sys.path.append(os.pardir)
from activation import *
from layers import *
from loss import *
from collections import OrderedDict
class TwoLayerNet:
def __init__(self, input_size, hidden_size, output_size, batch_norm=True, weight_decay=0.0, Prelu=False):
self.Prelu = Prelu
self.weight_decay = weight_decay
self.batch_norm = batch_norm
self.params = {}
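        # He-style initialization (assumed intent): randn scaled by sqrt(2 / input_size), suited to ReLU-like activations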
self.params['W1'] = np.random.randn(input_size, hidden_size) / np.sqrt(input_size/2.0)
self.params['b1'] = np.zeros(hidden_size)
if self.batch_norm:
self.params['gamma'] = np.random.random(hidden_size)
self.params['beta'] = | np.zeros(hidden_size) | numpy.zeros |
"""
This script loads the MICCAI 2012 Multi-Atlas Challenge dataset stored in NIfTI format (.nii)
and exports the labels to npz files.
Note that labels are converted into 135 categories.
"""
import os
import glob
import sys
import numpy as np
import h5py
import nibabel as nib
nib.nifti1.Nifti1Header.quaternion_threshold = -1e-6
ignored_labels = list(range(1,4))+list(range(5,11))+list(range(12,23))+list(range(24,30))+[33,34]+[42,43]+[53,54]+list(range(63,69))+[70,74]+\
list(range(80,100))+[110,111]+[126,127]+[130,131]+[158,159]+[188,189]
# 47: right hippocampus, 48: left hippocampus, 0: others
true_labels = [4, 11, 23, 30, 31, 32, 35, 36, 37, 38, 39, 40, 41, 44, 45, 46, 47, 48, 49, 50, 51, 52, 55, 56, 57,
58, 59, 60, 61, 62, 69, 71, 72, 73, 75, 76, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 112,
113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 128, 129, 132, 133, 134, 135, 136,
137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156,
157, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
179, 180, 181, 182, 183, 184, 185, 186, 187, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200,
201, 202, 203, 204, 205, 206, 207]
def label_filtering(lab, ignore_labels, true_labels):
    for ignored_label in ignore_labels:
        lab[lab == ignored_label] = 0
    for idx, label in enumerate(true_labels):
        lab[lab == label] = idx + 1
return lab
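# Illustrative sketch (not part of the original script): label_filtering zeroes every
# ignored label and maps each true label to its 1-based index in `true_labels`.
# The tiny arrays below are made-up demonstration values.
def _example_label_filtering():
    demo = np.array([0, 4, 11, 47, 48, 5])  # raw MICCAI label values (example only)
    remapped = label_filtering(demo.copy(), ignore_labels=[5], true_labels=[4, 11, 47, 48])
    # remapped -> array([0, 1, 2, 3, 4, 0]); label 5 is ignored, the rest get class ids 1..4
    return remapped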
def crop_background(signal_volume, label_volume, signal_bg=0, verbose=True):
"""Crop out the cube in the signal and label volume in which the signal is non-zero"""
bg_mask = signal_volume == signal_bg
# the following DOES NOT work since there is skull/skin signal labeled as background
# bg_mask = label_volume == bg_label
# generate 1d arrays over axes which are false iff only bg is found in the corresponding slice
only_bg_x = 1-np.all(bg_mask, axis=(1,2))
only_bg_y = 1-np.all(bg_mask, axis=(0,2))
only_bg_z = 1-np.all(bg_mask, axis=(0,1))
# get start and stop index of non bg mri volume
x_start = np.argmax(only_bg_x)
x_stop = np.argmax(1 - only_bg_x[x_start:]) + x_start
x_stop = x_stop if x_start!=x_stop else len(only_bg_x)
y_start = | np.argmax(only_bg_y) | numpy.argmax |
"""visualization.py: Functions for visualizing MAVE-NN models."""
# Standard imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
import pdb
# Special plotting-related imports
#from matplotlib.colors import DivergingNorm, Normalize
from matplotlib.colors import TwoSlopeNorm, Normalize
from mpl_toolkits.axes_grid1 import make_axes_locatable
# MAVE-NN imports
from mavenn.src.error_handling import handle_errors, check
from mavenn.src.validate import validate_alphabet, validate_seqs
@handle_errors
def _get_45deg_mesh(mat):
"""Create X and Y grids rotated -45 degreees."""
# Define rotation matrix
theta = -np.pi / 4
R = np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), | np.cos(theta) | numpy.cos |
from src.SLIPPER import SLIPPER
import numpy as np
import pandas as pd
import pickle
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score
MANAGEMENT = 'Light'
DISCOUNT = 'DR5'
def base_load(data, management, discount, spatial=False):
rates = ["NoDR", "DR1", "DR3", "DR5"]
if discount not in rates:
raise ValueError("Invalid Discount Rate!")
salvage = data[
(data['TimeStep'] == 40) &
(data['Treatment'] == management) &
(data['Salvage'] == 'Salvage')
]
salvage = salvage.set_index("StandID")
salvage = salvage.fillna(salvage.mean())
no_salvage = data[
(data['TimeStep'] == 40) &
(data['Treatment'] == management) &
(data['Salvage'] == 'NoSalvage')
]
no_salvage = no_salvage.set_index("StandID")
no_salvage = no_salvage.fillna(no_salvage.mean())
data = salvage.copy()
data['diff'] = data[discount]
data['diff'] -= no_salvage[discount]
return data, salvage, no_salvage
def load_data_class(data, management, discount="DR5", spatial=False):
rates = ["NoDR", "DR1", "DR3", "DR5"]
data, salvage, no_salvage = base_load(data, management, discount, spatial)
data['Voucher'] = (data['diff'] > 0)
rates.remove(discount)
data = data.drop(rates, axis=1)
return data, salvage, no_salvage
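# Usage sketch (illustrative; assumes the long-format stand table `updata.csv` used by
# run_train below has already been read into a DataFrame):
#   df = pd.read_csv('updata.csv')
#   data, salvage, no_salvage = load_data_class(df, MANAGEMENT, DISCOUNT)
#   # data['Voucher'] is True for stands where salvaging beats not salvaging at TimeStep 40.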
def get_strategy(data, target, management, discount):
# Get Optimal Strategy
strategy = target.rename('strategy')
salvage = pd.merge(data, strategy, on="StandID")
salvage_strategy = salvage[
(salvage['strategy'] == 1) & # flake8 doesn't like True here
(salvage['Salvage'] == 'NoSalvage') &
(salvage['TimeStep'] == 40) &
(salvage['Treatment'] == management)
]
print(salvage[
(salvage['strategy'] == 1) & # flake8 doesn't like True here
(salvage['Treatment'] == 'Light') &
(salvage['TimeStep'] == 40)
])
salvage_strategy = salvage_strategy[discount]
no_salvage = pd.merge(data, strategy, on="StandID")
no_salvage_strategy = no_salvage[
(no_salvage['strategy'] == 0) & # flake8 doesn't like False here
(no_salvage['Salvage'] == 'Salvage') &
(no_salvage['TimeStep'] == 40) &
(no_salvage['Treatment'] == management)
]
no_salvage_strategy = no_salvage_strategy[discount]
# Make sure we don't duplicate
print(target.shape[0])
print(salvage_strategy.shape[0])
print(no_salvage_strategy.shape[0])
assert target.shape[0] == salvage_strategy.shape[0] + \
no_salvage_strategy.shape[0]
# Really make sure we don't duplicate
a = salvage_strategy.index.tolist()
b = no_salvage_strategy.index.tolist()
assert len(set(a).intersection(set(b))) == 0
outcome = (salvage_strategy.sum() + no_salvage_strategy.sum()) \
/ target.shape[0]
return outcome
def run_train(management, discount):
MANAGEMENT, DISCOUNT = management, discount
all_data = pd.read_csv('updata.csv')
data, salvage, no_salvage = load_data_class(all_data, MANAGEMENT,
DISCOUNT)
X = data.drop(['Voucher', 'Treatment', DISCOUNT, 'diff', 'SiteInd',
'Salvage', 'TimeStep'], axis=1)
y = data['Voucher']
index = y.index.to_list()
X = X.drop([
'ACRU_316', 'PIRE_125', 'PIST_129', 'THOC2_241',
'ACPE_315', 'BEPA_375', 'BEPO_379', 'LALA_71', 'FAGR_531', 'PIBA2_105',
'POTR5_746', 'BEAL2_371', 'TSCA_261', 'PRPE2_761', 'ACSA3_318',
'POGR4_743', 'AMELA_356', 'FRAM2_541', 'SOAM3_935', 'ACSP2_319',
'POBA2_741', 'FRNI_543', 'PIAB_91', 'OSVI_701', 'POHE4_744', 'ULAM_972',
'MALUS_660', 'PRSE2_762', 'TIAM_951', 'CRATA_500', 'QURU_833',
'FRPE_544', 'NA_950', 'CACA18_391', 'PRVI_763', 'QUAL_802', 'PIRI_126',
'JUCI_601', 'LITU_621', 'NA_970', 'QUVE_837', 'QUBI_804', 'NA_934',
'NA_920', 'SODE3_937', 'BELE_372', 'X2TB_998', 'X2TE_299', 'NA_740',
'PISY_130', 'PSME_202', 'CACO15_402', 'MAAC_651', 'PRAV_771',
'QUPR2_832', 'JUVI_68', 'JUNI_602', 'LARIX_70', 'ACNE2_313',
'ACSA2_317', 'PODE3_742', 'ALGL2_355', 'ABFR_16', 'PIPU_96', 'NA_10',
'ROPS_901', 'CHTH2_43'
], axis=1)
X, y = X.to_numpy(), y.to_numpy()
X_train, X_test, y_train, y_test, _, test_index = train_test_split(
X, y, index, test_size=0.2, random_state=1)
_, _, E_p, y_no_salvage = train_test_split(
X, salvage[DISCOUNT], test_size=0.2, random_state=1)
_, _, E_n, y_salvage = train_test_split(
X, no_salvage[DISCOUNT], test_size=0.2, random_state=1)
E_p, E_n = E_p.to_numpy(), E_n.to_numpy()
clf = SLIPPER()
clf.fit(X_train, y_train)
preds = clf.predict(X_test)
preds = pd.Series(preds)
preds.index = test_index
preds = preds.rename_axis('StandID')
y_test = pd.Series(y_test)
y_test.index = test_index
y_test = y_test.rename_axis('StandID')
print( | np.unique(preds, return_counts=True) | numpy.unique |
"""
Testing code.
Updated BSM February 2017
"""
import sys
import os
import numpy as np
import pytest
from pytest import approx
from numpy.testing import assert_allclose
from scipy.spatial.distance import cdist
from pykrige import kriging_tools as kt
from pykrige import core
from pykrige import variogram_models
from pykrige.ok import OrdinaryKriging
from pykrige.uk import UniversalKriging
from pykrige.ok3d import OrdinaryKriging3D
from pykrige.uk3d import UniversalKriging3D
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
allclose_pars = {"rtol": 1e-05, "atol": 1e-08}
@pytest.fixture
def validation_ref():
data = np.genfromtxt(os.path.join(BASE_DIR, "test_data/test_data.txt"))
ok_test_answer, ok_test_gridx, ok_test_gridy, cellsize, no_data = kt.read_asc_grid(
os.path.join(BASE_DIR, "test_data/test1_answer.asc"), footer=2
)
uk_test_answer, uk_test_gridx, uk_test_gridy, cellsize, no_data = kt.read_asc_grid(
os.path.join(BASE_DIR, "test_data/test2_answer.asc"), footer=2
)
return (
data,
(ok_test_answer, ok_test_gridx, ok_test_gridy),
(uk_test_answer, uk_test_gridx, uk_test_gridy),
)
@pytest.fixture
def sample_data_2d():
data = np.array(
[
[0.3, 1.2, 0.47],
[1.9, 0.6, 0.56],
[1.1, 3.2, 0.74],
[3.3, 4.4, 1.47],
[4.7, 3.8, 1.74],
]
)
gridx = np.arange(0.0, 6.0, 1.0)
gridx_2 = np.arange(0.0, 5.5, 0.5)
gridy = np.arange(0.0, 5.5, 0.5)
xi, yi = np.meshgrid(gridx, gridy)
mask = np.array(xi == yi)
return data, (gridx, gridy, gridx_2), mask
@pytest.fixture
def sample_data_3d():
data = np.array(
[
[0.1, 0.1, 0.3, 0.9],
[0.2, 0.1, 0.4, 0.8],
[0.1, 0.3, 0.1, 0.9],
[0.5, 0.4, 0.4, 0.5],
[0.3, 0.3, 0.2, 0.7],
]
)
gridx = np.arange(0.0, 0.6, 0.05)
gridy = np.arange(0.0, 0.6, 0.01)
gridz = np.arange(0.0, 0.6, 0.1)
zi, yi, xi = np.meshgrid(gridz, gridy, gridx, indexing="ij")
mask = np.array((xi == yi) & (yi == zi))
return data, (gridx, gridy, gridz), mask
def test_core_adjust_for_anisotropy():
X = np.array([[1.0, 0.0, -1.0, 0.0], [0.0, 1.0, 0.0, -1.0]]).T
X_adj = core._adjust_for_anisotropy(X, [0.0, 0.0], [2.0], [90.0])
assert_allclose(X_adj[:, 0], np.array([0.0, 1.0, 0.0, -1.0]), **allclose_pars)
assert_allclose(X_adj[:, 1], np.array([-2.0, 0.0, 2.0, 0.0]), **allclose_pars)
def test_core_adjust_for_anisotropy_3d():
    # this is a bad example, as the X matrix is symmetric
# and insensitive to transpositions
X = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]).T
X_adj = core._adjust_for_anisotropy(
X, [0.0, 0.0, 0.0], [2.0, 2.0], [90.0, 0.0, 0.0]
)
assert_allclose(X_adj[:, 0], np.array([1.0, 0.0, 0.0]), **allclose_pars)
assert_allclose(X_adj[:, 1], np.array([0.0, 0.0, 2.0]), **allclose_pars)
assert_allclose(X_adj[:, 2], np.array([0.0, -2.0, 0.0]), **allclose_pars)
X_adj = core._adjust_for_anisotropy(
X, [0.0, 0.0, 0.0], [2.0, 2.0], [0.0, 90.0, 0.0]
)
assert_allclose(X_adj[:, 0], np.array([0.0, 0.0, -1.0]), **allclose_pars)
assert_allclose(X_adj[:, 1], np.array([0.0, 2.0, 0.0]), **allclose_pars)
assert_allclose(X_adj[:, 2], np.array([2.0, 0.0, 0.0]), **allclose_pars)
X_adj = core._adjust_for_anisotropy(
X, [0.0, 0.0, 0.0], [2.0, 2.0], [0.0, 0.0, 90.0]
)
assert_allclose(X_adj[:, 0], np.array([0.0, 1.0, 0.0]), **allclose_pars)
assert_allclose(X_adj[:, 1], np.array([-2.0, 0.0, 0.0]), **allclose_pars)
assert_allclose(X_adj[:, 2], np.array([0.0, 0.0, 2.0]), **allclose_pars)
def test_core_make_variogram_parameter_list():
# test of first case - variogram_model_parameters is None
# function should return None unaffected
result = core._make_variogram_parameter_list("linear", None)
assert result is None
# tests for second case - variogram_model_parameters is dict
with pytest.raises(KeyError):
core._make_variogram_parameter_list("linear", {"tacos": 1.0, "burritos": 2.0})
result = core._make_variogram_parameter_list(
"linear", {"slope": 1.0, "nugget": 0.0}
)
assert result == [1.0, 0.0]
with pytest.raises(KeyError):
core._make_variogram_parameter_list("power", {"frijoles": 1.0})
result = core._make_variogram_parameter_list(
"power", {"scale": 2.0, "exponent": 1.0, "nugget": 0.0}
)
assert result == [2.0, 1.0, 0.0]
with pytest.raises(KeyError):
core._make_variogram_parameter_list("exponential", {"tacos": 1.0})
with pytest.raises(KeyError):
core._make_variogram_parameter_list(
"exponential", {"range": 1.0, "nugget": 1.0}
)
result = core._make_variogram_parameter_list(
"exponential", {"sill": 5.0, "range": 2.0, "nugget": 1.0}
)
assert result == [4.0, 2.0, 1.0]
result = core._make_variogram_parameter_list(
"exponential", {"psill": 4.0, "range": 2.0, "nugget": 1.0}
)
assert result == [4.0, 2.0, 1.0]
with pytest.raises(TypeError):
core._make_variogram_parameter_list("custom", {"junk": 1.0})
with pytest.raises(ValueError):
core._make_variogram_parameter_list("blarg", {"junk": 1.0})
# tests for third case - variogram_model_parameters is list
with pytest.raises(ValueError):
core._make_variogram_parameter_list("linear", [1.0, 2.0, 3.0])
result = core._make_variogram_parameter_list("linear", [1.0, 2.0])
assert result == [1.0, 2.0]
with pytest.raises(ValueError):
core._make_variogram_parameter_list("power", [1.0, 2.0])
result = core._make_variogram_parameter_list("power", [1.0, 2.0, 3.0])
assert result == [1.0, 2.0, 3.0]
with pytest.raises(ValueError):
core._make_variogram_parameter_list("exponential", [1.0, 2.0, 3.0, 4.0])
result = core._make_variogram_parameter_list("exponential", [5.0, 2.0, 1.0])
assert result == [4.0, 2.0, 1.0]
result = core._make_variogram_parameter_list("custom", [1.0, 2.0, 3.0])
assert result == [1.0, 2.0, 3]
with pytest.raises(ValueError):
core._make_variogram_parameter_list("junk", [1.0, 1.0, 1.0])
# test for last case - make sure function handles incorrect
# variogram_model_parameters type appropriately
with pytest.raises(TypeError):
core._make_variogram_parameter_list("linear", "tacos")
def test_core_initialize_variogram_model(validation_ref):
data, _, _ = validation_ref
# Note the variogram_function argument is not a string in real life...
# core._initialize_variogram_model also checks the length of input
# lists, which is redundant now because the same tests are done in
# core._make_variogram_parameter_list
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
"linear",
[0.0],
"linear",
6,
False,
"euclidean",
)
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
"spherical",
[0.0],
"spherical",
6,
False,
"euclidean",
)
# core._initialize_variogram_model does also check coordinate type,
# this is NOT redundant
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
"spherical",
[0.0, 0.0, 0.0],
"spherical",
6,
False,
"tacos",
)
x = np.array([1.0 + n / np.sqrt(2) for n in range(4)])
y = np.array([1.0 + n / np.sqrt(2) for n in range(4)])
z = np.arange(1.0, 5.0, 1.0)
lags, semivariance, variogram_model_parameters = core._initialize_variogram_model(
np.vstack((x, y)).T, z, "linear", [0.0, 0.0], "linear", 6, False, "euclidean"
)
assert_allclose(lags, np.array([1.0, 2.0, 3.0]))
assert_allclose(semivariance, np.array([0.5, 2.0, 4.5]))
def test_core_initialize_variogram_model_3d(sample_data_3d):
data, _, _ = sample_data_3d
# Note the variogram_function argument is not a string in real life...
# again, these checks in core._initialize_variogram_model are redundant
# now because the same tests are done in
# core._make_variogram_parameter_list
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1], data[:, 2])).T,
data[:, 3],
"linear",
[0.0],
"linear",
6,
False,
"euclidean",
)
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1], data[:, 2])).T,
data[:, 3],
"spherical",
[0.0],
"spherical",
6,
False,
"euclidean",
)
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1], data[:, 2])).T,
data[:, 3],
"linear",
[0.0, 0.0],
"linear",
6,
False,
"geographic",
)
lags, semivariance, variogram_model_parameters = core._initialize_variogram_model(
np.vstack(
(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([1.0, 2.0, 3.0, 4.0]),
)
).T,
np.array([1.0, 2.0, 3.0, 4.0]),
"linear",
[0.0, 0.0],
"linear",
3,
False,
"euclidean",
)
assert_allclose(
lags, np.array([np.sqrt(3.0), 2.0 * np.sqrt(3.0), 3.0 * np.sqrt(3.0)])
)
assert_allclose(semivariance, np.array([0.5, 2.0, 4.5]))
def test_core_calculate_variogram_model():
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([2.05, 2.95, 4.05, 4.95]),
"linear",
variogram_models.linear_variogram_model,
False,
)
assert_allclose(res, np.array([0.98, 1.05]), 0.01, 0.01)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([2.05, 2.95, 4.05, 4.95]),
"linear",
variogram_models.linear_variogram_model,
True,
)
assert_allclose(res, np.array([0.98, 1.05]), 0.01, 0.01)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([1.0, 2.8284271, 5.1961524, 8.0]),
"power",
variogram_models.power_variogram_model,
False,
)
assert_allclose(res, np.array([1.0, 1.5, 0.0]), 0.001, 0.001)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([1.0, 1.4142, 1.7321, 2.0]),
"power",
variogram_models.power_variogram_model,
False,
)
assert_allclose(res, np.array([1.0, 0.5, 0.0]), 0.001, 0.001)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([1.2642, 1.7293, 1.9004, 1.9634]),
"exponential",
variogram_models.exponential_variogram_model,
False,
)
assert_allclose(res, np.array([2.0, 3.0, 0.0]), 0.001, 0.001)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([0.5769, 1.4872, 1.9065, 1.9914]),
"gaussian",
variogram_models.gaussian_variogram_model,
False,
)
assert_allclose(res, np.array([2.0, 3.0, 0.0]), 0.001, 0.001)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([3.33060952, 3.85063879, 3.96667301, 3.99256374]),
"exponential",
variogram_models.exponential_variogram_model,
False,
)
assert_allclose(res, np.array([3.0, 2.0, 1.0]), 0.001, 0.001)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([2.60487044, 3.85968813, 3.99694817, 3.99998564]),
"gaussian",
variogram_models.gaussian_variogram_model,
False,
)
assert_allclose(res, np.array([3.0, 2.0, 1.0]), 0.001, 0.001)
def test_core_krige():
# Example 3.2 from Kitanidis
data = np.array([[9.7, 47.6, 1.22], [43.8, 24.6, 2.822]])
z, ss = core._krige(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
np.array([18.8, 67.9]),
variogram_models.linear_variogram_model,
[0.006, 0.1],
"euclidean",
)
assert z == approx(1.6364, rel=1e-4)
assert ss == approx(0.4201, rel=1e-4)
z, ss = core._krige(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
np.array([43.8, 24.6]),
variogram_models.linear_variogram_model,
[0.006, 0.1],
"euclidean",
)
assert z == approx(2.822, rel=1e-3)
assert ss == approx(0.0, rel=1e-3)
def test_core_krige_3d():
# Adapted from example 3.2 from Kitanidis
data = np.array([[9.7, 47.6, 1.0, 1.22], [43.8, 24.6, 1.0, 2.822]])
z, ss = core._krige(
np.vstack((data[:, 0], data[:, 1], data[:, 2])).T,
data[:, 3],
np.array([18.8, 67.9, 1.0]),
variogram_models.linear_variogram_model,
[0.006, 0.1],
"euclidean",
)
assert z == approx(1.6364, rel=1e-4)
assert ss == approx(0.4201, rel=1e-4)
z, ss = core._krige(
np.vstack((data[:, 0], data[:, 1], data[:, 2])).T,
data[:, 3],
np.array([43.8, 24.6, 1.0]),
variogram_models.linear_variogram_model,
[0.006, 0.1],
"euclidean",
)
assert z == approx(2.822, rel=1e-3)
assert ss == approx(0.0, rel=1e-3)
def test_non_exact():
# custom data for this test
data = np.array(
[
[0.0, 0.0, 0.47],
[1.5, 1.5, 0.56],
[3, 3, 0.74],
[4.5, 4.5, 1.47],
]
)
# construct grid points so diagonal
# is identical to input points
gridx = np.arange(0.0, 4.51, 1.5)
gridy = np.arange(0.0, 4.51, 1.5)
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 5.0],
)
z, ss = ok.execute("grid", gridx, gridy, backend="vectorized")
ok_non_exact = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 5.0],
exact_values=False,
)
z_non_exact, ss_non_exact = ok_non_exact.execute(
"grid", gridx, gridy, backend="vectorized"
)
in_values = np.diag(z)
# test that krig field
# at input location are identical
# to the inputs themselves with
# exact_values == True
assert_allclose(in_values, data[:, 2])
# test that krig field
# at input location are different
# than the inputs themselves
# with exact_values == False
assert ~np.allclose(in_values, data[:, 2])
# test that off diagonal values are the same
# by filling with dummy value and comparing
# each entry in array
np.fill_diagonal(z, 0.0)
np.fill_diagonal(z_non_exact, 0.0)
assert_allclose(z, z_non_exact)
def test_ok(validation_ref):
# Test to compare OK results to those obtained using KT3D_H2O.
# (<NAME>, <NAME>, and <NAME>, 2009, Groundwater,
# vol. 47, no. 4, 580-586.)
data, (ok_test_answer, gridx, gridy), _ = validation_ref
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 0.0],
)
z, ss = ok.execute("grid", gridx, gridy, backend="vectorized")
assert_allclose(z, ok_test_answer)
z, ss = ok.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z, ok_test_answer)
def test_ok_update_variogram_model(validation_ref):
data, (ok_test_answer, gridx, gridy), _ = validation_ref
with pytest.raises(ValueError):
OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model="blurg")
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2])
variogram_model = ok.variogram_model
variogram_parameters = ok.variogram_model_parameters
anisotropy_scaling = ok.anisotropy_scaling
anisotropy_angle = ok.anisotropy_angle
with pytest.raises(ValueError):
ok.update_variogram_model("blurg")
ok.update_variogram_model("power", anisotropy_scaling=3.0, anisotropy_angle=45.0)
    # TODO: check that the new parameters are equal to the set parameters
assert variogram_model != ok.variogram_model
assert not np.array_equal(variogram_parameters, ok.variogram_model_parameters)
assert anisotropy_scaling != ok.anisotropy_scaling
assert anisotropy_angle != ok.anisotropy_angle
def test_ok_get_variogram_points(validation_ref):
# Test to compare the variogram of OK results to those obtained using
# KT3D_H2O.
# (<NAME>, <NAME>, and <NAME>, 2009, Groundwater,
# vol. 47, no. 4, 580-586.)
# Variogram parameters
_variogram_parameters = [500.0, 3000.0, 0.0]
data, _, (ok_test_answer, gridx, gridy) = validation_ref
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=_variogram_parameters,
)
# Get the variogram points from the UniversalKriging instance
lags, calculated_variogram = ok.get_variogram_points()
# Generate the expected variogram points according to the
# exponential variogram model
expected_variogram = variogram_models.exponential_variogram_model(
_variogram_parameters, lags
)
assert_allclose(calculated_variogram, expected_variogram)
def test_ok_execute(sample_data_2d):
data, (gridx, gridy, _), mask_ref = sample_data_2d
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2])
with pytest.raises(ValueError):
OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2], exact_values="blurg")
ok_non_exact = OrdinaryKriging(
data[:, 0], data[:, 1], data[:, 2], exact_values=False
)
with pytest.raises(ValueError):
ok.execute("blurg", gridx, gridy)
z, ss = ok.execute("grid", gridx, gridy, backend="vectorized")
shape = (gridy.size, gridx.size)
assert z.shape == shape
assert ss.shape == shape
assert np.amax(z) != np.amin(z)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(z)
z, ss = ok.execute("grid", gridx, gridy, backend="loop")
shape = (gridy.size, gridx.size)
assert z.shape == shape
assert ss.shape == shape
assert np.amax(z) != np.amin(z)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(z)
z1, ss1 = ok_non_exact.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z1, z)
assert_allclose(ss1, ss)
z, ss = ok_non_exact.execute("grid", gridx, gridy, backend="loop")
shape = (gridy.size, gridx.size)
assert z.shape == shape
assert ss.shape == shape
assert np.amax(z) != np.amin(z)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(z)
with pytest.raises(IOError):
ok.execute("masked", gridx, gridy, backend="vectorized")
mask = np.array([True, False])
with pytest.raises(ValueError):
ok.execute("masked", gridx, gridy, mask=mask, backend="vectorized")
z, ss = ok.execute("masked", gridx, gridy, mask=mask_ref, backend="vectorized")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
z, ss = ok.execute("masked", gridx, gridy, mask=mask_ref.T, backend="vectorized")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
with pytest.raises(IOError):
ok.execute("masked", gridx, gridy, backend="loop")
mask = np.array([True, False])
with pytest.raises(ValueError):
ok.execute("masked", gridx, gridy, mask=mask, backend="loop")
z, ss = ok.execute("masked", gridx, gridy, mask=mask_ref, backend="loop")
assert np.ma.is_masked(z)
assert | np.ma.is_masked(ss) | numpy.ma.is_masked |
#! python3
# Multi-atlas segmentation scheme intended as a platform for testing before translating the workflow into the plugin.
from __future__ import print_function
from GetMetricFromElastixRegistration import GetFinalMetricFromElastixLogFile
from MultiAtlasSegmentation import MultiAtlasSegmentation
from ApplyBiasCorrection import ApplyBiasCorrection
import SimpleITK as sitk
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
import SitkImageManipulation as sitkIm
import winshell
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
# DATA FOLDERS:
case = "107"
basePath = "D:\Martin\ImplantMigrationStudy\\" + case + "\\"
postopImageNames = basePath + case + '_Migration_ContralateralPostopHemiPelvis.mhd'
followupImageNames = basePath + case + '_Migration_ContralateralFollowupHemiPelvis.mhd'
#postopImageNames = basePath + case + '_Migration_PostopPelvis.mhd'
#followupImageNames = basePath + case + '_Migration_FollowupPelvis.mhd'
#postopImageNames = basePath + case + '_Migration_PostopBone.mhd'
#followupImageNames = basePath + case + '_Migration_FollowupBone.mhd'
# READ DATA
postopImage = sitk.ReadImage(postopImageNames) # This will be the reference
followupImage = sitk.ReadImage(followupImageNames) # This will be the segmented
# BINARIZE THE IMAGES:
postopImage = sitk.Greater(postopImage, 0)
followupImage = sitk.Greater(followupImage, 0)
# SHOW OVERLAP OF IMAGES
slice_number = round(postopImage.GetSize()[1]/2)
#DisplayWithOverlay(image, segmented, slice_number, window_min, window_max)
sitkIm.DisplayWithOverlay(postopImage[:,slice_number,:], followupImage[:,slice_number,:], 0, 1)
#interact(sitkIm.DisplayWithOverlay, slice_number = (5), image = fixed(postopImage), segmented = fixed(followupImage),
# window_min = fixed(0), window_max=fixed(1));
# Get the image constrained by both bounding boxes:
#labelStatisticFilter = sitk.LabelShapeStatisticsImageFilter()
#labelStatisticFilter.Execute(postopImage)
#postopBoundingBox = np.array(labelStatisticFilter.GetBoundingBox(1))
#labelStatisticFilter.Execute(followupImage)
#followupBoundingBox = np.array(labelStatisticFilter.GetBoundingBox(1))
#minimumStart = np.minimum(postopBoundingBox[0:3], followupBoundingBox[0:3]+ 20) # 50 is to give an extra margin
#minimumStop = np.minimum(postopBoundingBox[0:3]+postopBoundingBox[3:6], followupBoundingBox[0:3]+followupBoundingBox[3:6]- 20)
#minimumBoxSize = minimumStop - minimumStart
#postopImage = postopImage[minimumStart[0]:minimumStop[0], minimumStart[1]:minimumStop[1], minimumStart[2]:minimumStop[2]]
#followupImage = followupImage[minimumStart[0]:minimumStop[0], minimumStart[1]:minimumStop[1], minimumStart[2]:minimumStop[2]]
# Another approach is to get the bounding box of the intersection:
postopAndFollowupImage = sitk.And(postopImage, followupImage)
labelStatisticFilter = sitk.LabelShapeStatisticsImageFilter()
labelStatisticFilter.Execute(postopAndFollowupImage)
bothBoundingBox = np.array(labelStatisticFilter.GetBoundingBox(1))
postopImage = postopImage[bothBoundingBox[0]:bothBoundingBox[0]+bothBoundingBox[3],
bothBoundingBox[1]:bothBoundingBox[1]+bothBoundingBox[4],
                          bothBoundingBox[2]+20:bothBoundingBox[2]+bothBoundingBox[5]-20]
followupImage = followupImage[bothBoundingBox[0]:bothBoundingBox[0]+bothBoundingBox[3],
bothBoundingBox[1]:bothBoundingBox[1]+bothBoundingBox[4],
bothBoundingBox[2]+20:bothBoundingBox[2]+bothBoundingBox[5]-20]
#Display reduced image:
slice_number = round(postopImage.GetSize()[1]*1/3)
sitkIm.DisplayWithOverlay(postopImage[:,slice_number,:], followupImage[:,slice_number,:], 0, 1)
#sitk.Get
#postopZ = permute(sum(sum(postopImage))>0, [3 1 2]);
#followupZ = permute(sum(sum(followupImage))>0, [3 1 2]);
#bothZ = find(postopZ&followupZ > 0);
#% Remove 10 slices each side:
#bothZ(1:10) = []; bothZ(end-10:end) = [];
# GET SEGMENTATION PERFORMANCE BASED ON SURFACES:
# init signed Maurer distance map as reference metric
reference_distance_map = sitk.Abs(sitk.SignedMaurerDistanceMap(postopImage, squaredDistance=False, useImageSpacing=True))
# Get the reference surface:
reference_surface = sitk.LabelContour(postopImage)
statistics_image_filter = sitk.StatisticsImageFilter()
# Get the number of pixels in the reference surface by counting all pixels that are 1.
statistics_image_filter.Execute(reference_surface)
num_reference_surface_pixels = int(statistics_image_filter.GetSum())
# Get the surface (contour) of the segmented image:
segmented_distance_map = sitk.Abs(sitk.SignedMaurerDistanceMap(followupImage, squaredDistance=False, useImageSpacing=True))
segmented_surface = sitk.LabelContour(followupImage)
# Get the number of pixels in the reference surface by counting all pixels that are 1.
statistics_image_filter.Execute(segmented_surface)
num_segmented_surface_pixels = int(statistics_image_filter.GetSum())
label_intensity_statistics_filter = sitk.LabelIntensityStatisticsImageFilter()
label_intensity_statistics_filter.Execute(segmented_surface, reference_distance_map)
# Hausdorff distance:
hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()
hausdorff_distance_filter.Execute(postopImage, followupImage)
#All the other metrics:
# Multiply the binary surface segmentations with the distance maps. The resulting distance
# maps contain non-zero values only on the surface (they can also contain zero on the surface)
seg2ref_distance_map = reference_distance_map * sitk.Cast(segmented_surface, sitk.sitkFloat32)
ref2seg_distance_map = segmented_distance_map * sitk.Cast(reference_surface, sitk.sitkFloat32)
# Get all non-zero distances and then add zero distances if required.
seg2ref_distance_map_arr = sitk.GetArrayViewFromImage(seg2ref_distance_map)
seg2ref_distances = list(seg2ref_distance_map_arr[seg2ref_distance_map_arr != 0])
seg2ref_distances = seg2ref_distances + \
list(np.zeros(num_segmented_surface_pixels - len(seg2ref_distances)))
ref2seg_distance_map_arr = sitk.GetArrayViewFromImage(ref2seg_distance_map)
ref2seg_distances = list(ref2seg_distance_map_arr[ref2seg_distance_map_arr != 0])
ref2seg_distances = ref2seg_distances + \
list(np.zeros(num_reference_surface_pixels - len(ref2seg_distances)))
all_surface_distances = seg2ref_distances + ref2seg_distances
# The maximum of the symmetric surface distances is the Hausdorff distance between the surfaces. In
# general, it is not equal to the Hausdorff distance between all voxel/pixel points of the two
# segmentations, though in our case it is. More on this below.
#hausdorff_distance = hausdorff_distance_filter.GetHausdorffDistance()
#max_surface_distance = label_intensity_statistics_filter.GetMaximum(1)
#avg_surface_distance = label_intensity_statistics_filter.GetMean(1)
#median_surface_distance = label_intensity_statistics_filter.GetMedian(1)
#std_surface_distance = label_intensity_statistics_filter.GetStandardDeviation(1)
hausdorff_distance = hausdorff_distance_filter.GetHausdorffDistance()
avg_surface_distance = np.mean(all_surface_distances)
max_surface_distance = np.max(all_surface_distances)
median_surface_distance = np.median(all_surface_distances)
std_surface_distance = | np.std(all_surface_distances) | numpy.std |
# -*- coding: utf-8 -*-
"""Orientation models."""
import numpy as np
from .closures import compute_closure
def jeffery_ode(a, t, xi, L, closure="IBOF", **kwargs):
"""ODE describing Jeffery's model.
Parameters
----------
a : 9x1 numpy array
Flattened fiber orientation tensor.
t : float
Time of evaluation.
xi : float
Shape factor computed from aspect ratio.
L : function handle
Function to compute velocity gradient at time t.
closure: str
Name of closure to be used.
Returns
-------
9x1 numpy array
Orientation tensor rate.
References
----------
.. [1] <NAME>
'The motion of ellipsoidal particles immersed in a viscous fluid',
Proceedings of the Royal Society A, 1922.
https://doi.org/10.1098/rspa.1922.0078
"""
a = np.reshape(a, (3, 3))
A = compute_closure(a, closure)
D = 0.5 * (L(t) + np.transpose(L(t)))
W = 0.5 * (L(t) - np.transpose(L(t)))
dadt = (
np.einsum("ik,kj->ij", W, a)
- np.einsum("ik,kj->ij", a, W)
+ xi
* (
np.einsum("ik,kj->ij", D, a)
+ np.einsum("ik,kj->ij", a, D)
- 2 * np.einsum("ijkl,kl->ij", A, D)
)
)
return dadt.ravel()
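# Minimal usage sketch (illustrative, not part of the library API): integrate Jeffery's
# equation for a steady simple shear flow with scipy.integrate.odeint. The aspect
# ratio, shear rate and time grid below are arbitrary example values.
def _example_jeffery_simple_shear():
    from scipy.integrate import odeint
    ar = 25.0                                    # assumed fiber aspect ratio
    xi = (ar ** 2 - 1.0) / (ar ** 2 + 1.0)       # shape factor from aspect ratio
    def L(t):
        # constant velocity gradient of a simple shear flow, d(u_x)/dy = 1.0 1/s
        return np.array([[0.0, 1.0, 0.0],
                         [0.0, 0.0, 0.0],
                         [0.0, 0.0, 0.0]])
    a0 = (np.eye(3) / 3.0).ravel()               # isotropic initial orientation state
    t = np.linspace(0.0, 10.0, 100)
    return odeint(jeffery_ode, a0, t, args=(xi, L))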
def folgar_tucker_ode(a, t, xi, L, Ci=0.0, closure="IBOF", **kwargs):
"""ODE describing the Folgar-Tucker model.
Parameters
----------
a : 9x1 numpy array
Flattened fiber orientation tensor.
t : float
Time of evaluation.
xi : float
Shape factor computed from aspect ratio.
L : function handle
Function to compute velocity gradient at time t.
Ci : float
Fiber interaction constant (typically 0 < Ci < 0.1).
closure: str
Name of closure to be used.
Returns
-------
9x1 numpy array
Orientation tensor rate.
References
----------
.. [1] <NAME>, <NAME> III,
'Orientation behavior of fibers in concentrated suspensions',
Journal of Reinforced Plastic Composites 3, 98-119, 1984.
https://doi.org/10.1177%2F073168448400300201
"""
a = np.reshape(a, (3, 3))
A = compute_closure(a, closure)
D = 0.5 * (L(t) + np.transpose(L(t)))
W = 0.5 * (L(t) - np.transpose(L(t)))
G = np.sqrt(2.0 * np.einsum("ij,ij", D, D))
delta = np.eye(3)
dadt = (
np.einsum("ik,kj->ij", W, a)
- np.einsum("ik,kj->ij", a, W)
+ xi
* (
np.einsum("ik,kj->ij", D, a)
+ np.einsum("ik,kj->ij", a, D)
- 2 * np.einsum("ijkl,kl->ij", A, D)
)
+ 2 * Ci * G * (delta - 3 * a)
)
return dadt.ravel()
def maier_saupe_ode(a, t, xi, L, Ci=0.0, U0=0.0, closure="IBOF", **kwargs):
"""ODE using Folgar-Tucker constant and Maier-Saupe potential.
Parameters
----------
a : 9x1 numpy array
Flattened fiber orientation tensor.
t : float
Time of evaluation.
xi : float
Shape factor computed from aspect ratio.
L : function handle
Function to compute velocity gradient at time t.
Ci : float
Fiber interaction constant (typically 0 < Ci < 0.1).
U0 : float
        Maier-Saupe Potential (in 3D stable for U0 < 8 Ci).
closure: str
Name of closure to be used.
Returns
-------
9x1 numpy array
Orientation tensor rate.
References
----------
.. [1] <NAME>, <NAME>, <NAME>,
'Comparative numerical study of two concentrated fiber suspension models',
Journal of Non-Newtonian Fluid Mechanics 165, 764-781, 2010.
https://doi.org/10.1016/j.jnnfm.2010.04.001
"""
a = np.reshape(a, (3, 3))
A = compute_closure(a, closure)
D = 0.5 * (L(t) + np.transpose(L(t)))
W = 0.5 * (L(t) - np.transpose(L(t)))
G = np.sqrt(2.0 * np.einsum("ij,ij", D, D))
delta = np.eye(3)
dadt = (
np.einsum("ik,kj->ij", W, a)
- np.einsum("ik,kj->ij", a, W)
+ xi
* (
np.einsum("ik,kj->ij", D, a)
+ np.einsum("ik,kj->ij", a, D)
- 2 * np.einsum("ijkl,kl->ij", A, D)
)
+ 2
* G
* (
Ci * (delta - 3 * a)
+ U0
* (np.einsum("ik,kj->ij", a, a) - np.einsum("ijkl,kl->ij", A, a))
)
)
return dadt.ravel()
def iard_ode(a, t, xi, L, Ci=0.0, Cm=0.0, closure="IBOF", **kwargs):
"""ODE describing iARD model.
Parameters
----------
a : 9x1 numpy array
Flattened fiber orientation tensor.
t : float
Time of evaluation.
xi : float
Shape factor computed from aspect ratio.
L : function handle
Function to compute velocity gradient at time t.
Ci : float
Fiber interaction constant (typically 0 < Ci < 0.05).
Cm : float
Anisotropy factor (0 < Cm < 1).
closure: str
Name of closure to be used.
Returns
-------
9x1 numpy array
Orientation tensor rate.
References
----------
.. [1] <NAME>; <NAME>; <NAME>,
'An objective tensor to predict anisotropic fiber orientation in concentrated suspensions',
Journal of Rheology 60, 215, 2016.
https://doi.org/10.1122/1.4939098
"""
a = np.reshape(a, (3, 3))
A = compute_closure(a, closure)
D = 0.5 * (L(t) + np.transpose(L(t)))
W = 0.5 * (L(t) - np.transpose(L(t)))
G = np.sqrt(2.0 * np.einsum("ij,ij", D, D))
delta = np.eye(3)
D2 = np.einsum("ik,kj->ij", D, D)
D2_norm = np.sqrt(1.0 / 2.0 * np.einsum("ij,ij", D2, D2))
Dr = Ci * (delta - Cm * D2 / D2_norm)
dadt_HD = (
np.einsum("ik,kj->ij", W, a)
- np.einsum("ik,kj->ij", a, W)
+ xi
* (
np.einsum("ik,kj->ij", D, a)
+ np.einsum("ik,kj->ij", a, D)
- 2 * np.einsum("ijkl,kl->ij", A, D)
)
)
dadt_iard = G * (
2 * Dr
- 2 * np.trace(Dr) * a
- 5 * np.einsum("ik,kj->ij", Dr, a)
- 5 * np.einsum("ik,kj->ij", a, Dr)
+ 10 * np.einsum("ijkl,kl->ij", A, Dr)
)
dadt = dadt_HD + dadt_iard
return dadt.ravel()
def iardrpr_ode(
a, t, xi, L, Ci=0.0, Cm=0.0, alpha=0.0, beta=0.0, closure="IBOF", **kwargs
):
"""ODE describing iARD-RPR model.
Parameters
----------
a : 9x1 numpy array
Flattened fiber orientation tensor.
t : float
Time of evaluation.
xi : float
Shape factor computed from aspect ratio.
L : function handle
Function to compute velocity gradient at time t.
Ci : float
Fiber interaction constant (typically 0 < Ci < 0.05).
Cm : float
Anisotropy factor (0 < Cm < 1).
alpha : float
Retardance rate (0 < alpha < 1).
beta : float
        Retardance tuning factor (0 < beta < 1).
closure: str
Name of closure to be used.
Returns
-------
9x1 numpy array
Orientation tensor rate.
References
----------
.. [1] <NAME>; <NAME>; <NAME>,
'An objective tensor to predict anisotropic fiber orientation in concentrated suspensions',
Journal of Rheology 60, 215, 2016.
https://doi.org/10.1122/1.4939098
"""
a = np.reshape(a, (3, 3))
A = compute_closure(a, closure)
D = 0.5 * (L(t) + np.transpose(L(t)))
W = 0.5 * (L(t) - np.transpose(L(t)))
G = np.sqrt(2.0 * np.einsum("ij,ij", D, D))
delta = np.eye(3)
D2 = np.einsum("ik,kj->ij", D, D)
D2_norm = np.sqrt(1.0 / 2.0 * np.einsum("ij,ij", D2, D2))
Dr = Ci * (delta - Cm * D2 / D2_norm)
dadt_HD = (
np.einsum("ik,kj->ij", W, a)
- np.einsum("ik,kj->ij", a, W)
+ xi
* (
np.einsum("ik,kj->ij", D, a)
+ np.einsum("ik,kj->ij", a, D)
- 2.0 * np.einsum("ijkl,kl->ij", A, D)
)
)
dadt_iard = G * (
2.0 * Dr
- 2.0 * np.trace(Dr) * a
- 5.0 * np.einsum("ik,kj->ij", Dr, a)
- 5.0 * np.einsum("ik,kj->ij", a, Dr)
+ 10.0 * np.einsum("ijkl,kl->ij", A, Dr)
)
dadt_temp = dadt_HD + dadt_iard
# Spectral Decomposition
eigenValues, eigenVectors = np.linalg.eig(a)
idx = eigenValues.argsort()[::-1]
R = eigenVectors[:, idx]
# Estimation of eigenvalue rates (rotated back)
dadt_diag = np.einsum("ik, kl, lj->ij", np.transpose(R), dadt_temp, R)
lbd0 = dadt_diag[0, 0]
lbd1 = dadt_diag[1, 1]
lbd2 = dadt_diag[2, 2]
# Computation of IOK tensor by rotation
IOK = np.zeros((3, 3))
IOK[0, 0] = alpha * (lbd0 - beta * (lbd0 ** 2.0 + 2.0 * lbd1 * lbd2))
IOK[1, 1] = alpha * (lbd1 - beta * (lbd1 ** 2.0 + 2.0 * lbd0 * lbd2))
IOK[2, 2] = alpha * (lbd2 - beta * (lbd2 ** 2.0 + 2.0 * lbd0 * lbd1))
dadt_rpr = -np.einsum("ik, kl, lj->ij", R, IOK, np.transpose(R))
dadt = dadt_temp + dadt_rpr
return dadt.ravel()
def mrd_ode(
a, t, xi, L, Ci=0.0, D1=1.0, D2=0.8, D3=0.15, closure="IBOF", **kwargs
):
"""ODE describing MRD model.
Parameters
----------
a : 9x1 numpy array
Flattened fiber orientation tensor.
t : float
Time of evaluation.
xi : float
Shape factor computed from aspect ratio.
L : function handle
Function to compute velocity gradient at time t.
Ci : float
Fiber interaction constant (typically 0 < Ci < 0.05).
    D1 : float
        Anisotropy factor (D1 > 0).
    D2 : float
        Anisotropy factor (D2 > 0).
    D3 : float
        Anisotropy factor (D3 > 0).
closure: str
Name of closure to be used.
Returns
-------
9x1 numpy array
Orientation tensor rate.
References
----------
.. [1] <NAME>, <NAME>, <NAME> and <NAME>,
'Using New Anisotropic Rotational Diffusion Model To Improve Prediction Of Short
        Fibers in Thermoplastic Injection Molding',
ANTEC, Orlando, 2018.
"""
a = np.reshape(a, (3, 3))
A = compute_closure(a, closure)
D = 0.5 * (L(t) + np.transpose(L(t)))
W = 0.5 * (L(t) - np.transpose(L(t)))
G = np.sqrt(2.0 * np.einsum("ij,ij", D, D))
C_hat = np.array([[D1, 0.0, 0.0], [0.0, D2, 0.0], [0.0, 0.0, D3]])
# Spectral Decomposition
eigenValues, eigenVectors = np.linalg.eig(a)
idx = eigenValues.argsort()[::-1]
R = eigenVectors[:, idx]
C = Ci * np.einsum("ij,jk,kl->il", R, C_hat, np.transpose(R))
dadt_HD = (
np.einsum("ik,kj->ij", W, a)
- np.einsum("ik,kj->ij", a, W)
+ xi
* (
np.einsum("ik,kj->ij", D, a)
+ np.einsum("ik,kj->ij", a, D)
- 2 * np.einsum("ijkl,kl->ij", A, D)
)
)
dadt_mrd = G * (
2 * C
- 2 * np.trace(C) * a
- 5 * np.einsum("ik,kj->ij", C, a)
- 5 * np.einsum("ik,kj->ij", a, C)
+ 10 * np.einsum("ijkl,kl->ij", A, C)
)
dadt = dadt_HD + dadt_mrd
return dadt.ravel()
def pard_ode(a, t, xi, L, Ci=0.0, Omega=0.0, closure="IBOF", **kwargs):
"""ODE describing pARD model.
Parameters
----------
a : 9x1 numpy array
Flattened fiber orientation tensor.
t : float
Time of evaluation.
xi : float
Shape factor computed from aspect ratio.
L : function handle
Function to compute velocity gradient at time t.
Ci : float
Fiber interaction constant (typically 0 < Ci < 0.05).
    Omega : float
Anisotropy factor (0.5 < Omega < 1).
closure: str
Name of closure to be used.
Returns
-------
9x1 numpy array
Orientation tensor rate.
References
----------
.. [1] <NAME>; <NAME>; <NAME>,
'The use of principal spatial tensor to predict anisotropic fiber orientation in
concentrated fiber suspensions',
Journal of Rheology 62, 313, 2017.
https://doi.org/10.1122/1.4998520
"""
a = np.reshape(a, (3, 3))
A = compute_closure(a, closure)
D = 0.5 * (L(t) + np.transpose(L(t)))
W = 0.5 * (L(t) - np.transpose(L(t)))
G = np.sqrt(2.0 * np.einsum("ij,ij", D, D))
C_hat = np.array(
[[1.0, 0.0, 0.0], [0.0, Omega, 0.0], [0.0, 0.0, 1.0 - Omega]]
)
# Spectral Decomposition
eigenValues, eigenVectors = np.linalg.eig(a)
idx = eigenValues.argsort()[::-1]
R = eigenVectors[:, idx]
C = Ci * np.einsum("ij,jk,kl->il", R, C_hat, np.transpose(R))
dadt_HD = (
np.einsum("ik,kj->ij", W, a)
- np.einsum("ik,kj->ij", a, W)
+ xi
* (
np.einsum("ik,kj->ij", D, a)
+ np.einsum("ik,kj->ij", a, D)
- 2 * np.einsum("ijkl,kl->ij", A, D)
)
)
dadt_pard = G * (
2 * C
- 2 * np.trace(C) * a
- 5 * np.einsum("ik,kj->ij", C, a)
- 5 * np.einsum("ik,kj->ij", a, C)
+ 10 * np.einsum("ijkl,kl->ij", A, C)
)
dadt = dadt_HD + dadt_pard
return dadt.ravel()
def pardrpr_ode(
a, t, xi, L, Ci=0.0, Omega=0.0, alpha=0.0, closure="IBOF", **kwargs
):
"""ODE describing pARD-RPR model.
Parameters
----------
a : 9x1 numpy array
Flattened fiber orientation tensor.
t : float
Time of evaluation.
xi : float
Shape factor computed from aspect ratio.
L : function handle
Function to compute velocity gradient at time t.
Ci : float
Fiber interaction constant (typically 0 < Ci < 0.05).
    Omega : float
Anisotropy factor (0.5 < Omega < 1).
alpha : float
Retardance rate (0 < alpha < 1).
closure: str
Name of closure to be used.
Returns
-------
9x1 numpy array
Orientation tensor rate.
References
----------
.. [1] <NAME>; <NAME>; <NAME>,
'The use of principal spatial tensor to predict anisotropic fiber orientation in
concentrated fiber suspensions',
Journal of Rheology 62, 313, 2017.
https://doi.org/10.1122/1.4998520
"""
a = np.reshape(a, (3, 3))
A = compute_closure(a, closure)
D = 0.5 * (L(t) + np.transpose(L(t)))
W = 0.5 * (L(t) - np.transpose(L(t)))
G = np.sqrt(2.0 * np.einsum("ij,ij", D, D))
C_hat = np.array(
[[1.0, 0.0, 0.0], [0.0, Omega, 0.0], [0.0, 0.0, 1.0 - Omega]]
)
# Spectral Decomposition
eigenValues, eigenVectors = np.linalg.eig(a)
idx = eigenValues.argsort()[::-1]
R = eigenVectors[:, idx]
C = Ci * np.einsum("ij,jk,kl->il", R, C_hat, np.transpose(R))
dadt_HD = (
np.einsum("ik,kj->ij", W, a)
- np.einsum("ik,kj->ij", a, W)
+ xi
* (
np.einsum("ik,kj->ij", D, a)
+ np.einsum("ik,kj->ij", a, D)
- 2 * np.einsum("ijkl,kl->ij", A, D)
)
)
dadt_pard = G * (
2 * C
- 2 * np.trace(C) * a
- 5 * np.einsum("ik,kj->ij", C, a)
- 5 * np.einsum("ik,kj->ij", a, C)
+ 10 * np.einsum("ijkl,kl->ij", A, C)
)
dadt_temp = dadt_HD + dadt_pard
# Estimation of eigenvalue rates (rotated back)
dadt_diag = np.einsum("ik, kl, lj->ij", np.transpose(R), dadt_temp, R)
lbd0 = dadt_diag[0, 0]
lbd1 = dadt_diag[1, 1]
lbd2 = dadt_diag[2, 2]
# Computation of IOK tensor by rotation
IOK = np.zeros((3, 3))
IOK[0, 0] = alpha * lbd0
IOK[1, 1] = alpha * lbd1
IOK[2, 2] = alpha * lbd2
dadt_rpr = -np.einsum("ik, kl, lj->ij", R, IOK, np.transpose(R))
dadt = dadt_temp + dadt_rpr
return dadt.ravel()
def rsc_ode(a, t, xi, L, Ci=0.0, kappa=1.0, closure="IBOF", **kwargs):
"""ODE describing RSC model.
Parameters
----------
a : 9x1 numpy array
Flattened fiber orientation tensor.
t : float
Time of evaluation.
xi : float
Shape factor computed from aspect ratio.
L : function handle
Function to compute velocity gradient at time t.
Ci : float
Fiber interaction constant (typically 0 < Ci < 0.05).
kappa : float
Strain reduction factor (0 < kappa < 1).
closure: str
Name of closure to be used.
Returns
-------
9x1 numpy array
Orientation tensor rate.
References
----------
.. [1] <NAME>, <NAME>, and <NAME>,
'An objective model for slow orientation kinetics in concentrated fiber suspensions:
Theory and rheological evidence',
Journal of Rheology 52, 1179, 2008.
https://doi.org/10.1122/1.2946437
"""
a = np.reshape(a, (3, 3))
A = compute_closure(a, closure)
D = 0.5 * (L(t) + np.transpose(L(t)))
W = 0.5 * (L(t) - np.transpose(L(t)))
G = np.sqrt(2.0 * np.einsum("ij,ij", D, D))
delta = np.eye(3)
w, v = np.linalg.eig(a)
L = (
w[0] * np.einsum("i,j,k,l->ijkl", v[:, 0], v[:, 0], v[:, 0], v[:, 0])
+ w[1] * np.einsum("i,j,k,l->ijkl", v[:, 1], v[:, 1], v[:, 1], v[:, 1])
+ w[2] * np.einsum("i,j,k,l->ijkl", v[:, 2], v[:, 2], v[:, 2], v[:, 2])
)
M = (
np.einsum("i,j,k,l->ijkl", v[:, 0], v[:, 0], v[:, 0], v[:, 0])
+ np.einsum("i,j,k,l->ijkl", v[:, 1], v[:, 1], v[:, 1], v[:, 1])
+ np.einsum("i,j,k,l->ijkl", v[:, 2], v[:, 2], v[:, 2], v[:, 2])
)
tensor4 = A + (1.0 - kappa) * (L - np.einsum("ijmn,mnkl->ijkl", M, A))
dadt = (
np.einsum("ik,kj->ij", W, a)
- np.einsum("ik,kj->ij", a, W)
+ xi
* (
np.einsum("ik,kj->ij", D, a)
+ np.einsum("ik,kj->ij", a, D)
- 2 * np.einsum("ijkl,kl->ij", tensor4, D)
)
+ 2 * kappa * Ci * G * (delta - 3 * a)
)
return dadt.ravel()
def ard_rsc_ode(
a,
t,
xi,
L,
b1=0.0,
kappa=1.0,
b2=0,
b3=0,
b4=0,
b5=0,
closure="IBOF",
**kwargs
):
"""ODE describing ARD-RSC model.
Parameters
----------
a : 9x1 numpy array
Flattened fiber orientation tensor.
t : float
Time of evaluation.
xi : float
Shape factor computed from aspect ratio.
L : function handle
Function to compute velocity gradient at time t.
b1 : float
First parameter of rotary diffusion tensor (0 < b1 < 0.1).
kappa : float
Strain reduction factor (0 < kappa < 1).
    b2 : float
        Second parameter of rotary diffusion tensor.
    b3 : float
        Third parameter of rotary diffusion tensor.
    b4 : float
        Fourth parameter of rotary diffusion tensor.
    b5 : float
        Fifth parameter of rotary diffusion tensor.
closure: str
Name of closure to be used.
Returns
-------
9x1 numpy array
Orientation tensor rate.
References
----------
.. [1] <NAME>, <NAME>,
'An anisotropic rotary diffusion model for fiber orientation in short- and
long-fiber thermoplastics',
Journal of Non-Newtonian Fluid Mechanics 156, 165-176, 2009.
https://doi.org/10.1016/j.jnnfm.2008.08.002
"""
a = np.reshape(a, (3, 3))
A = compute_closure(a, closure)
D = 0.5 * (L(t) + np.transpose(L(t)))
W = 0.5 * (L(t) - np.transpose(L(t)))
G = np.sqrt(2.0 * np.einsum("ij,ij", D, D))
delta = np.eye(3)
w, v = np.linalg.eig(a)
L = (
w[0] * np.einsum("i,j,k,l->ijkl", v[:, 0], v[:, 0], v[:, 0], v[:, 0])
+ w[1] * np.einsum("i,j,k,l->ijkl", v[:, 1], v[:, 1], v[:, 1], v[:, 1])
+ w[2] * np.einsum("i,j,k,l->ijkl", v[:, 2], v[:, 2], v[:, 2], v[:, 2])
)
M = (
np.einsum("i,j,k,l->ijkl", v[:, 0], v[:, 0], v[:, 0], v[:, 0])
+ np.einsum("i,j,k,l->ijkl", v[:, 1], v[:, 1], v[:, 1], v[:, 1])
+ np.einsum("i,j,k,l->ijkl", v[:, 2], v[:, 2], v[:, 2], v[:, 2])
)
if G > 0.0:
C = (
b1 * delta
+ b2 * a
+ b3 * np.einsum("ik,kj->ij", a, a)
+ b4 * D / G
+ b5 * np.einsum("ik,kj->ij", D, D) / (G * G)
)
else:
C = np.eye(3)
tensor4 = A + (1.0 - kappa) * (L - np.einsum("ijmn,mnkl->ijkl", M, A))
dadt = (
| np.einsum("ik,kj->ij", W, a) | numpy.einsum |
"""von-Mises-Fisher complex-Angular-Centric-Gaussian mixture model
This is a specific mixture model to integrate DC and spatial observations. It
does and will not support independent dimensions. This also explains, why
concrete variable names (i.e. F, T, embedding) are used instead of unnamed
independent axes.
@article{Drude2019Integration,
title={Integration of neural networks and probabilistic spatial models for acoustic blind source separation},
author={<NAME> <NAME>},
journal={IEEE Journal of Selected Topics in Signal Processing},
year={2019},
publisher={IEEE}
}
"""
from operator import xor
import numpy as np
from dataclasses import dataclass
from pb_bss.distribution import (
ComplexAngularCentralGaussian,
ComplexAngularCentralGaussianTrainer,
)
from pb_bss.distribution import VonMisesFisher
from pb_bss.distribution import VonMisesFisherTrainer
from pb_bss.distribution.mixture_model_utils import (
log_pdf_to_affiliation,
log_pdf_to_affiliation_for_integration_models_with_inline_pa,
)
from pb_bss.distribution.utils import _ProbabilisticModel
from pb_bss.utils import unsqueeze
@dataclass
class VMFCACGMM(_ProbabilisticModel):
weight: np.array # Shape (), (K,), (F, K), (T, K)
weight_constant_axis: tuple
vmf: VonMisesFisher
cacg: ComplexAngularCentralGaussian
spatial_weight: float
spectral_weight: float
def predict(self, observation, embedding):
assert np.iscomplexobj(observation), observation.dtype
assert np.isrealobj(embedding), embedding.dtype
observation = observation / np.maximum(
| np.linalg.norm(observation, axis=-1, keepdims=True) | numpy.linalg.norm |
import os
import scipy
import heapq
import random
import warnings
import collections
import numpy as np
import seaborn as sns
import networkx as nx
import matplotlib.pyplot as plt
from PIL import Image
from sklearn.metrics import f1_score
from scipy.optimize import linear_sum_assignment
import torch
from torch.utils.data import DataLoader
import dgl
from model import get_model
from dataset import load_data
warnings.filterwarnings("ignore", category=Warning)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def set_seed(seed):
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
dgl.random.seed(seed)
def evaluate_f1(logits, labels):
y_pred = torch.where(logits > 0.0, torch.ones_like(logits), torch.zeros_like(logits))
y_pred = y_pred.detach().cpu().numpy()
y_true = labels.detach().cpu().numpy()
return f1_score(y_true, y_pred, average='micro')
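# Tiny usage sketch (illustrative): evaluate_f1 thresholds raw logits at zero and
# reports the micro-averaged F1 score against multi-hot labels.
def _example_evaluate_f1():
    logits = torch.tensor([[1.2, -0.3], [-0.8, 2.1]])
    labels = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
    return evaluate_f1(logits, labels)  # -> 1.0 for this toy input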
def plot_feature_att(target, score, path):
f = plt.figure(figsize=(4, 1))
ax1 = plt.subplot(2, 1, 1)
sns.heatmap(target, cmap='Blues', cbar=False, vmin=0.0, vmax=1.0)
ax1.spines['top'].set_visible(True)
ax1.spines['right'].set_visible(True)
ax1.spines['bottom'].set_visible(True)
ax1.spines['left'].set_visible(True)
plt.xticks([])
plt.yticks([])
ax2 = plt.subplot(2, 1, 2)
sns.heatmap(score, cmap='Blues', cbar=False, vmin=0.0, vmax=1.0, cbar_kws={"orientation": "horizontal"})
ax2.spines['top'].set_visible(True)
ax2.spines['right'].set_visible(True)
ax2.spines['bottom'].set_visible(True)
ax2.spines['left'].set_visible(True)
plt.xticks([])
plt.yticks([])
plt.savefig(path, format='png', dpi=300, bbox_inches='tight')
plt.close()
def get_att(score, idx, feature_num, selected_num):
src = np.zeros((score.shape[0], ))
index = heapq.nlargest(selected_num, range(len(score)), score.take)
src[index] = 1.0
out = np.where(src[idx*feature_num:(idx+1)*feature_num] == 1.0)[0].shape[0]
return out
def evaluate_att(score, param, path, plot):
num_graph = param['num_graph']
feature_num = param['feature_num']
selected_num = param['selected_num']
cost = np.zeros((num_graph, num_graph))
for i in range(num_graph):
for j in range(num_graph):
cost[i, j] = get_att(score[i], j, feature_num, selected_num)
row_ind, col_ind = linear_sum_assignment(feature_num-cost)
out = cost[row_ind, col_ind].sum()
if plot:
sort_index = np.arange(num_graph * feature_num)
np.random.shuffle(sort_index)
for i in range(num_graph):
target = np.zeros((1, score.shape[1]))
target[:, i*feature_num:(i+1)*feature_num] = 1
target = target[:, sort_index]
src = np.zeros((score.shape[1], ))
feat_index = np.where(col_ind == i)[0][0]
index = heapq.nlargest(selected_num, range(len(score[feat_index])), score[feat_index].take)
src[index] = 1.0
n_min = np.min(score[feat_index, index])
n_max = np.max(score[feat_index, index])
att_score = (score[feat_index] - n_min) / (n_max - n_min) * 0.5 + 0.5
att_score = (src * att_score).reshape((1, score.shape[1]))
att_score = att_score[:, sort_index]
os.makedirs(path, exist_ok=True)
plot_feature_att(target, att_score, path='{}Feature_Factor_{}.png'.format(path, i))
return out
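# Hedged sketch (added for illustration; the helper below is an assumption and
# is never called by the pipeline): scipy's linear_sum_assignment finds a
# minimum-cost matching, so passing `feature_num - cost` above turns
# "maximise the matched feature count" into an equivalent minimisation.
def _example_assignment_sketch():
    """Match 2 latent factors to 2 ground-truth feature groups (toy numbers)."""
    overlap = np.array([[5., 1.],
                        [0., 4.]])  # overlap[i, j]: factor i vs feature group j
    row_ind, col_ind = linear_sum_assignment(overlap.max() - overlap)
    return overlap[row_ind, col_ind].sum()  # 9.0 (factor 0 -> group 0, 1 -> 1)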
def collate(samples):
graphs, labels, gt_adjs = map(list, zip(*samples))
batched_graphs = dgl.batch(graphs)
return batched_graphs, torch.cat(tuple(labels), 0), gt_adjs
def dgl_to_adj(dgl_graph):
adjs_list = []
for i in range(16):
if f'factor_{i}' not in dgl_graph.edata:
break
srt, dst = dgl_graph.edges()
esge_weights = dgl_graph.edata[f'factor_{i}'].squeeze()
srt, dst = srt.detach().cpu().numpy(), dst.detach().cpu().numpy()
esge_weights = esge_weights.detach().cpu().numpy()
num_node = dgl_graph.number_of_nodes()
adjs = np.zeros((num_node, num_node))
adjs[srt, dst] = esge_weights
adjs += np.transpose(adjs)
adjs /= 2.0
adjs_list.append(adjs)
return adjs_list
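# Hedged sketch (added for illustration, not called anywhere): the per-factor
# adjacency above is symmetrised by averaging the matrix with its transpose, so
# directed DGL edge weights become an undirected adjacency.
def _example_symmetrise_sketch():
    a = np.array([[0., 2.],
                  [0., 0.]])
    return (a + a.T) / 2.0  # [[0., 1.], [1., 0.]]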
def translate_gt_graph_to_adj(gt_graph):
gt_adjs = []
gt_g_list = dgl.unbatch(gt_graph)
for gt_g in gt_g_list:
gt_list = []
gt_ids = []
n_node = gt_g.number_of_nodes()
srt, dst = gt_g.edges()
srt, dst = srt.detach().cpu().numpy(), dst.detach().cpu().numpy()
edge_factor = gt_g.edata['feat'].detach().cpu().numpy()
assert srt.shape[0] == edge_factor.shape[0]
for edge_id in set(edge_factor):
org_g = | np.zeros((n_node, n_node)) | numpy.zeros |
"""
predict_cnn_mnist.py
Predict MNIST images using a trained neural network.
ECE196 Face Recognition Project
Author: <NAME>
"""
# TODO: Import other layers as necessary. (Conv2D, MaxPooling2D)
from keras.models import load_model
from keras.datasets import mnist
from keras.utils import to_categorical
import keras
import numpy as np
import cv2
# Process the data from (28,28) to (32,32)
def procces_image(img):
proccesed_image = cv2.resize(img, (32,32))
return proccesed_image
def _main(args):
# Load MNIST dataset.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Resize the images
x_train = np.array(list(map(procces_image, x_train)))
x_test = np.array(list(map(procces_image, x_test)))
# Reshape to fit model
x_train = np.reshape(x_train,(60000,32,32,1))
x_test = np.reshape(x_test,(10000,32,32,1))
print("Resized images to {}".format(x_train.shape))
# One hot encode labels.
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
# Reshape to fit model
y_train = np.reshape(y_train,(60000,1,1,10))
y_test = | np.reshape(y_test,(10000,1,1,10)) | numpy.reshape |
"""
Created on Thu Jan 26 17:04:11 2017
@author: <NAME>, <EMAIL>
"""
#%matplotlib inline
import numpy as np
import pandas as pd
import dicom
import os
import scipy.ndimage as ndimage
import matplotlib.pyplot as plt
import scipy.ndimage # added for scaling
import cv2
import time
import glob
from skimage import measure, morphology, segmentation
import SimpleITK as sitk
RESIZE_SPACING = [2,2,2] # z, y, x (x & y MUST be the same)
RESOLUTION_STR = "2x2x2"
img_rows = 448
img_cols = 448 # global values
DO_NOT_USE_SEGMENTED = True
#STAGE = "stage1"
STAGE_DIR_BASE = "../input/%s/" # on one cluster we had input_shared
LUNA_MASKS_DIR = "../luna/data/original_lung_masks/"
luna_subset = 0 # initial
LUNA_BASE_DIR = "../luna/data/original_lungs/subset%s/" # added on AWS; data as well
LUNA_DIR = LUNA_BASE_DIR % luna_subset
CSVFILES = "../luna/data/original_lungs/CSVFILES/%s"
LUNA_ANNOTATIONS = CSVFILES % "annotations.csv"
LUNA_CANDIDATES = CSVFILES % "candidates.csv"
# Load the scans in given folder path (loads the most recent acquisition)
def load_scan(path):
slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
#slices.sort(key = lambda x: int(x.InstanceNumber))
acquisitions = [x.AcquisitionNumber for x in slices]
vals, counts = np.unique(acquisitions, return_counts=True)
vals = vals[::-1] # reverse order so the later acquisitions come first (np.unique seems to always return values in ascending order, e.g. 1, 2, ...)
counts = counts[::-1]
## take the acquisition that has more entries; if the counts are identical take the later one
acq_val_sel = vals[np.argmax(counts)]
##acquisitions = sorted(np.unique(acquisitions), reverse=True)
if len(vals) > 1:
print ("WARNING ##########: MULTIPLE acquisitions & counts, acq_val_sel, path: ", vals, counts, acq_val_sel, path)
slices2= [x for x in slices if x.AcquisitionNumber == acq_val_sel]
slices = slices2
## ONE path includes 2 acquisitions (2 sets); take the latter acquisition only, which typically is better than the first/previous ones.
## example of the '../input/stage1/b8bb02d229361a623a4dc57aa0e5c485'
#slices.sort(key = lambda x: int(x.ImagePositionPatient[2])) # from v 8, BUG should be float
slices.sort(key = lambda x: float(x.ImagePositionPatient[2])) # from v 9
try:
slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
except:
slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
for s in slices:
s.SliceThickness = slice_thickness
return slices
def get_3d_data_slices(slices): # get data in Hounsfield Units
slices.sort(key = lambda x: float(x.ImagePositionPatient[2])) # from v 9
image = np.stack([s.pixel_array for s in slices])
image = image.astype(np.int16) # ensure int16 (it may be here uint16 for some images )
image[image == -2000] = 0 # correcting cylindrical bound entries to 0
# Convert to Hounsfield units (HU)
# The intercept is usually -1024
for slice_number in range(len(slices)): # from v 8
intercept = slices[slice_number].RescaleIntercept
slope = slices[slice_number].RescaleSlope
if slope != 1: # added 16 Jan 2016, evening
image[slice_number] = slope * image[slice_number].astype(np.float64)
image[slice_number] = image[slice_number].astype(np.int16)
image[slice_number] += np.int16(intercept)
return np.array(image, dtype=np.int16)
def get_pixels_hu(slices):
image = np.stack([s.pixel_array for s in slices])
image = image.astype(np.int16)
# Set outside-of-scan pixels to 0
# The intercept is usually -1024, so air is approximately 0
image[image == -2000] = 0
# Convert to Hounsfield units (HU)
### slope can differ per slice -- so do it individually (case in point black_tset, slices 95 vs 96)
### Changes/correction - 31.01.2017
for slice_number in range(len(slices)):
intercept = slices[slice_number].RescaleIntercept
slope = slices[slice_number].RescaleSlope
if slope != 1:
image[slice_number] = slope * image[slice_number].astype(np.float64)
image[slice_number] = image[slice_number].astype(np.int16)
image[slice_number] += np.int16(intercept)
return np.array(image, dtype=np.int16)
MARKER_INTERNAL_THRESH = -400
MARKER_FRAME_WIDTH = 9 # 9 seems OK for the half special case ...
def generate_markers(image):
#Creation of the internal Marker
useTestPlot = False
if useTestPlot:
timg = image
plt.imshow(timg, cmap='gray')
plt.show()
add_frame_vertical = True
if add_frame_vertical: # add frame for potentially closing the lungs that touch the edge, but only vertically
fw = MARKER_FRAME_WIDTH # frame width (it looks that 2 is the minimum width for the algorithms implemented here, namely the first 2 operations for the marker_internal)
xdim = image.shape[1]
#ydim = image.shape[0]
img2 = np.copy(image)
#y3 = ydim // 3
img2 [:, 0] = -1024
img2 [:, 1:fw] = 0
img2 [:, xdim-1:xdim] = -1024
img2 [:, xdim-fw:xdim-1] = 0
marker_internal = img2 < MARKER_INTERNAL_THRESH
else:
marker_internal = image < MARKER_INTERNAL_THRESH # was -400
useTestPlot = False
if useTestPlot:
timg = marker_internal
plt.imshow(timg, cmap='gray')
plt.show()
correct_edges2 = False ## NOT a good idea - no added value
if correct_edges2:
marker_internal[0,:] = 0
marker_internal[:,0] = 0
#marker_internal[:,1] = True
#marker_internal[:,2] = True
marker_internal[511,:] = 0
marker_internal[:,511] = 0
marker_internal = segmentation.clear_border(marker_internal, buffer_size=0)
marker_internal_labels = measure.label(marker_internal)
areas = [r.area for r in measure.regionprops(marker_internal_labels)]
areas.sort()
if len(areas) > 2:
for region in measure.regionprops(marker_internal_labels):
if region.area < areas[-2]:
for coordinates in region.coords:
marker_internal_labels[coordinates[0], coordinates[1]] = 0
marker_internal = marker_internal_labels > 0
#Creation of the external Marker
external_a = ndimage.binary_dilation(marker_internal, iterations=10) # was 10
external_b = ndimage.binary_dilation(marker_internal, iterations=55) # was 55
marker_external = external_b ^ external_a
#Creation of the Watershed Marker matrix
#marker_watershed = np.zeros((512, 512), dtype=np.int) # origi
marker_watershed = np.zeros((marker_external.shape), dtype=np.int)
marker_watershed += marker_internal * 255
marker_watershed += marker_external * 128
return marker_internal, marker_external, marker_watershed
# Some of the starting Code is taken from ArnavJain, since it's more readable then my own
def generate_markers_3d(image):
#Creation of the internal Marker
marker_internal = image < -400
marker_internal_labels = np.zeros(image.shape).astype(np.int16)
for i in range(marker_internal.shape[0]):
marker_internal[i] = segmentation.clear_border(marker_internal[i])
marker_internal_labels[i] = measure.label(marker_internal[i])
#areas = [r.area for r in measure.regionprops(marker_internal_labels)]
areas = [r.area for i in range(marker_internal.shape[0]) for r in measure.regionprops(marker_internal_labels[i])]
for i in range(marker_internal.shape[0]):
areas = [r.area for r in measure.regionprops(marker_internal_labels[i])]
areas.sort()
if len(areas) > 2:
for region in measure.regionprops(marker_internal_labels[i]):
if region.area < areas[-2]:
for coordinates in region.coords:
marker_internal_labels[i, coordinates[0], coordinates[1]] = 0
marker_internal = marker_internal_labels > 0
#Creation of the external Marker
# 3x3 structuring element with connectivity 1, used by default
struct1 = ndimage.generate_binary_structure(2, 1)
struct1 = struct1[np.newaxis,:,:] # expand by z axis .
external_a = ndimage.binary_dilation(marker_internal, structure=struct1, iterations=10)
external_b = ndimage.binary_dilation(marker_internal, structure=struct1, iterations=55)
marker_external = external_b ^ external_a
#Creation of the Watershed Marker matrix
#marker_watershed = np.zeros((512, 512), dtype=np.int) # origi
marker_watershed = np.zeros((marker_external.shape), dtype=np.int)
marker_watershed += marker_internal * 255
marker_watershed += marker_external * 128
return marker_internal, marker_external, marker_watershed
BINARY_CLOSING_SIZE = 7 #was 7 before final; 5 for disk seems sufficient - for safety let's go with 6 or even 7
def seperate_lungs(image):
#Creation of the markers as shown above:
marker_internal, marker_external, marker_watershed = generate_markers(image)
#Creation of the Sobel-Gradient
sobel_filtered_dx = ndimage.sobel(image, 1)
sobel_filtered_dy = ndimage.sobel(image, 0)
sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
sobel_gradient *= 255.0 / np.max(sobel_gradient)
#Watershed algorithm
watershed = morphology.watershed(sobel_gradient, marker_watershed)
#Reducing the image created by the Watershed algorithm to its outline
outline = ndimage.morphological_gradient(watershed, size=(3,3))
outline = outline.astype(bool)
#Performing Black-Tophat Morphology for reinclusion
#Creation of the disk-kernel and increasing its size a bit
blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0]]
blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
#Perform the Black-Hat
outline += ndimage.black_tophat(outline, structure=blackhat_struct)
#Use the internal marker and the Outline that was just created to generate the lungfilter
lungfilter = np.bitwise_or(marker_internal, outline)
#Close holes in the lungfilter
#fill_holes is not used here, since in some slices the heart would be reincluded by accident
##structure = np.ones((BINARY_CLOSING_SIZE,BINARY_CLOSING_SIZE)) # 5 is not enough, 7 is
structure = morphology.disk(BINARY_CLOSING_SIZE) # better , 5 seems sufficient, we use 7 for safety/just in case
lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3) #, iterations=3) # was structure=np.ones((5,5))
### NOTE if no iterattions, i.e. default 1 we get holes within lungs for the disk(5) and perhaps more
#Apply the lungfilter (note the filtered areas being assigned -2000 HU)
segmented = np.where(lungfilter == 1, image, -2000*np.ones((512, 512))) ### was -2000
return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def rescale_n(n,reduce_factor):
return max( 1, int(round(n / reduce_factor)))
def seperate_lungs_cv2(image): # for increased speed
#Creation of the markers as shown above:
marker_internal, marker_external, marker_watershed = generate_markers(image)
#image_size = image.shape[0]
reduce_factor = 512 / image.shape[0]
#Creation of the Sobel-Gradient
sobel_filtered_dx = ndimage.sobel(image, 1)
sobel_filtered_dy = ndimage.sobel(image, 0)
sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
sobel_gradient *= 255.0 / np.max(sobel_gradient)
useTestPlot = False
if useTestPlot:
timg = sobel_gradient
plt.imshow(timg, cmap='gray')
plt.show()
#Watershed algorithm
watershed = morphology.watershed(sobel_gradient, marker_watershed)
if useTestPlot:
timg = marker_external
plt.imshow(timg, cmap='gray')
plt.show()
#Reducing the image created by the Watershed algorithm to its outline
#wsize = rescale_n(3,reduce_factor) # THIS IS TOO SMALL, dynamically adjusting the size for the watershed algorithm
outline = ndimage.morphological_gradient(watershed, size=(3,3)) # original (3,3), (wsize, wsize) is too small to create an outline
outline = outline.astype(bool)
outline_u = outline.astype(np.uint8) #added
#Performing Black-Tophat Morphology for reinclusion
#Creation of the disk-kernel and increasing its size a bit
blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0]]
use_reduce_factor = True
if use_reduce_factor:
blackhat_struct = ndimage.iterate_structure(blackhat_struct, rescale_n(8,reduce_factor)) # dynamically adjust the number of iterations; original was 8
else:
blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
blackhat_struct_cv2 = blackhat_struct.astype(np.uint8)
#Perform the Black-Hat
#outline += ndimage.black_tophat(outline, structure=blackhat_struct) # original slow
#outline1 = outline + (cv2.morphologyEx(outline_u, cv2.MORPH_BLACKHAT, kernel=blackhat_struct_cv2)).astype(np.bool)
#outline2 = outline + ndimage.black_tophat(outline, structure=blackhat_struct)
#np.array_equal(outline1,outline2) # True
outline += (cv2.morphologyEx(outline_u, cv2.MORPH_BLACKHAT, kernel=blackhat_struct_cv2)).astype(np.bool) # fast
if useTestPlot:
timg = outline
plt.imshow(timg, cmap='gray')
plt.show()
#Use the internal marker and the Outline that was just created to generate the lungfilter
lungfilter = np.bitwise_or(marker_internal, outline)
if useTestPlot:
timg = lungfilter
plt.imshow(timg, cmap='gray')
plt.show()
#Close holes in the lungfilter
#fill_holes is not used here, since in some slices the heart would be reincluded by accident
##structure = np.ones((BINARY_CLOSING_SIZE,BINARY_CLOSING_SIZE)) # 5 is not enough, 7 is
structure2 = morphology.disk(2) # used to fill the gaps/holes close to the border (otherwise the large structure would create a gap by the edge)
if use_reduce_factor:
structure3 = morphology.disk(rescale_n(BINARY_CLOSING_SIZE,reduce_factor)) # dynamically adjusted; 5 seems sufficient, we use 7 for safety/just in case
else:
structure3 = morphology.disk(BINARY_CLOSING_SIZE) # dynamically adjusted; 5 seems sufficient, we use 7 for safety/just in case
##lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3) #, ORIGINAL iterations=3) # was structure=np.ones((5,5))
lungfilter2 = ndimage.morphology.binary_closing(lungfilter, structure=structure2, iterations=3) # ADDED
lungfilter3 = ndimage.morphology.binary_closing(lungfilter, structure=structure3, iterations=3)
lungfilter = np.bitwise_or(lungfilter2, lungfilter3)
### NOTE if no iterattions, i.e. default 1 we get holes within lungs for the disk(5) and perhaps more
#Apply the lungfilter (note the filtered areas being assigned -2000 HU)
#image.shape
#segmented = np.where(lungfilter == 1, image, -2000*np.ones((512, 512)).astype(np.int16)) # was -2000 someone suggested 30
segmented = np.where(lungfilter == 1, image, -2000*np.ones(image.shape).astype(np.int16)) # was -2000 someone suggested 30
return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def seperate_lungs_3d(image):
#Creation of the markers as shown above:
marker_internal, marker_external, marker_watershed = generate_markers_3d(image)
#Creation of the Sobel-Gradient
sobel_filtered_dx = ndimage.sobel(image, axis=2)
sobel_filtered_dy = ndimage.sobel(image, axis=1)
sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
sobel_gradient *= 255.0 / np.max(sobel_gradient)
#Watershed algorithm
watershed = morphology.watershed(sobel_gradient, marker_watershed)
#Reducing the image created by the Watershed algorithm to its outline
outline = ndimage.morphological_gradient(watershed, size=(1,3,3))
outline = outline.astype(bool)
#Performing Black-Tophat Morphology for reinclusion
#Creation of the disk-kernel and increasing its size a bit
blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0]]
blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
blackhat_struct = blackhat_struct[np.newaxis,:,:]
#Perform the Black-Hat
outline += ndimage.black_tophat(outline, structure=blackhat_struct) # very long time
#Use the internal marker and the Outline that was just created to generate the lungfilter
lungfilter = np.bitwise_or(marker_internal, outline)
#Close holes in the lungfilter
#fill_holes is not used here, since in some slices the heart would be reincluded by accident
##structure = np.ones((BINARY_CLOSING_SIZE,BINARY_CLOSING_SIZE)) # 5 is not enough, 7 is
structure = morphology.disk(BINARY_CLOSING_SIZE) # better , 5 seems sufficient, we use 7 for safety/just in case
structure = structure[np.newaxis,:,:]
lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3) #, iterations=3) # was structure=np.ones((5,5))
### NOTE if no iterattions, i.e. default 1 we get holes within lungs for the disk(5) and perhaps more
#Apply the lungfilter (note the filtered areas being assigned -2000 HU)
segmented = np.where(lungfilter == 1, image, -2000*np.ones(marker_internal.shape))
return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def get_slice_location(dcm):
return float(dcm[0x0020, 0x1041].value)
def thru_plane_position(dcm):
"""Gets spatial coordinate of image origin whose axis
is perpendicular to image plane.
"""
orientation = tuple((float(o) for o in dcm.ImageOrientationPatient))
position = tuple((float(p) for p in dcm.ImagePositionPatient))
rowvec, colvec = orientation[:3], orientation[3:]
normal_vector = np.cross(rowvec, colvec)
slice_pos = np.dot(position, normal_vector)
return slice_pos
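# Hedged illustration (added; the dummy object below is an assumption, not a
# real pydicom dataset): for a plain axial slice with orientation
# (1,0,0, 0,1,0) the plane normal is (0,0,1), so thru_plane_position simply
# returns the z component of ImagePositionPatient.
def _example_thru_plane_position():
    class _DummySlice:
        ImageOrientationPatient = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0]
        ImagePositionPatient = [-150.0, -150.0, 42.5]
    return thru_plane_position(_DummySlice())  # 42.5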
def resample(image, scan, new_spacing=[1,1,1]):
# Determine current pixel spacing
spacing = map(float, ([scan[0].SliceThickness] + scan[0].PixelSpacing))
spacing = np.array(list(spacing))
#scan[2].SliceThickness
resize_factor = spacing / new_spacing
new_real_shape = image.shape * resize_factor
new_shape = np.round(new_real_shape)
real_resize_factor = new_shape / image.shape
new_spacing = spacing / real_resize_factor
image = scipy.ndimage.interpolation.zoom(image, real_resize_factor, mode='nearest') ### early orig modified
return image, new_spacing
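# Hedged worked example (added, with made-up numbers): resample rescales by
# spacing/new_spacing but snaps the target shape to whole voxels, so the spacing
# it returns can differ slightly from the requested one. For instance, a
# (100, 512, 512) scan with spacing [2.5, 0.7, 0.7] resampled to [2, 2, 2]
# yields shape (125, 179, 179) and an effective spacing of about
# [2.0, 2.002, 2.002].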
def segment_all(stage, part=0, processors=1, showSummaryPlot=True): # stage added to simplify the stage1 and stage2 calculations
count = 0
STAGE_DIR = STAGE_DIR_BASE % stage
folders = glob.glob(''.join([STAGE_DIR,'*']))
if len(folders) == 0:
print ("ERROR, check directory, no folders found in: ", STAGE_DIR )
for folder in folders:
count += 1
if count % processors == part: # do this part in this process, otherwise skip
path = folder
slices = load_scan(path)
image_slices = get_3d_data_slices(slices)
#mid = len(image_slices) // 2
#img_sel = mid
useTestPlot = False
if useTestPlot:
print("Shape before segmenting\t", image_slices.shape)
plt.hist(image_slices.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
start = time.time()
resampleImages = True
if resampleImages:
image_resampled, spacing = resample(image_slices, slices, RESIZE_SPACING) # let's start with this small resolution for working out the system (then perhaps 2, 0.667, 0.667)
print("Shape_before_&_after_resampling\t", image_slices.shape,image_resampled.shape)
if useTestPlot:
plt.imshow(image_slices[image_slices.shape[0]//2], cmap=plt.cm.bone)
plt.show()
plt.imshow(image_resampled[image_resampled.shape[0]//2], cmap=plt.cm.bone)
np.max(image_slices)
np.max(image_resampled)
np.min(image_slices)
np.min(image_resampled)
plt.show()
image_slices = image_resampled
shape = image_slices.shape
l_segmented = np.zeros(shape).astype(np.int16)
l_lungfilter = np.zeros(shape).astype(np.bool)
l_outline = np.zeros(shape).astype(np.bool)
l_watershed = np.zeros(shape).astype(np.int16)
l_sobel_gradient = np.zeros(shape).astype(np.float32)
l_marker_internal = np.zeros(shape).astype(np.bool)
l_marker_external = np.zeros(shape).astype(np.bool)
l_marker_watershed = np.zeros(shape).astype(np.int16)
# start = time.time()
i=0
for i in range(shape[0]):
l_segmented[i], l_lungfilter[i], l_outline[i], l_watershed[i], l_sobel_gradient[i], l_marker_internal[i], l_marker_external[i], l_marker_watershed[i] = seperate_lungs_cv2(image_slices[i])
print("Rescale & Seg time, and path: ", ((time.time() - start)), path )
if useTestPlot:
plt.hist(image_slices.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
plt.hist(l_segmented.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
img_sel_i = shape[0] // 2
# Show some slice in the middle
plt.imshow(image_slices[img_sel_i], cmap=plt.cm.gray)
plt.show()
# Show some slice in the middle
plt.imshow(l_segmented[img_sel_i], cmap='gray')
plt.show()
path_rescaled = path.replace(stage, ''.join([stage, "_", RESOLUTION_STR]), 1)
path_segmented = path.replace(stage, ''.join([stage, "_segmented_", RESOLUTION_STR]), 1)
path_segmented_crop = path.replace(stage, ''.join([stage, "_segmented_", RESOLUTION_STR, "_crop"]), 1)
np.savez_compressed (path_rescaled, image_slices)
np.savez_compressed (path_segmented, l_segmented)
mask = l_lungfilter.astype(np.int8)
regions = measure.regionprops(mask) # this measures the largest region and is a bug when the mask is not the largest region !!!
bb = regions[0].bbox
#print(bb)
zlen = bb[3] - bb[0]
ylen = bb[4] - bb[1]
xlen = bb[5] - bb[2]
dx = 0 # could be reduced
## have to reduce dx as, for instance, in at least one image the lungs stretch right to the border even without cropping
## namely for '../input/stage1/be57c648eb683a31e8499e278a89c5a0'
crop_max_ratio_z = 0.6 # 0.8 is too big make_submit2(45, 1)
crop_max_ratio_y = 0.4
crop_max_ratio_x = 0.6
bxy_min = np.min(bb[1:3])
bxy_max = np.max(bb[4:6])
mask_shape= mask.shape
image_shape = l_segmented.shape
mask_volume = zlen*ylen*xlen /(mask_shape[0] * mask_shape[1] * mask_shape[2]) # bounding-box volume fraction of the scan
mask_volume_thresh = 0.08 # anything below is too small (maybe just one half of the lung or something very small)
mask_volume_check = mask_volume > mask_volume_thresh
# print ("Mask Volume: ", mask_volume )
### DO NOT allow the mask to touch x & y ---> if it does it is likely a wrong one as for:
## folders[3] , path = '../input/stage1/9ba5fbcccfbc9e08edcfe2258ddf7
maskOK = False
if bxy_min >0 and bxy_max < 512 and mask_volume_check and zlen/mask_shape[0] > crop_max_ratio_z and ylen/mask_shape[1] > crop_max_ratio_y and xlen/mask_shape[2] > crop_max_ratio_x:
## square crop and at least dx elements on both sides on x & y
bxy_min = np.min(bb[1:3])
bxy_max = np.max(bb[4:6])
if bxy_min == 0 or bxy_max == 512:
# Mask too big, auto-correct
print("The following mask likely too big, autoreducing by:", dx)
bxy_min = np.max((bxy_min, dx))
bxy_max = np.min ((bxy_max, mask_shape[1] - dx))
image = l_segmented[bb[0]:bb[3], bxy_min:bxy_max, bxy_min:bxy_max]
mask = mask[bb[0]:bb[3], bxy_min:bxy_max, bxy_min:bxy_max]
#maskOK = True
print ("Shape, cropped, bbox ", mask_shape, mask.shape, bb)
elif bxy_min> 0 and bxy_max < 512 and mask_volume_check and zlen/mask.shape[0] > crop_max_ratio_z:
## cut on z at least
image = l_segmented[bb[0]:bb[3], dx: image_shape[1] - dx, dx: image_shape[2] - dx]
#mask = mask[bb[0]:bb[3], dx: mask_shape[1] - dx, dx: mask_shape[2] - dx]
print("Mask too small, NOT auto-cropping x-y: shape, cropped, bbox, ratios, violume:", mask_shape, image.shape, bb, path, zlen/mask_shape[0], ylen/mask_shape[1], xlen/mask_shape[2], mask_volume)
else:
image = l_segmented[0:mask_shape[0], dx: image_shape[1] - dx, dx: image_shape[2] - dx]
#mask = mask[0:mask_shape[0], dx: mask_shape[1] - dx, dx: mask_shape[2] - dx]
print("Mask wrong, NOT auto-cropping: shape, cropped, bbox, ratios, volume:", mask_shape, image.shape, bb, path, zlen/mask_shape[0], ylen/mask_shape[1], xlen/mask_shape[2], mask_volume)
if showSummaryPlot:
img_sel_i = shape[0] // 2
# Show some slice in the middle
useSeparatePlots = False
if useSeparatePlots:
plt.imshow(image_slices[img_sel_i], cmap=plt.cm.gray)
plt.show()
# Show some slice in the middle
plt.imshow(l_segmented[img_sel_i], cmap='gray')
plt.show()
else:
f, ax = plt.subplots(1, 2, figsize=(6,3))
ax[0].imshow(image_slices[img_sel_i],cmap=plt.cm.bone)
ax[1].imshow(l_segmented[img_sel_i],cmap=plt.cm.bone)
plt.show()
# Show some slice in the middle
#plt.imshow(image[image.shape[0] // 2], cmap='gray') # don't show it for simpler review
#plt.show()
np.savez_compressed(path_segmented_crop, image)
#print("Mask count: ", count)
#print ("Shape: ", image.shape)
return part, processors, count
# the following 3 functions to read LUNA files are from: https://www.kaggle.com/arnavkj95/data-science-bowl-2017/candidate-generation-and-luna16-preprocessing/notebook
'''
This function reads a '.mhd' file using SimpleITK and returns the image array,
origin and spacing of the image.
'''
def load_itk(filename):
# Reads the image using SimpleITK
itkimage = sitk.ReadImage(filename)
# Convert the image to a numpy array first and then shuffle the dimensions to get axis in the order z,y,x
ct_scan = sitk.GetArrayFromImage(itkimage)
# Read the origin of the ct_scan, will be used to convert the coordinates from world to voxel and vice versa.
origin = np.array(list(reversed(itkimage.GetOrigin())))
# Read the spacing along each dimension
spacing = np.array(list(reversed(itkimage.GetSpacing())))
return ct_scan, origin, spacing
'''
This function is used to convert the world coordinates to voxel coordinates using
the origin and spacing of the ct_scan
'''
def world_2_voxel(world_coordinates, origin, spacing):
stretched_voxel_coordinates = np.absolute(world_coordinates - origin)
voxel_coordinates = stretched_voxel_coordinates / spacing
return voxel_coordinates
'''
This function is used to convert the voxel coordinates to world coordinates using
the origin and spacing of the ct_scan.
'''
def voxel_2_world(voxel_coordinates, origin, spacing):
stretched_voxel_coordinates = voxel_coordinates * spacing
world_coordinates = stretched_voxel_coordinates + origin
return world_coordinates
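# Hedged round-trip sketch (added; origin/spacing values are made up and the
# helper is never called): the two converters are inverses of each other, up to
# the np.absolute in world_2_voxel, which assumes the world coordinates sit on
# the expected side of the origin, as they do for LUNA scans.
def _example_coordinate_roundtrip():
    origin = np.array([-200.0, -180.0, -190.0])    # z, y, x (mm)
    spacing = np.array([2.0, 0.7, 0.7])            # mm per voxel
    voxel = np.array([10.0, 50.0, 60.0])
    world = voxel_2_world(voxel, origin, spacing)  # [-180., -145., -148.]
    return world_2_voxel(world, origin, spacing)   # back to [10., 50., 60.]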
def seq(start, stop, step=1):
n = int(round((stop - start)/float(step)))
if n > 1:
return([start + step*i for i in range(n+1)])
else:
return([])
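# Hedged note (added): seq builds a MATLAB-style inclusive range, e.g.
# seq(-2, 2, 1) -> [-2, -1, 0, 1, 2] and seq(0, 10, 2.5) -> [0, 2.5, 5, 7.5, 10];
# it returns [] when the span covers at most one step.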
'''
This function is used to create spherical regions in binary masks
at the given locations and radius.
'''
#image = lung_img
#spacing = new_spacing
def draw_circles(image,cands,origin,spacing):
#make empty matrix, which will be filled with the mask
image_mask = np.zeros(image.shape, dtype=np.int16)
#run over all the nodules in the lungs
for ca in cands.values:
#get middel x-,y-, and z-worldcoordinate of the nodule
#radius = np.ceil(ca[4])/2 ## original: replaced the ceil with a very minor increase of 1% ....
radius = (ca[4])/2 + 0.51 * spacing[0] # increasing by circa half of the spacing in the z direction (trying to capture a wider region/border for learning and to address the coarse net)
coord_x = ca[1]
coord_y = ca[2]
coord_z = ca[3]
image_coord = np.array((coord_z,coord_y,coord_x))
#determine voxel coordinate given the worldcoordinate
image_coord = world_2_voxel(image_coord,origin,spacing)
#determine the range of the nodule
#noduleRange = seq(-radius, radius, RESIZE_SPACING[0]) # original, uniform spacing
noduleRange_z = seq(-radius, radius, spacing[0])
noduleRange_y = seq(-radius, radius, spacing[1])
noduleRange_x = seq(-radius, radius, spacing[2])
#x = y = z = -2
#create the mask
for x in noduleRange_x:
for y in noduleRange_y:
for z in noduleRange_z:
coords = world_2_voxel(np.array((coord_z+z,coord_y+y,coord_x+x)),origin,spacing)
#if (np.linalg.norm(image_coord-coords) * RESIZE_SPACING[0]) < radius: ### original (contrained to a uniofrm RESIZE)
if (np.linalg.norm((image_coord-coords) * spacing)) < radius:
image_mask[int(np.round(coords[0])),int(np.round(coords[1])),int(np.round(coords[2]))] = int(1)
return image_mask
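# Hedged note (added): the condition above keeps a voxel when its physical (mm)
# distance to the nodule centre is below the padded radius, i.e.
#   ||(image_coord - coords) * spacing||_2 < diameter/2 + 0.51 * spacing[0],
# so the drawn mask is a sphere in world space even with anisotropic spacing.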
'''
This function takes the path to a '.mhd' file as input and
is used to create the nodule masks and segmented lungs after
rescaling to 1mm size in all directions. It saved them in the .npz
format. It also takes the list of nodule locations in that CT Scan as
input.
'''
def load_scans_masks(luna_subset, useAll, use_unsegmented=True):
#luna_subset = "[0-6]"
LUNA_DIR = LUNA_BASE_DIR % luna_subset
files = glob.glob(''.join([LUNA_DIR,'*.mhd']))
annotations = pd.read_csv(LUNA_ANNOTATIONS)
annotations.head()
sids = []
scans = []
masks = []
cnt = 0
skipped = 0
for file in files:
imagePath = file
seriesuid = file[file.rindex('/')+1:] # everything after the last slash
seriesuid = seriesuid[:len(seriesuid)-len(".mhd")] # cut out the suffix to get the uid
path = imagePath[:len(imagePath)-len(".mhd")] # cut out the suffix to get the uid
if use_unsegmented:
path_segmented = path.replace("original_lungs", "lungs_2x2x2", 1)
else:
path_segmented = path.replace("original_lungs", "segmented_2x2x2", 1)
cands = annotations[seriesuid == annotations.seriesuid] # select the annotations for the current series
#useAll = True
if (len(cands) > 0 or useAll):
sids.append(seriesuid)
if use_unsegmented:
scan_z = np.load(''.join((path_segmented + '_lung' + '.npz')))
else:
scan_z = np.load(''.join((path_segmented + '_lung_seg' + '.npz')))
#scan_z.keys()
scan = scan_z['arr_0']
mask_z = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
mask = mask_z['arr_0']
scans.append(scan)
masks.append(mask)
cnt += 1
else:
print("Skipping non-nodules entry ", seriesuid)
skipped += 1
print ("Summary: cnt & skipped: ", cnt, skipped)
return scans, masks, sids
def load_scans_masks_or_blanks(luna_subset, useAll, use_unsegmented=True):
#luna_subset = "[0-6]"
LUNA_DIR = LUNA_BASE_DIR % luna_subset
files = glob.glob(''.join([LUNA_DIR,'*.mhd']))
annotations = pd.read_csv(LUNA_ANNOTATIONS)
annotations.head()
candidates = pd.read_csv(LUNA_CANDIDATES)
candidates_false = candidates[candidates["class"] == 0] # only select the false candidates
candidates_true = candidates[candidates["class"] == 1] # only select the true candidates
sids = []
scans = []
masks = []
blankids = [] # class/id whether scan is with nodule or without, 0 - with, 1 - without
cnt = 0
skipped = 0
#file=files[7]
for file in files:
imagePath = file
seriesuid = file[file.rindex('/')+1:] # everything after the last slash
seriesuid = seriesuid[:len(seriesuid)-len(".mhd")] # cut out the suffix to get the uid
path = imagePath[:len(imagePath)-len(".mhd")] # cut out the suffix to get the uid
if use_unsegmented:
path_segmented = path.replace("original_lungs", "lungs_2x2x2", 1)
else:
path_segmented = path.replace("original_lungs", "segmented_2x2x2", 1)
cands = annotations[seriesuid == annotations.seriesuid] # select the annotations for the current series
ctrue = candidates_true[seriesuid == candidates_true.seriesuid]
cfalse = candidates_false[seriesuid == candidates_false.seriesuid]
#useAll = True
blankid = 1 if (len(cands) == 0 and len(ctrue) == 0 and len(cfalse) > 0) else 0
skip_nodules_entirely = False # was False
use_only_nodules = False # was True
if skip_nodules_entirely and blankid ==0:
## manual switch to generate extra data for the corrupted set
print("Skipping nodules (skip_nodules_entirely) ", seriesuid)
skipped += 1
elif use_only_nodules and (len(cands) == 0):
## manual switch to generate only nodules data due lack of time and repeat etc time pressures
print("Skipping blanks (use_only_nodules) ", seriesuid)
skipped += 1
else: # NORMAL operations
if (len(cands) > 0 or
(blankid >0) or
useAll):
sids.append(seriesuid)
blankids.append(blankid)
if use_unsegmented:
scan_z = np.load(''.join((path_segmented + '_lung' + '.npz')))
else:
scan_z = np.load(''.join((path_segmented + '_lung_seg' + '.npz')))
#scan_z.keys()
scan = scan_z['arr_0']
#mask_z = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
mask_z = np.load(''.join((path_segmented + '_nodule_mask_wblanks' + '.npz')))
mask = mask_z['arr_0']
testPlot = False
if testPlot:
maskcheck_z = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
maskcheck = maskcheck_z['arr_0']
f, ax = plt.subplots(1, 2, figsize=(10,5))
ax[0].imshow(np.sum(np.abs(maskcheck), axis=0),cmap=plt.cm.gray)
ax[1].imshow(np.sum(np.abs(mask), axis=0),cmap=plt.cm.gray)
#ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
scans.append(scan)
masks.append(mask)
cnt += 1
else:
print("Skipping non-nodules and non-blank entry ", seriesuid)
skipped += 1
print ("Summary: cnt & skipped: ", cnt, skipped)
return scans, masks, sids, blankids
#return scans, masks, sids # not yet, old style
def load_scans_masks_no_nodules(luna_subset, use_unsegmented=True): # load only the ones that do not contain nodules
#luna_subset = "[0-6]"
LUNA_DIR = LUNA_BASE_DIR % luna_subset
files = glob.glob(''.join([LUNA_DIR,'*.mhd']))
annotations = pd.read_csv(LUNA_ANNOTATIONS)
annotations.head()
sids = []
scans = []
masks = []
cnt = 0
skipped = 0
for file in files:
imagePath = file
seriesuid = file[file.rindex('/')+1:] # everything after the last slash
seriesuid = seriesuid[:len(seriesuid)-len(".mhd")] # cut out the suffix to get the uid
path = imagePath[:len(imagePath)-len(".mhd")] # cut out the suffix to get the uid
if use_unsegmented:
path_segmented = path.replace("original_lungs", "lungs_2x2x2", 1)
else:
path_segmented = path.replace("original_lungs", "segmented_2x2x2", 1)
cands = annotations[seriesuid == annotations.seriesuid] # select the annotations for the current series
#useAll = True
if (len(cands)):
print("Skipping entry with nodules ", seriesuid)
skipped += 1
else:
sids.append(seriesuid)
if use_unsegmented:
scan_z = np.load(''.join((path_segmented + '_lung' + '.npz')))
else:
scan_z = np.load(''.join((path_segmented + '_lung_seg' + '.npz')))
#scan_z.keys()
scan = scan_z['arr_0']
mask_z = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
mask = mask_z['arr_0']
scans.append(scan)
masks.append(mask)
cnt += 1
print ("Summary: cnt & skipped: ", cnt, skipped)
return scans, masks, sids
MIN_BOUND = -1000.0
MAX_BOUND = 400.0
def normalize(image):
image = (image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
image[image>1] = 1.
image[image<0] = 0.
return image
PIXEL_MEAN = 0.028 ## for LUNA subset 0 and our preprocessing; only-with-nodules was 0.028, all was 0.020421744071562546 (in the tutorial they used 0.25)
def zero_center(image):
image = image - PIXEL_MEAN
return image
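# Hedged illustration (added; the helper is not used by the pipeline): with
# MIN_BOUND = -1000 and MAX_BOUND = 400, normalize maps air (-1000 HU) to 0.0
# and anything at or above 400 HU to 1.0, clipping outside the window;
# zero_center then subtracts the dataset-specific PIXEL_MEAN.
def _example_hu_window():
    hu = np.array([-1200., -1000., 0., 400., 600.])
    # normalize(hu) -> [0., 0., ~0.714, 1., 1.]
    return zero_center(normalize(hu))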
def load_scans(path): # function used for testing
slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
slices.sort(key=lambda x: int(x.InstanceNumber))
return np.stack([s.pixel_array for s in slices])
def get_scans(df,scans_list):
scans=np.stack([load_scans(scan_folder+df.id[i_scan[0]])[i_scan[1]] for i_scan in scans_list])
scans=process_scans(scans)
view_scans(scans)
return(scans)
def process_scans(scans): # used for tesing
scans1=np.zeros((scans.shape[0],1,img_rows,img_cols))
for i in range(scans.shape[0]):
img=scans[i,:,:]
img = 255.0 / np.amax(img) * img
img =img.astype(np.uint8)
img =cv2.resize(img, (img_rows, img_cols))
scans1[i,0,:,:]=img
return (scans1)
only_with_nudels = True
def convert_scans_and_masks(scans, masks, only_with_nudels):
flattened1 = [val for sublist in scans for val in sublist[1:-1]] # skip one element at the beginning and at the end
scans1 = np.stack(flattened1)
flattened1 = [val for sublist in masks for val in sublist[1:-1]] # skip one element at the beginning and at the end
masks1 = np.stack(flattened1) # 10187
#only_with_nudels = True
if only_with_nudels:
nudels_pix_count = np.sum(masks1, axis = (1,2))
scans1 = scans1[nudels_pix_count>0]
masks1 = masks1[nudels_pix_count>0] # 493 -- circa 5 % with nodules, others without
#nudels2 = np.where(masks1 == 1, scans1, -4000*np.ones(( masks1.shape[1], masks1.shape[2))) ### was -2000
#nudels1 = np.where(masks1 == 1, scans1, masks1 - 4000) ### was -2000
#nudles1_rf = nudels1.flatten()
#nudles1_rf = nudles1_rf[nudles1_rf > -4000]
scans = normalize(scans1)
useTestPlot = False
if useTestPlot:
plt.hist(scans1.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
#for i in range(scans.shape[0]):
for i in range(20):
print ('scan '+str(i))
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(scans1[i,:,:],cmap=plt.cm.gray)
ax[1].imshow(scans[i,:,:],cmap=plt.cm.gray)
ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
#np.mean(scans) # 0.028367 / 0.0204
#np.min(scans) # 0
#np.max(scans) #
scans = zero_center(scans)
masks = np.copy(masks1)
## if needed do the resize here ....
img_rows = scans.shape[1] ### redefine img_rows/ cols and add resize if needed
img_cols = scans.shape[2]
scans1=np.zeros((scans.shape[0],1,img_rows,img_cols))
for i in range(scans.shape[0]):
img=scans[i,:,:]
###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
scans1[i,0,:,:]=img
masks1=np.zeros((masks.shape[0],1,img_rows,img_cols))
for i in range(masks.shape[0]):
img=masks[i,:,:]
###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
masks1[i,0,:,:]=img
return scans1, masks1
#scans = [scans[i]]
#masks = [masks[i]]
def convert_scans_and_masks_xd_ablanks(scans, masks, blankids, only_with_nudels, dim=3):
# reuse scan to reduce memory footprint
dim_orig = dim
add_blank_spacing_size = dim * 8 #### use 4 for [0 - 3] and 8 for [4 - 7] ???initial trial (should perhaps be just dim ....)
#skip = dim // 2 # old
skip_low = dim // 2 # dim should be odd -- it is recalculated anyway to this end
skip_high = dim -skip_low - 1
do_not_allow_even_dim = False ## now we allow odd numbers ...
if do_not_allow_even_dim:
dim = 2 * skip_low + 1
skip_low = dim // 2
skip_high = dim -skip_low - 1
if dim != dim_orig:
print ("convert_scans_and_masks_x: Dim must be uneven, corrected from .. to:", dim_orig, dim)
work = [] # 3 layers
#scan = scans[0]
for scan in scans: ##TEMP
tmp = []
#i = 1
#for i in range(1, scan.shape[0]-1, 3): # SKIP EVERY 3
for i in range(skip_low, scan.shape[0]-skip_high):
#img1 = scan[i-1]
#img2 = scan[i]
#img3 = scan[i+1]
#rgb = np.stack((img1, img2, img3))
rgb = np.stack(scan[i-skip_low:i+skip_high+1])
tmp.append(rgb)
work.append(np.array(tmp))
#flattened1 = [val for sublist in work for val in sublist ] # NO skipping as we have already cut the first and the last layer
#scans1 = np.stack(flattened1)
scans1 = np.stack([val for sublist in work for val in sublist ]) # NO skipping as we have already cut the first and the last layer
work = []
### ADD artificial mask pixel every add_blank_spacing layers for each blank id ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
blanks_per_axis = 4 # skip border
crop = 16
dx = (img_cols - 2 * crop) // (blanks_per_axis + 2)
dy = (img_rows - 2 * crop) // (blanks_per_axis + 2)
for mask in masks:
if (np.sum(mask) < 0):
## we have a blank
### ADD ariticial mask pixel every add_blank_spacing layers for each blankids ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
for i in range(skip_low, mask.shape[0]-skip_high, add_blank_spacing_size):
for ix in range(blanks_per_axis):
xpos = crop + (ix+1)*dx + dx //2
for iy in range(blanks_per_axis):
ypos = crop + (iy+1)*dy + dy //2
#print (xpos, ypos)
mask[skip_low, ypos, xpos] = -1 # negative pixel to be picked up below and corrected back to none
#for k in range(len(blankids)):
# if blankids[k] > 0:
# mask = masks[k]
# ## add the blanls
# for i in range(skip_low, mask.shape[0]-skip_high, add_blank_spacing_size):
# mask[skip_low, 0, 0] = -1 # negative pixel to be picked up below and corrected back to none
use_3d_mask = True ##
if use_3d_mask:
work = [] # 3 layers
#mask = masks[0]
for mask in masks:
tmp = []
#i = 0
for i in range(skip_low, mask.shape[0]-skip_high):
#img1 = mask[i-1]
#img2 = mask[i]
#img3 = mask[i+1]
#rgb = np.stack((img1, img2, img3))
rgb = np.stack(mask[i-skip_low:i+skip_high+1])
tmp.append(rgb)
work.append(np.array(tmp))
masks1 = np.stack([val for sublist in work for val in sublist ] )# NO skipping as we have already cut the first and the last layer
else:
masks1 = np.stack([val for sublist in masks for val in sublist[skip_low:-skip_high]] ) # skip one element at the beginning and at the end
#masks1 = np.stack(flattened1) # 10187
#only_with_nudels = True
if only_with_nudels:
if use_3d_mask:
nudels_pix_count = np.sum(masks1[:,skip_low], axis = (1,2)) ## and added for the potential blanks; modified so that the centre mask is used as the mask!
else:
nudels_pix_count = np.sum(masks1, axis = (1,2))
scans1 = scans1[nudels_pix_count != 0]
masks1 = masks1[nudels_pix_count != 0]
#blank_mask_factor = np.sign(nudels_pix_count)[nudels_pix_count != 0]
#sum(blank_mask_factor)
#blank_mask_factor[blank_mask_factor <0] = 0
#mask1_orig = masks1
#np.sum(mask1_orig)
#np.min(masks1)
#masks1 = masks1[nudels_pix_count != 0] * blank_mask_factor # 493 -- circa 5 % with nudeles oters without; 232 if we skip over every 3 layers and use a 3d mask
masks1[masks1 < 0] = 0 # 493 -- circa 5 % with nudeles oters without; 232 if we skip over every 3 layers and use a 3d mask
#masks1[nudels_pix_count < 0] = 0 # making empty mask for balancing training set
#nudels2 = np.where(masks1 == 1, scans1, -4000*np.ones(( masks1.shape[1], masks1.shape[2))) ### was -2000
#nudels1 = np.where(masks1 == 1, scans1, masks1 - 4000) ### was -2000
#nudles1_rf = nudels1.flatten()
#nudles1_rf = nudles1_rf[nudles1_rf > -4000]
scans1 = normalize(scans1)
useTestPlot = False
if useTestPlot:
plt.hist(scans1.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
#for i in range(scans.shape[0]):
for i in range(20):
print ('scan '+str(i))
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(scans1[i,:,:],cmap=plt.cm.gray)
ax[1].imshow(scans[i,:,:],cmap=plt.cm.gray)
ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
#np.mean(scans) # 0.028367 / 0.0204
#np.min(scans) # 0
#np.max(scans) #
scans1 = zero_center(scans1)
#masks = np.copy(masks1)
## if needed do the resize here .... (img_rows and img_cols are global values defined externally)
#img_rows = scans.shape[1] ### redefine img_rows/ cols and add resize if needed
#img_cols = scans.shape[2]
# scans already are in the tensor mode with 3 rgb elements ....
#scans1 = scans ## no change
#scans1=np.zeros((scans.shape[0],3,img_rows,img_cols))
#for i in range(scans.shape[0]):
# img=scans[i,:,:]
# ###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
# scans1[i,0,:,:]=img
if use_3d_mask:
done = 1 # nothing to do
else:
masks = np.copy(masks1)
masks1=np.zeros((masks.shape[0],1,img_rows,img_cols))
for i in range(masks.shape[0]):
img=masks[i,:,:]
###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
masks1[i,0,:,:]=img
return scans1, masks1
#scans = [scans[j]]
#masks = [masks[j]]
def convert_scans_and_masks_xd3(scans, masks, only_with_nudels, dim=3, crop=16, blanks_per_axis = 4, add_blank_spacing_size=0, add_blank_layers = 0):
# reuse scan to reduce memory footprint
dim_orig = dim
#add_blank_spacing_size = 0 # dim *4 # dim # was dim ### set to 0 for version_16 #### initial trial (should perhaps be just dim ....), if 0 - do not add ...
#add_blank_layers = 0 # was 4
#skip = dim // 2 # old
skip_low = dim // 2 # dim should be odd -- it is recalculated anyway to this end
skip_high = dim -skip_low - 1
do_not_allow_even_dim = False ## now we allow odd numbers ...
if do_not_allow_even_dim:
dim = 2 * skip_low + 1
skip_low = dim // 2
skip_high = dim -skip_low - 1
if dim != dim_orig:
print ("convert_scans_and_masks_x: Dim must be uneven, corrected from .. to:", dim_orig, dim)
work = [] # 3 layers
#scan = scans[0]
for scan in scans: ##TEMP
tmp = []
#i = 1
#for i in range(1, scan.shape[0]-1, 3): # SKIP EVERY 3
for i in range(skip_low, scan.shape[0]-skip_high):
#img1 = scan[i-1]
#img2 = scan[i]
#img3 = scan[i+1]
#rgb = np.stack((img1, img2, img3))
rgb = np.stack(scan[i-skip_low:i+skip_high+1])
tmp.append(rgb)
work.append(np.array(tmp))
#flattened1 = [val for sublist in work for val in sublist ] # NO skipping as we have already cut the first and the last layer
#scans1 = np.stack(flattened1)
scans1 = np.stack([val for sublist in work for val in sublist ]) # NO skipping as we have already cut the first and the last layer
work = []
##blanks_per_axis = 6 # cover all slice
##crop = 44
dxrange = scans[0].shape[-1] - 2 * crop
dyrange = scans[0].shape[-2] - 2 * crop
#dx = (img_cols - 2 * crop) // (blanks_per_axis)
#dy = (img_rows - 2 * crop) // (blanks_per_axis)
#dx = dxrange // (blanks_per_axis+1)
#dy = dyrange // (blanks_per_axis+1)
### ADD ariticial mask pixel every add_blank_spacing layers for each blankids ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
if add_blank_spacing_size > 0:
for mask in masks:
if (np.min(mask) < 0):
## we have a blank
### ADD ariticial mask pixel every add_blank_spacing layers for each blankids ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
for i in range(skip_low+(add_blank_spacing_size//2), mask.shape[0]-skip_high, add_blank_spacing_size):
mask[i, np.random.randint(0,dyrange), np.random.randint(0,dxrange)] = -1 # negative pixel to be picked up below and corrected back to none
if add_blank_layers > 0:
for mask in masks:
if (np.min(mask) < 0):
dzrange = mask.shape[0]-dim
## we have a blank
### ADD ariticial mask pixel every add_blank_spacing layers for each blankids ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
for k in range(add_blank_layers):
i = np.random.randint(0, dzrange) + skip_low
#print ("dz position, random, mask.shape ", i, mask.shape)
mask[i, np.random.randint(0,dyrange), np.random.randint(0,dxrange)] = -1 # negative pixel to be picked up below and corrected back to none
#mask = masks[0]
add_random_blanks_in_blanks = False ## NO need for the extra random blank pixels now, 20170327
if add_random_blanks_in_blanks:
for mask in masks:
if (np.min(mask) < 0):
## we have a blank
### ADD ariticial mask pixel every add_blank_spacing layers for each blankids ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
#zlow = skip_low
#zhigh = mask.shape[0]-skip_high
pix_sum = np.sum(mask, axis=(1,2))
idx_blanks = np.min(mask, axis=(1,2)) < 0 ## don't use it - let's vary the position across the space
for iz in range(mask.shape[0]):
if (np.min(mask[iz])) < 0:
for ix in range(blanks_per_axis):
#xpos = crop + (ix)*dx + dx //2
for iy in range(blanks_per_axis):
#ypos = crop + (iy)*dy + dy //2
xpos = crop + np.random.randint(0,dxrange)
ypos = crop + np.random.randint(0,dyrange)
#print (iz, xpos, ypos)
#mask[idx_blanks, ypos, xpos] = -1 # negative pixel to be picked up below and corrected back to none
mask[iz, ypos, xpos] = -1
use_3d_mask = True ##
if use_3d_mask:
work = [] # 3 layers
#mask = masks[0]
for mask in masks:
tmp = []
#i = 0
for i in range(skip_low, mask.shape[0]-skip_high):
#img1 = mask[i-1]
#img2 = mask[i]
#img3 = mask[i+1]
#rgb = np.stack((img1, img2, img3))
rgb = np.stack(mask[i-skip_low:i+skip_high+1])
tmp.append(rgb)
work.append(np.array(tmp))
masks1 = np.stack([val for sublist in work for val in sublist ] )# NO skipping as we have already cut the first and the last layer
else:
masks1 = np.stack([val for sublist in masks for val in sublist[skip_low:-skip_high]] ) # skip one element at the beginning and at the end
#masks1 = np.stack(flattened1) # 10187
#only_with_nudels = True
if only_with_nudels:
if use_3d_mask:
#nudels_pix_count = np.sum(np.abs(masks1[:,skip_low]), axis = (1,2)) ## CHANGE IT WED - use ANY i.e. remove skip_low abd added for the potential blanks; modified that the centre mask be mask!
nudels_pix_count = np.sum(np.abs(masks1), axis = (1,2,3)) ## USE ANY March 1; CHANGE IT WED - use ANY i.e. remove skip_low, and added for the potential blanks; modified so that the centre mask is used as the mask!
else:
nudels_pix_count = np.sum(np.abs(masks1), axis = (1,2))
scans1 = scans1[nudels_pix_count != 0]
masks1 = masks1[nudels_pix_count != 0]
#blank_mask_factor = np.sign(nudels_pix_count)[nudels_pix_count != 0]
#sum(blank_mask_factor)
#blank_mask_factor[blank_mask_factor <0] = 0
#mask1_orig = masks1
#np.sum(mask1_orig)
#np.min(masks1)
#masks1 = masks1[nudels_pix_count != 0] * blank_mask_factor # 493 -- circa 5 % with nudeles oters without; 232 if we skip over every 3 layers and use a 3d mask
#masks1[masks1 < 0] = 0 # !!!!!!!!!!!!!! in GRID version do NOT do that - do it in the key version 493 -- circa 5 % with nudeles oters without; 232 if we skip over every 3 layers and use a 3d mask
#masks1[nudels_pix_count < 0] = 0 # making empty mask for balancing training set
#nudels2 = np.where(masks1 == 1, scans1, -4000*np.ones(( masks1.shape[1], masks1.shape[2))) ### was -2000
#nudels1 = np.where(masks1 == 1, scans1, masks1 - 4000) ### was -2000
#nudles1_rf = nudels1.flatten()
#nudles1_rf = nudles1_rf[nudles1_rf > -4000]
scans1 = normalize(scans1)
### after this scans1 becomes float64 ....
useTestPlot = False
if useTestPlot:
plt.hist(scans1.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
#for i in range(scans.shape[0]):
for i in range(20):
print ('scan '+str(i))
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(scans1[i,:,:],cmap=plt.cm.gray)
ax[1].imshow(scans[i,:,:],cmap=plt.cm.gray)
ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
#np.mean(scans) # 0.028367 / 0.0204
#np.min(scans) # 0
#np.max(scans) #
scans1 = zero_center(scans1)
#masks = np.copy(masks1)
scans1 = scans1.astype(np.float32) # make it float 32 (not point carring 64, also because kears operates on float32, and originals were in int
## if needed do the resize here .... (img_rows and img_cols are global values defined externally)
#img_rows = scans.shape[1] ### redefine img_rows/ cols and add resize if needed
#img_cols = scans.shape[2]
# scans already are in the tensor mode with 3 rgb elements ....
#scans1 = scans ## no change
#scans1=np.zeros((scans.shape[0],3,img_rows,img_cols))
#for i in range(scans.shape[0]):
# img=scans[i,:,:]
# ###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
# scans1[i,0,:,:]=img
if use_3d_mask:
done = 1 # nothing to do
else:
masks = np.copy(masks1)
masks1=np.zeros((masks.shape[0],1,img_rows,img_cols))
for i in range(masks.shape[0]):
img=masks[i,:,:]
###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
masks1[i,0,:,:]=img
return scans1, masks1
def convert_scans_and_masks_3d(scans, masks, only_with_nudels):
# reuse scan to reduce memory footprint
work = [] # 3 layers
#scan = scans[0]
for scan in scans:
tmp = []
#i = 0
#for i in range(1, scan.shape[0]-1, 3): # SKIP EVERY 3
for i in range(1, scan.shape[0]-1):
img1 = scan[i-1]
img2 = scan[i]
img3 = scan[i+1]
rgb = np.stack((img1, img2, img3))
tmp.append(rgb)
work.append(np.array(tmp))
#flattened1 = [val for sublist in work for val in sublist ] # NO skipping as we have already cut the first and the last layer
#scans1 = np.stack(flattened1)
scans1 = np.stack([val for sublist in work for val in sublist ]) # NO skipping as we have already cut the first and the last layer
work = []
use_3d_mask = False
if use_3d_mask:
work = [] # 3 layers
#mask = masks[0]
for mask in masks:
tmp = []
#i = 0
for i in range(1, mask.shape[0]-1, 3): # SKIP EVERY 3
img1 = mask[i-1]
img2 = mask[i]
img3 = mask[i+1]
rgb = np.stack((img1, img2, img3))
tmp.append(rgb)
work.append(np.array(tmp))
masks1 = np.stack([val for sublist in work for val in sublist ] )# NO skipping as we have already cut the first and the last layer
else:
masks1 = np.stack([val for sublist in masks for val in sublist[1:-1]] ) # skip one element at the beginning and at the end
#masks1 = np.stack(flattened1) # 10187
#only_with_nudels = True
if only_with_nudels:
if use_3d_mask:
nudels_pix_count = np.sum(masks1, axis = (1,2,3))
else:
nudels_pix_count = np.sum(masks1, axis = (1,2))
scans1 = scans1[nudels_pix_count>0]
masks1 = masks1[nudels_pix_count>0] # 493 -- circa 5 % with nodules, others without; 232 if we skip over every 3 layers and use a 3d mask
#nudels2 = np.where(masks1 == 1, scans1, -4000*np.ones(( masks1.shape[1], masks1.shape[2))) ### was -2000
#nudels1 = np.where(masks1 == 1, scans1, masks1 - 4000) ### was -2000
#nudles1_rf = nudels1.flatten()
#nudles1_rf = nudles1_rf[nudles1_rf > -4000]
scans1 = normalize(scans1)
useTestPlot = False
if useTestPlot:
plt.hist(scans1.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
#for i in range(scans.shape[0]):
for i in range(20):
print ('scan '+str(i))
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(scans1[i,:,:],cmap=plt.cm.gray)
ax[1].imshow(scans[i,:,:],cmap=plt.cm.gray)
ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
#np.mean(scans) # 0.028367 / 0.0204
#np.min(scans) # 0
#np.max(scans) #
scans1 = zero_center(scans1)
#masks = np.copy(masks1)
## if needed do the resize here .... (img_rows and img_cols are global values defined externally)
#img_rows = scans.shape[1] ### redefine img_rows/ cols and add resize if needed
#img_cols = scans.shape[2]
# scans already are in the tensor mode with 3 rgb elements ....
#scans1 = scans ## no change
#scans1=np.zeros((scans.shape[0],3,img_rows,img_cols))
#for i in range(scans.shape[0]):
# img=scans[i,:,:]
# ###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
# scans1[i,0,:,:]=img
if use_3d_mask:
done = 1 # nothing to do
else:
masks = np.copy(masks1)
masks1=np.zeros((masks.shape[0],1,img_rows,img_cols))
for i in range(masks.shape[0]):
img=masks[i,:,:]
###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
masks1[i,0,:,:]=img
return scans1, masks1
def view_scans(scans):
#%matplotlib inline
for i in range(scans.shape[0]):
print ('scan '+str(i))
plt.imshow(scans[i,0,:,:], cmap=plt.cm.gray)
plt.show()
def view_scans_widget(scans):
#%matplotlib tk
for i in range(scans.shape[0]):
plt.figure(figsize=(7,7))
plt.imshow(scans[i,0,:,:], cmap=plt.cm.gray)
plt.show()
def get_masks(scans,masks_list):
#%matplotlib inline
scans1=scans.copy()
maxv=255
masks=np.zeros(shape=(scans.shape[0],1,img_rows,img_cols))
for i_m in range(len(masks_list)):
for i in range(-masks_list[i_m][3],masks_list[i_m][3]+1):
for j in range(-masks_list[i_m][3],masks_list[i_m][3]+1):
masks[masks_list[i_m][0],0,masks_list[i_m][2]+i,masks_list[i_m][1]+j]=1
for i1 in range(-masks_list[i_m][3],masks_list[i_m][3]+1):
scans1[masks_list[i_m][0],0,masks_list[i_m][2]+i1,masks_list[i_m][1]+masks_list[i_m][3]]=maxv
scans1[masks_list[i_m][0],0,masks_list[i_m][2]+i1,masks_list[i_m][1]-masks_list[i_m][3]]=maxv
scans1[masks_list[i_m][0],0,masks_list[i_m][2]+masks_list[i_m][3],masks_list[i_m][1]+i1]=maxv
scans1[masks_list[i_m][0],0,masks_list[i_m][2]-masks_list[i_m][3],masks_list[i_m][1]+i1]=maxv
for i in range(scans.shape[0]):
print ('scan '+str(i))
f, ax = plt.subplots(1, 2,figsize=(10,5))
ax[0].imshow(scans1[i,0,:,:],cmap=plt.cm.gray)
ax[1].imshow(masks[i,0,:,:],cmap=plt.cm.gray)
plt.show()
return(masks)
def augmentation(scans,masks,n):
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=25, # was 25
width_shift_range=0.3, # ws 0.3; was 0.1# tried 0.01
height_shift_range=0.3, # was 0.3; was 0.1 # tried 0.01
horizontal_flip=True,
vertical_flip=True,
zoom_range=False)
i=0
scans_g=scans.copy()
for batch in datagen.flow(scans, batch_size=1, seed=1000):
scans_g=np.vstack([scans_g,batch])
i += 1
if i > n:
break
i=0
masks_g=masks.copy()
for batch in datagen.flow(masks, batch_size=1, seed=1000):
masks_g=np.vstack([masks_g,batch])
i += 1
if i > n:
break
return((scans_g,masks_g))
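# Usage sketch (illustrative): the same seed (1000) is passed to both datagen.flow() calls so
# that the random transforms applied to the scans and to the masks stay in sync; n augmented
# samples are appended to the originals.
# scans_aug, masks_aug = augmentation(scans, masks, n=100)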
def hu_to_pix (hu):
return (hu - MIN_BOUND) / (MAX_BOUND - MIN_BOUND) - PIXEL_MEAN
def pix_to_hu (pix):
return (pix + PIXEL_MEAN) * (MAX_BOUND - MIN_BOUND) + MIN_BOUND
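# Consistency note (illustrative): hu_to_pix and pix_to_hu are exact inverses, e.g.
# pix_to_hu(hu_to_pix(-1000.0)) returns -1000.0, given the MIN_BOUND/MAX_BOUND/PIXEL_MEAN
# constants defined elsewhere in this file.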
from scipy import stats
def eliminate_incorrectly_segmented(scans, masks):
skip = dim // 2 # To Change see below ...
sxm = scans * masks
near_air_thresh = (-900 - MIN_BOUND) / (MAX_BOUND - MIN_BOUND) - PIXEL_MEAN # version 3 # -750 gives one more (for 0_3, d4); -600 gives 15 more than -900
#near_air_thresh # 0.08628 for -840; 0.067 for -867; 0.1148 for -800
cnt = 0
for i in range(sxm.shape[0]):
#sx = sxm[i,skip]
sx = sxm[i]
mx = masks[i]
if np.sum(mx) > 0: # only check non-blanks ...(keep blanks)
sx_max = np.max(sx)
if (sx_max) <= near_air_thresh:
cnt += 1
print ("Entry, count # and max: ", i, cnt, sx_max)
print (stats.describe(sx, axis=None))
#plt.imshow(sx, cmap='gray')
plt.imshow(sx[0,skip], cmap='gray') # selecting the mid entry
plt.show()
s_eliminate = np.max(sxm, axis=(1,2,3,4)) <= near_air_thresh # 3d
s_preserve = np.max(sxm, axis=(1,2,3,4)) > near_air_thresh #3d
s_eliminate_sum = sum(s_eliminate)
s_preserve_sum = sum(s_preserve)
print ("Eliminate, preserve =", s_eliminate_sum, s_preserve_sum)
masks = masks[s_preserve]
scans = scans[s_preserve]
del(sxm)
return scans, masks
# the following 3 functions to read LUNA files are from: https://www.kaggle.com/arnavkj95/data-science-bowl-2017/candidate-generation-and-luna16-preprocessing/notebook
'''
This function reads a '.mhd' file using SimpleITK and returns the image array,
origin and spacing of the image.
'''
def load_itk(filename):
# Reads the image using SimpleITK
itkimage = sitk.ReadImage(filename)
# Convert the image to a numpy array first and then shuffle the dimensions to get axis in the order z,y,x
ct_scan = sitk.GetArrayFromImage(itkimage)
# Read the origin of the ct_scan, will be used to convert the coordinates from world to voxel and vice versa.
origin = np.array(list(reversed(itkimage.GetOrigin())))
# Read the spacing along each dimension
spacing = np.array(list(reversed(itkimage.GetSpacing())))
return ct_scan, origin, spacing
'''
This function is used to convert the world coordinates to voxel coordinates using
the origin and spacing of the ct_scan
'''
def world_2_voxel(world_coordinates, origin, spacing):
stretched_voxel_coordinates = np.absolute(world_coordinates - origin)
voxel_coordinates = stretched_voxel_coordinates / spacing
return voxel_coordinates
'''
This function is used to convert the voxel coordinates to world coordinates using
the origin and spacing of the ct_scan.
'''
def voxel_2_world(voxel_coordinates, origin, spacing):
stretched_voxel_coordinates = voxel_coordinates * spacing
world_coordinates = stretched_voxel_coordinates + origin
return world_coordinates
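# Illustrative round trip (made-up origin/spacing, not LUNA16 values):
# origin = np.array([-195.0, -180.0, -370.0]); spacing = np.array([2.0, 0.7, 0.7])
# voxel_2_world(world_2_voxel(w, origin, spacing), origin, spacing) recovers w only when
# w >= origin component-wise, because world_2_voxel takes np.absolute of the offset.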
def seq(start, stop, step=1):
n = int(round((stop - start)/float(step)))
if n > 1:
return([start + step*i for i in range(n+1)])
else:
return([])
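# Quick illustration of seq() (hypothetical values): the endpoint is included because
# range(n+1) is used, and a request with fewer than two steps returns an empty list.
# seq(-3, 3, 2) -> [-3, -1, 1, 3]
# seq(0, 1, 2)  -> []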
'''
This function is used to create spherical regions in binary masks
at the given locations and radius.
'''
def draw_circles(image,cands,origin,spacing):
#make empty matrix, which will be filled with the mask
image_mask = np.zeros(image.shape, dtype=np.int16)
#run over all the nodules in the lungs
for ca in cands.values:
#get middle x-, y-, and z- world coordinates of the nodule
#radius = np.ceil(ca[4])/2 ## original: replaced the ceil with a very minor increase of 1% ....
radius = (ca[4])/2 + 0.51 * spacing[0] # increasing by circa half of the z-direction spacing (trying to capture a wider region/border for learning and to address the rough net)
coord_x = ca[1]
coord_y = ca[2]
coord_z = ca[3]
image_coord = np.array((coord_z,coord_y,coord_x))
#determine voxel coordinate given the worldcoordinate
image_coord = world_2_voxel(image_coord,origin,spacing)
#determine the range of the nodule
#noduleRange = seq(-radius, radius, RESIZE_SPACING[0]) # original, uniform spacing
noduleRange_z = seq(-radius, radius, spacing[0])
noduleRange_y = seq(-radius, radius, spacing[1])
noduleRange_x = seq(-radius, radius, spacing[2])
#x = y = z = -2
#create the mask
for x in noduleRange_x:
for y in noduleRange_y:
for z in noduleRange_z:
coords = world_2_voxel(np.array((coord_z+z,coord_y+y,coord_x+x)),origin,spacing)
#if (np.linalg.norm(image_coord-coords) * RESIZE_SPACING[0]) < radius: ### original (constrained to a uniform RESIZE)
if (np.linalg.norm((image_coord-coords) * spacing)) < radius:
image_mask[int(np.round(coords[0])),int(np.round(coords[1])),int(np.round(coords[2]))] = int(1)
return image_mask
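# Usage sketch (illustrative): `cands` is the annotations DataFrame slice for one series,
# with columns (seriesuid, coordX, coordY, coordZ, diameter_mm), so ca[1:5] are x, y, z and
# the diameter. The returned mask has the same shape as `image`, with 1s inside each
# (slightly enlarged) nodule sphere and 0s elsewhere.
# nodule_mask = draw_circles(ct_scan, cands, origin, spacing)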
'''
This function takes the path to a '.mhd' file as input and
is used to create the nodule masks and segmented lungs after
rescaling to 1mm size in all directions. It saved them in the .npz
format. It also takes the list of nodule locations in that CT Scan as
input.
'''
def load_scans_masks_or_blanks(luna_subset, useAll, use_unsegmented=True):
#luna_subset = "[0-6]"
LUNA_DIR = LUNA_BASE_DIR % luna_subset
files = glob.glob(''.join([LUNA_DIR,'*.mhd']))
annotations = pd.read_csv(LUNA_ANNOTATIONS)
annotations.head()
candidates = pd.read_csv(LUNA_CANDIDATES)
candidates_false = candidates[candidates["class"] == 0] # only select the false candidates
candidates_true = candidates[candidates["class"] == 1] # only select the true candidates
sids = []
scans = []
masks = []
blankids = [] # class/id whether scan is with nodule or without, 0 - with, 1 - without
cnt = 0
skipped = 0
#file=files[7]
for file in files:
imagePath = file
seriesuid = file[file.rindex('/')+1:] # everything after the last slash
seriesuid = seriesuid[:len(seriesuid)-len(".mhd")] # cut out the suffix to get the uid
path = imagePath[:len(imagePath)-len(".mhd")] # cut out the suffix to get the uid
if use_unsegmented:
path_segmented = path.replace("original_lungs", "lungs_2x2x2", 1)
else:
path_segmented = path.replace("original_lungs", "segmented_2x2x2", 1)
cands = annotations[seriesuid == annotations.seriesuid] # select the annotations for the current series
ctrue = candidates_true[seriesuid == candidates_true.seriesuid]
cfalse = candidates_false[seriesuid == candidates_false.seriesuid]
blankid = 1 if (len(cands) == 0 and len(ctrue) == 0 and len(cfalse) > 0) else 0
skip_nodules_entirely = False # was False
use_only_nodules = False
if skip_nodules_entirely and blankid ==0:
## manual switch to generate extra data for the corrupted set
print("Skipping nodules (skip_nodules_entirely) ", seriesuid)
skipped += 1
elif use_only_nodules and (len(cands) == 0):
## manual switch to generate only nodules data due lack of time and repeat etc time pressures
print("Skipping blanks (use_only_nodules) ", seriesuid)
skipped += 1
else: # NORMAL operations
if (len(cands) > 0 or
(blankid >0) or
useAll):
sids.append(seriesuid)
blankids.append(blankid)
if use_unsegmented:
scan_z = np.load(''.join((path_segmented + '_lung' + '.npz')))
else:
scan_z = np.load(''.join((path_segmented + '_lung_seg' + '.npz')))
scan = scan_z['arr_0']
mask_z = np.load(''.join((path_segmented + '_nodule_mask_wblanks' + '.npz')))
mask = mask_z['arr_0']
testPlot = False
if testPlot:
maskcheck_z = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
maskcheck = maskcheck_z['arr_0']
f, ax = plt.subplots(1, 2, figsize=(10,5))
ax[0].imshow(np.sum(np.abs(maskcheck), axis=0),cmap=plt.cm.gray)
ax[1].imshow(np.sum(np.abs(mask), axis=0),cmap=plt.cm.gray)
#ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
scans.append(scan)
masks.append(mask)
cnt += 1
else:
print("Skipping non-nodules and non-blank entry ", seriesuid)
skipped += 1
print ("Summary: cnt & skipped: ", cnt, skipped)
return scans, masks, sids, blankids
MIN_BOUND = -1000.0
MAX_BOUND = 400.0
def normalize(image):
image = (image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
image[image>1] = 1.
image[image<0] = 0.
return image
PIXEL_MEAN = 0.028 ## for LUNA subset 0 and our preprocessing, only with nudels was 0.028, all was 0.020421744071562546 (in the tutorial they used 0.25)
def zero_center(image):
image = image - PIXEL_MEAN
return image
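# Worked example (illustrative, not part of the original pipeline): air at -1000 HU maps to 0.0
# and soft tissue at 0 HU to ~0.714 after normalize(); zero_center() then subtracts PIXEL_MEAN.
_hu_example = normalize(np.array([-1000.0, 0.0, 400.0]))  # -> [0.0, ~0.714, 1.0]
_hu_example = zero_center(_hu_example)                    # -> [-0.028, ~0.686, 0.972]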
def convert_scans_and_masks_xd3(scans, masks, only_with_nudels, dim=3, crop=16, blanks_per_axis = 4, add_blank_spacing_size=0, add_blank_layers = 0):
# reuse scan to reduce memory footprint
dim_orig = dim
skip_low = dim // 2 # dim should be odd -- it is recalculated anyway to this end
skip_high = dim -skip_low - 1
do_not_allow_even_dim = False ## now we allow odd numbers ...
if do_not_allow_even_dim:
dim = 2 * skip_low + 1
skip_low = dim // 2
skip_high = dim -skip_low - 1
if dim != dim_orig:
print ("convert_scans_and_masks_x: Dim must be uneven, corrected from .. to:", dim_orig, dim)
work = []
for scan in scans:
tmp = []
for i in range(skip_low, scan.shape[0]-skip_high):
#img1 = scan[i-1]
#img2 = scan[i]
#img3 = scan[i+1]
#rgb = np.stack((img1, img2, img3))
rgb = np.stack(scan[i-skip_low:i+skip_high+1])
tmp.append(rgb)
work.append(np.array(tmp))
scans1 = np.stack([val for sublist in work for val in sublist ]) # NO skipping as we have already cut the first and the last layer
work = []
dxrange = scans[0].shape[-1] - 2 * crop
dyrange = scans[0].shape[-2] - 2 * crop
if add_blank_spacing_size > 0:
for mask in masks:
if (np.min(mask) < 0):
## we have a blank
### ADD artificial mask pixel every add_blank_spacing layers for each blank id ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
for i in range(skip_low+(add_blank_spacing_size//2), mask.shape[0]-skip_high, add_blank_spacing_size):
mask[i, np.random.randint(0,dyrange), np.random.randint(0,dxrange)] = -1 # negative pixel to be picked up below and corrected back to none
if add_blank_layers > 0:
for mask in masks:
if (np.min(mask) < 0):
dzrange = mask.shape[0]-dim
## we have a blank
### ADD artificial mask pixel every add_blank_spacing layers for each blank id ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
for k in range(add_blank_layers):
i = np.random.randint(0, dzrange) + skip_low
#print ("dz position, random, mask.shape ", i, mask.shape)
mask[i, np.random.randint(0,dyrange), np.random.randint(0,dxrange)] = -1 # negative pixel to be picked up below and corrected back to none
add_random_blanks_in_blanks = False ## NO need for the extra random blank pixels now, 20170327
if add_random_blanks_in_blanks:
for mask in masks:
if (np.min(mask) < 0):
## we have a blank
### ADD artificial mask pixel every add_blank_spacing layers for each blank id ...
# set the (0,0) pixel to -1 every add_blank_spacing_size for blanks ..
#zlow = skip_low
#zhigh = mask.shape[0]-skip_high
pix_sum = np.sum(mask, axis=(1,2))
idx_blanks = np.min(mask, axis=(1,2)) < 0 ## don't use it - let's vary the position across the space
for iz in range(mask.shape[0]):
if (np.min(mask[iz])) < 0:
for ix in range(blanks_per_axis):
#xpos = crop + (ix)*dx + dx //2
for iy in range(blanks_per_axis):
#ypos = crop + (iy)*dy + dy //2
xpos = crop + np.random.randint(0,dxrange)
ypos = crop + np.random.randint(0,dyrange)
#print (iz, xpos, ypos)
#mask[idx_blanks, ypos, xpos] = -1 # negative pixel to be picked up below and corrected back to none
mask[iz, ypos, xpos] = -1
use_3d_mask = True ##
if use_3d_mask:
work = [] # 3 layers
for mask in masks:
tmp = []
#i = 0
for i in range(skip_low, mask.shape[0]-skip_high):
rgb = np.stack(mask[i-skip_low:i+skip_high+1])
tmp.append(rgb)
work.append(np.array(tmp))
masks1 = np.stack([val for sublist in work for val in sublist ] )# NO skipping as we have already cut the first and the last layer
else:
masks1 = np.stack([val for sublist in masks for val in sublist[skip_low:-skip_high]] ) # skip one element at the beginning and at the end
if only_with_nudels:
if use_3d_mask:
nudels_pix_count = np.sum(np.abs(masks1), axis = (1,2,3)) ## USE ANY March 1; CHANGE IT WED - use ANY i.e. remove skip_low and added for the potential blanks; modified so that the centre mask is the mask!
else:
nudels_pix_count = np.sum(np.abs(masks1), axis = (1,2))
scans1 = scans1[nudels_pix_count != 0]
masks1 = masks1[nudels_pix_count != 0]
scans1 = normalize(scans1)
useTestPlot = False
if useTestPlot:
plt.hist(scans1.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
for i in range(20):
print ('scan '+str(i))
f, ax = plt.subplots(1, 3, figsize=(15,5))
ax[0].imshow(scans1[i,:,:],cmap=plt.cm.gray)
ax[1].imshow(scans[i,:,:],cmap=plt.cm.gray)
ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
scans1 = zero_center(scans1)
scans1 = scans1.astype(np.float32) # make it float32 (not float64); Keras operates on float32 and the originals were int
if use_3d_mask:
done = 1 # nothing to do
else:
masks = np.copy(masks1)
masks1=np.zeros((masks.shape[0],1,img_rows,img_cols))
for i in range(masks.shape[0]):
img=masks[i,:,:]
###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
masks1[i,0,:,:]=img
return scans1, masks1
def eliminate_incorrectly_segmented(scans, masks):
skip = dim // 2 # To Change see below ...
sxm = scans * masks
near_air_thresh = (-900 - MIN_BOUND) / (MAX_BOUND - MIN_BOUND) - PIXEL_MEAN # version 3 # -750 gives one more (for 0_3, d4); -600 gives 15 more than -900
#near_air_thresh #0.08628 for -840 # 0.067 # for -867; 0.1148 for -800
cnt = 0
for i in range(sxm.shape[0]):
#sx = sxm[i,skip]
sx = sxm[i]
mx = masks[i]
if np.sum(mx) > 0: # only check non-blanks ...(keep blanks)
sx_max = np.max(sx)
if (sx_max) <= near_air_thresh:
cnt += 1
print ("Entry, count # and max: ", i, cnt, sx_max)
print (stats.describe(sx, axis=None))
#plt.imshow(sx, cmap='gray')
plt.imshow(sx[0,skip], cmap='gray') # selecting the mid entry
plt.show()
s_eliminate = np.max(sxm, axis=(1,2,3,4)) <= near_air_thresh # 3d
s_preserve = np.max(sxm, axis=(1,2,3,4)) > near_air_thresh #3d
s_eliminate_sum = sum(s_eliminate)
s_preserve_sum = sum(s_preserve)
print ("Eliminate, preserve =", s_eliminate_sum, s_preserve_sum)
masks = masks[s_preserve]
scans = scans[s_preserve]
del(sxm)
return scans, masks
def grid_data(source, grid=32, crop=16, expand=12):
gridsize = grid + 2 * expand
stacksize = source.shape[0]
height = source.shape[3] # should be 224 for our data
width = source.shape[4]
gridheight = (height - 2 * crop) // grid # should be 6 for our data
gridwidth = (width - 2 * crop) // grid
cells = []
for j in range(gridheight):
for i in range (gridwidth):
cell = source[:,:,:, crop+j*grid-expand:crop+(j+1)*grid+expand, crop+i*grid-expand:crop+(i+1)*grid+expand]
cells.append(cell)
cells = np.vstack (cells)
return cells, gridwidth, gridheight
def data_from_grid (cells, gridwidth, gridheight, grid=32):
height = cells.shape[3] # should be 224 for our data
width = cells.shape[4]
crop = (width - grid ) // 2 ## for simplicity we are assuming the same crop (and grid) vertically and horizontally
dspacing = gridwidth * gridheight
layers = cells.shape[0] // dspacing
if crop > 0: # do NOT crop with 0 as we get empty cells ...
cells = cells[:,:,:,crop:-crop,crop:-crop]
if crop > 2*grid:
print ("data_from_grid Warning, unusually large crop (> 2*grid); crop, & grid, gridwith, gridheight: ", (crop, grid, gridwidth, gridheight))
shape = cells.shape
new_shape_1_dim = shape[0]// (gridwidth * gridheight) # ws // 36 -- Improved on 20170306
new_shape = (gridwidth * gridheight, new_shape_1_dim, ) + tuple([x for x in shape][1:]) # was 36, Improved on 20170306
cells = np.reshape(cells, new_shape)
cells = np.moveaxis(cells, 0, -3)
shape = cells.shape
new_shape2 = tuple([x for x in shape[0:3]]) + (gridheight, gridwidth,) + tuple([x for x in shape[4:]])
cells = np.reshape(cells, new_shape2)
cells = cells.swapaxes(-2, -3)
shape = cells.shape
combine_shape =tuple([x for x in shape[0:3]]) + (shape[-4]*shape[-3], shape[-2]*shape[-1],)
cells = np.reshape(cells, combine_shape)
return cells
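# Shape sketch (illustrative): with `source` of shape (N, C, D, 224, 224), grid=32, crop=16
# and expand=0, grid_data() gives gridwidth = gridheight = 6 and a cell stack of shape
# (36*N, C, D, 32, 32); data_from_grid() reassembles such a stack back into
# (N, C, D, 192, 192), i.e. the cropped interior of the original images.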
def data_from_grid_by_proximity (cells, gridwidth, gridheight, grid=32):
# disperse the sequential dats into layers and then use data_from_grid
dspacing = gridwidth * gridheight
layers = cells.shape[0] // dspacing
shape = cells.shape
new_shape_1_dim = shape[0]// (gridwidth * gridheight) # ws // 36 -- Improved on 20170306
### NOTE tha we invert the order of shapes below to get the required proximity type ordering
new_shape = (new_shape_1_dim, gridwidth * gridheight, ) + tuple([x for x in shape][1:]) # was 36, Improved on 20170306
# swap ordering of axes
cells = np.reshape(cells, new_shape)
cells = cells.swapaxes(0, 1)
cells = np.reshape(cells, shape)
cells = data_from_grid (cells, gridwidth, gridheight, grid)
return cells
def find_voxels(dim, grid, images3, images3_seg, pmasks3, nodules_threshold=0.999, voxelscountmax = 1000, mid_mask_only = True, find_blanks_also = True, centralcutonly=True):
zsel = dim // 2
sstart = 0
send = images3.shape[0]
if mid_mask_only:
pmav = pmasks3[:,0,dim // 2] # using the mid mask
pmav.shape
else:
pmav = pmasks3[:,0] ### NOTE this variant has NOT been tested fully YET
run_UNNEEDED_code = False
ims = images3[sstart:send,0,zsel] # selecting the zsel cut for nodules calc ...
ims_seg = images3_seg[sstart:send,0,zsel]
ims.shape
#pms = pmasks3[sstart:send,0,0]
pms = pmav[sstart:send]
images3.shape
thresh = nodules_threshold # for testing , set it here and skip the loop
segment = 2 # for compatibility of the naming convention
# threshold the predicted masks ...
#for thresh in [0.5, 0.9, 0.9999]:
#for thresh in [0.5, 0.75, 0.9, 0.95, 0.98, 0.99, 0.999, 0.9999, 0.99999, 0.999999, 0.9999999]:
for thresh in [nodules_threshold]: # just this one - keeping the loop for a while
if find_blanks_also:
idx = np.abs(pms) > thresh
else:
idx = pms > thresh
idx.shape
nodls = np.zeros(pms.shape).astype(np.int16)
nodls[idx] = 1
nx = nodls[idx]
nodules_pixels = ims[idx] # flat
nodules_hu = pix_to_hu(nodules_pixels)
part_name = ''.join([str(segment), '_', str(thresh)])
### DO NOT do them here
use_corrected_nodules = True # do it below from 20170311
if not use_corrected_nodules:
df = hu_describe(nodules_hu, uid=uid, part=part_name)
add_projections = False
axis = 1
nodules_projections = []
for axis in range(3):
nodls_projection = np.max(nodls, axis=axis)
naxis_name = ''.join(["naxis_", str(axis),"_", part_name])
if add_projections:
df[naxis_name] = np.sum(nodls_projection)
nodules_projections.append(nodls_projection)
idx.shape
## find the individual nodules ... as per the specified probabilities
labs, labs_num = measure.label(idx, return_num = True, neighbors = 8 , background = 0) # label the nodules in 3d, allow for diagonal connectivity
voxels = []
vmasks = []
if labs_num > 0 and labs.shape[0] >1: # checking for height > 1 is needed as measure.regionprops fails when it is not, for instance for shape (1, 20, 20) we get ValueError: Label and intensity image must have the same shape.
print("Befpre measure.regionprops, labs & intensity shapes: ", labs.shape, ims.shape)
regprop = measure.regionprops(labs, intensity_image=ims) # probkem here on 20170327
voxel_volume = np.product(RESIZE_SPACING)
areas = [rp.area for rp in regprop] # this is in cubic mm now (i.e. should really be called volume)
volumes = [rp.area * voxel_volume for rp in regprop]
diameters = [2 * (3 * volume / (4 * np.pi)) ** (1.0 / 3.0) for volume in volumes] # equivalent-sphere diameter
labs_ids = [rp.label for rp in regprop]
#ls = [rp.label for rp in regprop]
max_val = np.max(areas)
max_index = areas.index(max_val)
max_label = regprop[max_index].label
bboxes = [r.bbox for r in regprop]
idl = labs == regprop[max_index].label # 400
nodules_pixels = ims[idl]
nodules_hu = pix_to_hu(nodules_pixels)
if run_UNNEEDED_code:
nodules_hu_reg = []
for rp in regprop:
idl = labs == rp.label
nodules_pixels = ims[idl]
nodules_hu = pix_to_hu(nodules_pixels)
nodules_hu_reg.append(nodules_hu) # NOTE some are out of interest, i.e. are equal all (or near all) to MAX_BOUND (400)
dfn = pd.DataFrame(
{
"area": areas,
"diameter": diameters,
"bbox": bboxes
},
index=labs_ids)
nodules_count = len(dfn) # 524 for file 1 of part 8 ..
max_nodules_count = voxelscountmax
n=0
for n in range(max_nodules_count):
if n < len(dfn): # use the nodule data, otherwise empty
bb = dfn.iloc[n]["bbox"]
zmin = bb[0]
zmax = bb[3]
zlen = bb[3] - bb[0]
ylen = bb[4] - bb[1]
xlen = bb[5] - bb[2]
xmin = np.max([bb[2] - np.max([(grid - xlen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
xmax = np.min([xmin + grid, ims.shape[2]]) ## do not go beyond the right side
xmin = xmax - grid
if (xmax - xmin) != grid:
print ("ERROR in calculating the cut-offs ..., xmin, xmax =", xmin, xmax)
ymin = np.max([bb[1] - np.max([(grid - ylen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
ymax = np.min([ymin + grid, ims.shape[1]]) ## do not go beyond the other edge
ymin = ymax - grid
if (ymax - ymin) != grid:
print ("ERROR in calculating the cut-offs ..., ymin, ymax =", ymin, ymax)
zmin_sel = zmin
zmax_sel = zmax
if centralcutonly: #include only one voxel representation
zmin_sel = zmin + zlen // 2
zmax_sel = zmin_sel + 1
iz=zmin_sel # for testing
for iz in range(zmin_sel,zmax_sel):
voxel = images3[iz,:,:, ymin:ymax, xmin:xmax]
vmask = pmasks3[iz,:,:, ymin:ymax, xmin:xmax]
voxels.append(voxel)
vmasks.append(vmask)
testPlot = False
if testPlot:
print ('scan '+str(iz))
f, ax = plt.subplots(1, 8, figsize=(24,3))
ax[0].imshow(nodls[iz,ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
ax[1].imshow(ims[iz,ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
ax[2].imshow(images3_amp[iz,0, dim//2, ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
ax[3].imshow(voxel[0,dim//2],cmap=plt.cm.gray)
ax[4].imshow(voxel[0,dim],cmap=plt.cm.gray)
ax[5].imshow(voxel[0,dim+1],cmap=plt.cm.gray)
ax[6].imshow(voxel[0,dim+2],cmap=plt.cm.gray)
ax[7].imshow(voxel[0,dim+3],cmap=plt.cm.gray)
if len(voxels) > 0:
voxel_stack = np.stack(voxels)
vmask_stack = np.stack(vmasks)
else:
print_warning = False
if print_warning:
print("WARNING, find_voxels, not single voxel found even though expected")
voxel_stack = []
vmask_stack = []
if testPlot:
print ('voxels count ', len(voxel_stack))
for ii in range(0,len(voxel_stack),len(voxel_stack)//10):
f, ax = plt.subplots(1, 2, figsize=(6,3))
ax[0].imshow(voxel_stack[ii, 0, dim // 2],cmap=plt.cm.gray)
ax[1].imshow(vmask_stack[ii, 0, dim // 2],cmap=plt.cm.gray)
return voxel_stack, vmask_stack
def measure_voxels(labs, ims):
#print("Befpre measure.regionprops, labs & intensity shapes: ", labs.shape, ims.shape)
regprop = measure.regionprops(labs, intensity_image=ims) # probkem here on 20170327
voxel_volume = np.product(RESIZE_SPACING)
areas = [rp.area for rp in regprop] # this is in cubic mm now (i.e. should really be called volume)
volumes = [rp.area * voxel_volume for rp in regprop]
diameters = [2 * (3 * volume / (4 * np.pi)) ** (1.0 / 3.0) for volume in volumes] # equivalent-sphere diameter
labs_ids = [rp.label for rp in regprop]
#ls = [rp.label for rp in regprop]
max_val = np.max(areas)
max_index = areas.index(max_val)
max_label = regprop[max_index].label
bboxes = [r.bbox for r in regprop]
#max_ls = ls[max_index]
idl = labs == regprop[max_index].label # 400
nodules_pixels = ims[idl]
nodules_hu = pix_to_hu(nodules_pixels)
run_UNNEEDED_code = False
if run_UNNEEDED_code:
nodules_hu_reg = []
for rp in regprop:
idl = labs == rp.label
nodules_pixels = ims[idl]
nodules_hu = pix_to_hu(nodules_pixels)
nodules_hu_reg.append(nodules_hu) # NOTE some are out of interest, i.e. are equal all (or near all) to MAX_BOUND (400)
dfn = pd.DataFrame(
{
#"zcenter": zcenters,
#"ycenter": ycenters,
#"xcenter": xcenters,
"area": areas,
"diameter": diameters,
#"irreg_vol": irreg_vol,
#"irreg_shape": irreg_shape,
#"nodules_hu": nodules_hu_reg,
"bbox": bboxes
},
index=labs_ids)
return dfn
def find_voxels_and_blanks(dim, grid, images3, images3_seg, pmasks3, nodules_threshold=0.999, voxelscountmax = 1000, find_blanks_also = True, centralcutonly=True, diamin=2, diamax=10):
if np.sum(pmasks3) > 0:
centralcutonly = False # override centralcut for True nodule masks
zsel = dim // 2 if centralcutonly else range(0,dim)
pmav = pmasks3[:,0,zsel]
ims = images3[:,0,zsel] # selecting the zsel cut for nodules calc ...
ims_seg = images3_seg[:,0,zsel]
sstart = 0
send = images3.shape[0]
pms = pmav[sstart:send]
run_UNNEEDED_code = False
thresh = nodules_threshold # for testing , set it here and skip the loop
segment = 2 # for compatibility of the naming convention
for thresh in [nodules_threshold]: # just this one - keeping the loop for a while
if find_blanks_also:
idx = np.abs(pms) > thresh
else:
idx = pms > thresh
idx.shape
nodls = np.zeros(pms.shape).astype(np.int16)
nodls[idx] = 1
nx = nodls[idx]
volume = np.sum(nodls) # a check calculation ... counted as a count within hu_describe
nodules_pixels = ims[idx] # flat
nodules_hu = pix_to_hu(nodules_pixels)
part_name = ''.join([str(segment), '_', str(thresh)])
### DO NOT do them here
use_corrected_nodules = True # do it below from 20170311
if not use_corrected_nodules:
df = hu_describe(nodules_hu, uid=uid, part=part_name)
add_projections = False
if add_projections:
nodules_projections = []
for axis in range(3):
#sxm_projection = np.max(sxm, axis = axis)
nodls_projection = np.max(nodls, axis=axis)
naxis_name = ''.join(["naxis_", str(axis),"_", part_name])
if add_projections:
df[naxis_name] = np.sum(nodls_projection)
nodules_projections.append(nodls_projection)
voxels = []
vmasks = []
if not centralcutonly:
for k in range(idx.shape[0]):
if np.sum(idx[k]) > 0:
## find the nodules and take a cut
labs, labs_num = measure.label(idx[k], return_num = True, neighbors = 8 , background = 0) # label the nodules in 3d, allow for diagonal connectivity
dfn = measure_voxels(labs, ims[k])
nodules_count_0 = len(dfn)
## CUT out anything that is outside of the specified diam range
dfn = dfn[(dfn["diameter"] >= diamin) & ((dfn["diameter"] < diamax))] # CUT OUT anything that is less than 3 mm (essentially less than 7 voxels for 2x2x2
nodules_count = len(dfn) # 524 for file 1 of part 8 ..
max_nodules_count = voxelscountmax
n=0
for n in range(max_nodules_count):
if n < len(dfn): # use the nodule data, otherwise empty
bb = dfn.iloc[n]["bbox"]
zmin = bb[0]
zmax = bb[3]
zlen = bb[3] - bb[0]
ylen = bb[4] - bb[1]
xlen = bb[5] - bb[2]
xmin = np.max([bb[2] - np.max([(grid - xlen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
xmax = np.min([xmin + grid, ims.shape[-1]]) ## do not go beyond the right side
xmin = xmax - grid
if (xmax - xmin) != grid:
print ("ERROR in calculating the cut-offs ..., xmin, xmax =", xmin, xmax)
ymin = np.max([bb[1] - np.max([(grid - ylen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
ymax = np.min([ymin + grid, ims.shape[-2]]) ## do not go beyond the other edge
ymin = ymax - grid
if (ymax - ymin) != grid:
print ("ERROR in calculating the cut-offs ..., ymin, ymax =", ymin, ymax)
# here simply take the entire voxel we have
#images3.shape
voxel = images3[k,:,:, ymin:ymax, xmin:xmax]
vmask = pmasks3[k,:,:, ymin:ymax, xmin:xmax]
voxels.append(voxel)
vmasks.append(vmask)
#voxel.shape
else: # essentially taking the central cuts of the blanks
## find the individual nodules ... as per the specified probabilities
labs, labs_num = measure.label(idx, return_num = True, neighbors = 8 , background = 0) # label the nodules in 3d, allow for diagonal connectivity
if labs_num > 0 and labs.shape[0] >1: # checking for height > 1 is needed as measure.regionprops fails when it is not, for instance for shape (1, 20, 20) we get ValueError: Label and intensity image must have the same shape.
#labs_num_to_store = 5
dfn = measure_voxels(labs, ims)
nodules_count = len(dfn) # 524 for file 1 of part 8 ..
max_nodules_count = voxelscountmax
n=0
for n in range(max_nodules_count):
if n < len(dfn): # use the nodule data, otherwise empty
bb = dfn.iloc[n]["bbox"]
zmin = bb[0]
zmax = bb[3]
zlen = bb[3] - bb[0]
ylen = bb[4] - bb[1]
xlen = bb[5] - bb[2]
xmin = np.max([bb[2] - np.max([(grid - xlen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
xmax = np.min([xmin + grid, ims.shape[-1]]) ## do not go beyond the right side
xmin = xmax - grid
if (xmax - xmin) != grid:
print ("ERROR in calculating the cut-offs ..., xmin, xmax =", xmin, xmax)
ymin = np.max([bb[1] - np.max([(grid - ylen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
ymax = np.min([ymin + grid, ims.shape[-2]]) ## do not go beyond the other edge
ymin = ymax - grid
if (ymax - ymin) != grid:
print ("ERROR in calculating the cut-offs ..., ymin, ymax =", ymin, ymax)
zmin_sel = zmin
zmax_sel = zmax
if centralcutonly: #include only one voxel representation
zmin_sel = zmin + zlen // 2
zmax_sel = zmin_sel + 1
iz=zmin_sel # for testing
for iz in range(zmin_sel,zmax_sel):
voxel = images3[iz,:,:, ymin:ymax, xmin:xmax]
vmask = pmasks3[iz,:,:, ymin:ymax, xmin:xmax]
voxels.append(voxel)
vmasks.append(vmask)
testPlot = False
if testPlot:
print ('scan '+str(iz))
f, ax = plt.subplots(1, 8, figsize=(24,3))
ax[0].imshow(nodls[iz,ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
ax[1].imshow(ims[iz,ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
ax[2].imshow(images3_amp[iz,0, dim//2, ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
ax[3].imshow(voxel[0,dim//2],cmap=plt.cm.gray)
ax[4].imshow(voxel[0,dim],cmap=plt.cm.gray)
ax[5].imshow(voxel[0,dim+1],cmap=plt.cm.gray)
ax[6].imshow(voxel[0,dim+2],cmap=plt.cm.gray)
ax[7].imshow(voxel[0,dim+3],cmap=plt.cm.gray)
if len(voxels) > 0:
voxel_stack = np.stack(voxels)
vmask_stack = np.stack(vmasks)
else:
print_warning = False
if print_warning:
print("WARNING, find_voxels, not single voxel found even though expected")
voxel_stack = []
vmask_stack = []
#print("Nodules, voxels_aggregated: ", len(dfn), len(voxel_stack))
#np.savez_compressed(path_voxels_variant, voxel_stack)
testPlot = False
if testPlot:
print ('voxels count ', len(voxel_stack))
for ii in range(0,len(voxel_stack),len(voxel_stack)//10):
#plt.imshow(voxel_stack[ii,0,dim // 2], cmap=plt.cm.gray)
#plt.show()
f, ax = plt.subplots(1, 2, figsize=(6,3))
ax[0].imshow(voxel_stack[ii, 0, dim // 2],cmap=plt.cm.gray)
ax[1].imshow(vmask_stack[ii, 0, dim // 2],cmap=plt.cm.gray)
return voxel_stack, vmask_stack
def shuffle_scans_masks(scans, masks, seed):
np.random.seed(seed)
index_shuf = np.arange(len(scans))
np.random.shuffle(index_shuf)
scans = scans[index_shuf]
masks = masks[index_shuf]
return scans, masks
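# Minimal usage sketch (hypothetical toy arrays): the same permutation is applied to both
# inputs, so scan/mask pairs stay aligned after shuffling.
_s, _m = shuffle_scans_masks(np.arange(6), np.arange(6) * 10, seed=0)
assert np.all(_m == _s * 10)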
def create_unet_training_files (dim, recreate_grid8_March_data=True): # version with backward compatibility
grid8_March_data_str = "a" if recreate_grid8_March_data else "" # used for the original data/approach
# the main procedure to create training files for the nodule identifier (consolidated version, with backward compatibility for grid 8)
create_main_grid = True
if create_main_grid:
diamins_2_10 = not recreate_grid8_March_data # backward compatible option
if diamins_2_10:
grids = [10, 20]
diamins = [2, 10]
diamaxs = [10, 100]
crops2 = [7, 2] # not used in this option, added for flow
else:
grids = [20, 40]
diamins = [2, 2]
diamaxs = [100, 100]
crops2 = [2, 12] # added to recreate_grid8_March_data
else:
## created separately -- as an addition - for extra augmentation
grids = [10]
diamins = [2]
diamaxs = [5]
crops2 = [7]
create_missing_grid_file = False
if create_missing_grid_file:
grids = [20]
diamins = [19]
diamaxs = [99]
crops2 = [2]
resolution_str = RESOLUTION_STR
grid=20
centralcutonly = True
grid_multiple = 1 # do not aggregate any of the grids/data created -- save verbatim
grid_dest = grid * grid_multiple
eliminate_blanks_for_mid_extra_cut = False # typically False, only true for the extra data
if eliminate_blanks_for_mid_extra_cut:
crop=12 # leading to 200x200 image cut 10 x 10 times
model_grid_name = "8g10"
else:
crop=22 # providing a 9x20 by 9x20 grid
model_grid_name = "8g9" #"16g3" # was 16g9
dim = dim
include_ba_partial_height = dim//2
grid_passes = 1 # was 10 # gp10 standard must be 1
if grid_passes > 1:
model_grid_name = "8g10x%s" % grid_passes
elif grid_passes < 1:
grid_passes = 1
print ("grid_passes, include_ba_partial_height, model_grid_name: ", grid_passes, include_ba_partial_height, model_grid_name)
data_generation=True
testPrint = False
if data_generation: # DO ONE BY ONE as convert_scans_and_masks_xd requires a lot of memory and can swap ...
exclude_blanks = False if create_main_grid else True # replaces the manual change done in the interactive mode
include_below_above_nodule = False
if not include_below_above_nodule:
ba0 = dim //2 - include_ba_partial_height
ba1 = np.min([dim //2 + include_ba_partial_height + 1, dim])
split_into_nodules_and_blanks = True
for pt in range(0,3): # splitting into 2 parts due to memory needs
np.random.seed(1000+pt)
scans_all_grid = []
masks_all_grid = []
scans_all_grid2 = []
masks_all_grid2 = []
scans_all_grid3 = []
masks_all_grid3 = []
if pt == 0:
istart = 4*pt
iend = 4*(pt+1)
elif pt == 1:
istart = 4*pt
iend = 4*(pt+1)
iend += 1 # increase by 1 to cover 9
else:
istart = 9
iend = 10
for i in range(istart, iend):
scans_all = []
masks_all = []
sids_all = []
scans_all2 = []
masks_all2 = []
sids_all2 = []
scans_all3 = []
masks_all3 = []
sids_all3 = []
print ("\n\n################################# EXECUTING subset ", i)
scans, masks, sids, blankids = load_scans_masks_or_blanks(i, useAll = False, use_unsegmented=DO_NOT_USE_SEGMENTED)
if include_below_above_nodule:
only_with_nudels = True # This may be False or True; must be False so we do not lose the info
else:
only_with_nudels = True # could be True ...
for j in range(len(scans)):
extra_test=False
if extra_test:
mtemp = masks[j]
np.sum(mtemp)
np.min(mtemp)
idx = np.sum(masks[j], axis=(1,2)) != 0 # at this stage, with this more memory friendly version there should be only items with nodules
idx_nodules = np.sum(masks[j], axis=(1,2)) > 0
idx_blanks = np.sum(masks[j], axis=(1,2)) < 0
print ("Masks, with nodules and blanks: ", np.sum(idx_nodules), np.sum(idx_blanks))
blanks_per_axis = 0 # we now randomly position this
scans1 = [scans[j]]
masks1 = [masks[j]]
use_standard_convert = True if recreate_grid8_March_data else False # added for backward compatibility
if use_standard_convert:
scans1, masks1 = convert_scans_and_masks_xd3 (scans1, masks1, only_with_nudels = only_with_nudels, dim=dim, crop=crop, blanks_per_axis = blanks_per_axis,
add_blank_spacing_size=1, add_blank_layers = 0) # as per March data generation
if not include_below_above_nodule:
### take the central values
idx = np.sum(np.abs(masks1[:,ba0:ba1]), axis=(-1,-2, -3)) != 0 #dim // 2
idx_nodules = np.sum(masks1[:,ba0:ba1], axis=(-1,-2, -3)) > 0
idx_blanks = np.sum(masks1[:,ba0:ba1], axis=(-1,-2, -3)) < 0
else:
idx = np.sum(np.abs(masks1), axis=(-1,-2,-3)) != 0
idx_nodules = np.sum(masks1, axis=(-1,-2,-3)) > 0
idx_blanks = np.sum(masks1, axis=(-1,-2,-3)) < 0
count_nodules = np.sum(idx_nodules)
count_blanks = np.sum(idx_blanks)
count_all = np.sum(idx, axis=0)
print ("sidj, Total masks, and with nodules and blanks: ", sids[j], len(idx), count_nodules, count_blanks)
if (count_nodules == 0):
# cut down the blanks only to the centrally located, whatever the include_below_above_nodule
idx_blanks = np.sum(masks1[:,dim // 2], axis=(-1,-2)) < 0
count_blanks = np.sum(idx_blanks)
print("Selecting only the central blanks, count of: ", count_blanks)
masks1 = masks1[idx_blanks]
scans1 = scans1[idx_blanks]
elif not include_below_above_nodule:
#print("Not including the below and above nodules' entries, beyond partial_height of , remaining count: ", include_ba_partial_height, count_all)
print("Using ba partial_height; remaining count: ", count_all)
masks1 = masks1[idx]
scans1 = scans1[idx]
else:
print("Keeping all entries of: ", count_all )
else:
## just convert into 3d rep and find the voxels in the entire space
scans1, masks1 = convert_scans_and_masks_xd3 (scans1, masks1, only_with_nudels = False, dim=dim, crop=crop, blanks_per_axis = blanks_per_axis,
add_blank_spacing_size=0, add_blank_layers = 0)
scans1 = scans1[:, np.newaxis] # do NOT change these as we iterate by different grids now 20170327
masks1 = masks1[:, np.newaxis] # do NOT change these as we iterate by different grids now 20170327
for ig in range(len(grids)):
grid_masks = []
grid_scans = []
grid = grids[ig]
crop12 = crops2[ig]
if exclude_blanks and np.sum(masks1) <0:
print("Completely excluding blanks & gridding of them ...")
scans1_c = []
masks1_c = []
else:
for gpass in range(grid_passes):
if grid_passes != 1:
shift = grid // grid_passes
shifting_gridwith = img_cols // grid - 1 # minus 1 to accommodate the shift
crop_top_left = (img_cols - (shifting_gridwith+1)*grid) // 2 + gpass*shift
crop_bottom_right = crop_top_left + shifting_gridwith*grid
masks1_c = masks1[:,:,:,crop_top_left:crop_bottom_right,crop_top_left:crop_bottom_right]
scans1_c = scans1[:,:,:,crop_top_left:crop_bottom_right,crop_top_left:crop_bottom_right]
if recreate_grid8_March_data:
grid_masks1, gridwidth, gridheight = grid_data(masks1_c, grid=grid, crop=0, expand=0 )
grid_scans1, gridwidth, gridheight = grid_data(scans1_c, grid=grid, crop=0, expand=0)
else:
#### NOTE the following has NOT been tested
print("WARNING: grid_passes option has NOT been tested working with the find_voxels procedure")
grid_scans1, grid_masks1 = find_voxels_and_blanks(dim, grid, scans1_c, scans1_c, masks1_c, nodules_threshold=0.999, voxelscountmax = 1000,
find_blanks_also = True, centralcutonly = centralcutonly, diamin=diamins[ig], diamax=diamaxs[ig])
else: # just a single standard pass - no shifting grid
if recreate_grid8_March_data:
grid_masks1, gridwidth, gridheight = grid_data(masks1, grid=grid, crop=crop12, expand=0 )
grid_scans1, gridwidth, gridheight = grid_data(scans1, grid=grid, crop=crop12, expand=0)
else:
grid_scans1, grid_masks1 = find_voxels_and_blanks(dim, grid, scans1, scans1, masks1, nodules_threshold=0.999, voxelscountmax = 1000,
find_blanks_also = True, centralcutonly = centralcutonly, diamin=diamins[ig], diamax=diamaxs[ig])
testPlot = False
if testPlot:
for ii in range(0, len(grid_scans1)): # was 2, 20
print ('gridscans1 scan/cut '+str(ii))
f, ax = plt.subplots(1, 2, figsize=(8,4))
ax[0].imshow(grid_scans1[ii,0,dim // 2],cmap=plt.cm.gray)
#ax[1].imshow(masks_pred[ii,0,0],cmap=plt.cm.gray)
ax[1].imshow(grid_masks1[ii,0,dim // 2] ,cmap=plt.cm.gray)
#ax[2].imshow(np.abs(masks_pred[ii,0,0] - masks_pred_prev[ii,0,0]) ,cmap=plt.cm.gray)
#ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
plt.show()
if len(grid_masks1) > 0:
idx_blanks = np.sum(grid_masks1[:,:,dim // 2], axis=(-1,-2, -3)) < 0
idx = np.sum(np.abs(grid_masks1), axis=(1,2,3,4)) != 0
if not include_below_above_nodule:
idx_nodules = np.sum(grid_masks1[:,:,ba0:ba1], axis=(1,2,3,4)) > 0
else:
idx_nodules = np.sum(grid_masks1, axis=(1,2,3,4)) > 0 # this may be inaccurate when blanks were present somewhere
# cut down the blanks only to the centrally located
if testPrint:
print ("Total masks (after grid), and with nodules and blanks: ", len(idx), np.sum(idx_nodules), np.sum(idx_blanks))
idx_nodules_central_blanks = idx_nodules | idx_blanks
if exclude_blanks:
if testPrint:
print("Not including blanks ....")
grid_masks1 = grid_masks1[idx_nodules]
grid_scans1 = grid_scans1[idx_nodules]
else:
grid_masks1 = grid_masks1[idx_nodules_central_blanks] # ONLY keep the masks and scans with nodules(central)
grid_scans1 = grid_scans1[idx_nodules_central_blanks]
if testPrint:
print ("Total masks (after another central blanks cut): ", len(grid_masks1))
grid_masks.append(grid_masks1)
grid_scans.append(grid_scans1)
if len(grid_masks):
masks1_c = np.concatenate(grid_masks)
scans1_c = np.concatenate(grid_scans)
else:
masks1_c = []
scans1_c = []
print ("=== Grid, Sub-total masks1 : ", (grid, len(masks1_c)))
if (len(masks1_c) > 0):
if ig == 0:
scans_all.append(scans1_c)
masks_all.append(masks1_c)
sids_all.append(sids[j]) # ????
elif ig == 1:
scans_all2.append(scans1_c)
masks_all2.append(masks1_c)
sids_all2.append(sids[j]) # ????
elif ig == 2:
scans_all3.append(scans1_c)
masks_all3.append(masks1_c)
sids_all3.append(sids[j]) # ???
else:
print("Warning: 4 separate grids are not implemented for automatic data generation")
## end of the grid_and_limit_data LOOP --------------------------------------------------------
scans = np.concatenate(scans_all) #e.g. [0:4])
masks = np.concatenate(masks_all) #[0:4])
if len(grids) > 1:
scans2 = np.concatenate(scans_all2)
masks2 = np.concatenate(masks_all2)
if len(grids) > 2:
scans3 = np.concatenate(scans_all3)
masks3 = np.concatenate(masks_all3)
################### end of the scans loop ############################################################
ig =0
for ig in range(len(grids)):
if ig == 0:
scansx = scans
masksx = masks
elif ig == 1:
scansx = scans2
masksx = masks2
elif ig == 2:
scansx = scans3
masksx = masks3
# select only non-zero grids .. (essentially decimating the data; for subset 1: from 17496 down to 1681)
idx = np.sum(np.abs(masksx), axis=(1,2,3,4)) != 0 # at this stage, with this more memory friendly version there should be only items with nodules
idx_nodules = np.sum(masksx, axis=(1,2,3,4)) > 0
idx_blanks = np.sum(masksx, axis=(1,2,3,4)) < 0
count_nodules = np.sum(idx_nodules)
count_blanks = np.sum(idx_blanks)
count_all = np.sum(idx, axis=0)
print ("All entries, grid, total, nodules and blanks: ", grids[ig], len(idx), count_all, count_nodules, count_blanks)
testPlot = False
if testPlot:
jump=len(idx) // 20
jump =1
for ii in range(0, len(idx)//20, jump): # was 2, 20
print ('scan/cut '+str(ii))
f, ax = plt.subplots(1, 2, figsize=(8,4))
ax[0].imshow(masksx[ii,0,dim // 2],cmap=plt.cm.gray)
ax[1].imshow(scansx[ii,0,dim // 2] ,cmap=plt.cm.gray)
plt.show()
len(masksx)
if not include_below_above_nodule:
masksx = masksx[idx]
scansx = scansx[idx]
len(masksx)
if eliminate_blanks_for_mid_extra_cut:
masksx = masksx[~ idx_blanks]
scansx = scansx[~ idx_blanks]
idx = np.sum(np.abs(masksx), axis=(1,2,3,4)) != 0 # at this stage, with this more memory friendly version there should be only items with nodules
idx_nodules = np.sum(masksx, axis=(1,2,3,4)) > 0
idx_blanks = | np.sum(masksx, axis=(1,2,3,4)) | numpy.sum |
#https://stackoverflow.com/questions/42007434/slider-widget-for-pyqtgraph
import sys
import time
import numpy as np
import h5py
import imageio
import matplotlib.pyplot as plt
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import *
from PyQt5.QtWidgets import QApplication, QMainWindow, QSlider
import vispy
import vispy.app
from vispy import app, visuals
from vispy.app import use_app
use_app('PyQt5')
from vispy import scene
from vispy import color
from vispy.color.colormap import Colormap
class Canvas(scene.SceneCanvas):
def __init__(self):
scene.SceneCanvas.__init__(self,keys='interactive', size=(1024, 1024))
self.unfreeze()
self.i=0
self.pos=np.array([[0,0]])
self.colors=[0,0,0,1]
self.index = 0
#self.markers = visuals.MarkersVisual()
#self.markers=scene.visuals.Markers(pos=pos, parent=wc_2.scene, face_color='blue')
#self.markers.set_data(self.pos, face_color=self.colors)
#self.markers.symbol = visuals.marker_types[10]
#self.markers.transform = STTransform()
self.plane_ind=0
self.filename='//ZMN-HIVE/User-Data/Maria/Caiman_MC/fish11_6dpf_medium_aligned.h5'
#self.filename='//ZMN-HIVE/User-Data/Maria/check_registration/control/fish11_6dpf_medium_aligned.h5'
self.load_image()
self.view=self.central_widget.add_view()
self.image=scene.visuals.Image(self.im, parent=self.view.scene, cmap='hsv',clim=[0,255])
self.image.set_gl_state('translucent', depth_test=False)
self.markers=scene.visuals.Markers(pos=self.pos, parent=self.view.scene, face_color='blue')
self.nrs=[]
def load_image(self):
with h5py.File(self.filename, "r") as f:
# List all groups
print("Loading raw data from a plane...")
start=time.time()
self.im=f['data'][0,self.plane_ind,:,:].astype('float32')
self.im*= 400.0/(self.im.max()+0.00001)
end=time.time()
print('Time to load raw data file: ',end-start)
print( | np.max(self.im) | numpy.max |
################################################################################
##
## 2018/05/02
##
## Author: <NAME>, IB²
## Version: 1.0
## Python: 3.6
##
## This implements a random forest in order to predict digenic effect of both
## DIDA combinations (1) and dual diagnosis combinations (2). It aims to diffe-
## rentiate between true digenic class, composite and dual diagnosis.
##
## (1) https://academic.oup.com/nar/article/45/15/e140/3894171
## (2) https://www.nejm.org/doi/full/10.1056/NEJMoa1516767
##
## It performs stratified cross-validations and averages results over a given
## amount of repeats. dida_dualdiag.csv is an instance of valid CSV file.
##
################################################################################
import sys
import time
import pandas as pd
from math import sqrt
from numpy import array, concatenate, dot, diag, mean, std
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import roc_curve, roc_auc_score, auc, matthews_corrcoef
def main(f_name, n_trees, n_epochs, threshold, selector):
"""
Loads csv, launches cross-validation, displays scores
f_name: str, path to reach the .csv file to evaluate predictor
n_trees: int, amount of trees in forest
n_epochs: int, amount of cross-validation to perform
thresholds: see getScores
selector: str, boolean vector representing features to take into account
"""
features = [
'CADD1', 'CADD2', 'RecA', 'EssA',
'CADD3', 'CADD4', 'RecB', 'EssB',
'Path'
]
assert len(selector) == len(features), "Features selector must fit features amount."
to_keep = [f for i, f in enumerate(features) if selector[i] == '1']
# Csv gathering, it needs to be ordered.
df_data = pd.read_csv(f_name)
X = array(df_data[to_keep])
# TD: true digenic, CO: composite, UK: unknown
# OV: OVerlapping dual diagnosis, DI: Distinct dual diagnosis
y = array(
df_data['DE'].replace(
'TD', 2
).replace(
'CO', 1
).replace(
'UK', -1
).replace(
'OV', 0
).replace(
'DI', 0
).replace(
'DD', 0
)
)
gene_pairs = array(df_data['Pair'])
X, y, gene_pairs = X[y != -1], y[y != -1], gene_pairs[y != -1]
y = array([ [i == y_i for i in range(3)] for y_i in y])
print('Training on subspace {', ', '.join( to_keep ), '}.' )
def getScores(pred, real, thresholds=[1, 1, 1]):
"""
Returns evaluation metrics to evaluate one cross-validation:
For each class, Sensitivity and Specificity. Order:
sen_dd, sen_co, sen_td, spe_dd, spe_co, spe_td
pred: Predicted probabilities. For each sample, vector is such as
[pred_dd, pred_co, pred_td]
real: real label. A label is a 3-long boolean vector.
DD: [1, 0, 0] - CO: [0, 1, 0] - TD: [0, 0, 1]
thresholds: weightings to compensate lack of data in certain class.
"""
if len(pred) != len(real):
raise Exception("ERROR: input vectors have differente len!")
results = {
'sen': [ { 'count': 0, 'recognized': 0 } for _ in range(3) ],
'spe': [ { 'count': 0, 'true': 0 } for _ in range(3) ],
}
for i, r_tab in enumerate(real):
r = max(range(3), key=lambda k: r_tab[k])
p = max(range(3), key=lambda k: pred[i][k]*thresholds[k])
results['sen'][r]['count'] += 1
results['spe'][p]['count'] += 1
if p == r:
results['sen'][p]['recognized'] += 1
results['spe'][p]['true'] += 1
return map(
lambda x: round(x*100)/100,
[r['recognized'] / r['count'] for r in results['sen']] + [r['true'] / r['count'] for r in results['spe']]
)
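# Worked example (illustrative only): with
# real = [[1,0,0], [0,1,0], [0,0,1], [0,0,1]] and
# pred = [[.8,.1,.1], [.2,.5,.3], [.1,.2,.7], [.1,.6,.3]],
# the last sample (true TD) is predicted as CO, so getScores returns
# sensitivities dd=1.0, co=1.0, td=0.5 and specificities dd=1.0, co=0.5, td=1.0.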
def LOGO_crossValidation(X, y, groups, n_trees=100, n_epochs=50, thresholds=[1,1,1]):
"""
Stratified cross-validation.
X: Design matrix
y: label vector
groups: Gene pair vector to define training groups
n_trees: Amount of trees in random forest
n_epochs: number of cross validations to perform
thresholds: see getScores
"""
logo = LeaveOneGroupOut()
clf = RandomForestClassifier(
n_estimators=n_trees,
max_depth=10,
criterion='gini',
min_samples_split=2,
min_samples_leaf=2,
bootstrap=True,
n_jobs=1
)
# Vector to compute final scores
sum_dd_se, sum_co_se, sum_td_se = [], [], []
sum_dd_sp, sum_co_sp, sum_td_sp = [], [], []
for i in range(n_epochs):
start_time = time.time()
values_t, values_p = [], []
print("#"*10, "Trial %i" % i, "#"*10)
# We leave one group out
for train_index, test_index in logo.split(X, y, groups):
X_fit, y_fit, X_train, y_train = (
X[train_index], y[train_index],
X[test_index], y[test_index]
)
clf = clf.fit(X_fit, y_fit)
y_predicted = clf.predict_proba(X_train)
# y_predicted is not shaped correctly. Reshape it to fit
# getScores expectations.
y_formatted = [ [0, 0, 0] for _ in range(len(y_predicted[0])) ]
for de in (0, 1, 2):
for i, proba in enumerate(y_predicted[de][:,1]):
y_formatted[i][de] = proba
# Predictions are concatenated into a prediction vector
values_t, values_p = values_t + [yi for yi in y_train], values_p + [yi for yi in y_formatted]
sen_dd, sen_co, sen_td, spe_dd, spe_co, spe_td = getScores(values_p, values_t, thresholds)
sum_dd_se.append(sen_dd)
sum_co_se.append(sen_co)
sum_td_se.append(sen_td)
sum_dd_sp.append(spe_dd)
sum_co_sp.append(spe_co)
sum_td_sp.append(spe_td)
print('Duration:', round( (time.time() - start_time) * 100) / 100, 's')
print('sen | dd - co - td / spe | dd - co - td')
print('sen | ' + '-'.join(map(str, (sen_dd, sen_co, sen_td))) + ' / spe | ' + '-'.join(map(str, (spe_dd, spe_co, spe_td))))
print('Sen DD: %f, std: %f' % (mean(sum_dd_se), std(sum_dd_se)) )
print('Sen CO: %f, std: %f' % (mean(sum_co_se), std(sum_co_se)) )
print('Sen TD: %f, std: %f' % (mean(sum_td_se), std(sum_td_se)) )
print('Spe DD: %f, std: %f' % (mean(sum_dd_sp), std(sum_dd_sp)) )
print('Spe CO: %f, std: %f' % (mean(sum_co_sp), std(sum_co_sp)) )
print('Spe TD: %f, std: %f' % (mean(sum_td_sp), std(sum_td_sp)) )
geo_mn = (
mean(sum_dd_se)*mean(sum_co_se)*mean(sum_td_se)*
mean(sum_dd_sp)*mean(sum_co_sp)*mean(sum_td_sp)
)**(1/6)
geo_std = (
| std(sum_dd_se) | numpy.std |
# this file contains several functions, which serve as helper functions
import numpy as np
import os
import handle_data
def get_current_best_models():
# this function contains a list of the current best models for
# each energy range (the filename of the trained model)
# NOTE: the model for 1.2 is still using 128 x 128 images!!
models = ["unfinishedNetworks/classifier_experimental_cnn_hdf5_0.3_best_test_12.16.cnn",
#"unfinishedNetworks/classifier_experimental_cnn_hdf5_0.6_best_test_13.98.cnn",
"unfinishedNetworks/classifier_experimental_ReLU_bigger_cnn_hdf5_0.6_best_test_12.3333333333.cnn",
#"unfinishedNetworks/classifier_experimental_cnn_hdf5_1.2_best_test_9.8.cnn",
"unfinishedNetworks/classifier_experimental_cnn_hdf5_1.2_best_test_8.25.cnn",
"unfinishedNetworks/classifier_experimental_cnn_hdf5_1.5_best_test_3.62.cnn",
"unfinishedNetworks/classifier_experimental_cnn_hdf5_2.5_best_test_3.58.cnn",
#"unfinishedNetworks/classifier_experimental_cnn_hdf5_4.0_best_test_7.34.cnn",
"unfinishedNetworks/classifier_experimental_cnn_hdf5_4.1_best_test_3.33333333333.cnn",
"unfinishedNetworks/classifier_experimental_cnn_hdf5_5.0_best_test_3.46.cnn",
"unfinishedNetworks/classifier_experimental_cnn_hdf5_8.0_best_test_5.24.cnn"]
return models
def get_energy_binning():
# returns the energy binning boundaries
energy_binning = np.asarray([0.15,
0.4,
0.7,
1.2,
2.1,
3.2,
4.9,
6.9,
10.0])
return energy_binning
def get_energy_bins():
# returns a list of tuples, which give the actual bins
# of the energy binning
energy_bins = [ (0.15, 0.4),
(0.4, 0.7),
(0.7, 1.2),
(1.2, 2.1),
(2.1, 3.2),
(3.2, 4.9),
(4.9, 6.9),
(6.9, 10.0) ]
return energy_bins
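# Example (illustrative): for an energy E = 1.0 (same units as the binning above),
# get_energy_bin_for_energy below returns index 2, i.e. the (0.7, 1.2) bin of get_energy_bins().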
def get_energy_bin_for_energy(E):
# this function returns the correct bin index for a given energy E
# corresponding to the energy bins defined in get_energy_bins()
# given np.argmin gives us the first element bigger than E,
# we choose ind - 1 as our index for the correct group
energy_binning = get_energy_binning()
ind = | np.argmin(energy_binning < E) | numpy.argmin |
# Copyright (c) 2021, <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Offroad Robotics Lab at Queen's University nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This file plots the ground truth for the 2D example
#
# Author: <NAME> <<EMAIL>>
# License: BSD 2.0
import matplotlib.pyplot as plt
import numpy as np
# Centers of terrain element chunks
bump_center = np.array([ 2046.298, 1599.936]) # mm
grass_center = np.array([ 236.904, -2487.764]) # mm
rocks_center = np.array([-1917.432, 1926.706]) # mm
# Corners of terrain element chunks
bumps1 = (bump_center + np.array([ 441.689, -898.057])) / 1000 # m
bumps2 = (bump_center + np.array([-443.011, -900.151])) / 1000 # m
bumps3 = (bump_center + np.array([-442.218, 899.451])) / 1000 # m
bumps4 = (bump_center + np.array([ 443.539, 898.757])) / 1000 # m
grass1 = (grass_center + np.array([ 1805.520, -438.652])) / 1000 # m
grass2 = (grass_center + np.array([ 1804.230, 443.375])) / 1000 # m
grass3 = (grass_center + np.array([-1799.550, 438.900])) / 1000 # m
grass4 = (grass_center + np.array([-1810.200, -443.624])) / 1000 # m
rocks1 = (rocks_center + np.array([ -4.514, 1263.860])) / 1000 # m
rocks2 = (rocks_center + np.array([ 1259.800, 12.871])) / 1000 # m
rocks3 = (rocks_center + np.array([ 8.747, -1267.760])) / 1000 # m
rocks4 = (rocks_center + np.array([-1264.030, -8.969])) / 1000 # m
# Group corner markers of terrain elements for 2D plotting
bumps_2d = np.array([bumps1, bumps2, bumps3, bumps4, bumps1])
grass_2d = np.array([grass1, grass2, grass3, grass4, grass1])
rocks_2d = np.array([rocks1, rocks2, rocks3, rocks4, rocks1])
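# A minimal plotting sketch (assumed continuation; the original plotting code is
# not shown here). Each *_2d array closes back on its first corner, so one plot
# call per terrain patch draws its outline:
#   fig, ax = plt.subplots()
#   for patch, label in [(bumps_2d, 'bumps'), (grass_2d, 'grass'), (rocks_2d, 'rocks')]:
#       ax.plot(patch[:, 0], patch[:, 1], label=label)
#   ax.set_aspect('equal')
#   ax.legend()
#   plt.show()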
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 12 09:18:56 2018
An example to demonstrate use of the Scipy v1.00 ODE integration routine
Simple bounce event, with chatter sequence detection
@author: rihy
"""
import scipy.integrate
import matplotlib.pyplot as plt
import numpy as npy
import timeit
def upward_cannon(t, y):
return [y[1], -0.5]
def hit_ground(t, y):
return y[0]
# Attributes for hit_ground function
hit_ground.terminal = True # defines whether integration should be terminated
hit_ground.direction = -1
# Run sim
tmin = 0.0
tmax = 100.0
dt = 2.0
y0 = npy.array([0,10.0],dtype=float)
method = 'RK45'
r = 0.56 # coefficient of restitution
terminateSolver = False
solvecount = 0
bouncecount = 0
tic=timeit.default_timer()
while not terminateSolver:
# Run solution
sol = scipy.integrate.solve_ivp(upward_cannon, [tmin, tmax], y0, method=method, events=hit_ground, max_step = dt)
solvecount = solvecount + 1
# Append results to arrays
if solvecount == 1:
t = sol.t
y = sol.y
else:
t = npy.append(t,sol.t)
y = npy.append(y,sol.y,axis=1)
# Set initial conditions (for bounce event)
if sol.status == 1: # termination event occurred, i.e. bounce
# Register new bounce event
bouncecount = bouncecount + 1
if bouncecount == 1:
tbounce = sol.t_events[0]
else:
tbounce = npy.append(tbounce, sol.t_events[0])
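# (Sketch of the assumed remainder of the bounce loop, based on the variables
# defined above: restart the integration at the bounce time with the vertical
# velocity reversed and scaled by the coefficient of restitution r, and stop
# once tmax is reached or the rebound velocity becomes negligible.)
#   tmin = sol.t_events[0][-1]
#   y0 = npy.array([0.0, -r * sol.y[1, -1]])
#   if tmin >= tmax or abs(y0[1]) < 1e-3:
#       terminateSolver = True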
# @cosmmostat_init.py
#
# Some convenient ROUTINES for interactive use
#
# Functions which makes the life easier for iSAP users.
#
# @author <NAME>
# @version 1.0
# @date 2020
#
import numpy as np
from os import remove
from subprocess import check_call
from datetime import datetime
import matplotlib.pyplot as plt
import astropy
from astropy.io import fits
from subprocess import check_call
import pylab
import readline
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pyqtgraph
from pyqtgraph.Qt import QtGui
import numpy
from matplotlib.colors import PowerNorm
import seaborn as sns
from scipy import ndimage
################################################
# def pwd():
# check_call(['pwd'])
################################################
# def ls():
# check_call(['ls'])
################################################
# similiar to the rebin function
def smooth2d(map, sigma):
return ndimage.filters.gaussian_filter(map,sigma=sigma)
def rebin2d(a, shape):
sh = shape[0],a.shape[0]//shape[0],shape[1],a.shape[1]//shape[1]
return a.reshape(sh).mean(-1).mean(1)
def rebin1d(a, shape):
sh = shape[0],a.shape[0]//shape[0]
return a.reshape(sh).mean(-1)
def pad_width2d(size):
pad_width = ((size, size), (size,size))
return pad_width
def unpad(x, pad_width):
slices = []
for c in pad_width:
e = None if c[1] == 0 else -c[1]
slices.append(slice(c[0], e))
return x[tuple(slices)]
def unpad2d(x, size):
return unpad(x, pad_width2d(size))
# Test
# pad_width = ((0, 0), (1, 0), (3, 4))
# a = np.random.rand(10, 10, 10)
# b = np.pad(a, pad_width, mode='constant')
# c = unpad(b, pad_width)
# np.testing.assert_allclose(a, c)
################################################
def hthres(alpha,Thres):
Res = np.copy(alpha)
Res[np.abs(Res) <= Thres] = 0
return Res
def hard_thresholding(alpha,Thres):
alpha[np.abs(alpha) <= Thres] = 0
################################################
def soft_thresholding(alpha, Thres):
Res = np.copy(np.abs(alpha)) - Thres
Res[Res < 0] = 0
alpha[:,:] = np.sign(alpha) * Res[:,:]
def sthres(alpha, Thres):
Res = np.copy(alpha)
Res = np.abs(alpha) - Thres
Res[Res < 0] = 0
Res = np.sign(alpha) * Res
return Res
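# Worked example of the soft threshold above: sthres(np.array([-3.0, 0.5, 2.0]), 1.0)
# shrinks each coefficient towards zero by the threshold and clips at zero,
# returning array([-2., 0., 1.]).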
################################################
def info(Data,name=0,mask=None):
if mask is None:
if name:
print (name, ": Size = ", Data.shape, "Type = ", Data.dtype, ", Mean = ", np.mean(Data), " Sigma = ", np.std(Data), "Min = ", np.min(Data), " Max = ", np.max(Data))
else:
print ("Size = ", Data.shape, "Type = ", Data.dtype, ", Mean = ", np.mean(Data), " Sigma = ", np.std(Data), "Min = ", np.min(Data), " Max = ", np.max(Data))
else:
ind = np.where(mask != 0)
if name:
print (name, ": Size = ", Data.shape, "Type = ", Data.dtype, ", Mean = ", np.mean(Data[ind]), " Sigma = ", np.std(Data[ind]), "Min = ", np.min(Data[ind]), " Max = ", np.max(Data[ind]))
else:
print ("Size = ", Data.shape, "Type = ", Data.dtype, ", Mean = ", np.mean(Data[ind]), " Sigma = ", np.std(Data[ind]), "Min = ", np.min(Data[ind]), " Max = ", | np.max(Data[ind]) | numpy.max |
""" Interface for running Turbomole ADC(2) calculations. """
import subprocess
import re
import os
import numpy as np
import file_utils
STDOUT = "qm.log"
STDERR = "qm.err"
class Turbomole():
""" Interface for turbomole calculations. """
def __init__(self, data):
self.data = data
self.results = dict()
def update_coord(self):
""" Update coord file with self.data["geom"]. """
file_utils.replace_cols_inplace("coord", self.data["geom"], r"\$coord")
if self.data["qmmm"]:
fn = file_utils.search_file("control", r"\$point_charges")[0]
fn = fn.split("=")[1].split()[0]
file_utils.replace_cols_inplace(fn, self.data["mm_geom"], r"\$point_charges")
def update_state(self):
""" Update control file to request gradient of self.data["state"]. """
raise NotImplementedError("Need to call specific interface.")
def update_input(self):
""" Update all input files with values from self.data. """
self.update_coord()
self.update_state()
def run(self):
""" Run the calculation, check success and read results. """
raise NotImplementedError("Need to call specific interface.")
def read(self):
""" Read calculation results. """
raise NotImplementedError("Need to call specific interface.")
class ricc2(Turbomole):
""" Interface for turbomole ricc2 calculations. """
def __init__(self, model, data):
self.data = data
self.model = re.escape(model)
self.gs_model = re.escape(model)
if model == "adc(2)":
self.gs_model = "mp2"
self.results = dict()
def update_state(self):
""" Update control file to request gradient of self.data["state"]. """
if self.data["state"] == 1:
state_string = r"(x)"
else:
state_string = r"(a {})".format(self.data["state"]-1)
n_sub = file_utils.replace_inplace("control",
r"(geoopt +model={} +state=).*".format(self.model),
r"\1" + state_string)
if n_sub < 1:
raise ValueError("Expected geoopt section not found in control file.")
def run(self):
""" Run the calculation, check success and read results. """
with open(STDOUT, "w") as out, open(STDERR, "w") as err:
subprocess.run("dscf", stdout=out, stderr=err)
actual_check()
subprocess.run("ricc2", stdout=out, stderr=err)
actual_check()
def read(self):
self.results["energy"] = ricc2_energy(STDOUT, self.gs_model.upper())
self.results["gradient"] = ricc2_gradient()[self.data["state"]]
try:
self.results["oscill"] = ricc2_oscill(STDOUT)
except:
pass
class mp2(ricc2):
""" Interface for MP2 ground state calculations. """
def read(self):
self.results["energy"] = ricc2_gs_energy(self.gs_model.upper())
self.results["gradient"] = ricc2_gradient()[1]
class egrad(Turbomole):
def __init__(self, data):
self.data = data
try:
soes = file_utils.search_file("control", r"\$soes", after=1)
soes = file_utils.split_columns(soes, col=1, convert=int)[0]
self.data["n_ex_state"] = soes
except:
self.data["n_ex_state"] = 0
try:
_ = file_utils.search_file("control", r"\$rij")
self.data["ri"] = True
except:
self.data["ri"] = False
self.results = dict()
def update_state(self):
ex_state = self.data["state"] - 1
if self.data["n_ex_state"] < ex_state:
raise ValueError("Not enough states selected in QM calculation.")
if ex_state == 0:
return
n_sub = file_utils.replace_inplace("control",
r"\$exopt.*",
r"$exopt {}".format(ex_state))
if n_sub == 0:
file_utils.replace_inplace("control",
r"\$end",
r"$exopt {}\n$end".format(ex_state))
def run(self):
# Remove existing gradient file to avoid the file becoming huge.
try:
os.remove("gradient")
except:
pass
# Run new calculation.
with open(STDOUT, "w") as out, open(STDERR, "w") as err:
if self.data["ri"]:
subprocess.run("ridft", stdout=out, stderr=err)
else:
subprocess.run("dscf", stdout=out, stderr=err)
actual_check()
if self.data["state"] == 1:
if self.data["ri"]:
subprocess.run("rdgrad", stdout=out, stderr=err)
else:
subprocess.run("grad", stdout=out, stderr=err)
if self.data["n_ex_state"] > 0:
subprocess.run("escf", stdout=out, stderr=err)
else:
subprocess.run("egrad", stdout=out, stderr=err)
actual_check()
def read(self):
self.results["energy"] = tddft_energy(STDOUT)
self.results["gradient"] = get_grad_from_gradient(STDOUT, self.data["natom"])
if self.data["n_ex_state"] > 0:
self.results["oscill"] = tddft_oscill(STDOUT)
# TDDFT Calculation keywords
DSCF_EN = re.escape(r"| total energy =")
ESCF_EN = r"Total energy:"
GRAD_GRAD = r"SCF ENERGY GRADIENT with respect to NUCLEAR COORDINATES"
EGRAD_GRAD = r"Excited state no.*chosen for optimization"
RDGRAD_GRAD = r"RDGRAD - INFORMATION"
def tddft_energy(fname):
try:
energy = file_utils.search_file(fname, ESCF_EN)
col = 2
except:
energy = file_utils.search_file(fname, DSCF_EN)
col = 4
file_utils.split_columns(energy, col=col, convert=np.float64)
return np.array(energy)
def tddft_gradient(fname, target, ri):
if target == 1:
if ri:
cfile, _ = file_utils.go_to_keyword(fname, RDGRAD_GRAD)
else:
cfile, _ = file_utils.go_to_keyword(fname, GRAD_GRAD)
else:
cfile = file_utils.open_if_needed(fname)
while True:
cfile, cstate = file_utils.go_to_keyword(cfile, EGRAD_GRAD)
cstate = int(cstate.split()[3]) + 1
if cstate == target:
break
grad = get_grad_from_stdout(cfile)
return grad
def tddft_oscill(fname):
oscill = file_utils.search_file(fname, "mixed representation:")
file_utils.split_columns(oscill, col=2, convert=np.float64)
return np.array(oscill)
import unittest
from approvaltests import verify
from tests.helpers import verify_numpy_array
import numpy as np
import homlib
class ValtonenOrnhagArxiv2020BfHfTestCase(unittest.TestCase):
def setUp(self):
self.tol = 14
self.p1 = np.array([
[2.003107199098924, -15.634084933471335],
[-0.017087350257598, -7.041596829586987]
])
self.p2 = np.array([
[0.395688457559412, -0.012777594199286],
[2.097270018093999, 0.988175585551782]
])
self.R1 = np.array([
[0.854451801803156, 0.080889542675225, 0.513194895026376],
[0.251645807638113, 0.799754574299643, -0.545038538440134],
[-0.454517882919365, 0.594852505057001, 0.662996222714662]
])
self.R2 = np.array([
[0.243935353955667, -0.895887070857591, -0.371324520279403],
[0.945623784801441, 0.134783533871648, 0.296022054271079],
[-0.215153900053711, -0.423343542843488, 0.880050591741408]
])
self.sols = homlib.get_valtonenornhag_arxiv_2020b_fHf(
np.asfortranarray(self.p1),
np.asfortranarray(self.p2),
np.asfortranarray(self.R1),
np.asfortranarray(self.R2)
)
def test_valtonenornhag_arxiv_2020b_fHf_length(self):
assert len(self.sols) == 4
def test_valtonenornhag_arxiv_2020b_fHf_sol0(self):
np.testing.assert_almost_equal(self.sols[0]['f'], -0.16963695303093723, self.tol)
verify(verify_numpy_array(self.sols[0]['H']))
def test_valtonenornhag_arxiv_2020b_fHf_sol1(self):
np.testing.assert_almost_equal(self.sols[1]['f'], 0.34165415155423584, self.tol)
verify(verify_numpy_array(self.sols[1]['H']))
def test_valtonenornhag_arxiv_2020b_fHf_sol2(self):
np.testing.assert_almost_equal(self.sols[2]['f'], 0.6535921559444265, self.tol)
verify(verify_numpy_array(self.sols[2]['H']))
def test_valtonenornhag_arxiv_2020b_fHf_sol3(self):
np.testing.assert_almost_equal(self.sols[3]['f'], 3.3956095501687518, self.tol)
verify(verify_numpy_array(self.sols[3]['H']))
def test_valtonenornhag_arxiv_2020b_fHf_dimensions01(self):
"""Check that an exception is raised when dimensions are incorrect."""
p1 = np.random.randn(2, 2)
p2 = np.random.randn(3, 2)
R1 = np.random.randn(3, 3)
R2 = np.random.randn(3, 3)
with self.assertRaises(ValueError):
sols = homlib.get_valtonenornhag_arxiv_2020b_fHf( # noqa
np.asfortranarray(p1),
np.asfortranarray(p2),
np.asfortranarray(R1),
np.asfortranarray(R2)
)
# python3.7
"""Utility functions for latent codes manipulation."""
import numpy as np
from sklearn import svm
from .logger import setup_logger
__all__ = ['train_boundary', 'project_boundary', 'linear_interpolate']
def train_boundary(latent_codes,
scores,
chosen_num_or_ratio=0.02,
split_ratio=0.7,
invalid_value=None,
logger=None):
"""Trains boundary in latent space with offline predicted attribute scores.
Given a collection of latent codes and the attribute scores predicted from the
corresponding images, this function will train a linear SVM by treating it as
a bi-classification problem. Basically, the samples with highest attribute
scores are treated as positive samples, while those with lowest scores as
negative. For now, the latent code can ONLY be with 1 dimension.
NOTE: The returned boundary is with shape (1, latent_space_dim), and also
normalized with unit norm.
Args:
latent_codes: Input latent codes as training data.
scores: Input attribute scores used to generate training labels.
chosen_num_or_ratio: How many samples will be chosen as positive (negative)
samples. If this field lies in range (0, 0.5], `chosen_num_or_ratio *
latent_codes_num` will be used. Otherwise, `min(chosen_num_or_ratio,
0.5 * latent_codes_num)` will be used. (default: 0.02)
split_ratio: Ratio to split training and validation sets. (default: 0.7)
invalid_value: This field is used to filter out data. (default: None)
logger: Logger for recording log messages. If set as `None`, a default
logger, which prints messages from all levels to screen, will be created.
(default: None)
Returns:
A decision boundary with type `numpy.ndarray`.
Raises:
ValueError: If the input `latent_codes` or `scores` are with invalid format.
"""
if not logger:
logger = setup_logger(work_dir='', logger_name='train_boundary')
if (not isinstance(latent_codes, np.ndarray) or
not len(latent_codes.shape) == 2):
raise ValueError(f'Input `latent_codes` should be with type '
f'`numpy.ndarray`, and shape [num_samples, '
f'latent_space_dim]!')
num_samples = latent_codes.shape[0]
latent_space_dim = latent_codes.shape[1]
if (not isinstance(scores, np.ndarray) or not len(scores.shape) == 2 or
not scores.shape[0] == num_samples or not scores.shape[1] == 1):
raise ValueError(f'Input `scores` should be with type `numpy.ndarray`, and '
f'shape [num_samples, 1], where `num_samples` should be '
f'exactly same as that of input `latent_codes`!')
if chosen_num_or_ratio <= 0:
raise ValueError(f'Input `chosen_num_or_ratio` should be positive, '
f'but {chosen_num_or_ratio} received!')
logger.info(f'Filtering training data.')
if invalid_value is not None:
latent_codes = latent_codes[scores[:, 0] != invalid_value]
scores = scores[scores[:, 0] != invalid_value]
logger.info(f'Sorting scores to get positive and negative samples.')
sorted_idx = np.argsort(scores, axis=0)[::-1, 0]
latent_codes = latent_codes[sorted_idx]
scores = scores[sorted_idx]
num_samples = latent_codes.shape[0]
if 0 < chosen_num_or_ratio <= 1:
chosen_num = int(num_samples * chosen_num_or_ratio)
else:
chosen_num = int(chosen_num_or_ratio)
chosen_num = min(chosen_num, num_samples // 2)
logger.info(f'Splitting training and validation sets:')
train_num = int(chosen_num * split_ratio)
val_num = chosen_num - train_num
# Positive samples.
positive_idx = np.arange(chosen_num)
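# (The rest of train_boundary is not shown above; a typical continuation,
# sketched under that assumption, mirrors the docstring: pick the matching
# negative samples, split both groups by split_ratio, fit a linear SVM and
# return its normalized coefficient vector, e.g.:
#   negative_idx = np.arange(num_samples - chosen_num, num_samples)
#   clf = svm.SVC(kernel='linear')
#   clf.fit(train_data, train_label)
#   boundary = clf.coef_.reshape(1, latent_space_dim).astype(np.float32)
#   return boundary / np.linalg.norm(boundary))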
import os
import logging
import itertools
import pandas as pd
import numpy as np
from nltk.stem.porter import PorterStemmer
from nltk.corpus import stopwords
from tqdm import tqdm
from data_utils.text_util import clean_str, pad_sentence, build_vocab
from data_utils.noise_uitl import generate_random_noise
class DataLoad(object):
logging.getLogger().setLevel(logging.INFO)
def __init__(self, data_path, fnames, forced_seq_len, vocab_size, paly_times,
num_main_actors, batch_size, num_epochs, noise_rate):
self.data_path = data_path
self.fnames = fnames
self.forced_seq_len = forced_seq_len
self.vocab_size = vocab_size
self.paly_times = paly_times
self.num_main_actors = num_main_actors
self.batch_size = batch_size
self.num_epochs = num_epochs
self.noise_rate = noise_rate
# data file path
self.info_path = os.path.join(data_path, fnames['movies'])
self.actors_path = os.path.join(data_path, fnames['actors'])
self.summaries_path = os.path.join(data_path, fnames['summaries'])
self.storylines_path = os.path.join(data_path, fnames['storylines'])
self.all_ratings_path = os.path.join(data_path, fnames['all_ratings'])
self.all_actors_path = os.path.join(
data_path, fnames['all_actors'].format(self.num_main_actors))
self.users_bias = os.path.join(data_path, fnames['bu'])
self.movies_bias = os.path.join(data_path, fnames['bm'])
# generate features
self._generate_id_mappings()
self._generate_bias()
self._generate_info() # dim = M*self.dim_onehot
self._generate_actors() # dim = M*self.dim_onehot
self._generate_descriptions() # dim = M*self.forced_seq_len
def load_data(self, mode, num_sub=100*100):
if mode in ('train', 'dev', 'eval'):
ratings_path = os.path.join(
self.data_path, self.fnames['{}_ratings'.format(mode)])
else:
raise ValueError('please choose correct mode (train/dev/eval)')
df_ratings = pd.read_csv(
ratings_path,
header=0,
dtype={'userId':np.int32, 'movieId':np.int32, 'rating':np.float, 'timestamp':np.str})
if mode == 'train':
return self._train_batch_iterator(df_ratings)
else:
return self._dev_eval_iterator(df_ratings, num_sub=num_sub)
def _train_batch_iterator(self, df):
num_batches_per_epoch = df.shape[0] // self.batch_size + 1
# shuffle trian dataset
df = df.sample(frac=1).reset_index(drop=True)
# generate train batch
for i in range(num_batches_per_epoch):
start_idx = i * self.batch_size
end_idx = min((i + 1) * self.batch_size, df.shape[0])
batch_df = df.iloc[start_idx:end_idx]
batch_uids = list(batch_df.loc[:, 'userId'])
batch_mids = list(batch_df.loc[:, 'movieId'])
batch_bu_seq = np.array([self.bu_dict[u] for u in batch_uids], np.float)
batch_bm_seq = np.array([self.bm_dict[m] for m in batch_mids], np.float)
batch_u_oids = np.array([self.uid2order[u] for u in batch_uids])
batch_m_oids = np.array([self.mid2order[m] for m in batch_mids])
batch_info = self.info_mat[batch_m_oids, :]
batch_actors = self.actors_mat[batch_m_oids, :]
batch_descriptions = self.descriptions_mat[batch_m_oids, :]
batch_u_oids = np.reshape(batch_u_oids, (batch_u_oids.shape[0], 1))
batch_m_oids = np.reshape(batch_m_oids, (batch_m_oids.shape[0], 1))
X_user = (batch_u_oids, batch_bu_seq)
X_movie = (batch_m_oids, batch_info, batch_actors, batch_descriptions, batch_bm_seq)
X = (X_user, X_movie)
Y = np.array(batch_df.loc[:, 'rating'])
yield X, Y
def _dev_eval_iterator(self, df, num_sub):
# shuffle dev dataset
df = df.sample(frac=1).reset_index(drop=True)
num_sub_per_eval = df.shape[0] // num_sub + 1
# generate sub df for each dev
for i in range(num_sub_per_eval):
start_idx = i * num_sub
end_idx = min((i+1) * num_sub, df.shape[0])
sub_df = df.iloc[start_idx:end_idx]
sub_uids = list(sub_df.loc[:, 'userId'])
sub_mids = list(sub_df.loc[:, 'movieId'])
sub_bu_seq = np.array([self.bu_dict[u] for u in sub_uids], np.float)
sub_bm_seq = np.array([self.bm_dict[m] for m in sub_mids], np.float)
sub_u_oids = np.array([self.uid2order[u] for u in sub_uids])
sub_m_oids = np.array([self.mid2order[m] for m in sub_mids])
sub_info = self.info_mat[sub_m_oids, :]
sub_actors = self.actors_mat[sub_m_oids, :]
sub_descriptions = self.descriptions_mat[sub_m_oids, :]
sub_u_oids = np.reshape(sub_u_oids, (sub_u_oids.shape[0], 1))
sub_m_oids = np.reshape(sub_m_oids, (sub_m_oids.shape[0], 1))
X_user = (sub_u_oids, sub_bu_seq)
X_movie = (sub_m_oids, sub_info, sub_actors, sub_descriptions, sub_bm_seq)
X = (X_user, X_movie)
Y = np.array(sub_df.loc[:, 'rating'])
yield X, Y
def _generate_id_mappings(self):
df_all_ratings = pd.read_csv(self.all_ratings_path, header=0,
dtype={'userId': np.int32, 'movieId': np.int32,
'rating': np.float, 'timestamp': np.str})
all_uids = sorted(list(df_all_ratings.loc[:, 'userId'].unique()))
all_mids = sorted(list(df_all_ratings.loc[:, 'movieId'].unique()))
self.num_all_users = len(all_uids)
self.num_all_movies = len(all_mids)
self.uid2order = {u: i for i, u in enumerate(all_uids)}
self.order2uid = {i: u for i, u in enumerate(all_uids)}
self.mid2order = {m: i for i, m in enumerate(all_mids)}
self.order2mid = {i: m for i, m in enumerate(all_mids)}
self.mu = np.mean(df_all_ratings.loc[:, 'rating'])
return self
def _generate_bias(self):
df_users_bias = pd.read_csv(
self.users_bias, header=0, dtype={'userId':np.int32, 'bias':np.float})
df_movies_bias = pd.read_csv(
self.movies_bias, header=0, dtype={'movieId':np.int32, 'bias':np.float})
uids = list(df_users_bias.loc[:, 'userId'])
mids = list(df_movies_bias.loc[:, 'movieId'])
bu_seq = list(df_users_bias.loc[:, 'bias'])
bm_seq = list(df_movies_bias.loc[:, 'bias'])
self.bu_dict = dict(zip(uids, bu_seq))
self.bm_dict = dict(zip(mids, bm_seq))
return self
def _generate_info(self):
def _generate_year_range(year):
if year <= 1950:
year = 1950
elif year <= 1960:
year = 1960
elif year <= 1970:
year = 1970
elif year <= 1980:
year = 1980
elif year <= 1990:
year = 1990
return year
df_info = pd.read_csv(self.info_path, header=0,
dtype={'movieId': np.int32, 'title': np.str,
'genres': np.str, 'year': np.int32})
df_info.loc[:, 'year'] = df_info.loc[:, 'year'].apply(_generate_year_range)
df_info.loc[:, 'genres'] = df_info.loc[:, 'genres'].apply(lambda x: x.split('|'))
years = list(df_info.loc[:, 'year'])
genres = list(df_info.loc[:, 'genres'])
# build info vocabulary
all_info = list(set(years).union(set(itertools.chain(*genres))))
all_info += ['<OOI>'] # out of info
self.num_all_info = len(all_info)
info2id = {info: i for i, info in enumerate(all_info)}
# merge year into genres
info_list = genres.copy()
for i in range(len(years)):
info_list[i].append(years[i])
self.num_most_info = max([len(info) for info in info_list])
new_info_list = []
for info in info_list:
new_info = []
for i in range(self.num_most_info):
try:
new_info.append(info2id[info[i]])
except IndexError:
new_info.append(info2id['<OOI>'])
new_info_list.append(new_info)
# dimension = N * self.dim_onehot
self.info_mat = np.array(new_info_list)
print('have generated feature matrix, shape={}'.format(self.info_mat.shape))
return self
def _generate_actors(self):
# read all actors' name
df_all_actors = pd.read_csv(self.all_actors_path, header=0,
dtype={'name': np.str, 'times': np.int32})
# build actors vocabulary
selected_actors = list(
df_all_actors.loc[df_all_actors['times'] >= self.paly_times, 'name'])
selected_actors += ['<OTA>'] # other actors
self.num_all_main_actors = len(selected_actors)
actor2id = {a: i for i, a in enumerate(selected_actors)}
# read actors for each movie
df_actors = pd.read_csv(self.actors_path, header=0, dtype={
'movieId': np.int32, 'actors': np.str})
df_actors.loc[:, 'actors'] = df_actors.loc[:, 'actors'].apply(
lambda x: x.split('|'))
actors_list = list(df_actors.loc[:, 'actors'])
new_actors_list = []
for actors in actors_list:
new_actors = []
for i in range(self.num_main_actors):
try:
new_actors.append(actor2id[actors[i]])
except IndexError:
new_actors.append(actor2id['<OTA>'])
except KeyError:
new_actors.append(actor2id['<OTA>'])
new_actors_list.append(new_actors)
self.actors_mat = np.array(new_actors_list)
print('have generated actor matrix, shape={}'.format(self.actors_mat.shape))
return self
def _generate_descriptions(self):
df_info = pd.read_csv(self.info_path, header=0,
dtype={'movieId': np.int32, 'title': np.str,
'genres': np.str, 'year': np.str})
df_summaries = pd.read_csv(self.summaries_path, header=0,
dtype={'movieId': np.int32, 'summary': np.str})
df_storylines = pd.read_csv(self.storylines_path, header=0,
dtype={'movieId': np.int32, 'storyline': np.str})
titles = list(df_info.loc[:, 'title'])
summaries = list(df_summaries.loc[:, 'summary'])
storylines = list(df_storylines.loc[:, 'storyline'])
porter_stemmer = PorterStemmer()
stop = stopwords.words('english')
raw_descriptions = [clean_str('{} {} {}'.format(t, su, st))
for t, su, st in zip(titles, summaries, storylines)]
tokenized_descriptions = [[porter_stemmer.stem(word) for word in sent.split(' ') if word not in stop]
for sent in raw_descriptions]
noised_descriptions = generate_random_noise(tokenized_descriptions, self.noise_rate)
padded_descriptions = pad_sentence(
noised_descriptions, self.forced_seq_len)
token2id, _, self.vocab_size = build_vocab(
padded_descriptions, self.vocab_size)
descriptions = []
for sent in padded_descriptions:
description = []
for word in sent:
if word not in token2id:
word = '<OOV>'
description.append(token2id[word])
descriptions.append(description)
self.descriptions_mat = np.array(descriptions)
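# A minimal usage sketch (hypothetical values; the fnames dict keys follow the
# ones referenced in __init__, e.g. 'movies', 'actors', 'train_ratings', ...):
#   loader = DataLoad(data_path='data', fnames=fnames, forced_seq_len=300,
#                     vocab_size=20000, paly_times=3, num_main_actors=5,
#                     batch_size=256, num_epochs=10, noise_rate=0.1)
#   for X, Y in loader.load_data('train'):
#       (X_user, X_movie), ratings = X, Y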
# -*- coding: utf-8 -*-
# Import necessary modules
import numpy as np
import numpy.linalg as npla
import hoggorm.statTools as st
import hoggorm.cross_val as cv
class nipalsPCA:
"""
This class carries out Principal Component Analysis using the
NIPALS algorithm.
PARAMETERS
----------
arrX : numpy array
A numpy array containing the data
numComp : int, optional
An integer that defines how many components are to be computed
Xstand : boolean, optional
Defines whether variables in ``arrX`` are to be standardised/scaled or centered
False : columns of ``arrX`` are mean centred (default)
``Xstand = False``
True : columns of ``arrX`` are mean centred and divided by their own standard deviation
``Xstand = True``
cvType : list, optional
The list defines cross validation settings when computing the PCA model. Note if `cvType` is not provided, cross validation will not be performed and as such cross validation results will not be available. Choose cross validation type from the following:
loo : leave one out / a.k.a. full cross validation (default)
``cvType = ["loo"]``
KFold : leave out one fold or segment
``cvType = ["KFold", numFolds]``
numFolds: int
Number of folds or segments
lolo : leave one label out
``cvType = ["lolo", lablesList]``
lablesList: list
Sequence of lables. Must be same lenght as number of rows in ``arrX``. Leaves out objects with same lable.
RETURNS
-------
class
A class that contains the PCA model and computational results
EXAMPLES
--------
First import the hoggorm package.
>>> import hoggorm as ho
Import your data into a numpy array.
>>> myData
array([[ 5.7291665, 3.416667 , 3.175 , 2.6166668, 6.2208333],
[ 6.0749993, 2.7416666, 3.6333339, 3.3833334, 6.1708336],
[ 6.1166663, 3.4916666, 3.5208333, 2.7125003, 6.1625004],
...,
[ 6.3333335, 2.3166668, 4.1249995, 4.3541665, 6.7500005],
[ 5.8250003, 4.8291669, 1.4958333, 1.0958334, 6.0999999],
[ 5.6499996, 4.6624999, 1.9291668, 1.0749999, 6.0249996]])
>>> np.shape(myData)
(14, 5)
Examples of how to compute a PCA model using different settings for the input parameters.
>>> model = ho.nipalsPCA(arrX=myData, numComp=5, Xstand=False)
>>> model = ho.nipalsPCA(arrX=myData)
>>> model = ho.nipalsPCA(arrX=myData, numComp=3)
>>> model = ho.nipalsPCA(arrX=myData, Xstand=True)
>>> model = ho.nipalsPCA(arrX=myData, cvType=["loo"])
>>> model = ho.nipalsPCA(arrX=myData, cvType=["KFold", 4])
>>> model = ho.nipalsPCA(arrX=myData, cvType=["lolo", [1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7]])
Examples of how to extract results from the PCA model.
>>> scores = model.X_scores()
>>> loadings = model.X_loadings()
>>> cumulativeCalibratedExplainedVariance_allVariables = model.X_cumCalExplVar_indVar()
"""
def __init__(self, arrX, numComp=None, Xstand=False, cvType=None):
"""
On initialisation check how arrX and arrY are to be pre-processed
(Xstand and Ystand are either True or False). Then check whether
number of components chosen by user is OK.
"""
# ===============================================================================
# Check what is provided by user
# ===============================================================================
# Define X and y within class such that the data can be accessed from
# all attributes in class.
self.arrX_input = arrX
# Check whether cvType is provided. If NOT, then no cross validation
# is carried out.
self.cvType = cvType
# Define maximum number of components to compute depending on whether
# cross validation was selected or not.
if isinstance(self.cvType, type(None)):
maxNumPC = min(np.shape(self.arrX_input))
else:
# Depict the number of components that are possible to compute based
# on size of data set (#rows, #cols), type of cross validation (i.e.
# size of CV segments)
numObj = np.shape(self.arrX_input)[0]
# Compute the sizes of training sets in CV
if self.cvType[0] == "loo":
cvComb = cv.LeaveOneOut(numObj)
elif self.cvType[0] == "KFold":
cvComb = cv.KFold(numObj, k=self.cvType[1])
elif self.cvType[0] == "lolo":
cvComb = cv.LeaveOneLabelOut(self.cvType[1])
else:
print("Requested form of cross validation is not available")
pass
# First divide into combinations of training and test sets. Collect
# sizes of training sets, since this also may limit the number of
# components that can be computed.
segSizes = []
for train_index, test_index in cvComb:
x_train, x_test = cv.split(train_index, test_index,
self.arrX_input)
segSizes.append(numObj - sum(train_index))
# Compute the max number of components based on only object size
maxN = numObj - max(segSizes) - 1
# Choose whatever is smaller, number of variables or maxN
maxNumPC = min(np.shape(arrX)[1], maxN)
# Now set the number of components that is possible to compute.
if numComp is None:
self.numPC = maxNumPC
else:
if numComp > maxNumPC:
self.numPC = maxNumPC
else:
self.numPC = numComp
# Pre-process data according to user request.
# -------------------------------------------
# Check whether standardisation of X and Y are requested by user. If
# NOT, then X and y are centred by default.
self.Xstand = Xstand
# Standardise X if requested by user, otherwise center X.
if self.Xstand:
self.Xmeans = np.average(self.arrX_input, axis=0)
self.Xstd = np.std(self.arrX_input, axis=0, ddof=1)
self.arrX = (self.arrX_input - self.Xmeans) / self.Xstd
else:
self.Xmeans = np.average(self.arrX_input, axis=0)
self.arrX = self.arrX_input - self.Xmeans
# Before the NIPALS PCA algorithm starts, initialise the lists in which
# results will be stored.
self.X_scoresList = []
self.X_loadingsList = []
self.X_loadingsWeightsList = []
self.coeffList = []
self.X_residualsList = [self.arrX]
# Collect residual matrices/arrays after each computed component
self.resids = {}
self.X_residualsDict = {}
# Collect predicted matrices/array Xhat after each computed component
self.calXhatDict_singPC = {}
# Collect explained variance in each component
self.calExplainedVariancesDict = {}
self.X_calExplainedVariancesList = []
# ===============================================================================
# Here the NIPALS PCA algorithm on X starts
# ===============================================================================
threshold = 1.0e-8
X_new = self.arrX.copy()
# Compute number of principal components as specified by user
for j in range(self.numPC):
# Check if first column contains only zeros. If yes, then
# NIPALS will not converge and (npla.norm(num) will contain
# nan's). Rather put in other starting values.
if not np.any(X_new[:, 0]):
X_repl_nonCent = np.arange(np.shape(X_new)[0])
X_repl = X_repl_nonCent - np.mean(X_repl_nonCent)
t = X_repl.reshape(-1, 1)
else:
t = X_new[:, 0].reshape(-1, 1)
# Iterate until score vector converges according to threshold
while 1:
num = np.dot(np.transpose(X_new), t)
denom = npla.norm(num)
p = num / denom
t_new = np.dot(X_new, p)
diff = t - t_new
t = t_new.copy()
SS = np.sum(np.square(diff))
# Check whether sum of squares is smaller than threshold. Break
# out of loop if true and start computation of next component.
if SS < threshold:
self.X_scoresList.append(t)
self.X_loadingsList.append(p)
break
# Peel off information explained by actual component and continue with
# decomposition on the residuals (X_new = E).
X_old = X_new.copy()
Xhat_j = np.dot(t, np.transpose(p))
X_new = X_old - Xhat_j
# Store residuals E and Xhat in their dictionaries
self.X_residualsDict[j + 1] = X_new
self.calXhatDict_singPC[j + 1] = Xhat_j
if self.Xstand:
self.calXhatDict_singPC[j +
1] = (Xhat_j * self.Xstd) + self.Xmeans
else:
self.calXhatDict_singPC[j + 1] = Xhat_j + self.Xmeans
# Collect scores and loadings for the actual component.
self.arrT = np.hstack(self.X_scoresList)
self.arrP = np.hstack(self.X_loadingsList)
# ==============================================================================
# From here computation of CALIBRATED explained variance starts
# ==============================================================================
# ========== COMPUTATIONS FOR X ==========
# ---------------------------------------------------------------------
# Create a list holding arrays of Xhat predicted calibration after each
# component. Xhat is computed with Xhat = T*P'
self.calXpredList = []
# Compute Xhat for 1 and more components (cumulatively).
for ind in range(1, self.numPC + 1):
part_arrT = self.arrT[:, 0:ind]
part_arrP = self.arrP[:, 0:ind]
predXcal = np.dot(part_arrT, np.transpose(part_arrP))
if self.Xstand:
Xhat = (predXcal * self.Xstd) + self.Xmeans
else:
Xhat = predXcal + self.Xmeans
self.calXpredList.append(Xhat)
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Collect all PRESSE for individual variables in a dictionary.
# Keys represent number of component.
self.PRESSEdict_indVar_X = {}
# Compute PRESS for calibration / estimation
PRESSE_0_indVar_X = np.sum(np.square(st.center(self.arrX_input)),
axis=0)
self.PRESSEdict_indVar_X[0] = PRESSE_0_indVar_X
# Compute PRESS for each Xhat for 1, 2, 3, etc number of components
# and compute explained variance
for ind, Xhat in enumerate(self.calXpredList):
diffX = self.arrX_input - Xhat
PRESSE_indVar_X = np.sum(np.square(diffX), axis=0)
self.PRESSEdict_indVar_X[ind + 1] = PRESSE_indVar_X
# Now store all PRESSE values into an array. Then compute MSEE and
# RMSEE.
self.PRESSEarr_indVar_X = np.array(
list(self.PRESSEdict_indVar_X.values()))
self.MSEEarr_indVar_X = self.PRESSEarr_indVar_X / np.shape(
self.arrX_input)[0]
self.RMSEEarr_indVar_X = np.sqrt(self.MSEEarr_indVar_X)
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Compute explained variance for each variable in X using the
# MSEE for each variable. Also collect PRESSE, MSEE, RMSEE in
# their respective dictionaries for each variable. Keys represent
# now variables and NOT components as above with
# self.PRESSEdict_indVar_X
self.cumCalExplVarXarr_indVar = np.zeros(
np.shape(self.MSEEarr_indVar_X))
MSEE_0_indVar_X = self.MSEEarr_indVar_X[0, :]
for ind, MSEE_indVar_X in enumerate(self.MSEEarr_indVar_X):
explVar = (MSEE_0_indVar_X - MSEE_indVar_X) / MSEE_0_indVar_X * 100
self.cumCalExplVarXarr_indVar[ind] = explVar
self.PRESSE_indVar_X = {}
self.MSEE_indVar_X = {}
self.RMSEE_indVar_X = {}
self.cumCalExplVarX_indVar = {}
for ind in range(np.shape(self.PRESSEarr_indVar_X)[1]):
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
from functools import partial
from collections import OrderedDict
import pickle
from pathlib import Path
import numpy as np
import cv2
from ..base_evaluator import BaseEvaluator
from ..quantization_model_evaluator import create_dataset_attributes
from ...adapters import create_adapter, MTCNNPAdapter
from ...launcher import create_launcher, InputFeeder
from ...preprocessor import PreprocessingExecutor
from ...utils import extract_image_representations, read_pickle, contains_any, get_path
from ...config import ConfigError
from ...progress_reporters import ProgressReporter
from ...logging import print_info
def build_stages(models_info, preprocessors_config, launcher, model_args, delayed_model_loading=False):
required_stages = ['pnet']
stages_mapping = OrderedDict([
('pnet', {'caffe': CaffeProposalStage, 'dlsdk': DLSDKProposalStage, 'dummy': DummyProposalStage}),
('rnet', {'caffe': CaffeRefineStage, 'dlsdk': DLSDKRefineStage}),
('onet', {'caffe': CaffeOutputStage, 'dlsdk': DLSDKOutputStage})
])
framework = launcher.config['framework']
common_preprocessor = PreprocessingExecutor(preprocessors_config)
stages = OrderedDict()
for stage_name, stage_classes in stages_mapping.items():
if stage_name not in models_info:
if stage_name not in required_stages:
continue
raise ConfigError('{} required for evaluation'.format(stage_name))
model_config = models_info[stage_name]
if 'predictions' in model_config and not model_config.get('store_predictions', False):
stage_framework = 'dummy'
else:
stage_framework = framework
if not delayed_model_loading:
if not contains_any(model_config, ['model', 'caffe_model']) and stage_framework != 'dummy':
if model_args:
model_config['model'] = model_args[len(stages) if len(model_args) > 1 else 0]
stage = stage_classes.get(stage_framework)
if not stage_classes:
raise ConfigError('{} stage does not support {} framework'.format(stage_name, stage_framework))
stage_preprocess = models_info[stage_name].get('preprocessing', [])
model_specific_preprocessor = PreprocessingExecutor(stage_preprocess)
stages[stage_name] = stage(
models_info[stage_name], model_specific_preprocessor, common_preprocessor, launcher, delayed_model_loading
)
if not stages:
raise ConfigError('please provide information about MTCNN pipeline stages')
return stages
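# A minimal wiring sketch (hypothetical; it only illustrates how the pieces in
# this file fit together): build_stages() returns the ordered pnet/rnet/onet
# stages, which are then handed to the evaluator defined further down.
#   stages = build_stages(models_info, preprocessors_config, launcher, model_args)
#   evaluator = MTCNNEvaluator(dataset_config, launcher, stages)
#   evaluator.process_dataset(check_progress=True)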
class BaseStage:
def __init__(self, model_info, model_specific_preprocessor, common_preprocessor, delayed_model_loading=False):
self.model_info = model_info
self.model_specific_preprocessor = model_specific_preprocessor
self.common_preprocessor = common_preprocessor
self.input_feeder = None
self.store = model_info.get('store_predictions', False)
self.predictions = []
def predict(self, input_blobs, batch_meta, output_callback=None):
raise NotImplementedError
def preprocess_data(self, batch_input, batch_annotation, previous_stage_prediction, *args, **kwargs):
raise NotImplementedError
def postprocess_result(self, identifiers, this_stage_result, batch_meta, previous_stage_result, *args, **kwargs):
raise NotImplementedError
def release(self):
pass
def reset(self):
self._predictions = []
def dump_predictions(self):
if not hasattr(self, 'prediction_file'):
prediction_file = Path(self.model_info.get('predictions', 'predictions.pickle'))
self.prediction_file = prediction_file
with self.prediction_file.open('wb') as out_file:
pickle.dump(self._predictions, out_file)
def update_preprocessing(self, preprocessor):
self.common_preprocessor = preprocessor
class ProposalBaseStage(BaseStage):
default_model_name = 'mtcnn-p'
def __init__(self, model_info, model_specific_preprocessor, common_preprocessor, delayed_model_loading=False):
super().__init__(model_info, model_specific_preprocessor, common_preprocessor)
self.adapter = None
self.input_feeder = None
self._predictions = []
def preprocess_data(self, batch_input, batch_annotation, *args, **kwargs):
batch_input = self.model_specific_preprocessor.process(batch_input, batch_annotation)
batch_input = self.common_preprocessor.process(batch_input, batch_annotation)
_, batch_meta = extract_image_representations(batch_input)
filled_inputs = self.input_feeder.fill_inputs(batch_input) if self.input_feeder else batch_input
return filled_inputs, batch_meta
def postprocess_result(self, identifiers, this_stage_result, batch_meta, *args, **kwargs):
result = self.adapter.process(this_stage_result, identifiers, batch_meta) if self.adapter else this_stage_result
if self.store:
self._predictions.extend(result)
return result
def _infer(self, input_blobs, batch_meta):
raise NotImplementedError
def predict(self, input_blobs, batch_meta, output_callback=None):
return self._infer(input_blobs, batch_meta)
def dump_predictions(self):
if not hasattr(self, 'prediction_file'):
prediction_file = Path(self.model_info.get('predictions', 'pnet_predictions.pickle'))
self.prediction_file = prediction_file
with self.prediction_file.open('wb') as out_file:
pickle.dump(self._predictions, out_file)
class DummyProposalStage(ProposalBaseStage):
def __init__(self, model_info, model_specific_preprocessor, common_preprocessor, *args, **kwargs):
super().__init__(model_info, model_specific_preprocessor, common_preprocessor)
self._index = 0
if 'predictions' not in self.model_info:
raise ConfigError('predictions_file is not found')
self._predictions = read_pickle(self.model_info['predictions'])
self.iterator = 0
def preprocess_data(self, batch_input, batch_annotation, *args, **kwargs):
_, batch_meta = extract_image_representations(batch_input)
return batch_input, batch_meta
def _infer(self, input_blobs, batch_meta):
batch_size = len(batch_meta)
results = self._predictions[self._index:self._index+batch_size]
self._index += batch_size
return results
def postprocess_result(self, identifiers, this_stage_result, batch_meta, *args, **kwargs):
return this_stage_result
class RefineBaseStage(BaseStage):
input_size = 24
include_boundaries = True
default_model_name = 'mtcnn-r'
def preprocess_data(self, batch_input, batch_annotation, previous_stage_prediction, *args, **kwargs):
batch_input = self.model_specific_preprocessor.process(batch_input, batch_annotation)
batch_input = self.common_preprocessor.process(batch_input, batch_annotation)
_, batch_meta = extract_image_representations(batch_input)
batch_input = [
cut_roi(input_image, prediction, self.input_size, include_bound=self.include_boundaries)
for input_image, prediction in zip(batch_input, previous_stage_prediction)
]
filled_inputs = self.input_feeder.fill_inputs(batch_input) if self.input_feeder else batch_input
return filled_inputs, batch_meta
def postprocess_result(self, identifiers, this_stage_result, batch_meta, previous_stage_result, *args, **kwargs):
result = calibrate_predictions(
previous_stage_result, this_stage_result, 0.7, self.model_info['outputs'], 'Union'
)
if self.store:
self._predictions.extend(result)
return result
def _infer(self, input_blobs, batch_meta):
raise NotImplementedError
def predict(self, input_blobs, batch_meta, output_callback=None):
return self._infer(input_blobs, batch_meta)
def dump_predictions(self):
if not hasattr(self, 'prediction_file'):
prediction_file = Path(self.model_info.get('predictions', 'rnet_predictions.pickle'))
self.prediction_file = prediction_file
with self.prediction_file.open('wb') as out_file:
pickle.dump(self._predictions, out_file)
class OutputBaseStage(RefineBaseStage):
input_size = 48
include_boundaries = False
default_model_name = 'mtcnn-o'
def _infer(self, input_blobs, batch_meta):
raise NotImplementedError
def postprocess_result(self, identifiers, this_stage_result, batch_meta, previous_stage_result, *args, **kwargs):
batch_predictions = calibrate_predictions(
previous_stage_result, this_stage_result, 0.7, self.model_info['outputs']
)
batch_predictions[0], _ = nms(batch_predictions[0], 0.7, 'Min')
if self.store:
self._predictions.extend(batch_predictions)
return batch_predictions
def dump_predictions(self):
if not hasattr(self, 'prediction_file'):
prediction_file = Path(self.model_info.get('predictions', 'onet_predictions.pickle'))
self.prediction_file = prediction_file
with self.prediction_file.open('wb') as out_file:
pickle.dump(self._predictions, out_file)
class CaffeModelMixin:
def _infer(self, input_blobs, batch_meta, *args, **kwargs):
for meta in batch_meta:
meta['input_shape'] = []
results = []
for feed_dict in input_blobs:
for layer_name, data in feed_dict.items():
if data.shape != self.inputs[layer_name]:
self.net.blobs[layer_name].reshape(*data.shape)
for meta in batch_meta:
meta['input_shape'].append(self.inputs)
results.append(self.net.forward(**feed_dict))
return results
@property
def inputs(self):
inputs_map = {}
for input_blob in self.net.inputs:
inputs_map[input_blob] = self.net.blobs[input_blob].data.shape
return inputs_map
def release(self):
del self.net
def fit_to_input(self, data, layer_name, layout, precision):
data_shape = np.shape(data)
layer_shape = self.inputs[layer_name]
if len(data_shape) == 5 and len(layer_shape) == 4:
data = data[0]
data_shape = np.shape(data)
data = np.transpose(data, layout) if len(data_shape) == 4 else np.array(data)
if precision:
data = data.astype(precision)
return data
def automatic_model_search(self, network_info):
model = Path(network_info.get('model', ''))
weights = network_info.get('weights')
if model.is_dir():
models_list = list(Path(model).glob('{}.prototxt'.format(self.default_model_name)))
if not models_list:
models_list = list(Path(model).glob('*.prototxt'))
if not models_list:
raise ConfigError('Suitable model description is not detected')
if len(models_list) != 1:
raise ConfigError('Several suitable models found, please specify required model')
model = models_list[0]
if weights is None or Path(weights).is_dir():
weights_dir = weights or model.parent
weights = Path(weights_dir) / model.name.replace('prototxt', 'caffemodel')
if not weights.exists():
weights_list = list(weights_dir.glob('*.caffemodel'))
if not weights_list:
raise ConfigError('Suitable weights is not detected')
if len(weights_list) != 1:
raise ConfigError('Several suitable weights found, please specify required explicitly')
weights = weights_list[0]
weights = Path(weights)
return model, weights
class DLSDKModelMixin:
def _infer(self, input_blobs, batch_meta):
for meta in batch_meta:
meta['input_shape'] = []
results = []
for feed_dict in input_blobs:
input_shapes = {layer_name: data.shape for layer_name, data in feed_dict.items()}
self._reshape_input(input_shapes)
results.append(self.exec_network.infer(feed_dict))
for meta in batch_meta:
meta['input_shape'].append(input_shapes)
return results
def _reshape_input(self, input_shapes):
del self.exec_network
self.network.reshape(input_shapes)
self.exec_network = self.launcher.ie_core.load_network(self.network, self.launcher.device)
@property
def inputs(self):
has_info = hasattr(self.exec_network, 'input_info')
if not has_info:
return self.exec_network.inputs
return OrderedDict([(name, data.input_data) for name, data in self.exec_network.input_info.items()])
def release(self):
self.input_feeder.release()
del self.network
del self.exec_network
self.launcher.release()
def fit_to_input(self, data, layer_name, layout, precision):
layer_shape = tuple(self.inputs[layer_name].shape)
data_shape = np.shape(data)
if len(layer_shape) == 4:
if len(data_shape) == 5:
data = data[0]
data = np.transpose(data, layout)
if precision:
data = data.astype(precision)
return data
def prepare_model(self, launcher):
launcher_specific_entries = [
'model', 'weights', 'caffe_model', 'caffe_weights', 'tf_model', 'inputs', 'outputs', '_model_optimizer'
]
def update_mo_params(launcher_config, model_config):
for entry in launcher_specific_entries:
if entry not in launcher_config:
continue
if entry in model_config:
continue
model_config[entry] = launcher_config[entry]
model_mo_flags, model_mo_params = model_config.get('mo_flags', []), model_config.get('mo_params', {})
launcher_mo_flags, launcher_mo_params = launcher_config.get('mo_flags', []), launcher_config.get(
'mo_params', {})
for launcher_flag in launcher_mo_flags:
if launcher_flag not in model_mo_flags:
model_mo_flags.append(launcher_flag)
for launcher_mo_key, launcher_mo_value in launcher_mo_params.items():
if launcher_mo_key not in model_mo_params:
model_mo_params[launcher_mo_key] = launcher_mo_value
model_config['mo_flags'] = model_mo_flags
model_config['mo_params'] = model_mo_params
update_mo_params(launcher.config, self.model_info)
if 'caffe_model' in self.model_info:
model, weights = launcher.convert_model(self.model_info)
else:
model, weights = self.auto_model_search(self.model_info)
return model, weights
def auto_model_search(self, network_info):
model = Path(network_info.get('model', ''))
weights = network_info.get('weights')
if model.is_dir():
models_list = list(Path(model).glob('{}.xml'.format(self.default_model_name)))
if not models_list:
models_list = list(Path(model).glob('*.xml'))
if not models_list:
raise ConfigError('Suitable model description is not detected')
if len(models_list) != 1:
raise ConfigError('Several suitable models found, please specify required model')
model = models_list[0]
print_info('{} - Found model: {}'.format(self.default_model_name, model))
if weights is None or Path(weights).is_dir():
weights_dir = weights or model.parent
weights = Path(weights_dir) / model.name.replace('xml', 'bin')
if not weights.exists():
weights_list = list(weights_dir.glob('*.bin'))
if not weights_list:
raise ConfigError('Suitable weights is not detected')
if len(weights_list) != 1:
raise ConfigError('Several suitable weights found, please specify required explicitly')
weights = weights_list[0]
weights = get_path(weights)
print_info('{} - Found weights: {}'.format(self.default_model_name, weights))
return model, weights
def load_network(self, network, launcher, model_prefix):
self.network = network
self.exec_network = launcher.ie_core.load_network(network, launcher.device)
self.update_input_output_info(model_prefix)
self.input_feeder = InputFeeder(self.model_info.get('inputs', []), self.inputs, self.fit_to_input)
def load_model(self, network_info, launcher, model_prefix=None, log=False):
self.network = launcher.read_network(str(network_info['model']), str(network_info['weights']))
self.exec_network = launcher.ie_core.load_network(self.network, launcher.device)
self.launcher = launcher
self.update_input_output_info(model_prefix)
self.input_feeder = InputFeeder(self.model_info.get('inputs', []), self.inputs, self.fit_to_input)
if log:
self.print_input_output_info()
def print_input_output_info(self):
print_info('{} - Input info:'.format(self.default_model_name))
has_info = hasattr(self.network if self.network is not None else self.exec_network, 'input_info')
if self.network:
if has_info:
network_inputs = OrderedDict(
[(name, data.input_data) for name, data in self.network.input_info.items()]
)
else:
network_inputs = self.network.inputs
network_outputs = self.network.outputs
else:
if has_info:
network_inputs = OrderedDict([
(name, data.input_data) for name, data in self.exec_network.input_info.items()
])
else:
network_inputs = self.exec_network.inputs
network_outputs = self.exec_network.outputs
for name, input_info in network_inputs.items():
print_info('\tLayer name: {}'.format(name))
print_info('\tprecision: {}'.format(input_info.precision))
print_info('\tshape {}\n'.format(input_info.shape))
print_info('{} - Output info'.format(self.default_model_name))
for name, output_info in network_outputs.items():
print_info('\tLayer name: {}'.format(name))
print_info('\tprecision: {}'.format(output_info.precision))
print_info('\tshape: {}\n'.format(output_info.shape))
def update_input_output_info(self, model_prefix):
def generate_name(prefix, with_prefix, layer_name):
return prefix + layer_name if with_prefix else layer_name.split(prefix)[-1]
if model_prefix is None:
return
config_inputs = self.model_info.get('inputs', [])
network_with_prefix = next(iter(self.inputs)).startswith(model_prefix)
if config_inputs:
config_with_prefix = config_inputs[0]['name'].startswith(model_prefix)
if config_with_prefix == network_with_prefix:
return
for c_input in config_inputs:
c_input['name'] = generate_name(model_prefix, network_with_prefix, c_input['name'])
self.model_info['inputs'] = config_inputs
config_outputs = self.model_info['outputs']
for key, value in config_outputs.items():
config_with_prefix = value.startswith(model_prefix)
if config_with_prefix != network_with_prefix:
config_outputs[key] = generate_name(model_prefix, network_with_prefix, value)
self.model_info['outputs'] = config_outputs
class CaffeProposalStage(CaffeModelMixin, ProposalBaseStage):
def __init__(self, model_info, model_specific_preprocessor, common_preprocessor, launcher, *args, **kwargs):
super().__init__(model_info, model_specific_preprocessor, common_preprocessor)
self.net = launcher.create_network(self.model_info['model'], self.model_info['weights'])
self.input_feeder = InputFeeder(model_info.get('inputs', []), self.inputs, self.fit_to_input)
pnet_outs = model_info['outputs']
pnet_adapter_config = launcher.config.get('adapter', {'type': 'mtcnn_p', **pnet_outs})
pnet_adapter_config.update({'regions_format': 'hw'})
self.adapter = create_adapter(pnet_adapter_config)
class CaffeRefineStage(CaffeModelMixin, RefineBaseStage):
def __init__(self, model_info, model_specific_preprocessor, common_preprocessor, launcher, *args, **kwargs):
super().__init__(model_info, model_specific_preprocessor, common_preprocessor)
self.net = launcher.create_network(self.model_info['model'], self.model_info['weights'])
self.input_feeder = InputFeeder(model_info.get('inputs', []), self.inputs, self.fit_to_input)
class CaffeOutputStage(CaffeModelMixin, OutputBaseStage):
def __init__(self, model_info, model_specific_preprocessor, common_preprocessor, launcher):
super().__init__(model_info, model_specific_preprocessor, common_preprocessor)
self.net = launcher.create_network(self.model_info['model'], self.model_info['weights'])
self.input_feeder = InputFeeder(model_info.get('inputs', []), self.inputs, self.fit_to_input)
class DLSDKProposalStage(DLSDKModelMixin, ProposalBaseStage):
def __init__(
self, model_info, model_specific_preprocessor, common_preprocessor, launcher, delayed_model_loading=False
):
super().__init__(model_info, model_specific_preprocessor, common_preprocessor)
self.adapter = None
if not delayed_model_loading:
model_xml, model_bin = self.prepare_model(launcher)
self.load_model({'model': model_xml, 'weights': model_bin}, launcher, 'pnet_', log=True)
pnet_outs = model_info['outputs']
pnet_adapter_config = launcher.config.get('adapter', {'type': 'mtcnn_p', **pnet_outs})
# pnet_adapter_config.update({'regions_format': 'hw'})
self.adapter = create_adapter(pnet_adapter_config)
def load_network(self, network, launcher, model_prefix):
self.network = network
self.exec_network = launcher.ie_core.load_network(network, launcher.device)
self.update_input_output_info(model_prefix)
self.input_feeder = InputFeeder(self.model_info.get('inputs', []), self.inputs, self.fit_to_input)
pnet_outs = self.model_info['outputs']
pnet_adapter_config = launcher.config.get('adapter', {'type': 'mtcnn_p', **pnet_outs})
self.adapter = create_adapter(pnet_adapter_config)
def load_model(self, network_info, launcher, model_prefix=None, log=False):
self.network = launcher.read_network(str(network_info['model']), str(network_info['weights']))
self.exec_network = launcher.ie_core.load_network(self.network, launcher.device)
self.launcher = launcher
self.update_input_output_info(model_prefix)
self.input_feeder = InputFeeder(self.model_info.get('inputs', []), self.inputs, self.fit_to_input)
pnet_outs = self.model_info['outputs']
pnet_adapter_config = launcher.config.get('adapter', {'type': 'mtcnn_p', **pnet_outs})
self.adapter = create_adapter(pnet_adapter_config)
if log:
self.print_input_output_info()
def predict(self, input_blobs, batch_meta, output_callback=None):
raw_outputs = self._infer(input_blobs, batch_meta)
if output_callback:
for out in raw_outputs:
output_callback(out)
return raw_outputs
class DLSDKRefineStage(DLSDKModelMixin, RefineBaseStage):
def __init__(
self, model_info, model_specific_preprocessor, common_preprocessor, launcher, delayed_model_loading=False
):
super().__init__(model_info, model_specific_preprocessor, common_preprocessor)
if not delayed_model_loading:
model_xml, model_bin = self.prepare_model(launcher)
self.load_model({'model': model_xml, 'weights': model_bin}, launcher, 'rnet_', log=True)
def predict(self, input_blobs, batch_meta, output_callback=None):
raw_outputs = self._infer(input_blobs, batch_meta)
if output_callback:
batch_size = np.shape(next(iter(input_blobs[0].values())))[0]
output_callback(self.transform_for_callback(batch_size, raw_outputs))
return raw_outputs
@staticmethod
def transform_for_callback(batch_size, raw_outputs):
output_per_box = []
fq_weights = []
for i in range(batch_size):
box_outs = OrderedDict()
for layer_name, data in raw_outputs[0].items():
if layer_name in fq_weights:
continue
if layer_name.endswith('fq_weights_1'):
fq_weights.append(layer_name)
box_outs[layer_name] = data
else:
box_outs[layer_name] = np.expand_dims(data[i], axis=0)
output_per_box.append(box_outs)
return output_per_box
class DLSDKOutputStage(DLSDKModelMixin, OutputBaseStage):
def __init__(
self, model_info, model_specific_preprocessor, common_preprocessor, launcher, delayed_model_loading=False
):
super().__init__(model_info, model_specific_preprocessor, common_preprocessor)
if not delayed_model_loading:
model_xml, model_bin = self.prepare_model(launcher)
self.load_model({'model': model_xml, 'weights': model_bin}, launcher, 'onet_', log=True)
def predict(self, input_blobs, batch_meta, output_callback=None):
raw_outputs = self._infer(input_blobs, batch_meta)
return raw_outputs
@staticmethod
def transform_for_callback(batch_size, raw_outputs):
output_per_box = []
fq_weights = []
for i in range(batch_size):
box_outs = OrderedDict()
for layer_name, data in raw_outputs[0].items():
if layer_name in fq_weights:
continue
if layer_name.endswith('fq_weights_1'):
fq_weights.append(layer_name)
box_outs[layer_name] = data
else:
box_outs[layer_name] = np.expand_dims(data[i], axis=0)
output_per_box.append(box_outs)
return output_per_box
class MTCNNEvaluator(BaseEvaluator):
def __init__(
self, dataset_config, launcher, stages
):
self.dataset_config = dataset_config
self.stages = stages
self.launcher = launcher
self.dataset = None
self.postprocessor = None
self.metric_executor = None
self._annotations, self._predictions, self._metrics_results = [], [], []
def process_dataset(
self, subset=None,
num_images=None,
check_progress=False,
dataset_tag='',
output_callback=None,
allow_pairwise_subset=False,
dump_prediction_to_annotation=False,
calculate_metrics=True,
**kwargs):
def no_detections(batch_pred):
return batch_pred[0].size == 0
self._prepare_dataset(dataset_tag)
self._create_subset(subset, num_images, allow_pairwise_subset)
_progress_reporter = self._prepare_progress_reporter(check_progress, kwargs.get('progress_reporter'))
compute_intermediate_metric_res = kwargs.get('intermediate_metrics_results', False)
if compute_intermediate_metric_res:
metric_interval = kwargs.get('metrics_interval', 1000)
ignore_results_formatting = kwargs.get('ignore_results_formatting', False)
for batch_id, (batch_input_ids, batch_annotation, batch_inputs, batch_identifiers) in enumerate(self.dataset):
batch_prediction = []
batch_raw_prediction = []
intermediate_callback = None
if output_callback:
intermediate_callback = partial(output_callback,
metrics_result=None,
element_identifiers=batch_identifiers,
dataset_indices=batch_input_ids)
batch_size = 1
for stage in self.stages.values():
previous_stage_predictions = batch_prediction
filled_inputs, batch_meta = stage.preprocess_data(
copy.deepcopy(batch_inputs), batch_annotation, previous_stage_predictions
)
batch_raw_prediction = stage.predict(filled_inputs, batch_meta, intermediate_callback)
batch_size = np.shape(next(iter(filled_inputs[0].values())))[0]
batch_prediction = stage.postprocess_result(
batch_identifiers, batch_raw_prediction, batch_meta, previous_stage_predictions
)
if no_detections(batch_prediction):
break
batch_annotation, batch_prediction = self.postprocessor.process_batch(batch_annotation, batch_prediction)
metrics_result = None
if self.metric_executor:
metrics_result, _ = self.metric_executor.update_metrics_on_batch(
batch_input_ids, batch_annotation, batch_prediction
)
if self.metric_executor.need_store_predictions:
self._annotations.extend(batch_annotation)
self._predictions.extend(batch_prediction)
if output_callback:
output_callback(
list(self.stages.values())[-1].transform_for_callback(batch_size, batch_raw_prediction),
metrics_result=metrics_result,
element_identifiers=batch_identifiers,
dataset_indices=batch_input_ids
)
if _progress_reporter:
_progress_reporter.update(batch_id, len(batch_prediction))
if compute_intermediate_metric_res and _progress_reporter.current % metric_interval == 0:
self.compute_metrics(
print_results=True, ignore_results_formatting=ignore_results_formatting
)
if _progress_reporter:
_progress_reporter.finish()
def compute_metrics(self, print_results=True, ignore_results_formatting=False):
if self._metrics_results:
del self._metrics_results
self._metrics_results = []
for result_presenter, evaluated_metric in self.metric_executor.iterate_metrics(
self._annotations, self._predictions):
self._metrics_results.append(evaluated_metric)
if print_results:
result_presenter.write_result(evaluated_metric, ignore_results_formatting)
return self._metrics_results
def extract_metrics_results(self, print_results=True, ignore_results_formatting=False):
if not self._metrics_results:
self.compute_metrics(False, ignore_results_formatting)
result_presenters = self.metric_executor.get_metric_presenters()
extracted_results, extracted_meta = [], []
for presenter, metric_result in zip(result_presenters, self._metrics_results):
result, metadata = presenter.extract_result(metric_result)
if isinstance(result, list):
extracted_results.extend(result)
extracted_meta.extend(metadata)
else:
extracted_results.append(result)
extracted_meta.append(metadata)
if print_results:
presenter.write_result(metric_result, ignore_results_formatting)
return extracted_results, extracted_meta
def print_metrics_results(self, ignore_results_formatting=False):
if not self._metrics_results:
self.compute_metrics(True, ignore_results_formatting)
return
        result_presenters = self.metric_executor.get_metric_presenters()
for presenter, metric_result in zip(result_presenters, self._metrics_results):
presenter.write_result(metric_result, ignore_results_formatting)
@classmethod
def from_configs(cls, config, delayed_model_loading=False):
dataset_config = config['datasets']
launcher_config = config['launchers'][0]
if launcher_config['framework'] == 'dlsdk' and 'device' not in launcher_config:
launcher_config['device'] = 'CPU'
models_info = config['network_info']
launcher = create_launcher(launcher_config, delayed_model_loading=True)
stages = build_stages(models_info, [], launcher, config.get('_models'), delayed_model_loading)
return cls(dataset_config, launcher, stages)
@staticmethod
def get_processing_info(config):
module_specific_params = config.get('module_config')
model_name = config['name']
dataset_config = module_specific_params['datasets'][0]
launcher_config = module_specific_params['launchers'][0]
return (
model_name, launcher_config['framework'], launcher_config['device'], launcher_config.get('tags'),
dataset_config['name']
)
def set_profiling_dir(self, profiler_dir):
self.metric_executor.set_profiling_dir(profiler_dir)
def release(self):
for _, stage in self.stages.items():
stage.release()
self.launcher.release()
def reset(self):
if self.metric_executor:
self.metric_executor.reset()
if hasattr(self, '_annotations'):
del self._annotations
del self._predictions
del self._metrics_results
self._annotations = []
self._predictions = []
self._input_ids = []
self._metrics_results = []
if self.dataset:
self.dataset.reset(self.postprocessor.has_processors)
for _, stage in self.stages.items():
stage.reset()
def load_network(self, network=None):
if network is None:
for stage_name, stage in self.stages.items():
stage.load_network(network, self.launcher, stage_name + '_')
else:
for net_dict in network:
stage_name = net_dict['name']
network_ = net_dict['model']
self.stages[stage_name].load_network(network_, self.launcher, stage_name+'_')
def load_network_from_ir(self, models_list):
for models_dict in models_list:
stage_name = models_dict['name']
self.stages[stage_name].load_model(models_dict, self.launcher, stage_name+'_')
def get_network(self):
return [{'name': stage_name, 'model': stage.network} for stage_name, stage in self.stages.items()]
def get_metrics_attributes(self):
if not self.metric_executor:
return {}
return self.metric_executor.get_metrics_attributes()
def register_metric(self, metric_config):
if isinstance(metric_config, str):
self.metric_executor.register_metric({'type': metric_config})
elif isinstance(metric_config, dict):
self.metric_executor.register_metric(metric_config)
else:
raise ValueError('Unsupported metric configuration type {}'.format(type(metric_config)))
def register_postprocessor(self, postprocessing_config):
pass
def register_dumped_annotations(self):
pass
def select_dataset(self, dataset_tag):
if self.dataset is not None and isinstance(self.dataset_config, list):
return
dataset_attributes = create_dataset_attributes(self.dataset_config, dataset_tag)
self.dataset, self.metric_executor, preprocessor, self.postprocessor = dataset_attributes
for _, stage in self.stages.items():
stage.update_preprocessing(preprocessor)
@staticmethod
def _create_progress_reporter(check_progress, dataset_size):
pr_kwargs = {}
if isinstance(check_progress, int) and not isinstance(check_progress, bool):
pr_kwargs = {"print_interval": check_progress}
return ProgressReporter.provide('print', dataset_size, **pr_kwargs)
def _prepare_dataset(self, dataset_tag=''):
if self.dataset is None or (dataset_tag and self.dataset.tag != dataset_tag):
self.select_dataset(dataset_tag)
if self.dataset.batch is None:
self.dataset.batch = 1
def _create_subset(self, subset=None, num_images=None, allow_pairwise=False):
if subset is not None:
self.dataset.make_subset(ids=subset, accept_pairs=allow_pairwise)
elif num_images is not None:
self.dataset.make_subset(end=num_images, accept_pairs=allow_pairwise)
def _prepare_progress_reporter(self, check_progress, progress_reporter=None):
if progress_reporter:
progress_reporter.reset(self.dataset.size)
return progress_reporter
return None if not check_progress else self._create_progress_reporter(check_progress, self.dataset.size)
@property
def dataset_size(self):
return self.dataset.size
def calibrate_predictions(previous_stage_predictions, out, threshold, outputs_mapping, iou_type=None):
score = out[0][outputs_mapping['probability_out']][:, 1]
pass_t = np.where(score > 0.7)[0]
removed_boxes = [i for i in range(previous_stage_predictions[0].size) if i not in pass_t]
previous_stage_predictions[0].remove(removed_boxes)
previous_stage_predictions[0].scores = score[pass_t]
bboxes = np.c_[
previous_stage_predictions[0].x_mins, previous_stage_predictions[0].y_mins,
previous_stage_predictions[0].x_maxs, previous_stage_predictions[0].y_maxs,
previous_stage_predictions[0].scores
]
mv = out[0][outputs_mapping['region_out']][pass_t]
if iou_type:
previous_stage_predictions[0], peek = nms(previous_stage_predictions[0], threshold, iou_type)
bboxes = np.c_[
previous_stage_predictions[0].x_mins, previous_stage_predictions[0].y_mins,
previous_stage_predictions[0].x_maxs, previous_stage_predictions[0].y_maxs,
previous_stage_predictions[0].scores
]
mv = mv[np.sort(peek).astype(int)]
bboxes = bbreg(bboxes, mv.T)
x_mins, y_mins, x_maxs, y_maxs, _ = bboxes.T
previous_stage_predictions[0].x_mins = x_mins
previous_stage_predictions[0].y_mins = y_mins
previous_stage_predictions[0].x_maxs = x_maxs
previous_stage_predictions[0].y_maxs = y_maxs
return previous_stage_predictions
def nms(prediction, threshold, iou_type):
bboxes = np.c_[
prediction.x_mins, prediction.y_mins,
prediction.x_maxs, prediction.y_maxs,
prediction.scores
]
peek = MTCNNPAdapter.nms(bboxes, threshold, iou_type)
prediction.remove([i for i in range(prediction.size) if i not in peek])
return prediction, peek
def bbreg(boundingbox, reg):
reg = reg.T
# calibrate bounding boxes
w = boundingbox[:, 2] - boundingbox[:, 0] + 1
h = boundingbox[:, 3] - boundingbox[:, 1] + 1
bb0 = boundingbox[:, 0] + reg[:, 0] * w
bb1 = boundingbox[:, 1] + reg[:, 1] * h
bb2 = boundingbox[:, 2] + reg[:, 2] * w
bb3 = boundingbox[:, 3] + reg[:, 3] * h
boundingbox[:, 0:4] = np.array([bb0, bb1, bb2, bb3]).T
return boundingbox
def filter_valid(dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph):
mask = np.ones(len(tmph))
tmp_ys_len = (edy + 1) - dy
tmp_xs_len = (edx + 1) - dx
img_ys_len = (ey + 1) - y
img_xs_len = (ex + 1) - x
mask = np.logical_and(mask, np.logical_and(tmph > 0, tmpw > 0))
mask = np.logical_and(mask, np.logical_and(tmp_ys_len > 0, tmp_xs_len > 0))
mask = np.logical_and(mask, np.logical_and(img_xs_len > 0, img_ys_len > 0))
mask = np.logical_and(mask, np.logical_and(tmp_xs_len == img_xs_len, tmp_ys_len == img_ys_len))
return dy[mask], edy[mask], dx[mask], edx[mask], y[mask], ey[mask], x[mask], ex[mask], tmpw[mask], tmph[mask], mask
def pad(boxesA, h, w):
boxes = boxesA.copy()
tmph = boxes[:, 3] - boxes[:, 1] + 1
tmpw = boxes[:, 2] - boxes[:, 0] + 1
numbox = boxes.shape[0]
dx = np.ones(numbox)
dy = np.ones(numbox)
edx = tmpw
edy = tmph
x = boxes[:, 0:1][:, 0]
y = boxes[:, 1:2][:, 0]
ex = boxes[:, 2:3][:, 0]
ey = boxes[:, 3:4][:, 0]
tmp = np.where(ex > w)[0]
if tmp.shape[0] != 0:
edx[tmp] = -ex[tmp] + w - 1 + tmpw[tmp]
ex[tmp] = w - 1
tmp = np.where(ey > h)[0]
if tmp.shape[0] != 0:
edy[tmp] = -ey[tmp] + h - 1 + tmph[tmp]
ey[tmp] = h - 1
tmp = np.where(x < 1)[0]
if tmp.shape[0] != 0:
dx[tmp] = 2 - x[tmp]
x[tmp] = np.ones_like(x[tmp])
tmp = np.where(y < 1)[0]
if tmp.shape[0] != 0:
dy[tmp] = 2 - y[tmp]
y[tmp] = np.ones_like(y[tmp])
# for python index from 0, while matlab from 1
dy = np.maximum(0, dy - 1)
dx = np.maximum(0, dx - 1)
y = np.maximum(0, y - 1)
x = np.maximum(0, x - 1)
edy = np.maximum(0, edy - 1)
edx = np.maximum(0, edx - 1)
ey = np.maximum(0, ey - 1)
    ex = np.maximum(0, ex - 1)
# -*- coding: utf-8 -*-
####################################################
# Author: 刘朝阳
# Date: 2020.05.01
# Updated: 2021.11.25
# Purpose: computing PERCLOS needs the driver's eye openness under normal
#          conditions, which this script extracts as the per-person baseline
# Usage: called automatically, no manual action required
####################################################
import os
import numpy as np
import cv2
import dlib
from imutils import face_utils
from head_posture_estimation import head_posture_estimation
from aspect_ratio_estimation import aspect_ratio_estimation
HPE = head_posture_estimation()
ARE = aspect_ratio_estimation()
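# For reference: eye_aspect_ratio is assumed to follow the standard EAR definition,
# EAR = (||p2 - p6|| + ||p3 - p5||) / (2 * ||p1 - p4||) over the six eye landmarks,
# so it is large while the eye is open and drops towards zero when it closes.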
# use dlib.get_frontal_face_detector() to obtain the face detector
detector = dlib.get_frontal_face_detector()
# use dlib.shape_predictor to obtain the facial landmark predictor
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
# get the landmark index ranges of the left eye, right eye and mouth
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
(mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]
EAR = everybody_EAR_mean = []
EAR_all_per_person = []
EAR_all_per_person_open = []
pitch_all_per_person = []
pitch_mean_per_person = []
everybody_pitch_mean = []
everybody_EAR_min = []
def get_everybody_EARandMAR_standard(face_path):
    # iterate over every person's images and extract the average eye openness
    for subdir in os.listdir(face_path):  # list every entry under face_path (one folder per person)
        EAR_all_per_person_open, EAR_all_per_person = [], []  # separate lists (a chained assignment would make them alias each other)
        subpath = os.path.join(face_path, subdir)  # path of this person's folder
        if os.path.isdir(subpath):  # only descend into real folders
            for filename in os.listdir(subpath):  # every image (1.png, 2.png, ...) of this person
                EAR_mean_per_person, EAR_min_per_person = [], []
                imgpath = os.path.join(subpath, filename)  # full path of the image
                img = cv2.imread(imgpath, cv2.IMREAD_COLOR)  # read the image (BGR)
                grayimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # cv2.imread returns BGR, so convert from BGR
faces = detector(grayimg, 0)
                for k, d in enumerate(faces):  # iterate over detected faces (each image holds a single face, so this runs once)
shape = predictor(grayimg, d)
shape_array = face_utils.shape_to_np(shape)
leftEye = shape_array[lStart:lEnd]
rightEye = shape_array[rStart:rEnd]
                    reprojectdst, euler_angle, pitch, roll, yaw = HPE.get_head_pose(shape_array)  # reprojection and Euler angles
pitch_all_per_person.append(pitch)
leftEAR = ARE.eye_aspect_ratio(leftEye)
rightEAR = ARE.eye_aspect_ratio(rightEye)
EAR = (leftEAR + rightEAR) / 2.0
EAR_all_per_person.append(EAR)
                    # once this loop finishes, every eye-openness value under the folder has been stored
                    if EAR > 0.13 and EAR < 0.23:  # keep only open-eye frames so near-zero closed-eye values do not drag the baseline down; thresholds found empirically
                        EAR_all_per_person_open.append(EAR)  # collect the per-image values into this person's open-eye set
            pitch_mean_per_person = np.mean(pitch_all_per_person)
#!/usr/bin/env python
# coding: utf-8
# # Non cartesian sampling: SPARKLING imaging
#
# We explore the performance of SPARKLING (_Spreading projection Algorithm for Rapid K-space sampLING_) as non-Cartesian imaging technique. We do not actually provide the code of this algorithm but instead upload result files containing trajectories generated from the previous *radial in-out* initialization. For details, see the recently published paper: Lazarus et al, "SPARKLING: variable‐density k‐space filling curves for accelerated $T_2^*$ ‐weighted MRI", Magn Reson Med 2019; 81:3643:3661.
#
# - Authors: <NAME> (<EMAIL>)
# - Date: 04/02/2019
# - Target: [ISBI'19 tutorial](https://biomedicalimaging.org/2019/tutorials/) on **Recent advances in acquisition and reconstruction for Compressed Sensing MRI**
# - **Revision**: 01/06/2021 for ATSI MSc hands-on session at Paris-Saclay University.
# In[6]:
#DISPLAY T2* MR IMAGE
get_ipython().run_line_magic('matplotlib', 'inline')
import numpy as np
import os.path as op
import os
import math ; import cmath
import matplotlib
import matplotlib.pyplot as plt
import sys
from mri.operators import NonCartesianFFT
from mri.operators.utils import convert_locations_to_mask, gridded_inverse_fourier_transform_nd
from pysap.data import get_sample_data
from skimage import data, img_as_float, io, filters
from modopt.math.metrics import ssim
mri_img = get_sample_data('2d-mri')
img_size = mri_img.shape[0]
plt.figure()
plt.title("T2* axial slice, size = {}".format(img_size))
if mri_img.ndim == 2:
plt.imshow(mri_img, cmap=plt.cm.gray)
else:
plt.imshow(mri_img)
plt.show()
# In[7]:
from scipy.io import loadmat
cwd = os.getcwd()
dirimg_2d = op.join(cwd,"..","data")
k_spark = loadmat(op.join(cwd, "..", "data","samples_SPARKLING_N512_nc34x3073_OS1.mat"))
k_spark_vec = k_spark['samples']
Kmax = np.amax(k_spark_vec)
#print(Kmax)
k_spark_vec = k_spark_vec*1/(2*np.pi*Kmax)
#save in npz format in the outdir directory
outdir = op.join(cwd,"..","output")
filename_traj = "sparkling_radial_N" + str(img_size) + ".npz"
outfile = op.join(outdir, filename_traj)
np.savez(outfile, k_spark_vec)
k_spark = plt.figure(figsize=(7,7))
plt.scatter(k_spark_vec[:,0],k_spark_vec[:,1], marker = '.', s=0.1)
plt.grid()
#Figure layout
unit = 1/4
tick = np.arange(-0.5, 0.5 + unit, unit)
label = [r"$-\frac{1}{2}$", r"$-\frac{1}{4}$", r"$0$", r"$+\frac{1}{4}$", r"$+\frac{1}{2}$"]
plt.xticks(tick/np.pi,labels = label, fontsize = 16) ; plt.yticks(tick/np.pi,labels = label, fontsize = 16)
plt.xlabel(r"$k_x$", fontsize = 22) ; plt.ylabel(r"$k_y$", fontsize = 22)
plt.title("K-space sampling, spiral in-out initialization",fontsize = 18)
plt.show()
# In[9]:
data=convert_locations_to_mask(k_spark_vec, mri_img.shape)
fourier_op = NonCartesianFFT(samples=k_spark_vec, shape=mri_img.shape,
implementation='cpu')
kspace_obs = fourier_op.op(mri_img.data)
# In[10]:
grid_space = np.linspace(-0.5, 0.5, num=mri_img.shape[0])
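# The original cell stops at building the uniform grid above. A minimal sketch of
# the usual next step (gridded zero-filled reconstruction plus an SSIM score) is
# given below; it assumes the helpers imported earlier keep their pysap-mri
# signatures and is illustrative rather than part of the original notebook.
grid2D = np.meshgrid(grid_space, grid_space)
grid_soln = gridded_inverse_fourier_transform_nd(k_spark_vec, kspace_obs,
                                                 tuple(grid2D), 'linear')
plt.figure()
plt.imshow(np.abs(grid_soln), cmap='gray')
plt.title("Gridded reconstruction from SPARKLING samples")
plt.show()
base_ssim = ssim(grid_soln, mri_img)
print("SSIM of the gridded solution: {}".format(base_ssim))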
import numpy as np
from scipy.interpolate import RectBivariateSpline
from scipy.ndimage import shift
#my imports
#import cv2
def LucasKanade(It, It1, rect, p0 = np.zeros(2)):
# Input:
# It: template image
# It1: Current image
# rect: Current position of the car
# (top left, bot right coordinates)
# p0: Initial movement vector [dp_x0, dp_y0]
# Output:
# p: movement vector [dp_x, dp_y]
# Put your implementation here
threshold = 0.01
p = p0
#YY, XX = np.meshgrid(np.arange(rect[0], rect[2]), np.arange(rect[1],rect[3]))
    warp_jacobian = np.array([[1, 0], [0, 1]])
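    # For a pure translation warp W(x; p) = x + p, the Jacobian dW/dp is the 2x2
    # identity, which is why warp_jacobian above is a constant matrix.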
from __future__ import absolute_import
from builtins import range
from . import datalayer
import numpy as np
# from numpy.polynomial.polynomial import polyval
## TODO: correctly handle large gaps (wait what?)
## TODO: correctly handle multiple vertical values
# Function consisting of a single Bezier curve
class CurveFunction(datalayer.Function):
# the global variables:
# self.pixels [(point0), (point1), (point2), (point3)] - the control points, in pixel space
# self.p0, self.p1, self.p2, self.p3 - the control points, in math space
# the polynomials for x and y, their derivatives, and their second derivatives:
# self.x, self.y
# self.dxdt, self.dydt
# self.ddx, self.ddy
def __init__(self, xaxis, yaxis, path_info, tolerance = dict()):
datalayer.Function.__init__(self, xaxis, yaxis, path_info, tolerance)
self.set_default_tolerance('imag_threshold', 1e-5) # threshold for determining real / complex number
self.set_default_tolerance('t_threshold', 0.002) # threshold for t values
# self.set_default_tolerance('straight_line', 100) # threshold for straight lines
def create(self):
self.x = np.array([-1, 3, -3, 1]) * self.p0[0] + np.array([3, -6, 3, 0]) * self.p1[0] + np.array([-3, 3, 0, 0]) * self.p2[0] + np.array([1, 0, 0, 0]) * self.p3[0]
self.y = np.array([-1, 3, -3, 1]) * self.p0[1] + np.array([3, -6, 3, 0]) * self.p1[1] + np.array([-3, 3, 0, 0]) * self.p2[1] + np.array([1, 0, 0, 0]) * self.p3[1]
self.dxdt = np.array([1, -2, 1]) * 3 * (self.p1[0] - self.p0[0]) + np.array([-1, 1, 0]) * 6 * (self.p2[0]-self.p1[0]) + np.array([1, 0, 0]) * 3 * (self.p3[0] - self.p2[0])
self.dydt = np.array([1, -2, 1]) * 3 * (self.p1[1] - self.p0[1]) + np.array([-1, 1, 0]) * 6 * (self.p2[1]-self.p1[1]) + np.array([1, 0, 0]) * 3 * (self.p3[1] - self.p2[1])
self.ddx = np.array([-1, 1]) * 6 * (self.p2[0] - 2*self.p1[0] + self.p0[0]) + np.array([1, 0]) * 6 * (self.p3[0] - 2*self.p2[0] + self.p1[0])
self.ddy = np.array([-1, 1]) * 6 * (self.p2[1] - 2*self.p1[1] + self.p0[1]) + np.array([1, 0]) * 6 * (self.p3[1] - 2*self.p2[1] + self.p1[1])
def create_from_path_info(self, path_info):
self.pixels = []
for i in range(4):
self.pixels.append(path_info[i])
self.p0 = (self.px_to_xval(path_info[0][0]), self.px_to_yval(path_info[0][1]))
self.p1 = (self.px_to_xval(path_info[1][0]), self.px_to_yval(path_info[1][1]))
self.p2 = (self.px_to_xval(path_info[2][0]), self.px_to_yval(path_info[2][1]))
self.p3 = (self.px_to_xval(path_info[3][0]), self.px_to_yval(path_info[3][1]))
self.domain = [min(self.p0[0], self.p3[0]), max(self.p0[0], self.p3[0])]
self.create()
# checks the t val: get_t_for_xval will return -1 if there is no t val
def is_defined_at(self, xval):
return self.get_t_for_xval(xval) > -self.tolerance['t_threshold']
def get_t_roots_within_zero_and_one(self, p):
r = np.roots(p)
t_unfiltered = r.real[abs(r.imag) < self.tolerance['imag_threshold']] # filters for reals, but not for t values
t_filtered = [t for t in t_unfiltered if (t>=(0-self.tolerance['t_threshold']) and t<=(1+self.tolerance['t_threshold']))] # filters t values
return t_filtered
def get_t_extrema_between(self, xmin, xmax, p):
# gets the t vals of the roots of p between xmin and xmax
# used for finding extrema between xmin and xmax
# assumes xmin and xmax are within the domain
tleft = self.get_t_for_xval(xmin)
tright = self.get_t_for_xval(xmax)
tmin = min(tleft, tright)
tmax = max(tright, tleft)
r = np.roots(p)
t_unfiltered = r.real[abs(r.imag) < self.tolerance['imag_threshold']]
t_filtered = [t for t in t_unfiltered if (t>=tmin and t<=tmax)]
t_filtered.append(tmin)
t_filtered.append(tmax)
return t_filtered
def get_t_for_xval(self, xval):
# currently returns the first t val (i.e., not all of them if there are multiple), -1 if there is no t val
if xval == float('inf'):
return -1
if xval == float('-inf'):
return -1
p = [self.x[0], self.x[1], self.x[2], self.x[3] - xval]
t = self.get_t_roots_within_zero_and_one(p)
if len(t) >= 1:
return t[0]
else:
return -1
def get_value_at(self, xval):
# returns False if the function is not defined at this xval
t = self.get_t_for_xval(xval)
if t > -self.tolerance['t_threshold']:
            yval = np.polyval(self.y, t)
            return yval
        return False
#####################################################################################################################
# #
# This file is part of the 5th project of Udacity's Self-Driving Car Engineer Nd - Vehicle Detection and Tracking #
# #
# Copyright (c) 2018 by <NAME> #
# #
#####################################################################################################################
import Object_Classifier
import Hog_Me
import cv2
import numpy as np
import matplotlib.pyplot as plt
class Object_Finder:
"""
Finds all instances of an object previously trained by an Object_Classifier in an image.
Properties:
Classifier: The Object_Classifier to be used to find occurrences of an object.
box_size: The size of the object / search window size
eff_box_size: The size of the scaled box size. Applied by set_scaling.
boundings: The boundings of the recently detected objects
single_hog: Defines if HOG shall just be created once for the whole image. Otherwise it will be created for each
window (far slower).
hogger: The image to hog converter to used.
resized_image: A backup of the resized image
scan_regions: Defines the regions to be scanned and by which factor the search window shall be magnified in this
region. (The closer to the observer the higher the factor should be)
original: The currently selected original image
"""
def __init__(self, classifier, hogger):
"""
Constructor
:param classifier: The classifier of class Object_Classifier to be used to detect the object
:param hogger: The image to hog convert of class Hoggit
"""
self.classifier = classifier
self.box_size = classifier.box_size
self.boundings = []
self.single_hog = True
self.hogger= hogger
self.resized_image = None
# Setup magnifications for single scan regions
self.scan_regions = [(1.0, (0, 380, 1280, 500)),
(1.25, (0, 380, 1280, 550)),
(1.5, (0, 380, 1280, 550)),
(1.75, (0, 380, 1280, 550)),
(2.0, (0, 380, 1280, 650))]
def select(self, image):
"""
Selects a new image and clears the old detections
:param image: The new image to be used
"""
self.original = image
self.boundings = []
def set_scaling(self, scaling):
"""
Sets the scaling factor to be used for new detections
:param scaling: The factor by which the search window shall be enlarged. (>1.0 for objects close to the eye)
"""
self.scaling = scaling
self.eff_box_size = int(self.box_size*self.scaling+0.5)
def get_resized_sub_sample(self, off_x, off_y):
"""
Returns the sub region of the currenty selected original image
:param off_x: The x offset
:param off_y: The y offset
:return: An image resized from the current effective box size to the original box size
"""
sub_sample = self.original[off_y:off_y + self.eff_box_size, off_x:off_x + self.eff_box_size, :]
new_size = (self.box_size, self.box_size)
return cv2.resize(sub_sample, new_size)
def find_instances_in_features(self, features, region):
"""
Finds the instances of an object in the currently selected image.
:param features: The feature list of the sub region of the main image
:param region: Defines the region of the main image which is represents by features
"""
for current_window in self.hogger.hog_scan(self.box_size, self.box_size):
if self.classifier.classify_features(current_window["features"])==1.0:
off_x = current_window["x"]
off_y = current_window["y"]
trans_off_x = int(off_x * self.scaling) + region[0]
trans_off_y = int(off_y * self.scaling) + region[1]
cv2.rectangle(self.resized_image, (off_x, off_y), (off_x + self.box_size, off_y + self.box_size),
color=(255, 255, 255), thickness=2)
cv2.rectangle(self.image, (trans_off_x, trans_off_y), (trans_off_x + self.eff_box_size, trans_off_y + self.eff_box_size),
color=(255, 255, 255), thickness=2)
self.boundings.append(((trans_off_x, trans_off_y), (trans_off_x + self.eff_box_size, trans_off_y + self.eff_box_size)))
def find_instances(self, image, region, overlap):
"""
Finds all instances of the object the classifier has been trained on in given image."
The results are appened to property boundings.
:param image: The image to search in
:param region: The sub region to search within
:param overlap: The overlap in percent. Only required if single_hog is det to False.
:return: The original with highlighted detection regions and optionally the resized sub image
"""
self.image = np.copy(image)
self.eff_step_size = int((1.0-overlap)*self.eff_box_size)
y_steps = (region[3]-region[1])//self.eff_step_size
x_steps = (region[2]-region[0])//self.eff_step_size
if region[0]+(x_steps-1)*self.eff_step_size+self.eff_box_size>region[2]:
x_steps -= 1
if region[1]+(y_steps-1)*self.eff_step_size+self.eff_box_size>region[3]:
y_steps -= 1
if self.single_hog:
self.resized_image = image[region[1]:region[3],region[0]:region[2],:]
self.resized_image = cv2.resize(self.resized_image, (int(self.resized_image.shape[1]/self.scaling), int(self.resized_image.shape[0]/self.scaling)))
features, img = self.hogger.hog_image(self.resized_image, visualize=False, feature_vector=False)
            features = np.array(features)
from logging import warn
import os
from collections import namedtuple
import pandas as pd
import pathlib
from enum import IntEnum
import warnings
import numpy as np
from datetime import datetime, timedelta
from scipy.spatial import cKDTree
import tempfile
from tqdm import trange
import typing
from DHI.Generic.MikeZero import eumUnit, eumQuantity
from DHI.Generic.MikeZero.DFS import DfsFileFactory, DfsFactory
from DHI.Generic.MikeZero.DFS.dfsu import DfsuFile, DfsuFileType, DfsuBuilder, DfsuUtil
from DHI.Generic.MikeZero.DFS.mesh import MeshFile, MeshBuilder
from .dfsutil import _get_item_info, _valid_item_numbers, _valid_timesteps
from .dataset import Dataset
from .dotnet import (
to_numpy,
to_dotnet_float_array,
to_dotnet_datetime,
from_dotnet_datetime,
asNumpyArray,
to_dotnet_array,
asnetarray_v2,
)
from .dfs0 import Dfs0
from .dfs2 import Dfs2
from .eum import ItemInfo, EUMType, EUMUnit
from .helpers import safe_length
from .spatial import Grid2D
from .interpolation import get_idw_interpolant, interp2d
from .custom_exceptions import InvalidGeometry
class UnstructuredType(IntEnum):
"""
-1: Mesh: 2D unstructured MIKE mesh
0: Dfsu2D: 2D area series
1: DfsuVerticalColumn: 1D vertical column
2: DfsuVerticalProfileSigma: 2D vertical slice through a Dfsu3DSigma
3: DfsuVerticalProfileSigmaZ: 2D vertical slice through a Dfsu3DSigmaZ
4: Dfsu3DSigma: 3D file with sigma coordinates, i.e., a constant number of layers.
5: Dfsu3DSigmaZ: 3D file with sigma and Z coordinates, i.e. a varying number of layers.
"""
Mesh = -1
Dfsu2D = 0
DfsuVerticalColumn = 1
DfsuVerticalProfileSigma = 2
DfsuVerticalProfileSigmaZ = 3
Dfsu3DSigma = 4
Dfsu3DSigmaZ = 5
class _UnstructuredGeometry:
# THIS CLASS KNOWS NOTHING ABOUT MIKE FILES!
_type = None # -1: mesh, 0: 2d-dfsu, 4:dfsu3dsigma, ...
_projstr = None
_n_nodes = None
_n_elements = None
_nc = None
_ec = None
_codes = None
_valid_codes = None
_element_ids = None
_node_ids = None
_element_table = None
_element_table_dotnet = None
_top_elems = None
_n_layers_column = None
_bot_elems = None
_n_layers = None
_n_sigma = None
_geom2d = None
_e2_e3_table = None
_2d_ids = None
_layer_ids = None
_shapely_domain_obj = None
_tree2d = None
def __repr__(self):
out = []
out.append("Unstructured Geometry")
if self.n_nodes:
out.append(f"Number of nodes: {self.n_nodes}")
if self.n_elements:
out.append(f"Number of elements: {self.n_elements}")
if self._n_layers:
out.append(f"Number of layers: {self._n_layers}")
if self._projstr:
out.append(f"Projection: {self.projection_string}")
return str.join("\n", out)
@property
def type_name(self):
return self._type.name
@property
def n_nodes(self):
"""Number of nodes"""
return self._n_nodes
@property
def node_coordinates(self):
"""Coordinates (x,y,z) of all nodes"""
return self._nc
@property
def node_ids(self):
return self._node_ids
@property
def n_elements(self):
"""Number of elements"""
return self._n_elements
@property
def element_ids(self):
return self._element_ids
@property
def codes(self):
"""Node codes of all nodes"""
return self._codes
@property
def valid_codes(self):
"""Unique list of node codes"""
if self._valid_codes is None:
self._valid_codes = list(set(self.codes))
return self._valid_codes
@property
def boundary_codes(self):
"""provides a unique list of boundary codes"""
return [code for code in self.valid_codes if code > 0]
@property
def projection_string(self):
return self._projstr
@property
def is_geo(self):
"""Are coordinates geographical (LONG/LAT)?"""
return self._projstr == "LONG/LAT"
@property
def is_local_coordinates(self):
return self._projstr == "NON-UTM"
@property
def element_table(self):
"""Element to node connectivity"""
if (self._element_table is None) and (self._element_table_dotnet is not None):
self._element_table = self._get_element_table_from_dotnet()
return self._element_table
@property
def max_nodes_per_element(self):
"""The maximum number of nodes for an element"""
maxnodes = 0
for local_nodes in self.element_table:
n = len(local_nodes)
if n > maxnodes:
maxnodes = n
return maxnodes
@property
def is_2d(self):
"""Type is either mesh or Dfsu2D (2 horizontal dimensions)"""
return self._type <= 0
@property
def is_tri_only(self):
"""Does the mesh consist of triangles only?"""
return self.max_nodes_per_element == 3 or self.max_nodes_per_element == 6
_boundary_polylines = None
@property
def boundary_polylines(self):
"""Lists of closed polylines defining domain outline"""
if self._boundary_polylines is None:
self._boundary_polylines = self._get_boundary_polylines()
return self._boundary_polylines
def get_node_coords(self, code=None):
"""Get the coordinates of each node.
Parameters
----------
code: int
Get only nodes with specific code, e.g. land == 1
Returns
-------
np.array
x,y,z of each node
"""
nc = self.node_coordinates
if code is not None:
if code not in self.valid_codes:
print(
f"Selected code: {code} is not valid. Valid codes: {self.valid_codes}"
)
raise Exception
return nc[self.codes == code]
return nc
def _get_element_table_from_dotnet(self):
        # Note: this can take 10-20 seconds for large dfsu3d!
elem_tbl = []
for j in range(self.n_elements):
elem_nodes = list(self._element_table_dotnet[j])
elem_nodes = [nd - 1 for nd in elem_nodes] # make 0-based
elem_tbl.append(elem_nodes)
return elem_tbl
def _element_table_to_dotnet(self, elem_table=None):
if elem_table is None:
elem_table = self._element_table
new_elem_table = []
n_elements = len(elem_table)
for j in range(n_elements):
elem_nodes = elem_table[j]
elem_nodes = [nd + 1 for nd in elem_nodes] # make 1-based
new_elem_table.append(elem_nodes)
return asnetarray_v2(new_elem_table)
def _set_nodes(
self, node_coordinates, codes=None, node_ids=None, projection_string=None
):
self._nc = np.asarray(node_coordinates)
if codes is None:
codes = np.zeros(len(node_coordinates), dtype=int)
self._codes = np.asarray(codes)
self._n_nodes = len(codes)
if node_ids is None:
node_ids = list(range(self._n_nodes))
self._node_ids = np.asarray(node_ids)
if projection_string is None:
projection_string = "LONG/LAT"
self._projstr = projection_string
def _set_elements(self, element_table, element_ids=None, geometry_type=None):
self._element_table = element_table
self._n_elements = len(element_table)
if element_ids is None:
element_ids = list(range(self.n_elements))
self._element_ids = np.asarray(element_ids)
if geometry_type is None:
# guess type
if self.max_nodes_per_element < 5:
geometry_type = UnstructuredType.Dfsu2D
else:
geometry_type = UnstructuredType.Dfsu3DSigma
self._type = geometry_type
def _reindex(self):
new_node_ids = range(self.n_nodes)
new_element_ids = range(self.n_elements)
node_dict = dict(zip(self.node_ids, new_node_ids))
for j in range(self.n_elements):
elem_nodes = self._element_table[j]
new_elem_nodes = []
for idx in elem_nodes:
new_elem_nodes.append(node_dict[idx])
self._element_table[j] = new_elem_nodes
self._node_ids = np.array(list(new_node_ids))
self._element_ids = np.array(list(new_element_ids))
def _get_element_table_for_elements(self, elements):
return [self.element_table[j] for j in elements]
def elements_to_geometry(self, elements, node_layers="all"):
"""export elements to new geometry
Parameters
----------
elements : list(int)
list of element ids
node_layers : str, optional
for 3d files either 'top', 'bottom' layer nodes
or 'all' can be selected, by default 'all'
Returns
-------
UnstructuredGeometry
which can be used for further extraction or saved to file
"""
elements = np.sort(elements) # make sure elements are sorted!
# extract information for selected elements
node_ids, elem_tbl = self._get_nodes_and_table_for_elements(
elements, node_layers=node_layers
)
node_coords = self.node_coordinates[node_ids]
codes = self.codes[node_ids]
# create new geometry
geom = _UnstructuredGeometry()
geom._set_nodes(
node_coords,
codes=codes,
node_ids=node_ids,
projection_string=self.projection_string,
)
geom._set_elements(elem_tbl, self.element_ids[elements])
geom._reindex()
geom._type = self._type #
if not self.is_2d:
# original file was 3d
layers_used = self.layer_ids[elements]
unique_layer_ids = np.unique(layers_used)
n_layers = len(unique_layer_ids)
if (
self._type == UnstructuredType.Dfsu3DSigma
or self._type == UnstructuredType.Dfsu3DSigmaZ
) and n_layers == 1:
# If source is 3d, but output only has 1 layer
# then change type to 2d
geom._type = UnstructuredType.Dfsu2D
geom._n_layers = None
if node_layers == "all":
warnings.warn(
"Warning: Only 1 layer in new geometry (hence 2d), but you have kept both top and bottom nodes! Hint: use node_layers='top' or 'bottom'"
)
else:
geom._type = self._type
geom._n_layers = n_layers
lowest_sigma = self.n_layers - self.n_sigma_layers + 1
geom._n_sigma = sum(unique_layer_ids >= lowest_sigma)
# If source is sigma-z but output only has sigma layers
# then change type accordingly
if (
self._type == UnstructuredType.DfsuVerticalProfileSigmaZ
or self._type == UnstructuredType.Dfsu3DSigmaZ
) and n_layers == geom._n_sigma:
geom._type = UnstructuredType(self._type.value - 1)
geom._top_elems = geom._get_top_elements_from_coordinates()
return geom
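    # Hedged usage note: for a 3D geometry `geom3d`, something like
    #     sub = geom3d.elements_to_geometry(range(100), node_layers="top")
    # returns a re-indexed geometry holding only those elements, switching the
    # type back to 2D when a single layer remains.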
def _get_top_elements_from_coordinates(self, ec=None):
"""Get list of top element ids based on element coordinates"""
if ec is None:
ec = self.element_coordinates
d_eps = 1e-4
top_elems = []
x_old = ec[0, 0]
y_old = ec[0, 1]
for j in range(1, len(ec)):
d2 = (ec[j, 0] - x_old) ** 2 + (ec[j, 1] - y_old) ** 2
# print(d2)
if d2 > d_eps:
# this is a new x,y point
# then the previous element must be a top element
top_elems.append(j - 1)
x_old = ec[j, 0]
y_old = ec[j, 1]
return np.array(top_elems)
def to_2d_geometry(self):
"""extract 2d geometry from 3d geometry
Returns
-------
UnstructuredGeometry
2d geometry (bottom nodes)
"""
if self.is_2d:
return self
# extract information for selected elements
elem_ids = self.bottom_elements
if self._type == UnstructuredType.Dfsu3DSigmaZ:
# for z-layers nodes will not match on neighboring elements!
elem_ids = self.top_elements
node_ids, elem_tbl = self._get_nodes_and_table_for_elements(
elem_ids, node_layers="bottom"
)
node_coords = self.node_coordinates[node_ids]
codes = self.codes[node_ids]
# create new geometry
geom = _UnstructuredGeometry()
geom._set_nodes(
node_coords,
codes=codes,
node_ids=node_ids,
projection_string=self.projection_string,
)
geom._set_elements(elem_tbl, self.element_ids[elem_ids])
geom._type = UnstructuredType.Mesh
geom._reindex()
# Fix z-coordinate for sigma-z:
if self._type == UnstructuredType.Dfsu3DSigmaZ:
zn = geom.node_coordinates[:, 2].copy()
for j, elem_nodes in enumerate(geom.element_table):
elem_nodes3d = self.element_table[self.bottom_elements[j]]
for jn in range(len(elem_nodes)):
znj_3d = self.node_coordinates[elem_nodes3d[jn], 2]
zn[elem_nodes[jn]] = min(zn[elem_nodes[jn]], znj_3d)
geom.node_coordinates[:, 2] = zn
return geom
def _get_nodes_and_table_for_elements(self, elements, node_layers="all"):
"""list of nodes and element table for a list of elements
Parameters
----------
elements : np.array(int)
array of element ids
node_layers : str, optional
for 3D files 'all', 'bottom' or 'top' nodes
of each element, by default 'all'
Returns
-------
np.array(int)
array of node ids (unique)
list(list(int))
element table with a list of nodes for each element
"""
nodes = []
elem_tbl = []
if (node_layers is None) or (node_layers == "all") or self.is_2d:
for j in elements:
elem_nodes = self.element_table[j]
elem_tbl.append(elem_nodes)
for node in elem_nodes:
nodes.append(node)
else:
# 3D file
if (node_layers != "bottom") and (node_layers != "top"):
raise Exception("node_layers must be either all, bottom or top")
for j in elements:
elem_nodes = self.element_table[j]
nn = len(elem_nodes)
halfn = int(nn / 2)
if node_layers == "bottom":
elem_nodes = elem_nodes[:halfn]
if node_layers == "top":
elem_nodes = elem_nodes[halfn:]
elem_tbl.append(elem_nodes)
for node in elem_nodes:
nodes.append(node)
return np.unique(nodes), elem_tbl
@property
def element_coordinates(self):
"""Center coordinates of each element"""
if self._ec is None:
self._ec = self._get_element_coords()
return self._ec
def _get_element_coords(self):
"""Calculates the coordinates of the center of each element.
Returns
-------
np.array
x,y,z of each element
"""
n_elements = self.n_elements
ec = np.empty([n_elements, 3])
# pre-allocate for speed
maxnodes = self.max_nodes_per_element
        idx = np.zeros(maxnodes, dtype=int)  # plain int (np.int is deprecated in recent numpy)
xcoords = np.zeros([maxnodes, n_elements])
ycoords = np.zeros([maxnodes, n_elements])
zcoords = np.zeros([maxnodes, n_elements])
nnodes_per_elem = np.zeros(n_elements)
for j in range(n_elements):
nodes = self._element_table[j]
nnodes = len(nodes)
nnodes_per_elem[j] = nnodes
for i in range(nnodes):
idx[i] = nodes[i] # - 1
xcoords[:nnodes, j] = self._nc[idx[:nnodes], 0]
ycoords[:nnodes, j] = self._nc[idx[:nnodes], 1]
zcoords[:nnodes, j] = self._nc[idx[:nnodes], 2]
ec[:, 0] = np.sum(xcoords, axis=0) / nnodes_per_elem
ec[:, 1] = np.sum(ycoords, axis=0) / nnodes_per_elem
ec[:, 2] = np.sum(zcoords, axis=0) / nnodes_per_elem
self._ec = ec
return ec
def contains(self, points):
"""test if a list of points are contained by mesh
Parameters
----------
points : array-like n-by-2
x,y-coordinates of n points to be tested
Returns
-------
bool array
True for points inside, False otherwise
"""
import matplotlib.path as mp
points = np.atleast_2d(points)
exterior = self.boundary_polylines.exteriors[0]
cnts = mp.Path(exterior.xy).contains_points(points)
if self.boundary_polylines.n_exteriors > 1:
# in case of several dis-joint outer domains
for exterior in self.boundary_polylines.exteriors[1:]:
in_domain = mp.Path(exterior.xy).contains_points(points)
cnts = np.logical_or(cnts, in_domain)
# subtract any holes
for interior in self.boundary_polylines.interiors:
in_hole = mp.Path(interior.xy).contains_points(points)
cnts = np.logical_and(cnts, ~in_hole)
return cnts
def get_overset_grid(self, dx=None, dy=None, shape=None, buffer=None):
"""get a 2d grid that covers the domain by specifying spacing or shape
Parameters
----------
dx : float or (float, float), optional
grid resolution in x-direction (or in x- and y-direction)
dy : float, optional
grid resolution in y-direction
shape : (int, int), optional
tuple with nx and ny describing number of points in each direction
one of them can be None, in which case the value will be inferred
buffer : float, optional
positive to make the area larger, default=0
can be set to a small negative value to avoid NaN
values all around the domain.
Returns
-------
<mikeio.Grid2D>
2d grid
"""
nc = self.geometry2d.node_coordinates
bbox = Grid2D.xy_to_bbox(nc, buffer=buffer)
return Grid2D(bbox=bbox, dx=dx, dy=dy, shape=shape)
def get_2d_interpolant(
self, xy, n_nearest: int = 1, extrapolate=False, p=2, radius=None
):
"""IDW interpolant for list of coordinates
Parameters
----------
xy : array-like
x,y coordinates of new points
        n_nearest : int, optional
            number of nearest elements used for the interpolant, by default 1
        extrapolate : bool, optional
            allow extrapolation outside the domain, by default False
p : float, optional
power of inverse distance weighting, default=2
radius: float, optional
an alternative to extrapolate=False,
only include elements within radius
Returns
-------
(np.array, np.array)
element ids and weights
"""
xy = np.atleast_2d(xy)
ids, dists = self._find_n_nearest_2d_elements(xy, n=n_nearest)
weights = None
if n_nearest == 1:
weights = np.ones(dists.shape)
if not extrapolate:
weights[~self.contains(xy)] = np.nan
elif n_nearest > 1:
weights = get_idw_interpolant(dists, p=p)
if not extrapolate:
weights[~self.contains(xy), :] = np.nan
else:
            raise ValueError("n_nearest must be at least 1")
if radius is not None:
idx = np.where(dists > radius)[0]
weights[idx] = np.nan
return ids, weights
def interp2d(self, data, elem_ids, weights=None, shape=None):
"""interp spatially in data (2d only)
Parameters
----------
data : ndarray or list(ndarray)
dfsu data
elem_ids : ndarray(int)
n sized array of 1 or more element ids used for interpolation
weights : ndarray(float), optional
weights with same size as elem_ids used for interpolation
shape: tuple, optional
reshape output
Returns
-------
ndarray or list(ndarray)
spatially interped data
Examples
--------
>>> ds = dfsu.read()
>>> g = dfs.get_overset_grid(shape=(50,40))
>>> elem_ids, weights = dfs.get_2d_interpolant(g.xy)
>>> dsi = dfs.interp2d(ds, elem_ids, weights)
"""
return interp2d(data, elem_ids, weights, shape)
def _create_tree2d(self):
xy = self.geometry2d.element_coordinates[:, :2]
self._tree2d = cKDTree(xy)
def _find_n_nearest_2d_elements(self, x, y=None, n=1):
if self._tree2d is None:
self._create_tree2d()
if y is None:
p = x
if (not np.isscalar(x)) and (np.ndim(x) == 2):
p = x[:, 0:2]
else:
p = np.array((x, y)).T
d, elem_id = self._tree2d.query(p, k=n)
return elem_id, d
def _find_3d_from_2d_points(self, elem2d, z=None, layer=None):
was_scalar = np.isscalar(elem2d)
if was_scalar:
elem2d = np.array([elem2d])
else:
orig_shape = elem2d.shape
elem2d = np.reshape(elem2d, (elem2d.size,))
if (layer is None) and (z is None):
# return top element
idx = self.top_elements[elem2d]
elif layer is None:
idx = np.zeros_like(elem2d)
if np.isscalar(z):
z = z * np.ones_like(elem2d, dtype=float)
elem3d = self.e2_e3_table[elem2d]
for j, row in enumerate(elem3d):
zc = self.element_coordinates[row, 2]
d3d = np.abs(z[j] - zc)
idx[j] = row[d3d.argsort()[0]]
elif z is None:
if 1 <= layer <= self.n_z_layers:
                idx = np.zeros_like(elem2d)
import unagi.utils as utils
import numpy as np
from .loss import Loss
class Sigmoid_Cross_Entropy(Loss):
    def activation(self, Z):
        return utils.sigmoid(Z)
def cost(self, logits, labels, params, lambd_factor):
m = labels.shape[1]
        logprobs = np.multiply(-np.log(logits), labels) + np.multiply(-np.log(1.0 - logits), 1.0 - labels)
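        # Minimal sketch of how cost() could finish; the weight-key naming
        # ("W1", "W2", ...) and the L2 form are assumptions, not taken from unagi.
        cross_entropy = (1.0 / m) * np.nansum(logprobs)
        l2_penalty = (lambd_factor / (2.0 * m)) * sum(
            np.sum(np.square(w)) for key, w in params.items() if key.startswith("W")
        )
        return cross_entropy + l2_penalty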
import os
import numpy as np
import matplotlib.pyplot as plt
from robotics.estimation import KalmanFilter
class Vehicle2DEstimation:
def __init__(self) -> None:
delta_t = 1
newton_acc = 0.5*delta_t*delta_t
# no control inputs
# fmt: off
F = np.array([
[1 , delta_t , newton_acc , 0 , 0 , 0] ,
[0 , 1 , delta_t , 0 , 0 , 0] ,
[0 , 0 , 1 , 0 , 0 , 0] ,
[0 , 0 , 0 , 1 , delta_t , newton_acc] ,
[0 , 0 , 0 , 0 , 1 , delta_t] ,
[0 , 0 , 0 , 0 , 0 , 1]
])
print(F)
q_directions = np.array(
[
[delta_t**4/4 , delta_t**3/2 , delta_t**2/2] ,
[delta_t**3/2 , delta_t**2 , delta_t] ,
[delta_t**2/2 , delta_t , 1]
]
)
# fmt: on
        Q = np.zeros((6, 6))
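        # Sketch only: a common way to fill Q is one 3x3 q_directions block per
        # axis, scaled by an assumed acceleration variance; sigma_a is a
        # hypothetical tuning value, not taken from the original file.
        sigma_a = 0.2
        Q[0:3, 0:3] = q_directions * sigma_a ** 2
        Q[3:6, 3:6] = q_directions * sigma_a ** 2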
import numpy
import sys
import math
import logic
from scipy.integrate import odeint
import scipy.optimize as optim
import NNEX_DEEP_NETWORK as NNEX
import NNEX_DEEP_NETWORKY as NNEXY
#import NNEX
def DISCON(avrSWAP_py, from_SC_py, to_SC_py):
if logic.counter == 0:
import globalDISCON
import OBSERVER
import yawerrmeas
logic.counter = logic.counter + 1
elif logic.counter == 1:
import globalDISCON1 as globalDISCON
import OBSERVER1 as OBSERVER
import yawerrmeas1 as yawerrmeas
logic.counter = logic.counter + 1
elif logic.counter == 2:
import globalDISCON2 as globalDISCON
import OBSERVER2 as OBSERVER
import yawerrmeas2 as yawerrmeas
logic.counter = 0
    #print("Entered DISCON.py")
#print("from_SC_py in DISCON.py: ", from_SC_py)
#print(avrSWAP_py[95], avrSWAP_py[26])
VS_RtGnSp = 121.6805
VS_SlPc = 10.00
VS_Rgn2K = 2.332287
VS_Rgn2Sp = 91.21091
VS_CtInSp = 70.16224
VS_RtPwr = 5296610.0
CornerFreq = 1.570796 #1.570796
PC_MaxPit = 1.570796 # ERA 1.570796 rad
PC_DT = 0.000125
VS_DT = 0.000125
OnePlusEps = 1 + sys.float_info.epsilon
VS_MaxTq = 47402.91
BlPitch = numpy.zeros(3)
PitRate = numpy.zeros(3)
VS_Rgn3MP = 0.01745329
PC_KK = 0.1099965
PC_KI = 0.008068634
PC_KP = 0.01882681
PC_RefSpd = 122.9096
VS_MaxRat = 15000.0
PC_MaxRat = 0.1396263 #0.1396263
YawSpr = 9.02832e9
YawDamp = 1.916e7
YawIn = 2.60789e6
kdYaw = 1e7
kpYaw = 5e7
kiYaw = 1e9
tauF = (1/3) * ((2 * numpy.pi) / 1.2671)
Ts = 0.005
iStatus = int(round(avrSWAP_py[0]))
NumBl = int(round(avrSWAP_py[60]))
PC_MinPit = 0.0
#print("PC_MinPit in DISCON.py: ", PC_MinPit)
#print("NumBl in DISCON.py: ", NumBl)
#print("OnePLUSEps ", OnePlusEps)
BlPitch[0] = min( max( avrSWAP_py[3], PC_MinPit ), PC_MaxPit )
BlPitch[1] = min( max( avrSWAP_py[32], PC_MinPit ), PC_MaxPit )
BlPitch[2] = min( max( avrSWAP_py[33], PC_MinPit ), PC_MaxPit )
GenSpeed = avrSWAP_py[19]
HorWindV = avrSWAP_py[26]
Time = avrSWAP_py[1]
aviFAIL_py = 0
if iStatus == 0:
globalDISCON.VS_SySp = VS_RtGnSp/( 1.0 + 0.01*VS_SlPc )
globalDISCON.VS_Slope15 = ( VS_Rgn2K*VS_Rgn2Sp*VS_Rgn2Sp )/( VS_Rgn2Sp - VS_CtInSp )
globalDISCON.VS_Slope25 = ( VS_RtPwr/VS_RtGnSp )/( VS_RtGnSp - globalDISCON.VS_SySp )
if VS_Rgn2K == 0:
globalDISCON.VS_TrGnSp = globalDISCON.VS_SySp
else:
globalDISCON.VS_TrGnSp = ( globalDISCON.VS_Slope25 - math.sqrt(globalDISCON.VS_Slope25*( globalDISCON.VS_Slope25 - 4.0*VS_Rgn2K*globalDISCON.VS_SySp ) ) )/( 2.0*VS_Rgn2K )
globalDISCON.GenSpeedF = GenSpeed
globalDISCON.PitCom = BlPitch
#print("PitCom: ", globalDISCON.PitCom)
#print("BlPitch: ", BlPitch)
GK = 1.0/( 1.0 + globalDISCON.PitCom[0]/PC_KK )
globalDISCON.IntSpdErr = globalDISCON.PitCom[0]/( GK*PC_KI )
globalDISCON.LastTime = Time
globalDISCON.LastTimePC = Time - PC_DT
globalDISCON.LastTimeVS = Time - VS_DT
print("0")
if iStatus >= 0 and aviFAIL_py >= 0:
avrSWAP_py[35] = 0.0
avrSWAP_py[40] = 0.0
avrSWAP_py[45] = 0.0
avrSWAP_py[47] = 0.0
avrSWAP_py[64] = 0.0
avrSWAP_py[71] = 0.0
avrSWAP_py[78] = 0.0
avrSWAP_py[79] = 0.0
avrSWAP_py[80] = 0.0
Alpha = math.exp( ( globalDISCON.LastTime - Time )*CornerFreq )
globalDISCON.GenSpeedF = ( 1.0 - Alpha )*GenSpeed + Alpha*globalDISCON.GenSpeedF
ElapTime = Time - globalDISCON.LastTimeVS
print("1 ", ElapTime)
print("globalDISCON.LastTimeVS: ", globalDISCON.LastTimeVS)
print("Time*OnePlusEps - globalDISCON.LastTimeVS: ", Time*OnePlusEps - globalDISCON.LastTimeVS)
if ( Time*OnePlusEps - globalDISCON.LastTimeVS ) >= VS_DT:
print("GenSPeedF: ", globalDISCON.GenSpeedF)
print("PitCom: ", globalDISCON.PitCom[0])
if globalDISCON.GenSpeedF >= VS_RtGnSp or globalDISCON.PitCom[0] >= VS_Rgn3MP:
GenTrq = VS_RtPwr/globalDISCON.GenSpeedF
print("A")
print("GenTrq: ", GenTrq)
elif globalDISCON.GenSpeedF <= VS_CtInSp:
GenTrq = 0.0
print("B")
elif globalDISCON.GenSpeedF < VS_Rgn2Sp:
GenTrq = globalDISCON.VS_Slope15*( globalDISCON.GenSpeedF - VS_CtInSp )
print("C")
elif globalDISCON.GenSpeedF < globalDISCON.VS_TrGnSp:
GenTrq = VS_Rgn2K*globalDISCON.GenSpeedF*globalDISCON.GenSpeedF
print("D")
else:
GenTrq = globalDISCON.VS_Slope25*( globalDISCON.GenSpeedF - globalDISCON.VS_SySp )
print("E")
GenTrq = min(GenTrq, VS_MaxTq)
print("2: ", GenTrq)
if iStatus == 0:
globalDISCON.LastGenTrq = GenTrq
TrqRate = ( GenTrq - globalDISCON.LastGenTrq )/ElapTime
TrqRate = min( max( TrqRate, -VS_MaxRat ), VS_MaxRat )
GenTrq = globalDISCON.LastGenTrq + TrqRate*ElapTime
globalDISCON.LastTimeVS = Time
globalDISCON.LastGenTrq = GenTrq
print("3")
avrSWAP_py[34] = 1.0
avrSWAP_py[55] = 0.0
avrSWAP_py[46] = globalDISCON.LastGenTrq
print("Time ", Time)
ElapTime = Time - globalDISCON.LastTimePC
print("ELAP Time ", ElapTime)
print("LASTTIMEPC Time ", globalDISCON.LastTimePC)
if ( Time*OnePlusEps - globalDISCON.LastTimePC ) >= PC_DT:
GK = 1.0/( 1.0 + globalDISCON.PitCom[0]/PC_KK )
SpdErr = globalDISCON.GenSpeedF - PC_RefSpd
globalDISCON.IntSpdErr = globalDISCON.IntSpdErr + SpdErr*ElapTime
globalDISCON.IntSpdErr = min( max( globalDISCON.IntSpdErr, PC_MinPit/( GK*PC_KI ) ), PC_MaxPit/( GK*PC_KI ) )
PitComP = GK*PC_KP* SpdErr
PitComI = GK*PC_KI*globalDISCON.IntSpdErr
PitComT = PitComP + PitComI
PitComT = min( max( PitComT, PC_MinPit ), PC_MaxPit )
for i in range(NumBl):
PitRate[i] = ( PitComT - BlPitch[i] )/ElapTime
PitRate[i] = min( max( PitRate[i], -PC_MaxRat ), PC_MaxRat )
globalDISCON.PitCom[i] = BlPitch[i] + PitRate[i]*ElapTime
globalDISCON.PitCom[i] = min( max( globalDISCON.PitCom[i], PC_MinPit ), PC_MaxPit )
globalDISCON.LastTimePC = Time
print("4")
#print("PitCom: ", globalDISCON.PitCom)
avrSWAP_py[54] = 0.0
avrSWAP_py[41] = globalDISCON.PitCom[0]
avrSWAP_py[42] = globalDISCON.PitCom[1]
avrSWAP_py[43] = globalDISCON.PitCom[2]
avrSWAP_py[44] = globalDISCON.PitCom[0]
# COMMANDING YAW RATE
globalDISCON.YawAngleGA = from_SC_py
#if Time > 70.0:
if logic.counter < 4:
if Time > 40.0 and Time < 55.0:
avrSWAP_py[28] = 1 # --> YAW CONTROL 0 = SPEED CONTROL, 1 = TORQUE CONTROL
# SETTING POSITION TO BE REACHED AT 0.1 rad --> PI CONTROLLER ( I is INTEGRAL of 0.1rad in time)
# avrSwap_py[23] --> YawRate Good for PID -- Derivative term
if not numpy.isclose(abs(avrSWAP_py[36]), 0.174533) and globalDISCON.flagyaw == False:
#if (not numpy.isclose(avrSWAP_py[36], globalDISCON.PosYawRef)) and (not numpy.isclose(avrSWAP_py[23], 0.0)) and globalDISCON.flag_yaw == False:
#globalDISCON.IntYawRef = globalDISCON.IntYawRef + globalDISCON.PosYawRef * ElapTime
#globalDISCON.IntYaw = globalDISCON.IntYaw + avrSWAP_py[36] * ElapTime
#avrSWAP_py[47] = kpYaw * (globalDISCON.PosYawRef - avrSWAP_py[36]) + kiYaw * (globalDISCON.IntYawRef - globalDISCON.IntYaw)
if abs(globalDISCON.PosYawRef) < 0.174533:
globalDISCON.VelYawRef = 0.0349066/3
globalDISCON.PosYawRef = globalDISCON.PosYawRef + globalDISCON.VelYawRef*ElapTime
else:
if Time > 54.0:
globalDISCON.flagyaw = True
globalDISCON.VelYawRef = 0.0
avrSWAP_py[47] = kiYaw * (globalDISCON.PosYawRef - avrSWAP_py[36]) + kpYaw * (globalDISCON.VelYawRef - avrSWAP_py[23]) - YawDamp * avrSWAP_py[23]
else: # HERE I CONSIDER PERTURBATIONS ABOUT THE NEW WORKING POSITION
#globalDISCON.flagyaw = True
globalDISCON.IntYawRef = globalDISCON.IntYawRef + globalDISCON.PosYawRef * ElapTime
globalDISCON.IntYaw = globalDISCON.IntYaw + avrSWAP_py[36] * ElapTime
avrSWAP_py[47] = - YawDamp * (avrSWAP_py[23] - 0.0) - YawSpr * (avrSWAP_py[36] - globalDISCON.PosYawRef) + kpYaw * (globalDISCON.PosYawRef - avrSWAP_py[36]) + kiYaw * (globalDISCON.IntYawRef - globalDISCON.IntYaw)
else:
avrSWAP_py[28] = 1 # --> YAW CONTROL 0 = SPEED CONTROL, 1 = TORQUE CONTROL
# SETTING POSITION TO BE REACHED AT 0.1 rad --> PI CONTROLLER ( I is INTEGRAL of 0.1rad in time)
globalDISCON.IntYawRef = globalDISCON.IntYawRef + globalDISCON.PosYawRef * ElapTime
globalDISCON.IntYaw = globalDISCON.IntYaw + avrSWAP_py[36] * ElapTime
avrSWAP_py[47] = - YawDamp * (avrSWAP_py[23] - 0.0) - YawSpr * (avrSWAP_py[36] - globalDISCON.PosYawRef) + kpYaw * (globalDISCON.PosYawRef - avrSWAP_py[36]) + kiYaw * (globalDISCON.IntYawRef - globalDISCON.IntYaw)
# avrSwap_py[23] --> YawRate Good for PID -- Derivative term
if globalDISCON.counterY >= 2.0:
avrSWAP_py[28] = 1
if not numpy.isclose(abs(avrSWAP_py[36]), abs(globalDISCON.PosYawRef - globalDISCON.PosFin)) and globalDISCON.flagyaw == False:
#if (not numpy.isclose(avrSWAP_py[36], globalDISCON.PosYawRef)) and (not numpy.isclose(avrSWAP_py[23], 0.0)) and globalDISCON.flag_yaw == False:
#globalDISCON.IntYawRef = globalDISCON.IntYawRef + globalDISCON.PosYawRef * ElapTime
#globalDISCON.IntYaw = globalDISCON.IntYaw + avrSWAP_py[36] * ElapTime
#avrSWAP_py[47] = kpYaw * (globalDISCON.PosYawRef - avrSWAP_py[36]) + kiYaw * (globalDISCON.IntYawRef - globalDISCON.IntYaw)
#if numpy.sign(globalDISCON.PosFin - globalDISCON.PosYawRef) == globalDISCON.signold:
if abs(globalDISCON.PosYawRef - globalDISCON.PosFin) > 0.004:
globalDISCON.VelYawRef = globalDISCON.signold * 0.0349066/3
globalDISCON.PosYawRef = globalDISCON.PosYawRef + globalDISCON.VelYawRef*ElapTime
else:
#if Time > 72.0:
globalDISCON.flagyaw = True
globalDISCON.VelYawRef = 0.0
avrSWAP_py[47] = kiYaw * (globalDISCON.PosYawRef - avrSWAP_py[36]) + kpYaw * (globalDISCON.VelYawRef - avrSWAP_py[23]) - YawDamp * avrSWAP_py[23]
else: # HERE I CONSIDER PERTURBATIONS ABOUT THE NEW WORKING POSITION
#globalDISCON.flagyaw = True
globalDISCON.IntYawRef = globalDISCON.IntYawRef + globalDISCON.PosYawRef * ElapTime
globalDISCON.IntYaw = globalDISCON.IntYaw + avrSWAP_py[36] * ElapTime
avrSWAP_py[47] = - YawDamp * (avrSWAP_py[23] - 0.0) - YawSpr * (avrSWAP_py[36] - globalDISCON.PosYawRef) + kpYaw * (globalDISCON.PosYawRef - avrSWAP_py[36]) + kiYaw * (globalDISCON.IntYawRef - globalDISCON.IntYaw)
#globalDISCON.signold = numpy.sign(globalDISCON.PosFin - globalDISCON.PosYawRef)
print("TOTAL TORQUE TERM PASSED TO SERVODYN FOR YAW CONTROL ----> ", avrSWAP_py[47])
'''if Time > 70.0 and Time < 85.0:
avrSWAP_py[47] = 0.0349066/3
else:
avrSWAP_py[47] = 0.0'''
else:
avrSWAP_py[28] = 0
#else:
# avrSWAP_py[28] = 0
'''avrSWAP_py[28] = 0 # DOPO LEVALO
avrSWAP_py[47] = 0.0'''
# END OF COMMANDED YAW RATE ON TURBINE 1
#YAW LOGIC BLOCK
globalDISCON.LastTime = Time
print("globalDISCON.LastTime: ", globalDISCON.LastTime)
# INPUTS FOR SUPERCONTROLLER
to_SC_py = avrSWAP_py[14] # MEASURED POWER OUTPUT
avrSWAP_py = numpy.append(avrSWAP_py,to_SC_py)
to_SC_py = avrSWAP_py[36] # ACTUAL YAW ANGLE
avrSWAP_py = numpy.append(avrSWAP_py,to_SC_py)
# END OF SECTION
# WIND SPEED OBSERVER SECTION
file = open("Bl1outin.txt","a+")
file.write("%f, %f, %f \n" % (avrSWAP_py[29], avrSWAP_py[68], Time))
file.close()
file = open("Bl2outin.txt","a+")
file.write("%f, %f, %f \n" % (avrSWAP_py[30], avrSWAP_py[69], Time))
file.close()
file = open("Bl3outin.txt","a+")
file.write("%f, %f, %f \n" % (avrSWAP_py[31], avrSWAP_py[70], Time))
file.close()
#file = open("Azimuth.txt","a+")
#file.write("%f, %f, %f, %f \n" % (avrSWAP_py[59], avrSWAP_py[20], avrSWAP_py[26], Time))
#file.close()
#if from_SC_py == 0:
tmp = float(OBSERVER.tmp) #POSG
acc = float(OBSERVER.acc) #POSR
OBSERVER.y = avrSWAP_py[19]
#print("tmp: ", OBSERVER.tmp)
#print("acc: ", OBSERVER.acc)
#print("y: ", OBSERVER.y)
OBSERVER.Qg = avrSWAP_py[22]
#print("Qg: ", avrSWAP_py[22])
if numpy.isclose(Time, 0.0):
x0 = numpy.array([1.5, 120, 0, 0])
xsol = numpy.array([1.5, 120, 0, 0])
OBSERVER.xsol = xsol
xppsolin = numpy.array([0, 0, 1.5, 120])
#print(xsol)
Qasol = OBSERVER.Qacalc(xppsolin, xsol, float(OBSERVER.y), float(OBSERVER.tmp))
error = 0.0
errorposg = 0.0
errorposr = 0.0
errorwr = 0.0
errorwg = 0.0
pitch_obs = (avrSWAP_py[3]+avrSWAP_py[32]+avrSWAP_py[33])*180/(3*numpy.pi)
if pitch_obs > 17.9:
pitch_obs = 17.9
elif pitch_obs < -10:
pitch_obs = -10
num = (2*Qasol)/(numpy.pi*OBSERVER.rho*(xsol[0]**2)*(OBSERVER.R**5))
tsr_obs = optim.fsolve(OBSERVER.func_impl, 4.5, args=(num, pitch_obs))
vento_obs = xsol[0]*OBSERVER.R/tsr_obs
file = open("EXSOL.txt","a+")
file.write("%f, %f, %f, %f, %f \n" % (xsol[0], xsol[1], xsol[2], xsol[3], Time))
file.close()
file = open("Azimuth.txt","a+")
file.write("%f, %f, %f, %f \n" % (xsol[2], xsol[0], vento_obs, Time))
file.close()
else:
x0 = OBSERVER.xsol
if numpy.isclose(ElapTime, 0.0):
ElapTime = 0.005
#print(OBSERVER.xsolold)
#input("ELAP TIME = 0.0 PROBLEM")
ts = numpy.linspace(Time - ElapTime, Time, 2)
xsol = odeint(OBSERVER.dx_dt, x0, ts, args=(float(OBSERVER.y), float(OBSERVER.tmp)))
#print("SOL SHAPE: ", numpy.shape(xsol))
OBSERVER.xsol = xsol[-1,:]
OBSERVER.xsolold = numpy.vstack((OBSERVER.xsolold, OBSERVER.xsol))
xppsolin = numpy.gradient(OBSERVER.xsolold, ElapTime, axis=0)
#print("SOL: ", xsol)
#print("XOLD: ", OBSERVER.xsolold)
xppsol = OBSERVER.xpp(xsol[-1,:], float(OBSERVER.y), float(OBSERVER.tmp))
#print("INERTIA: ", xppsol)
#print("INERTIA: ", xppsolin[-1,:])
Qasol = OBSERVER.Qacalc(xppsolin[-1,:], xsol[-1,:], float(OBSERVER.y), float(OBSERVER.tmp))
error = (Qasol - (avrSWAP_py[13]/avrSWAP_py[20]))/(avrSWAP_py[13]/avrSWAP_py[20])
errorposg = (OBSERVER.tmp-xsol[-1,3])/xsol[-1,3]
errorposr = (OBSERVER.acc-xsol[-1,2])/xsol[-1,2]
errorwr = (avrSWAP_py[20]-xsol[-1,0])/avrSWAP_py[20]
errorwg = (avrSWAP_py[19]-xsol[-1,1])/avrSWAP_py[19]
pitch_obs = (avrSWAP_py[3]+avrSWAP_py[32]+avrSWAP_py[33])*180/(3*numpy.pi)
if pitch_obs > 17.9:
pitch_obs = 17.9
elif pitch_obs < -10:
pitch_obs = -10
num = (2*Qasol)/(numpy.pi*OBSERVER.rho*(xsol[-1,0]**2)*(OBSERVER.R**5))
tsr_obs = optim.fsolve(OBSERVER.func_impl, 4.5, args=(num, pitch_obs))
vento_obs = xsol[-1,0]*OBSERVER.R/tsr_obs
file = open("EXSOL.txt","a+")
file.write("%f, %f, %f, %f, %f \n" % (xsol[-1,0], xsol[-1,1], xsol[-1,2], xsol[-1,3], Time))
file.close()
file = open("Azimuth.txt","a+")
file.write("%f, %f, %f, %f \n" % (xsol[-1,2], xsol[-1,0], vento_obs, Time))
file.close()
if vento_obs > 25:
vento_obs = 25
elif vento_obs < 3:
vento_obs = 3
file = open("Error.txt","a+")
file.write("%f, %f \n" % (error, Time))
file.close()
file = open("ErrorPosg.txt","a+")
file.write("%f, %f \n" % (errorposg, Time))
file.close()
file = open("ErrorPosr.txt","a+")
file.write("%f, %f \n" % (errorposr, Time))
file.close()
file = open("ErrorWG.txt","a+")
file.write("%f, %f \n" % (errorwg, Time))
file.close()
file = open("ErrorWR.txt","a+")
file.write("%f, %f \n" % (errorwr, Time))
file.close()
file = open("EWR.txt","a+")
file.write("%f, %f \n" % (avrSWAP_py[20], Time))
file.close()
file = open("EWG.txt","a+")
file.write("%f, %f \n" % (avrSWAP_py[19], Time))
file.close()
file = open("EPOSG.txt","a+")
file.write("%f, %f \n" % (tmp, Time))
file.close()
file = open("EPOSR.txt","a+")
file.write("%f, %f \n" % (acc, Time))
file.close()
file = open("EPitch.txt","a+")
file.write("%f, %f, %f \n" % ((avrSWAP_py[3]+avrSWAP_py[32]+avrSWAP_py[33])*180/(3*numpy.pi), pitch_obs, Time))
file.close()
file = open("EWIND.txt","a+")
file.write("%f, %f, %f \n" % (vento_obs, Time, HorWindV))
file.close()
file = open("EQasol.txt","a+")
file.write("%f, %f \n" % (Qasol, Time))
file.close()
file = open("ENum.txt","a+")
file.write("%f, %f \n" % (num, Time))
file.close()
OBSERVER.tmp = float(avrSWAP_py[19]*ElapTime + tmp)
OBSERVER.acc = float(avrSWAP_py[20]*ElapTime + acc)
#print("ERROR: ", error)
#print("Qa: ", Qasol)
#print("Qareal: ", avrSWAP_py[13]/avrSWAP_py[20])
#print("POWER: ", avrSWAP_py[13])
#WIND YAW ERROR OBSERVER SECTION
blmom1 = numpy.array([avrSWAP_py[29], avrSWAP_py[68]])
blmom2 = numpy.array([avrSWAP_py[30], avrSWAP_py[69]])
blmom3 = numpy.array([avrSWAP_py[31], avrSWAP_py[70]])
N = 1
if numpy.isclose(Time, 0.0):
azimuth = numpy.array([xsol[2],xsol[2] + 2*numpy.pi/3, xsol[2] + 4*numpy.pi/3])
wryaw = xsol[0]
globalDISCON.wr_old = wryaw # (1/(2*tauF + Ts)) * ((2*tauF - Ts)*globalDISCON.m_out1f_old + Ts*(m_out1 + globalDISCON.m_out1_old))
globalDISCON.wrf_old = wryaw
globalDISCON.azimuth_old = azimuth
globalDISCON.azimuthf_old = azimuth
m_out1 = 1
m_out2 = 0
m_out3 = 0
m_in1 = 1
m_in2 = 0
m_in3 = 0
yawerrmeas.bl1_old = blmom1
yawerrmeas.bl2_old = blmom2
yawerrmeas.bl3_old = blmom3
yawerrmeas.azimuth_old = azimuth[0]
else:
#azimuth = (1/(2*tauF + Ts)) * ((2*tauF - Ts)*globalDISCON.azimuthf_old + Ts*(numpy.array([xsol[-1,2], xsol[-1,2] + 2*numpy.pi/3, xsol[-1,2] + 4*numpy.pi/3]) + globalDISCON.azimuth_old))
#wryaw = (1/(2*tauF + Ts)) * ((2*tauF - Ts)*globalDISCON.wrf_old + Ts*(xsol[-1,0] + globalDISCON.wr_old))
azimuth = numpy.array([xsol[-1,2], xsol[-1,2] + 2*numpy.pi/3, xsol[-1,2] + 4*numpy.pi/3])
wryaw = xsol[-1,0]
globalDISCON.wr_old = xsol[-1,0]
globalDISCON.azimuth_old = numpy.array([xsol[-1,2], xsol[-1,2] + 2*numpy.pi/3, xsol[-1,2] + 4*numpy.pi/3])
globalDISCON.wrf_old = wryaw
globalDISCON.azimuthf_old = azimuth
yawerrmeas.bl1_old = numpy.vstack((yawerrmeas.bl1_old, blmom1))
yawerrmeas.bl2_old = numpy.vstack((yawerrmeas.bl2_old, blmom2))
yawerrmeas.bl3_old = numpy.vstack((yawerrmeas.bl3_old, blmom3))
yawerrmeas.azimuth_old = numpy.hstack((yawerrmeas.azimuth_old, azimuth[0]))
#if ((azimuth[0] - 2*N*numpy.pi) > yawerrmeas.azimuth_old[0]) and ((azimuth[0] - 2*N*numpy.pi) > yawerrmeas.azimuth_old[1]):
inddel = numpy.where(yawerrmeas.azimuth_old < azimuth[0] - 2*N*numpy.pi)
#print("INDDEL: ", inddel[0])
if inddel[0].size > 1:
#print(yawerrmeas.azimuth_old.size)
yawerrmeas.bl1_old = numpy.delete(yawerrmeas.bl1_old, [inddel[0][:-2]], 0)
yawerrmeas.bl2_old = numpy.delete(yawerrmeas.bl2_old, [inddel[0][:-2]], 0)
yawerrmeas.bl3_old = numpy.delete(yawerrmeas.bl3_old, [inddel[0][:-2]], 0)
yawerrmeas.azimuth_old = numpy.delete(yawerrmeas.azimuth_old, [inddel[0][:-2]], None)
#print(yawerrmeas.azimuth_old.size)
#print("DELETED OBJECT")
ind = numpy.where(yawerrmeas.azimuth_old > azimuth[0] - 2*N*numpy.pi)
#print("IND: ", ind[0])
a = 0
if ind[0][0] == 0:
ind[0][0] = 1
a = 1
blmom1into = numpy.interp(azimuth[0] - 2*N*numpy.pi, [yawerrmeas.azimuth_old[ind[0][0]-1], yawerrmeas.azimuth_old[ind[0][0]]], [yawerrmeas.bl1_old[ind[0][0]-1,0], yawerrmeas.bl1_old[ind[0][0],0]])
blmom1inti = numpy.interp(azimuth[0] - 2*N*numpy.pi, [yawerrmeas.azimuth_old[ind[0][0]-1], yawerrmeas.azimuth_old[ind[0][0]]], [yawerrmeas.bl1_old[ind[0][0]-1,1], yawerrmeas.bl1_old[ind[0][0],1]])
blmom2into = numpy.interp(azimuth[0] - 2*N*numpy.pi + 2*numpy.pi/3, [yawerrmeas.azimuth_old[ind[0][0]-1] + 2*numpy.pi/3, yawerrmeas.azimuth_old[ind[0][0]] + 2*numpy.pi/3], [yawerrmeas.bl2_old[ind[0][0]-1,0], yawerrmeas.bl2_old[ind[0][0],0]])
blmom2inti = numpy.interp(azimuth[0] - 2*N*numpy.pi + 2*numpy.pi/3, [yawerrmeas.azimuth_old[ind[0][0]-1] + 2*numpy.pi/3, yawerrmeas.azimuth_old[ind[0][0]] + 2*numpy.pi/3], [yawerrmeas.bl2_old[ind[0][0]-1,1], yawerrmeas.bl2_old[ind[0][0],1]])
blmom3into = numpy.interp(azimuth[0] - 2*N*numpy.pi + 4*numpy.pi/3, [yawerrmeas.azimuth_old[ind[0][0]-1] + 4*numpy.pi/3, yawerrmeas.azimuth_old[ind[0][0]] + 4*numpy.pi/3], [yawerrmeas.bl3_old[ind[0][0]-1,0], yawerrmeas.bl3_old[ind[0][0],0]])
blmom3inti = numpy.interp(azimuth[0] - 2*N*numpy.pi + 4*numpy.pi/3, [yawerrmeas.azimuth_old[ind[0][0]-1] + 4*numpy.pi/3, yawerrmeas.azimuth_old[ind[0][0]] + 4*numpy.pi/3], [yawerrmeas.bl3_old[ind[0][0]-1,1], yawerrmeas.bl3_old[ind[0][0],1]])
if a == 1:
ind[0][0] = 0
mo10= numpy.trapz(numpy.hstack((blmom1into, yawerrmeas.bl1_old[ind[0],0])), x=numpy.hstack((azimuth[0] - 2*N*numpy.pi, yawerrmeas.azimuth_old[ind[0]])))/(2*N*numpy.pi)
mo1c= numpy.trapz(numpy.multiply(numpy.hstack((blmom1into, yawerrmeas.bl1_old[ind[0],0])), numpy.cos(numpy.hstack((azimuth[0] - 2*N*numpy.pi, yawerrmeas.azimuth_old[ind[0]])))), x=numpy.hstack((azimuth[0] - 2*N*numpy.pi, yawerrmeas.azimuth_old[ind[0]])))/(N*numpy.pi)
mo1s= numpy.trapz(numpy.multiply(numpy.hstack((blmom1into, yawerrmeas.bl1_old[ind[0],0])), numpy.sin(numpy.hstack((azimuth[0] - 2*N*numpy.pi, yawerrmeas.azimuth_old[ind[0]])))), x=numpy.hstack((azimuth[0] - 2*N*numpy.pi, yawerrmeas.azimuth_old[ind[0]])))/(N*numpy.pi)
mi10= numpy.trapz(numpy.hstack((blmom1inti, yawerrmeas.bl1_old[ind[0],1])), x=numpy.hstack((azimuth[0] - 2*N*numpy.pi, yawerrmeas.azimuth_old[ind[0]])))/(2*N*numpy.pi)
mi1c= numpy.trapz(numpy.multiply(numpy.hstack((blmom1inti, yawerrmeas.bl1_old[ind[0],1])), numpy.cos(numpy.hstack((azimuth[0] - 2*N*numpy.pi, yawerrmeas.azimuth_old[ind[0]])))), x=numpy.hstack((azimuth[0] - 2*N*numpy.pi, yawerrmeas.azimuth_old[ind[0]])))/(N*numpy.pi)
mi1s= numpy.trapz(numpy.multiply(numpy.hstack((blmom1inti, yawerrmeas.bl1_old[ind[0],1])), numpy.sin(numpy.hstack((azimuth[0] - 2*N*numpy.pi, yawerrmeas.azimuth_old[ind[0]])))), x=numpy.hstack((azimuth[0] - 2*N*numpy.pi, yawerrmeas.azimuth_old[ind[0]])))/(N*numpy.pi)
mo20= numpy.trapz(numpy.hstack((blmom2into, yawerrmeas.bl2_old[ind[0],0])), x=numpy.hstack((azimuth[0] - 2*N*numpy.pi, yawerrmeas.azimuth_old[ind[0]])) + 2*numpy.pi/3)/(2*N*numpy.pi)
mo2c= numpy.trapz(numpy.multiply(numpy.hstack((blmom2into, yawerrmeas.bl2_old[ind[0],0])), numpy.cos(numpy.hstack((azimuth[0] - 2*N*numpy.pi, yawerrmeas.azimuth_old[ind[0]])) + 2*numpy.pi/3)), x=numpy.hstack((azimuth[0] - 2*N*numpy.pi, yawerrmeas.azimuth_old[ind[0]])) + 2*numpy.pi/3)/(N*numpy.pi)
mo2s= numpy.trapz(numpy.multiply(numpy.hstack((blmom2into, yawerrmeas.bl2_old[ind[0],0])), numpy.sin(numpy.hstack((azimuth[0] - 2*N*numpy.pi, yawerrmeas.azimuth_old[ind[0]])) + 2*numpy.pi/3)), x=numpy.hstack((azimuth[0] - 2*N*numpy.pi, yawerrmeas.azimuth_old[ind[0]])) + 2*numpy.pi/3)/(N*numpy.pi)
mi20= numpy.trapz(numpy.hstack((blmom2inti, yawerrmeas.bl2_old[ind[0],1])), x=numpy.hstack((azimuth[0] - 2*N*numpy.pi, yawerrmeas.azimuth_old[ind[0]])) + 2*numpy.pi/3)/(2*N*numpy.pi)
mi2c= numpy.trapz(numpy.multiply(numpy.hstack((blmom2inti, yawerrmeas.bl2_old[ind[0],1])), numpy.cos(numpy.hstack((azimuth[0] - 2*N*numpy.pi, yawerrmeas.azimuth_old[ind[0]])) + 2*numpy.pi/3)), x=numpy.hstack((azimuth[0] - 2*N*numpy.pi, yawerrmeas.azimuth_old[ind[0]])) + 2*numpy.pi/3)/(N*numpy.pi)
mi2s= numpy.trapz(numpy.multiply(numpy.hstack((blmom2inti, yawerrmeas.bl2_old[ind[0],1])), numpy.sin(numpy.hstack((azimuth[0] - 2*N*numpy.pi, yawerrmeas.azimuth_old[ind[0]])) + 2*numpy.pi/3)), x=numpy.hstack((azimuth[0] - 2*N*numpy.pi, yawerrmeas.azimuth_old[ind[0]])) + 2*numpy.pi/3)/(N*numpy.pi)
mo30= numpy.trapz(numpy.hstack((blmom3into, yawerrmeas.bl3_old[ind[0],0])), x=numpy.hstack((azimuth[0] - 2*N*numpy.pi, yawerrmeas.azimuth_old[ind[0]])) + 4*numpy.pi/3)/(2*N*numpy.pi)
    mo3c= numpy.trapz(numpy.multiply(numpy.hstack((blmom3into, yawerrmeas.bl3_old[ind[0],0])), numpy.cos(numpy.hstack((azimuth[0] - 2*N*numpy.pi, yawerrmeas.azimuth_old[ind[0]])) + 4*numpy.pi/3)), x=numpy.hstack((azimuth[0] - 2*N*numpy.pi, yawerrmeas.azimuth_old[ind[0]])) + 4*numpy.pi/3)/(N*numpy.pi)
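# --------------------------------------------------------------------------------
# The trapezoidal integrals above (mo10/mo1c/mo1s, mo20/..., mo3c, ...) all evaluate
# the same azimuthal Fourier decomposition of a blade-root moment over N rotor
# revolutions. A minimal sketch of that pattern is given below; the helper name and
# its standalone form are illustrative only and not part of the original DISCON
# controller, which inlines these expressions per blade.
def blade_moment_harmonics(m, psi, N=1):
    # m: array of moment samples over the revolution(s), psi: matching azimuth angles [rad]
    m0 = numpy.trapz(m, x=psi) / (2 * N * numpy.pi)               # mean (0th harmonic)
    mc = numpy.trapz(m * numpy.cos(psi), x=psi) / (N * numpy.pi)  # 1P cosine coefficient
    ms = numpy.trapz(m * numpy.sin(psi), x=psi) / (N * numpy.pi)  # 1P sine coefficient
    return m0, mc, ms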
from os import system
import numpy as np
import scipy.optimize as op
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
from anytree import AnyNode, RenderTree
####################################################################
def concatenateVectors(X,Y):
return np.concatenate((X,Y),axis=1)
####################################################################
def getPlot():
return plt
####################################################################
def clearScreen():
system('cls')
return
####################################################################
def loadData(fileName):
data= np.loadtxt(fileName, delimiter=',',unpack=True,dtype=float)
data=data.T
if (len(data.shape)==1):
data.shape=(data.shape[0],1)
return data
####################################################################
def accurracy(Xy,NewXy):
Xy=np.sort(Xy,axis=0)
NewXy=np.sort(NewXy,axis=0)
Y1=Xy[:,-1]
Y2=NewXy[:,-1]
m=np.mean(np.where(Y1==Y2,1,0))
return m*100
####################################################################
def SplitTree(X, y,Level=1,Node=AnyNode(id="root",vPredictedClass=-1),ThresholdCount=1):
ri,ci=GetBestSplit(X,y,ThresholdCount)
if( ri!=-1 and ci!=-1):
SplitFeature=ci
SplitValue=X[ri,ci]
#PlotTreeSplit(X,SplitFeature,SplitValue,Level) #Plot While Training
X0=X[np.where(X[:,SplitFeature]<=SplitValue)]
Y0=y[np.where(X[:,SplitFeature]<=SplitValue)]
X1=X[np.where(X[:,SplitFeature]>SplitValue)]
Y1=y[np.where(X[:,SplitFeature]>SplitValue)]
s0 = AnyNode(id="Level_"+str(Level)+"_Left("+"X"+str(SplitFeature)+"<"+str(round(SplitValue,1))+")", parent=Node,vLevel=Level,vSplitFeature=SplitFeature,vOp="<",vSplitValue=SplitValue,vSplitSign=-1,vPredictedClass=-1)
s1 = AnyNode(id="Level_"+str(Level)+"_Right("+"X"+str(SplitFeature)+">"+str(round(SplitValue,1))+")", parent=Node,vLevel=Level,vSplitFeature=SplitFeature,vOp=">",vSplitValue=SplitValue,vSplitSign=1,vPredictedClass=-1)
s0=SplitTree(X0,Y0,Level+1,s0,ThresholdCount=ThresholdCount)
s1=SplitTree(X1,Y1,Level+1,s1,ThresholdCount=ThresholdCount)
else:
PredictedClass=0
PredictedClassLen=0
for i in range(int(y.max()+1)):
if (len(y[np.where(y==i)])>PredictedClassLen):
PredictedClass=i
PredictedClassLen=len(y[np.where(y==i)])
Node.vPredictedClass=PredictedClass
return Node
####################################################################
def PredictTree(X,y,Node):
if(len(Node.children)!=0):
SplitFeature=Node.children[0].vSplitFeature
SplitValue=Node.children[0].vSplitValue
X0=X[np.where(X[:,SplitFeature]<=SplitValue)]
Y0=y[np.where(X[:,SplitFeature]<=SplitValue)]
X1=X[np.where(X[:,SplitFeature]>SplitValue)]
Y1=y[np.where(X[:,SplitFeature]>SplitValue)]
newX1,newY1=PredictTree(X0,Y0,Node.children[0])
newX2,newY2=PredictTree(X1,Y1,Node.children[1])
newX= np.concatenate((newX1,newX2),axis=0)
newY=np.concatenate((newY1,newY2),axis=0)
else:
newX=X
for i in range(len(y)):
y[i]=Node.vPredictedClass
newY=y
return newX,newY
####################################################################
def PruneTree(X,y,Node,ThresholdCount):
if(len(Node.children)!=0):
SplitFeature=Node.children[0].vSplitFeature
SplitValue=Node.children[0].vSplitValue
X0=X[np.where(X[:,SplitFeature]<=SplitValue)]
Y0=y[np.where(X[:,SplitFeature]<=SplitValue)]
X1=X[np.where(X[:,SplitFeature]>SplitValue)]
Y1=y[np.where(X[:,SplitFeature]>SplitValue)]
if (X0.shape[0]<ThresholdCount or X1.shape[0]<ThresholdCount):
Node.children=[]
PredictedClass=0
PredictedClassLen=0
for i in range(int(y.max()+1)):
if (len(y[np.where(y==i)])>PredictedClassLen):
PredictedClass=i
PredictedClassLen=len(y[np.where(y==i)])
Node.vPredictedClass=PredictedClass
else:
PruneTree(X0,Y0,Node.children[0],ThresholdCount)
PruneTree(X1,Y1,Node.children[1],ThresholdCount)
return Node
####################################################################
def GetBestSplit(X,y,ThresholdCount):
ri=0
ci=0
for i in range(int(y.max()+1)):
if(len(y[np.where(y==i)])==len(y)):
ri=-1
ci=-1
if(X.shape[0]<=ThresholdCount):
ri=-1
ci=-1
if(ri!=-1 and ci!=-1):
G=np.zeros((X.shape))
for ri in range(G.shape[0]):
for ci in range(G.shape[1]):
G[ri,ci]=GetGiniScore(X,y,ri,ci)
ri=np.unravel_index(np.argmax(G, axis=None), G.shape)[0]
ci=np.unravel_index(np.argmax(G, axis=None), G.shape)[1]
return ri,ci
####################################################################
def GetGiniScore(X,y,ri,ci):
G0=0
G1=0
Y0=y[np.where(X[:,ci]<=X[ri,ci])]
Y1=y[np.where(X[:,ci]>X[ri,ci])]
if (len(Y0)!=0):
for i in range(int(y.max()+1)):
P=len(Y0[np.where(Y0==i)])/len(Y0)
G0=G0+P*P
if (len(Y1)!=0):
for i in range(int(y.max()+1)):
P=len(Y1[np.where(Y1==i)])/len(Y1)
G1=G1+P*P
G_Score=(len(Y0)/len(y)) * G0 + (len(Y1)/len(y)) * G1
return G_Score
####################################################################
def PlotTreeSplit(ax,X,SplitFeature,SplitValue,Level):
x_min, x_max = X[:, 0].min() , X[:, 0].max()
y_min, y_max = X[:, 1].min() , X[:, 1].max()
z_min, z_max = X[:, 2].min() , X[:, 2].max()
u = np.linspace(x_min, x_max, 2)
v = np.linspace(y_min, y_max, 2)
w = np.linspace(z_min, z_max, 2)
if (SplitFeature==0):
u = np.zeros(( len(v), len(w) ))
V,W=np.meshgrid(v,w)
for i in range(len(v)):
for j in range(len(w)):
u[i,j] =SplitValue
U = np.transpose(u)
if (SplitFeature==1):
v = np.zeros(( len(u), len(w) ))
U,W=np.meshgrid(u,w)
for i in range(len(u)):
for j in range(len(w)):
v[i,j] =SplitValue
V = np.transpose(v)
if (SplitFeature==2):
w = np.zeros(( len(u), len(v) ))
U,V=np.meshgrid(u,v)
for i in range(len(u)):
for j in range(len(v)):
w[i,j] =SplitValue
W = np.transpose(w)
ax.plot_surface(U,V,W,alpha=0.6,zorder=5)
ax.text(U[0][0], V[0][0], W[0][0], Level, color='red')
return
####################################################################
def PlotTree(ax,X,y,Node):
if(Node.id=="root"):
ax.scatter(X[np.where(y==0),0],X[np.where(y==0),1],X[np.where(y==0),2],marker=".",facecolors='r', zorder=2)
ax.scatter(X[np.where(y==1),0],X[np.where(y==1),1],X[np.where(y==1),2],marker=".",facecolors='g', zorder=3)
ax.scatter(X[np.where(y==2),0],X[np.where(y==2),1],X[np.where(y==2),2],marker=".",facecolors='b', zorder=4)
if(len(Node.children)!=0):
SplitFeature=Node.children[0].vSplitFeature
SplitValue=Node.children[0].vSplitValue
Level=Node.children[0].vLevel
X0=X[np.where(X[:,SplitFeature]<=SplitValue)]
Y0=y[np.where(X[:,SplitFeature]<=SplitValue)]
X1=X[np.where(X[:,SplitFeature]>SplitValue)]
Y1=y[np.where(X[:,SplitFeature]>SplitValue)]
PlotTreeSplit(ax,X,SplitFeature,SplitValue,Level)
PlotTree(ax,X0,Y0,Node.children[0])
PlotTree(ax,X1,Y1,Node.children[1])
return
####################################################################
def PlotPoints(ax,X,y):
    ax.scatter(X[np.where(y==0),0],X[np.where(y==0),1],X[np.where(y==0),2],marker=".",facecolors='r')
    ax.scatter(X[np.where(y==1),0],X[np.where(y==1),1],X[np.where(y==1),2],marker=".",facecolors='g')
    ax.scatter(X[np.where(y==2),0],X[np.where(y==2),1],X[np.where(y==2),2],marker=".",facecolors='b')
    return
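####################################################################
# Minimal usage sketch (illustrative): the file name "data.txt" and the
# threshold value below are assumptions, not part of the original module.
# It loads a comma-separated file whose last column is the class label,
# fits a tree, prunes it and reports the training accuracy.
if __name__ == "__main__":
    Xy = loadData("data.txt")
    X, y = Xy[:, :-1], Xy[:, -1]
    root = SplitTree(X, y.copy(), ThresholdCount=5)
    root = PruneTree(X, y.copy(), root, ThresholdCount=5)
    newX, newY = PredictTree(X.copy(), y.copy(), root)
    NewXy = concatenateVectors(newX, newY.reshape(-1, 1))
    print("Training accuracy: %.1f%%" % accurracy(Xy, NewXy))
    print(RenderTree(root))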
# @version: 1.0 date: 05/06/2015 by <NAME>
# @author: <EMAIL>, <EMAIL>, <EMAIL>
# @copyright: EPFL-IC-LCAV 2015
from __future__ import division
import numpy as np
import scipy.linalg as la
from .parameters import constants
from . import utilities as u
from .soundsource import build_rir_matrix
from . import windows
from . import stft
#=========================================================================
# Free (non-class-member) functions related to beamformer design
#=========================================================================
def H(A, **kwargs):
'''Returns the conjugate (Hermitian) transpose of a matrix.'''
return np.transpose(A, **kwargs).conj()
def sumcols(A):
'''
Sums the columns of a matrix (np.array).
The output is a 2D np.array
of dimensions M x 1.
'''
return np.sum(A, axis=1, keepdims=1)
def mdot(*args):
'''Left-to-right associative matrix multiplication of multiple 2D ndarrays.'''
ret = args[0]
for a in args[1:]:
ret = np.dot(ret, a)
return ret
def distance(x, y):
'''
Computes the distance matrix E.
E[i,j] = sqrt(sum((x[:,i]-y[:,j])**2)).
x and y are DxN ndarray containing N D-dimensional vectors.
'''
# Assume x, y are arrays, *not* matrices
x = np.array(x)
y = np.array(y)
# return np.sqrt((x[0,:,np.newaxis]-y[0,:])**2 + (x[1,:,np.newaxis]-y[1,:])**2)
return np.sqrt(np.sum((x[:, :, np.newaxis] - y[:, np.newaxis, :])**2, axis=0))
def unit_vec2D(phi):
return np.array([[np.cos(phi), np.sin(phi)]]).T
def linear_2D_array(center, M, phi, d):
'''
Creates an array of uniformly spaced linear points in 2D
Parameters
----------
center: array_like
The center of the array
M: int
The number of points
phi: float
The counterclockwise rotation of the array (from the x-axis)
d: float
The distance between neighboring points
Returns
-------
ndarray (2, M)
The array of points
'''
u = unit_vec2D(phi)
return np.array(center)[:, np.newaxis] + d * \
(np.arange(M)[np.newaxis, :] - (M - 1.) / 2.) * u
def circular_2D_array(center, M, phi0, radius):
'''
Creates an array of uniformly spaced circular points in 2D
Parameters
----------
center: array_like
The center of the array
M: int
The number of points
phi0: float
The counterclockwise rotation of the first element in the array (from the x-axis)
radius: float
The radius of the array
Returns
-------
ndarray (2, M)
The array of points
'''
phi = np.arange(M) * 2. * np.pi / M
return np.array(center)[:, np.newaxis] + radius * \
np.vstack((np.cos(phi + phi0), np.sin(phi + phi0)))
def poisson_2D_array(center, M, d):
'''
Create array of 2D positions drawn from Poisson process.
Parameters
----------
center: array_like
The center of the array
    M: int
        The number of points
    d: float
        The mean distance between neighboring points
    Returns
    -------
    ndarray (2, M)
The array of points
'''
from numpy.random import standard_exponential, randint
R = d*standard_exponential((2, M))*(2*randint(0, 2, (2, M)) - 1)
R = R.cumsum(axis=1)
R -= R.mean(axis=1)[:, np.newaxis]
R += np.array([center]).T
return R
def square_2D_array(center, M, N, phi, d):
'''
Creates an array of uniformly spaced grid points in 2D
Parameters
----------
center: array_like
The center of the array
M: int
The number of points in the first dimension
    N: int
The number of points in the second dimension
phi: float
The counterclockwise rotation of the array (from the x-axis)
d: float
The distance between neighboring points
Returns
-------
ndarray (2, M * N)
The array of points
'''
c = linear_2D_array(center, M, phi+np.pi/2., d)
R = np.zeros((2, M*N))
for i in np.arange(M):
R[:, i*N:(i+1)*N] = linear_2D_array(c[:, i], N, phi, d)
return R
def spiral_2D_array(center, M, radius=1., divi=3, angle=None):
'''
Generate an array of points placed on a spiral
Parameters
----------
center: array_like
location of the center of the array
M: int
number of microphones
radius: float
        microphones are contained within a circle of this radius (default 1)
divi: int
number of rotations of the spiral (default 3)
angle: float
the angle offset of the spiral (default random)
Returns
-------
    ndarray (2, M)
The array of points
'''
num_seg = int(np.ceil(M / divi))
pos_array_norm = np.linspace(0, radius, num=M, endpoint=False)
pos_array_angle = np.reshape(np.tile(np.pi * 2 * np.arange(divi) / divi, num_seg),
(divi, -1), order='F') + \
np.linspace(0, 2 * np.pi / divi,
num=num_seg, endpoint=False)[np.newaxis, :]
pos_array_angle = np.insert(pos_array_angle.flatten('F')[:M - 1], 0, 0)
if angle is None:
pos_array_angle += np.random.rand() * np.pi / divi
else:
pos_array_angle += angle
pos_mic_x = pos_array_norm * np.cos(pos_array_angle)
pos_mic_y = pos_array_norm * np.sin(pos_array_angle)
return np.array([pos_mic_x, pos_mic_y])
def fir_approximation_ls(weights, T, n1, n2):
    freqs_plus = np.array(list(weights.keys()))[:, np.newaxis]
freqs = np.vstack([freqs_plus,
-freqs_plus])
omega = 2 * np.pi * freqs
omega_discrete = omega * T
n = np.arange(n1, n2)
# Create the DTFT transform matrix corresponding to a discrete set of
# frequencies and the FIR filter indices
F = np.exp(-1j * omega_discrete * n)
    w_plus = np.array(list(weights.values()))[:, :, 0]
w = np.vstack([w_plus,
w_plus.conj()])
return np.linalg.pinv(F).dot(w)
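#=========================================================================
# Usage sketch for the geometry helpers above. The center, spacing and
# element counts are arbitrary illustration values, not defaults of this
# module.
#=========================================================================
def _example_array_geometries():
    R_lin = linear_2D_array(center=[2., 1.5], M=4, phi=0., d=0.08)          # 4 mics in a line
    R_circ = circular_2D_array(center=[2., 1.5], M=6, phi0=0., radius=0.05)  # 6 mics on a circle
    return R_lin, R_circ  # each is a (2, M) ndarray of xy positions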
#=========================================================================
# Classes (microphone array and beamformer related)
#=========================================================================
class MicrophoneArray(object):
'''Microphone array class.'''
def __init__(self, R, fs):
R = np.array(R)
self.dim = R.shape[0] # are we in 2D or in 3D
self.M = R.shape[1] # number of microphones
self.R = R # array geometry
self.fs = fs # sampling frequency of microphones
self.signals = None
self.center = np.mean(R, axis=1, keepdims=True)
def record(self, signals, fs):
'''
This simulates the recording of the signals by the microphones.
In particular, if the microphones and the room simulation
do not use the same sampling frequency, down/up-sampling
is done here.
Parameters
----------
signals:
An ndarray with as many lines as there are microphones.
fs:
the sampling frequency of the signals.
'''
if signals.shape[0] != self.M:
raise NameError('The signals array should have as many lines as there are microphones.')
if signals.ndim != 2:
raise NameError('The signals should be a 2D array.')
if fs != self.fs:
try:
import samplerate
fs_ratio = self.fs / float(fs)
newL = int(fs_ratio * signals.shape[1]) - 1
self.signals = np.zeros((self.M, newL))
# samplerate resample function considers columns as channels (hence the transpose)
for m in range(self.M):
self.signals[m] = samplerate.resample(signals[m], fs_ratio, 'sinc_best')
except ImportError:
raise ImportError('The samplerate package must be installed for resampling of the signals.')
else:
self.signals = signals
def to_wav(self, filename, mono=False, norm=False, bitdepth=np.float):
'''
Save all the signals to wav files.
Parameters
----------
filename: str
the name of the file
mono: bool, optional
if true, records only the center channel floor(M / 2) (default `False`)
norm: bool, optional
if true, normalize the signal to fit in the dynamic range (default `False`)
bitdepth: int, optional
the format of output samples [np.int8/16/32/64 or np.float (default)]
'''
from scipy.io import wavfile
if mono is True:
signal = self.signals[self.M // 2]
else:
signal = self.signals.T # each column is a channel
float_types = [float, np.float, np.float32, np.float64]
if bitdepth in float_types:
bits = None
elif bitdepth is np.int8:
bits = 8
elif bitdepth is np.int16:
bits = 16
elif bitdepth is np.int32:
bits = 32
elif bitdepth is np.int64:
bits = 64
else:
raise NameError('No such type.')
if norm:
from .utilities import normalize
signal = normalize(signal, bits=bits)
signal = np.array(signal, dtype=bitdepth)
wavfile.write(filename, self.fs, signal)
class Beamformer(MicrophoneArray):
'''
At some point, in some nice way, the design methods
should also go here. Probably with generic arguments.
Parameters
----------
R: numpy.ndarray
Mics positions
fs: int
Sampling frequency
N: int, optional
Length of FFT, i.e. number of FD beamforming weights, equally spaced. Defaults to 1024.
    Lg: int, optional
        Length of time-domain filters. Defaults to N.
    hop: int, optional
        Hop length for frequency domain processing. Defaults to N/2.
zpf: int, optional
Front zero padding length for frequency domain processing. Default is 0.
zpb: int, optional
Zero padding length for frequency domain processing. Default is 0.
'''
def __init__(self, R, fs, N=1024, Lg=None, hop=None, zpf=0, zpb=0):
MicrophoneArray.__init__(self, R, fs)
# only support even length (in freq)
        if N % 2 == 1:
N += 1
self.N = int(N) # FFT length
if Lg is None:
self.Lg = N # TD filters length
else:
self.Lg = int(Lg)
# setup lengths for FD processing
self.zpf = int(zpf)
self.zpb = int(zpb)
self.L = self.N - self.zpf - self.zpb
if hop is None:
self.hop = self.L // 2
else:
self.hop = hop
# for now only support equally spaced frequencies
self.frequencies = np.arange(0, self.N // 2+1) / self.N * float(self.fs)
# weights will be computed later, the array is of shape (M, N/2+1)
self.weights = None
# the TD beamforming filters (M, Lg)
self.filters = None
def __add__(self, y):
''' Concatenates two beamformers together.'''
newR = np.concatenate((self.R, y.R), axis=1)
        return Beamformer(newR, self.fs, N=self.N, Lg=self.Lg, hop=self.hop, zpf=self.zpf, zpb=self.zpb)
def filters_from_weights(self, non_causal=0.):
'''
Compute time-domain filters from frequency domain weights.
Parameters
----------
non_causal: float, optional
ratio of filter coefficients used for non-causal part
'''
if self.weights is None:
raise NameError('Weights must be defined.')
self.filters = np.zeros((self.M, self.Lg))
if self.N <= self.Lg:
# go back to time domain and shift DC to center
tw = np.fft.irfft(np.conj(self.weights), axis=1, n=self.N)
self.filters[:, :self.N] = np.concatenate((tw[:, -self.N//2:], tw[:, :self.N//2]), axis=1)
elif self.N > self.Lg:
# Least-square projection
for i in np.arange(self.M):
Lgp = np.floor((1 - non_causal)*self.Lg)
Lgm = self.Lg - Lgp
# the beamforming weights in frequency are the complex conjugates of the FT of the filter
w = np.concatenate((np.conj(self.weights[i]), self.weights[i, -2:0:-1]))
# create partial Fourier matrix
k = np.arange(self.N)[:, np.newaxis]
l = np.concatenate((np.arange(self.N-Lgm, self.N), np.arange(Lgp)))
F = np.exp(-2j*np.pi*k*l / self.N)
self.filters[i] = np.real(np.linalg.lstsq(F, w)[0])
def weights_from_filters(self):
if self.filters is None:
raise NameError('Filters must be defined.')
# this is what we want to use, really.
#self.weights = np.conj(np.fft.rfft(self.filters, n=self.N, axis=1))
# quick hack to be able to use MKL acceleration package from anaconda
self.weights = np.zeros((self.M, self.N//2+1), dtype=np.complex128)
for m in range(self.M):
self.weights[m] = np.conj(np.fft.rfft(self.filters[m], n=self.N))
def steering_vector_2D(self, frequency, phi, dist, attn=False):
phi = np.array([phi]).reshape(phi.size)
# Assume phi and dist are measured from the array's center
X = dist * np.array([np.cos(phi), np.sin(phi)]) + self.center
D = distance(self.R, X)
omega = 2 * np.pi * frequency
if attn:
# TO DO 1: This will mean slightly different absolute value for
# every entry, even within the same steering vector. Perhaps a
# better paradigm is far-field with phase carrier.
return 1. / (4 * np.pi) / D * np.exp(-1j * omega * D / constants.get('c'))
else:
return np.exp(-1j * omega * D / constants.get('c'))
def steering_vector_2D_from_point(self, frequency, source, attn=True, ff=False):
''' Creates a steering vector for a particular frequency and source
Args:
frequency
source: location in cartesian coordinates
attn: include attenuation factor if True
ff: uses far-field distance if true
Return:
A 2x1 ndarray containing the steering vector.
'''
X = np.array(source)
if X.ndim == 1:
X = source[:, np.newaxis]
omega = 2 * np.pi * frequency
# normalize for far-field if requested
if (ff):
# unit vectors pointing towards sources
p = (X - self.center)
p /= np.linalg.norm(p)
# The projected microphone distances on the unit vectors
D = np.dot(self.R.T, p)
# subtract minimum in each column
D -= np.min(D)
else:
D = distance(self.R, X)
phase = np.exp(-1j * omega * D / constants.get('c'))
if attn:
# TO DO 1: This will mean slightly different absolute value for
# every entry, even within the same steering vector. Perhaps a
# better paradigm is far-field with phase carrier.
return 1. / (4 * np.pi) / D * phase
else:
return phase
def response(self, phi_list, frequency):
i_freq = np.argmin(np.abs(self.frequencies - frequency))
if self.weights is None and self.filters is not None:
self.weights_from_filters()
elif self.weights is None and self.filters is None:
raise NameError('Beamforming weights or filters need to be computed first.')
# For the moment assume that we are in 2D
bfresp = np.dot(H(self.weights[:,i_freq]), self.steering_vector_2D(
self.frequencies[i_freq], phi_list, constants.get('ffdist')))
return self.frequencies[i_freq], bfresp
def response_from_point(self, x, frequency):
i_freq = np.argmin(np.abs(self.frequencies - frequency))
if self.weights is None and self.filters is not None:
self.weights_from_filters()
elif self.weights is None and self.filters is None:
raise NameError('Beamforming weights or filters need to be computed first.')
# For the moment assume that we are in 2D
bfresp = np.dot(H(self.weights[:, i_freq]), self.steering_vector_2D_from_point(
self.frequencies[i_freq], x, attn=True, ff=False))
return self.frequencies[i_freq], bfresp
def plot_response_from_point(self, x, legend=None):
if self.weights is None and self.filters is not None:
self.weights_from_filters()
elif self.weights is None and self.filters is None:
raise NameError('Beamforming weights or filters need to be computed first.')
if x.ndim == 0:
x = np.array([x])
try:
import matplotlib.pyplot as plt
except ImportError:
import warnings
warnings.warn('Matplotlib is required for plotting')
return
HF = np.zeros((x.shape[1], self.frequencies.shape[0]), dtype=complex)
for k, p in enumerate(x.T):
for i, f in enumerate(self.frequencies):
r = np.dot(H(self.weights[:, i]),
self.steering_vector_2D_from_point(f, p, attn=True, ff=False))
HF[k, i] = r[0]
plt.subplot(2, 1, 1)
plt.title('Beamformer response')
for hf in HF:
plt.plot(self.frequencies, np.abs(hf))
plt.ylabel('Modulus')
plt.axis('tight')
plt.legend(legend)
plt.subplot(2, 1, 2)
for hf in HF:
plt.plot(self.frequencies, np.unwrap(np.angle(hf)))
plt.ylabel('Phase')
plt.xlabel('Frequency [Hz]')
plt.axis('tight')
plt.legend(legend)
def plot_beam_response(self):
if self.weights is None and self.filters is not None:
self.weights_from_filters()
elif self.weights is None and self.filters is None:
raise NameError('Beamforming weights or filters need to be computed first.')
phi = np.linspace(-np.pi, np.pi-np.pi/180, 360)
freq = self.frequencies
resp = np.zeros((freq.shape[0], phi.shape[0]), dtype=complex)
for i, f in enumerate(freq):
# For the moment assume that we are in 2D
resp[i,:] = np.dot(H(self.weights[:,i]), self.steering_vector_2D(
f, phi, constants.get('ffdist')))
H_abs = np.abs(resp)**2
H_abs /= H_abs.max()
H_abs = 10*np.log10(H_abs + 1e-10)
p_min = 0
p_max = 100
vmin, vmax = np.percentile(H_abs.flatten(), [p_min, p_max])
try:
import matplotlib.pyplot as plt
except ImportError:
import warnings
warnings.warn('Matplotlib is required for plotting')
return
plt.imshow(H_abs,
aspect='auto',
origin='lower',
interpolation='sinc',
vmax=vmax, vmin=vmin)
plt.xlabel('Angle [rad]')
xticks = [-np.pi, -np.pi/2, 0, np.pi/2, np.pi]
for i, p in enumerate(xticks):
xticks[i] = np.argmin(np.abs(p - phi))
        xticklabels = [r'$-\pi$', r'$-\pi/2$', '0', r'$\pi/2$', r'$\pi$']
plt.setp(plt.gca(), 'xticks', xticks)
plt.setp(plt.gca(), 'xticklabels', xticklabels)
plt.ylabel('Freq [kHz]')
yticks = np.zeros(4)
f_0 = np.floor(self.fs/8000.)
for i in np.arange(1, 5):
yticks[i-1] = np.argmin(np.abs(freq - 1000.*i*f_0))
#yticks = np.array(plt.getp(plt.gca(), 'yticks'), dtype=np.int)
plt.setp(plt.gca(), 'yticks', yticks)
plt.setp(plt.gca(), 'yticklabels', np.arange(1, 5)*f_0)
def snr(self, source, interferer, f, R_n=None, dB=False):
i_f = np.argmin(np.abs(self.frequencies - f))
if self.weights is None and self.filters is not None:
self.weights_from_filters()
elif self.weights is None and self.filters is None:
raise NameError('Beamforming weights or filters need to be computed first.')
# This works at a single frequency because otherwise we need to pass
# many many covariance matrices. Easy to change though (you can also
# have frequency independent R_n).
if R_n is None:
R_n = np.zeros((self.M, self.M))
# To compute the SNR, we /must/ use the real steering vectors, so no
# far field, and attn=True
A_good = self.steering_vector_2D_from_point(self.frequencies[i_f], source.images, attn=True, ff=False)
if interferer is not None:
A_bad = self.steering_vector_2D_from_point(self.frequencies[i_f], interferer.images, attn=True, ff=False)
R_nq = R_n + sumcols(A_bad) * H(sumcols(A_bad))
else:
R_nq = R_n
w = self.weights[:, i_f]
a_1 = sumcols(A_good)
SNR = np.real(mdot(H(w), a_1, H(a_1), w) / mdot(H(w), R_nq, w))
if dB is True:
SNR = 10 * np.log10(SNR)
return SNR
def udr(self, source, interferer, f, R_n=None, dB=False):
i_f = np.argmin(np.abs(self.frequencies - f))
if self.weights is None and self.filters is not None:
self.weights_from_filters()
elif self.weights is None and self.filters is None:
raise NameError('Beamforming weights or filters need to be computed first.')
if R_n is None:
R_n = np.zeros((self.M, self.M))
A_good = self.steering_vector_2D_from_point(self.frequencies[i_f], source.images, attn=True, ff=False)
if interferer is not None:
A_bad = self.steering_vector_2D_from_point(self.frequencies[i_f], interferer.images, attn=True, ff=False)
R_nq = R_n + sumcols(A_bad).dot(H(sumcols(A_bad)))
else:
R_nq = R_n
w = self.weights[:, i_f]
UDR = np.real(mdot(H(w), A_good, H(A_good), w) / mdot(H(w), R_nq, w))
if dB is True:
UDR = 10 * np.log10(UDR)
return UDR
def process(self, FD=False):
if self.signals is None or len(self.signals) == 0:
raise NameError('No signal to beamform')
if FD is True:
# STFT processing
if self.weights is None and self.filters is not None:
self.weights_from_filters()
elif self.weights is None and self.filters is None:
raise NameError('Beamforming weights or filters need to be computed first.')
# create window function
win = np.concatenate((np.zeros(self.zpf),
windows.hann(self.L),
np.zeros(self.zpb)))
# do real STFT of first signal
tfd_sig = stft.analysis(self.signals[0],
self.L,
self.hop,
zp_back=self.zpb,
zp_front=self.zpf,
transform=np.fft.rfft,
win=win) * np.conj(self.weights[0])
for i in range(1, self.M):
tfd_sig += stft.analysis(self.signals[i],
self.L,
self.hop,
zp_back=self.zpb,
zp_front=self.zpf,
transform=np.fft.rfft,
win=win) * np.conj(self.weights[i])
# now reconstruct the signal
output = stft.synthesis(
tfd_sig,
self.L,
self.hop,
zp_back=self.zpb,
zp_front=self.zpf,
transform=np.fft.irfft)
# remove the zero padding from output signal
            if self.zpb == 0:
output = output[self.zpf:]
else:
output = output[self.zpf:-self.zpb]
else:
# TD processing
if self.weights is not None and self.filters is None:
self.filters_from_weights()
elif self.weights is None and self.filters is None:
raise NameError('Beamforming weights or filters need to be computed first.')
from scipy.signal import fftconvolve
# do real STFT of first signal
output = fftconvolve(self.filters[0], self.signals[0])
for i in range(1, len(self.signals)):
output += fftconvolve(self.filters[i], self.signals[i])
return output
def plot(self, sum_ir=False, FD=True):
if self.weights is None and self.filters is not None:
self.weights_from_filters()
elif self.weights is not None and self.filters is None:
self.filters_from_weights()
elif self.weights is None and self.filters is None:
raise NameError('Beamforming weights or filters need to be computed first.')
try:
import matplotlib.pyplot as plt
except ImportError:
import warnings
warnings.warn('Matplotlib is required for plotting')
return
if FD is True:
plt.subplot(2, 2, 1)
plt.plot(self.frequencies, np.abs(self.weights.T))
plt.title('Beamforming weights [modulus]')
plt.xlabel('Frequency [Hz]')
plt.ylabel('Weight modulus')
plt.subplot(2, 2, 2)
plt.plot(self.frequencies, np.unwrap(np.angle(self.weights.T), axis=0))
plt.title('Beamforming weights [phase]')
plt.xlabel('Frequency [Hz]')
plt.ylabel('Unwrapped phase')
plt.subplot(2, 1, 2)
        plt.plot(np.arange(self.Lg) / float(self.fs), self.filters.T)
        plt.ylabel('Filter amplitude')
        plt.xlabel('Time [s]')
        plt.axis('tight')
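#=========================================================================
# Usage sketch: build a small linear Beamformer. The sampling rate, mic
# spacing and FFT length below are illustrative values, not package
# defaults.
#=========================================================================
def _example_beamformer(fs=16000):
    R = linear_2D_array(center=[2., 1.5], M=4, phi=0., d=0.08)
    bf = Beamformer(R, fs, N=1024, Lg=256)
    # the FD weights are defined on N/2 + 1 equally spaced frequencies from 0 to fs/2
    return bf.frequencies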
'''
Routines for generation of triangular surface meshes.
'''
import numpy as np
from cnld import abstract, util
from cnld.h2lib import *
from matplotlib import pyplot as plt
from scipy.interpolate import Rbf
eps = np.finfo(np.float64).eps
class Mesh:
'''
2D triangular mesh class using H2Lib datastructures.
'''
_surface = None
def __init__(self):
self._surface = None
@classmethod
def from_surface3d(cls, surf):
obj = cls()
obj._surface = surf
obj._update_properties()
return obj
@classmethod
def from_macrosurface3d(cls, ms, center=(0, 0, 0), refn=2):
# mesh must be refined at least once, otherwise h2lib throws exception
assert refn > 1
obj = cls.from_surface3d(build_from_macrosurface3d_surface3d(ms, refn))
obj.translate(center)
return obj
@classmethod
def from_geometry(cls, vertices, edges, triangles, triangle_edges):
surf = Surface3d(len(vertices), len(edges), len(triangles))
surf.x[:] = vertices
surf.e[:] = edges
surf.t[:] = triangles
surf.s[:] = triangle_edges
return cls.from_surface3d(surf)
@classmethod
def from_abstract(cls, array, refn=1, **kwargs):
return _from_abstract(cls, array, refn, **kwargs)
# @classmethod
# def from_geometry(cls, vertices, edges, triangles, triangle_edges, center=(0,0,0),
# refn=2, parametrization='square'):
# ms = Macrosurface3d(len(vertices), len(edges), len(triangles))
# ms.x[:] = vertices
# ms.e[:] = edges
# ms.t[:] = triangles
# ms.s[:] = triangle_edges
# ms.set_parametrization(parametrization)
# return cls.from_macrosurface3d(ms, center=center, refn=refn)
def __add__(self, other):
surf1 = self._surface
surf2 = other._surface
if surf1 is None and surf2 is None:
return Mesh()
elif surf1 is None:
return Mesh.from_surface3d(surf2)
elif surf2 is None:
return Mesh.from_surface3d(surf1)
else:
return Mesh.from_surface3d(merge_surface3d(surf1, surf2))
def __iadd__(self, other):
surf1 = self._surface
surf2 = other._surface
if surf1 is None and surf2 is None:
pass
elif surf1 is None:
self._surface = surf2
elif surf2 is None:
pass
else:
self._surface = merge_surface3d(surf1, surf2)
self._update_properties()
return self
@property
def vertices(self):
return np.asarray(self._surface.x)
@property
def edges(self):
return np.asarray(self._surface.e)
@property
def triangles(self):
return np.asarray(self._surface.t)
@property
def triangle_edges(self):
return np.asarray(self._surface.s)
@property
def normals(self):
return np.asarray(self._surface.n)
@property
def g(self):
return np.asarray(self._surface.g)
@property
def triangle_areas(self):
return np.asarray(self._surface.g) / 2
@property
def hmin(self):
return self._surface.hmin
@property
def hmax(self):
return self._surface.hmax
@property
def nvertices(self):
return len(self.vertices)
@property
def nedges(self):
return len(self.edges)
@property
def ntriangles(self):
return len(self.triangles)
@property
def surface3d(self):
return self._surface
def _update_properties(self):
prepare_surface3d(self._surface)
def _refine(self):
self._surface = refine_red_surface3d(self._surface)
self._update_properties()
def refine(self, n=1):
for i in range(n):
self._refine()
def translate(self, r):
translate_surface3d(self._surface, np.array(r, dtype=np.float64))
def draw(self):
vertices = self.vertices
edges = self.edges
plt.figure()
plt.plot(vertices[:, 0], vertices[:, 1], '.')
for e in edges:
x1, y1, z1 = vertices[e[0], :]
x2, y2, z2 = vertices[e[1], :]
plt.plot([x1, x2], [y1, y2], 'b-')
plt.axis('equal')
plt.show()
    def _memoize(self):
return (self.nvertices, self.triangles.tostring(), self.edges.tostring(),
self.triangle_edges.tostring())
def _from_abstract(cls, array, refn=1, **kwargs):
'''
Generate mesh from abstract representation of an array.
'''
# generate geometry in terms of vertices, edges, and triangles with refinement
# (much faster to construct mesh from entire geometry once instead of membrane by
# membrane)
verts, edges, tris, tri_edges = [], [], [], []
vidx = 0
eidx = 0
for elem in array.elements:
for mem in elem.membranes:
if isinstance(mem, abstract.SquareCmutMembrane):
v, e, t, s = geometry_square(mem.length_x, mem.length_y, refn=refn)
elif isinstance(mem, abstract.CircularCmutMembrane):
v, e, t, s = geometry_circle(mem.radius, n=4, refn=refn)
else:
raise TypeError
v += np.array(mem.position)
e += vidx
t += vidx
s += eidx
vidx += len(v)
eidx += len(e)
verts.append(v)
edges.append(e)
tris.append(t)
tri_edges.append(s)
verts = np.concatenate(verts, axis=0)
edges = np.concatenate(edges, axis=0)
tris = np.concatenate(tris, axis=0)
tri_edges = np.concatenate(tri_edges, axis=0)
# construct mesh from geometry
mesh = cls.from_geometry(verts, edges, tris, tri_edges)
# assign mesh vertices to patches, membranes, and elements
nverts = len(mesh.vertices)
# patch_counter = np.zeros(nverts, dtype=np.int32) # keeps track of current patch idx for each vertex
# patch_ids = np.ones((nverts, 4), dtype=np.int32) * np.nan
membrane_ids = np.ones(nverts, dtype=np.int32) * np.nan
element_ids = np.ones(nverts, dtype=np.int32) * np.nan
    mesh.on_boundary = np.zeros(nverts, dtype=bool)
x, y, z = mesh.vertices.T
for elem in array.elements:
for mem in elem.membranes:
# for pat in mem.patches:
# determine vertices which belong to each patch, using
# eps for buffer to account for round-off error
# pat_x, pat_y, pat_z = pat.position
# length_x, length_y = pat.length_x, pat.length_y
# xmin = pat_x - length_x / 2 - 2 * eps
# xmax = pat_x + length_x / 2 + 2 * eps
# ymin = pat_y - length_y / 2 - 2 * eps
# ymax = pat_y + length_y / 2 + 2 * eps
# mask_x = np.logical_and(x >= xmin, x <= xmax)
# mask_y = np.logical_and(y >= ymin, y <= ymax)
# mask = np.logical_and(mask_x, mask_y)
# patch_ids[mask, patch_counter[mask]] = pat.id
# patch_counter[mask] += 1 # increment patch idx
# membrane_ids[mask] = mem.id
# element_ids[mask] = elem.id
if isinstance(mem, abstract.SquareCmutMembrane):
# determine vertices which belong to each membrane
mem_x, mem_y, mem_z = mem.position
length_x, length_y = mem.length_x, mem.length_y
xmin = mem_x - length_x / 2 # - 2 * eps
xmax = mem_x + length_x / 2 # + 2 * eps
ymin = mem_y - length_y / 2 # - 2 * eps
ymax = mem_y + length_y / 2 # + 2 * eps
mask_x = np.logical_and(x >= xmin - 2 * eps, x <= xmax + 2 * eps)
mask_y = np.logical_and(y >= ymin - 2 * eps, y <= ymax + 2 * eps)
mem_mask = np.logical_and(mask_x, mask_y)
membrane_ids[mem_mask] = mem.id
element_ids[mem_mask] = elem.id
# check and flag boundary vertices
mask1 = np.abs(x[mem_mask] - xmin) <= 2 * eps
mask2 = np.abs(x[mem_mask] - xmax) <= 2 * eps
mask3 = np.abs(y[mem_mask] - ymin) <= 2 * eps
mask4 = np.abs(y[mem_mask] - ymax) <= 2 * eps
mesh.on_boundary[mem_mask] = np.any(np.c_[mask1, mask2, mask3, mask4],
axis=1)
elif isinstance(mem, abstract.CircularCmutMembrane):
# determine vertices which belong to each membrane
mem_x, mem_y, mem_z = mem.position
radius = mem.radius
rmax = radius + 2 * eps
r = np.sqrt((x - mem_x)**2 + (y - mem_y)**2)
mem_mask = r <= rmax
membrane_ids[mem_mask] = mem.id
element_ids[mem_mask] = elem.id
# check and flag boundary vertices
mask1 = r[mem_mask] <= radius + 2 * eps
mask2 = r[mem_mask] >= radius - 2 * eps
mesh.on_boundary[mem_mask] = np.logical_and(mask1, mask2)
else:
raise TypeError
# check that no vertices were missed
# assert ~np.any(np.isnan(patch_ids[:,0])) # check that each vertex is assigned to at least one patch
# assert ~np.any(np.isnan(membrane_ids))
# assert ~np.any(np.isnan(element_ids))
# mesh.patch_ids = patch_ids
mesh.membrane_ids = membrane_ids
mesh.element_ids = element_ids
return mesh
@util.memoize
def geometry_square(xl, yl, refn=1, type=1):
'''
Creates a square mesh geometry (vertices, triangles etc.) which can be used to
construct a mesh object.
'''
if type == 1:
# vertices
v = np.zeros((5, 3), dtype=np.float64)
v[0, :] = -xl / 2, -yl / 2, 0.0 # bottom left
v[1, :] = xl / 2, -yl / 2, 0.0 # bottom right
v[2, :] = xl / 2, yl / 2, 0.0 # top right
v[3, :] = -xl / 2, yl / 2, 0.0 # top left
v[4, :] = 0.0, 0.0, 0.0 # center
# edges
e = np.zeros((8, 2), dtype=np.uint32)
e[0, :] = 0, 1 # bottom
e[1, :] = 1, 2 # right
e[2, :] = 2, 3 # top
e[3, :] = 3, 0 # left
e[4, :] = 0, 4 # bottom left
e[5, :] = 1, 4 # bottom right
e[6, :] = 2, 4 # top right
e[7, :] = 3, 4 # top left
# triangles and triangle edges
t = np.zeros((4, 3), dtype=np.uint32)
s = np.zeros((4, 3), dtype=np.uint32)
t[0, :] = 0, 1, 4 # bottom
s[0, :] = 5, 4, 0
t[1, :] = 1, 2, 4 # right
s[1, :] = 6, 5, 1
t[2, :] = 2, 3, 4 # top
s[2, :] = 7, 6, 2
t[3, :] = 3, 0, 4 # left
s[3, :] = 4, 7, 3
elif type == 2:
# vertices
v = np.zeros((4, 3), dtype=np.float64)
v[0, :] = -xl / 2, -yl / 2, 0.0 # bottom left
v[1, :] = xl / 2, -yl / 2, 0.0 # bottom right
v[2, :] = xl / 2, yl / 2, 0.0 # top right
v[3, :] = -xl / 2, yl / 2, 0.0 # top left
# edges
e = np.zeros((5, 2), dtype=np.uint32)
e[0, :] = 0, 1 # bottom
e[1, :] = 1, 2 # right
e[2, :] = 2, 3 # top
e[3, :] = 3, 0 # left
e[4, :] = 1, 3 # diagonal
# triangles and triangle edges
t = np.zeros((2, 3), dtype=np.uint32)
s = np.zeros((2, 3), dtype=np.uint32)
t[0, :] = 0, 1, 3 # bottom left
s[0, :] = 4, 3, 0
t[1, :] = 1, 2, 3 # top right
s[1, :] = 2, 4, 1
else:
raise ValueError('incorrect type')
# refine geometry using h2lib macrosurface3d -> surface3d procedure
if refn > 1:
msurf = Macrosurface3d(len(v), len(e), len(t))
msurf.x[:] = v
msurf.e[:] = e
msurf.t[:] = t
msurf.s[:] = s
msurf.set_parametrization('square')
surf = build_from_macrosurface3d_surface3d(msurf, refn)
# copy arrays from surf
v = np.array(surf.x, copy=True)
e = np.array(surf.e, copy=True)
t = np.array(surf.t, copy=True)
s = np.array(surf.s, copy=True)
# translate geometry
# v += np.array(center)
return v, e, t, s
# @util.memoize
# def geometry_circle(rl, refn=1):
# '''
# Creates a circle mesh geometry (vertices, triangles etc.) which can be used to
# construct a mesh object.
# '''
# # vertices
# v = np.zeros((5, 3), dtype=np.float64)
# v[0,:] = -rl, 0.0, 0.0 # left
# v[1,:] = 0.0, -rl, 0.0 # bottom
# v[2,:] = rl, 0.0, 0.0 # right
# v[3,:] = 0.0, rl, 0.0 # top
# v[4,:] = 0.0, 0.0, 0.0 # center
# # edges
# e = np.zeros((8, 2), dtype=np.uint32)
# e[0,:] = 0, 1 # bottom left
# e[1,:] = 1, 2 # bottom right
# e[2,:] = 2, 3 # top right
# e[3,:] = 3, 0 # top left
# e[4,:] = 0, 4 # left horizontal
# e[5,:] = 1, 4 # bottom vertical
# e[6,:] = 2, 4 # right horizontal
# e[7,:] = 3, 4 # right vertical
# # triangles and triangle edges
# t = np.zeros((4, 3), dtype=np.uint32)
# s = np.zeros((4, 3), dtype=np.uint32)
# t[0, :] = 0, 1, 4 # bottom left
# s[0, :] = 5, 4, 0
# t[1, :] = 1, 2, 4 # bottom right
# s[1, :] = 6, 5, 1
# t[2, :] = 2, 3, 4 # top right
# s[2, :] = 7, 6, 2
# t[3, :] = 3, 0, 4 # top left
# s[3, :] = 4, 7, 3
# # refine geometry using h2lib macrosurface3d -> surface3d procedure
# if refn > 1:
# msurf = Macrosurface3d(len(v), len(e), len(t))
# msurf.x[:] = v
# msurf.e[:] = e
# msurf.t[:] = t
# msurf.s[:] = s
# msurf.set_parametrization('circle')
# surf = build_from_macrosurface3d_surface3d(msurf, refn)
# # copy arrays from surf
# v = np.array(surf.x, copy=True)
# e = np.array(surf.e, copy=True)
# t = np.array(surf.t, copy=True)
# s = np.array(surf.s, copy=True)
# # translate geometry
# # v += np.array(center)
# return v, e, t, s
@util.memoize
def geometry_circle(rl, n=4, refn=1):
'''
Creates a circle mesh geometry (vertices, triangles etc.) which can be used to
construct a mesh object.
'''
# vertices
v = np.zeros((n + 1, 3), dtype=np.float64)
for i in range(n):
theta = 2 * np.pi / n * i - np.pi
# p = rl / (np.abs(np.sin(theta)) + np.abs(np.cos(theta)))
x = rl * np.cos(theta)
y = rl * np.sin(theta)
v[i, :] = x, y, 0.0
v[n, :] = 0.0, 0.0, 0.0
v[np.isclose(v, 0)] = 0.0
# edges
e = np.zeros((2 * n, 2), dtype=np.uint32)
for i in range(n):
e[i, :] = i, np.mod(i + 1, n)
for i in range(n):
e[n + i, :] = i, n
# triangles and triangle edges
t = np.zeros((n, 3), dtype=np.uint32)
s = np.zeros((n, 3), dtype=np.uint32)
first = list(np.mod(np.arange(0, n) + 1, n) + n)
second = list(np.mod(np.arange(0, n), n) + n)
third = list(np.arange(0, n))
for i in range(n):
t[i, :] = i, np.mod(i + 1, n), n
s[i, :] = first[i], second[i], third[i]
# refine geometry using h2lib macrosurface3d -> surface3d procedure
if refn > 1:
msurf = Macrosurface3d(len(v), len(e), len(t))
msurf.x[:] = v
msurf.e[:] = e
msurf.t[:] = t
msurf.s[:] = s
msurf.set_parametrization('circle')
surf = build_from_macrosurface3d_surface3d(msurf, refn)
# copy arrays from surf
v = np.array(surf.x, copy=True)
e = np.array(surf.e, copy=True)
t = np.array(surf.t, copy=True)
s = np.array(surf.s, copy=True)
# translate geometry
# v += np.array(center)
return v, e, t, s
def square(xl, yl, refn=1, type=1, center=(0, 0, 0)):
'''
'''
v, e, t, s = geometry_square(xl, yl, refn=refn, type=type)
v += np.array(center)
mesh = Mesh.from_geometry(v, e, t, s)
# check and flag boundary vertices
mask1 = np.abs(mesh.vertices[:, 0] - center[0] + xl / 2) <= 2 * eps
mask2 = np.abs(mesh.vertices[:, 0] - center[0] - xl / 2) <= 2 * eps
mask3 = np.abs(mesh.vertices[:, 1] - center[1] + yl / 2) <= 2 * eps
mask4 = np.abs(mesh.vertices[:, 1] - center[1] - yl / 2) <= 2 * eps
mesh.on_boundary = np.any(np.c_[mask1, mask2, mask3, mask4], axis=1)
return mesh
def circle(rl, refn=1, center=(0, 0, 0)):
'''
'''
v, e, t, s = geometry_circle(rl, n=4, refn=refn)
    v += np.array(center)
    mesh = Mesh.from_geometry(v, e, t, s)
    # check and flag boundary vertices (those lying on the outer circle of radius rl)
    r = np.sqrt((mesh.vertices[:, 0] - center[0])**2 + (mesh.vertices[:, 1] - center[1])**2)
    mesh.on_boundary = np.abs(r - rl) <= 2 * eps
    return mesh
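# Usage sketch for the helpers above: build refined square and circular membrane
# meshes and query their sizes. The side length, radius and refinement level are
# illustrative values only.
def _example_meshes():
    sq = square(40e-6, 40e-6, refn=3)
    circ = circle(20e-6, refn=3)
    return (sq.nvertices, sq.ntriangles), (circ.nvertices, circ.ntriangles)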
"""
Set operations for arrays based on sorting.
:Contains:
unique,
isin,
ediff1d,
intersect1d,
setxor1d,
in1d,
union1d,
setdiff1d
:Notes:
For floating point arrays, inaccurate results may appear due to usual round-off
and floating point comparison issues.
Speed could be gained in some operations by an implementation of
sort(), that can provide directly the permutation vectors, avoiding
thus calls to argsort().
To do: Optionally return indices analogously to unique for all functions.
:Author: <NAME>
"""
import functools
import numpy as np
from numpy.core import overrides
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
__all__ = [
'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique',
'in1d', 'isin'
]
def _ediff1d_dispatcher(ary, to_end=None, to_begin=None):
return (ary, to_end, to_begin)
@array_function_dispatch(_ediff1d_dispatcher)
def ediff1d(ary, to_end=None, to_begin=None):
"""
The differences between consecutive elements of an array.
Parameters
----------
ary : array_like
If necessary, will be flattened before the differences are taken.
to_end : array_like, optional
Number(s) to append at the end of the returned differences.
to_begin : array_like, optional
Number(s) to prepend at the beginning of the returned differences.
Returns
-------
ediff1d : ndarray
The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.
See Also
--------
diff, gradient
Notes
-----
When applied to masked arrays, this function drops the mask information
if the `to_begin` and/or `to_end` parameters are used.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.ediff1d(x)
array([ 1, 2, 3, -7])
>>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
array([-99, 1, 2, ..., -7, 88, 99])
The returned array is always 1D.
>>> y = [[1, 2, 4], [1, 6, 24]]
>>> np.ediff1d(y)
array([ 1, 2, -3, 5, 18])
"""
# force a 1d array
ary = np.asanyarray(ary).ravel()
# enforce that the dtype of `ary` is used for the output
dtype_req = ary.dtype
# fast track default case
if to_begin is None and to_end is None:
return ary[1:] - ary[:-1]
if to_begin is None:
l_begin = 0
else:
to_begin = np.asanyarray(to_begin)
if not np.can_cast(to_begin, dtype_req, casting="same_kind"):
raise TypeError("dtype of `to_end` must be compatible "
"with input `ary` under the `same_kind` rule.")
to_begin = to_begin.ravel()
l_begin = len(to_begin)
if to_end is None:
l_end = 0
else:
to_end = np.asanyarray(to_end)
if not np.can_cast(to_end, dtype_req, casting="same_kind"):
raise TypeError("dtype of `to_end` must be compatible "
"with input `ary` under the `same_kind` rule.")
to_end = to_end.ravel()
l_end = len(to_end)
# do the calculation in place and copy to_begin and to_end
l_diff = max(len(ary) - 1, 0)
result = np.empty(l_diff + l_begin + l_end, dtype=ary.dtype)
result = ary.__array_wrap__(result)
if l_begin > 0:
result[:l_begin] = to_begin
if l_end > 0:
result[l_begin + l_diff:] = to_end
np.subtract(ary[1:], ary[:-1], result[l_begin:l_begin + l_diff])
return result
def _unpack_tuple(x):
""" Unpacks one-element tuples for use as return values """
if len(x) == 1:
return x[0]
else:
return x
def _unique_dispatcher(ar, return_index=None, return_inverse=None,
return_counts=None, axis=None):
return (ar,)
@array_function_dispatch(_unique_dispatcher)
def unique(ar, return_index=False, return_inverse=False,
return_counts=False, axis=None):
"""
Find the unique elements of an array.
Returns the sorted unique elements of an array. There are three optional
outputs in addition to the unique elements:
* the indices of the input array that give the unique values
* the indices of the unique array that reconstruct the input array
* the number of times each unique value comes up in the input array
Parameters
----------
ar : array_like
Input array. Unless `axis` is specified, this will be flattened if it
is not already 1-D.
return_index : bool, optional
If True, also return the indices of `ar` (along the specified axis,
if provided, or in the flattened array) that result in the unique array.
return_inverse : bool, optional
If True, also return the indices of the unique array (for the specified
axis, if provided) that can be used to reconstruct `ar`.
return_counts : bool, optional
If True, also return the number of times each unique item appears
in `ar`.
.. versionadded:: 1.9.0
axis : int or None, optional
The axis to operate on. If None, `ar` will be flattened. If an integer,
the subarrays indexed by the given axis will be flattened and treated
as the elements of a 1-D array with the dimension of the given axis,
see the notes for more details. Object arrays or structured arrays
that contain objects are not supported if the `axis` kwarg is used. The
default is None.
.. versionadded:: 1.13.0
Returns
-------
unique : ndarray
The sorted unique values.
unique_indices : ndarray, optional
The indices of the first occurrences of the unique values in the
original array. Only provided if `return_index` is True.
unique_inverse : ndarray, optional
The indices to reconstruct the original array from the
unique array. Only provided if `return_inverse` is True.
unique_counts : ndarray, optional
The number of times each of the unique values comes up in the
original array. Only provided if `return_counts` is True.
.. versionadded:: 1.9.0
See Also
--------
numpy.lib.arraysetops : Module with a number of other functions for
performing set operations on arrays.
repeat : Repeat elements of an array.
Notes
-----
When an axis is specified the subarrays indexed by the axis are sorted.
This is done by making the specified axis the first dimension of the array
(move the axis to the first dimension to keep the order of the other axes)
and then flattening the subarrays in C order. The flattened subarrays are
then viewed as a structured type with each element given a label, with the
effect that we end up with a 1-D array of structured types that can be
treated in the same way as any other 1-D array. The result is that the
flattened subarrays are sorted in lexicographic order starting with the
first element.
Examples
--------
>>> np.unique([1, 1, 2, 2, 3, 3])
array([1, 2, 3])
>>> a = np.array([[1, 1], [2, 3]])
>>> np.unique(a)
array([1, 2, 3])
Return the unique rows of a 2D array
>>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
>>> np.unique(a, axis=0)
array([[1, 0, 0], [2, 3, 4]])
Return the indices of the original array that give the unique values:
>>> a = np.array(['a', 'b', 'b', 'c', 'a'])
>>> u, indices = np.unique(a, return_index=True)
>>> u
array(['a', 'b', 'c'], dtype='<U1')
>>> indices
array([0, 1, 3])
>>> a[indices]
array(['a', 'b', 'c'], dtype='<U1')
Reconstruct the input array from the unique values and inverse:
>>> a = np.array([1, 2, 6, 4, 2, 3, 2])
>>> u, indices = np.unique(a, return_inverse=True)
>>> u
array([1, 2, 3, 4, 6])
>>> indices
array([0, 1, 4, 3, 1, 2, 1])
>>> u[indices]
array([1, 2, 6, 4, 2, 3, 2])
Reconstruct the input values from the unique values and counts:
>>> a = np.array([1, 2, 6, 4, 2, 3, 2])
>>> values, counts = np.unique(a, return_counts=True)
>>> values
array([1, 2, 3, 4, 6])
>>> counts
array([1, 3, 1, 1, 1])
>>> np.repeat(values, counts)
array([1, 2, 2, 2, 3, 4, 6]) # original order not preserved
"""
ar = np.asanyarray(ar)
if axis is None:
ret = _unique1d(ar, return_index, return_inverse, return_counts)
return _unpack_tuple(ret)
# axis was specified and not None
try:
ar = np.moveaxis(ar, axis, 0)
except np.AxisError:
# this removes the "axis1" or "axis2" prefix from the error message
raise np.AxisError(axis, ar.ndim) from None
# Must reshape to a contiguous 2D array for this to work...
orig_shape, orig_dtype = ar.shape, ar.dtype
ar = ar.reshape(orig_shape[0], np.prod(orig_shape[1:], dtype=np.intp))
# Copyright 2018 <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: MIT
import os
import shutil
import textwrap
from tempfile import TemporaryDirectory
import numpy as np
import pytest
from pgimp.GimpFile import GimpFile, GimpFileType
from pgimp.GimpFileCollection import GimpFileCollection, NonExistingPathComponentException, \
GimpMissingRequiredParameterException, MaskForegroundColor
from pgimp.util import file
from pgimp.util.TempFile import TempFile
from pgimp.util.string import escape_single_quotes
def test_create_from_pathname_with_file():
prefix = file.relative_to(__file__, 'test-resources/files/')
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/first'))
assert len(collection.get_files()) == 1
assert prefix == collection.get_prefix()
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/first.xcf'))
assert len(collection.get_files()) == 1
assert prefix == collection.get_prefix()
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/first.png'))
assert len(collection.get_files()) == 0
assert '' == collection.get_prefix()
def test_create_from_pathname_with_directory():
prefix = file.relative_to(__file__, 'test-resources/files/')
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files'))
assert len(collection.get_files()) == 2
assert prefix == collection.get_prefix()
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/'))
assert len(collection.get_files()) == 2
assert prefix == collection.get_prefix()
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/*'))
assert len(collection.get_files()) == 2
assert prefix == collection.get_prefix()
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/*.xcf'))
assert len(collection.get_files()) == 2
assert prefix == collection.get_prefix()
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/*.png'))
assert len(collection.get_files()) == 0
assert '' == collection.get_prefix()
def test_create_from_pathname_with_recursive_match():
prefix = file.relative_to(__file__, 'test-resources/files/')
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/**'))
assert len(collection.get_files()) == 4
assert prefix == collection.get_prefix()
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/**/'))
assert len(collection.get_files()) == 4
assert prefix == collection.get_prefix()
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/**/*'))
assert len(collection.get_files()) == 4
assert prefix == collection.get_prefix()
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/**/*.xcf'))
assert len(collection.get_files()) == 4
assert prefix == collection.get_prefix()
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/**/*.png'))
assert len(collection.get_files()) == 0
assert '' == collection.get_prefix()
def test_ordering():
prefix = file.relative_to(__file__, 'test-resources/files/')
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/**'))
collection = collection.replace_prefix(prefix)
assert [
'first.xcf',
'second.xcf',
'a/third.xcf',
'a/b/fourth.xcf',
] == collection.get_files()
def test_replace_path_components():
prefix = file.relative_to(__file__, 'test-resources/files/')
suffix = '.xcf'
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/**'))
collection = collection.replace_path_components(prefix, '#', suffix, '%')
assert [
'#first%.xcf',
'#second%.xcf',
'#a/third%.xcf',
'#a/b/fourth%.xcf',
] == collection.get_files()
def test_replace_path_components_with_non_existing_component():
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/**'))
with pytest.raises(NonExistingPathComponentException):
collection.replace_path_components('wrong_prefix', '#')
def test_replace_path_components_without_replacements():
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/**'))
files_before = collection.get_files()
collection = collection.replace_path_components()
files_after = collection.get_files()
assert files_before == files_after
def test_find_files_containing_layer_by_predictate():
with TempFile('.xcf') as with_white, TempFile('.xcf') as without_white:
GimpFile(with_white)\
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8))\
.add_layer_from_numpy('White', np.ones(shape=(1, 1), dtype=np.uint8)*255)
GimpFile(without_white) \
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8)) \
.add_layer_from_numpy('Black', np.zeros(shape=(1, 1), dtype=np.uint8))
collection = GimpFileCollection([with_white, without_white])
files = collection.find_files_containing_layer_by_predictate(
lambda layers: 'White' in map(lambda layer: layer.name, layers)
)
assert len(files) == 1
assert with_white == files[0]
files = collection.find_files_containing_layer_by_predictate(
lambda layers: 'Not existing' in map(lambda layer: layer.name, layers)
)
assert len(files) == 0
def test_find_files_containing_layer_by_name():
with TempFile('.xcf') as with_white, TempFile('.xcf') as without_white:
GimpFile(with_white)\
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8))\
.add_layer_from_numpy('White', np.ones(shape=(1, 1), dtype=np.uint8)*255)
GimpFile(without_white) \
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8)) \
.add_layer_from_numpy('Black', np.zeros(shape=(1, 1), dtype=np.uint8))
collection = GimpFileCollection([with_white, without_white])
files = collection.find_files_containing_layer_by_name('White', timeout_in_seconds=10)
assert len(files) == 1
assert with_white == files[0]
files = collection.find_files_containing_layer_by_name('Not existing', timeout_in_seconds=10)
assert len(files) == 0
def test_find_files_by_script_with_script_that_takes_single_file():
with TempFile('.xcf') as with_white, TempFile('.xcf') as without_white:
GimpFile(with_white)\
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8))\
.add_layer_from_numpy('White', np.ones(shape=(1, 1), dtype=np.uint8)*255)
GimpFile(without_white) \
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8)) \
.add_layer_from_numpy('Black', np.zeros(shape=(1, 1), dtype=np.uint8))
collection = GimpFileCollection([with_white, without_white])
script = textwrap.dedent(
"""
from pgimp.gimp.file import open_xcf
from pgimp.gimp.parameter import return_bool
image = open_xcf('__file__')
for layer in image.layers:
if layer.name == '{0:s}':
return_bool(True)
return_bool(False)
"""
)
files = collection.find_files_by_script(script.format(escape_single_quotes('White')), timeout_in_seconds=3)
assert len(files) == 1
assert with_white == files[0]
files = collection.find_files_by_script(script.format(escape_single_quotes('Not existing')), timeout_in_seconds=3)
assert len(files) == 0
def test_find_files_by_script_with_script_that_takes_multiple_files():
with TempFile('.xcf') as with_white, TempFile('.xcf') as without_white:
GimpFile(with_white)\
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8))\
.add_layer_from_numpy('White', np.ones(shape=(1, 1), dtype=np.uint8)*255)
GimpFile(without_white) \
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8)) \
.add_layer_from_numpy('Black', np.zeros(shape=(1, 1), dtype=np.uint8))
collection = GimpFileCollection([with_white, without_white])
script = textwrap.dedent(
"""
import gimp
from pgimp.gimp.file import XcfFile
from pgimp.gimp.parameter import return_json, get_json
files = get_json('__files__')
matches = []
for file in files:
with XcfFile(file) as image:
for layer in image.layers:
if layer.name == '{0:s}':
matches.append(file)
return_json(matches)
"""
)
files = collection.find_files_by_script(script.format(escape_single_quotes('White')), timeout_in_seconds=3)
assert len(files) == 1
assert with_white == files[0]
files = collection.find_files_by_script(script.format(escape_single_quotes('Not existing')), timeout_in_seconds=3)
assert len(files) == 0
def test_find_files_by_script_without_required_parameters():
collection = GimpFileCollection([])
script = textwrap.dedent(
"""
print(1)
"""
)
with pytest.raises(GimpMissingRequiredParameterException):
collection.find_files_by_script(script, timeout_in_seconds=3)
def test_execute_script_and_return_json_with_script_that_takes_single_file():
with TempFile('.xcf') as with_white, TempFile('.xcf') as without_white:
GimpFile(with_white)\
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8))\
.add_layer_from_numpy('White', np.ones(shape=(1, 1), dtype=np.uint8)*255)
GimpFile(without_white) \
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8)) \
.add_layer_from_numpy('Black', np.zeros(shape=(1, 1), dtype=np.uint8))
collection = GimpFileCollection([with_white, without_white])
script = textwrap.dedent(
"""
from pgimp.gimp.file import open_xcf
from pgimp.gimp.parameter import return_json
image = open_xcf('__file__')
for layer in image.layers:
if layer.name == '{0:s}':
return_json(True)
return_json(False)
"""
)
files = collection.execute_script_and_return_json(script.format(escape_single_quotes('White')), timeout_in_seconds=3)
assert {
with_white: True,
without_white: False,
} == files
def test_execute_script_and_return_json_with_script_that_takes_multiple_files_using_open():
with TempFile('.xcf') as with_white, TempFile('.xcf') as without_white:
GimpFile(with_white)\
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8))\
.add_layer_from_numpy('White', np.ones(shape=(1, 1), dtype=np.uint8)*255)
GimpFile(without_white) \
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8)) \
.add_layer_from_numpy('Black', np.zeros(shape=(1, 1), dtype=np.uint8))
collection = GimpFileCollection([with_white, without_white])
script = textwrap.dedent(
"""
import gimp
from pgimp.gimp.file import open_xcf
from pgimp.gimp.parameter import return_json, get_json
files = get_json('__files__')
matches = []
for file in files:
image = open_xcf(file)
for layer in image.layers:
if layer.name == '{0:s}':
matches.append(file)
gimp.pdb.gimp_image_delete(image)
return_json(matches)
"""
)
files = collection.execute_script_and_return_json(script.format(escape_single_quotes('White')), timeout_in_seconds=3)
assert len(files) == 1
assert with_white == files[0]
def test_execute_script_and_return_json_with_script_that_takes_multiple_files_using_xcf_file():
with TempFile('.xcf') as with_white, TempFile('.xcf') as without_white:
GimpFile(with_white)\
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8))\
.add_layer_from_numpy('White', np.ones(shape=(1, 1), dtype=np.uint8)*255)
GimpFile(without_white) \
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8)) \
.add_layer_from_numpy('Black', np.zeros(shape=(1, 1), dtype=np.uint8))
collection = GimpFileCollection([with_white, without_white])
script = textwrap.dedent(
"""
import gimp
from pgimp.gimp.file import XcfFile
from pgimp.gimp.parameter import return_json, get_json
files = get_json('__files__')
matches = []
for file in files:
with XcfFile(file) as image:
for layer in image.layers:
if layer.name == '{0:s}':
matches.append(file)
return_json(matches)
"""
)
files = collection.execute_script_and_return_json(script.format(escape_single_quotes('White')), timeout_in_seconds=3)
assert len(files) == 1
assert with_white == files[0]
def test_execute_script_and_return_json_with_script_that_takes_multiple_files_using_for_each():
with TempFile('.xcf') as with_white, TempFile('.xcf') as without_white:
GimpFile(with_white) \
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8)) \
.add_layer_from_numpy('White', np.ones(shape=(1, 1), dtype=np.uint8) * 255)
GimpFile(without_white) \
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8)) \
.add_layer_from_numpy('Black', np.zeros(shape=(1, 1), dtype=np.uint8))
collection = GimpFileCollection([with_white, without_white])
script = textwrap.dedent(
"""
from pgimp.gimp.file import for_each_file
from pgimp.gimp.parameter import return_json, get_json
matches = []
def layer_matches(image, file):
for layer in image.layers:
if layer.name == '{0:s}':
matches.append(file)
for_each_file(layer_matches)
return_json(matches)
"""
)
files = collection.execute_script_and_return_json(script.format(escape_single_quotes('White')),
timeout_in_seconds=3)
assert len(files) == 1
assert with_white == files[0]
def test_copy_layer_from():
with TemporaryDirectory('_src') as srcdir, TemporaryDirectory('_dst') as dstdir:
src_1 = GimpFile(os.path.join(srcdir, 'file1.xcf'))\
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8))\
.add_layer_from_numpy('White', np.ones(shape=(1, 1), dtype=np.uint8)*255)
src_2 = GimpFile(os.path.join(srcdir, 'file2.xcf'))\
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8)) \
.add_layer_from_numpy('White', np.ones(shape=(1, 1), dtype=np.uint8)*255)
dst_1 = GimpFile(os.path.join(dstdir, 'file1.xcf')) \
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8)) \
.add_layer_from_numpy('White', np.zeros(shape=(1, 1), dtype=np.uint8)*255)
dst_2 = GimpFile(os.path.join(dstdir, 'file2.xcf')) \
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8))
src_collection = GimpFileCollection([src_1.get_file(), src_2.get_file()])
dst_collection = GimpFileCollection([dst_1.get_file(), dst_2.get_file()])
dst_collection.copy_layer_from(src_collection, 'White', layer_position=1, timeout_in_seconds=10)
assert np.all(dst_1.layer_to_numpy('White') == 255)
assert ['Background', 'White'] == dst_1.layer_names()
assert 'White' in dst_2.layer_names()
assert np.all(dst_2.layer_to_numpy('White') == 255)
assert ['Background', 'White'] == dst_2.layer_names()
def test_merge_mask_layer_from_with_grayscale_and_foreground_color_white():
with TemporaryDirectory('_src') as srcdir, TemporaryDirectory('_dst') as dstdir:
src_1 = GimpFile(os.path.join(srcdir, 'file1.xcf'))\
.create('Mask', np.array([[255, 0]], dtype=np.uint8))
dst_1 = GimpFile(os.path.join(dstdir, 'file1.xcf')) \
.create('Mask', np.array([[0, 255]], dtype=np.uint8))
dst_2 = GimpFile(os.path.join(dstdir, 'file2.xcf')) \
.create('Mask', np.array([[0, 255]], dtype=np.uint8))
src_collection = GimpFileCollection([src_1.get_file()])
dst_collection = GimpFileCollection([dst_1.get_file(), dst_2.get_file()])
dst_collection.merge_mask_layer_from(src_collection, 'Mask', MaskForegroundColor.WHITE, timeout_in_seconds=10)
assert np.all(dst_1.layer_to_numpy('Mask') == [[255], [255]])
assert ['Mask'] == dst_1.layer_names()
assert 'Mask' in dst_2.layer_names()
assert np.all(dst_2.layer_to_numpy('Mask') == [[0], [255]])
assert ['Mask'] == dst_2.layer_names()
def test_merge_mask_layer_from_with_grayscale_and_foreground_color_black():
with TemporaryDirectory('_src') as srcdir, TemporaryDirectory('_dst') as dstdir:
src_1 = GimpFile(os.path.join(srcdir, 'file1.xcf'))\
.create('Mask', np.array([[255, 0]], dtype=np.uint8))
dst_1 = GimpFile(os.path.join(dstdir, 'file1.xcf')) \
.create('Mask', np.array([[0, 255]], dtype=np.uint8))
dst_2 = GimpFile(os.path.join(dstdir, 'file2.xcf')) \
.create('Mask', np.array([[0, 255]], dtype=np.uint8))
src_collection = GimpFileCollection([src_1.get_file()])
dst_collection = GimpFileCollection([dst_1.get_file(), dst_2.get_file()])
dst_collection.merge_mask_layer_from(src_collection, 'Mask', MaskForegroundColor.BLACK, timeout_in_seconds=10)
assert np.all(dst_1.layer_to_numpy('Mask') == [[0], [0]])
assert ['Mask'] == dst_1.layer_names()
assert 'Mask' in dst_2.layer_names()
assert np.all(dst_2.layer_to_numpy('Mask') == [[0], [255]])
assert ['Mask'] == dst_2.layer_names()
def test_merge_mask_layer_from_with_color():
with TemporaryDirectory('_src') as srcdir, TemporaryDirectory('_dst') as dstdir:
src_1 = GimpFile(os.path.join(srcdir, 'file1.xcf'))\
.create('Mask', np.array([[[255, 255, 255], [0, 0, 0]]], dtype=np.uint8))
dst_1 = GimpFile(os.path.join(dstdir, 'file1.xcf')) \
.create('Mask', np.array([[[0, 0, 0], [255, 255, 255]]], dtype=np.uint8))
dst_2 = GimpFile(os.path.join(dstdir, 'file2.xcf')) \
.create('Mask', np.array([[[0, 0, 0], [255, 255, 255]]], dtype=np.uint8))
src_collection = GimpFileCollection([src_1.get_file()])
dst_collection = GimpFileCollection([dst_1.get_file(), dst_2.get_file()])
dst_collection.merge_mask_layer_from(src_collection, 'Mask', MaskForegroundColor.WHITE, timeout_in_seconds=10)
assert np.all(dst_1.layer_to_numpy('Mask') == [[255, 255, 255], [255, 255, 255]])
assert ['Mask'] == dst_1.layer_names()
assert 'Mask' in dst_2.layer_names()
assert np.all(dst_2.layer_to_numpy('Mask') == [[0, 0, 0], [255, 255, 255]])
assert ['Mask'] == dst_2.layer_names()
def test_merge_mask_layer_from_with_mask_not_available_in_files_in_both_collections_and_foreground_color_white():
with TemporaryDirectory('_src') as srcdir, TemporaryDirectory('_dst') as dstdir:
src_1 = GimpFile(os.path.join(srcdir, 'file1.xcf')) \
.create_empty(2, 1, GimpFileType.GRAY)
dst_1 = GimpFile(os.path.join(dstdir, 'file1.xcf')) \
.create_empty(2, 1, GimpFileType.GRAY)
src_collection = GimpFileCollection([src_1.get_file()])
dst_collection = GimpFileCollection([dst_1.get_file()])
dst_collection.merge_mask_layer_from(src_collection, 'Mask', MaskForegroundColor.WHITE, timeout_in_seconds=10)
assert np.all(dst_1.layer_to_numpy('Mask') == [[0], [0]])
assert ['Mask'] == dst_1.layer_names()
def test_merge_mask_layer_from_with_mask_not_available_in_files_in_both_collections_and_foreground_color_black():
with TemporaryDirectory('_src') as srcdir, TemporaryDirectory('_dst') as dstdir:
src_1 = GimpFile(os.path.join(srcdir, 'file1.xcf')) \
.create_empty(2, 1, GimpFileType.GRAY)
dst_1 = GimpFile(os.path.join(dstdir, 'file1.xcf')) \
.create_empty(2, 1, GimpFileType.GRAY)
src_collection = GimpFileCollection([src_1.get_file()])
dst_collection = GimpFileCollection([dst_1.get_file()])
dst_collection.merge_mask_layer_from(src_collection, 'Mask', MaskForegroundColor.BLACK, timeout_in_seconds=10)
assert np.all(dst_1.layer_to_numpy('Mask') == [[255], [255]])
assert ['Mask'] == dst_1.layer_names()
def test_clear_selection():
file_with_selection_original = file.relative_to(__file__, 'test-resources/selection.xcf')
with TempFile('.xcf') as file_with_selection:
shutil.copyfile(file_with_selection_original, file_with_selection)
collection = GimpFileCollection([file_with_selection])
selections_before = _has_selections(collection)
assert selections_before[file_with_selection]
collection.clear_selection(timeout_in_seconds=10)
selections_after = _has_selections(collection)
assert not selections_after[file_with_selection]
def _has_selections(collection):
result = collection.execute_script_and_return_json(
textwrap.dedent(
"""
import gimp
from pgimp.gimp.parameter import get_json, return_json
from pgimp.gimp.file import XcfFile
files = get_json('__files__')
selections = {}
for file in files:
with XcfFile(file, save=True) as image:
selections[file] = not gimp.pdb.gimp_selection_is_empty(image)
return_json(selections)
"""
),
timeout_in_seconds=10
)
return result
def test_remove_layers_by_name():
data = np.array([[0, 255]], dtype=np.uint8)
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
import numpy as np
import pandas as pd
import mars.dataframe as md
from mars.config import option_context
from mars.dataframe import DataFrame
from mars.deploy.local.core import new_cluster
from mars.session import new_session
from mars.tests.core import TestBase
try:
import vineyard
except ImportError:
vineyard = None
try:
import sqlalchemy
except ImportError:
sqlalchemy = None
try:
import pyarrow as pa
except ImportError:
pa = None
try:
import fastparquet
except ImportError:
fastparquet = None
class Test(TestBase):
def setUp(self):
super().setUp()
self.ctx, self.executor = self._create_test_context()
def testToCSVExecution(self):
index = pd.RangeIndex(100, 0, -1, name='index')
raw = pd.DataFrame({
'col1': np.random.rand(100),
'col2': np.random.choice(['a', 'b', 'c'], (100,)),
'col3': np.arange(100)
}, index=index)
df = DataFrame(raw, chunk_size=33)
with tempfile.TemporaryDirectory() as base_path:
# DATAFRAME TESTS
# test one file with dataframe
path = os.path.join(base_path, 'out.csv')
r = df.to_csv(path)
self.executor.execute_dataframe(r)
result = pd.read_csv(path, dtype=raw.dtypes.to_dict())
result.set_index('index', inplace=True)
pd.testing.assert_frame_equal(result, raw)
# test multi files with dataframe
path = os.path.join(base_path, 'out-*.csv')
r = df.to_csv(path)
self.executor.execute_dataframe(r)
dfs = [pd.read_csv(os.path.join(base_path, f'out-{i}.csv'),
dtype=raw.dtypes.to_dict())
for i in range(4)]
result = pd.concat(dfs, axis=0)
result.set_index('index', inplace=True)
pd.testing.assert_frame_equal(result, raw)
pd.testing.assert_frame_equal(dfs[1].set_index('index'), raw.iloc[33: 66])
with self.ctx:
# test df with unknown shape
df2 = DataFrame(raw, chunk_size=(50, 2))
df2 = df2[df2['col1'] < 1]
path2 = os.path.join(base_path, 'out2.csv')
r = df2.to_csv(path2)
self.executor.execute_dataframes([r])
result = pd.read_csv(path2, dtype=raw.dtypes.to_dict())
result.set_index('index', inplace=True)
pd.testing.assert_frame_equal(result, raw)
# SERIES TESTS
series = md.Series(raw.col1, chunk_size=33)
# test one file with series
path = os.path.join(base_path, 'out.csv')
r = series.to_csv(path)
self.executor.execute_dataframe(r)
result = pd.read_csv(path, dtype=raw.dtypes.to_dict())
result.set_index('index', inplace=True)
pd.testing.assert_frame_equal(result, raw.col1.to_frame())
# test multi files with series
path = os.path.join(base_path, 'out-*.csv')
r = series.to_csv(path)
self.executor.execute_dataframe(r)
dfs = [pd.read_csv(os.path.join(base_path, f'out-{i}.csv'),
dtype=raw.dtypes.to_dict())
for i in range(4)]
result = pd.concat(dfs, axis=0)
result.set_index('index', inplace=True)
pd.testing.assert_frame_equal(result, raw.col1.to_frame())
pd.testing.assert_frame_equal(dfs[1].set_index('index'), raw.col1.to_frame().iloc[33: 66])
@unittest.skipIf(sqlalchemy is None, 'sqlalchemy not installed')
def testToSQL(self):
index = pd.RangeIndex(100, 0, -1, name='index')
raw = pd.DataFrame({
'col1': np.random.rand(100),
'col2': np.random.choice(['a', 'b', 'c'], (100,)),
'col3': np.arange(100).astype('int64'),
}, index=index)
with tempfile.TemporaryDirectory() as d:
table_name1 = 'test_table'
table_name2 = 'test_table2'
uri = 'sqlite:///' + os.path.join(d, 'test.db')
engine = sqlalchemy.create_engine(uri)
# test write dataframe
df = DataFrame(raw, chunk_size=33)
r = df.to_sql(table_name1, con=engine)
self.executor.execute_dataframe(r)
written = pd.read_sql(table_name1, con=engine, index_col='index') \
.sort_index(ascending=False)
pd.testing.assert_frame_equal(raw, written)
# test write with existing table
with self.assertRaises(ValueError):
df.to_sql(table_name1, con=uri).execute()
# test write series
series = md.Series(raw.col1, chunk_size=33)
with engine.connect() as conn:
r = series.to_sql(table_name2, con=conn)
self.executor.execute_dataframe(r)
written = pd.read_sql(table_name2, con=engine, index_col='index') \
.sort_index(ascending=False)
pd.testing.assert_frame_equal(raw.col1.to_frame(), written)
@unittest.skipIf(vineyard is None, 'vineyard not installed')
def testToVineyard(self):
def testWithGivenSession(session):
ipc_socket = os.environ.get('VINEYARD_IPC_SOCKET', '/tmp/vineyard/vineyard.sock')
with option_context({'vineyard.socket': ipc_socket}):
df1 = DataFrame(pd.DataFrame(np.arange(12)))
#!/usr/bin/env python
# encoding: utf-8
#
# maskbit.py
#
# @Author: <NAME> <andrews>
# @Date: 2017-10-06 10:10:00
# @Last modified by: <NAME> (<EMAIL>)
# @Last modified time: 2018-11-26 11:51:50
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import pandas as pd
import marvin
from marvin.extern.yanny import yanny
# Stores the maskbits yanny file structure so that we don't need to open it more than once.
_maskbits_from_yanny = None
def _read_maskbit_schemas():
"""Read all available SDSS maskbit schemas from yanny file.
Returns:
Record Array: all bits for all schemas.
"""
global _maskbits_from_yanny
if _maskbits_from_yanny is None:
path_maskbits = os.path.join(os.path.dirname(marvin.__file__), 'data', 'sdssMaskbits.par')
_maskbits_from_yanny = yanny(path_maskbits, np=True)
return _maskbits_from_yanny['MASKBITS']
def get_available_maskbits():
"""Get names of available maskbit schemas from yanny file.
Returns:
list: Names of available maskbits.
"""
maskbits = _read_maskbit_schemas()
return sorted(set([it[0] for it in maskbits]))
def get_manga_target(flag_id, bitmasks, header):
"""Get MANGA_TARGET[``flag_id``] flag.
Parameters:
flag_id (str):
Flag ID number (e.g., "1" for MANGA_TARGET1).
bitmasks (dict):
`Maskbit` objects.
header (`astropy.io.fits.header.Header`):
File header.
Returns:
`Maskbit`
"""
flag_id = str(int(flag_id))
manga_target = bitmasks['MANGA_TARGET{}'.format(flag_id)]
try:
manga_target.mask = int(header['MNGTRG{}'.format(flag_id)])
except KeyError:
manga_target.mask = int(header['MNGTARG{}'.format(flag_id)])
return manga_target
class Maskbit(object):
"""A class representing a maskbit.
Parameters:
schema (DataFrame):
Maskbit schema.
name (str):
Name of maskbit.
description (str):
Description of maskbit.
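Example (a sketch; 'MANGA_TARGET1' is assumed to be one of the flag names
present in the bundled sdssMaskbits.par schema, and the bit value shown is
illustrative):
    mngtarg1 = Maskbit('MANGA_TARGET1')
    mngtarg1.mask = 1 << 10
    mngtarg1.bits    # bits set in the mask, e.g. [10]
    mngtarg1.labels  # the labels corresponding to those bits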
"""
def __init__(self, name, schema=None, description=None):
self.name = name
self.schema = schema if schema is not None else self._load_schema(name)
self.description = description if description is not None else None
self.mask = None
def __repr__(self):
if (isinstance(self.mask, int) or self.mask is None):
labels = self.labels
else:
labels = 'shape={}'.format(self.mask.shape)
return '<Maskbit {0!r} {1}>'.format(self.name, labels)
def _load_schema(self, flag_name):
"""Load SDSS Maskbit schema from yanny file.
Parameters:
flag_name (str):
Name of flag.
Returns:
DataFrame: Schema of flag.
"""
maskbits = _read_maskbit_schemas()
flag = maskbits[maskbits['flag'] == flag_name]
return pd.DataFrame(flag[['bit', 'label', 'description']])
@property
def bits(self):
return self.values_to_bits() if self.mask is not None else None
@property
def labels(self):
return self.values_to_labels() if self.mask is not None else None
def values_to_bits(self, values=None):
"""Convert mask values to a list of bits set.
Parameters:
values (int or array):
Mask values. If ``None``, apply to entire
``Maskbit.mask`` array. Default is ``None``.
Returns:
list:
Bits that are set.
Example:
>>> maps = Maps(plateifu='8485-1901')
>>> ha = maps['emline_gflux_ha_6564']
>>> ha.pixmask.values_to_bits()
[[[0, 1, 4, 30],
[0, 1, 4, 30],
...
[0, 1, 4, 30]]]
"""
# assert (self.mask is not None) or (values is not None), 'Must provide values.'
# values = np.array(self.mask) if values is None else np.array(values)
# ndim = values.ndim
# assert ndim <= 3, '`value` must be int, 1-D array, 2-D array, or 3-D array.'
# # expand up to 2 dimensions
# while values.ndim < 3:
# values = np.array([values])
# # create list of list of lists of bits set
# bits_set = []
# for ii in range(values.shape[0]):
# row_ii = []
# for jj in range(values.shape[1]):
# row_jj = []
# for kk in range(values.shape[2]):
# row_jj.append(self._value_to_bits(values[ii, jj, kk], self.schema.bit.values))
# row_ii.append(row_jj)
# bits_set.append(row_ii)
# # condense back down to initial dimensions
# for __ in range(3 - ndim):
# bits_set = bits_set[0]
bits_set = self._get_a_set(values, convert_to='bits')
return bits_set
def _get_uniq_bits(self, values):
''' Return a dictionary of unique bits
Parameters:
values (list):
A flattened list of mask values
Returns:
dict:
A unique dictionary of {mask value: bit list} as {key: value}
'''
uniqvals = set(values)
vdict = {v: self._value_to_bits(v, self.schema.bit.values) for v in uniqvals}
return vdict
def _get_uniq_labels(self, values):
''' Return a dictionary of unique labels
Parameters:
values (list):
A flattened list of mask values
Returns:
dict:
A unique dictionary of {mask value: labels list} as {key: value}
'''
uniqbits = self._get_uniq_bits(values)
uniqlabels = {k: self.schema.label[self.schema.bit.isin(v)].values.tolist() for k, v in uniqbits.items()}
return uniqlabels
def _get_a_set(self, values, convert_to='bits'):
''' Convert mask values to a list of either bit or label sets.
Parameters:
values (int or array):
Mask values. If ``None``, apply to entire
``Maskbit.mask`` array. Default is ``None``.
convert_to (str):
Indicates what to convert to. Either "bits" or "labels"
Returns:
list:
Bits/Labels that are set.
'''
assert (self.mask is not None) or (values is not None), 'Must provide values.'
values = np.array(self.mask) if values is None else np.array(values)
ndim = values.ndim
shape = values.shape
assert ndim <= 3, '`value` must be int, 1-D array, 2-D array, or 3-D array.'
flatmask = values.flatten()
if convert_to == 'bits':
uniqvals = self._get_uniq_bits(flatmask)
elif convert_to == 'labels':
uniqvals = self._get_uniq_labels(flatmask)
vallist = list(map(lambda x: uniqvals[x], flatmask))
if ndim > 0:
vals_set = np.reshape(vallist, shape).tolist()
else:
vals_set = vallist[0]
return vals_set
def _value_to_bits(self, value, bits_all):
"""Convert mask value to a list of bits.
Parameters:
value (int):
Mask value.
bits_all (array):
All bits for flag.
Returns:
list:
Bits that are set.
"""
return [it for it in bits_all if int(value) & (1 << it)]
def values_to_labels(self, values=None):
"""Convert mask values to a list of the labels of bits set.
Parameters:
values (int or array):
Mask values. If ``None``, apply to entire
``Maskbit.mask`` array. Default is ``None``.
Returns:
list:
Bits that are set.
Example:
>>> maps = Maps(plateifu='8485-1901')
>>> ha = maps['emline_gflux_ha_6564']
>>> ha.pixmask.values_to_labels()
[[['NOCOV', 'LOWCOV', 'NOVALUE', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'NOVALUE', 'DONOTUSE'],
...
['NOCOV', 'LOWCOV', 'NOVALUE', 'DONOTUSE']]]
"""
#bits_set = self.values_to_bits(values=values)
#labels_set = self._bits_to_labels(bits_set)
labels_set = self._get_a_set(values, convert_to='labels')
return labels_set
def _bits_to_labels(self, nested):
"""Recursively convert a nested list of bits to labels.
Parameters:
nested (list):
Nested list of bits.
Returns:
list: Nested list of labels.
"""
# Base condition
if isinstance(nested, (int, np.integer)):
return self.schema.label[self.schema.bit == nested].values[0]
return [self._bits_to_labels(it) for it in nested]
def labels_to_value(self, labels):
"""Convert bit labels into a bit value.
Parameters:
labels (str or list):
Labels of bits to set.
Returns:
int: Integer bit value.
Example:
>>> maps = Maps(plateifu='8485-1901')
>>> ha = maps['emline_gflux_ha_6564']
>>> ha.pixmask.labels_to_value('DONOTUSE')
1073741824
>>> ha.pixmask.labels_to_value(['NOCOV', 'LOWCOV'])
3
"""
if isinstance(labels, str):
labels = [labels]
bit_values = []
for label in labels:
bit = self.schema.bit[self.schema.label == label]
if not bit.empty:
bit_values.append(bit.values[0])
return np.sum([2**value for value in bit_values])
def labels_to_bits(self, labels):
"""Convert bit labels into bits.
Parameters:
labels (str or list):
Labels of bits.
Returns:
list: Bits that correspond to the labels.
Example:
>>> maps = Maps(plateifu='8485-1901')
>>> ha = maps['emline_gflux_ha_6564']
>>> ha.pixmask.labels_to_bits('DONOTUSE')
[30]
>>> ha.pixmask.labels_to_bits(['NOCOV', 'LOWCOV'])
[0, 1]
"""
return self.values_to_bits(self.labels_to_value(labels))
def get_mask(self, labels, mask=None, dtype=int):
"""Create mask from a list of labels.
If ``dtype`` is ``int``, then ``get_mask`` can effectively
perform an OR or AND operation. However, if ``dtype`` is
``bool``, then ``get_mask`` does an OR.
Parameters:
labels (str or list):
Labels of bits.
mask (int or array):
User-defined mask. If ``None``, use ``self.mask``.
Default is ``None``.
dtype:
Output dtype, which must be either ``int`` or ``bool``.
Default is ``int``.
Returns:
array: Mask for given labels.
Example:
>>> maps = Maps(plateifu='8485-1901')
>>> ha = maps['emline_gflux_ha_6564']
>>> ha.pixmask.get_mask(['NOCOV', 'LOWCOV'])
array([[3, 3, 3, ..., 3, 3, 3],
...,
[3, 3, 3, ..., 3, 3, 3]])
>>> ha.pixmask.get_mask(['NOCOV', 'LOWCOV'], dtype=bool)
array([[ True, True, True, ..., True, True, True],
...,
[ True, True, True, ..., True, True, True]], dtype=bool)
"""
assert dtype in [int, bool], '``dtype`` must be either ``int`` or ``bool``.'
if isinstance(labels, str):
labels = [labels]
schema_labels = self.schema.label.tolist()
for label in labels:
if label not in schema_labels:
raise ValueError('label {0!r} not found in the maskbit schema.'.format(label))
bits = self.labels_to_bits(labels)
mask = mask if mask is not None else self.mask
if len(bits) == 0:
return np.zeros(mask.shape, dtype=int)
return np.sum([mask & 2**bit for bit in bits], axis=0).astype(dtype)
"""
A helper class for solving the non-linear time dependent equations
of biofilm growth which includes models of the cell concentration
and also nutrient concentrations in both the substrate and biofilm.
All of these are assumed to be radially symmetric and depend on
r and t, and the cell concentration additionally depends on z.
The specific equations solved by this class are described in the
publication:
A Thin-Film Lubrication Model for Biofilm Expansion Under Strong Adhesion,
<NAME>, <NAME>, <NAME>, <NAME>, and <NAME>
To be submitted soon, 2020.
This work builds upon the model developed by <NAME> in his PhD thesis:
Mathematical Modelling of Pattern Formation in Yeast Biofilms,
<NAME>,
The University of Adelaide, 2019.
Two solvers are currently implemented within the class.
The first, a "decoupled" Crank-Nicolson implementation, denoted DCN,
solves the non-linear system of equations in a weakly coupled manner.
Each equation is solved one at a time (via Newton iterations where
applicable) using the last known/computed solution of any other variables.
The second, a fully coupled Crank-Nicolson implementation, denoted FCN,
solves the complete non-linear system of equations using Newton iterations.
Both use the scipy sparse LU solver to solve the discretised systems
of equations that result from a compact finite difference discretisation
(although iterative solvers for some variables can be toggled through
private class switches). Both methods can be expected to achieve 2nd order
convergence in both space and time.
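Example usage (a minimal sketch only; the constructor arguments and setters
shown match the class interface below, while the time-stepping call is
illustrative and assumes the solver interface described above):
    model = BiofilmTwoDLubricationModel(R=2.0, dr=0.5**7, dt=0.001, solver='DCN')
    model.set_h(lambda r: 0.0001 + 0.1*(r < 1.0)*(1.0 - r**2)**4)  # initial biofilm height
    model.set_g_s(lambda r: 0.0*r + 1.0)                           # substrate nutrient concentration
    # ... advance the solution in time with the chosen solver, then inspect ...
    r, h = model.get_r(), model.get_h()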
Compatibility notes:
The code was written in Python 3 (3.7.3 specifically), although it
should also work in reasonably recent 2.7.x releases.
The scientific computing packages numpy and scipy are required.
Again, any version that is not unreasonably old should be fine.
You will probably also want matplotlib for plotting.
Maintainer: <NAME>
Initial development: June-July 2020
Last updated: August 2020
"""
import numpy as np
from scipy.sparse.linalg import spsolve,spilu,LinearOperator,gmres,bicgstab
from scipy.sparse import diags,bmat,coo_matrix
class gmres_counter(object):
"""
Convenience class for monitoring gmres iterations (from scipy.sparse.linalg)
(Useful for debugging purposes)
"""
def __init__(self, disp=True):
self._disp = disp
self.niter = 0
def __call__(self, rk=None):
self.niter += 1
if self._disp:
print('gmres: iteration {:03d} residual = {:s}'.format(self.niter,str(rk)))
class BiofilmTwoDLubricationModel(object):
"""
Helper class for solving the PDEs describing the development of
a radially symmetric and thin yeast biofilm over time.
The model/system that is solved includes the biofilm height,
the cell concentration, and the nutrient concentrations in both
the biofilm and the substrate.
"""
def __init__(self,R=2.0,dr=0.5**7,nxi=33,dt=None,params=None,solver='DCN',verbose=False):
"""
Initialise the class
With no arguments a default problem set up is initialised.
Optionally you may pass the following:
R: The radius of the domain (or petri dish). If not specified
a default value of 2 is used.
dr: The grid spacing used for the discretisation of the domain.
If not specified a default value of 0.5**7 is used.
nxi: The number of grid points in the vertical (xi) direction, with xi
in [0,1]. If not specified a default value of 33 is used.
dt: The time step size, if not specified 0.25*dr is used.
params: Parameters for the system of equations. These should
be passed as a dictionary. Any which are not specified will
be set to a default value (specifically corresponding to
Table 6.1 in Alex's thesis).
solver: specify which solver to use.
verbose: Set to True to output convergence information when solving
"""
# Set up the radial coordinate array
self._r = np.arange(0.0,R+0.5*dr,dr)
self._r_half = self._r[:-1]+0.5*dr
self._nxi = nxi
self._xi = np.linspace(0,1,nxi)
self._R,self._XI = np.meshgrid(self._r,self._xi)
# Set up the parameters
if dt is None:
self._dt = 0.25*dr # this is quite conservative... (and assumes dr<h*dxi)
else:
self._dt = dt
if type(params)==dict:
# Set various parameters depending what is passed,
# those not specified will be set to those Alex used
# in his thesis (Table 6.1 specifically)
self._b = params.get("b",0.0001)
self._H0 = params.get("H0",0.1)
self._Psi_m = params.get("Psi_m",0.111)
self._Psi_d = params.get("Psi_d",0.0)
#self._R = params.get("R",10.0)
#self._T = params.get("T",50.0)
self._gamma_ast = params.get("gamma_ast",1.0)
self._D = params.get("D",1.05)
self._Pe = params.get("Pe",3.94)
self._Upsilon = params.get("Upsilon",3.15)
self._Q_b = params.get("Q_b",8.65)
self._Q_s = params.get("Q_s",2.09)
self._h_ast = params.get("h_ast",0.002)
self._lambda_ast = params.get("lambda_ast",np.inf)
else:
if params is not None:
print("Setting parameters is currently only supported through a dictionary, default values will be used")
# Set various parameters to those Alex used in
# his thesis (Table 6.1 specifically)
self._b = 0.0001
self._H0 = 0.1
self._Psi_m = 0.111
self._Psi_d = 0.0
#self._R = 10.0
#self._T = 50.0
self._gamma_ast = 1.0
self._D = 1.05
self._Pe = 3.94
self._Upsilon = 3.15
self._Q_b = 8.65
self._Q_s = 2.09
self._h_ast = 0.002
self._lambda_ast = np.inf
self.set_solver(solver)
self._verbose = verbose
# Set up the solution arrays with default initial conditions
self._set_default_initial_conditions()
# The following were used in initial debugging and testing and generally need not be changed
self._Phi_n_DCN_solver = 3 # Changes the numerical method used for solving Phi_n
self._FCN_solver_mode = -1 # Change the FCN solver
self._add_top_Phi_bc = False
self._use_artificial_dr_bc = True # untested with False...
# done
def _set_default_initial_conditions(self):
"""
Sets the initial conditions to be those described by
equation 6.22 of <NAME>'s thesis.
"""
self._t = 0
r = self._r
R = self._R
XI = self._XI
self._h = self._b + (self._H0-self._b)*(r<1)*(1-r**2)**4
self._Phi_n = (XI**3-0.5*XI**4)*self._h[np.newaxis,:]*(R<1)*(1-3*R**2+2*R**3)
self._g_s = np.ones(len(self._r))
self._g_b = np.zeros(len(self._r))
# done
# add getters and setters
def set_parameters(self,params):
"""
Set the current problem parameters.
Parameters should be passed using a dictionary.
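Example (a sketch using keys listed in get_parameters; `model` denotes an
instance of this class):
    model.set_parameters({"gamma_ast": 2.0, "Q_b": 8.65, "h_ast": 0.002})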
"""
if type(params)==dict:
# Set various parameters depending what is passed,
# those not specified will be set to those Alex used
# in his thesis (Table 6.1 specifically)
self._b = params.get("b",self._b)
self._H0 = params.get("H0",self._H0)
self._Psi_m = params.get("Psi_m",self._Psi_m)
self._Psi_d = params.get("Psi_d",self._Psi_d)
#self._R = params.get("R",self._R)
#self._T = params.get("T",self._T)
self._gamma_ast = params.get("gamma_ast",self._gamma_ast)
self._D = params.get("D",self._D )
self._Pe = params.get("Pe",self._Pe)
self._Upsilon = params.get("Upsilon",self._Upsilon)
self._Q_b = params.get("Q_b",self._Q_b)
self._Q_s = params.get("Q_s",self._Q_s)
self._h_ast = params.get("h_ast",self._h_ast)
self._lambda_ast = params.get("lambda_ast",self._lambda_ast)
else:
print("Setting parameters is currently only supported through a dictionary, existing values will be used")
# done
def get_parameters(self,param=None):
"""
Get the current problem parameters.
If a specific parameter is not requested
then all are returned in a dictionary.
"""
params_dict = {"b":self._b,"H0":self._H0,"Psi_m":self._Psi_m,"Psi_d":self._Psi_d,\
"gamma_ast":self._gamma_ast,"D":self._D,"Pe":self._Pe,\
"Upsilon":self._Upsilon,"Q_b":self._Q_b,"Q_s":self._Q_s,\
"h_ast":self._h_ast,"lambda_ast":self._lambda_ast}
#params_dict["R"] = self._R
#params_dict["T"] = self._T
if param is None:
# return dictionary with all parameters
return params_dict
elif param in params_dict.keys():
return params_dict[param]
else:
print("Requested parameter does not exist")
# done
def get_r(self):
"""
Returns the array for the radial coordinates.
"""
return self._r
def get_xi(self):
"""
Returns the array for the radial coordinates.
"""
return self._xi
def set_verbosity(self,verbose):
"""
Set the verbosity for the solvers (True or False).
"""
self._verbose = verbose
# done
def set_h(self,h):
"""
Update the biofilm height h.
For example, use this to set the initial condition.
(Note this over-writes the current solution in the class.)
Accepts a callable function h(r), or an array (with correct length).
Note: This will not alter Phi_n=int_0^{h xi} phi_n dz. If it is desired that this
too be changed it should be done separately via set_Phi_n or set_phi_n.
"""
if callable(h):
self._h[:] = h(self._r)
else:
assert len(h)==len(self._r)
self._h[:] = h
# done
def get_h(self):
"""
Returns the current biofilm height h.
"""
return self._h
def set_Phi_n(self,Phi_n):
"""
Update the cumulative cell volume fraction Phi_n (=int_0^{h xi} phi_n dz).
For example, use this to set the initial condition.
(Note this over-writes the current solution in the class.)
It is expected that Phi_n be provided in the re-scaled coordinates r,xi.
Accepts a callable function Phi_n(xi,r) (evaluated on the rescaled xi-r meshgrid), or an array (with correct shape).
"""
if callable(Phi_n):
self._Phi_n[:,:] = Phi_n(self._XI,self._R)
else:
assert Phi_n.shape==self._R.shape
self._Phi_n[:,:] = Phi_n
# done
def get_Phi_n(self):
"""
Returns the current cumulative cell volume fraction Phi_n (=int_0^{h xi} phi_n dz).
(Note this is given with respect to the re-scaled coordinates r,xi.)
"""
return self._Phi_n
def get_phi_n_bar(self):
"""
Returns the vertically averaged cell volume fraction bar{phi_n} =(1/h) int_0^{h} phi_n dz.
(Note this is given with respect to the re-scaled coordinates r,xi.)
"""
return self._Phi_n[-1,:]/self._h
def set_phi_n(self,phi_n):
"""
Update the cell volume fraction phi_n.
For example, use this to set the initial condition.
(Note this over-writes the current solution in the class.)
It is expected that phi_n be provided in re-scaled coordinates r,xi.
Accepts a callable function phi_n(xi,r) (evaluated on the rescaled xi-r meshgrid), or an array (with correct shape).
Note: This internally updates Phi_n=\int_0^{h xi} phi_n dz using the existing h.
If h is also to be updated, it should be done first!
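Example (a sketch; `model` denotes an instance of this class): set h first,
then the volume fraction on the rescaled (xi,r) grid:
    model.set_h(lambda r: 0.0001 + 0.1*(r < 1.0)*(1.0 - r**2)**4)
    model.set_phi_n(lambda xi, r: xi*(r < 1.0))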
"""
XI,R = self._XI,self._R
if callable(phi_n):
phi_n_int_dxi = XI[1,0]*np.cumsum(0.5*(phi_n(XI,R)[1:,:]+phi_n(XI,R)[:-1,:]),axis=0)
else:
assert phi_n.shape==self._R.shape
phi_n_int_dxi = XI[1,0]*np.cumsum(0.5*(phi_n[1:,:]+phi_n[:-1,:]),axis=0)
self._Phi_n[0,:] = 0
self._Phi_n[1:,:] = phi_n_int_dxi*self._h[np.newaxis,:]
self._Phi_n[:,self._h<self._h_ast] = 0 # zero areas where h is small
# done
def get_phi_n(self):
"""
Returns the current cell volume fraction phi_n.
(Note this is given with respect to the re-scaled coordinates r,xi.)
"""
phi_n = np.empty_like(self._Phi_n)
phi_n[1:-1,:] = 0.5*(self._Phi_n[2:,:]-self._Phi_n[:-2,:])*(self._nxi-1)/self._h[np.newaxis,:]
phi_n[ 0,:] = 0.5*(-3*self._Phi_n[ 0,:]+4*self._Phi_n[ 1,:]-self._Phi_n[ 2,:])*(self._nxi-1)/self._h[np.newaxis,:]
phi_n[-1,:] = 0.5*( 3*self._Phi_n[-1,:]-4*self._Phi_n[-2,:]+self._Phi_n[-3,:])*(self._nxi-1)/self._h[np.newaxis,:]
phi_n[:,self._h<self._h_ast] = 0
return phi_n
def set_g_s(self,g_s):
"""
Update the substrate nutrient concentration g_s.
For example, use this to set the initial condition.
(Note this over-writes the current solution in the class)
Accepts a callable function g_s(r), or an array (with correct length).
"""
if callable(g_s):
self._g_s[:] = g_s(self._r)
else:
assert len(g_s)==len(self._r)
self._g_s[:] = g_s
# done
def get_g_s(self):
"""
Returns the substrate nutrient concentration g_s.
"""
return self._g_s
def set_g_b(self,g_b):
"""
Update the biofilm nutrient concentration g_b.
For example, use this to set the initial condition.
(Note this over-writes the current solution in the class)
Accepts a callable function g_b(r), or an array (with correct length).
"""
if callable(g_b):
self._g_b[:] = g_b(self._r)
else:
assert len(g_b)==len(self._r)
self._g_b[:] = g_b
# done
def get_g_b(self):
"""
Returns the biofilm nutrient concentration g_b.
"""
return self._g_b
def set_dt(self,dt):
"""
Set/change the time step size (dt) which is used by default
(i.e. if dt is not specified when solve is called then this value is used)
"""
self._dt = dt
# done
def get_dt(self):
"""
Get the current time step size (dt) which is used by default
(i.e. if dt is not specified when solve is called then this value is used)
"""
return self._dt
def set_t(self,t):
"""
Set/change the current solution time t.
"""
self._t = t
# done
def get_t(self):
"""
Get the current solution time T.
"""
return self._t
# done
# Add private methods relating to the discretisation of the fourth order 'advective' term
def _advective_term(self,r,h,p=3,f=None,near_boundary=True,prefix=None):
"""
Finite difference discretisation of:
prefix * (d/dr)[ r h^p f (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ] ]
"""
r_half = 0.5*(r[1:]+r[:-1])
dr = r[1]-r[0]
h_half = 0.5*(h[1:]+h[:-1])
D_half = (r_half[2: ]*(h[3: ]-h[2:-1])-r_half[1:-1]*(h[2:-1]-h[1:-2]))/r[2:-1]\
-(r_half[1:-1]*(h[2:-1]-h[1:-2])-r_half[ :-2]*(h[1:-2]-h[ :-3]))/r[1:-2]
if f is None:
f = np.ones(len(r))
f_half = f[:-1]
else:
f_half = 0.5*(f[1:]+f[:-1])
res = np.empty(len(r))
res[[0,1,-2,-1]] = 0
res[2:-2] = r_half[2:-1]*h_half[2:-1]**p*f_half[2:-1]*D_half[1:] \
-r_half[1:-2]*h_half[1:-2]**p*f_half[1:-2]*D_half[:-1]
if near_boundary:
# At index one we exploit that 0 = (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ] for r=0
D_p2 = 0.5*(D_half[ 0]+D_half[ 1])
res[1] = 0.5*r[2]*h[2]**p*f[2]*D_p2
# At index -2 we can exploit that 0 = (dh/dr)
# The width of the stencil is widened to achieve this though...
D_m5o2 = 0.25*(r[-2]*(h[-1]-h[-3])-r[-4]*(h[-3]-h[-5]))/r[-3]\
-0.25*(r[-3]*(h[-2]-h[-4])-r[-5]*(h[-4]-h[-6]))/r[-4]
D_m3o2 = 0.25*( -r[-3]*(h[-2]-h[-4]))/r[-2]\
-0.25*(r[-2]*(h[-1]-h[-3])-r[-4]*(h[-3]-h[-5]))/r[-3]
res[-2] = r_half[-1]*h_half[-1]**p*f_half[-1]*D_m3o2 \
-r_half[-2]*h_half[-2]**p*f_half[-2]*D_m5o2
if prefix is not None:
res[1:-1] *= prefix[1:-1]
return res/dr**4
def _advective_term_h_gradient(self,r,h,p=3,f=None,near_boundary=True,prefix=None):
"""
Finite difference discretisation of the gradient of
prefix * (d/dr)[ r h^p f (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ] ]
with respect to h.
Note: the caller is responsible for enforcing boundary conditions
"""
r_half = 0.5*(r[1:]+r[:-1])
dr = r[1]-r[0]
h_half = 0.5*(h[1:]+h[:-1])
D_half = (r_half[2: ]*(h[3: ]-h[2:-1])-r_half[1:-1]*(h[2:-1]-h[1:-2]))/r[2:-1]\
-(r_half[1:-1]*(h[2:-1]-h[1:-2])-r_half[ :-2]*(h[1:-2]-h[ :-3]))/r[1:-2]
if f is None:
f = np.ones(len(r))
f_half = f[:-1]
else:
f_half = 0.5*(f[1:]+f[:-1])
Dh_diag_p2 = np.empty((len(r)))
Dh_diag_p1 = np.empty((len(r)))
Dh_diag_p0 = np.empty((len(r)))
Dh_diag_m1 = np.empty((len(r)))
Dh_diag_m2 = np.empty((len(r)))
Dh_diag_p2[[0,1,-2,-1]] = 0
Dh_diag_p1[[0,1,-2,-1]] = 0
Dh_diag_p0[[0,1,-2,-1]] = 0
Dh_diag_m1[[0,1,-2,-1]] = 0
Dh_diag_m2[[0,1,-2,-1]] = 0
Dh_diag_p1[2:-2] = r_half[2:-1]*0.5*p*h_half[2:-1]**(p-1)*f_half[2:-1]*D_half[1:]/dr**4
Dh_diag_p0[2:-2] = r_half[2:-1]*0.5*p*h_half[2:-1]**(p-1)*f_half[2:-1]*D_half[1:]/dr**4 \
-r_half[1:-2]*0.5*p*h_half[1:-2]**(p-1)*f_half[1:-2]*D_half[:-1]/dr**4
Dh_diag_m1[2:-2] = -r_half[1:-2]*0.5*p*h_half[1:-2]**(p-1)*f_half[1:-2]*D_half[:-1]/dr**4
# I think the following 5 are okay...
Dh_diag_p2[2:-2] = r_half[2:-1]*h_half[2:-1]**p*f_half[2:-1]*(r_half[3: ]/r[3:-1])/dr**4
Dh_diag_p1[2:-2] += -r_half[2:-1]*h_half[2:-1]**p*f_half[2:-1]*(r_half[2:-1]/r[2:-2]+2)/dr**4 \
-r_half[1:-2]*h_half[1:-2]**p*f_half[1:-2]*(r_half[2:-1]/r[2:-2])/dr**4
Dh_diag_p0[2:-2] += r_half[2:-1]*h_half[2:-1]**p*f_half[2:-1]*(r_half[2:-1]/r[3:-1]+2)/dr**4 \
+r_half[1:-2]*h_half[1:-2]**p*f_half[1:-2]*(r_half[1:-2]/r[1:-3]+2)/dr**4
Dh_diag_m1[2:-2] += -r_half[2:-1]*h_half[2:-1]**p*f_half[2:-1]*(r_half[1:-2]/r[2:-2])/dr**4 \
-r_half[1:-2]*h_half[1:-2]**p*f_half[1:-2]*(r_half[1:-2]/r[2:-2]+2)/dr**4
Dh_diag_m2[2:-2] = r_half[1:-2]*h_half[1:-2]**p*f_half[1:-2]*(r_half[ :-3]/r[1:-3])/dr**4
if near_boundary:
# Pre-allocate additional diagonals for the boundary terms
Dh_diag_p3 = np.zeros((len(r)))
Dh_diag_m3 = np.zeros((len(r)))
Dh_diag_m4 = np.zeros((len(r)))
# At index one we exploit that 0 = (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ]
D_p2 = 0.5*(D_half[ 0]+D_half[ 1])
Dh_diag_p1[1] = 0.5*r[2]*p*h[2]**(p-1)*f[2]*D_p2/dr**4
Dh_diag_p3[1] = 0.5*r[2]*h[2]**p*f[2]*0.5*(r_half[3]/r[3])/dr**4
Dh_diag_p2[1] = -0.5*r[2]*h[2]**p*f[2]/dr**4
Dh_diag_p1[1] += 0.5*r[2]*h[2]**p*f[2]*0.5*(r_half[2]/r[3]-r_half[1]/r[1])/dr**4
Dh_diag_p0[1] = -0.5*r[2]*h[2]**p*f[2]/dr**4
Dh_diag_m1[1] = 0.5*r[2]*h[2]**p*f[2]*0.5*(r_half[0]/r[1])/dr**4
# At index -2 we can exploit that 0 = (dh/dr)
# The width of the stencil is widened to achieve this though...
D_m5o2 = 0.25*(r[-2]*(h[-1]-h[-3])-r[-4]*(h[-3]-h[-5]))/r[-3]\
-0.25*(r[-3]*(h[-2]-h[-4])-r[-5]*(h[-4]-h[-6]))/r[-4]
D_m3o2 = 0.25*( -r[-3]*(h[-2]-h[-4]))/r[-2]\
-0.25*(r[-2]*(h[-1]-h[-3])-r[-4]*(h[-3]-h[-5]))/r[-3]
Dh_diag_p1[-2] = r_half[-1]*0.5*p*h_half[-1]**(p-1)*f_half[-1]*D_m3o2
Dh_diag_p0[-2] = r_half[-1]*0.5*p*h_half[-1]**(p-1)*f_half[-1]*D_m3o2 \
-r_half[-2]*0.5*p*h_half[-2]**(p-1)*f_half[-2]*D_m5o2
Dh_diag_m1[-2] = -r_half[-2]*0.5*p*h_half[-2]**(p-1)*f_half[-2]*D_m5o2
# I think the following are okay...
Dh_diag_p1[-2] += r_half[-1]*h_half[-1]**p*f_half[-1]*( r[-2]/r[-3])*0.25/dr**4 \
-r_half[-2]*h_half[-2]**p*f_half[-2]*(-r[-2]/r[-3])*0.25/dr**4
Dh_diag_p0[-2] += r_half[-1]*h_half[-1]**p*f_half[-1]*(-r[-3]/r[-4])*0.25/dr**4 \
-r_half[-2]*h_half[-2]**p*f_half[-2]*(-r[-3]/r[-2])*0.25/dr**4
Dh_diag_m1[-2] += r_half[-1]*h_half[-1]**p*f_half[-1]*(-2)*0.25/dr**4 \
-r_half[-2]*h_half[-2]**p*f_half[-2]*( 2)*0.25/dr**4
Dh_diag_m2[-2] = r_half[-1]*h_half[-1]**p*f_half[-1]*( 2)*0.25/dr**4 \
-r_half[-2]*h_half[-2]**p*f_half[-2]*( r[-3]/r[-2])*0.25/dr**4
Dh_diag_m3[-2] = r_half[-1]*h_half[-1]**p*f_half[-1]*( r[-4]/r[-3])*0.25/dr**4 \
-r_half[-2]*h_half[-2]**p*f_half[-2]*( r[-4]/r[-3])*0.25/dr**4
Dh_diag_m4[-2] = r_half[-1]*h_half[-1]**p*f_half[-1]*(-r[-5]/r[-4])*0.25/dr**4
#Dh = diags([Dh_diag_m4[4:],Dh_diag_m3[3:],Dh_diag_m2[2:],Dh_diag_m1[1:],\
# Dh_diag_p0,Dh_diag_p1[:-1],Dh_diag_p2[:-2],Dh_diag_p3[:-3]],\
# [-4,-3,-2,-1,0,1,2,3])
diagonals = [Dh_diag_m4,Dh_diag_m3,Dh_diag_m2,Dh_diag_m1,\
Dh_diag_p0,Dh_diag_p1,Dh_diag_p2,Dh_diag_p3]
offsets = [-4,-3,-2,-1,0,1,2,3]
else:
#Dh = diags([Dh_diag_m2[2:],Dh_diag_m1[1:],Dh_diag_p0,Dh_diag_p1[:-1],Dh_diag_p2[:-2]],\
# [-2,-1,0,1,2])
diagonals = [Dh_diag_m2,Dh_diag_m1,Dh_diag_p0,Dh_diag_p1,Dh_diag_p2]
offsets = [-2,-1,0,1,2]
if prefix is not None:
for diagonal in diagonals:
diagonal[1:-1] *= prefix[1:-1]
return diagonals,offsets
def _advective_term_f_gradient(self,r,h,p=3,f=None,near_boundary=True,prefix=None):
"""
Finite difference discretisation of the gradient of
prefix * (d/dr)[ r h^p f (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ] ]
with respect to f.
"""
if f is None: # This is the only place f is actually used...
return None
r_half = 0.5*(r[1:]+r[:-1])
dr = r[1]-r[0]
h_half = 0.5*(h[1:]+h[:-1])
D_half = (r_half[2: ]*(h[3: ]-h[2:-1])-r_half[1:-1]*(h[2:-1]-h[1:-2]))/r[2:-1]\
-(r_half[1:-1]*(h[2:-1]-h[1:-2])-r_half[ :-2]*(h[1:-2]-h[ :-3]))/r[1:-2]
#f_half = 0.5*(f[1:]+f[:-1])
Df_diag_p1 = np.empty((len(r)))
Df_diag_p0 = np.empty((len(r)))
Df_diag_m1 = np.empty((len(r)))
Df_diag_p1[[0,1,-2,-1]] = 0
Df_diag_p0[[0,1,-2,-1]] = 0
Df_diag_m1[[0,1,-2,-1]] = 0
Df_diag_p1[2:-2] = r_half[2:-1]*h_half[2:-1]**p*0.5*D_half[1:]/dr**4
Df_diag_p0[2:-2] = r_half[2:-1]*h_half[2:-1]**p*0.5*D_half[1:]/dr**4 \
-r_half[1:-2]*h_half[1:-2]**p*0.5*D_half[:-1]/dr**4
Df_diag_m1[2:-2] = -r_half[1:-2]*h_half[1:-2]**p*0.5*D_half[:-1]/dr**4
if near_boundary:
D_p2 = 0.5*(D_half[ 0]+D_half[ 1])
Df_diag_p1[1] = 0.5*r[2]*h[2]**p*D_p2/dr**4
D_m5o2 = 0.25*(r[-2]*(h[-1]-h[-3])-r[-4]*(h[-3]-h[-5]))/r[-3]\
-0.25*(r[-3]*(h[-2]-h[-4])-r[-5]*(h[-4]-h[-6]))/r[-4]
D_m3o2 = 0.25*( -r[-3]*(h[-2]-h[-4]))/r[-2]\
-0.25*(r[-2]*(h[-1]-h[-3])-r[-4]*(h[-3]-h[-5]))/r[-3]
Df_diag_p1[-2] = r_half[-1]*h_half[-1]**p*D_m3o2/dr**4
Df_diag_p0[-2] = -r_half[-2]*h_half[-2]**p*D_m5o2/dr**4
#Df = diags([Df_diag_m1[1:],Df_diag_p0,Df_diag_p1[:-1]],[-1,0,1])#,format="csr")
diagonals = [Df_diag_m1,Df_diag_p0,Df_diag_p1]
offsets = [-1,0,1]
if prefix is not None:
for diagonal in diagonals:
diagonal[1:-1] *= prefix[1:-1]
return diagonals,offsets
# Add 'private' methods related to the solvers
def _h_equation_RHS(self,v_old,v_new,dt=None):
"""
Calculate the RHS vector component corresponding to the height equation.
The internal time step dt is used if one is not provided.
"""
r = self._r
nr = len(r)
dr = r[1]
b = self._b
h_ast = self._h_ast
g_ast = self._gamma_ast
Psi_m = self._Psi_m
lambda_ast = self._lambda_ast
h_old,Phi_n_old,g_s_old,g_b_old = v_old
h_new,Phi_n_new,g_s_new,g_b_new = v_new
if dt is None:
dt = self._dt
# Initialise rhs vector
rhs = np.empty(nr)
rhs[2:-2] = -(h_new[2:-2]-h_old[2:-2])
# Calculate spatial stencil and add to the rhs
adv_old = self._advective_term(r,h_old,near_boundary=False)
adv_new = self._advective_term(r,h_new,near_boundary=False)
rhs[2:-2] -= 0.5*dt*g_ast/3.0*(adv_old[2:-2]+adv_new[2:-2])/r[2:-2]
if np.isfinite(lambda_ast): # add slip term if lambda_ast is finite
adv_old = self._advective_term(r,h_old,p=2,near_boundary=False)
adv_new = self._advective_term(r,h_new,p=2,near_boundary=False)
rhs[2:-2] -= 0.5*dt*g_ast/lambda_ast*(adv_old[2:-2]+adv_new[2:-2])/r[2:-2]
# Add the forcing term
forcing_old = (h_old>h_ast)*(1.0+Psi_m)*Phi_n_old[-1,:]*g_b_old
forcing_new = (h_new>h_ast)*(1.0+Psi_m)*Phi_n_new[-1,:]*g_b_new
rhs[2:-2] += 0.5*dt*(forcing_old[2:-2]+forcing_new[2:-2])
# Set RHS entries relating to boundary conditions
rhs[ 0] = 3.0*h_new[ 0]- 4.0*h_new[ 1]+ h_new[ 2]
rhs[ 1] = 5.0*h_new[ 0]-18.0*h_new[ 1]+24.0*h_new[ 2]-14.0*h_new[ 3]+3.0*h_new[ 4]
rhs[-2] = -3.0*h_new[-1]+ 4.0*h_new[-2]- h_new[-3]
rhs[-1] = b-h_new[-1]
# done
return rhs
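    # Note on the time discretisation above: the 0.5*dt*(old + new) averaging of the
    # advective and forcing terms makes this a Crank-Nicolson (trapezoidal) step in
    # time, while the first and last two entries of the vector are reserved for the
    # boundary conditions rather than the PDE itself.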
def _h_equation_LHS0(self,v_new,dt=None):
"""
Calculate the LHS matrix block corresponding to the
h dependence in the height equation.
The internal time step dt is used if one is not provided.
"""
r = self._r
nr = len(r)
dr = r[1]
r_half = self._r_half
g_ast = self._gamma_ast
h_ast = self._h_ast
Psi_m = self._Psi_m
lambda_ast = self._lambda_ast
h_new,Phi_n_new,g_s_new,g_b_new = v_new
if dt is None:
dt = self._dt
# Construct/fetch the diagonal components from the gradient of the fourth order "advective" term
diagonals,offsets = self._advective_term_h_gradient(r,h_new,near_boundary=False)
for i in range(len(diagonals)):
assert offsets[i]==i-2 # sanity check
diagonals[i][2:-2] *= (0.5*dt*g_ast/3.0)*r[2:-2]**(-1)
if np.isfinite(lambda_ast): # add slip term if lambda_ast is finite
diagonals2,offsets2 = self._advective_term_h_gradient(r,h_new,p=2,near_boundary=False)
for i in range(len(diagonals2)):
assert offsets2[i]==offsets[i]
diagonals[i][2:-2] += (0.5*dt*g_ast/lambda_ast)*r[2:-2]**(-1)*diagonals2[i][2:-2]
# Add to the main diagonal
diagonals[2][2:-2] += 1.0
# Note: there is no longer a 'forcing term' since h is absorbed into Phi_n
# Enforce the boundary conditions
diagonals.append(np.zeros(nr))
offsets.append(3)
diagonals[2][ 0] = -3 # first order BC at r=0
diagonals[3][ 0] = 4
diagonals[4][ 0] = -1
diagonals[1][ 1] = -5 # third order BC at r=0
diagonals[2][ 1] = 18
diagonals[3][ 1] = -24
diagonals[4][ 1] = 14
diagonals[5][ 1] = -3
diagonals[1][-2] = 1 # first order BC at r=R
diagonals[2][-2] = -4
diagonals[3][-2] = 3
diagonals[2][-1] = 1 # Dirichlet BC at r=R
# Final construction
A_00 = diags([diagonals[0][2:],diagonals[1][1:],diagonals[2],diagonals[3][:-1],\
diagonals[4][:-2],diagonals[5][:-3]],\
offsets)#,format="csr")
return A_00
def _h_equation_LHS1(self,v_new,dt=None):
"""
Calculate the LHS matrix block corresponding to the
Phi_n dependence in the height equation (Phi_n = int_0^{h xi} phi_n dz).
The internal time step dt is used if one is not provided.
"""
h_ast = self._h_ast
Psi_m = self._Psi_m
h_new,Phi_n_new,g_s_new,g_b_new = v_new
if dt is None:
dt = self._dt
# Note: this block has a rectangular shape
nr,nxi = len(self._r),len(self._xi)
row = np.arange(2,nr-2)
col = nxi-1+nxi*row
dat = -0.5*dt*(1.0+Psi_m)*((h_new>h_ast)*g_b_new)[2:-2]
return coo_matrix((dat,(row,col)),shape=(nr,nr*nxi))#.tocsr()
def _h_equation_LHS2(self,v_new,dt=None):
"""
Calculate the LHS matrix block corresponding to the
g_s dependence in the height equation.
The internal time step dt is used if one is not provided.
"""
# Note: there is no g_s dependence
return None
def _h_equation_LHS3(self,v_new,dt=None):
"""
Calculate the LHS matrix block corresponding to the
g_b dependence in the height equation.
The internal time step dt is used if one is not provided.
"""
h_ast = self._h_ast
Psi_m = self._Psi_m
h_new,Phi_n_new,g_s_new,g_b_new = v_new
if dt is None:
dt = self._dt
A_diag = -0.5*dt*(1.0+Psi_m)*(h_new>h_ast)*Phi_n_new[-1,:]
A_diag[[0,1,-2,-1]] = 0
return diags(A_diag)#,format="csr")
# Add private methods relating to the discretisation of the fourth order 'advective' term
def _advective_term_alt(self,r,h,f,near_boundary=True):
"""
Finite difference discretisation of:
(d/dr)[ f r (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ] ]
This version handles f which is two dimensional.
Note the h**p factor and the prefix have been dropped in this alt version.
"""
r_half = 0.5*(r[1:]+r[:-1])
dr = r[1]-r[0]
h_half = 0.5*(h[1:]+h[:-1])
D_half = (r_half[2: ]*(h[3: ]-h[2:-1])-r_half[1:-1]*(h[2:-1]-h[1:-2]))/r[2:-1]\
-(r_half[1:-1]*(h[2:-1]-h[1:-2])-r_half[ :-2]*(h[1:-2]-h[ :-3]))/r[1:-2]
f_half = 0.5*(f[:,1:]+f[:,:-1])
res = np.empty(f.shape)
res[:,[0,1,-2,-1]] = 0
res[:,2:-2] = r_half[np.newaxis,2:-1]*D_half[np.newaxis,1: ]*f_half[:,2:-1] \
-r_half[np.newaxis,1:-2]*D_half[np.newaxis, :-1]*f_half[:,1:-2]
if near_boundary:
            # At index one we exploit that 0 = (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ] for r=0
D_p2 = 0.5*(D_half[ 0]+D_half[ 1])
res[:,1] = 0.5*r[2]*D_p2*f[:,2]
# At index -2 we can exploit that 0 = (dh/dr)
# The width of the stencil is widened to achieve this though...
D_m5o2 = 0.25*(r[-2]*(h[-1]-h[-3])-r[-4]*(h[-3]-h[-5]))/r[-3]\
-0.25*(r[-3]*(h[-2]-h[-4])-r[-5]*(h[-4]-h[-6]))/r[-4]
D_m3o2 = 0.25*( -r[-3]*(h[-2]-h[-4]))/r[-2]\
-0.25*(r[-2]*(h[-1]-h[-3])-r[-4]*(h[-3]-h[-5]))/r[-3]
res[:,-2] = r_half[-1]*D_m3o2*f_half[:,-1] \
-r_half[-2]*D_m5o2*f_half[:,-2]
return res/dr**4
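    # Discretisation note (added for clarity): writing L_i for the cylindrical
    # Laplacian (1/r)(d/dr)[ r (dh/dr) ] at grid point i, the quantity
    #     D_half[j] = dr^2*(L_{j+2} - L_{j+1}) ~= dr^3 * dL/dr at r_half[j+1],
    # so after the final division by dr**4 the returned array approximates
    #     (d/dr)[ f r (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ] ]
    # at the interior grid points, as stated in the docstring.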
def _advective_term_h_gradient_alt(self,r,h,p=3,f=None,near_boundary=True,prefix=None):
"""
Finite difference discretisation of the gradient of
prefix * (d/dr)[ r h^p f (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ] ]
with respect to h.
This version handles f which is two dimensional.
Note: the caller is responsible for enforcing boundary conditions
"""
r_half = 0.5*(r[1:]+r[:-1])
dr = r[1]-r[0]
h_half = 0.5*(h[1:]+h[:-1])
D_half = (r_half[2: ]*(h[3: ]-h[2:-1])-r_half[1:-1]*(h[2:-1]-h[1:-2]))/r[2:-1]\
-(r_half[1:-1]*(h[2:-1]-h[1:-2])-r_half[ :-2]*(h[1:-2]-h[ :-3]))/r[1:-2]
if f is None:
f = np.ones((1,len(r)))
f_half = f[:,:-1]
else:
f_half = 0.5*(f[:,1:]+f[:,:-1])
Dh_diag_p2 = np.empty(f.shape)
Dh_diag_p1 = np.empty(f.shape)
Dh_diag_p0 = np.empty(f.shape)
Dh_diag_m1 = np.empty(f.shape)
Dh_diag_m2 = np.empty(f.shape)
Dh_diag_p2[:,[0,1,-2,-1]] = 0
Dh_diag_p1[:,[0,1,-2,-1]] = 0
Dh_diag_p0[:,[0,1,-2,-1]] = 0
Dh_diag_m1[:,[0,1,-2,-1]] = 0
Dh_diag_m2[:,[0,1,-2,-1]] = 0
Dh_diag_p1[:,2:-2] = f_half[:,2:-1]*(r_half[2:-1]*0.5*p*h_half[2:-1]**(p-1)*D_half[1:]/dr**4)[np.newaxis,:]
Dh_diag_p0[:,2:-2] = f_half[:,2:-1]*(r_half[2:-1]*0.5*p*h_half[2:-1]**(p-1)*D_half[1:]/dr**4)[np.newaxis,:] \
-f_half[:,1:-2]*(r_half[1:-2]*0.5*p*h_half[1:-2]**(p-1)*D_half[:-1]/dr**4)[np.newaxis,:]
Dh_diag_m1[:,2:-2] = -f_half[:,1:-2]*(r_half[1:-2]*0.5*p*h_half[1:-2]**(p-1)*D_half[:-1]/dr**4)[np.newaxis,:]
# I think the following 5 are okay...
Dh_diag_p2[:,2:-2] = f_half[:,2:-1]*(r_half[2:-1]*h_half[2:-1]**p*(r_half[3: ]/r[3:-1])/dr**4)[np.newaxis,:]
Dh_diag_p1[:,2:-2] += -f_half[:,2:-1]*(r_half[2:-1]*h_half[2:-1]**p*(r_half[2:-1]/r[2:-2]+2)/dr**4)[np.newaxis,:] \
-f_half[:,1:-2]*(r_half[1:-2]*h_half[1:-2]**p*(r_half[2:-1]/r[2:-2])/dr**4)[np.newaxis,:]
Dh_diag_p0[:,2:-2] += f_half[:,2:-1]*(r_half[2:-1]*h_half[2:-1]**p*(r_half[2:-1]/r[3:-1]+2)/dr**4)[np.newaxis,:] \
+f_half[:,1:-2]*(r_half[1:-2]*h_half[1:-2]**p*(r_half[1:-2]/r[1:-3]+2)/dr**4)[np.newaxis,:]
Dh_diag_m1[:,2:-2] += -f_half[:,2:-1]*(r_half[2:-1]*h_half[2:-1]**p*(r_half[1:-2]/r[2:-2])/dr**4)[np.newaxis,:] \
-f_half[:,1:-2]*(r_half[1:-2]*h_half[1:-2]**p*(r_half[1:-2]/r[2:-2]+2)/dr**4)[np.newaxis,:]
Dh_diag_m2[:,2:-2] = f_half[:,1:-2]*(r_half[1:-2]*h_half[1:-2]**p*(r_half[ :-3]/r[1:-3])/dr**4)[np.newaxis,:]
if near_boundary:
# Pre-allocate additional diagonals for the boundary terms
Dh_diag_p3 = np.zeros(f.shape)
Dh_diag_m3 = np.zeros(f.shape)
Dh_diag_m4 = np.zeros(f.shape)
            # At index one we exploit that 0 = (d/dr)[ (1/r) (d/dr)[ r (dh/dr) ] ]
D_p2 = 0.5*(D_half[ 0]+D_half[ 1])
Dh_diag_p1[:,1] = 0.5*r[2]*p*h[2]**(p-1)*f[:,2]*D_p2/dr**4
Dh_diag_p3[:,1] = 0.5*r[2]*h[2]**p*f[:,2]*0.5*(r_half[3]/r[3])/dr**4
Dh_diag_p2[:,1] = -0.5*r[2]*h[2]**p*f[:,2]/dr**4
Dh_diag_p1[:,1] += 0.5*r[2]*h[2]**p*f[:,2]*0.5*(r_half[2]/r[3]-r_half[1]/r[1])/dr**4
Dh_diag_p0[:,1] = -0.5*r[2]*h[2]**p*f[:,2]/dr**4
Dh_diag_m1[:,1] = 0.5*r[2]*h[2]**p*f[:,2]*0.5*(r_half[0]/r[1])/dr**4
# At index -2 we can exploit that 0 = (dh/dr)
# The width of the stencil is widened to achieve this though...
D_m5o2 = 0.25*(r[-2]*(h[-1]-h[-3])-r[-4]*(h[-3]-h[-5]))/r[-3]\
-0.25*(r[-3]*(h[-2]-h[-4])-r[-5]*(h[-4]-h[-6]))/r[-4]
D_m3o2 = 0.25*( -r[-3]*(h[-2]-h[-4]))/r[-2]\
-0.25*(r[-2]*(h[-1]-h[-3])-r[-4]*(h[-3]-h[-5]))/r[-3]
Dh_diag_p1[:,-2] = r_half[-1]*0.5*p*h_half[-1]**(p-1)*f_half[:,-1]*D_m3o2
Dh_diag_p0[:,-2] = r_half[-1]*0.5*p*h_half[-1]**(p-1)*f_half[:,-1]*D_m3o2 \
-r_half[-2]*0.5*p*h_half[-2]**(p-1)*f_half[:,-2]*D_m5o2
Dh_diag_m1[:,-2] = -r_half[-2]*0.5*p*h_half[-2]**(p-1)*f_half[:,-2]*D_m5o2
# I think the following are okay...
Dh_diag_p1[:,-2] += r_half[-1]*h_half[-1]**p*f_half[:,-1]*( r[-2]/r[-3])*0.25/dr**4 \
-r_half[-2]*h_half[-2]**p*f_half[:,-2]*(-r[-2]/r[-3])*0.25/dr**4
Dh_diag_p0[:,-2] += r_half[-1]*h_half[-1]**p*f_half[:,-1]*(-r[-3]/r[-4])*0.25/dr**4 \
-r_half[-2]*h_half[-2]**p*f_half[:,-2]*(-r[-3]/r[-2])*0.25/dr**4
Dh_diag_m1[:,-2] += r_half[-1]*h_half[-1]**p*f_half[:,-1]*(-2)*0.25/dr**4 \
-r_half[-2]*h_half[-2]**p*f_half[:,-2]*( 2)*0.25/dr**4
Dh_diag_m2[:,-2] = r_half[-1]*h_half[-1]**p*f_half[:,-1]*( 2)*0.25/dr**4 \
-r_half[-2]*h_half[-2]**p*f_half[:,-2]*( r[-3]/r[-2])*0.25/dr**4
Dh_diag_m3[:,-2] = r_half[-1]*h_half[-1]**p*f_half[:,-1]*( r[-4]/r[-3])*0.25/dr**4 \
-r_half[-2]*h_half[-2]**p*f_half[:,-2]*( r[-4]/r[-3])*0.25/dr**4
Dh_diag_m4[:,-2] = r_half[-1]*h_half[-1]**p*f_half[:,-1]*(-r[-5]/r[-4])*0.25/dr**4
#Dh = diags([Dh_diag_m4[4:],Dh_diag_m3[3:],Dh_diag_m2[2:],Dh_diag_m1[1:],\
# Dh_diag_p0,Dh_diag_p1[:-1],Dh_diag_p2[:-2],Dh_diag_p3[:-3]],\
# [-4,-3,-2,-1,0,1,2,3])
diagonals = [Dh_diag_m4,Dh_diag_m3,Dh_diag_m2,Dh_diag_m1,\
Dh_diag_p0,Dh_diag_p1,Dh_diag_p2,Dh_diag_p3]
offsets = [-4,-3,-2,-1,0,1,2,3]
else:
#Dh = diags([Dh_diag_m2[2:],Dh_diag_m1[1:],Dh_diag_p0,Dh_diag_p1[:-1],Dh_diag_p2[:-2]],\
# [-2,-1,0,1,2])
diagonals = [Dh_diag_m2,Dh_diag_m1,Dh_diag_p0,Dh_diag_p1,Dh_diag_p2]
offsets = [-2,-1,0,1,2]
if prefix is not None:
if len(prefix.shape)==1:
for diagonal in diagonals:
diagonal[:,1:-1] *= prefix[np.newaxis,1:-1]
elif len(prefix.shape)==2:
for diagonal in diagonals:
diagonal[:,1:-1] *= prefix[:,1:-1]
# else do nothing...
return diagonals,offsets
def _Phi_n_equation_explicit(self,v_old,dt=None):
"""
Calculate a simple forward Euler step of the Phi_n equations.
(Here Phi_n = int_0^{h xi} phi_n dz, the input v_old,v_new must contain Phi_n rather than phi_n)
The internal time step dt is used if one is not provided.
        Note: This is generally going to be unstable; however, I have been able to 'get lucky' with some grid choices.
"""
r = self._r
nr = len(r)
dr = r[1]
R,XI = self._R,self._XI
dxi = XI[1,1]
gamma_ast = self._gamma_ast
h_ast = self._h_ast
Psi_d = self._Psi_d
Psi_m = self._Psi_m
lambda_ast = self._lambda_ast
h_old,Phi_n_old,g_s_old,g_b_old = v_old
if dt is None:
dt = self._dt
# Setup the vertical velocity factor
        # Note: the second line of each v_z term does not include the r=0 or r=R parts; they are not needed regardless
# The v_z terms also exclude the 1/h factor...
fot_old = self._advective_term(r,h_old)
v_z_old = (1.0+Psi_m)*g_b_old[np.newaxis,:]*(Phi_n_old-XI*(Phi_n_old[-1,:])[np.newaxis,:])
v_z_old[:,1:-1] += gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)[:,1:-1]/R[:,1:-1]*(fot_old)[np.newaxis,1:-1]
# Setup the horizontal 'advection' stencil
Phi_int_dxi_old = np.cumsum(0.5*((Phi_n_old*(1-XI))[1:,:]+(Phi_n_old*(1-XI))[:-1,:]),axis=0)*dxi
integral_old = np.empty(Phi_n_old.shape)
integral_old[0 ,:] = 0
integral_old[1:,:] = Phi_int_dxi_old
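        # The construction above is a cumulative trapezoidal approximation of
        #     integral_old[j, :] ~= int_0^{xi_j} Phi_n_old(xi')*(1 - xi') dxi'.
        # (Equivalently, one could use scipy.integrate.cumulative_trapezoid with
        # dx=dxi, axis=0 and initial=0; the explicit cumsum is kept here.)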
f_old = (Phi_n_old*(0.5*XI**2-XI)+integral_old)*h_old[np.newaxis,:]**2
if np.isfinite(lambda_ast):
f_old -= Phi_n_old*h_old[np.newaxis,:]/lambda_ast
adv_old = self._advective_term_alt(r,h_old,f_old)
# Initialise the update with the forcing term
delta_Phi_n = Phi_n_old*(g_b_old-Psi_d)[np.newaxis,:]
# Add the vertical advection part (note no flux through the top or bottom)
delta_Phi_n[1:-1,1:-1] -= v_z_old[1:-1,1:-1]/h_old[np.newaxis,1:-1]*(Phi_n_old[2:,1:-1]-Phi_n_old[:-2,1:-1])/(2.0*dxi)
# Add the horizontal 'advection' part
delta_Phi_n[:,1:-1] += gamma_ast/r[np.newaxis,1:-1]*adv_old[:,1:-1]
# Perform the update
Phi_n_new = Phi_n_old+dt*delta_Phi_n
# Enforce the boundary conditions post update
Phi_n_new[:,-1] = 0
        Phi_n_new[:, 0] = (4*Phi_n_new[:,1]-Phi_n_new[:,2])/3.0 # second order one-sided stencil for dPhi_n/dr=0 at r=0
if self._use_artificial_dr_bc: # if artificial 'BC' is also enforced near r=0
Phi_n_new[:,0] = 0.2*(9*Phi_n_new[:,2]-4*Phi_n_new[:,3])
Phi_n_new[:,1] = 0.2*(8*Phi_n_new[:,2]-3*Phi_n_new[:,3])
if False: # if high order BC enforcement at r=0
Phi_n_new[:,0] = (18*Phi_n_new[:,1]-9*Phi_n_new[:,2]+2*Phi_n_new[:,3])/11.0
if False: # if both high order BC enforcement at r=0 and additional artificial BC near r=0
Phi_n_new[:,0] = 0.2*(9*Phi_n_new[:,2]-4*Phi_n_new[:,3]) # (note: it works out same as above...)
Phi_n_new[:,1] = 0.2*(8*Phi_n_new[:,2]-3*Phi_n_new[:,3])
Phi_n_new[ 0,:] = 0 # by definition
#Phi_n_new[-1,:] = 2*Phi_n_new[-2,:]-Phi_n_new[-3,:] # need to do something here? maybe enforce d^2\Phi_n/d\xi^2=0
Phi_n_new[-1,:] = 2.5*Phi_n_new[-2,:]-2*Phi_n_new[-3,:]+0.5*Phi_n_new[-4,:] # higher order...
# Zero parts where h is still too small
Phi_n_new[:,h_old<=h_ast] = 0
# done
return Phi_n_new
def _Phi_n_equation_semi_implicit(self,v_old,dt=None,explicit_r_advection=False):
"""
Calculate a simple backward Euler step of the Phi_n equations.
(Here Phi_n = int_0^{h xi} phi_n dz, the input v_old,v_new must contain Phi_n rather than phi_n)
The internal time step dt is used if one is not provided.
        Note: This is semi-implicit in the sense that we linearise the equations to make them somewhat easier to implement.
        This works reasonably well in its current form...
"""
r = self._r
nr = len(r)
dr = r[1]
R,XI = self._R,self._XI
nxi = len(self._xi)
dxi = XI[1,1]
gamma_ast = self._gamma_ast
h_ast = self._h_ast
Psi_d = self._Psi_d
Psi_m = self._Psi_m
lambda_ast = self._lambda_ast
h_old,Phi_n_old,g_s_old,g_b_old = v_old
if dt is None:
dt = self._dt
# Initialise the lhs matrix with ones on the main diagonal
A_p0_p0 = np.ones(Phi_n_old.shape)
# Initialise the rhs vector with the 'old' Phi_n
rhs = Phi_n_old.copy()
# Note: the xi=0 boundary condition should require no changes to the above (since Phi_n_old should be 0 on the bottom)
rhs[0,:] = 0 # but we set it explicitly to be absolutely clear
# Note: the same applies to the r=R boundary condition (where we enforce Phi_n=0 since h=b here)
rhs[:,-1] = 0 # but we again set it explicitly to be absolutely clear
# For the xi=1 boundary condition we implicitly make the 2nd derivative zero
A_p0_m1 = np.zeros(Phi_n_old.shape)
A_p0_m2 = np.zeros(Phi_n_old.shape)
A_p0_m3 = np.zeros(Phi_n_old.shape) # required for higher order stencil
A_p0_p0[-1,2:-1] = 2.0 # 1 low order, 2 higher order
A_p0_m1[-1,2:-1] = -5.0 # -2 low order, -5 higher order
A_p0_m2[-1,2:-1] = 4.0 # 1 low order, 4 higher order
A_p0_m3[-1,2:-1] = -1.0 # -1 higher order
rhs[-1,2:-1] = 0
# Now the BC at r=0 (and the artificial one I enforce next to it)
A_p1_p0 = np.zeros(Phi_n_old.shape)
A_p2_p0 = np.zeros(Phi_n_old.shape)
A_m1_p0 = np.zeros(Phi_n_old.shape) # required for the artificial BC
A_p3_p0 = np.zeros(Phi_n_old.shape) # required for higher order stencil at r=0
A_p0_p0[:,0] = 3.0;A_p1_p0[:,0] = -4.0;A_p2_p0[:,0] = 1.0
rhs[:,0] = 0
if self._use_artificial_dr_bc: # if artificial 'BC' is also enforced near r=0
A_p0_p0[:,0] = 3.0;A_p1_p0[:,0] = -4.0;A_p2_p0[:,0] = 1.0
A_m1_p0[:,1] = 4.0;A_p0_p0[:,1] = -7.0;A_p1_p0[:,1] = 4.0;A_p2_p0[:,1] = -1.0
rhs[:,1] = 0
if False: # if high order BC enforcement at r=0
A_p0_p0[:,0] = 11.0;A_p1_p0[:,0] = -18.0;A_p2_p0[:,0] = 9.0;A_p3_p0[:,0] = - 2.0
if False: # if both high order BC enforcement at r=0 and additional artificial BC near r=0
A_p0_p0[:,0] = 11.0;A_p1_p0[:,0] = -18.0;A_p2_p0[:,0] = 9.0;A_p3_p0[:,0] = -2.0
A_m1_p0[:,1] = 4.0;A_p0_p0[:,1] = - 7.0;A_p1_p0[:,1] = 4.0;A_p2_p0[:,1] = -1.0
rhs[:,1] = 0
# Add the forcing terms on the 'interior' (this need not be implicit really)
A_p0_p0[1:-1,2:-1] += -dt*(g_b_old-Psi_d)[np.newaxis,2:-1] # implicit forcing...
#rhs[1:-1,2:-1] += dt*Phi_n_old[1:-1,2:-1]*(g_b_old-Psi_d)[np.newaxis,2:-1] # explicit forcing...
# Setup the vertical velocity factor
        # Note: the second line of each v_z term does not include the r=0 or r=R parts; they are not needed regardless
# The v_z terms also exclude the 1/h factor...
fot_old = self._advective_term(r,h_old)
v_z_old = (1.0+Psi_m)*g_b_old[np.newaxis,:]*(Phi_n_old-XI*(Phi_n_old[-1,:])[np.newaxis,:])
v_z_old[:,1:-1] += gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)[:,1:-1]/R[:,1:-1]*(fot_old)[np.newaxis,1:-1]
# Now add this to the appropriate diagonals...
A_p0_p1 = np.zeros(Phi_n_old.shape)
A_p0_m1[1:-1,2:-1] = -dt/(2*dxi)*v_z_old[1:-1,2:-1]/h_old[np.newaxis,2:-1] # central...
A_p0_p1[1:-1,2:-1] = +dt/(2*dxi)*v_z_old[1:-1,2:-1]/h_old[np.newaxis,2:-1]
#A_p0_m1[1:-1,2:-1] += -dt/dxi*(v_z_old[1:-1,2:-1]>0)*v_z_old[1:-1,2:-1]/h_old[np.newaxis,2:-1] # upwinded...
#A_p0_p0[1:-1,2:-1] += +dt/dxi*(v_z_old[1:-1,2:-1]>0)*v_z_old[1:-1,2:-1]/h_old[np.newaxis,2:-1]
#A_p0_p0[1:-1,2:-1] += -dt/dxi*(v_z_old[1:-1,2:-1]<0)*v_z_old[1:-1,2:-1]/h_old[np.newaxis,2:-1]
#A_p0_p1[1:-1,2:-1] += +dt/dxi*(v_z_old[1:-1,2:-1]<0)*v_z_old[1:-1,2:-1]/h_old[np.newaxis,2:-1]
# Setup the horizontal 'advection' stencil
if explicit_r_advection: # true - explicit, false - implicit
Phi_int_dxi_old = np.cumsum(0.5*((Phi_n_old*(1-XI))[1:,:]+(Phi_n_old*(1-XI))[:-1,:]),axis=0)*dxi
integral_old = np.empty(Phi_n_old.shape)
integral_old[0 ,:] = 0
integral_old[1:,:] = Phi_int_dxi_old
f_old = (Phi_n_old*(0.5*XI**2-XI)+integral_old)*h_old[np.newaxis,:]**2
if np.isfinite(lambda_ast):
f_old -= Phi_n_old*h_old[np.newaxis,:]/lambda_ast
adv_old = self._advective_term_alt(r,h_old,f_old)
# Add the horizontal 'advection' part to the system
# Note: currently this is treated explicitly, which seems to work okay for the most part...
rhs[1:-1,2:-1] += dt*gamma_ast*adv_old[1:-1,2:-1]/r[np.newaxis,2:-1]
else:
# Note: we can re-use the _advective_term_f_gradient function here
diagonals_h2,offsets_h2 = self._advective_term_f_gradient(r,h_old,2,Phi_n_old)
assert offsets_h2[0]==-1
A_m1_p0[1:-1,2:-1] += -dt*gamma_ast*(0.5*XI**2-XI)[1:-1,2:-1]*diagonals_h2[0][np.newaxis,2:-1]/r[np.newaxis,2:-1]
A_p0_p0[1:-1,2:-1] += -dt*gamma_ast*(0.5*XI**2-XI)[1:-1,2:-1]*diagonals_h2[1][np.newaxis,2:-1]/r[np.newaxis,2:-1]
A_p1_p0[1:-1,2:-1] += -dt*gamma_ast*(0.5*XI**2-XI)[1:-1,2:-1]*diagonals_h2[2][np.newaxis,2:-1]/r[np.newaxis,2:-1]
if np.isfinite(lambda_ast):
diagonals_h1,offsets_h1 = self._advective_term_f_gradient(r,h_old,1,Phi_n_old)
assert offsets_h1[0]==-1
A_m1_p0[1:-1,2:-1] += +dt*gamma_ast/lambda_ast*diagonals_h1[0][np.newaxis,2:-1]/r[np.newaxis,2:-1]
A_p0_p0[1:-1,2:-1] += +dt*gamma_ast/lambda_ast*diagonals_h1[1][np.newaxis,2:-1]/r[np.newaxis,2:-1]
A_p1_p0[1:-1,2:-1] += +dt*gamma_ast/lambda_ast*diagonals_h1[2][np.newaxis,2:-1]/r[np.newaxis,2:-1]
# Now add the integral component (note this is somewhat denser than usual)
# (Note: it might be easier to build the entire matrix directly in coo format?)
r_i,xi_i = np.meshgrid(range(nr),range(nxi))
indices = xi_i*nr+r_i
H = (h_old>h_ast) # Use this to zero out bits where h is too small...
row,col,dat = [],[],[]
for j in range(1,nxi-1): # exclude the first and last index... (the first is 0 regardless)
for k in range(j+1):
c = 0.5*dxi if (k==0 or k==j) else dxi
row.append(indices[j,2:-1])
col.append(indices[k,1:-2])
dat.append(-dt*gamma_ast*c*(1-XI[k,2:-1])*H[2:-1]*diagonals_h2[0][2:-1]/r[2:-1])
row.append(indices[j,2:-1])
col.append(indices[k,2:-1])
dat.append(-dt*gamma_ast*c*(1-XI[k,2:-1])*H[2:-1]*diagonals_h2[1][2:-1]/r[2:-1])
row.append(indices[j,2:-1])
col.append(indices[k,3: ])
dat.append(-dt*gamma_ast*c*(1-XI[k,2:-1])*H[2:-1]*diagonals_h2[2][2:-1]/r[2:-1])
M_trap = coo_matrix((np.concatenate(dat),(np.concatenate(row),np.concatenate(col))),shape=(nr*nxi,nr*nxi))
# Zero parts where h is still too small
h_small = (h_old<=h_ast)
A_p0_p0[:,h_small] = 1
rhs[:,h_small] = 0
A_m1_p0[:,h_small] = 0;A_p1_p0[:,h_small] = 0;A_p2_p0[:,h_small] = 0;#A_p3_p0[:,h_small] = 0;
A_p0_m3[:,h_small] = 0;A_p0_m2[:,h_small] = 0;A_p0_m1[:,h_small] = 0;A_p0_p1[:,h_small] = 0;
# Now setup the sparse linear system...
if explicit_r_advection:
A_11 = diags([A_p0_p0.ravel(),
A_m1_p0.ravel()[1:],A_p1_p0.ravel()[:-1],A_p2_p0.ravel()[:-2],#A_p3_p0.ravel()[:-3],
A_p0_m3.ravel()[3*nr:],A_p0_m2.ravel()[2*nr:],A_p0_m1.ravel()[nr:],A_p0_p1.ravel()[:-nr]],
[0,
-1,1,2,#3,
-3*nr,-2*nr,-nr,nr],
format="csr")
else:
A_11_partial = diags([A_p0_p0.ravel(),
A_m1_p0.ravel()[1:],A_p1_p0.ravel()[:-1],A_p2_p0.ravel()[:-2],#A_p3_p0.ravel()[:-3],
A_p0_m3.ravel()[3*nr:],A_p0_m2.ravel()[2*nr:],A_p0_m1.ravel()[nr:],A_p0_p1.ravel()[:-nr]],
[0,
-1,1,2,#3,
-3*nr,-2*nr,-nr,nr],
format="coo")
A_11 = (A_11_partial+M_trap).tocsr()
# Now solve the sparse linear system...
Phi_n_new = spsolve(A_11,rhs.ravel()).reshape(Phi_n_old.shape)
# done
return Phi_n_new
def _Phi_n_equation_RHS(self,v_old,v_new,dt=None):
"""
Calculate the RHS vector component corresponding to the Phi_n equation.
(Here Phi_n = int_0^{h xi} phi_n dz, the input v_old,v_new must contain Phi_n rather than phi_n)
The internal time step dt is used if one is not provided.
"""
r,xi = self._r,self._xi
nr,nxi = len(r),len(xi)
dr,dxi = r[1],xi[1]
R,XI = self._R,self._XI
r_half = self._r_half
h_ast = self._h_ast
gamma_ast = self._gamma_ast
Psi_d = self._Psi_d
Psi_m = self._Psi_m
lambda_ast = self._lambda_ast
h_old,Phi_n_old,g_s_old,g_b_old = v_old
h_new,Phi_n_new,g_s_new,g_b_new = v_new
if dt is None:
dt = self._dt
# Some extra fields for convenience
H_old,H_new = (h_old>h_ast),(h_new>h_ast) # Use this to zero out bits where h is too small...
Hor_old,Hor_new = H_old[2:-1]/r[2:-1],H_new[2:-1]/r[2:-1]
# Setup the rhs field and initialise on the interior with the difference in the fields
rhs = np.zeros(Phi_n_new.shape)
rhs[1:-1,2:-1] = -(Phi_n_new[1:-1,2:-1]-Phi_n_old[1:-1,2:-1]*H_old[np.newaxis,2:-1])
# Note: the H_old in the above line should ensure that delta_Phi will be 0 where-ever h remains small
# (although it should be redundant since Phi_n_old should be zero here regardless)
# Add the forcing term
rhs[1:-1,2:-1] += 0.5*dt*( Phi_n_old[1:-1,2:-1]*(H_old*(g_b_old-Psi_d))[np.newaxis,2:-1]\
+Phi_n_new[1:-1,2:-1]*(H_new*(g_b_new-Psi_d))[np.newaxis,2:-1])
# Setup the vertical velocity factor
        # Note: the second line of each v_z term does not include the r=0 or r=R parts; they are not needed regardless
# The v_z terms also exclude the 1/h factor...
fot_new = self._advective_term(r,h_new)
fot_old = self._advective_term(r,h_old)
#v_z_old = (1.0+Psi_m)*g_b_old[np.newaxis,:]*(Phi_n_old-XI*(Phi_n_old[-1,:])[np.newaxis,:])\
# +gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)/R*(fot_old)[np.newaxis,:]
v_z_old = (1.0+Psi_m)*g_b_old[np.newaxis,:]*(Phi_n_old-XI*(Phi_n_old[-1,:])[np.newaxis,:])
v_z_old[:,1:-1] += gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)[:,1:-1]/R[:,1:-1]*fot_old[np.newaxis,1:-1]
#v_z_new = +(1.0+Psi_m)*g_b_new[np.newaxis,:]*(Phi_n_new-XI*(Phi_n_new[-1,:])[np.newaxis,:])\
# +gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)/R*fot_new[np.newaxis,:]
v_z_new = (1.0+Psi_m)*g_b_new[np.newaxis,:]*(Phi_n_new-XI*(Phi_n_new[-1,:])[np.newaxis,:])
v_z_new[:,1:-1] += gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)[:,1:-1]/R[:,1:-1]*fot_new[np.newaxis,1:-1]
        # Add the vertical advection part (note: no flux through the top or bottom)
rhs[1:-1,2:-1] -= 0.25*dt/dxi*( v_z_old[1:-1,2:-1]*(Phi_n_old[2:,2:-1]-Phi_n_old[:-2,2:-1])*(H_old/h_old)[np.newaxis,2:-1]\
+v_z_new[1:-1,2:-1]*(Phi_n_new[2:,2:-1]-Phi_n_new[:-2,2:-1])*(H_new/h_new)[np.newaxis,2:-1])
# Setup the horizontal 'advection' stencil
Phi_int_dxi_old = np.cumsum(0.5*((Phi_n_old*(1-XI))[1:,:]+(Phi_n_old*(1-XI))[:-1,:]),axis=0)*dxi
Phi_int_dxi_new = np.cumsum(0.5*((Phi_n_new*(1-XI))[1:,:]+(Phi_n_new*(1-XI))[:-1,:]),axis=0)*dxi
integral_old = np.empty(Phi_n_old.shape)
integral_old[0 ,:] = 0
integral_old[1:,:] = Phi_int_dxi_old
integral_new = np.empty(Phi_n_new.shape)
integral_new[0 ,:] = 0
integral_new[1:,:] = Phi_int_dxi_new
f_old = (Phi_n_old*(0.5*XI**2-XI)+integral_old)*h_old[np.newaxis,:]**2
f_new = (Phi_n_new*(0.5*XI**2-XI)+integral_new)*h_new[np.newaxis,:]**2
if np.isfinite(lambda_ast):
f_old -= Phi_n_old*h_old[np.newaxis,:]/lambda_ast
f_new -= Phi_n_new*h_new[np.newaxis,:]/lambda_ast
adv_new = self._advective_term_alt(r,h_new,f_new)
adv_old = self._advective_term_alt(r,h_old,f_old)
# Add the horizontal 'advection' part
rhs[1:-1,2:-1] += 0.5*dt*gamma_ast*( adv_new[1:-1,2:-1]*Hor_new[np.newaxis,:]
+adv_old[1:-1,2:-1]*Hor_old[np.newaxis,:])
# Set all of the entries relating to boundary conditions
# Set the RHS corresponding to the \xi=0 boundary condition (delta_Phi+Phi_n_new)=0
rhs[0,2:-1] = -Phi_n_new[0,2:-1]
# Set the RHS corresponding to the r=R boundary condition (delta_Phi+Phi_n_new)=0 (since h=b~0 is enforced)
rhs[:, -1] = -Phi_n_new[:, -1]
if self._add_top_Phi_bc:
# Set the RHS corresponding to the \xi=1 boundary condition d^2/dr^2(delta_Phi+Phi_n_new)=0
rhs[-1,2:-1] = -2*Phi_n_new[-1,2:-1]+5*Phi_n_new[-2,2:-1]-4*Phi_n_new[-3,2:-1]+Phi_n_new[-4,2:-1]
else:
# Implement the discretisation of the horizontal advection
rhs[-1,2:-1] = -(Phi_n_new[-1,2:-1]-Phi_n_old[-1,2:-1]*H_old[2:-1])\
+0.5*dt*gamma_ast*(adv_new[-1,2:-1]*Hor_new+adv_old[-1,2:-1]*Hor_old)\
+0.5*dt*( Phi_n_old[-1,2:-1]*(H_old*(g_b_old-Psi_d))[2:-1]\
+Phi_n_new[-1,2:-1]*(H_new*(g_b_new-Psi_d))[2:-1])
# Set the RHS corresponding to the r=0 boundary condition d/dr(delta_Phi+Phi_n_new)=0
rhs[:, 0] = -3.0*Phi_n_new[:,0]+4.0*Phi_n_new[:,1]-Phi_n_new[:,2]
if False: # optional, higher order stencil
rhs[:, 0] = -11*Phi_n_new[:,0]+18*Phi_n_new[:,1]-9*Phi_n_new[:,2]+2*Phi_n_new[:,3]
if self._use_artificial_dr_bc:
# Set the RHS corresponding to the introduced r=dr condition Phi(dr)=Phi(0)+0.5*dr^2*Phi''(0)
rhs[:, 1] = 4*Phi_n_new[:,0]-7*Phi_n_new[:,1]+4*Phi_n_new[:,2]-Phi_n_new[:,3]
# done
return rhs.ravel()
def _Phi_n_equation_LHS0(self,v_new,dt=None):
"""
Calculate the LHS matrix block corresponding to the
h dependence in the Phi_n equation.
(Here Phi_n = int_0^{h xi} phi_n dz, the input v_old,v_new must contain Phi_n rather than phi_n)
"""
r,xi = self._r,self._xi
nr,nxi = len(r),len(xi)
dr,dxi = r[1],xi[1]
R,XI = self._R,self._XI
r_half = self._r_half
h_ast = self._h_ast
gamma_ast = self._gamma_ast
Psi_m = self._Psi_m
lambda_ast = self._lambda_ast
h_new,Phi_n_new,g_s_new,g_b_new = v_new
if dt is None:
dt = self._dt
# Note this block has rectangular shape
# Setup some index arrays for constructing the matrix in coo format
r_i,xi_i = np.meshgrid(range(nr),range(nxi))
indices = xi_i*nr+r_i
row,col,dat = [],[],[]
H = (h_new>h_ast) # Use this to zero out bits where h is too small...
Hor = H[1:-1]/r[1:-1]
# Setup the vertical advection components first
# Do the easier part first
fot_new = self._advective_term(r,h_new)
#v_z_new = +(1.0+Psi_m)*g_b_new[np.newaxis,:]*(Phi_n_new-XI*(Phi_n_new[-1,:])[np.newaxis,:])\
# +gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)/R*fot_new[np.newaxis,:]
v_z_new = (1.0+Psi_m)*g_b_new[np.newaxis,:]*(Phi_n_new-XI*(Phi_n_new[-1,:])[np.newaxis,:])
v_z_new[:,1:-1] += gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)[:,1:-1]/R[:,1:-1]*fot_new[np.newaxis,1:-1]
xi_adv_term1 = -0.25*dt/dxi*v_z_new[1:-1,:]*(Phi_n_new[2:,:]-Phi_n_new[:-2,:])*(H/h_new**2)[np.newaxis,:]
row.append(indices[1:-1,1:-1].ravel())
col.append(r_i[1:-1,1:-1].ravel())
dat.append(xi_adv_term1[:,1:-1].ravel())
if self._use_artificial_dr_bc:
#xi_adv_term1[:,1] = 0 # Need to modify this in conjunction with the 'artificial' BC at r=dr
row[-1] = indices[1:-1,2:-1].ravel()
col[-1] = r_i[1:-1,2:-1].ravel()
dat[-1] = xi_adv_term1[:,2:-1].ravel()
# Now the more difficult/involved part...
# First get diagonals relating to the fourth order h term
diagonals_h3,offsets_h3 = self._advective_term_h_gradient(r,h_new,3)
if self._use_artificial_dr_bc:
# Need to modify diagonals in conjunction with the 'artificial' BC at r=dr
for k in range(len(diagonals_h3)):
diagonals_h3[k][1] = 0
# now construct the 2D factor and then add the diagonals to the matrix
twoD_factor = 0.25*dt/dxi*gamma_ast*(XI**3-3*XI**2+2*XI)[1:-1,1:-1]*(Phi_n_new[2:,1:-1]-Phi_n_new[:-2,1:-1])\
*H[np.newaxis,1:-1]/(6.0*r*h_new)[np.newaxis,1:-1]
diag_h3_m4_dat = diagonals_h3[0][np.newaxis,4:-1]*twoD_factor[:,3:]
row.append(indices[1:-1,4:-1].ravel())
col.append(r_i[1:-1,0:-5].ravel())
dat.append(diag_h3_m4_dat.ravel())
diag_h3_m3_dat = diagonals_h3[1][np.newaxis,3:-1]*twoD_factor[:,2:]
row.append(indices[1:-1,3:-1].ravel())
col.append(r_i[1:-1,0:-4].ravel())
dat.append(diag_h3_m3_dat.ravel())
diag_h3_m2_dat = diagonals_h3[2][np.newaxis,2:-1]*twoD_factor[:,1:]
row.append(indices[1:-1,2:-1].ravel())
col.append(r_i[1:-1,0:-3].ravel())
dat.append(diag_h3_m2_dat.ravel())
diag_h3_m1_dat = diagonals_h3[3][np.newaxis,1:-1]*twoD_factor[:,:]
row.append(indices[1:-1,1:-1].ravel())
col.append(r_i[1:-1,0:-2].ravel())
dat.append(diag_h3_m1_dat.ravel())
diag_h3_p0_dat = diagonals_h3[4][np.newaxis,1:-1]*twoD_factor[:,:]
row.append(indices[1:-1,1:-1].ravel())
col.append(r_i[1:-1,1:-1].ravel())
dat.append(diag_h3_p0_dat.ravel())
diag_h3_p1_dat = diagonals_h3[5][np.newaxis,1:-1]*twoD_factor[:,:]
row.append(indices[1:-1,1:-1].ravel())
col.append(r_i[1:-1,2:].ravel())
dat.append(diag_h3_p1_dat.ravel())
diag_h3_p2_dat = diagonals_h3[6][np.newaxis,1:-2]*twoD_factor[:,:-1]
row.append(indices[1:-1,1:-2].ravel())
col.append(r_i[1:-1,3:].ravel())
dat.append(diag_h3_p2_dat.ravel())
diag_h3_p3_dat = diagonals_h3[7][np.newaxis,1:-3]*twoD_factor[:,:-2]
row.append(indices[1:-1,1:-3].ravel())
col.append(r_i[1:-1,4:].ravel())
dat.append(diag_h3_p3_dat.ravel())
# Now we need to do the radial 'advective' term
# First get diagonals relating to the fourth order h term
Phi_int_dxi_new = np.cumsum(0.5*((Phi_n_new*(1-XI))[1:,:]+(Phi_n_new*(1-XI))[:-1,:]),axis=0)*dxi
h2_factor = Phi_n_new*(0.5*XI**2-XI)
h2_factor[1:,:] += Phi_int_dxi_new
h2_prefix = np.zeros(nr)
h2_prefix[1:-1] = -0.5*dt*gamma_ast*Hor
diagonals_h2,offsets_h2 = self._advective_term_h_gradient_alt(r,h_new,2,h2_factor,True,h2_prefix)
if np.isfinite(lambda_ast):
h1_prefix = np.zeros(nr)
h1_prefix[1:-1] = 0.5*dt*gamma_ast/lambda_ast*Hor
diagonals_h1,offsets_h1 = self._advective_term_h_gradient_alt(r,h_new,1,Phi_n_new,True,h1_prefix)
for k in range(len(diagonals_h2)):
diagonals_h2[k] += diagonals_h1[k]
if self._use_artificial_dr_bc:
# Need to modify diagonals in conjunction with the 'artificial' BC at r=dr
for k in range(len(diagonals_h2)):
diagonals_h2[k][:,1] = 0
diag_h2_m4_dat = diagonals_h2[0][1:-1,4:-1]
row.append(indices[1:-1,4:-1].ravel())
col.append(r_i[1:-1,0:-5].ravel())
dat.append(diag_h2_m4_dat.ravel())
diag_h2_m3_dat = diagonals_h2[1][1:-1,3:-1]
row.append(indices[1:-1,3:-1].ravel())
col.append(r_i[1:-1,0:-4].ravel())
dat.append(diag_h2_m3_dat.ravel())
diag_h2_m2_dat = diagonals_h2[2][1:-1,2:-1]
row.append(indices[1:-1,2:-1].ravel())
col.append(r_i[1:-1,0:-3].ravel())
dat.append(diag_h2_m2_dat.ravel())
diag_h2_m1_dat = diagonals_h2[3][1:-1,1:-1]
row.append(indices[1:-1,1:-1].ravel())
col.append(r_i[1:-1,0:-2].ravel())
dat.append(diag_h2_m1_dat.ravel())
diag_h2_p0_dat = diagonals_h2[4][1:-1,1:-1]
row.append(indices[1:-1,1:-1].ravel())
col.append(r_i[1:-1,1:-1].ravel())
dat.append(diag_h2_p0_dat.ravel())
diag_h2_p1_dat = diagonals_h2[5][1:-1,1:-1]
row.append(indices[1:-1,1:-1].ravel())
col.append(r_i[1:-1,2:].ravel())
dat.append(diag_h2_p1_dat.ravel())
diag_h2_p2_dat = diagonals_h2[6][1:-1,1:-2]
row.append(indices[1:-1,1:-2].ravel())
col.append(r_i[1:-1,3:].ravel())
dat.append(diag_h2_p2_dat.ravel())
diag_h2_p3_dat = diagonals_h2[7][1:-1,1:-3]
row.append(indices[1:-1,1:-3].ravel())
col.append(r_i[1:-1,4:].ravel())
dat.append(diag_h2_p3_dat.ravel())
if not self._add_top_Phi_bc:
row.append(indices[-1,4:-1].ravel())
col.append(r_i[-1,0:-5].ravel())
dat.append(diagonals_h2[0][-1,4:-1].ravel())
row.append(indices[-1,3:-1].ravel())
col.append(r_i[-1,0:-4].ravel())
dat.append(diagonals_h2[1][-1,3:-1].ravel())
row.append(indices[-1,2:-1].ravel())
col.append(r_i[-1,0:-3].ravel())
dat.append(diagonals_h2[2][-1,2:-1].ravel())
row.append(indices[-1,1:-1].ravel())
col.append(r_i[-1,0:-2].ravel())
dat.append(diagonals_h2[3][-1,1:-1].ravel())
row.append(indices[-1,1:-1].ravel())
col.append(r_i[-1,1:-1].ravel())
dat.append(diagonals_h2[4][-1,1:-1].ravel())
row.append(indices[-1,1:-1].ravel())
col.append(r_i[-1,2:].ravel())
dat.append(diagonals_h2[5][-1,1:-1].ravel())
row.append(indices[-1,1:-2].ravel())
col.append(r_i[-1,3:].ravel())
dat.append(diagonals_h2[6][-1,1:-2].ravel())
row.append(indices[-1,1:-3].ravel())
col.append(r_i[-1,4:].ravel())
dat.append(diagonals_h2[7][-1,1:-3].ravel())
# done, construct and return
return coo_matrix((np.concatenate(dat),(np.concatenate(row),np.concatenate(col))),shape=(nr*nxi,nr))#.tocsr()
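    # Note on the COO assembly used above (and in the other LHS builders): scipy sums
    # entries that share the same (row, col) pair when the COO matrix is converted or
    # densified, which is what the repeated appends to the same positions rely on.
    # A minimal self-contained illustration (the names here are ours, not the solver's):
    #
    #     from scipy.sparse import coo_matrix
    #     import numpy as np
    #     m = coo_matrix((np.array([1.0, 2.0]), (np.array([0, 0]), np.array([0, 0]))), shape=(2, 2))
    #     assert m.toarray()[0, 0] == 3.0  # duplicate entries are summed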
def _Phi_n_equation_LHS1(self,v_new,dt=None):
"""
Calculate the LHS matrix block corresponding to the
phi_n dependence in the Phi_n equation.
(Here Phi_n = int_0^{h xi} phi_n dz, the input v_old,v_new must contain Phi_n in place of phi_n)
"""
r,xi = self._r,self._xi
nr,nxi = len(r),len(xi)
dr,dxi = r[1],xi[1]
R,XI = self._R,self._XI
r_half = self._r_half
h_ast = self._h_ast
gamma_ast = self._gamma_ast
Psi_d = self._Psi_d
Psi_m = self._Psi_m
lambda_ast = self._lambda_ast
h_new,Phi_n_new,g_s_new,g_b_new = v_new
if dt is None:
dt = self._dt
# Setup some index arrays for constructing the matrix in coo format
r_i,xi_i = np.meshgrid(range(nr),range(nxi))
indices = xi_i*nr+r_i
H = (h_new>h_ast) # Use this to zero out bits where h is too small...
Hor = H[2:-1]/r[2:-1]
A_p0_p0 = np.ones(Phi_n_new.shape)
row,col,dat = [indices.ravel()],[indices.ravel()],[A_p0_p0.ravel()] # initialise with a view of A_p0_p0
# We start by filling out the interior stencils
# Add the forcing term to the main diagonal
A_p0_p0[1:-1,2:-1] += -0.5*dt*H[np.newaxis,2:-1]*(g_b_new-Psi_d)[np.newaxis,2:-1]
# Add the simple non-linear component of the vertical advection term
A_p0_p0[1:-1,2:-1] += +0.25*dt/dxi*(1+Psi_m)*(H*g_b_new/h_new)[np.newaxis,2:-1]*(Phi_n_new[2:,2:-1]-Phi_n_new[:-2,2:-1])
# Add the other non-linear component of the vertical advection term
#for k in range(1,nxi-1): # exclude the two ends
# row.append(indices[k,2:-1])
# col.append(indices[-1,2:-1])
# dat.append(-0.25*dt/dxi*(1+Psi_m)*(XI[k]*H*g_b_new/h)[2:-1]*(Phi_n_new[k+1,2:-1]-Phi_n_new[k-1,2:-1]))
row.append(indices[1:-1,2:-1].ravel())
col.append(np.broadcast_to(indices[-1,2:-1],(nxi-2,nr-3)).ravel())
dat.append((-0.25*dt/dxi*(1+Psi_m)*(H*g_b_new/h_new)[np.newaxis,2:-1]*XI[1:-1,2:-1]\
*(Phi_n_new[2:,2:-1]-Phi_n_new[:-2,2:-1])).ravel())
# Add the remaining vertical advection term
fot_new = self._advective_term(r,h_new)
#v_z_new = +(1.0+Psi_m)*g_b_new[np.newaxis,:]*(Phi_n_new-XI*(Phi_n_new[-1,:])[np.newaxis,:])\
# +gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)/R*fot_new[np.newaxis,:]
v_z_new = (1.0+Psi_m)*g_b_new[np.newaxis,:]*(Phi_n_new-XI*(Phi_n_new[-1,:])[np.newaxis,:])
v_z_new[:,1:-1] += gamma_ast/6.0*(2.0*XI+XI**3-3*XI**2)[:,1:-1]/R[:,1:-1]*fot_new[np.newaxis,1:-1]
A_p0_m1 = np.zeros(Phi_n_new.shape)
A_p0_p1 = np.zeros(Phi_n_new.shape)
A_p0_m1[1:-1,2:-1] = -0.5*dt/(2.0*dxi)*H[np.newaxis,2:-1]*v_z_new[1:-1,2:-1]/h_new[np.newaxis,2:-1]
A_p0_p1[1:-1,2:-1] = +0.5*dt/(2.0*dxi)*H[np.newaxis,2:-1]*v_z_new[1:-1,2:-1]/h_new[np.newaxis,2:-1]
row.append(indices[1:-1,2:-1].ravel());row.append(indices[1:-1,2:-1].ravel())
col.append(indices[2: ,2:-1].ravel());col.append(indices[ :-2,2:-1].ravel())
dat.append(A_p0_p1[1:-1,2:-1].ravel());dat.append(A_p0_m1[1:-1,2:-1].ravel())
# Add the radial 'advective' terms
# Note: we can re-use the self._advective_term_f_gradient function here
A_m1_p0 = np.zeros(Phi_n_new.shape)
A_p1_p0 = np.zeros(Phi_n_new.shape)
diagonals_h2,offsets_h2 = self._advective_term_f_gradient(r,h_new,2,Phi_n_new)
assert offsets_h2[0]==-1
A_m1_p0[1:-1,2:-1] += -0.5*dt*gamma_ast*(0.5*XI**2-XI)[1:-1,2:-1]*Hor[np.newaxis,:]*diagonals_h2[0][np.newaxis,2:-1]
A_p0_p0[1:-1,2:-1] += -0.5*dt*gamma_ast*(0.5*XI**2-XI)[1:-1,2:-1]*Hor[np.newaxis,:]*diagonals_h2[1][np.newaxis,2:-1]
A_p1_p0[1:-1,2:-1] += -0.5*dt*gamma_ast*(0.5*XI**2-XI)[1:-1,2:-1]*Hor[np.newaxis,:]*diagonals_h2[2][np.newaxis,2:-1]
if np.isfinite(lambda_ast):
diagonals_h1,offsets_h1 = self._advective_term_f_gradient(r,h_new,1,Phi_n_new)
assert offsets_h1[0]==-1
A_m1_p0[1:-1,2:-1] += +0.5*dt*gamma_ast/lambda_ast*Hor[np.newaxis,:]*diagonals_h1[0][np.newaxis,2:-1]
A_p0_p0[1:-1,2:-1] += +0.5*dt*gamma_ast/lambda_ast*Hor[np.newaxis,:]*diagonals_h1[1][np.newaxis,2:-1]
A_p1_p0[1:-1,2:-1] += +0.5*dt*gamma_ast/lambda_ast*Hor[np.newaxis,:]*diagonals_h1[2][np.newaxis,2:-1]
row.append(indices[1:-1,2:-1].ravel());row.append(indices[1:-1,2:-1].ravel())
col.append(indices[1:-1,3: ].ravel());col.append(indices[1:-1,1:-2].ravel())
dat.append(A_p1_p0[1:-1,2:-1].ravel());dat.append(A_m1_p0[1:-1,2:-1].ravel())
# Now add the integral component (note this is somewhat denser than other components)
for j in range(1,nxi-1): # exclude the first and last index... (the first is 0 regardless)
for k in range(j+1):
c = 0.5*dxi if (k==0 or k==j) else dxi
row.append(indices[j,2:-1])
col.append(indices[k,1:-2])
dat.append(-0.5*dt*gamma_ast*c*(1-XI[k,2:-1])*Hor*diagonals_h2[0][2:-1])
row.append(indices[j,2:-1])
col.append(indices[k,2:-1])
dat.append(-0.5*dt*gamma_ast*c*(1-XI[k,2:-1])*Hor*diagonals_h2[1][2:-1])
row.append(indices[j,2:-1])
col.append(indices[k,3: ])
dat.append(-0.5*dt*gamma_ast*c*(1-XI[k,2:-1])*Hor*diagonals_h2[2][2:-1])
# Now we need to enforce the boundary conditions...
# When \xi=0 then we want (delta_Phi+Phi_n_new) = 0 ==> delta_Phi = -Phi_n_new
# ==> so ones on the main diagonal is fine, and rhs needs to be set accordingly.
# When r=R then we want (delta_Phi+Phi_n_new) = 0 ==> delta_Phi = -Phi_n_new
# ==> so ones on the main diagonal is fine, and rhs needs to be set accordingly.
if self._add_top_Phi_bc:
# When \xi=1 then we want d^2/d\xi^2(delta_Phi+Phi_n_new) = 0
# ==> stencil 1,-2,_1_ for 1st order, -1,4,-5,_2_ for second order
A_p0_xi1m1 = np.empty(nr)
A_p0_xi1m2 = np.empty(nr)
A_p0_xi1m3 = np.empty(nr)
A_p0_p0[-1,2:-1] = 2.0 # 1 low order, 2 higher order
A_p0_xi1m1[2:-1] = -5.0 # -2 low order, -5 higher order
A_p0_xi1m2[2:-1] = 4.0 # 1 low order, 4 higher order
A_p0_xi1m3[2:-1] = -1.0 # -1 higher order
row.append(indices[-1,2:-1]);row.append(indices[-1,2:-1]);row.append(indices[-1,2:-1])
col.append(indices[-2,2:-1]);col.append(indices[-3,2:-1]);col.append(indices[-4,2:-1])
dat.append(A_p0_xi1m1[2:-1]);dat.append(A_p0_xi1m2[2:-1]);dat.append(A_p0_xi1m3[2:-1])
else:
# Implement the discretisation of the horizontal advection
A_p0_p0[-1,2:-1] = 1.0-0.5*dt*H[2:-1]*(g_b_new-Psi_d)[2:-1]
# the radial advective part... (could really just sub XI=1 here...)
A_m1_p0[-1,2:-1] += -0.5*dt*gamma_ast*(0.5*XI**2-XI)[-1,2:-1]*Hor*diagonals_h2[0][2:-1]
A_p0_p0[-1,2:-1] += -0.5*dt*gamma_ast*(0.5*XI**2-XI)[-1,2:-1]*Hor*diagonals_h2[1][2:-1]
A_p1_p0[-1,2:-1] += -0.5*dt*gamma_ast*(0.5*XI**2-XI)[-1,2:-1]*Hor*diagonals_h2[2][2:-1]
if np.isfinite(lambda_ast):
#diagonals_h1,offsets_h1 = self._advective_term_f_gradient(r,h_new,1,Phi_n_new) # should already exist...
#assert offsets_h1[0]==-1
A_m1_p0[-1,2:-1] += +0.5*dt*gamma_ast/lambda_ast*Hor*diagonals_h1[0][2:-1]
A_p0_p0[-1,2:-1] += +0.5*dt*gamma_ast/lambda_ast*Hor*diagonals_h1[1][2:-1]
A_p1_p0[-1,2:-1] += +0.5*dt*gamma_ast/lambda_ast*Hor*diagonals_h1[2][2:-1]
row.append(indices[-1,2:-1].ravel());row.append(indices[-1,2:-1].ravel())
col.append(indices[-1,3: ].ravel());col.append(indices[-1,1:-2].ravel())
dat.append(A_p1_p0[-1,2:-1].ravel());dat.append(A_m1_p0[-1,2:-1].ravel())
# Now add the integral component (note this is somewhat denser than other components)
j = nxi-1
for k in range(j+1):
c = 0.5*dxi if (k==0 or k==j) else dxi
row.append(indices[j,2:-1])
col.append(indices[k,1:-2])
dat.append(-0.5*dt*gamma_ast*c*(1-XI[k,2:-1])*Hor*diagonals_h2[0][2:-1])
row.append(indices[j,2:-1])
col.append(indices[k,2:-1])
dat.append(-0.5*dt*gamma_ast*c*(1-XI[k,2:-1])*Hor*diagonals_h2[1][2:-1])
row.append(indices[j,2:-1])
col.append(indices[k,3: ])
dat.append(-0.5*dt*gamma_ast*c*(1-XI[k,2:-1])*Hor*diagonals_h2[2][2:-1])
# When r=0 we want to enforce d/dr(delta_Phi+Phi_n_new) = 0
# ==> stencil _3_,-4,1 for second order
A_r0p1_p0 = np.empty(nxi)
A_r0p2_p0 = np.empty(nxi)
A_p0_p0[:,0] = 3.0
A_r0p1_p0[:] = -4.0
A_r0p2_p0[:] = 1.0
row.append(indices[:,0]);row.append(indices[:,0])
col.append(indices[:,1]);col.append(indices[:,2])
dat.append(A_r0p1_p0); dat.append(A_r0p2_p0)
if False:
            # Could implement an optional higher order stencil for r=0 here, but it is not really needed...
pass
if self._use_artificial_dr_bc:
# When r=dr we also enforce Phi(dr)=Phi(0)+0.5*dr^2*Phi''(0) (to smooth things out a bit here)
# ==> stencil -4,_7_,-4,1 (derived from a forward 2nd order stencil for Phi''(0))
A_r1m1_p0 = np.empty(nxi)
A_r1p1_p0 = np.empty(nxi)
A_r1p2_p0 = np.empty(nxi)
A_r1m1_p0[:] = -4.0
A_p0_p0[:,1] = 7.0
A_r1p1_p0[:] = -4.0
A_r1p2_p0[:] = 1.0
row.append(indices[:,1]);row.append(indices[:,1]);row.append(indices[:,1])
col.append(indices[:,0]);col.append(indices[:,2]);col.append(indices[:,3])
dat.append(A_r1m1_p0); dat.append(A_r1p1_p0); dat.append(A_r1p2_p0)
# Final constructions
A_11 = coo_matrix((np.concatenate(dat),(np.concatenate(row),np.concatenate(col))),shape=(nr*nxi,nr*nxi))
# done
return A_11
def _Phi_n_equation_LHS2(self,v_new,dt=None):
"""
Calculate the LHS matrix block corresponding to the
g_s dependence in the Phi_n equation.
(Here Phi_n = int_0^{h xi} phi_n dz, the input v_new must contain Phi_n rather than phi_n)
"""
# Note: there is no dependence on g_s
return None
def _Phi_n_equation_LHS3(self,v_new,dt=None):
"""
Calculate the LHS matrix block corresponding to the
g_b dependence in the Phi_n equation.
(Here Phi_n = int_0^{h xi} phi_n dz, the input v_new must contain Phi_n rather than phi_n)
"""
nr,nxi = len(self._r),len(self._xi)
XI = self._XI
dxi = XI[1,1]
Psi_m = self._Psi_m
h_new,Phi_n_new,g_s_new,g_b_new = v_new
if dt is None:
dt = self._dt
H = (h_new>self._h_ast) # Use this to zero out bits where h is too small...
# Note: This block has a rectangular shape
r_i,xi_i = np.meshgrid(np.arange(nr),np.arange(nxi))
indices = r_i+nr*xi_i
row,col,dat = [],[],[]
# First add the component coming from the forcing term
forcing_term = -0.5*dt*Phi_n_new*H
row.append(indices[1:-1,1:-1].ravel())
col.append(r_i[1:-1,1:-1].ravel())
dat.append(forcing_term[1:-1,1:-1].ravel())
if self._use_artificial_dr_bc:
#forcing_term[:,1] = 0 # Need to modify this in conjunction with the 'artificial' BC at r=dr
row[-1] = indices[1:-1,2:-1].ravel()
col[-1] = r_i[1:-1,2:-1].ravel()
dat[-1] = forcing_term[1:-1,2:-1].ravel()
# Now add the component coming from the vertical advection term
# Note: the following term (from the vertical advection) excludes xi=0 and xi=1 entries
xi_adv_term = 0.25*dt/dxi*(1+Psi_m)*(Phi_n_new[1:-1,:]-XI[1:-1,:]*(Phi_n_new[-1,:])[np.newaxis,:])\
*(Phi_n_new[2:,:]-Phi_n_new[:-2,:])*(H/h_new)[np.newaxis,:]
row.append(indices[1:-1,1:-1].ravel())
col.append(r_i[1:-1,1:-1].ravel())
dat.append(xi_adv_term[:,1:-1].ravel())
if self._use_artificial_dr_bc:
#xi_adv_term[:,1] = 0 # Need to modify this in conjunction with the 'artificial' BC at r=dr
row[-1] = indices[1:-1,2:-1].ravel()
col[-1] = r_i[1:-1,2:-1].ravel()
dat[-1] = xi_adv_term[:,2:-1].ravel()
if not self._add_top_Phi_bc:
# Add forcing on top row...
row.append(indices[-1,1:-1].ravel())
col.append(r_i[-1,1:-1].ravel())
dat.append(forcing_term[-1,1:-1].ravel())
# done, construct and return
return coo_matrix((np.concatenate(dat),(np.concatenate(row),np.concatenate(col))),shape=(nr*nxi,nr))#.tocsr()
def _g_s_equation_RHS(self,v_old,v_new,dt=None):
"""
Calculate the RHS vector component corresponding to the g_s equation.
"""
r = self._r
nr = len(r)
dr = r[1]
r_half = self._r_half
h_ast = self._h_ast
D = self._D
Q_s = self._Q_s
h_old,phi_n_old,g_s_old,g_b_old = v_old
h_new,phi_n_new,g_s_new,g_b_new = v_new
if dt is None:
dt = self._dt
# Calculate spatial stencil and add to the interior of the rhs vector
rhs = | np.empty(nr) | numpy.empty |
"""Miscellaneous internal PyJanitor helper functions."""
import functools
import os
import sys
import warnings
from typing import Callable, Dict, List, Union
import numpy as np
import pandas as pd
from .errors import JanitorError
def check(varname: str, value, expected_types: list):
"""
One-liner syntactic sugar for checking types.
Should be used like this::
check('x', x, [int, float])
:param varname: The name of the variable.
:param value: The value of the varname.
:param expected_types: The types we expect the item to be.
    :raises TypeError: If the value is not one of the expected types.
"""
is_expected_type = False
for t in expected_types:
if isinstance(value, t):
is_expected_type = True
break
if not is_expected_type:
raise TypeError(
"{varname} should be one of {expected_types}".format(
varname=varname, expected_types=expected_types
)
)
def _clean_accounting_column(x: str) -> float:
"""
Perform the logic for the `cleaning_style == "accounting"` attribute.
This is a private function, not intended to be used outside of
``currency_column_to_numeric``.
It is intended to be used in a pandas `apply` method.
    :returns: The cleaned value of the cell as a float.
"""
y = x.strip()
y = y.replace(",", "")
y = y.replace(")", "")
y = y.replace("(", "-")
if y == "-":
return 0.00
return float(y)
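# Illustration of the accounting cleaning above (not part of the library logic):
# "(1,000.00)" -> commas and ")" removed, "(" becomes "-" -> -1000.0
# "1,234.56" -> 1234.56, and a bare "-" is treated as 0.00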
def _currency_column_to_numeric(x, cast_non_numeric=None) -> Union[int, float, str]:
"""
Perform logic for changing cell values.
This is a private function intended to be used only in
``currency_column_to_numeric``.
It is intended to be used in a pandas `apply` method, after being passed
through `partial`.
"""
acceptable_currency_characters = {
"-",
".",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"0",
}
if len(x) == 0:
return "ORIGINAL_NA"
if cast_non_numeric:
if x in cast_non_numeric.keys():
check(
"{%r: %r}" % (x, str(cast_non_numeric[x])),
cast_non_numeric[x],
[int, float],
)
return cast_non_numeric[x]
else:
return "".join(i for i in x if i in acceptable_currency_characters)
else:
return "".join(i for i in x if i in acceptable_currency_characters)
def _replace_empty_string_with_none(column_series):
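    """Set empty strings in a pandas Series to None and return the Series."""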
column_series.loc[column_series == ""] = None
return column_series
def _replace_original_empty_string_with_none(column_series):
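    """Set 'ORIGINAL_NA' placeholder values in a Series back to None."""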
column_series.loc[column_series == "ORIGINAL_NA"] = None
return column_series
def _strip_underscores(
df: pd.DataFrame, strip_underscores: Union[str, bool] = None
) -> pd.DataFrame:
"""
Strip underscores from DataFrames column names.
Underscores can be stripped from the beginning, end or both.
.. code-block:: python
df = _strip_underscores(df, strip_underscores='left')
:param df: The pandas DataFrame object.
:param strip_underscores: (optional) Removes the outer underscores from all
column names. Default None keeps outer underscores. Values can be
either 'left', 'right' or 'both' or the respective shorthand 'l', 'r'
and True.
:returns: A pandas DataFrame with underscores removed.
"""
df = df.rename(
columns=lambda x: _strip_underscores_func(x, strip_underscores)
)
return df
def _strip_underscores_func(
col: str, strip_underscores: Union[str, bool] = None
) -> str:
"""Strip underscores from a string."""
underscore_options = [None, "left", "right", "both", "l", "r", True]
if strip_underscores not in underscore_options:
raise JanitorError(
f"strip_underscores must be one of: {underscore_options}"
)
if strip_underscores in ["left", "l"]:
col = col.lstrip("_")
elif strip_underscores in ["right", "r"]:
col = col.rstrip("_")
elif strip_underscores == "both" or strip_underscores is True:
col = col.strip("_")
return col
def import_message(
submodule: str,
package: str,
conda_channel: str = None,
pip_install: bool = False,
):
"""
Return warning if package is not found.
Generic message for indicating to the user when a function relies on an
optional module / package that is not currently installed. Includes
installation instructions. Used in `chemistry.py` and `biology.py`.
:param submodule: pyjanitor submodule that needs an external dependency.
:param package: External package this submodule relies on.
:param conda_channel: Conda channel package can be installed from,
if at all.
:param pip_install: Whether package can be installed via pip.
"""
is_conda = os.path.exists(os.path.join(sys.prefix, "conda-meta"))
installable = True
if is_conda:
if conda_channel is None:
installable = False
installation = f"{package} cannot be installed via conda"
else:
installation = f"conda install -c {conda_channel} {package}"
else:
if pip_install:
installation = f"pip install {package}"
else:
installable = False
installation = f"{package} cannot be installed via pip"
print(
f"To use the janitor submodule {submodule}, you need to install "
f"{package}."
)
print()
if installable:
print("To do so, use the following command:")
print()
print(f" {installation}")
else:
print(f"{installation}")
def idempotent(func: Callable, df: pd.DataFrame, *args, **kwargs):
"""
Raises error if a function operating on a `DataFrame` is not idempotent,
that is, `func(func(df)) = func(df)` is not true for all `df`.
:param func: A python method.
:param df: A pandas `DataFrame`.
:param args: Positional arguments supplied to the method.
:param kwargs: Keyword arguments supplied to the method.
:raises ValueError: If `func` is found to not be idempotent for the given
`DataFrame` `df`.
"""
    if not func(df, *args, **kwargs).equals(
        func(func(df, *args, **kwargs), *args, **kwargs)
    ):
raise ValueError(
"Supplied function is not idempotent for the given " "DataFrame."
)
def deprecated_alias(**aliases) -> Callable:
"""
Used as a decorator when deprecating old function argument names, while
keeping backwards compatibility.
Implementation is inspired from `StackOverflow`_.
.. _StackOverflow: https://stackoverflow.com/questions/49802412/how-to-implement-deprecation-in-python-with-argument-alias
Functional usage example:
.. code-block:: python
@deprecated_alias(a='alpha', b='beta')
def simple_sum(alpha, beta):
return alpha + beta
:param aliases: Dictionary of aliases for a function's arguments.
:return: Your original function wrapped with the kwarg redirection
function.
""" # noqa: E501
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
rename_kwargs(func.__name__, kwargs, aliases)
return func(*args, **kwargs)
return wrapper
return decorator
def refactored_function(message: str) -> Callable:
"""Used as a decorator when refactoring functions
Implementation is inspired from `Hacker Noon`_.
    .. _Hacker Noon: https://hackernoon.com/why-refactoring-how-to-restructure-python-package-51b89aa91987
Functional usage example:
.. code-block:: python
@refactored_function(
message="simple_sum() has been refactored. Use hard_sum() instead."
)
def simple_sum(alpha, beta):
return alpha + beta
:param message: Message to use in warning user about refactoring.
:return: Your original function wrapped with the kwarg redirection
function.
""" # noqa: E501
def decorator(func):
def emit_warning(*args, **kwargs):
warnings.warn(message, FutureWarning)
return func(*args, **kwargs)
return emit_warning
return decorator
def rename_kwargs(func_name: str, kwargs: Dict, aliases: Dict):
"""
Used to update deprecated argument names with new names. Throws a
TypeError if both arguments are provided, and warns if old alias is used.
Implementation is inspired from `StackOverflow`_.
.. _StackOverflow: https://stackoverflow.com/questions/49802412/how-to-implement-deprecation-in-python-with-argument-alias
:param func_name: name of decorated function.
:param kwargs: Arguments supplied to the method.
:param aliases: Dictionary of aliases for a function's arguments.
:return: Nothing; the passed `kwargs` are modified directly.
""" # noqa: E501
for old_alias, new_alias in aliases.items():
if old_alias in kwargs:
if new_alias in kwargs:
raise TypeError(
f"{func_name} received both {old_alias} and {new_alias}"
)
warnings.warn(
f"{old_alias} is deprecated; use {new_alias}",
DeprecationWarning,
)
kwargs[new_alias] = kwargs.pop(old_alias)
def check_column(
df: pd.DataFrame, old_column_names: List, present: bool = True
):
"""
One-liner syntactic sugar for checking the presence or absence of a column.
Should be used like this::
        check_column(df, ['a', 'b'], present=True)
    :param df: The pandas DataFrame object.
:param old_column_names: A list of column names we want to check to see if
present (or absent) in df.
:param present: If True (default), checks to see if all of old_column_names
are in df.columns. If False, checks that none of old_column_names are
in df.columns.
    :raises ValueError: If the presence/absence check fails for any column name.
"""
for column_name in old_column_names:
if present:
if column_name not in df.columns:
raise ValueError(
f"{column_name} not present in dataframe columns!"
)
else: # Tests for exclusion
if column_name in df.columns:
raise ValueError(
f"{column_name} already present in dataframe columns!"
)
def skipna(f: Callable) -> Callable:
"""
Decorator for escaping np.nan and None in a function
Should be used like this::
df[column].apply(skipna(transform))
or::
@skipna
def transform(x):
pass
:param f: the function to be wrapped
:returns: _wrapped, the wrapped function
"""
def _wrapped(x, *args, **kwargs):
if (type(x) is float and np.isnan(x)) or x is None:
return np.nan
else:
return f(x, *args, **kwargs)
return _wrapped
def skiperror(
f: Callable, return_x: bool = False, return_val=np.nan
) -> Callable:
"""
Decorator for escaping errors in a function
Should be used like this::
df[column].apply(
skiperror(transform, return_val=3, return_x=False))
or::
@skiperror(return_val=3, return_x=False)
def transform(x):
pass
:param f: the function to be wrapped
:param return_x: whether or not the original value that caused error
should be returned
:param return_val: the value to be returned when an error hits.
Ignored if return_x is True
:returns: _wrapped, the wrapped function
"""
def _wrapped(x, *args, **kwargs):
try:
return f(x, *args, **kwargs)
except Exception:
if return_x:
return x
return return_val
return _wrapped
def _check_instance(entry: Dict):
"""
Function to check instances in the expand_grid function.
This checks if entry is a dictionary,
checks the instance of value in key:value pairs in entry,
and makes changes to other types as deemed necessary.
Additionally, type-specific errors are raised
if unsupported data types are passed in as values
in the entry dictionary.
How each type is handled, and their associated exceptions,
are pretty clear from the code.
"""
# dictionary should not be empty
if not entry:
raise ValueError("passed dictionary cannot be empty")
# If it is a NoneType, number, Boolean, or string,
# then wrap in a list
entry = {
key: [value]
if isinstance(value, (type(None), int, float, bool, str))
else value
for key, value in entry.items()
}
# Convert to list if value is a set|tuple|range
entry = {
key: list(value) if isinstance(value, (set, tuple, range)) else value
for key, value in entry.items()
}
# collect dataframes here
dfs = []
# collect non dataframes here, proper dicts
dicts = {}
for key, value in entry.items():
# exclude dicts:
if isinstance(value, dict):
raise TypeError("Nested dictionaries are not allowed")
# process arrays
if isinstance(value, np.ndarray):
if value.size == 0:
raise ValueError("array cannot be empty")
if value.ndim == 1:
dfs.append(pd.DataFrame(value, columns=[key]))
elif value.ndim == 2:
dfs.append(pd.DataFrame(value).add_prefix(f"{key}_"))
else:
raise TypeError(
"`expand_grid` works with only vector and matrix arrays"
)
# process series
if isinstance(value, pd.Series):
if value.empty:
raise ValueError("passed Series cannot be empty")
if not isinstance(value.index, pd.MultiIndex):
# this section checks if the Series has a name or not
# and uses that information to create a new column name
# for the resulting Dataframe
if value.name:
value = value.to_frame(name=f"{key}_{value.name}")
dfs.append(value)
else:
value = value.to_frame(name=f"{key}")
dfs.append(value)
else:
raise TypeError(
"`expand_grid` does not work with pd.MultiIndex"
)
# process dataframe
if isinstance(value, pd.DataFrame):
if value.empty:
raise ValueError("passed DataFrame cannot be empty")
if not (
isinstance(value.index, pd.MultiIndex)
or isinstance(value.columns, pd.MultiIndex)
):
# add key to dataframe columns
value = value.add_prefix(f"{key}_")
dfs.append(value)
else:
raise TypeError(
"`expand_grid` does not work with pd.MultiIndex"
)
# process lists
if isinstance(value, list):
if not value:
raise ValueError("passed Sequence cannot be empty")
if np.array(value).ndim == 1:
checklist = (type(None), str, int, float, bool)
instance_check_type = (
isinstance(internal, checklist) for internal in value
)
if all(instance_check_type):
dicts.update({key: value})
else:
raise ValueError("values in iterable must be scalar")
elif np.array(value).ndim == 2:
value = pd.DataFrame(value).add_prefix(f"{key}_")
dfs.append(value)
else:
raise ValueError("Sequence's dimension should be 1d or 2d")
return dfs, dicts
def _grid_computation_dict(dicts: Dict) -> pd.DataFrame:
"""
Function used within the expand_grid function,
to compute dataframe from values that are not dataframes/arrays/series.
These values are collected into a dictionary,
and processed with numpy meshgrid.
    NumPy's meshgrid is faster than itertools.product;
    the speed advantage grows with the size of the input dictionary,
    and the result also converts quickly to a dataframe.
"""
# if there is only name value pair in the dictionary
if len(dicts) == 1:
final = pd.DataFrame(dicts)
# if there are more than one name value pair
else:
# extract value from each key:value pair
# in the dicts dictionary
extracted_data = [value for key, value in dicts.items()]
# create the cartesian product of the extracted data
res = np.meshgrid(*extracted_data)
# get sorter array from the first entry in the res list
# this way, we can safely sort all
# the arrays by the first array
sorter = np.argsort(res[0].ravel())
# flatten each array and sort with the sorter
res = [entry.ravel()[sorter] for entry in res]
# pair each array with the appropriate dictionary key
res = dict(zip(dicts, res))
# create dataframe
final = pd.DataFrame(res)
return final
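
# --- Illustrative sketch (not part of the original module; assumes `numpy as
# np` and `pandas as pd` are imported at the top of the file). It shows the
# meshgrid-based cartesian product used above on a tiny input: every
# combination of the two value lists becomes one row of the resulting frame.
def _meshgrid_cartesian_example():
    dicts = {"x": [1, 2], "y": ["a", "b", "c"]}
    grids = np.meshgrid(*dicts.values())
    flat = [grid.ravel() for grid in grids]
    return pd.DataFrame(dict(zip(dicts, flat)))  # 6 rows: (1, a) ... (2, c)
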
def _grid_computation_list(frames: List) -> pd.DataFrame:
"""
Compute the cartesian product of two Dataframes.
Used by the expand_grid function.
    NumPy is used here because it is faster than running
    a many-to-many join with pandas merge,
    and it preserves the original data types.
    Structured arrays were tried initially, but converting them
    to dataframes proved slow, especially for mixed data types.
"""
# get the product of all the lengths of the dataframes
    length = np.prod([ent.index.size for ent in frames])
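
# --- Illustrative sketch (not part of the original module, whose
# _grid_computation_list is truncated above). It sketches the numpy-based
# cross join the docstring describes: each frame's row indices are repeated /
# tiled so every row of one frame is paired with every row of the other,
# which preserves dtypes without a many-to-many pandas merge.
def _numpy_cross_join_example():
    left = pd.DataFrame({"a": [1, 2]})
    right = pd.DataFrame({"b": ["x", "y", "z"]})
    left_idx = np.repeat(np.arange(len(left)), len(right))
    right_idx = np.tile(np.arange(len(right)), len(left))
    return pd.concat(
        [left.take(left_idx).reset_index(drop=True),
         right.take(right_idx).reset_index(drop=True)],
        axis=1,
    )  # 6 rows: (1, x) ... (2, z)
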
from ROAR.control_module.controller import Controller
from ROAR.utilities_module.vehicle_models import VehicleControl, Vehicle
from ROAR.utilities_module.data_structures_models import Transform, Location
import numpy as np
import logging
from ROAR.agent_module.agent import Agent
from typing import Tuple
import json
from pathlib import Path
import cvxpy as cp
import scipy
import scipy.signal
import scipy.linalg
class MPCController(Controller):
def __init__(self, agent, steering_boundary: Tuple[float, float],
throttle_boundary: Tuple[float, float], **kwargs):
super().__init__(agent, **kwargs)
self.max_speed = self.agent.agent_settings.max_speed
self.throttle_boundary = throttle_boundary
self.steering_boundary = steering_boundary
self.config = json.load(
Path(agent.agent_settings.mpc_config_file_path).open(mode='r'))
self.controller = FullMPCController(agent=agent,
throttle_boundary=throttle_boundary,
steering_boundary=steering_boundary,
max_speed=self.max_speed,
config=self.config)
self.logger = logging.getLogger(__name__)
def run_in_series(self, next_waypoint: Transform, **kwargs) -> VehicleControl:
long_control, lat_control = self.controller.run_in_series(next_waypoint=next_waypoint,
target_speed=kwargs.get("target_speed", self.max_speed))
long_control = float(np.clip(long_control, *self.throttle_boundary))
lat_control = float(np.clip(lat_control, *self.steering_boundary))
return VehicleControl(throttle=long_control, steering=lat_control)
class FullMPCController(Controller):
def __init__(self, agent, config: dict,
throttle_boundary: Tuple[float, float],
steering_boundary: Tuple[float, float],
max_speed: float,
dt: float = 0.03, **kwargs):
super().__init__(agent, **kwargs)
self.config = config
self.max_speed = max_speed
self.throttle_boundary = throttle_boundary
self.steering_boundary = steering_boundary
self._dt = dt
self.A_matrices, self.B_matrices = self.construct_linearized_matrices(max_speed)
self.last_steer_CMD = 0
def get_throttle_CMD(self, Fr_x, vx):
"""Calculates the motor input command
Calculates the motor input command based on the optimal rear tire longitudinal force
given by solving the CVXPY problem. The optimal rear tire longitudinal force is then
used with the longitudinal dynamics model to solve for the actual motor input command.
Args:
Fr_x: Optimal rear tire longitudinal force
vx: Current longitudinal velocity
Returns:
Motor input command
"""
return (Fr_x + self.config['F_friction'] + self.config['C_d'] * vx**2) / self.config['b_motor']
def get_steer_CMD(self, Ff_y, beta, r, vx):
"""Calculates the steering input command
Calculates the steering input command based on the optimal front tire lateral force
given by solving the CVXPY problem. The optimal front tire lateral force is then
used with the lateral dynamics model to solve for the actual steering input command.
Args:
Ff_y: Optimal front tire lateral force
beta: Current side slip angle of vehicle
r: Current angular velocity
vx: Current longitudinal velocity
Returns:
steer_cmd
"""
# Makes sure the argument to the arcsin function on the following line is valid
arcsin_arg = np.clip(Ff_y / (-self.config['mu'] * self.config['Ff_z']), -1, 1)
alpha_f = np.tan(np.arcsin(arcsin_arg) / self.config['C']) / self.config['B']
steer_angle = np.arctan(beta + ((r * self.config['Lf']) / (vx + 10e-1))) - alpha_f
steer_cmd = steer_angle / self.config['max_angle']
self.last_steer_CMD = np.abs(steer_cmd)
return steer_cmd
def linearize_around_steer_angle(self, steer_angle_eq, speed_eq):
"""Calculates linearized state space equations
Linearizes and discretizes the state space equations of the vehicle dynamics model
around a given equilibrium steering angle and equilibrium speed.
Args:
steer_angle_eq: Equilibrium steering angle to linearize around
speed_eq: Equilibrium vehicle speed to linearize around
Returns:
Ad: The linearized and discretized A matrix in the state space model
Bd: The linearized and discretized B matrix in the state space model
"""
# Linearize system state equations around a steering angle and 100km/hr
beta_eq = np.arctan((self.config['Lr'] / self.config['wheelbase']) * np.tan(steer_angle_eq))
vx_eq = speed_eq * np.cos(beta_eq)
r_eq = (speed_eq / self.config['Lr']) * np.sin(beta_eq)
alpha_f = np.arctan(beta_eq + (r_eq * self.config['Lf']) / vx_eq) - steer_angle_eq
Ff_y_eq = -self.config['mu'] * self.config['Ff_z'] * np.sin(self.config['C'] * np.arctan(self.config['B'] * alpha_f))
Fr_y_eq = (self.config['Lf'] * Ff_y_eq * np.cos(steer_angle_eq)) / self.config['Lr']
# Find partial derivative entries for A and B matrices
a_13 = -(Fr_y_eq + Ff_y_eq * np.cos(steer_angle_eq)) / (self.config['mass'] * vx_eq)
a_31 = -vx_eq * r_eq
        # Below is a more complex a_31 term that comes from Gonzales dissertation; it was found to be unnecessary but may be useful for improving performance
# a_31 = vx_eq * r_eq \
# + ((Ff_y_eq * np.cos(steer_angle_eq)) / mass) \
# * (1 /(1 + (beta_eq + ((r_eq * Lf) / vx_eq))**2))
Ac = np.array([
[0, -1, a_13],
[0, 0, 0,],
[a_31, 0, 0]])
b_11 = np.cos(steer_angle_eq) / (self.config['mass'] * vx_eq)
b_21 = np.cos(steer_angle_eq) * self.config['Lf'] / self.config['Izz']
b_31 = -np.sin(steer_angle_eq) / self.config['mass']
Bc = np.array([
[b_11, 0],
[b_21, 0],
[b_31, 1/self.config['mass']]])
# C and D are just for calling cont2discrete
        Cc = np.zeros((3, 3))
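        # --- Hedged completion: the original fragment is truncated here. ---
        # The continuous-time system (Ac, Bc) would typically be discretised
        # with scipy.signal.cont2discrete; Dc is only the zero feedthrough
        # matrix that API expects. The exact discretisation step used by the
        # original author is an assumption.
        Dc = np.zeros((3, 2))
        Ad, Bd, _, _, _ = scipy.signal.cont2discrete((Ac, Bc, Cc, Dc), self._dt)
        return Ad, Bd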
# Code by <NAME>
# Edited by <NAME> 07/2015
from builtins import zip
from builtins import range
import numpy as np
from cosmosis.datablock import names as section_names
from cosmosis.datablock import option_section
import scipy.interpolate
import warnings
def gaussian(z, mu, sigma):
    g = np.exp(-0.5 * (z - mu)**2 / sigma**2)
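    # Hedged completion: the original fragment is truncated here. Whether the
    # original normalised the kernel is unknown, so the bare Gaussian is returned.
    return g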
"""Functions for building the face recognition network.
"""
# MIT License
#
# Copyright (c) 2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from scipy import misc
import matplotlib.pyplot as plt
import sklearn
if sklearn.__version__ < '0.20':
from sklearn.cross_validation import KFold ## < sklearn 0.20
else:
from sklearn.model_selection import KFold ## > sklearn 0.20
from scipy import interpolate
import random
import re
from collections import Counter
import matplotlib.pyplot as plt
import cv2
import python_getdents
from scipy import spatial
import sys
import numpy as np
import pandas
from scipy import misc
#### libs of DavaideSanderburg ####
sys.path.insert(0, '../lib/facenet/src')
#import facenet
import glob
def label_mapping(label_list_src, EXPRSSIONS_TYPE_src, EXPRSSIONS_TYPE_trg):
labels_mapping = []
idx_label_notexist = []
for i, label in enumerate(label_list_src):
expre_src = str.split(EXPRSSIONS_TYPE_src[label], '=')[1]
expre_trg = [x for x in EXPRSSIONS_TYPE_trg if expre_src in x]
if expre_trg == []:
label_trg = -1
idx_label_notexist.append(i)
else:
label_trg = int(str.split(expre_trg[0], '=')[0])
labels_mapping.append(label_trg)
return idx_label_notexist, labels_mapping
def gather(data, label):
i = 0
if data.ndim == 1:
data_batch = np.zeros(len(label))
for idx in label:
data_batch[i] = data[idx]
i += 1
if data.ndim == 2:
data_batch = np.zeros([len(label), np.shape(data)[1]])
for idx in label:
data_batch[i, :] = data[idx, :]
i += 1
if data.ndim > 2:
        print('The data dimension should be less than 3!\n')
assert (data.ndim < 3)
return data_batch
# def scatter(data, index):
# return data_sactter
def generate_labels_id(subs):
subjects = list(set(subs))
subjects = np.sort(subjects)
labels_id = []
for sub in subs:
labels_id.append([idx for idx, subject in enumerate(subjects) if sub == subject][0])
return labels_id
def get_image_paths_and_labels_hand(images_path, labelfile, nfold, ifold):
image_paths = []
labels = []
idx_train_all = []
idx_test_all = []
image_paths_final = []
labels_final = []
image_paths_final_test = []
labels_final_test = []
datal = pandas.read_excel(labelfile)
labels_all = datal['PersonID'].values
labels_frm = datal['Frame'].values
labels_frm_list = labels_frm.tolist()
labels_all_list = labels_all.tolist()
image_paths = glob.glob(os.path.join(images_path, '*.png'))
image_paths.sort()
for imgfile in image_paths:
strtmp = str.split(imgfile,'/')[-1]
strtmp = str.split(strtmp, '_')[0]
framenum = int(strtmp[5:])
idx = labels_frm_list.index(framenum)
labels.append(labels_all_list[idx])
# folds = KFold(n=len(labels_flat), n_folds=nrof_folds, shuffle=True)
if sklearn.__version__ < '0.20':
folds = KFold(n=len(labels), n_folds=10, shuffle=True) ## Before the version of sklearn 0.20
else:
kf = KFold(n_splits=nfold, shuffle=True) ## After the version of sklearn 0.20
i = 0
if sklearn.__version__ < '0.20':
for idx_train, idx_test in folds: ## Before sklearn 0.20
idx_train_all.append([])
idx_train_all[i].append(idx_train)
idx_test_all.append([])
idx_test_all[i].append(idx_test)
# print('train:', idx_train, 'test', idx_test)
i += 1
else:
for idx_train, idx_test in kf.split(labels): ## After skleran 0.20
idx_train_all.append([])
idx_train_all[i].append(idx_train)
idx_test_all.append([])
idx_test_all[i].append(idx_test)
#print('train:', idx_train, 'test', idx_test)
i += 1
idx_train = idx_train_all[ifold][0]
idx_test = idx_test_all[ifold][0]
for idx in idx_train:
#idx_train.append(idx)
image_paths_final.append(image_paths[idx])
labels_final.append(labels[idx])
for idx in idx_test:
#idx_test.append(idx)
image_paths_final_test.append(image_paths[idx])
labels_final_test.append(labels[idx])
nrof_classes = len(set(labels_final))
nrof_classes_test = len(set(labels_final_test))
return image_paths_final, labels_final, nrof_classes, image_paths_final_test, labels_final_test, nrof_classes_test
def get_image_paths_and_labels_headcap(images_path, usage, nfold, ifold):
image_paths = []
labels = []
idx_train_all = []
idx_test_all = []
image_paths_final = []
labels_final = []
folders = os.listdir(images_path)
folders.sort()
for fold in folders:
if not os.path.isdir(os.path.join(images_path, fold)):
continue
img_path_folder = glob.glob(os.path.join(images_path, fold, '*.png'))
img_path_folder.sort()
image_paths += img_path_folder
label_txt = glob.glob(os.path.join(images_path, fold, '*.txt'))[0]
with open(label_txt, 'r') as f:
for line in f.readlines():
line = line.replace('\r\n','\n')
#print ('%s %s'%(fold, line))
labels.append(int(line[-2:-1]))
# folds = KFold(n=len(labels_flat), n_folds=nrof_folds, shuffle=True)
if sklearn.__version__ < '0.20':
folds = KFold(n=len(labels), n_folds=10, shuffle=False) ## Before the version of sklearn 0.20
else:
kf = KFold(n_splits=nfold, shuffle=False) ## After the version of sklearn 0.20
i = 0
if sklearn.__version__ < '0.20':
for idx_train, idx_test in folds: ## Before sklearn 0.20
idx_train_all.append([])
idx_train_all[i].append(idx_train)
idx_test_all.append([])
idx_test_all[i].append(idx_test)
# print('train:', idx_train, 'test', idx_test)
i += 1
else:
for idx_train, idx_test in kf.split(labels): ## After skleran 0.20
idx_train_all.append([])
idx_train_all[i].append(idx_train)
idx_test_all.append([])
idx_test_all[i].append(idx_test)
#print('train:', idx_train, 'test', idx_test)
i += 1
idx_train = idx_train_all[ifold][0]
idx_test = idx_test_all[ifold][0]
if usage == 'Training':
for idx in idx_train:
#idx_train.append(idx)
image_paths_final.append(image_paths[idx])
labels_final.append(labels[idx])
if usage == 'Test':
for idx in idx_test:
#idx_test.append(idx)
image_paths_final.append(image_paths[idx])
labels_final.append(labels[idx])
nrof_classes = len(set(labels_final))
return image_paths_final, labels_final, usage, nrof_classes
def get_image_paths_and_labels_recog(dataset):
image_paths_flat = []
labels_flat = []
classes_flat = []
for i in range(len(dataset)):
image_paths_flat += dataset[i].image_paths
classes_flat += [dataset[i].name]
labels_flat += [i] * len(dataset[i].image_paths)
return image_paths_flat, labels_flat, classes_flat
def random_rotate_image(image):
# angle = np.random.uniform(low=-10.0, high=10.0)
angle = np.random.uniform(low=-180.0, high=180.0)
return misc.imrotate(image, angle, 'bicubic')
def prewhiten(x):
mean = np.mean(x)
std = np.std(x)
std_adj = np.maximum(std, 1.0 / np.sqrt(x.size))
y = np.multiply(np.subtract(x, mean), 1 / std_adj)
return y
def crop(image, random_crop, image_size):
if image.shape[1] > image_size:
        sz1 = int(image.shape[1] // 2)  # Python 3: // is integer division
        sz2 = int(image_size // 2)
if random_crop:
diff = sz1 - sz2
(h, v) = (np.random.randint(-diff, diff + 1), np.random.randint(-diff, diff + 1))
else:
(h, v) = (0, 0)
image = image[(sz1 - sz2 + v):(sz1 + sz2 + v), (sz1 - sz2 + h):(sz1 + sz2 + h), :]
return image
def flip(image, random_flip):
if random_flip and np.random.choice([True, False]):
image = np.fliplr(image)
return image
def to_rgb(img):
w, h = img.shape
ret = np.empty((w, h, 3), dtype=np.uint8)
ret[:, :, 0] = ret[:, :, 1] = ret[:, :, 2] = img
return ret
def load_data_test(image_paths, do_random_crop, do_random_flip, image_size, do_prewhiten=True):
nrof_samples = len(image_paths)
images = np.zeros((nrof_samples, image_size, image_size, 3))
for i in range(nrof_samples):
img = misc.imread(image_paths[i])
img = cv2.resize(img, (image_size, image_size))
if img.ndim == 2:
img = to_rgb(img)
if do_prewhiten:
img = prewhiten(img)
img = cv2.resize(img, (image_size, image_size))
##img = crop(img, do_random_crop, image_size)
img = flip(img, do_random_flip)
images[i, :, :, :] = img
return images
def load_data_mega(image_paths, do_random_crop, do_random_flip, do_resize, image_size, BBox, do_prewhiten=True):
nrof_samples = len(image_paths)
images = np.zeros((nrof_samples, image_size, image_size, 3))
for i in range(nrof_samples):
image = misc.imread(image_paths[i])
BBox = BBox.astype(int)
img = image[BBox[i, 0]:BBox[i, 0] + BBox[i, 2], BBox[i, 1]:BBox[i, 1] + BBox[i, 3], :]
if img.ndim == 2:
img = to_rgb(img)
if do_prewhiten:
img = prewhiten(img)
if do_resize:
img = cv2.resize(img, (image_size, image_size), interpolation=cv2.INTER_NEAREST)
img = crop(img, do_random_crop, image_size)
img = flip(img, do_random_flip)
images[i, :, :, :] = img
return images
def get_learning_rate_from_file(filename, epoch):
with open(filename, 'r') as f:
for line in f.readlines():
line = line.split('#', 1)[0]
if line:
par = line.strip().split(':')
e = int(par[0])
lr = float(par[1])
if e <= epoch:
learning_rate = lr
# else:
# return learning_rate
return learning_rate
def get_dataset(paths):
dataset = []
for path in paths.split(':'):
path_exp = os.path.expanduser(path)
classes = os.listdir(path_exp)
classes.sort()
nrof_classes = len(classes)
for i in range(nrof_classes):
class_name = classes[i]
facedir = os.path.join(path_exp, class_name)
if os.path.isdir(facedir):
images = os.listdir(facedir)
image_paths = [os.path.join(facedir, img) for img in images]
dataset.append(ImageClass(class_name, image_paths))
return dataset
def get_huge_dataset(paths, start_n=0, end_n=-1):
dataset = []
classes = []
for path in paths.split(':'):
path_exp = os.path.expanduser(path)
for (d_ino, d_off, d_reclen, d_type, d_name) in python_getdents.getdents64(path_exp):
if d_name == '.' or d_name == '..':
continue
classes += [d_name]
classes.sort()
nrof_classes = len(classes)
if end_n == -1:
end_n = nrof_classes
if end_n > nrof_classes:
raise ValueError('Invalid end_n:%d more than nrof_class:%d' % (end_n, nrof_classes))
for i in range(start_n, end_n):
if (i % 1000 == 0):
print('reading identities: %d/%d\n' % (i, end_n))
class_name = classes[i]
facedir = os.path.join(path_exp, class_name)
if os.path.isdir(facedir):
images = os.listdir(facedir)
image_paths = [os.path.join(facedir, img) for img in images]
dataset.append(ImageClass(class_name, image_paths))
return dataset
class ImageClass():
"Stores the paths to images for a given class"
def __init__(self, name, image_paths):
self.name = name
self.image_paths = image_paths
def __str__(self):
return self.name + ', ' + str(len(self.image_paths)) + ' images'
def __len__(self):
return len(self.image_paths)
def get_model_filenames(model_dir):
files = os.listdir(model_dir)
meta_files = [s for s in files if s.endswith('.meta')]
if len(meta_files)==0:
raise ValueError('No meta file found in the model directory (%s)' % model_dir)
elif len(meta_files)>1:
raise ValueError('There should not be more than one meta file in the model directory (%s)' % model_dir)
meta_file = meta_files[0]
meta_files = [s for s in files if '.ckpt' in s]
max_step = -1
for f in files:
step_str = re.match(r'(^model-[\w\- ]+.ckpt-(\d+))', f)
if step_str is not None and len(step_str.groups())>=2:
step = int(step_str.groups()[1])
if step > max_step:
max_step = step
ckpt_file = step_str.groups()[0]
return meta_file, ckpt_file
def load_model(model_dir, meta_file, ckpt_file):
model_dir_exp = os.path.expanduser(model_dir)
saver = tf.train.import_meta_graph(os.path.join(model_dir_exp, meta_file))
saver.restore(tf.get_default_session(), os.path.join(model_dir_exp, ckpt_file))
def prewhiten(x):
mean = np.mean(x)
std = np.std(x)
std_adj = np.maximum(std, 1.0 / np.sqrt(x.size))
y = np.multiply(np.subtract(x, mean), 1 / std_adj)
return y
def crop(image, random_crop, image_size):
if min(image.shape[0], image.shape[1]) > image_size:
sz1 = image.shape[0] // 2
sz2 = image.shape[1] // 2
crop_size = image_size//2
diff_h = sz1 - crop_size
diff_v = sz2 - crop_size
(h, v) = (np.random.randint(-diff_h, diff_h + 1), np.random.randint(-diff_v, diff_v + 1))
image = image[(sz1+h-crop_size):(sz1+h+crop_size ), (sz2+v-crop_size):(sz2+v+crop_size ), :]
else:
print("Image size is small than crop image size!")
return image
# def crop(image, random_crop, image_size):
# ## Firstly crop the image as a square according to the y length of the input image
# if image.shape[1] > image_size:
# sz1 = image.shape[1] // 2
# sz2 = image_size // 2
# if random_crop:
# diff = sz1 - sz2
# (h, v) = (np.random.randint(-diff, diff + 1), np.random.randint(-diff, diff + 1))
# else:
# (h, v) = (0, 0)
# image = image[(sz1 - sz2 + v):(sz1 + sz2 + v), (sz1 - sz2 + h):(sz1 + sz2 + h), :]
# return image
def flip(image, random_flip):
if random_flip and np.random.choice([True, False]):
image = np.fliplr(image)
return image
def to_rgb(img):
w, h = img.shape
ret = np.empty((w, h, 3), dtype=np.uint8)
ret[:, :, 0] = ret[:, :, 1] = ret[:, :, 2] = img
return ret
def load_data(image_paths, do_random_crop, do_random_flip, image_size, do_prewhiten=True):
nrof_samples = len(image_paths)
images = np.zeros((nrof_samples, image_size, image_size, 3))
for i in range(nrof_samples):
img = misc.imread(image_paths[i])
if img.ndim == 2:
img = to_rgb(img)
if do_prewhiten:
img = prewhiten(img)
if do_random_crop:
img = crop(img, do_random_crop, image_size)
if do_random_flip:
img = flip(img, do_random_flip)
img = cv2.resize(img,(image_size,image_size))
images[i,:,:,:] = img
return images
def load_data_im(imgs, do_random_crop, do_random_flip, image_size, do_prewhiten=True):
# nrof_samples = len(image_paths)
if (len(imgs.shape) > 3):##RGB images
nrof_samples = imgs.shape[0]
elif (len(imgs.shape) == 3): ## one RGB
nrof_samples = 1
elif (len(imgs.shape) == 2): ## grey images
nrof_samples = imgs.shape[0]
elif (len(imgs.shape) == 1): ## one grey
nrof_samples = 1
else:
print('No images!')
return -1
images = np.zeros((nrof_samples, image_size, image_size, 3))
for i in range(nrof_samples):
# img = misc.imread(image_paths[i])
if len(imgs.shape) == 3 or len(imgs.shape) == 1:
img = imgs
else:
img = imgs[i]
if len(img):
if img.ndim == 2:
img = to_rgb(img)
if do_prewhiten:
img = prewhiten(img)
if do_random_crop:
img = crop(img, do_random_crop, image_size)
if do_random_flip:
img = flip(img, do_random_flip)
img = cv2.resize(img, (image_size, image_size))
images[i] = img
images = np.squeeze(images)
return images
def calculate_roc(thresholds, embeddings1, embeddings2, actual_issame, nrof_folds=10):
assert (embeddings1.shape[0] == embeddings2.shape[0])
assert (embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
folds = KFold(n=nrof_pairs, n_folds=nrof_folds, shuffle=False)
# folds = KFold(n=nrof_pairs, n_folds=nrof_folds, shuffle=True, seed=666)
tprs = np.zeros((nrof_folds, nrof_thresholds))
fprs = np.zeros((nrof_folds, nrof_thresholds))
accuracy = np.zeros((nrof_folds))
best_threshold = np.zeros((nrof_folds))
diff = np.subtract(embeddings1, embeddings2)
dist = np.sum(np.square(diff), 1)
for fold_idx, (train_set, test_set) in enumerate(folds):
# Find the best threshold for the fold
acc_train = np.zeros((nrof_thresholds))
for threshold_idx, threshold in enumerate(thresholds):
_, _, acc_train[threshold_idx], fp_idx, fn_idx = calculate_accuracy(threshold, dist[train_set],
actual_issame[train_set])
best_threshold_index = np.argmax(acc_train)
best_threshold[fold_idx] = thresholds[best_threshold_index]
for threshold_idx, threshold in enumerate(thresholds):
tprs[fold_idx, threshold_idx], fprs[fold_idx, threshold_idx], _, fp_idx, fn_idx = calculate_accuracy(
threshold, dist[test_set], actual_issame[test_set])
_, _, accuracy[fold_idx], fp_idx, fn_idx = calculate_accuracy(thresholds[best_threshold_index], dist[test_set],
actual_issame[test_set])
tpr = np.mean(tprs, 0)
fpr = np.mean(fprs, 0)
mean_best_threshold = np.mean(best_threshold)
# #### Global evaluation (not n-fold evaluation) for collecting the indices of the False positive/negative examples #####
_, _, acc_total, fp_idx, fn_idx = calculate_accuracy(mean_best_threshold, dist, actual_issame)
return tpr, fpr, accuracy, fp_idx, fn_idx, mean_best_threshold
def calculate_roc_cosine(thresholds, embeddings1, embeddings2, actual_issame, nrof_folds=10):
assert (embeddings1.shape[0] == embeddings2.shape[0])
assert (embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
folds = KFold(n=nrof_pairs, n_folds=nrof_folds, shuffle=False)
# folds = KFold(n=nrof_pairs, n_folds=nrof_folds, shuffle=True, seed=666)
tprs = np.zeros((nrof_folds, nrof_thresholds))
fprs = np.zeros((nrof_folds, nrof_thresholds))
accuracy = np.zeros((nrof_folds))
# diff = np.subtract(embeddings1, embeddings2) ###Eucldian l2 distance
# dist = np.sum(np.square(diff), 1)
dist_all = spatial.distance.cdist(embeddings1, embeddings2,
'cosine') ## cosine_distance = 1 - similarity; similarity=dot(u,v)/(||u||*||v||)
dist = dist_all.diagonal()
for fold_idx, (train_set, test_set) in enumerate(folds):
# Find the best threshold for the fold
acc_train = np.zeros((nrof_thresholds))
for threshold_idx, threshold in enumerate(thresholds):
_, _, acc_train[threshold_idx], fp_idx, fn_idx = calculate_accuracy(threshold, dist[train_set],
actual_issame[train_set])
best_threshold_index = np.argmax(acc_train)
for threshold_idx, threshold in enumerate(thresholds):
tprs[fold_idx, threshold_idx], fprs[fold_idx, threshold_idx], _, fp_idx, fn_idx = calculate_accuracy(
threshold,
dist[test_set],
actual_issame[
test_set])
_, _, accuracy[fold_idx], fp_idx, fn_idx = calculate_accuracy(thresholds[best_threshold_index], dist[test_set],
actual_issame[test_set])
tpr = np.mean(tprs, 0)
fpr = np.mean(fprs, 0)
best_threshold = thresholds[best_threshold_index]
# #### Global evaluation (not n-fold evaluation) for collecting the indices of the False positive/negative examples #####
_, _, acc_total, fp_idx, fn_idx = calculate_accuracy(best_threshold, dist, actual_issame)
return tpr, fpr, accuracy, fp_idx, fn_idx, best_threshold
def calculate_accuracy(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
tp = np.sum(np.logical_and(predict_issame, actual_issame))
fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
tn = np.sum(np.logical_and(np.logical_not(predict_issame), np.logical_not(actual_issame)))
fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))
tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn)
fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn)
acc = float(tp + tn) / dist.size
# #################################### Edit by mzh 11012017 ####################################
# #### save the false predict samples: the false posivite (fp) or the false negative(fn) #####
fp_idx = np.logical_and(predict_issame, np.logical_not(actual_issame))
fn_idx = np.logical_and(np.logical_not(predict_issame), actual_issame)
# #################################### Edit by mzh 11012017 ####################################
return tpr, fpr, acc, fp_idx, fn_idx
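
# --- Illustrative sketch (not part of the original module). ----------------
# calculate_accuracy() thresholds squared L2 distances: pairs closer than the
# threshold are predicted "same". With threshold 1.0 the toy pairs below give
# one true positive, one true negative and one false negative.
def _calculate_accuracy_toy_example():
    dist = np.array([0.5, 1.5, 1.2])
    actual_issame = np.array([True, False, True])
    return calculate_accuracy(1.0, dist, actual_issame)
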
def plot_roc(fpr, tpr, label):
figure = plt.figure()
plt.plot(fpr, tpr, label=label)
plt.title('Receiver Operating Characteristics')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend()
plt.plot([0, 1], [0, 1], 'g--')
plt.grid(True)
plt.show()
return figure
def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10):
assert (embeddings1.shape[0] == embeddings2.shape[0])
assert (embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
diff = np.subtract(embeddings1, embeddings2)
dist = np.sum(np.square(diff), 1)
nrof_thresholds = len(thresholds)
folds = KFold(n=nrof_pairs, n_folds=nrof_folds, shuffle=False)
# folds = KFold(n=nrof_pairs, n_folds=nrof_folds, shuffle=True, seed=666)
val = np.zeros(nrof_folds)
far = np.zeros(nrof_folds)
for fold_idx, (train_set, test_set) in enumerate(folds):
if nrof_thresholds > 1:
# Find the threshold that gives FAR = far_target
far_train = np.zeros(nrof_thresholds)
for threshold_idx, threshold in enumerate(thresholds):
_, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])
if np.max(far_train) >= far_target:
f = interpolate.interp1d(far_train, thresholds, kind='slinear')
threshold = f(far_target)
else:
threshold = 0.0
else:
threshold = thresholds[0]
val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])
val_mean = np.mean(val)
far_mean = np.mean(far)
val_std = np.std(val)
return val_mean, val_std, far_mean, threshold
def calculate_val_cosine(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10):
assert (embeddings1.shape[0] == embeddings2.shape[0])
assert (embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
folds = KFold(n=nrof_pairs, n_folds=nrof_folds, shuffle=False)
# folds = KFold(n=nrof_pairs, n_folds=nrof_folds, shuffle=True, seed=666)
val = np.zeros(nrof_folds)
far = np.zeros(nrof_folds)
# diff = np.subtract(embeddings1, embeddings2)
# dist = np.sum(np.square(diff), 1)
dist_all = spatial.distance.cdist(embeddings1, embeddings2,
'cosine') ## cosine_distance = 1 - similarity; similarity=dot(u,v)/(||u||*||v||)
dist = dist_all.diagonal()
for fold_idx, (train_set, test_set) in enumerate(folds):
# Find the threshold that gives FAR = far_target
far_train = np.zeros(nrof_thresholds)
for threshold_idx, threshold in enumerate(thresholds):
_, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])
if np.max(far_train) >= far_target:
f = interpolate.interp1d(far_train, thresholds, kind='slinear')
threshold = f(far_target)
else:
threshold = 0.0
val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])
val_mean = np.mean(val)
far_mean = np.mean(far)
val_std = np.std(val)
return val_mean, val_std, far_mean, threshold
def calculate_val_far(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
true_accept = np.sum(np.logical_and(predict_issame, actual_issame))
    false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
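    # --- Hedged completion: the original fragment is truncated here; the
    # remainder is assumed to follow the standard facenet implementation
    # (validation rate over "same" pairs, false-accept rate over "different" pairs).
    n_same = np.sum(actual_issame)
    n_diff = np.sum(np.logical_not(actual_issame))
    val = float(true_accept) / float(n_same)
    far = float(false_accept) / float(n_diff)
    return val, far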
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Vector3
from sensor_msgs.msg import Imu
import matplotlib.pyplot as plt
import numpy as np
angle = Vector3
Initialized = False
init_angle = Vector3
# "p" is eular angle array
def rotM(angle):
px = angle.x - init_angle.x
py = angle.y - init_angle.y
pz = angle.z - init_angle.z
Rx = np.array([[1, 0, 0],
[0, np.cos(px), np.sin(px)],
[0, -np.sin(px), np.cos(px)]])
Ry = np.array([[np.cos(py), 0, -np.sin(py)],
[0, 1, 0],
[np.sin(py), 0, np.cos(py)]])
Rz = np.array([[np.cos(pz), np.sin(pz), 0],
                   [-np.sin(pz), np.cos(pz), 0],
                   [0, 0, 1]])
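    # Hedged completion: the original fragment is truncated here. A common
    # convention composes the full rotation as R = Rz * Ry * Rx; the order
    # actually used by the original author is an assumption.
    return Rz.dot(Ry).dot(Rx)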
"""
Paper: Session-Based Recommendation with Graph Neural Networks
Author: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>
Reference: https://github.com/CRIPAC-DIG/SR-GNN
@author: <NAME>
"""
import numpy as np
from model.AbstractRecommender import SeqAbstractRecommender
from util import DataIterator
import tensorflow as tf
from util import pad_sequences
import math
class SRGNN(SeqAbstractRecommender):
def __init__(self, sess, dataset, config):
super(SRGNN, self).__init__(dataset, config)
self.lr = config["lr"]
self.L2 = config["L2"]
self.hidden_size = config["hidden_size"]
self.step = config["step"]
self.batch_size = config["batch_size"]
self.epochs = config["epochs"]
self.lr_dc = config["lr_dc"]
self.lr_dc_step = config["lr_dc_step"]
self.nonhybrid = config["nonhybrid"]
self.max_seq_len = config["max_seq_len"]
self.num_users, self.num_item = dataset.num_users, dataset.num_items
self.user_pos_train = dataset.get_user_train_dict(by_time=True)
self.train_seq = []
self.train_tar = []
for user, seqs in self.user_pos_train.items():
for i in range(1, len(seqs)):
self.train_seq.append(seqs[-i - self.max_seq_len:-i])
self.train_tar.append(seqs[-i])
self.sess = sess
def _create_variable(self):
self.mask_ph = tf.placeholder(dtype=tf.float32, shape=[self.batch_size, None])
        self.alias_ph = tf.placeholder(dtype=tf.int32, shape=[self.batch_size, None])  # node-local (re-numbered) id of each item in the session
        self.item_ph = tf.placeholder(dtype=tf.int32, shape=[self.batch_size, None])  # original item ids plus the pad id; padding is appended at the end
self.target_ph = tf.placeholder(dtype=tf.int32, shape=[self.batch_size])
self.adj_in_ph = tf.placeholder(dtype=tf.float32, shape=[self.batch_size, None, None])
self.adj_out_ph = tf.placeholder(dtype=tf.float32, shape=[self.batch_size, None, None])
stdv = 1.0 / math.sqrt(self.hidden_size)
w_init = tf.random_uniform_initializer(-stdv, stdv)
self.nasr_w1 = tf.get_variable('nasr_w1', [self.hidden_size, self.hidden_size], dtype=tf.float32,
initializer=w_init)
self.nasr_w2 = tf.get_variable('nasr_w2', [self.hidden_size, self.hidden_size], dtype=tf.float32,
initializer=w_init)
self.nasr_v = tf.get_variable('nasrv', [1, self.hidden_size], dtype=tf.float32, initializer=w_init)
self.nasr_b = tf.get_variable('nasr_b', [self.hidden_size], dtype=tf.float32,
initializer=tf.zeros_initializer())
embedding = tf.get_variable(shape=[self.num_item, self.hidden_size], name='embedding',
dtype=tf.float32, initializer=w_init)
zero_pad = tf.zeros([1, self.hidden_size], name="padding")
self.embedding = tf.concat([embedding, zero_pad], axis=0)
self.W_in = tf.get_variable('W_in', shape=[self.hidden_size, self.hidden_size], dtype=tf.float32,
initializer=w_init)
self.b_in = tf.get_variable('b_in', [self.hidden_size], dtype=tf.float32, initializer=w_init)
self.W_out = tf.get_variable('W_out', [self.hidden_size, self.hidden_size], dtype=tf.float32,
initializer=w_init)
self.b_out = tf.get_variable('b_out', [self.hidden_size], dtype=tf.float32, initializer=w_init)
self.B = tf.get_variable('B', [2 * self.hidden_size, self.hidden_size], initializer=w_init)
def ggnn(self):
fin_state = tf.nn.embedding_lookup(self.embedding, self.item_ph) # (b,l,d)
cell = tf.nn.rnn_cell.GRUCell(self.hidden_size)
with tf.variable_scope('gru'):
for i in range(self.step):
fin_state = tf.reshape(fin_state, [self.batch_size, -1, self.hidden_size]) # (b,l,d)
fin_state_tmp = tf.reshape(fin_state, [-1, self.hidden_size]) # (b*l,d)
fin_state_in = tf.reshape(tf.matmul(fin_state_tmp, self.W_in) + self.b_in,
[self.batch_size, -1, self.hidden_size]) # (b,l,d)
# fin_state_tmp = tf.reshape(fin_state, [-1, self.hidden_size]) # (b*l,d)
fin_state_out = tf.reshape(tf.matmul(fin_state_tmp, self.W_out) + self.b_out,
[self.batch_size, -1, self.hidden_size]) # (b,l,d)
av_in = tf.matmul(self.adj_in_ph, fin_state_in) # (b,l,d)
av_out = tf.matmul(self.adj_out_ph, fin_state_out) # (b,l,d)
av = tf.concat([av_in, av_out], axis=-1) # (b,l,2d)
av = tf.expand_dims(tf.reshape(av, [-1, 2 * self.hidden_size]), axis=1) # (b*l,1,2d)
# fin_state_tmp = tf.reshape(fin_state, [-1, self.hidden_size]) # (b*l,d)
state_output, fin_state = tf.nn.dynamic_rnn(cell, av, initial_state=fin_state_tmp)
return tf.reshape(fin_state, [self.batch_size, -1, self.hidden_size]) # (b,l,d)
def _session_embedding(self, re_embedding):
# re_embedding (b,l,d)
rm = tf.reduce_sum(self.mask_ph, 1) # (b,), length of each session
last_idx = tf.stack([tf.range(self.batch_size), tf.to_int32(rm) - 1], axis=1) # (b, 2) index of last item
last_id = tf.gather_nd(self.alias_ph, last_idx) # (b,) alias id of last item
last_h = tf.gather_nd(re_embedding, tf.stack([tf.range(self.batch_size), last_id], axis=1)) # (b,d) embedding of last item
seq_h = [tf.nn.embedding_lookup(re_embedding[i], self.alias_ph[i]) for i in range(self.batch_size)]
seq_h = tf.stack(seq_h, axis=0) # batch_size*T*d
last = tf.matmul(last_h, self.nasr_w1)
seq = tf.matmul(tf.reshape(seq_h, [-1, self.hidden_size]), self.nasr_w2)
last = tf.reshape(last, [self.batch_size, 1, -1])
m = tf.nn.sigmoid(last + tf.reshape(seq, [self.batch_size, -1, self.hidden_size]) + self.nasr_b)
coef = tf.matmul(tf.reshape(m, [-1, self.hidden_size]), self.nasr_v, transpose_b=True) * tf.reshape(self.mask_ph, [-1, 1])
if not self.nonhybrid:
ma = tf.concat([tf.reduce_sum(tf.reshape(coef, [self.batch_size, -1, 1]) * seq_h, 1),
tf.reshape(last, [-1, self.hidden_size])], -1)
sess_embedding = tf.matmul(ma, self.B)
else:
sess_embedding = tf.reduce_sum(tf.reshape(coef, [self.batch_size, -1, 1]) * seq_h, 1)
return sess_embedding
def build_graph(self):
self._create_variable()
with tf.variable_scope('ggnn_model', reuse=None):
node_embedding = self.ggnn()
sess_embedding = self._session_embedding(node_embedding)
item_embedding = self.embedding[:-1]
self.all_logits = tf.matmul(sess_embedding, item_embedding, transpose_b=True)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.target_ph, logits=self.all_logits)
loss = tf.reduce_mean(loss)
vars = tf.trainable_variables()
lossL2 = [tf.nn.l2_loss(v) for v in vars if v.name not in ['bias', 'gamma', 'b', 'g', 'beta']]
loss_train = loss + self.L2 * tf.add_n(lossL2)
global_step = tf.Variable(0)
decay = self.lr_dc_step * len(self.train_seq) / self.batch_size
learning_rate = tf.train.exponential_decay(self.lr, global_step=global_step, decay_steps=decay,
decay_rate=self.lr_dc, staircase=True)
self.train_opt = tf.train.AdamOptimizer(learning_rate).minimize(loss_train, global_step=global_step)
def train_model(self):
train_seq_len = [(idx, len(seq)) for idx, seq in enumerate(self.train_seq)]
train_seq_len = sorted(train_seq_len, key=lambda x: x[1], reverse=True)
train_seq_index, _ = list(zip(*train_seq_len))
self.logger.info(self.evaluator.metrics_info())
for epoch in range(self.epochs):
for bat_index in self._shuffle_index(train_seq_index):
item_seqs = [self.train_seq[idx] for idx in bat_index]
bat_tars = [self.train_tar[idx] for idx in bat_index]
bat_adj_in, bat_adj_out, bat_alias, bat_items, bat_mask = self._build_session_graph(item_seqs)
feed = {self.target_ph: bat_tars,
self.item_ph: bat_items,
self.adj_in_ph: bat_adj_in,
self.adj_out_ph: bat_adj_out,
self.alias_ph: bat_alias,
self.mask_ph: bat_mask}
self.sess.run(self.train_opt, feed_dict=feed)
self.logger.info("epoch %d:\t%s" % (epoch, self.evaluate_model()))
def _shuffle_index(self, seq_index):
index_chunks = DataIterator(seq_index, batch_size=self.batch_size*32, shuffle=False, drop_last=False) # chunking
index_chunks = list(index_chunks)
index_chunks_iter = DataIterator(index_chunks, batch_size=1, shuffle=True, drop_last=False) # shuffle index chunk
for indexes in index_chunks_iter:
indexes = indexes[0]
indexes_iter = DataIterator(indexes, batch_size=self.batch_size, shuffle=True, drop_last=True) # shuffle batch index
for bat_index in indexes_iter:
yield bat_index
def _build_session_graph(self, bat_items):
A_in, A_out, alias_inputs = [], [], []
all_mask = [[1] * len(items) for items in bat_items]
bat_items = pad_sequences(bat_items, value=self.num_item)
unique_nodes = [np.unique(items).tolist() for items in bat_items]
max_n_node = np.max([len(nodes) for nodes in unique_nodes])
for u_seq, u_node, mask in zip(bat_items, unique_nodes, all_mask):
adj_mat = np.zeros((max_n_node, max_n_node))
id_map = {node: idx for idx, node in enumerate(u_node)}
if len(u_seq) > 1:
alias_previous = [id_map[i] for i in u_seq[:len(mask) - 1]]
alias_next = [id_map[i] for i in u_seq[1:len(mask)]]
adj_mat[alias_previous, alias_next] = 1
u_sum_in = np.sum(adj_mat, axis=0)
u_sum_in[np.where(u_sum_in == 0)] = 1
u_A_in = np.divide(adj_mat, u_sum_in)
u_sum_out = np.sum(adj_mat, 1)
u_sum_out[np.where(u_sum_out == 0)] = 1
u_A_out = np.divide(adj_mat.transpose(), u_sum_out)
A_in.append(u_A_in)
A_out.append(u_A_out)
alias_inputs.append([id_map[i] for i in u_seq])
items = pad_sequences(unique_nodes, value=self.num_item)
all_mask = pad_sequences(all_mask, value=0)
return A_in, A_out, alias_inputs, items, all_mask
def evaluate_model(self):
return self.evaluator.evaluate(self)
def predict(self, users, items):
users = DataIterator(users, batch_size=self.batch_size, shuffle=False, drop_last=False)
all_ratings = []
for bat_user in users:
cur_batch_size = len(bat_user)
bat_items = [self.user_pos_train[user][-self.max_seq_len:] for user in bat_user]
bat_adj_in, bat_adj_out, bat_alias, bat_items, bat_mask = self._build_session_graph(bat_items)
if cur_batch_size < self.batch_size: # padding
pad_size = self.batch_size - cur_batch_size
bat_adj_in = np.concatenate([bat_adj_in, [bat_adj_in[-1]] * pad_size], axis=0)
bat_adj_out = np.concatenate([bat_adj_out, [bat_adj_out[-1]] * pad_size], axis=0)
bat_alias = np.concatenate([bat_alias, [bat_alias[-1]] * pad_size], axis=0)
bat_items = np.concatenate([bat_items, [bat_items[-1]] * pad_size], axis=0)
bat_mask = np.concatenate([bat_mask, [bat_mask[-1]] * pad_size], axis=0)
feed = {self.item_ph: bat_items,
self.adj_in_ph: bat_adj_in,
self.adj_out_ph: bat_adj_out,
self.alias_ph: bat_alias,
self.mask_ph: bat_mask}
bat_ratings = self.sess.run(self.all_logits, feed_dict=feed)
all_ratings.extend(bat_ratings[:cur_batch_size])
        all_ratings = np.array(all_ratings)
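        # Hedged completion: the original fragment is truncated here; the
        # evaluator presumably expects the score matrix to be returned.
        return all_ratings

# --- Illustrative sketch (not part of the original model). -----------------
# How _build_session_graph normalises a session graph: consecutive items in a
# session define directed edges, and the adjacency matrix is normalised by
# in-degree (A_in) and out-degree (A_out), as in the SR-GNN formulation.
def _session_graph_toy_example():
    session = [0, 1, 2, 1]  # item ids of one toy session
    nodes = sorted(set(session))
    id_map = {node: idx for idx, node in enumerate(nodes)}
    adj = np.zeros((len(nodes), len(nodes)))
    for prev_item, next_item in zip(session[:-1], session[1:]):
        adj[id_map[prev_item], id_map[next_item]] = 1
    sum_in = adj.sum(axis=0)
    sum_in[sum_in == 0] = 1
    sum_out = adj.sum(axis=1)
    sum_out[sum_out == 0] = 1
    return adj / sum_in, adj.T / sum_out  # A_in, A_out
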
import tempfile
from collections import OrderedDict
import pytest
from hypothesis import given, strategies as st, settings
import numpy as np
import pandas as pd
from ._testtools import strategy_2d_prob_distribution
from tmtoolkit._pd_dt_compat import USE_DT, FRAME_TYPE, pd_dt_colnames
from tmtoolkit.topicmod import model_io
def test_save_load_ldamodel_pickle():
try:
import lda
except ImportError:
pytest.skip('lda not installed')
pfile = 'tests/data/test_pickle_unpickle_ldamodel.pickle'
dtm = np.array([[0, 1], [2, 3], [4, 5], [6, 0]])
doc_labels = ['doc_' + str(i) for i in range(dtm.shape[0])]
vocab = ['word_' + str(i) for i in range(dtm.shape[1])]
model = lda.LDA(2, n_iter=1)
model.fit(dtm)
model_io.save_ldamodel_to_pickle(pfile, model, vocab, doc_labels)
unpickled = model_io.load_ldamodel_from_pickle(pfile)
assert np.array_equal(model.doc_topic_, unpickled['model'].doc_topic_)
assert np.array_equal(model.topic_word_, unpickled['model'].topic_word_)
assert vocab == unpickled['vocab']
assert doc_labels == unpickled['doc_labels']
@given(
topic_word=strategy_2d_prob_distribution(),
top_n=st.integers(min_value=0, max_value=20)
)
@settings(deadline=1000)
def test_ldamodel_top_topic_words(topic_word, top_n):
topic_word = np.array(topic_word)
vocab = np.array(['t%d' % i for i in range(topic_word.shape[1])])
if top_n < 1 or top_n > topic_word.shape[1]:
with pytest.raises(ValueError):
model_io.ldamodel_top_topic_words(topic_word, vocab, top_n)
else:
top_topic_words = model_io.ldamodel_top_topic_words(topic_word, vocab, top_n)
colnames = np.array([model_io.DEFAULT_RANK_NAME_FMT.format(i1=i + 1) for i in range(top_n)])
rownames = np.array([model_io.DEFAULT_TOPIC_NAME_FMT.format(i1=i + 1) for i in range(topic_word.shape[0])])
assert top_topic_words.shape == (topic_word.shape[0], top_n)
assert np.array_equal(top_topic_words.index.values, rownames)
assert np.array_equal(top_topic_words.columns.values, colnames)
@given(
doc_topic=strategy_2d_prob_distribution(),
top_n=st.integers(min_value=0, max_value=20)
)
@settings(deadline=1000)
def test_ldamodel_top_doc_topics(doc_topic, top_n):
doc_topic = np.array(doc_topic)
doc_labels = np.array(['doc%d' % i for i in range(doc_topic.shape[0])])
if top_n < 1 or top_n > doc_topic.shape[1]:
with pytest.raises(ValueError):
model_io.ldamodel_top_topic_words(doc_topic, doc_labels, top_n)
else:
top_doc_topics = model_io.ldamodel_top_doc_topics(doc_topic, doc_labels, top_n)
colnames = np.array([model_io.DEFAULT_RANK_NAME_FMT.format(i1=i + 1) for i in range(top_n)])
assert top_doc_topics.shape == (doc_topic.shape[0], top_n)
assert np.array_equal(top_doc_topics.index.values, doc_labels)
assert np.array_equal(top_doc_topics.columns.values, colnames)
@given(topic_word=strategy_2d_prob_distribution())
@settings(deadline=1000)
def test_ldamodel_full_topic_words(topic_word):
topic_word = np.array(topic_word)
vocab = np.array(['t%d' % i for i in range(topic_word.shape[1])])
df = model_io.ldamodel_full_topic_words(topic_word, vocab)
assert isinstance(df, FRAME_TYPE)
rownames = np.array([model_io.DEFAULT_TOPIC_NAME_FMT.format(i1=i + 1) for i in range(topic_word.shape[0])])
assert np.array_equal(pd_dt_colnames(df), ['_topic'] + list(vocab))
if USE_DT:
assert np.array_equal(df[:, 0].to_list()[0], rownames)
else:
assert np.array_equal(df.iloc[:, 0].to_numpy(), rownames)
@given(doc_topic=strategy_2d_prob_distribution())
@settings(deadline=1000)
def test_ldamodel_full_doc_topics(doc_topic):
    doc_topic = np.array(doc_topic)
# coding=utf-8
import numpy as np
import scipy.interpolate as intpl
import scipy.sparse as sprs
def to_sparse(D, format="csc"):
"""
Transform dense matrix to sparse matrix of return_type
bsr_matrix(arg1[, shape, dtype, copy, blocksize]) Block Sparse Row matrix
coo_matrix(arg1[, shape, dtype, copy]) A sparse matrix in COOrdinate format.
csc_matrix(arg1[, shape, dtype, copy]) Compressed Sparse Column matrix
csr_matrix(arg1[, shape, dtype, copy]) Compressed Sparse Row matrix
dia_matrix(arg1[, shape, dtype, copy]) Sparse matrix with DIAgonal storage
dok_matrix(arg1[, shape, dtype, copy]) Dictionary Of Keys based sparse matrix.
lil_matrix(arg1[, shape, dtype, copy]) Row-based linked list sparse matrix
:param D: Dense matrix
:param format: how to save the sparse matrix
:return: sparse version
"""
if format == "bsr":
return sprs.bsr_matrix(D)
elif format == "coo":
return sprs.coo_matrix(D)
elif format == "csc":
return sprs.csc_matrix(D)
elif format == "csr":
return sprs.csr_matrix(D)
elif format == "dia":
return sprs.dia_matrix(D)
elif format == "dok":
return sprs.dok_matrix(D)
elif format == "lil":
return sprs.lil_matrix(D)
else:
return to_dense(D)
def to_dense(D):
if sprs.issparse(D):
return D.toarray()
elif isinstance(D, np.ndarray):
return D
def next_neighbors_periodic(p, ps, k, T=None):
"""
This function gives for a value p the k points next to it which are found in
in the vector ps and the points which are found periodically.
:param p: value
:param ps: ndarray, vector where to find the next neighbors
:param k: integer, number of neighbours
:return: ndarray, with the k next neighbors and an array containing the
"""
if T is None:
T = ps[-1]-2*ps[0]+ps[1]
p_bar = p - np.floor(p/T)*T
ps = ps - ps[0]
distance_to_p = []
for tk in ps:
d1 = tk+T-p_bar
d2 = tk-p_bar
d3 = tk-T-p_bar
min_d = min([np.abs(d1), np.abs(d2), np.abs(d3)])
if np.abs(d1) == min_d:
distance_to_p.append(d1)
elif np.abs(d2) == min_d:
distance_to_p.append(d2)
else:
distance_to_p.append(d3)
distance_to_p = np.asarray(distance_to_p)
value_index = []
for d,i in zip(distance_to_p, range(distance_to_p.size)):
value_index.append((d, i))
    # sort by absolute distance
    value_index_sorted_by_abs = sorted(value_index, key=lambda s: abs(s[0]))
    if k % 2 == 1:
        value_index_sorted_by_sign = sorted(value_index_sorted_by_abs[0:k + 1], key=lambda s: s[0])[:k]
    else:
        value_index_sorted_by_sign = sorted(value_index_sorted_by_abs[0:k], key=lambda s: s[0])
    return [s[1] for s in value_index_sorted_by_sign], [s[0] + p for s in value_index_sorted_by_sign]
def next_neighbors(p, ps, k):
"""
This function gives for a value p the k points next to it which are found in
in the vector ps
:param p: value
:param ps: ndarray, vector where to find the next neighbors
:param k: integer, number of neighbours
:return: ndarray, with the k next neighbors
"""
distance_to_p = np.abs(ps-p)
# zip it
value_index = []
for d,i in zip(distance_to_p, range(distance_to_p.size)):
value_index.append((d,i))
# sort by distance
value_index_sorted = sorted(value_index, key=lambda s: s[0])
# take first k indices with least distance and sort them
return sorted(map(lambda s: s[1], value_index_sorted[0:k]))
def continue_periodic_array(arr,nn,T):
nn = np.asarray(nn)
d_nn = nn[1:]-nn[:-1]
if np.all(d_nn == np.ones(nn.shape[0]-1)):
return arr[nn]
else:
cont_arr = [arr[nn[0]]]
shift = 0.
for n,d in zip(nn[1:],d_nn):
if d != 1:
shift = -T
cont_arr.append(arr[n]+shift)
return np.asarray(cont_arr)
def restriction_matrix_1d(fine_grid, coarse_grid, k=2, return_type="csc", periodic=False, T=1.0):
"""
We construct the restriction matrix between two 1d grids, using lagrange interpolation.
:param fine_grid: a one dimensional 1d array containing the nodes of the fine grid
:param coarse_grid: a one dimensional 1d array containing the nodes of the coarse grid
:param k: order of the restriction
:return: a restriction matrix
"""
M = np.zeros((coarse_grid.size, fine_grid.size))
n_g = coarse_grid.size
for i, p in zip(range(n_g), coarse_grid):
if periodic:
nn, cont_arr = next_neighbors_periodic(p, fine_grid, k, T)
circulating_one = np.asarray([1.0]+[0.0]*(k-1))
lag_pol = []
for l in range(k):
lag_pol.append(intpl.lagrange(cont_arr, np.roll(circulating_one, l)))
            M[i, nn] = np.asarray([pol(p) for pol in lag_pol])
else:
nn = next_neighbors(p, fine_grid, k)
# construct the lagrange polynomials for the k neighbors
circulating_one = np.asarray([1.0]+[0.0]*(k-1))
lag_pol = []
for l in range(k):
lag_pol.append(intpl.lagrange(fine_grid[nn], np.roll(circulating_one, l)))
            M[i, nn] = np.asarray([pol(p) for pol in lag_pol])
return to_sparse(M, return_type)
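
# --- Illustrative usage sketch (not part of the original module). ----------
# Restricting a smooth function from a fine grid to a coarse grid: the
# restriction matrix evaluates Lagrange interpolants of the fine-grid values
# at the coarse nodes.
def _restriction_example():
    fine = np.linspace(0.0, 1.0, 9)
    coarse = np.linspace(0.0, 1.0, 5)
    R = restriction_matrix_1d(fine, coarse, k=2)
    return R.dot(np.sin(fine))  # approximates np.sin(coarse)
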
def interpolation_matrix_1d(fine_grid, coarse_grid, k=2, return_type="csc", periodic=False, T=1.0):
"""
We construct the interpolation matrix between two 1d grids, using lagrange interpolation.
:param fine_grid: a one dimensional 1d array containing the nodes of the fine grid
:param coarse_grid: a one dimensional 1d array containing the nodes of the coarse grid
:param k: order of the restriction
:return: a interpolation matrix
"""
M = np.zeros((fine_grid.size, coarse_grid.size))
n_f = fine_grid.size
for i, p in zip(range(n_f), fine_grid):
if periodic:
nn,cont_arr = next_neighbors_periodic(p, coarse_grid, k, T)
circulating_one = np.asarray([1.0]+[0.0]*(k-1))
lag_pol = []
for l in range(k):
                lag_pol.append(intpl.lagrange(cont_arr, np.roll(circulating_one, l)))
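            # --- Hedged completion: the original fragment is truncated here;
            # the remainder is assumed to mirror restriction_matrix_1d above.
            M[i, nn] = np.asarray([pol(p) for pol in lag_pol])
        else:
            nn = next_neighbors(p, coarse_grid, k)
            # construct the lagrange polynomials for the k neighbors
            circulating_one = np.asarray([1.0] + [0.0] * (k - 1))
            lag_pol = []
            for l in range(k):
                lag_pol.append(intpl.lagrange(coarse_grid[nn], np.roll(circulating_one, l)))
            M[i, nn] = np.asarray([pol(p) for pol in lag_pol])
    return to_sparse(M, return_type)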
from __future__ import print_function
import argparse
import time
import numpy as np
from models.ResNeXt_DenseNet.models.densenet import densenet
from models.ResNeXt_DenseNet.models.resnext import resnext29
from models.WideResNet_pytorch.wideresnet import WideResNet
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
from ChannelAug import ChannelSplit, ChannelMix
from matplotlib import pyplot as plt
from utils import nentr
# Code From https://github.com/mlaves/bayesian-temperature-scaling
def uceloss(softmaxes, labels, n_bins=15):
d = softmaxes.device
bin_boundaries = torch.linspace(0, 1, n_bins + 1, device=d)
bin_lowers = bin_boundaries[:-1]
bin_uppers = bin_boundaries[1:]
_, predictions = torch.max(softmaxes, 1)
errors = predictions.ne(labels)
uncertainties = nentr(softmaxes, base=softmaxes.size(1))
errors_in_bin_list = []
avg_entropy_in_bin_list = []
uce = torch.zeros(1, device=d)
for bin_lower, bin_upper in zip(bin_lowers, bin_uppers):
# Calculate |uncert - err| in each bin
in_bin = uncertainties.gt(bin_lower.item()) * uncertainties.le(bin_upper.item())
prop_in_bin = in_bin.float().mean() # |Bm| / n
if prop_in_bin.item() > 0.0:
errors_in_bin = errors[in_bin].float().mean() # err()
avg_entropy_in_bin = uncertainties[in_bin].mean() # uncert()
uce += torch.abs(avg_entropy_in_bin - errors_in_bin) * prop_in_bin
errors_in_bin_list.append(errors_in_bin)
avg_entropy_in_bin_list.append(avg_entropy_in_bin)
err_in_bin = torch.tensor(errors_in_bin_list, device=d)
avg_entropy_in_bin = torch.tensor(avg_entropy_in_bin_list, device=d)
return uce, err_in_bin, avg_entropy_in_bin
# Code From https://github.com/mlaves/bayesian-temperature-scaling
def eceloss(softmaxes, labels, n_bins=15):
"""
Modified from https://github.com/gpleiss/temperature_scaling/blob/master/temperature_scaling.py
"""
d = softmaxes.device
bin_boundaries = torch.linspace(0, 1, n_bins + 1, device=d)
bin_lowers = bin_boundaries[:-1]
bin_uppers = bin_boundaries[1:]
confidences, predictions = torch.max(softmaxes, 1)
accuracies = predictions.eq(labels)
accuracy_in_bin_list = []
avg_confidence_in_bin_list = []
ece = torch.zeros(1, device=d)
for bin_lower, bin_upper in zip(bin_lowers, bin_uppers):
in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())
prop_in_bin = in_bin.float().mean()
if prop_in_bin.item() > 0.0:
accuracy_in_bin = accuracies[in_bin].float().mean()
avg_confidence_in_bin = confidences[in_bin].mean()
ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin
accuracy_in_bin_list.append(accuracy_in_bin)
avg_confidence_in_bin_list.append(avg_confidence_in_bin)
acc_in_bin = torch.tensor(accuracy_in_bin_list, device=d)
avg_conf_in_bin = torch.tensor(avg_confidence_in_bin_list, device=d)
return ece, acc_in_bin, avg_conf_in_bin
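
# --- Illustrative sketch (not part of the original script). ----------------
# Expected Calibration Error on a toy batch: predictions are grouped into
# confidence bins and ECE accumulates |average confidence - accuracy| per bin,
# weighted by the fraction of samples in the bin.
def _ece_toy_example():
    softmaxes = torch.tensor([[0.90, 0.10],
                              [0.80, 0.20],
                              [0.20, 0.80],
                              [0.85, 0.15]])
    labels = torch.tensor([0, 0, 1, 1])  # the last prediction is wrong
    ece, acc_in_bin, conf_in_bin = eceloss(softmaxes, labels, n_bins=10)
    return ece.item()
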
parser = argparse.ArgumentParser(description='Trains a CIFAR Classifier', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dataset', type=str, default='cifar10', choices=['cifar10', 'cifar100'], help='Choose between CIFAR-10, CIFAR-100.')
parser.add_argument('--corruption_path', type=str, default='./data/cifar/', help='Corruption dataset path.')
parser.add_argument('--model', '-m', type=str, default='wrn', choices=['wrn', 'allconv', 'densenet', 'resnext'], help='Choose models.')
parser.add_argument('--epochs', '-e', type=int, default=500, help='Epochs.')
parser.add_argument('--learning-rate', '-lr', type=float, default=0.1, help='Initial learning rate.')
parser.add_argument('--batch-size', '-b', type=int, default=128, help='Batch size.')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')
parser.add_argument('--decay', '-wd', type=float, default=0.0005, help='Weight decay.')
parser.add_argument('--print-freq', type=int, default=50, help='Training loss print frequency.')
parser.add_argument('--num-workers', type=int, default=4, help='Number of worker threads.')
args = parser.parse_args()
def train(model, train_loader, optimizer, scheduler):
model.train()
loss_ema = 0.
for i, (images, targets) in enumerate(train_loader):
optimizer.zero_grad()
images = images.cuda()
targets = targets.cuda()
logits = model(images)
loss = F.cross_entropy(logits, targets)
loss.backward()
optimizer.step()
scheduler.step()
loss_ema = loss_ema * 0.1 + float(loss) * 0.9
if i % args.print_freq == 0:
print('Train Loss {:.3f}'.format(loss_ema))
return loss_ema
def get_lr(step, total_steps, lr_max, lr_min):
return lr_min + (lr_max - lr_min) * 0.5 * (1 + np.cos(step / total_steps * np.pi))
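
# --- Illustrative sketch (not part of the original script). ----------------
# get_lr() is a cosine-annealing schedule: the learning rate starts at lr_max
# and decays to lr_min over total_steps along half a cosine wave.
def _cosine_lr_example(total_steps=10, lr_max=0.1, lr_min=1e-5):
    return [get_lr(step, total_steps, lr_max, lr_min)
            for step in range(total_steps + 1)]
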
def test(model, test_loader, calibration=False,):
model.eval()
total_loss = 0.
total_correct = 0
logits_ = []
labels_ = []
with torch.no_grad():
for images, targets in test_loader:
images, targets = images.cuda(), targets.cuda()
logits = model(images)
loss = F.cross_entropy(logits, targets)
pred = logits.data.max(1)[1]
total_loss += float(loss.data)
total_correct += pred.eq(targets.data).sum().item()
if calibration:
logits_.append(torch.softmax(logits, dim=1).detach())
labels_.append(targets.detach())
if calibration:
logits_ = torch.cat(logits_, dim=0)
labels_ = torch.cat(labels_, dim=0)
ece, acc, conf = eceloss(logits_, labels_)
uce, err, entr = uceloss(logits_, labels_)
return ece, uce, total_loss / len(test_loader.dataset), total_correct / len(test_loader.dataset)
return total_loss / len(test_loader.dataset), total_correct / len(test_loader.dataset)
def test_c(net, test_data, base_path):
corruption_accs = []
ece_c = 0
uce_c = 0
CORRUPTIONS = [
'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
'brightness', 'contrast', 'elastic_transform', 'pixelate',
'jpeg_compression'
]
for corruption in CORRUPTIONS:
# Reference to original data is mutated
test_data.data = np.load(base_path + corruption + '.npy')
test_data.targets = torch.LongTensor(np.load(base_path + 'labels.npy'))
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=1000,
shuffle=False,
num_workers=args.num_workers,
pin_memory=True)
ece, uce, test_loss, test_acc = test(net, test_loader, True)
corruption_accs.append(test_acc)
ece_c += ece.item()*100
uce_c += uce.item()*100
print('{}: Test Loss {:.3f} | Test Error {:.3f} | ECE : {:.2f} | UCE : {:.2f}'.format(corruption, test_loss, 100 - 100. * test_acc, ece.item()*100, uce.item()*100))
print('[Mean Corruption ECE : {:.2f}, UCE : {:.2f}]'.format(ece_c/15, uce_c/15))
    return np.mean(corruption_accs)
""" Test functions for linalg module
"""
import os
import sys
import itertools
import traceback
import textwrap
import subprocess
import pytest
import numpy as np
from numpy import array, single, double, csingle, cdouble, dot, identity, matmul
from numpy import multiply, atleast_2d, inf, asarray
from numpy import linalg
from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError
from numpy.linalg.linalg import _multi_dot_matrix_chain_order
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_allclose, suppress_warnings,
assert_raises_regex, HAS_LAPACK64,
)
from numpy.testing._private.utils import requires_memory
def consistent_subclass(out, in_):
# For ndarray subclass input, our output should have the same subclass
# (non-ndarray input gets converted to ndarray).
return type(out) is (type(in_) if isinstance(in_, np.ndarray)
else np.ndarray)
old_assert_almost_equal = assert_almost_equal
def assert_almost_equal(a, b, single_decimal=6, double_decimal=12, **kw):
if asarray(a).dtype.type in (single, csingle):
decimal = single_decimal
else:
decimal = double_decimal
old_assert_almost_equal(a, b, decimal=decimal, **kw)
def get_real_dtype(dtype):
return {single: single, double: double,
csingle: single, cdouble: double}[dtype]
def get_complex_dtype(dtype):
return {single: csingle, double: cdouble,
csingle: csingle, cdouble: cdouble}[dtype]
def get_rtol(dtype):
# Choose a safe rtol
if dtype in (single, csingle):
return 1e-5
else:
return 1e-11
# used to categorize tests
all_tags = {
'square', 'nonsquare', 'hermitian', # mutually exclusive
'generalized', 'size-0', 'strided' # optional additions
}
class LinalgCase:
def __init__(self, name, a, b, tags=set()):
"""
A bundle of arguments to be passed to a test case, with an identifying
name, the operands a and b, and a set of tags to filter the tests
"""
assert_(isinstance(name, str))
self.name = name
self.a = a
self.b = b
self.tags = frozenset(tags) # prevent shared tags
def check(self, do):
"""
Run the function `do` on this test case, expanding arguments
"""
do(self.a, self.b, tags=self.tags)
def __repr__(self):
return f'<LinalgCase: {self.name}>'
def apply_tag(tag, cases):
"""
Add the given tag (a string) to each of the cases (a list of LinalgCase
objects)
"""
assert tag in all_tags, "Invalid tag"
for case in cases:
case.tags = case.tags | {tag}
return cases
#
# Base test cases
#
np.random.seed(1234)
CASES = []
# square test cases
CASES += apply_tag('square', [
LinalgCase("single",
array([[1., 2.], [3., 4.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("double",
array([[1., 2.], [3., 4.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_2",
array([[1., 2.], [3., 4.]], dtype=double),
array([[2., 1., 4.], [3., 4., 6.]], dtype=double)),
LinalgCase("csingle",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=csingle),
array([2. + 1j, 1. + 2j], dtype=csingle)),
LinalgCase("cdouble",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
array([2. + 1j, 1. + 2j], dtype=cdouble)),
LinalgCase("cdouble_2",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble)),
LinalgCase("0x0",
np.empty((0, 0), dtype=double),
np.empty((0,), dtype=double),
tags={'size-0'}),
LinalgCase("8x8",
np.random.rand(8, 8),
np.random.rand(8)),
LinalgCase("1x1",
np.random.rand(1, 1),
np.random.rand(1)),
LinalgCase("nonarray",
[[1, 2], [3, 4]],
[2, 1]),
])
# non-square test-cases
CASES += apply_tag('nonsquare', [
LinalgCase("single_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("single_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=single),
array([2., 1., 3.], dtype=single)),
LinalgCase("double_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=double),
array([2., 1., 3.], dtype=double)),
LinalgCase("csingle_nsq_1",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle),
array([2. + 1j, 1. + 2j], dtype=csingle)),
LinalgCase("csingle_nsq_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle),
array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)),
LinalgCase("cdouble_nsq_1",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),
array([2. + 1j, 1. + 2j], dtype=cdouble)),
LinalgCase("cdouble_nsq_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),
array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)),
LinalgCase("cdouble_nsq_1_2",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),
LinalgCase("cdouble_nsq_2_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),
LinalgCase("8x11",
np.random.rand(8, 11),
np.random.rand(8)),
LinalgCase("1x5",
np.random.rand(1, 5),
np.random.rand(1)),
LinalgCase("5x1",
np.random.rand(5, 1),
np.random.rand(5)),
LinalgCase("0x4",
np.random.rand(0, 4),
np.random.rand(0),
tags={'size-0'}),
LinalgCase("4x0",
np.random.rand(4, 0),
np.random.rand(4),
tags={'size-0'}),
])
# hermitian test-cases
CASES += apply_tag('hermitian', [
LinalgCase("hsingle",
array([[1., 2.], [2., 1.]], dtype=single),
None),
LinalgCase("hdouble",
array([[1., 2.], [2., 1.]], dtype=double),
None),
LinalgCase("hcsingle",
array([[1., 2 + 3j], [2 - 3j, 1]], dtype=csingle),
None),
LinalgCase("hcdouble",
array([[1., 2 + 3j], [2 - 3j, 1]], dtype=cdouble),
None),
LinalgCase("hempty",
np.empty((0, 0), dtype=double),
None,
tags={'size-0'}),
LinalgCase("hnonarray",
[[1, 2], [2, 1]],
None),
LinalgCase("matrix_b_only",
array([[1., 2.], [2., 1.]]),
None),
LinalgCase("hmatrix_1x1",
np.random.rand(1, 1),
None),
])
#
# Gufunc test cases
#
def _make_generalized_cases():
new_cases = []
for case in CASES:
if not isinstance(case.a, np.ndarray):
continue
a = np.array([case.a, 2 * case.a, 3 * case.a])
if case.b is None:
b = None
else:
b = np.array([case.b, 7 * case.b, 6 * case.b])
new_case = LinalgCase(case.name + "_tile3", a, b,
tags=case.tags | {'generalized'})
new_cases.append(new_case)
a = np.array([case.a] * 2 * 3).reshape((3, 2) + case.a.shape)
if case.b is None:
b = None
else:
b = np.array([case.b] * 2 * 3).reshape((3, 2) + case.b.shape)
new_case = LinalgCase(case.name + "_tile213", a, b,
tags=case.tags | {'generalized'})
new_cases.append(new_case)
return new_cases
CASES += _make_generalized_cases()
#
# Generate stride combination variations of the above
#
def _stride_comb_iter(x):
"""
Generate cartesian product of strides for all axes
"""
if not isinstance(x, np.ndarray):
yield x, "nop"
return
stride_set = [(1,)] * x.ndim
stride_set[-1] = (1, 3, -4)
if x.ndim > 1:
stride_set[-2] = (1, 3, -4)
if x.ndim > 2:
stride_set[-3] = (1, -4)
for repeats in itertools.product(*tuple(stride_set)):
new_shape = [abs(a * b) for a, b in zip(x.shape, repeats)]
slices = tuple([slice(None, None, repeat) for repeat in repeats])
# new array with different strides, but same data
xi = np.empty(new_shape, dtype=x.dtype)
xi.view(np.uint32).fill(0xdeadbeef)
xi = xi[slices]
xi[...] = x
xi = xi.view(x.__class__)
assert_(np.all(xi == x))
yield xi, "stride_" + "_".join(["%+d" % j for j in repeats])
# generate also zero strides if possible
if x.ndim >= 1 and x.shape[-1] == 1:
s = list(x.strides)
s[-1] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0"
if x.ndim >= 2 and x.shape[-2] == 1:
s = list(x.strides)
s[-2] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_x"
if x.ndim >= 2 and x.shape[:-2] == (1, 1):
s = list(x.strides)
s[-1] = 0
s[-2] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_0"
def _make_strided_cases():
new_cases = []
for case in CASES:
for a, a_label in _stride_comb_iter(case.a):
for b, b_label in _stride_comb_iter(case.b):
new_case = LinalgCase(case.name + "_" + a_label + "_" + b_label, a, b,
tags=case.tags | {'strided'})
new_cases.append(new_case)
return new_cases
CASES += _make_strided_cases()
#
# Test different routines against the above cases
#
class LinalgTestCase:
TEST_CASES = CASES
def check_cases(self, require=set(), exclude=set()):
"""
Run func on each of the cases with all of the tags in require, and none
of the tags in exclude
"""
for case in self.TEST_CASES:
# filter by require and exclude
if case.tags & require != require:
continue
if case.tags & exclude:
continue
try:
case.check(self.do)
except Exception as e:
msg = f'In test case: {case!r}\n\n'
msg += traceback.format_exc()
raise AssertionError(msg) from e
class LinalgSquareTestCase(LinalgTestCase):
def test_sq_cases(self):
self.check_cases(require={'square'},
exclude={'generalized', 'size-0'})
def test_empty_sq_cases(self):
self.check_cases(require={'square', 'size-0'},
exclude={'generalized'})
class LinalgNonsquareTestCase(LinalgTestCase):
def test_nonsq_cases(self):
self.check_cases(require={'nonsquare'},
exclude={'generalized', 'size-0'})
def test_empty_nonsq_cases(self):
self.check_cases(require={'nonsquare', 'size-0'},
exclude={'generalized'})
class HermitianTestCase(LinalgTestCase):
def test_herm_cases(self):
self.check_cases(require={'hermitian'},
exclude={'generalized', 'size-0'})
def test_empty_herm_cases(self):
self.check_cases(require={'hermitian', 'size-0'},
exclude={'generalized'})
class LinalgGeneralizedSquareTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_sq_cases(self):
self.check_cases(require={'generalized', 'square'},
exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_sq_cases(self):
self.check_cases(require={'generalized', 'square', 'size-0'})
class LinalgGeneralizedNonsquareTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_nonsq_cases(self):
self.check_cases(require={'generalized', 'nonsquare'},
exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_nonsq_cases(self):
self.check_cases(require={'generalized', 'nonsquare', 'size-0'})
class HermitianGeneralizedTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_herm_cases(self):
self.check_cases(require={'generalized', 'hermitian'},
exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_herm_cases(self):
self.check_cases(require={'generalized', 'hermitian', 'size-0'},
exclude={'none'})
def dot_generalized(a, b):
a = asarray(a)
if a.ndim >= 3:
if a.ndim == b.ndim:
# matrix x matrix
new_shape = a.shape[:-1] + b.shape[-1:]
elif a.ndim == b.ndim + 1:
# matrix x vector
new_shape = a.shape[:-1]
else:
raise ValueError("Not implemented...")
r = np.empty(new_shape, dtype=np.common_type(a, b))
for c in itertools.product(*map(range, a.shape[:-2])):
r[c] = dot(a[c], b[c])
return r
else:
return dot(a, b)
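# (Comment added for clarity: dot_generalized emulates matmul-style broadcasting over
# the leading "stack" dimensions, so expected values for the 'generalized' gufunc test
# cases can be computed slice-by-slice with plain dot.)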
def identity_like_generalized(a):
a = asarray(a)
if a.ndim >= 3:
r = np.empty(a.shape, dtype=a.dtype)
r[...] = identity(a.shape[-2])
return r
else:
return identity(a.shape[0])
class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
# kept apart from TestSolve for use for testing with matrices.
def do(self, a, b, tags):
x = linalg.solve(a, b)
assert_almost_equal(b, dot_generalized(a, x))
assert_(consistent_subclass(x, b))
class TestSolve(SolveCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.solve(x, x).dtype, dtype)
def test_0_size(self):
class ArraySubclass(np.ndarray):
pass
# Test system of 0x0 matrices
a = np.arange(8).reshape(2, 2, 2)
b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass)
expected = linalg.solve(a, b)[:, 0:0, :]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, :])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
# Test errors for non-square and only b's dimension being 0
assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b)
assert_raises(ValueError, linalg.solve, a, b[:, 0:0, :])
# Test broadcasting error
b = np.arange(6).reshape(1, 3, 2) # broadcasting error
assert_raises(ValueError, linalg.solve, a, b)
assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
# Test zero "single equations" with 0x0 matrices.
b = np.arange(2).reshape(1, 2).view(ArraySubclass)
expected = linalg.solve(a, b)[:, 0:0]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
b = np.arange(3).reshape(1, 3)
assert_raises(ValueError, linalg.solve, a, b)
assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b)
def test_0_size_k(self):
# test zero multiple equation (K=0) case.
class ArraySubclass(np.ndarray):
pass
a = np.arange(4).reshape(1, 2, 2)
b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass)
expected = linalg.solve(a, b)[:, :, 0:0]
result = linalg.solve(a, b[:, :, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
# test both zero.
expected = linalg.solve(a, b)[:, 0:0, 0:0]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
class InvCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
a_inv = linalg.inv(a)
assert_almost_equal(dot_generalized(a, a_inv),
identity_like_generalized(a))
assert_(consistent_subclass(a_inv, a))
class TestInv(InvCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.inv(x).dtype, dtype)
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.inv(a)
assert_(res.dtype.type is np.float64)
assert_equal(a.shape, res.shape)
assert_(isinstance(res, ArraySubclass))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.inv(a)
assert_(res.dtype.type is np.complex64)
assert_equal(a.shape, res.shape)
assert_(isinstance(res, ArraySubclass))
class EigvalsCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
ev = linalg.eigvals(a)
evalues, evectors = linalg.eig(a)
assert_almost_equal(ev, evalues)
class TestEigvals(EigvalsCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.eigvals(x).dtype, dtype)
x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype))
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.eigvals(a)
assert_(res.dtype.type is np.float64)
assert_equal((0, 1), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(res, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.eigvals(a)
assert_(res.dtype.type is np.complex64)
assert_equal((0,), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(res, np.ndarray))
class EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
evalues, evectors = linalg.eig(a)
assert_allclose(dot_generalized(a, evectors),
np.asarray(evectors) * np.asarray(evalues)[..., None, :],
rtol=get_rtol(evalues.dtype))
assert_(consistent_subclass(evectors, a))
class TestEig(EigCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w, v = np.linalg.eig(x)
assert_equal(w.dtype, dtype)
assert_equal(v.dtype, dtype)
x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
w, v = np.linalg.eig(x)
assert_equal(w.dtype, get_complex_dtype(dtype))
assert_equal(v.dtype, get_complex_dtype(dtype))
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res, res_v = linalg.eig(a)
assert_(res_v.dtype.type is np.float64)
assert_(res.dtype.type is np.float64)
assert_equal(a.shape, res_v.shape)
assert_equal((0, 1), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(a, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res, res_v = linalg.eig(a)
assert_(res_v.dtype.type is np.complex64)
assert_(res.dtype.type is np.complex64)
assert_equal(a.shape, res_v.shape)
assert_equal((0,), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(a, np.ndarray))
class SVDBaseTests:
hermitian = False
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
u, s, vh = linalg.svd(x)
assert_equal(u.dtype, dtype)
assert_equal(s.dtype, get_real_dtype(dtype))
assert_equal(vh.dtype, dtype)
s = linalg.svd(x, compute_uv=False, hermitian=self.hermitian)
assert_equal(s.dtype, get_real_dtype(dtype))
class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
u, s, vt = linalg.svd(a, False)
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
assert_(consistent_subclass(u, a))
assert_(consistent_subclass(vt, a))
class TestSVD(SVDCases, SVDBaseTests):
def test_empty_identity(self):
""" Empty input should put an identity matrix in u or vh """
x = np.empty((4, 0))
u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian)
assert_equal(u.shape, (4, 4))
assert_equal(vh.shape, (0, 0))
assert_equal(u, np.eye(4))
x = np.empty((0, 4))
u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian)
assert_equal(u.shape, (0, 0))
assert_equal(vh.shape, (4, 4))
assert_equal(vh, np.eye(4))
class SVDHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
u, s, vt = linalg.svd(a, False, hermitian=True)
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
def hermitian(mat):
axes = list(range(mat.ndim))
axes[-1], axes[-2] = axes[-2], axes[-1]
return np.conj(np.transpose(mat, axes=axes))
assert_almost_equal(np.matmul(u, hermitian(u)), np.broadcast_to(np.eye(u.shape[-1]), u.shape))
assert_almost_equal(np.matmul(vt, hermitian(vt)), np.broadcast_to(np.eye(vt.shape[-1]), vt.shape))
assert_equal(np.sort(s)[..., ::-1], s)
assert_(consistent_subclass(u, a))
assert_(consistent_subclass(vt, a))
class TestSVDHermitian(SVDHermitianCases, SVDBaseTests):
hermitian = True
class CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
# cond(x, p) for p in (None, 2, -2)
def do(self, a, b, tags):
c = asarray(a) # a might be a matrix
if 'size-0' in tags:
assert_raises(LinAlgError, linalg.cond, c)
return
# +-2 norms
s = linalg.svd(c, compute_uv=False)
assert_almost_equal(
linalg.cond(a), s[..., 0] / s[..., -1],
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, 2), s[..., 0] / s[..., -1],
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, -2), s[..., -1] / s[..., 0],
single_decimal=5, double_decimal=11)
# Other norms
cinv = np.linalg.inv(c)
assert_almost_equal(
linalg.cond(a, 1),
abs(c).sum(-2).max(-1) * abs(cinv).sum(-2).max(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, -1),
abs(c).sum(-2).min(-1) * abs(cinv).sum(-2).min(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, np.inf),
abs(c).sum(-1).max(-1) * abs(cinv).sum(-1).max(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, -np.inf),
abs(c).sum(-1).min(-1) * abs(cinv).sum(-1).min(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, 'fro'),
np.sqrt((abs(c)**2).sum(-1).sum(-1)
* (abs(cinv)**2).sum(-1).sum(-1)),
single_decimal=5, double_decimal=11)
class TestCond(CondCases):
def test_basic_nonsvd(self):
# Smoketest the non-svd norms
A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]])
assert_almost_equal(linalg.cond(A, inf), 4)
assert_almost_equal(linalg.cond(A, -inf), 2/3)
assert_almost_equal(linalg.cond(A, 1), 4)
assert_almost_equal(linalg.cond(A, -1), 0.5)
assert_almost_equal(linalg.cond(A, 'fro'), np.sqrt(265 / 12))
def test_singular(self):
# Singular matrices have infinite condition number for
# positive norms, and negative norms shouldn't raise
# exceptions
As = [np.zeros((2, 2)), np.ones((2, 2))]
p_pos = [None, 1, 2, 'fro']
p_neg = [-1, -2]
for A, p in itertools.product(As, p_pos):
# Inversion may not hit exact infinity, so just check the
# number is large
assert_(linalg.cond(A, p) > 1e15)
for A, p in itertools.product(As, p_neg):
linalg.cond(A, p)
def test_nan(self):
# nans should be passed through, not converted to infs
ps = [None, 1, -1, 2, -2, 'fro']
p_pos = [None, 1, 2, 'fro']
A = np.ones((2, 2))
A[0,1] = np.nan
for p in ps:
c = linalg.cond(A, p)
assert_(isinstance(c, np.float_))
assert_(np.isnan(c))
A = np.ones((3, 2, 2))
A[1,0,1] = np.nan
for p in ps:
c = linalg.cond(A, p)
assert_(np.isnan(c[1]))
if p in p_pos:
assert_(c[0] > 1e15)
assert_(c[2] > 1e15)
else:
assert_(not np.isnan(c[0]))
assert_(not np.isnan(c[2]))
def test_stacked_singular(self):
# Check behavior when only some of the stacked matrices are
# singular
np.random.seed(1234)
A = np.random.rand(2, 2, 2, 2)
A[0,0] = 0
A[1,1] = 0
for p in (None, 1, 2, 'fro', -1, -2):
c = linalg.cond(A, p)
assert_equal(c[0,0], np.inf)
assert_equal(c[1,1], np.inf)
assert_(np.isfinite(c[0,1]))
assert_(np.isfinite(c[1,0]))
class PinvCases(LinalgSquareTestCase,
LinalgNonsquareTestCase,
LinalgGeneralizedSquareTestCase,
LinalgGeneralizedNonsquareTestCase):
def do(self, a, b, tags):
a_ginv = linalg.pinv(a)
# `a @ a_ginv == I` does not hold if a is singular
dot = dot_generalized
assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)
assert_(consistent_subclass(a_ginv, a))
class TestPinv(PinvCases):
pass
class PinvHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
a_ginv = linalg.pinv(a, hermitian=True)
# `a @ a_ginv == I` does not hold if a is singular
dot = dot_generalized
assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)
assert_(consistent_subclass(a_ginv, a))
class TestPinvHermitian(PinvHermitianCases):
pass
class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
d = linalg.det(a)
(s, ld) = linalg.slogdet(a)
if asarray(a).dtype.type in (single, double):
ad = asarray(a).astype(double)
else:
ad = asarray(a).astype(cdouble)
ev = linalg.eigvals(ad)
assert_almost_equal(d, multiply.reduce(ev, axis=-1))
assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1))
s = np.atleast_1d(s)
ld = np.atleast_1d(ld)
m = (s != 0)
assert_almost_equal(np.abs(s[m]), 1)
assert_equal(ld[~m], -inf)
class TestDet(DetCases):
def test_zero(self):
assert_equal(linalg.det([[0.0]]), 0.0)
assert_equal(type(linalg.det([[0.0]])), double)
assert_equal(linalg.det([[0.0j]]), 0.0)
assert_equal(type(linalg.det([[0.0j]])), cdouble)
assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf))
assert_equal(type(linalg.slogdet([[0.0]])[0]), double)
assert_equal(type(linalg.slogdet([[0.0]])[1]), double)
assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf))
assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble)
assert_equal(type(linalg.slogdet([[0.0j]])[1]), double)
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(np.linalg.det(x).dtype, dtype)
ph, s = np.linalg.slogdet(x)
assert_equal(s.dtype, get_real_dtype(dtype))
assert_equal(ph.dtype, dtype)
def test_0_size(self):
a = np.zeros((0, 0), dtype=np.complex64)
res = linalg.det(a)
assert_equal(res, 1.)
assert_(res.dtype.type is np.complex64)
res = linalg.slogdet(a)
assert_equal(res, (1, 0))
assert_(res[0].dtype.type is np.complex64)
assert_(res[1].dtype.type is np.float32)
a = np.zeros((0, 0), dtype=np.float64)
res = linalg.det(a)
assert_equal(res, 1.)
assert_(res.dtype.type is np.float64)
res = linalg.slogdet(a)
assert_equal(res, (1, 0))
assert_(res[0].dtype.type is np.float64)
assert_(res[1].dtype.type is np.float64)
class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase):
def do(self, a, b, tags):
arr = np.asarray(a)
m, n = arr.shape
u, s, vt = linalg.svd(a, False)
x, residuals, rank, sv = linalg.lstsq(a, b, rcond=-1)
if m == 0:
assert_((x == 0).all())
if m <= n:
assert_almost_equal(b, dot(a, x))
assert_equal(rank, m)
else:
assert_equal(rank, n)
assert_almost_equal(sv, sv.__array_wrap__(s))
if rank == n and m > n:
expect_resids = (
np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0)
expect_resids = np.asarray(expect_resids)
if np.asarray(b).ndim == 1:
expect_resids.shape = (1,)
assert_equal(residuals.shape, expect_resids.shape)
else:
expect_resids = np.array([]).view(type(x))
assert_almost_equal(residuals, expect_resids)
assert_(np.issubdtype(residuals.dtype, np.floating))
assert_(consistent_subclass(x, b))
assert_(consistent_subclass(residuals, b))
class TestLstsq(LstsqCases):
def test_future_rcond(self):
a = np.array([[0., 1., 0., 1., 2., 0.],
[0., 2., 0., 0., 1., 0.],
[1., 0., 1., 0., 0., 4.],
[0., 0., 0., 2., 3., 0.]]).T
b = np.array([1, 0, 0, 0, 0, 0])
with suppress_warnings() as sup:
w = sup.record(FutureWarning, "`rcond` parameter will change")
x, residuals, rank, s = linalg.lstsq(a, b)
assert_(rank == 4)
x, residuals, rank, s = linalg.lstsq(a, b, rcond=-1)
assert_(rank == 4)
x, residuals, rank, s = linalg.lstsq(a, b, rcond=None)
assert_(rank == 3)
# Warning should be raised exactly once (first command)
assert_(len(w) == 1)
@pytest.mark.parametrize(["m", "n", "n_rhs"], [
(4, 2, 2),
(0, 4, 1),
(0, 4, 2),
(4, 0, 1),
(4, 0, 2),
(4, 2, 0),
(0, 0, 0)
])
def test_empty_a_b(self, m, n, n_rhs):
a = np.arange(m * n).reshape(m, n)
b = np.ones((m, n_rhs))
x, residuals, rank, s = linalg.lstsq(a, b, rcond=None)
if m == 0:
assert_((x == 0).all())
assert_equal(x.shape, (n, n_rhs))
assert_equal(residuals.shape, ((n_rhs,) if m > n else (0,)))
if m > n and n_rhs > 0:
# residuals are exactly the squared norms of b's columns
r = b - np.dot(a, x)
assert_almost_equal(residuals, (r * r).sum(axis=-2))
assert_equal(rank, min(m, n))
assert_equal(s.shape, (min(m, n),))
def test_incompatible_dims(self):
# use modified version of docstring example
x = np.array([0, 1, 2, 3])
y = np.array([-1, 0.2, 0.9, 2.1, 3.3])
A = np.vstack([x, np.ones(len(x))]).T
with assert_raises_regex(LinAlgError, "Incompatible dimensions"):
linalg.lstsq(A, y, rcond=None)
@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO'])
class TestMatrixPower:
rshft_0 = np.eye(4)
rshft_1 = rshft_0[[3, 0, 1, 2]]
rshft_2 = rshft_0[[2, 3, 0, 1]]
rshft_3 = rshft_0[[1, 2, 3, 0]]
rshft_all = [rshft_0, rshft_1, rshft_2, rshft_3]
noninv = array([[1, 0], [0, 0]])
stacked = np.block([[[rshft_0]]]*2)
#FIXME the 'e' dtype might work in future
dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')]
def test_large_power(self, dt):
rshft = self.rshft_1.astype(dt)
assert_equal(
matrix_power(rshft, 2**100 + 2**10 + 2**5 + 0), self.rshft_0)
assert_equal(
matrix_power(rshft, 2**100 + 2**10 + 2**5 + 1), self.rshft_1)
assert_equal(
matrix_power(rshft, 2**100 + 2**10 + 2**5 + 2), self.rshft_2)
assert_equal(
matrix_power(rshft, 2**100 + 2**10 + 2**5 + 3), self.rshft_3)
def test_power_is_zero(self, dt):
def tz(M):
mz = matrix_power(M, 0)
assert_equal(mz, identity_like_generalized(M))
assert_equal(mz.dtype, M.dtype)
for mat in self.rshft_all:
tz(mat.astype(dt))
if dt != object:
tz(self.stacked.astype(dt))
def test_power_is_one(self, dt):
def tz(mat):
mz = matrix_power(mat, 1)
assert_equal(mz, mat)
assert_equal(mz.dtype, mat.dtype)
for mat in self.rshft_all:
tz(mat.astype(dt))
if dt != object:
tz(self.stacked.astype(dt))
def test_power_is_two(self, dt):
def tz(mat):
mz = matrix_power(mat, 2)
mmul = matmul if mat.dtype != object else dot
assert_equal(mz, mmul(mat, mat))
assert_equal(mz.dtype, mat.dtype)
for mat in self.rshft_all:
tz(mat.astype(dt))
if dt != object:
tz(self.stacked.astype(dt))
def test_power_is_minus_one(self, dt):
def tz(mat):
invmat = matrix_power(mat, -1)
mmul = matmul if mat.dtype != object else dot
assert_almost_equal(
mmul(invmat, mat), identity_like_generalized(mat))
for mat in self.rshft_all:
if dt not in self.dtnoinv:
tz(mat.astype(dt))
def test_exceptions_bad_power(self, dt):
mat = self.rshft_0.astype(dt)
assert_raises(TypeError, matrix_power, mat, 1.5)
assert_raises(TypeError, matrix_power, mat, [1])
def test_exceptions_non_square(self, dt):
assert_raises(LinAlgError, matrix_power, np.array([1], dt), 1)
assert_raises(LinAlgError, matrix_power, np.array([[1], [2]], dt), 1)
assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2), dt), 1)
def test_exceptions_not_invertible(self, dt):
if dt in self.dtnoinv:
return
mat = self.noninv.astype(dt)
assert_raises(LinAlgError, matrix_power, mat, -1)
class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
# note that eigenvalue arrays returned by eig must be sorted since
# their order isn't guaranteed.
ev = linalg.eigvalsh(a, 'L')
evalues, evectors = linalg.eig(a)
evalues.sort(axis=-1)
assert_allclose(ev, evalues, rtol=get_rtol(ev.dtype))
ev2 = linalg.eigvalsh(a, 'U')
assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype))
class TestEigvalsh:
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w = np.linalg.eigvalsh(x)
assert_equal(w.dtype, get_real_dtype(dtype))
def test_invalid(self):
x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
assert_raises(ValueError, np.linalg.eigvalsh, x, UPLO="lrong")
assert_raises(ValueError, np.linalg.eigvalsh, x, "lower")
assert_raises(ValueError, np.linalg.eigvalsh, x, "upper")
def test_UPLO(self):
Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
tgt = np.array([-1, 1], dtype=np.double)
rtol = get_rtol(np.double)
# Check default is 'L'
w = np.linalg.eigvalsh(Klo)
assert_allclose(w, tgt, rtol=rtol)
# Check 'L'
w = np.linalg.eigvalsh(Klo, UPLO='L')
assert_allclose(w, tgt, rtol=rtol)
# Check 'l'
w = np.linalg.eigvalsh(Klo, UPLO='l')
assert_allclose(w, tgt, rtol=rtol)
# Check 'U'
w = np.linalg.eigvalsh(Kup, UPLO='U')
assert_allclose(w, tgt, rtol=rtol)
# Check 'u'
w = np.linalg.eigvalsh(Kup, UPLO='u')
assert_allclose(w, tgt, rtol=rtol)
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.eigvalsh(a)
assert_(res.dtype.type is np.float64)
assert_equal((0, 1), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(res, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.eigvalsh(a)
assert_(res.dtype.type is np.float32)
assert_equal((0,), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(res, np.ndarray))
class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
# note that eigenvalue arrays returned by eig must be sorted since
# their order isn't guaranteed.
ev, evc = linalg.eigh(a)
evalues, evectors = linalg.eig(a)
evalues.sort(axis=-1)
assert_almost_equal(ev, evalues)
assert_allclose(dot_generalized(a, evc),
np.asarray(ev)[..., None, :] * np.asarray(evc),
rtol=get_rtol(ev.dtype))
ev2, evc2 = linalg.eigh(a, 'U')
assert_almost_equal(ev2, evalues)
assert_allclose(dot_generalized(a, evc2),
np.asarray(ev2)[..., None, :] * np.asarray(evc2),
rtol=get_rtol(ev.dtype), err_msg=repr(a))
class TestEigh:
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w, v = np.linalg.eigh(x)
assert_equal(w.dtype, get_real_dtype(dtype))
assert_equal(v.dtype, dtype)
def test_invalid(self):
x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
assert_raises(ValueError, np.linalg.eigh, x, UPLO="lrong")
assert_raises(ValueError, np.linalg.eigh, x, "lower")
assert_raises(ValueError, np.linalg.eigh, x, "upper")
def test_UPLO(self):
Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
tgt = np.array([-1, 1], dtype=np.double)
rtol = get_rtol(np.double)
# Check default is 'L'
w, v = np.linalg.eigh(Klo)
assert_allclose(w, tgt, rtol=rtol)
# Check 'L'
w, v = np.linalg.eigh(Klo, UPLO='L')
assert_allclose(w, tgt, rtol=rtol)
# Check 'l'
w, v = np.linalg.eigh(Klo, UPLO='l')
assert_allclose(w, tgt, rtol=rtol)
# Check 'U'
w, v = np.linalg.eigh(Kup, UPLO='U')
assert_allclose(w, tgt, rtol=rtol)
# Check 'u'
w, v = np.linalg.eigh(Kup, UPLO='u')
assert_allclose(w, tgt, rtol=rtol)
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res, res_v = linalg.eigh(a)
assert_(res_v.dtype.type is np.float64)
assert_(res.dtype.type is np.float64)
assert_equal(a.shape, res_v.shape)
assert_equal((0, 1), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(a, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res, res_v = linalg.eigh(a)
assert_(res_v.dtype.type is np.complex64)
assert_(res.dtype.type is np.float32)
assert_equal(a.shape, res_v.shape)
assert_equal((0,), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(a, np.ndarray))
class _TestNormBase:
dt = None
dec = None
class _TestNormGeneral(_TestNormBase):
def test_empty(self):
assert_equal(norm([]), 0.0)
assert_equal(norm(array([], dtype=self.dt)), 0.0)
assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0)
def test_vector_return_type(self):
a = np.array([1, 0, 1])
exact_types = np.typecodes['AllInteger']
inexact_types = np.typecodes['AllFloat']
all_types = exact_types + inexact_types
for each_inexact_types in all_types:
at = a.astype(each_inexact_types)
an = norm(at, -np.inf)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 0.0)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "divide by zero encountered")
an = norm(at, -1)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 0.0)
an = norm(at, 0)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 2)
an = norm(at, 1)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 2.0)
an = norm(at, 2)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/2.0))
an = norm(at, 4)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/4.0))
an = norm(at, np.inf)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 1.0)
def test_vector(self):
a = [1, 2, 3, 4]
b = [-1, -2, -3, -4]
c = [-1, 2, -3, 4]
def _test(v):
np.testing.assert_almost_equal(norm(v), 30 ** 0.5,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, inf), 4.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -inf), 1.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 1), 10.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -1), 12.0 / 25,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 2), 30 ** 0.5,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -2), ((205. / 144) ** -0.5),
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 0), 4,
decimal=self.dec)
for v in (a, b, c,):
_test(v)
for v in (array(a, dtype=self.dt), array(b, dtype=self.dt),
array(c, dtype=self.dt)):
_test(v)
def test_axis(self):
# Vector norms.
# Compare the use of `axis` with computing the norm of each row
# or column separately.
A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])]
assert_almost_equal(norm(A, ord=order, axis=0), expected0)
expected1 = [norm(A[k, :], ord=order) for k in range(A.shape[0])]
assert_almost_equal(norm(A, ord=order, axis=1), expected1)
# Matrix norms.
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
nd = B.ndim
for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro']:
for axis in itertools.combinations(range(-nd, nd), 2):
row_axis, col_axis = axis
if row_axis < 0:
row_axis += nd
if col_axis < 0:
col_axis += nd
if row_axis == col_axis:
assert_raises(ValueError, norm, B, ord=order, axis=axis)
else:
n = norm(B, ord=order, axis=axis)
# The logic using k_index only works for nd = 3.
# This has to be changed if nd is increased.
k_index = nd - (row_axis + col_axis)
if row_axis < col_axis:
expected = [norm(B[:].take(k, axis=k_index), ord=order)
for k in range(B.shape[k_index])]
else:
expected = [norm(B[:].take(k, axis=k_index).T, ord=order)
for k in range(B.shape[k_index])]
assert_almost_equal(n, expected)
def test_keepdims(self):
A = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
allclose_err = 'order {0}, axis = {1}'
shape_err = 'Shape mismatch found {0}, expected {1}, order={2}, axis={3}'
# check the order=None, axis=None case
expected = norm(A, ord=None, axis=None)
found = norm(A, ord=None, axis=None, keepdims=True)
assert_allclose(np.squeeze(found), expected,
err_msg=allclose_err.format(None, None))
expected_shape = (1, 1, 1)
assert_(found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, None, None))
# Vector norms.
for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
for k in range(A.ndim):
expected = norm(A, ord=order, axis=k)
found = norm(A, ord=order, axis=k, keepdims=True)
assert_allclose(np.squeeze(found), expected,
err_msg=allclose_err.format(order, k))
expected_shape = list(A.shape)
expected_shape[k] = 1
expected_shape = tuple(expected_shape)
assert_(found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, order, k))
# Matrix norms.
for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro', 'nuc']:
for k in itertools.permutations(range(A.ndim), 2):
expected = norm(A, ord=order, axis=k)
found = norm(A, ord=order, axis=k, keepdims=True)
assert_allclose(np.squeeze(found), expected,
err_msg=allclose_err.format(order, k))
expected_shape = list(A.shape)
expected_shape[k[0]] = 1
expected_shape[k[1]] = 1
expected_shape = tuple(expected_shape)
assert_(found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, order, k))
class _TestNorm2D(_TestNormBase):
# Define the part for 2d arrays separately, so we can subclass this
# and run the tests using np.matrix in matrixlib.tests.test_matrix_linalg.
array = np.array
def test_matrix_empty(self):
assert_equal(norm(self.array([[]], dtype=self.dt)), 0.0)
def test_matrix_return_type(self):
a = self.array([[1, 0, 1], [0, 1, 1]])
exact_types = np.typecodes['AllInteger']
# float32, complex64, float64, complex128 types are the only types
# allowed by `linalg`, which performs the matrix operations used
# within `norm`.
inexact_types = 'fdFD'
all_types = exact_types + inexact_types
for each_inexact_types in all_types:
at = a.astype(each_inexact_types)
an = norm(at, -np.inf)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 2.0)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "divide by zero encountered")
an = norm(at, -1)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 1.0)
an = norm(at, 1)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 2.0)
an = norm(at, 2)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 3.0**(1.0/2.0))
an = norm(at, -2)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 1.0)
an = norm(at, np.inf)
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 2.0)
an = norm(at, 'fro')
assert_(issubclass(an.dtype.type, np.floating))
assert_almost_equal(an, 2.0)
an = norm(at, 'nuc')
assert_(issubclass(an.dtype.type, np.floating))
# Lower bar needed to support low precision floats.
# They end up being off by 1 in the 7th place.
np.testing.assert_almost_equal(an, 2.7320508075688772, decimal=6)
def test_matrix_2x2(self):
A = self.array([[1, 3], [5, 7]], dtype=self.dt)
assert_almost_equal(norm(A), 84 ** 0.5)
assert_almost_equal(norm(A, 'fro'), 84 ** 0.5)
assert_almost_equal(norm(A, 'nuc'), 10.0)
        assert_almost_equal(norm(A, inf), 12.0)
__author__ = 'jparedes'
import numpy as np
import pandas as pd
from itertools import compress, chain
import scipy.sparse as sp
def trimf(x, params):
"""
Triangular fuzzy operation in a vector
:param x: Input column vector: array([[0.2],[0.9],[0.42],[0.74],[0.24],[0.28],[0.34]])
    :param params: 3 points of the triangle shape: [0.1, 0.4, 0.7]
:return: Column vector: array([[0.], [0.], [0.8], [0.], [0.], [0.], [0.4]])
"""
a = params[0]
b = params[1]
c = params[2]
y = np.zeros(np.shape(x)) # Left and right shoulders (y = 0)
# Left slope
if a != b:
index = np.logical_and(a < x, x < b) # find(a < x & x < b)
y[index] = (x[index] - a) / (b - a)
# right slope
if b != c:
index = np.logical_and(b < x, x < c) # find(b < x & x < c)
y[index] = (c - x[index]) / (c - b)
# Center (y = 1)
index = x == b
y[index] = 1
return y
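# Small illustrative check (added example, not part of the original module): points on
# the left slope map to (x - a) / (b - a) and points on the right slope map to
# (c - x) / (c - b), e.g.
#   trimf(np.array([[0.25], [0.55]]), [0.1, 0.4, 0.7])  ->  array([[0.5], [0.5]])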
def trapmf(x, params):
"""
Trapezoidal fuzzy operation
:param x: Input column vector
    :param params: 4 points which define the trapezoid
:return: Output column vector
"""
a, b, c, d = params
y1 = np.zeros(np.shape(x))
y2 = np.zeros(np.shape(x))
# Compute y1
index = x >= b
if sum(index) != 0: # ~isempty(index)
y1[index] = 1.
index = x < a
if sum(index) != 0: # ~isempty(index):
y1[index] = 0.
    index = np.logical_and(a <= x, x < b)  # find(a <= x & x < b)
    if a != b and sum(index) != 0:
        y1[index] = (x[index] - a) / (b - a)
    # Compute y2
    index = x <= c
    if sum(index) != 0:
        y2[index] = 1.
    index = x > d
    if sum(index) != 0:
        y2[index] = 0.
    index = np.logical_and(c < x, x <= d)
    if c != d and sum(index) != 0:
        y2[index] = (d - x[index]) / (d - c)
    # The trapezoid is the pointwise minimum of the two shoulders
    return np.minimum(y1, y2)
# program to calculate the B-field for a magnetic coil
import numpy as np
import matplotlib.pyplot as plt
def biot_savart(wire1, wire2, point, current = 1):
'''
current is given in units of inverse permeability
'''
dl = wire2-wire1
r = point - 0.5 * (wire2 + wire1)
return current * np.cross(dl, r) / (4 * np.pi * np.linalg.norm(r)**3)
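# For reference (comment added for clarity): the expression above is the discretised
# Biot-Savart law dB = I * (dl x r) / (4 * pi * |r|**3), where r points from the
# segment midpoint to the evaluation point and mu0 is absorbed into `current`
# (hence "current is given in units of inverse permeability").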
def coil(center, radius, theta, elements):
'''
theta is angle between the plane of the normal and the x-y plane (chosen for numerical reasons)
'''
points = np.zeros((elements, 3))
rad = np.linspace(0, 2 * np.pi, elements, endpoint=True)
for i in range(elements):
points[i,:] = radius*np.array([np.cos(rad[i])*np.cos(theta),np.sin(rad[i])*np.cos(theta),np.sin(theta)])
points = [point + center for point in points]
return points
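# Sketch of how coil() and biot_savart() combine (an assumption based on the functions
# above; the actual summation is not shown in this excerpt): the total field at a point
# is the sum of the contributions of consecutive wire segments.
def total_field(point, wire, current=1):
    contributions = [biot_savart(wire[i], wire[i + 1], point, current)
                     for i in range(len(wire) - 1)]
    return np.sum(contributions, axis=0)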
def single_coil():
'''
elem : Number of elements to use in the coil
num : number of test points along z axis
R : Radius of the coil
theta : Angle between the normal of the coil and x-y plane
'''
elem = 100
num = 10
R = 1
theta = 0
wire = coil(np.zeros(3), R, theta, elem + 1)
points = np.zeros((num,3))
    z = np.linspace(0, 1, num)
import unittest
import torch
import numpy as np
from aitoolbox.torchtrain.train_loop.components.pred_collate_fns import *
class TestBatchPredCollateFns(unittest.TestCase):
def test_append_predictions(self):
preds = []
preds_ep_1 = np.random.rand(100, 1)
preds = append_predictions(preds_ep_1, preds)
self.assertEqual([preds_ep_1], preds)
preds_ep_2 = np.random.rand(45, 1)
preds = append_predictions(preds_ep_2, preds)
self.assertEqual([preds_ep_1, preds_ep_2], preds)
def test_append_concat_predictions(self):
preds = []
preds_ep_1 = np.random.rand(100, 1)
preds = append_predictions(preds_ep_1, preds)
self.assertEqual([preds_ep_1], preds)
preds_ep_2 = np.random.rand(45, 1)
preds = append_predictions(preds_ep_2, preds)
self.assertEqual([preds_ep_1, preds_ep_2], preds)
preds_list = []
preds_list_ep_1 = np.random.rand(100).tolist()
preds_list = append_concat_predictions(preds_list_ep_1, preds_list)
self.assertEqual(preds_list_ep_1, preds_list)
preds_list_ep_2 = np.random.rand(45, 1).tolist()
preds_list = append_concat_predictions(preds_list_ep_2, preds_list)
self.assertEqual(preds_list_ep_1 + preds_list_ep_2, preds_list)
class TestAllPredTransformFns(unittest.TestCase):
def test_torch_cat_transf(self):
self.assertEqual(
torch_cat_transf([torch.Tensor([1, 2]), torch.Tensor([3, 4])]).numpy().tolist(),
            np.array([1., 2., 3., 4.]).tolist())
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for Layer-wise Adaptive Rate Scaling optimizer."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from flars import flars_optimizer
class FLARSOptimizerTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.named_parameters(
('tf.float32 m=0', tf.float32, 0), ('tf.float32 m=0.9', tf.float32, 0.9),
('tf.float64 m=0', tf.float64, 0), ('tf.float64 m=0.9', tf.float64, 0.9))
def testFLARSGradientOneStep(self, dtype, momentum):
shape = [3, 3]
var_np = np.ones(shape)
grad_np = np.ones(shape)
lr_np = 0.1
m_np = momentum
ep_np = 1e-5
eeta = 0.1
vel_np = np.zeros(shape)
var = tf.Variable(var_np, dtype=dtype, name='a')
grad = tf.Variable(grad_np, dtype=dtype)
opt = flars_optimizer.FLARSOptimizer(
learning_rate=lr_np, momentum=m_np, eeta=eeta, epsilon=ep_np)
g_norm = np.linalg.norm(grad_np.flatten(), ord=2)
opt.update_grads_norm([var], [g_norm])
self.evaluate(tf.compat.v1.global_variables_initializer())
pre_var = self.evaluate(var)
self.assertAllClose(var_np, pre_var)
opt.apply_gradients([(grad, var)])
post_var = self.evaluate(var)
w_norm = np.linalg.norm(var_np.flatten(), ord=2)
trust_ratio = eeta * w_norm / (g_norm + ep_np)
scaled_lr = lr_np * trust_ratio
vel_np = m_np * vel_np - scaled_lr * grad_np
var_np += vel_np
self.assertAllClose(var_np, post_var)
if m_np != 0:
post_vel = self.evaluate(opt.get_slot(var, 'momentum'))
self.assertAllClose(vel_np, post_vel)
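  # The reference update verified above follows the LARS rule (comment added for
  # clarity):
  #   scaled_lr = lr * eeta * ||w||_2 / (||g||_2 + eps)
  #   v <- momentum * v - scaled_lr * g
  #   w <- w + v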
@parameterized.named_parameters(
('tf.float32 m=0', tf.float32, 0), ('tf.float32 m=0.9', tf.float32, 0.9),
('tf.float64 m=0', tf.float64, 0), ('tf.float64 m=0.9', tf.float64, 0.9))
def testFLARSGradientMultiStep(self, dtype, momentum):
shape = [3, 3]
var_np = np.ones(shape)
grad_np = np.ones(shape)
lr_np = 0.1
m_np = momentum
ep_np = 1e-5
eeta = 0.1
vel_np = np.zeros(shape)
var = tf.Variable(var_np, dtype=dtype, name='a')
grad = tf.Variable(grad_np, dtype=dtype)
opt = flars_optimizer.FLARSOptimizer(
learning_rate=lr_np, momentum=m_np, eeta=eeta, epsilon=ep_np)
g_norm = np.linalg.norm(grad_np.flatten(), ord=2)
opt.update_grads_norm([var], [g_norm])
self.evaluate(tf.compat.v1.global_variables_initializer())
pre_var = self.evaluate(var)
self.assertAllClose(var_np, pre_var)
for _ in range(10):
opt.apply_gradients([(grad, var)])
post_var = self.evaluate(var)
w_norm = np.linalg.norm(var_np.flatten(), ord=2)
trust_ratio = eeta * w_norm / (g_norm + ep_np)
scaled_lr = lr_np * trust_ratio
vel_np = m_np * vel_np - scaled_lr * grad_np
var_np += vel_np
self.assertAllClose(var_np, post_var)
if m_np != 0:
post_vel = self.evaluate(opt.get_slot(var, 'momentum'))
self.assertAllClose(vel_np, post_vel)
@parameterized.named_parameters(('tf.float32', tf.float32),
('tf.float64', tf.float64))
def testComputeLRMaxRatio(self, dtype):
shape = [3, 3]
var_np = np.ones(shape)
    grad_np = np.ones(shape)
"""
vp_surpise.py
Do calculations for surprisal type functionals
"""
import numpy as np
def vp_surprise(self):
#Calculate overlap
    Pa = np.sum(self.na_frac, axis=1) / np.sum(self.nf, axis=1)
from torch.utils.data.sampler import Sampler
import os
import numpy as np
import torch
import torch.utils.data
import cv2
import imutils
from torchvision.transforms import functional as TF
from PIL import Image
import torchvision
import random
from constants import is_real, workspace_limits, heightmap_resolution, PUSH_Q, GRASP_Q, color_mean, color_std, depth_mean, depth_std, used_binary_mean, used_binary_std, DEPTH_MIN, total_obj, colors_upper, colors_lower, binary_mean, binary_std, distance, resolution, resolution_pad, padding_width, resolution_crop
from math import atan2, cos, sin, sqrt, pi, degrees
import glob
import skimage.io
import json
class SegmentationDataset(torch.utils.data.Dataset):
"""
    Create a segmentation dataset for training Mask R-CNN.
    One variant uses a pre-defined color range to separate objects (assuming each
    object's color is unique within an image); the other reads mask images directly.
"""
def __init__(self, root, transforms, is_real=False):
self.root = root
self.transforms = transforms
self.is_real = is_real
# load all image files, sorting them to ensure that they are aligned
self.color_imgs = list(sorted(os.listdir(os.path.join(root, "color-heightmaps"))))
self.depth_imgs = list(sorted(os.listdir(os.path.join(root, "depth-heightmaps"))))
self.masks = list(sorted(os.listdir(os.path.join(root, "masks"))))
def __getitem__(self, idx):
# load images
color_path = os.path.join(self.root, "color-heightmaps", self.color_imgs[idx])
# depth_path = os.path.join(self.root, "depth-heightmaps", self.depth_imgs[idx])
# color image input
color_img = cv2.imread(color_path)
color_img = cv2.cvtColor(color_img, cv2.COLOR_BGR2RGB)
mask_path = os.path.join(self.root, "masks", self.masks[idx])
mask_img = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
# get masks
hsv = cv2.cvtColor(color_img, cv2.COLOR_RGB2HSV)
masks = []
if self.is_real:
gray = cv2.cvtColor(color_img, cv2.COLOR_RGB2GRAY)
gray = gray.astype(np.uint8)
blurred = cv2.medianBlur(gray, 5)
thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cnts = imutils.grab_contours(cnts)
for c in cnts:
if cv2.contourArea(c) > 100:
mask = np.zeros(color_img.shape[:2], np.uint8)
cv2.drawContours(mask, [c], -1, (1), -1)
masks.append(mask)
# cv2.imshow('mask' + self.color_imgs[idx], mask)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
else:
for ci in range(1, np.max(mask_img)):
mask = mask_img == ci
if np.sum((mask == True)) > 100:
masks.append(mask)
num_objs = len(masks)
if num_objs > 0:
masks = np.stack(masks, axis=0)
# get bounding box coordinates for each mask
boxes = []
for i in range(num_objs):
pos = np.where(masks[i])
xmin = np.min(pos[1])
xmax = np.max(pos[1])
ymin = np.min(pos[0])
ymax = np.max(pos[0])
boxes.append([xmin, ymin, xmax, ymax])
if xmin == xmax or ymin == ymax:
num_objs = 0
# convert everything into a torch.Tensor
boxes = torch.as_tensor(boxes, dtype=torch.float32)
# there is only one class
labels = torch.ones((num_objs,), dtype=torch.int64)
masks = torch.as_tensor(masks, dtype=torch.uint8)
image_id = torch.tensor([idx])
if num_objs > 0:
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
else:
area = torch.as_tensor([0], dtype=torch.float32)
# suppose all instances are not crowd
iscrowd = torch.zeros((num_objs,), dtype=torch.int64)
num_objs = torch.tensor(num_objs)
target = {}
target["boxes"] = boxes
target["labels"] = labels
target["masks"] = masks
target["image_id"] = image_id
target["area"] = area
target["iscrowd"] = iscrowd
target["num_obj"] = num_objs
if self.transforms is not None:
# img, target = self.transforms(img, target)
img, target = self.transforms(color_img, target)
return img, target
def __len__(self):
# return len(self.imgs)
return len(self.color_imgs)
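# Sketch of typical consumption of SegmentationDataset (an assumption; the training
# script is not part of this excerpt). Detection targets vary per image, so batches are
# usually collated into tuples rather than stacked into a single tensor.
def make_segmentation_loader(root, transforms, batch_size=2):
    dataset = SegmentationDataset(root, transforms, is_real=is_real)
    return torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=True, num_workers=4,
        collate_fn=lambda batch: tuple(zip(*batch)))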
class ForegroundDataset(torch.utils.data.Dataset):
'''
    Create a binary image where 1 means foreground and 0 means background.
    For grasping, we care about the center of the object while considering the clearance of the gripper.
    For pushing, we know all pushes go from left to right.
    This labeling approach is the same as the one in the function get_neg of trainer.py.
'''
def __init__(self, root, num_rotations, fine_tuning_num):
self.root = root
# load all image files, sorting them to ensure that they are aligned
self.color_imgs = list(sorted(os.listdir(os.path.join(root, "color-heightmaps"))))
self.depth_imgs = list(sorted(os.listdir(os.path.join(root, "depth-heightmaps"))))
if fine_tuning_num is not None:
self.color_imgs = self.color_imgs[:int(fine_tuning_num)]
self.depth_imgs = self.depth_imgs[:int(fine_tuning_num)]
self.num_rotations = num_rotations
        self.push_large_kernel = np.ones((61, 61))  # hyperparameter
        self.push_small_kernel = np.ones((15, 15))  # hyperparameter
        self.grasp_kernel = np.ones((9, 9))  # hyperparameter
def __getitem__(self, idx):
# load images
color_path = os.path.join(self.root, "color-heightmaps", self.color_imgs[idx])
depth_path = os.path.join(self.root, "depth-heightmaps", self.depth_imgs[idx])
# color image input
color_img = cv2.imread(color_path)
color_img = cv2.cvtColor(color_img, cv2.COLOR_BGR2RGB)
color_img_pil = Image.fromarray(color_img)
# depth image input
depth_img = cv2.imread(depth_path, cv2.IMREAD_UNCHANGED)
depth_img = depth_img.astype(np.float32) / 100000 # translate to meters
depth_img_pil = Image.fromarray(depth_img)
# binary push image target, we need boundary and some extra
push_depth_img = np.copy(depth_img)
push_depth_img[push_depth_img <= DEPTH_MIN] = 0
push_depth_img[push_depth_img > DEPTH_MIN] = 1
push_depth_large = cv2.filter2D(push_depth_img, -1, self.push_large_kernel)
push_depth_large[push_depth_large < 1] = 0
push_depth_large[push_depth_large > 1] = 1
push_depth_small = cv2.filter2D(push_depth_img, -1, self.push_small_kernel)
push_depth_small[push_depth_small < 1] = 0
push_depth_small[push_depth_small > 1] = 1
push_depth_final = push_depth_large - push_depth_small
push_depth_final[push_depth_final < 0] = 0
# prepare q values
push_depth_final[push_depth_final == 1] = PUSH_Q
push_depth_final[push_depth_final == 0] = 0
target_push_img_pil = Image.fromarray(push_depth_final)
# binary grasp image target, we need center part
grasp_depth_img = np.copy(depth_img)
grasp_depth_img[grasp_depth_img <= DEPTH_MIN] = -100
grasp_depth_img[grasp_depth_img > DEPTH_MIN] = 1
grasp_depth = cv2.filter2D(grasp_depth_img, -1, self.grasp_kernel)
grasp_depth[grasp_depth < 1] = 0
grasp_depth[grasp_depth > 1] = 1
# prepare q values
grasp_depth[grasp_depth == 1] = GRASP_Q
grasp_depth[grasp_depth == 0] = 0
target_grasp_img_pil = Image.fromarray(grasp_depth)
color_img_pil, depth_img_pil, target_push_img_pil, target_grasp_img_pil = self.transforms(
color_img_pil, depth_img_pil, target_push_img_pil, target_grasp_img_pil)
return color_img_pil, depth_img_pil, target_push_img_pil, target_grasp_img_pil
def __len__(self):
return len(self.color_imgs)
@torch.no_grad()
def transforms(self, color_heightmap, depth_heightmap, target_push_heightmap, target_grasp_heightmap):
# Add extra padding (to handle rotations inside network)
diag_length = float(color_heightmap.size[0]) * np.sqrt(2)
diag_length = np.ceil(diag_length / 32) * 32
padding_width = int((diag_length - color_heightmap.size[0]) / 2)
color_heightmap_pad = TF.pad(color_heightmap, padding_width, fill=0, padding_mode='constant')
depth_heightmap_pad = TF.pad(depth_heightmap, padding_width, fill=0, padding_mode='constant')
depth_heightmap_pad_push = TF.pad(depth_heightmap, padding_width, fill=-1, padding_mode='constant')
target_push_heightmap_pad = TF.pad(target_push_heightmap, padding_width, fill=0, padding_mode='constant')
target_grasp_heightmap_pad = TF.pad(target_grasp_heightmap, padding_width, fill=0, padding_mode='constant')
# Random rotate
rotate_idx = random.randint(0, self.num_rotations - 1)
rotate_theta = rotate_idx * (360 / self.num_rotations)
color_heightmap_pad = TF.rotate(color_heightmap_pad, rotate_theta)
depth_heightmap_pad = TF.rotate(depth_heightmap_pad, rotate_theta)
depth_heightmap_pad_push = TF.rotate(depth_heightmap_pad_push, rotate_theta)
target_push_heightmap_pad = TF.rotate(target_push_heightmap_pad, rotate_theta)
target_grasp_heightmap_pad = TF.rotate(target_grasp_heightmap_pad, rotate_theta)
color_heightmap_pad = np.array(color_heightmap_pad)
depth_heightmap_pad = np.array(depth_heightmap_pad)
depth_heightmap_pad_push = np.array(depth_heightmap_pad_push)
target_push_heightmap_pad = np.array(target_push_heightmap_pad)
target_grasp_heightmap_pad = np.array(target_grasp_heightmap_pad)
        # Post-process for pushing: only a pixel that has something on its right
        # (based on the heightmap) stays 1, otherwise it would be an empty push;
        # also zero it out if the push would end outside the workspace
x_y_idx = np.argwhere(target_push_heightmap_pad > 0)
# assume the gripper is 2 cm wide
for idx in x_y_idx:
x, y = tuple(idx)
area = depth_heightmap_pad[max(0, x - 4):min(depth_heightmap_pad.shape[0], x + 5),
min(depth_heightmap_pad.shape[1], y + 5):min(depth_heightmap_pad.shape[1], y + 30)] # 5x40 in image size, so 5 cm of push
if np.sum(area > DEPTH_MIN) == 0:
target_push_heightmap_pad[x, y] = 0
else:
area = depth_heightmap_pad_push[max(0, x - 4):min(depth_heightmap_pad_push.shape[0], x + 5),
min(depth_heightmap_pad_push.shape[1] - 1, y + 75)]
if np.sum(area < 0) > 0: # out of the workspace
target_push_heightmap_pad[x, y] = 0
        # Post-process for grasping: only a pixel with clearance on its left/right (based on the heightmap) stays 1
x_y_idx = np.argwhere(target_grasp_heightmap_pad > 0)
for idx in x_y_idx:
x, y = tuple(idx)
if is_real:
left_area = depth_heightmap_pad[max(0, x - 4):min(depth_heightmap_pad.shape[0], x + 5),
max(0, y - 27):max(0, y - 22)] # 2x3 pixels in each side
right_area = depth_heightmap_pad[max(0, x - 4):min(depth_heightmap_pad.shape[0], x + 5),
min(depth_heightmap_pad.shape[1] - 1, y + 23):min(depth_heightmap_pad.shape[1], y + 28)] # 2x3 pixels in each side
if ((np.sum(left_area > DEPTH_MIN) > 0 and np.sum((left_area - depth_heightmap_pad[x, y]) > -0.05) > 0) or
(np.sum(right_area > DEPTH_MIN) > 0 and np.sum((right_area - depth_heightmap_pad[x, y]) > -0.05) > 0)):
target_grasp_heightmap_pad[x, y] = 0
else:
left_area = depth_heightmap_pad[max(0, x - 4):min(depth_heightmap_pad.shape[0], x + 5),
max(0, y - 28):max(0, y - 18)] # 2x3 pixels in each side
right_area = depth_heightmap_pad[max(0, x - 4):min(depth_heightmap_pad.shape[0], x + 5),
min(depth_heightmap_pad.shape[1] - 1, y + 19):min(depth_heightmap_pad.shape[1], y + 29)] # 2x3 pixels in each side
if ((np.sum(left_area > DEPTH_MIN) > 0 and np.sum((left_area - depth_heightmap_pad[x, y]) > -0.04) > 0) or
(np.sum(right_area > DEPTH_MIN) > 0 and np.sum((right_area - depth_heightmap_pad[x, y]) > -0.04) > 0)):
target_grasp_heightmap_pad[x, y] = 0
# To tensor
color_heightmap_pad = TF.to_tensor(color_heightmap_pad)
depth_heightmap_pad = TF.to_tensor(depth_heightmap_pad)
target_push_heightmap_pad = TF.to_tensor(target_push_heightmap_pad)
target_grasp_heightmap_pad = TF.to_tensor(target_grasp_heightmap_pad)
# Normalize
color_heightmap_pad = TF.normalize(color_heightmap_pad, color_mean, color_std, inplace=True)
depth_heightmap_pad = TF.normalize(depth_heightmap_pad, depth_mean, depth_std, inplace=True)
return color_heightmap_pad, depth_heightmap_pad, target_push_heightmap_pad, target_grasp_heightmap_pad
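# Illustrative sketch (not part of the original dataset code): the push target
# above is built by dilating the object mask with a large kernel and with a
# small kernel and subtracting the two, which leaves a band just outside the
# object boundary where a push should start. Kernel and mask sizes here are
# arbitrary toy values, not the ones used above.
def _push_band_sketch():
    mask = np.zeros((64, 64), dtype=np.float32)
    mask[24:40, 24:40] = 1.0  # toy object mask
    large = cv2.filter2D(mask, -1, np.ones((15, 15)))
    small = cv2.filter2D(mask, -1, np.ones((5, 5)))
    large[large < 1] = 0
    large[large > 1] = 1
    small[small < 1] = 0
    small[small > 1] = 1
    return np.clip(large - small, 0, 1)  # ring just outside the object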
class PushPredictionMultiDatasetEvaluation(torch.utils.data.Dataset):
'''
Push Prediction Dataset for Evaluation
Input: Image, Action (x, y), Pose (x, y)
Output: Diff_x, Diff_y, Diff_angle
'''
def __init__(self, depth_imgs, actions, poses, binary_objs, is_padding=False):
self.is_padding = is_padding
self.total_obj = total_obj
self.distance = distance
self.workspace_limits = workspace_limits
self.heightmap_resolution = heightmap_resolution
self.prev_depth_imgs = []
self.prev_poses = []
self.actions = []
self.binary_objs = []
print('Total files', len(depth_imgs), len(actions), len(poses))
for i in range(len(actions)):
self.prev_depth_imgs.append(depth_imgs[i][48:272, 48:272])
self.prev_poses.append(poses[i])
self.actions.append(actions[i])
self.binary_objs.append(binary_objs[i])
print('Used files', len(self.prev_depth_imgs), len(self.prev_poses), len(self.actions), len(self.binary_objs))
assert len(set([len(self.prev_depth_imgs), len(self.prev_poses),
len(self.actions), len(self.binary_objs)])) == 1
def __getitem__(self, idx):
# depth image input
prev_depth_img = self.prev_depth_imgs[idx]
# number of objects
num_obj = len(self.prev_poses[idx])
# poses
prev_poses = torch.tensor(self.prev_poses[idx])
# action
action_start = torch.tensor(self.actions[idx])
action_end = torch.tensor([action_start[0] + self.distance / 0.2, action_start[1]])
        # previous depth image converted to binary masks
        # object mask
prev_depth_binary_img_obj = np.copy(prev_depth_img)
prev_depth_binary_img_obj[prev_depth_binary_img_obj <= DEPTH_MIN] = 0
prev_depth_binary_img_obj[prev_depth_binary_img_obj > DEPTH_MIN] = 1
temp = np.zeros((680, 680))
temp[228:452, 228:452] = prev_depth_binary_img_obj
prev_depth_binary_img_obj = temp[int(action_start[1] + 228) - 112:int(action_start[1] + 228) +
112, int(action_start[0] + 228) - 40:int(action_start[0] + 228) + 184]
# action
prev_depth_binary_img_action = np.zeros_like(prev_depth_img)
prev_depth_binary_img_action[int(action_start[1]) - 11:int(action_start[1]) +
12, int(action_start[0]):int(action_end[0])] = 1
temp = np.zeros((680, 680))
temp[228:452, 228:452] = prev_depth_binary_img_action
prev_depth_binary_img_action = temp[int(action_start[1] + 228) - 112:int(action_start[1] + 228) +
112, int(action_start[0] + 228) - 40:int(action_start[0] + 228) + 184]
binary_objs = self.binary_objs[idx]
temp = np.zeros_like(binary_objs[:, :, 0:1])
if self.is_padding:
for _ in range(total_obj - num_obj):
binary_objs = np.concatenate((binary_objs, temp), axis=-1)
# centralize
action_start_ori = torch.clone(action_start).detach()
action_end_ori = torch.clone(action_end).detach()
action_start[0] -= 40
action_start[1] -= 112
for pi in range(num_obj):
prev_poses[pi] = prev_poses[pi] - action_start
prev_poses = prev_poses.flatten()
if self.is_padding:
prev_poses = torch.cat((prev_poses, torch.tensor(
[-100, -100] * (self.total_obj - num_obj), dtype=torch.float32)))
action = torch.tensor([40.0, 112.0, 40.0 + self.distance / 0.2, 112.0])
used_binary_img, binary_objs_total = self.transforms(
prev_depth_binary_img_obj, prev_depth_binary_img_action, binary_objs)
# sort input based on the distance to the action
prev_poses_sort = prev_poses.numpy()
if self.is_padding:
prev_poses_sort = prev_poses_sort.reshape(self.total_obj, -1)
else:
prev_poses_sort = prev_poses_sort.reshape(num_obj, -1)
action_start = np.array([40, 112])
sort_idx = np.argsort(np.sum(np.square(prev_poses_sort - action_start), axis=1))
prev_poses_sort = prev_poses_sort[sort_idx].flatten()
prev_poses = torch.tensor(prev_poses_sort, dtype=torch.float32)
binary_objs_total = binary_objs_total[sort_idx]
return prev_poses, action, action_start_ori, action_end_ori, used_binary_img, binary_objs_total, num_obj, sort_idx
def __len__(self):
return len(self.actions)
@torch.no_grad()
def transforms(self, prev_depth_binary_img_obj, prev_depth_binary_img_action, binary_objs):
prev_depth_binary_img_obj = TF.to_tensor(prev_depth_binary_img_obj)
prev_depth_binary_img_action = TF.to_tensor(prev_depth_binary_img_action)
used_binary_img = torch.cat((prev_depth_binary_img_obj, prev_depth_binary_img_action), dim=0)
used_binary_img = TF.normalize(used_binary_img, used_binary_mean, used_binary_std, inplace=True)
binary_objs_total = TF.to_tensor(binary_objs)
current_binary_mean = binary_mean * binary_objs_total.size(0)
current_binary_std = binary_std * binary_objs_total.size(0)
binary_objs_total = TF.normalize(binary_objs_total, current_binary_mean, current_binary_std, inplace=True)
return used_binary_img, binary_objs_total
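# Illustrative sketch (not part of the original code): the crops above embed the
# 224x224 heightmap into a 680x680 zero canvas and then cut out a 224x224 window
# offset so that the push start always lands at pixel (x=40, y=112); this is why
# the action fed to the network is the constant [40, 112, 40 + distance, 112].
def _action_centric_crop_sketch(depth_img, action_start):
    canvas = np.zeros((680, 680), dtype=depth_img.dtype)
    canvas[228:452, 228:452] = depth_img  # 224x224 heightmap centered in the canvas
    cy = int(action_start[1]) + 228
    cx = int(action_start[0]) + 228
    return canvas[cy - 112:cy + 112, cx - 40:cx + 184]  # 224x224 action-centric crop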
class ClusterRandomSampler(Sampler):
"""Takes a dataset with cluster_indices property, cuts it into batch-sized chunks
Drops the extra items, not fitting into exact batches
Arguments:
data_source (Dataset): a Dataset to sample from. Should have a cluster_indices property
batch_size (int): a batch size that you would like to use later with Dataloader class
shuffle (bool): whether to shuffle the data or not
"""
def __init__(self, data_source, batch_size, shuffle=True):
self.data_source = data_source
self.batch_size = batch_size
self.shuffle = shuffle
def flatten_list(self, lst):
return [item for sublist in lst for item in sublist]
def __iter__(self):
batch_lists = []
for _, cluster_indices in self.data_source.cluster_indices.items():
if self.shuffle:
random.shuffle(cluster_indices)
batches = [cluster_indices[i:i + self.batch_size] for i in range(0, len(cluster_indices), self.batch_size)]
            # filter out the shorter batches
batches = [_ for _ in batches if len(_) == self.batch_size]
batch_lists.append(batches)
# flatten lists and shuffle the batches if necessary
# this works on batch level
lst = self.flatten_list(batch_lists)
if self.shuffle:
random.shuffle(lst)
# final flatten - produce flat list of indexes
lst = self.flatten_list(lst)
return iter(lst)
def __len__(self):
return len(self.data_source)
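# Illustrative usage sketch (assumes `dataset` exposes the cluster_indices dict
# described above; the batch size is an arbitrary example): because the sampler
# pre-forms same-cluster batches and then flattens them, the DataLoader
# batch_size should match the sampler batch_size so that every yielded batch
# stays within a single cluster.
def _cluster_sampler_usage_sketch(dataset, batch_size=16):
    sampler = ClusterRandomSampler(dataset, batch_size, shuffle=True)
    loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=sampler)
    return loader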
class PushPredictionMultiDataset(torch.utils.data.Dataset):
'''
    Push Prediction Dataset for training the push prediction network.
    The push distance is fixed and can be 5 or 10 cm.
    Objects are tracked by color, so we assume each object has a unique color; however, this constraint is not needed in evaluation.
Input: Image, Action (x, y), Pose (x, y)
Output: Diff_x, Diff_y, Diff_angle
'''
def __init__(self, root, distance, is_padding=False, cutoff=None):
self.root = root
self.total_obj = total_obj
self.is_padding = is_padding
# load all image files, sorting them to ensure that they are aligned
color_imgs = list(sorted(os.listdir(os.path.join(root, "color-heightmaps"))))
depth_imgs = list(sorted(os.listdir(os.path.join(root, "depth-heightmaps"))))
actions = list(sorted(os.listdir(os.path.join(root, "actions"))))
poses = list(sorted(os.listdir(os.path.join(root, "poses"))))
self.distance = distance
self.workspace_limits = workspace_limits
self.heightmap_resolution = heightmap_resolution
self.prev_color_imgs = []
self.prev_depth_imgs = []
self.prev_poses = []
self.actions = []
self.next_color_imgs = []
self.next_depth_imgs = []
self.next_poses = []
self.cluster_indices = {}
print('Total files', len(color_imgs), len(depth_imgs), len(actions), len(poses))
# only keep data that we will use, based on distance
if self.distance == 10:
for i in range(len(actions)):
assert len(set([actions[i][:7], color_imgs[i][:7], depth_imgs[i][:7], poses[i][:7]])
) == 1, (actions[i][:7], color_imgs[i][:7], depth_imgs[i][:7], poses[i][:7])
if cutoff is not None:
if int(actions[i][:3]) > cutoff:
break
if actions[i].endswith('00.action.txt'):
self.prev_color_imgs.append(color_imgs[i])
self.prev_depth_imgs.append(depth_imgs[i])
self.prev_poses.append(poses[i])
self.actions.append(actions[i])
                    # create cluster indices, so that data with the same number of objects is put together
poses_path = os.path.join(self.root, "poses", poses[i])
with open(poses_path, 'r') as file:
filedata = file.read()
poses_str = filedata.split(' ')
num_obj = len(poses_str) // 5
if num_obj in self.cluster_indices:
self.cluster_indices[num_obj].append(len(self.prev_poses) - 1)
else:
self.cluster_indices[num_obj] = [len(self.prev_poses) - 1]
elif actions[i].endswith('10.action.txt'):
self.next_color_imgs.append(color_imgs[i])
self.next_depth_imgs.append(depth_imgs[i])
self.next_poses.append(poses[i])
if self.distance == 5:
for i in range(len(actions)):
assert len(set([actions[i][:7], color_imgs[i][:7], depth_imgs[i][:7], poses[i][:7]])
) == 1, (actions[i][:7], color_imgs[i][:7], depth_imgs[i][:7], poses[i][:7])
# only use a certain amount of data
if cutoff is not None:
if i >= 11 * int(cutoff) * 100:
break
# work in real world
if actions[i].endswith('00.action.txt'):
self.prev_color_imgs.append(color_imgs[i])
self.prev_depth_imgs.append(depth_imgs[i])
self.prev_poses.append(poses[i])
self.actions.append(actions[i])
                    # create cluster indices, so that data with the same number of objects is put together
poses_path = os.path.join(self.root, "poses", poses[i])
with open(poses_path, 'r') as file:
filedata = file.read()
poses_str = filedata.split(' ')
num_obj = len(poses_str) // 5
if num_obj in self.cluster_indices:
self.cluster_indices[num_obj].append(len(self.prev_poses) - 1)
else:
self.cluster_indices[num_obj] = [len(self.prev_poses) - 1]
elif actions[i].endswith('5.action.txt'):
self.next_color_imgs.append(color_imgs[i])
self.next_depth_imgs.append(depth_imgs[i])
self.next_poses.append(poses[i])
# work in sim, data accumulation, for a 10 cm push, we can have 0-5, 1-6, 2-7, ..., 5-10
# if len(set([actions[i][:7], color_imgs[i][:7], depth_imgs[i][:7], poses[i][:7]])) != 1:
# print(actions[i][:7], color_imgs[i][:7], depth_imgs[i][:7], poses[i][:7])
# if (actions[i].endswith('00.action.txt') or actions[i].endswith('01.action.txt') or actions[i].endswith('02.action.txt') or
# actions[i].endswith('03.action.txt') or actions[i].endswith('04.action.txt')):
# self.prev_color_imgs.append(color_imgs[i])
# self.prev_depth_imgs.append(depth_imgs[i])
# self.prev_poses.append(poses[i])
# self.actions.append(actions[i])
# # create cluster indices, so the the data with same amount of object will be put together
# poses_path = os.path.join(self.root, "poses", poses[i])
# with open(poses_path, 'r') as file:
# filedata = file.read()
# poses_str = filedata.split(' ')
# num_obj = len(poses_str) // 5
# if num_obj in self.cluster_indices:
# self.cluster_indices[num_obj].append(len(self.prev_poses) - 1)
# else:
# self.cluster_indices[num_obj] = [len(self.prev_poses) - 1]
# elif (actions[i].endswith('06.action.txt') or actions[i].endswith('07.action.txt') or actions[i].endswith('08.action.txt') or
# actions[i].endswith('09.action.txt') or actions[i].endswith('10.action.txt')):
# self.next_color_imgs.append(color_imgs[i])
# self.next_depth_imgs.append(depth_imgs[i])
# self.next_poses.append(poses[i])
# else:
# self.prev_color_imgs.append(color_imgs[i])
# self.prev_depth_imgs.append(depth_imgs[i])
# self.prev_poses.append(poses[i])
# self.actions.append(actions[i])
# self.next_color_imgs.append(color_imgs[i])
# self.next_depth_imgs.append(depth_imgs[i])
# self.next_poses.append(poses[i])
# # create cluster indices, so the the data with same amount of object will be put together
# poses_path = os.path.join(self.root, "poses", poses[i])
# with open(poses_path, 'r') as file:
# filedata = file.read()
# poses_str = filedata.split(' ')
# num_obj = len(poses_str) // 5
# if num_obj in self.cluster_indices:
# self.cluster_indices[num_obj].append(len(self.prev_poses) - 1)
# else:
# self.cluster_indices[num_obj] = [len(self.prev_poses) - 1]
print('Used files', len(self.prev_color_imgs), len(self.next_color_imgs), len(self.prev_depth_imgs), len(self.next_depth_imgs),
len(self.prev_poses), len(self.next_poses), len(self.actions))
assert len(set([len(self.prev_color_imgs), len(self.next_color_imgs), len(self.prev_depth_imgs), len(self.next_depth_imgs),
len(self.prev_poses), len(self.next_poses), len(self.actions)])) == 1
def __getitem__(self, idx):
# load data path
prev_color_path = os.path.join(self.root, "color-heightmaps", self.prev_color_imgs[idx])
prev_depth_path = os.path.join(self.root, "depth-heightmaps", self.prev_depth_imgs[idx])
prev_poses_path = os.path.join(self.root, "poses", self.prev_poses[idx])
actions_path = os.path.join(self.root, "actions", self.actions[idx])
next_color_path = os.path.join(self.root, "color-heightmaps", self.next_color_imgs[idx])
next_depth_path = os.path.join(self.root, "depth-heightmaps", self.next_depth_imgs[idx])
next_poses_path = os.path.join(self.root, "poses", self.next_poses[idx])
# color image input
prev_color_img = cv2.imread(prev_color_path)
prev_color_img = cv2.cvtColor(prev_color_img, cv2.COLOR_BGR2RGB)
next_color_img = cv2.imread(next_color_path)
next_color_img = cv2.cvtColor(next_color_img, cv2.COLOR_BGR2RGB)
# depth image input
prev_depth_img = cv2.imread(prev_depth_path, cv2.IMREAD_UNCHANGED)
prev_depth_img = prev_depth_img.astype(np.float32) / 100000 # translate to meters 100000
next_depth_img = cv2.imread(next_depth_path, cv2.IMREAD_UNCHANGED)
next_depth_img = next_depth_img.astype(np.float32) / 100000 # translate to meters 100000
next_depth_img[next_depth_img < 0] = 0
# poses
with open(prev_poses_path, 'r') as file:
filedata = file.read()
poses = filedata.split(' ')
num_obj = len(poses) // 5
prev_poses = []
for pi in range(num_obj):
x = (float(poses[pi * 5]) - self.workspace_limits[0][0]) / self.heightmap_resolution
y = (float(poses[pi * 5 + 1]) - self.workspace_limits[1][0]) / self.heightmap_resolution
angle_y = degrees(float(poses[pi * 5 + 4]))
prev_poses.extend([x, y, angle_y])
prev_poses = torch.tensor(prev_poses)
with open(next_poses_path, 'r') as file:
filedata = file.read()
poses = filedata.split(' ')
assert len(poses) // 5 == num_obj
next_poses = []
for pi in range(num_obj):
x = (float(poses[pi * 5]) - self.workspace_limits[0][0]) / self.heightmap_resolution
y = (float(poses[pi * 5 + 1]) - self.workspace_limits[1][0]) / self.heightmap_resolution
angle_y = degrees(float(poses[pi * 5 + 4]))
next_poses.extend([x, y, angle_y])
next_poses = torch.tensor(next_poses)
# action
with open(actions_path, 'r') as file:
filedata = file.read()
x, y = filedata.split(' ')
x = (float(x) - self.workspace_limits[0][0]) / self.heightmap_resolution
y = (float(y) - self.workspace_limits[1][0]) / self.heightmap_resolution
action_start = torch.tensor([float(x), float(y)])
action_end = torch.tensor([float(x + self.distance / 0.2), float(y)])
        # previous depth image converted to binary masks
        # object mask
prev_depth_binary_img_obj = np.copy(prev_depth_img)
prev_depth_binary_img_obj[prev_depth_binary_img_obj <= DEPTH_MIN] = 0
prev_depth_binary_img_obj[prev_depth_binary_img_obj > DEPTH_MIN] = 1
temp = np.zeros((680, 680))
temp[228:452, 228:452] = prev_depth_binary_img_obj
prev_depth_binary_img_obj = temp[int(action_start[1] + 228) - 112:int(action_start[1] + 228) +
112, int(action_start[0] + 228) - 40:int(action_start[0] + 228) + 184]
# action
prev_depth_binary_img_action = np.zeros_like(prev_depth_img)
prev_depth_binary_img_action[int(action_start[1]) - 11:int(action_start[1]) +
12, int(action_start[0]):int(action_end[0])] = 1
temp = np.zeros((680, 680))
temp[228:452, 228:452] = prev_depth_binary_img_action
prev_depth_binary_img_action = temp[int(action_start[1] + 228) - 112:int(action_start[1] + 228) +
112, int(action_start[0] + 228) - 40:int(action_start[0] + 228) + 184]
# TODO: assume pose in order of blue, green, brown, orange, yellow
imgcolor = np.copy(prev_color_img)
imgcolor = imgcolor.astype(np.uint8)
temp = np.zeros((480, 480, 3), dtype=np.uint8)
temp[128:(480 - 128), 128:(480 - 128), :] = imgcolor
imgcolor = cv2.cvtColor(temp, cv2.COLOR_RGB2HSV)
binary_objs = []
for ci in range(num_obj):
crop = imgcolor[int(prev_poses[ci * 3 + 1]) + 128 - 30:int(prev_poses[ci * 3 + 1]) +
128 + 30, int(prev_poses[ci * 3]) + 128 - 30:int(prev_poses[ci * 3]) + 128 + 30, :]
assert crop.shape[0] == 60 and crop.shape[1] == 60, (self.prev_color_imgs[idx], crop.shape)
mask = cv2.inRange(crop, colors_lower[ci], colors_upper[ci])
binary_objs.append(mask)
# delta poses
deltas = []
for pi in range(num_obj):
d_x = next_poses[pi * 3] - prev_poses[pi * 3]
d_y = next_poses[pi * 3 + 1] - prev_poses[pi * 3 + 1]
d_a = -(next_poses[pi * 3 + 2] - prev_poses[pi * 3 + 2])
if d_a < -180:
d_a = 360 + d_a
elif d_a > 180:
d_a = d_a - 360
assert abs(d_a) < 120, (pi, d_a, self.prev_color_imgs[idx],
self.next_color_imgs[idx], prev_poses, next_poses)
deltas.extend([d_x, d_y, d_a])
if self.is_padding:
deltas.extend([0, 0, 0] * (self.total_obj - num_obj))
deltas = torch.tensor(deltas, dtype=torch.float32)
# centralize
action_start_ori = torch.clone(action_start).detach()
action_end_ori = torch.clone(action_end).detach()
action_start[0] -= 40
action_start[1] -= 112
for pi in range(num_obj):
prev_poses[pi * 3: pi * 3 + 2] = prev_poses[pi * 3: pi * 3 + 2] - action_start
next_poses[pi * 3: pi * 3 + 2] = next_poses[pi * 3: pi * 3 + 2] - action_start
prev_poses_no_angle = []
for pi in range(num_obj):
prev_poses_no_angle.extend([prev_poses[pi * 3], prev_poses[pi * 3 + 1]])
if self.is_padding:
prev_poses_no_angle.extend([-100, -100] * (self.total_obj - num_obj))
next_poses_no_angle = []
for pi in range(num_obj):
next_poses_no_angle.extend([next_poses[pi * 3], next_poses[pi * 3 + 1]])
if self.is_padding:
next_poses_no_angle.extend([-100, -100] * (self.total_obj - num_obj))
prev_poses = torch.tensor(prev_poses_no_angle, dtype=torch.float32)
next_poses = torch.tensor(next_poses_no_angle, dtype=torch.float32)
action = torch.tensor([40.0, 112.0, 40.0 + self.distance / 0.2, 112.0])
prev_color_img, prev_depth_img, next_color_img, next_depth_img, used_binary_img, binary_objs_total = self.transforms(
prev_color_img, prev_depth_img, next_color_img, next_depth_img, prev_depth_binary_img_obj, prev_depth_binary_img_action, binary_objs)
# sort input based on the distance to the action
prev_poses_sort = prev_poses.numpy()
if self.is_padding:
prev_poses_sort = prev_poses_sort.reshape(self.total_obj, -1)
else:
prev_poses_sort = prev_poses_sort.reshape(num_obj, -1)
action_start = np.array([40, 112])
sort_idx = np.argsort(np.sum(np.square(prev_poses_sort - action_start), axis=1))
prev_poses_sort = prev_poses_sort[sort_idx].flatten()
prev_poses = torch.tensor(prev_poses_sort, dtype=torch.float32)
next_poses_sort = next_poses.numpy()
if self.is_padding:
next_poses_sort = next_poses_sort.reshape(self.total_obj, -1)
else:
next_poses_sort = next_poses_sort.reshape(num_obj, -1)
next_poses_sort = next_poses_sort[sort_idx].flatten()
next_poses = torch.tensor(next_poses_sort, dtype=torch.float32)
deltas_sort = deltas.numpy()
if self.is_padding:
deltas_sort = deltas_sort.reshape(self.total_obj, -1)
else:
deltas_sort = deltas_sort.reshape(num_obj, -1)
deltas_sort = deltas_sort[sort_idx].flatten()
deltas = torch.tensor(deltas_sort, dtype=torch.float32)
binary_objs_total = binary_objs_total[sort_idx]
return prev_color_img, prev_depth_img, next_color_img, next_depth_img, used_binary_img, prev_poses, next_poses, action, deltas, self.prev_color_imgs[
idx], self.next_color_imgs[idx], action_start_ori, action_end_ori, binary_objs_total, num_obj, sort_idx
def __len__(self):
return len(self.actions)
@torch.no_grad()
def transforms(self, prev_color_img, prev_depth_img, next_color_img, next_depth_img,
prev_depth_binary_img_obj, prev_depth_binary_img_action, binary_objs):
# To tensor
prev_color_img = TF.to_tensor(prev_color_img)
prev_depth_img = TF.to_tensor(prev_depth_img)
next_color_img = TF.to_tensor(next_color_img)
next_depth_img = TF.to_tensor(next_depth_img)
prev_depth_binary_img_obj = TF.to_tensor(prev_depth_binary_img_obj)
prev_depth_binary_img_action = TF.to_tensor(prev_depth_binary_img_action)
used_binary_img = torch.cat((prev_depth_binary_img_obj, prev_depth_binary_img_action), dim=0)
used_binary_img = TF.normalize(used_binary_img, used_binary_mean, used_binary_std, inplace=True)
binary_objs_total = TF.to_tensor(binary_objs[0])
for ci in range(1, len(binary_objs)):
temp = TF.to_tensor(binary_objs[ci])
temp = TF.normalize(temp, binary_mean, binary_std, inplace=True)
binary_objs_total = torch.cat((binary_objs_total, temp), dim=0)
if self.is_padding:
if len(binary_objs) < self.total_obj:
                temp = np.zeros_like(binary_objs[0])
import os
import numba as nb
import numpy as np
from numba_celltree import geometry_utils as gu
from numba_celltree.constants import Box, Point, Vector
def test_to_vector():
a = Point(0.0, 0.0)
b = Point(1.0, 2.0)
actual = gu.to_vector(a, b)
assert isinstance(actual, Vector)
assert actual.x == 1.0
assert actual.y == 2.0
def test_as_point():
a = np.array([0.0, 1.0])
actual = gu.as_point(a)
assert isinstance(actual, Point)
assert actual.x == 0.0
assert actual.y == 1.0
def test_to_point():
a = Point(0.0, 0.0)
b = Point(1.0, 2.0)
V = gu.to_vector(a, b)
t = 0.0
actual = gu.to_point(t, a, V)
assert np.allclose(actual, a)
t = 1.0
actual = gu.to_point(t, a, V)
assert np.allclose(actual, b)
t = 0.5
actual = gu.to_point(t, a, V)
assert np.allclose(actual, Point(0.5, 1.0))
def test_cross_product():
u = Vector(1.0, 2.0)
v = Vector(3.0, 4.0)
assert np.allclose(gu.cross_product(u, v), np.cross(u, v))
def test_dot_product():
u = Vector(1.0, 2.0)
v = Vector(3.0, 4.0)
assert np.allclose(gu.dot_product(u, v), np.dot(u, v))
def test_polygon_length():
face = np.array([0, 1, 2])
assert gu.polygon_length(face) == 3
assert gu.polygon_length(face) == 3
face = np.array([0, 1, 2, -1, -1])
assert gu.polygon_length(face) == 3
face = np.array([0, 1, 2, 3, -1])
assert gu.polygon_length(face) == 4
def test_polygon_area():
# square
p = np.array(
[
[0.0, 0.0],
[1.0, 0.0],
[1.0, 1.0],
[0.0, 1.0],
]
)
assert np.allclose(gu.polygon_area(p), 1.0)
# triangle
p = np.array(
[
[0.0, 0.0],
[1.0, 0.0],
[1.0, 1.0],
]
)
assert np.allclose(gu.polygon_area(p), 0.5)
# pentagon, counter-clockwise
p = np.array(
[
[0.0, 0.0],
[1.0, 0.0],
[1.0, 1.0],
[0.5, 2.0],
[0.0, 1.0],
]
)
assert np.allclose(gu.polygon_area(p), 1.5)
# clockwise
assert np.allclose(gu.polygon_area(p[::-1]), 1.5)
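# Illustrative sketch (not part of the library under test): the expected areas
# above follow the shoelace formula
#     A = 0.5 * |sum_i (x_i * y_{i+1} - x_{i+1} * y_i)|,
# e.g. for the pentagon 0.5 * |0 + 1 + 1.5 + 0.5 + 0| = 1.5.
def _shoelace_area(p):
    x, y = p[:, 0], p[:, 1]
    return 0.5 * abs(np.sum(x * np.roll(y, -1) - np.roll(x, -1) * y))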
def test_point_in_polygon():
poly = np.array(
[
[0.0, 0.0],
[1.0, 0.0],
[1.0, 1.0],
]
)
assert gu.point_in_polygon(Point(0.5, 0.25), poly)
assert not gu.point_in_polygon(Point(1.5, 0.25), poly)
assert gu.point_in_polygon(Point(0.0, 0.0), poly)
assert gu.point_in_polygon(Point(0.0, 0.0), poly[::-1])
assert gu.point_in_polygon(Point(0.5, 0.5), poly)
assert gu.point_in_polygon(Point(0.5, 0.5), poly[::-1])
assert not gu.point_in_polygon(Point(1.0, 1.0), poly)
assert not gu.point_in_polygon(Point(1.0, 1.0), poly[::-1])
def test_boxes_intersect():
# Identity
a = Box(0.0, 1.0, 0.0, 1.0)
b = a
assert gu.boxes_intersect(a, b)
assert gu.boxes_intersect(b, a)
# Overlap
b = Box(0.5, 1.5, 0.0, 1.0)
assert gu.boxes_intersect(a, b)
assert gu.boxes_intersect(b, a)
# No overlap
b = Box(1.5, 2.5, 0.5, 1.0)
assert not gu.boxes_intersect(a, b)
assert not gu.boxes_intersect(b, a)
# Different identity
b = a
assert gu.boxes_intersect(a, b)
assert gu.boxes_intersect(b, a)
# Inside
a = Box(0.0, 1.0, 0.0, 1.0)
b = Box(0.25, 0.75, 0.25, 0.75)
assert gu.boxes_intersect(a, b)
assert gu.boxes_intersect(b, a)
def test_bounding_box():
face = np.array([0, 1, 2])
vertices = np.array(
[
[0.0, 1.0],
[1.0, 0.0],
[1.0, 1.0],
]
)
assert gu.bounding_box(face, vertices) == (0.0, 1.0, 0.0, 1.0)
face = np.array([0, 1, 2, -1, -1])
assert gu.bounding_box(face, vertices) == (0.0, 1.0, 0.0, 1.0)
def test_build_bboxes():
faces = np.array(
[
[0, 1, 2, -1],
[0, 1, 2, 3],
]
)
vertices = np.array(
[
[0.0, 5.0],
[5.0, 0.0],
[5.0, 5.0],
[0.0, 5.0],
]
)
expected = np.array(
[
[0.0, 5.0, 0.0, 5.0],
[0.0, 5.0, 0.0, 5.0],
]
)
actual = gu.build_bboxes(faces, vertices)
assert np.array_equal(actual, expected)
def test_copy_vertices():
"""
    This has to be tested inside a numba-jitted function, because the vertices
    are copied to a stack-allocated array. This array is not returned properly
    to dynamic Python. This is OK: these arrays are exclusively for internal
    use to temporarily store values.
"""
if os.environ.get("NUMBA_DISABLE_JIT", "0") == "0":
@nb.njit()
def test():
face = np.array([0, 1, 2, -1, -1])
vertices = np.array(
[
[0.0, 1.0],
[1.0, 0.0],
[1.0, 1.0],
]
)
expected = vertices.copy()
actual = gu.copy_vertices(vertices, face)
result = True
for i in range(3):
result = result and actual[i, 0] == expected[i, 0]
result = result and actual[i, 1] == expected[i, 1]
return result
assert test()
else:
face = np.array([0, 1, 2, -1, -1])
vertices = np.array(
[
[0.0, 1.0],
[1.0, 0.0],
[1.0, 1.0],
]
)
expected = vertices.copy()
actual = gu.copy_vertices(vertices, face)
assert np.array_equal(actual, expected)
assert len(actual) == 3
def test_copy_vertices_into():
out = np.empty((10, 2))
    face = np.array([0, 1, 2, -1, -1])
import numpy as np
from os import path as osp
from glob import glob
from superpoint.settings import EXPER_PATH
def get_paths(exper_name):
"""
Return a list of paths to the outputs of the experiment.
"""
return glob(osp.join(EXPER_PATH, 'outputs/{}/*.npz'.format(exper_name)))
def compute_tp_fp(data, remove_zero=1e-4, distance_thresh=2, simplified=False):
"""
Compute the true and false positive rates.
"""
# Read data
gt = np.where(data['keypoint_map'])
gt = np.stack([gt[0], gt[1]], axis=-1)
n_gt = len(gt)
prob = data['prob_nms'] if 'prob_nms' in data.files else data['prob']
# Filter out predictions with near-zero probability
mask = np.where(prob > remove_zero)
prob = prob[mask]
pred = np.array(mask).T
# When several detections match the same ground truth point, only pick
# the one with the highest score (the others are false positive)
sort_idx = np.argsort(prob)[::-1]
prob = prob[sort_idx]
pred = pred[sort_idx]
diff = np.expand_dims(pred, axis=1) - np.expand_dims(gt, axis=0)
dist = np.linalg.norm(diff, axis=-1)
matches = np.less_equal(dist, distance_thresh)
tp = []
matched = np.zeros(len(gt))
for m in matches:
correct = np.any(m)
if correct:
gt_idx = np.argmax(m)
tp.append(not matched[gt_idx])
matched[gt_idx] = 1
else:
tp.append(False)
    tp = np.array(tp, bool)
import numpy as np
import numexpr as ne
from math import pi
class Surface:
""""Generation of surface realizations from spectrum"""
def __init__(self, kmax, spectrum, seed=None, xaxis=None):
"""Setup random generator used for realizations
spectrum is a scalar, 1-D array or 2-D array
"""
self.rng = np.random.default_rng(seed=seed)
self.kmax = kmax
self.spectrum = np.array(spectrum, ndmin=1)
self.dx = 2 * pi / kmax
self.xaxis = xaxis
self.g = 9.81
        self.km = 370 # wavenumber at the gravity-capillary wave phase speed minimum
if xaxis is not None:
self.yaxis = None
self.N = spectrum.size
self.kx = kmax / (np.arange(spectrum.size) + 1)
self.ky = None
self.omega = self.ldis_deepwater(self.kx)
self.h_rms = np.sum(self.spectrum)
elif self.spectrum.size == self.spectrum.shape[0]:
self.N = (self.spectrum.shape[0] - 1) * 2
self.Nx = None
self.Ny = None
self.kx = np.arange(self.N // 2 + 1) * kmax / self.N
self.ky = None
self.xaxis = np.arange(self.N) * self.dx
self.yaxis = None
self.omega = self.ldis_deepwater(self.kx)
self.h_rms = np.sqrt(np.sum(self.spectrum) * kmax / self.N)
else:
self.N = None
Nx, self.Ny = self.spectrum.shape
self.Nx = (Nx - 1) * 2
            self.kx = np.arange(self.Nx // 2 + 1)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 12/8/2018 9:00 PM
# @Author : chinshin
# @FileName: parsers.py
import os
from logging import getLogger
import numpy
import pandas
import pickle
from rdkit import Chem
from tqdm import tqdm
from chainer_chemistry.dataset.parsers.base_parser import BaseFileParser
from chainer_chemistry.dataset.preprocessors.common import MolFeatureExtractionError # NOQA
from chainer_chemistry.dataset.preprocessors.mol_preprocessor import MolPreprocessor # NOQA
from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset
import traceback
class CSVFileParserForPair(BaseFileParser):
"""data frame parser
    This FileParser parses a pandas dataframe.
    It should contain columns which contain SMILES as input, and a
    label column which is the target to predict.
Args:
preprocessor (BasePreprocessor): preprocessor instance
labels (str or list or None): labels column
smiles_cols (list): smiles columns
postprocess_label (Callable): post processing function if necessary
postprocess_fn (Callable): post processing function if necessary
logger:
"""
def __init__(self, preprocessor,
labels=None,
smiles_cols=('smiles_1', 'smiles_2'),
postprocess_label=None, postprocess_fn=None,
logger=None):
super(CSVFileParserForPair, self).__init__(preprocessor)
if isinstance(labels, str):
labels = [labels, ]
self.labels = labels # type: list
if not isinstance(smiles_cols, list):
self.smiles_cols = list(smiles_cols)
else:
self.smiles_cols = smiles_cols
self.postprocess_label = postprocess_label
self.postprocess_fn = postprocess_fn
self.logger = logger or getLogger(__name__)
def parse(self, filepath, return_smiles_pair=False, return_smiles_pair_original=False, target_index=None,
return_is_successful=False):
"""parse DataFrame using `preprocessor`
Label is extracted from `labels` columns and input features are
extracted from smiles information in `smiles` column.
Args:
filepath (str): file path to be parsed.
return_smiles_pair (bool): If set to `True`, smiles list is returned in
the key 'smiles', it is a list of SMILES from which input
features are successfully made.
If set to `False`, `None` is returned in the key 'smiles'.
target_index (list or None): target index list to partially extract
dataset. If None (default), all examples are parsed.
return_is_successful (bool): If set to `True`, boolean list is
                returned in the key 'is_successful'. It represents whether
                preprocessing has succeeded or not for each SMILES.
                If set to False, `None` is returned in the key 'is_successful'.
Returns (dict): dictionary that contains Dataset, 1-d numpy array with
dtype=object(string) which is a vector of smiles for each example
or None.
"""
df = pandas.read_csv(filepath)
logger = self.logger
pp = self.preprocessor
smiles_pair_list = []
smiles_pair_list_original = []
is_successful_list = []
# counter = 0
if isinstance(pp, MolPreprocessor):
# No influence.
if target_index is not None:
df = df.iloc[target_index]
features = None
smiles_1_index = df.columns.get_loc(self.smiles_cols[0])
smiles_2_index = df.columns.get_loc(self.smiles_cols[1])
if self.labels is None:
labels_index = [] # dummy list
else:
labels_index = [df.columns.get_loc(c) for c in self.labels]
total_count = df.shape[0]
fail_count = 0
success_count = 0
# iteration on every row within the csv file
for row in tqdm(df.itertuples(index=False), total=df.shape[0]):
smiles_1 = row[smiles_1_index]
smiles_2 = row[smiles_2_index]
# currently it assumes list
labels = [int(row[i]) for i in labels_index]
try:
mol_1 = Chem.MolFromSmiles(smiles_1)
mol_2 = Chem.MolFromSmiles(smiles_2)
if mol_1 is None or mol_2 is None:
fail_count += 1
if return_is_successful:
is_successful_list.append(False)
continue
# Note that smiles expression is not unique.
# we obtain canonical smiles
# canonical_smiles_1, mol_1 = pp.prepare_smiles_and_mol(mol_1)
# input_features_1 = pp.get_input_features(mol_1)
# canonical_smiles_2, mol_2 = pp.prepare_smiles_and_mol(mol_2)
# input_features_2 = pp.get_input_features(mol_2)
input_features_1 = pp.get_input_features(mol_1)
input_features_2 = pp.get_input_features(mol_2)
# Extract label
if self.postprocess_label is not None:
labels = self.postprocess_label(labels)
# if return_smiles_pair:
# smiles_pair_list.append([canonical_smiles_1, canonical_smiles_2])
if return_smiles_pair:
smiles_pair_list.append([smiles_1, smiles_2])
if return_smiles_pair_original:
smiles_pair_list_original.append([smiles_1, smiles_2])
except MolFeatureExtractionError as e:
# This is expected error that extracting feature failed,
# skip this molecule.
fail_count += 1
if return_is_successful:
is_successful_list.append(False)
continue
except Exception as e:
logger.warning('parse(), type: {}, {}'
.format(type(e).__name__, e.args))
logger.info(traceback.format_exc())
fail_count += 1
if return_is_successful:
is_successful_list.append(False)
continue
# Initialize features: list of list
if features is None:
if isinstance(input_features_1, tuple):
num_features_1 = len(input_features_1)
else:
num_features_1 = 1
if isinstance(input_features_2, tuple):
num_features_2 = len(input_features_2)
else:
num_features_2 = 1
num_features = num_features_1 + num_features_2
if self.labels is not None:
num_features += 1
# list of list, a sublist corresponding to a certain feature
features = [[] for _ in range(num_features)]
# for every row in csv file
if isinstance(input_features_1, tuple):
for i in range(len(input_features_1)):
# features[i] a list containing the i-th feature
features[i].append(input_features_1[i])
else:
features[0].append(input_features_1)
offset = len(input_features_1)
if isinstance(input_features_2, tuple):
for i in range(len(input_features_2)):
features[offset + i].append(input_features_2[i])
else:
features[offset].append(input_features_2)
# last column corresponding to targeted label
if self.labels is not None:
features[len(features) - 1].append(labels)
success_count += 1
if return_is_successful:
is_successful_list.append(True)
ret = []
for feature in features:
try:
feat_array = numpy.asarray(feature)
except ValueError:
                    # Temporary workaround.
# See,
# https://stackoverflow.com/questions/26885508/why-do-i-get-error-trying-to-cast-np-arraysome-list-valueerror-could-not-broa
feat_array = numpy.empty(len(feature), dtype=numpy.ndarray)
feat_array[:] = feature[:]
ret.append(feat_array)
result = tuple(ret)
logger.info('Preprocess finished. FAIL {}, SUCCESS {}, TOTAL {}'
.format(fail_count, success_count, total_count))
else:
raise NotImplementedError
smiles_pairs = numpy.array(smiles_pair_list) if return_smiles_pair else None
smiles_pairs_original = numpy.array(smiles_pair_list_original) if return_smiles_pair_original else None
if return_is_successful:
is_successful = numpy.array(is_successful_list)
else:
is_successful = None
if isinstance(result, tuple):
if self.postprocess_fn is not None:
result = self.postprocess_fn(*result)
dataset = NumpyTupleDataset(*result)
else:
if self.postprocess_fn is not None:
result = self.postprocess_fn(result)
dataset = NumpyTupleDataset(result)
return {"dataset": dataset,
"smiles_pair": smiles_pairs,
"smiles_pair_original": smiles_pairs_original,
"is_successful": is_successful}
def extract_total_num(self, df):
"""Extracts total number of data which can be parsed
We can use this method to determine the value fed to `target_index`
option of `parse` method. For example, if we want to extract input
feature from 10% of whole dataset, we need to know how many samples
are in a file. The returned value of this method may not to be same as
the final dataset size.
Args:
df (pandas.DataFrame): dataframe to be parsed.
Returns (int): total number of dataset can be parsed.
"""
return len(df)
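# Illustrative usage sketch (the file path and the preprocessor choice are
# assumptions, not from the original code): parse a CSV with 'smiles_1',
# 'smiles_2' and a 'label' column into a NumpyTupleDataset.
def _pair_parser_usage_sketch(csv_path='ddi_pairs.csv'):
    from chainer_chemistry.dataset.preprocessors import GGNNPreprocessor
    parser = CSVFileParserForPair(GGNNPreprocessor(),
                                  labels='label',
                                  smiles_cols=['smiles_1', 'smiles_2'])
    result = parser.parse(csv_path, return_smiles_pair=True)
    return result['dataset'], result['smiles_pair']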
class Mol2VecParserForPair(BaseFileParser):
"""data frame parser
    This FileParser parses a pandas dataframe.
    It should contain columns which contain SMILES as input, and a
    label column which is the target to predict.
Args:
preprocessor (BasePreprocessor): preprocessor instance
labels (str or list or None): labels column
smiles_cols (list): smiles columns
postprocess_label (Callable): post processing function if necessary
postprocess_fn (Callable): post processing function if necessary
logger:
"""
def __init__(self, preprocessor,
labels=None,
smiles_cols=('smiles_1', 'smiles_2'),
postprocess_label=None, postprocess_fn=None,
logger=None):
super(Mol2VecParserForPair, self).__init__(preprocessor)
if isinstance(labels, str):
labels = [labels, ]
self.labels = labels # type: list
if not isinstance(smiles_cols, list):
self.smiles_cols = list(smiles_cols)
else:
self.smiles_cols = smiles_cols
self.postprocess_label = postprocess_label
self.postprocess_fn = postprocess_fn
self.logger = logger or getLogger(__name__)
def parse(self, filepath, return_smiles_pair=False, return_smiles_pair_original=False, target_index=None,
return_is_successful=False):
smiles2vec_filename = "smiles2vec.pkl"
smiles2vec_path = "/home/chenx/drug_mining/representation_learning/chainer-chemistry/examples/ddi/dataset/drug_list"
smiles2vec_filepath = os.path.join(smiles2vec_path, smiles2vec_filename)
with open(smiles2vec_filepath, 'rb') as pkl_reader:
smiles2vec = pickle.load(pkl_reader)
df = pandas.read_csv(filepath)
logger = self.logger
pp = self.preprocessor
smiles_pair_list = []
smiles_pair_list_original = []
is_successful_list = []
# counter = 0
if isinstance(pp, MolPreprocessor):
# No influence.
if target_index is not None:
df = df.iloc[target_index]
features = None
smiles_1_index = df.columns.get_loc(self.smiles_cols[0])
smiles_2_index = df.columns.get_loc(self.smiles_cols[1])
if self.labels is None:
labels_index = [] # dummy list
else:
labels_index = [df.columns.get_loc(c) for c in self.labels]
total_count = df.shape[0]
fail_count = 0
success_count = 0
# iteration on every row within the csv file
for row in tqdm(df.itertuples(index=False), total=df.shape[0]):
smiles_1 = row[smiles_1_index]
smiles_2 = row[smiles_2_index]
# currently it assumes list
labels = [int(row[i]) for i in labels_index]
try:
mol_1 = Chem.MolFromSmiles(smiles_1)
mol_2 = Chem.MolFromSmiles(smiles_2)
if mol_1 is None or mol_2 is None:
fail_count += 1
if return_is_successful:
is_successful_list.append(False)
continue
# input_features_1 = pp.get_input_features(mol_1)
# input_features_2 = pp.get_input_features(mol_2)
input_features_1 = smiles2vec[smiles_1]
input_features_2 = smiles2vec[smiles_2]
# Extract label
if self.postprocess_label is not None:
labels = self.postprocess_label(labels)
# if return_smiles_pair:
# smiles_pair_list.append([canonical_smiles_1, canonical_smiles_2])
if return_smiles_pair:
smiles_pair_list.append([smiles_1, smiles_2])
if return_smiles_pair_original:
smiles_pair_list_original.append([smiles_1, smiles_2])
except MolFeatureExtractionError as e:
# This is expected error that extracting feature failed,
# skip this molecule.
fail_count += 1
if return_is_successful:
is_successful_list.append(False)
continue
except Exception as e:
logger.warning('parse(), type: {}, {}'
.format(type(e).__name__, e.args))
logger.info(traceback.format_exc())
fail_count += 1
if return_is_successful:
is_successful_list.append(False)
continue
# Initialize features: list of list
if features is None:
if isinstance(input_features_1, tuple):
num_features_1 = len(input_features_1)
else:
num_features_1 = 1
if isinstance(input_features_2, tuple):
num_features_2 = len(input_features_2)
else:
num_features_2 = 1
num_features = num_features_1 + num_features_2
if self.labels is not None:
num_features += 1
# list of list, a sublist corresponding to a certain feature
features = [[] for _ in range(num_features)]
# for every row in csv file
if isinstance(input_features_1, tuple):
for i in range(len(input_features_1)):
# features[i] a list containing the i-th feature
features[i].append(input_features_1[i])
else:
features[0].append(input_features_1)
# offset = len(input_features_1)
offset = num_features_1
if isinstance(input_features_2, tuple):
for i in range(len(input_features_2)):
features[offset + i].append(input_features_2[i])
else:
features[offset].append(input_features_2)
# last column corresponding to targeted label
if self.labels is not None:
features[len(features) - 1].append(labels)
success_count += 1
if return_is_successful:
is_successful_list.append(True)
ret = []
for feature in features:
try:
                    feat_array = numpy.asarray(feature)
import numpy as np
from numpy.testing import run_module_suite, assert_array_equal, assert_raises
from skimage import img_as_ubyte, img_as_uint, img_as_float
from skimage import data, util
from skimage.morphology import cmorph, disk
from skimage.filter import rank
def test_random_sizes():
# make sure the size is not a problem
niter = 10
elem = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=np.uint8)
for m, n in np.random.random_integers(1, 100, size=(10, 2)):
mask = np.ones((m, n), dtype=np.uint8)
image8 = np.ones((m, n), dtype=np.uint8)
out8 = np.empty_like(image8)
rank.mean(image=image8, selem=elem, mask=mask, out=out8,
shift_x=0, shift_y=0)
assert_array_equal(image8.shape, out8.shape)
rank.mean(image=image8, selem=elem, mask=mask, out=out8,
shift_x=+1, shift_y=+1)
assert_array_equal(image8.shape, out8.shape)
image16 = np.ones((m, n), dtype=np.uint16)
out16 = np.empty_like(image8, dtype=np.uint16)
rank.mean(image=image16, selem=elem, mask=mask, out=out16,
shift_x=0, shift_y=0)
assert_array_equal(image16.shape, out16.shape)
rank.mean(image=image16, selem=elem, mask=mask, out=out16,
shift_x=+1, shift_y=+1)
assert_array_equal(image16.shape, out16.shape)
rank.mean_percentile(image=image16, mask=mask, out=out16,
selem=elem, shift_x=0, shift_y=0, p0=.1, p1=.9)
assert_array_equal(image16.shape, out16.shape)
rank.mean_percentile(image=image16, mask=mask, out=out16,
selem=elem, shift_x=+1, shift_y=+1, p0=.1, p1=.9)
assert_array_equal(image16.shape, out16.shape)
def test_compare_with_cmorph_dilate():
# compare the result of maximum filter with dilate
image = (np.random.random((100, 100)) * 256).astype(np.uint8)
out = np.empty_like(image)
mask = np.ones(image.shape, dtype=np.uint8)
for r in range(1, 20, 1):
elem = np.ones((r, r), dtype=np.uint8)
rank.maximum(image=image, selem=elem, out=out, mask=mask)
cm = cmorph._dilate(image=image, selem=elem)
assert_array_equal(out, cm)
def test_compare_with_cmorph_erode():
    # compare the result of minimum filter with erode
image = (np.random.random((100, 100)) * 256).astype(np.uint8)
out = np.empty_like(image)
mask = np.ones(image.shape, dtype=np.uint8)
for r in range(1, 20, 1):
elem = np.ones((r, r), dtype=np.uint8)
rank.minimum(image=image, selem=elem, out=out, mask=mask)
cm = cmorph._erode(image=image, selem=elem)
assert_array_equal(out, cm)
def test_bitdepth():
# test the different bit depth for rank16
elem = np.ones((3, 3), dtype=np.uint8)
out = np.empty((100, 100), dtype=np.uint16)
mask = np.ones((100, 100), dtype=np.uint8)
for i in range(5):
image = np.ones((100, 100), dtype=np.uint16) * 255 * 2 ** i
r = rank.mean_percentile(image=image, selem=elem, mask=mask,
out=out, shift_x=0, shift_y=0, p0=.1, p1=.9)
def test_population():
    # check the number of valid (in-image) pixels in the neighborhood:
    # with a 3x3 selem, corners see 4 pixels, edges 6, interior pixels 9
image = np.zeros((5, 5), dtype=np.uint8)
elem = np.ones((3, 3), dtype=np.uint8)
out = np.empty_like(image)
mask = np.ones(image.shape, dtype=np.uint8)
rank.pop(image=image, selem=elem, out=out, mask=mask)
r = np.array([[4, 6, 6, 6, 4],
[6, 9, 9, 9, 6],
[6, 9, 9, 9, 6],
[6, 9, 9, 9, 6],
[4, 6, 6, 6, 4]])
assert_array_equal(r, out)
def test_structuring_element8():
# check the output for a custom structuring element
r = np.array([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 255, 0, 0, 0],
[0, 0, 255, 255, 255, 0],
[0, 0, 0, 255, 255, 0],
[0, 0, 0, 0, 0, 0]])
# 8-bit
image = np.zeros((6, 6), dtype=np.uint8)
image[2, 2] = 255
elem = np.asarray([[1, 1, 0], [1, 1, 1], [0, 0, 1]], dtype=np.uint8)
out = np.empty_like(image)
mask = np.ones(image.shape, dtype=np.uint8)
rank.maximum(image=image, selem=elem, out=out, mask=mask,
shift_x=1, shift_y=1)
assert_array_equal(r, out)
# 16-bit
image = np.zeros((6, 6), dtype=np.uint16)
image[2, 2] = 255
out = np.empty_like(image)
rank.maximum(image=image, selem=elem, out=out, mask=mask,
shift_x=1, shift_y=1)
assert_array_equal(r, out)
def test_pass_on_bitdepth():
# should pass because data bitdepth is not too high for the function
image = np.ones((100, 100), dtype=np.uint16) * 2 ** 11
elem = np.ones((3, 3), dtype=np.uint8)
out = np.empty_like(image)
mask = np.ones(image.shape, dtype=np.uint8)
def test_inplace_output():
# rank filters are not supposed to filter inplace
selem = disk(20)
image = (np.random.random((500, 500)) * 256).astype(np.uint8)
out = image
assert_raises(NotImplementedError, rank.mean, image, selem, out=out)
def test_compare_autolevels():
# compare autolevel and percentile autolevel with p0=0.0 and p1=1.0
    # should return the same arrays
image = util.img_as_ubyte(data.camera())
selem = disk(20)
loc_autolevel = rank.autolevel(image, selem=selem)
loc_perc_autolevel = rank.autolevel_percentile(image, selem=selem,
p0=.0, p1=1.)
assert_array_equal(loc_autolevel, loc_perc_autolevel)
def test_compare_autolevels_16bit():
# compare autolevel(16-bit) and percentile autolevel(16-bit) with p0=0.0
    # and p1=1.0 should return the same arrays
image = data.camera().astype(np.uint16) * 4
selem = disk(20)
loc_autolevel = rank.autolevel(image, selem=selem)
loc_perc_autolevel = rank.autolevel_percentile(image, selem=selem,
p0=.0, p1=1.)
assert_array_equal(loc_autolevel, loc_perc_autolevel)
def test_compare_ubyte_vs_float():
    # Create a uint8 image and a float copy of it
image_uint = img_as_ubyte(data.camera()[:50, :50])
image_float = img_as_float(image_uint)
methods = ['autolevel', 'bottomhat', 'equalize', 'gradient', 'threshold',
'subtract_mean', 'enhance_contrast', 'pop', 'tophat']
for method in methods:
func = getattr(rank, method)
out_u = func(image_uint, disk(3))
out_f = func(image_float, disk(3))
assert_array_equal(out_u, out_f)
def test_compare_8bit_unsigned_vs_signed():
    # filters applied on an 8-bit image or a 16-bit image (having only 8 bits
    # of real dynamic range) should be identical
    # Create a signed int8 image and convert it to uint8
image = img_as_ubyte(data.camera())
image[image > 127] = 0
image_s = image.astype(np.int8)
image_u = img_as_ubyte(image_s)
assert_array_equal(image_u, img_as_ubyte(image_s))
methods = ['autolevel', 'bottomhat', 'equalize', 'gradient', 'maximum',
'mean', 'subtract_mean', 'median', 'minimum', 'modal',
'enhance_contrast', 'pop', 'threshold', 'tophat']
for method in methods:
func = getattr(rank, method)
out_u = func(image_u, disk(3))
out_s = func(image_s, disk(3))
assert_array_equal(out_u, out_s)
def test_compare_8bit_vs_16bit():
    # filters applied on an 8-bit image or a 16-bit image (having only 8 bits
    # of real dynamic range) should be identical
image8 = util.img_as_ubyte(data.camera())
image16 = image8.astype(np.uint16)
assert_array_equal(image8, image16)
methods = ['autolevel', 'bottomhat', 'equalize', 'gradient', 'maximum',
'mean', 'subtract_mean', 'median', 'minimum', 'modal',
'enhance_contrast', 'pop', 'threshold', 'tophat']
for method in methods:
func = getattr(rank, method)
f8 = func(image8, disk(3))
f16 = func(image16, disk(3))
assert_array_equal(f8, f16)
def test_trivial_selem8():
    # check that min, max and mean return identity if the structuring element
    # contains only the central pixel
image = np.zeros((5, 5), dtype=np.uint8)
out = np.zeros_like(image)
mask = np.ones_like(image, dtype=np.uint8)
image[2, 2] = 255
image[2, 3] = 128
image[1, 2] = 16
elem = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=np.uint8)
rank.mean(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
rank.minimum(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
rank.maximum(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
def test_trivial_selem16():
    # check that min, max and mean return identity if the structuring element
    # contains only the central pixel
image = np.zeros((5, 5), dtype=np.uint16)
out = np.zeros_like(image)
mask = np.ones_like(image, dtype=np.uint8)
image[2, 2] = 255
image[2, 3] = 128
image[1, 2] = 16
elem = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=np.uint8)
rank.mean(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
rank.minimum(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
rank.maximum(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
def test_smallest_selem8():
    # check that min, max and mean return identity if the structuring element
    # is a single (central) pixel
image = np.zeros((5, 5), dtype=np.uint8)
out = np.zeros_like(image)
mask = np.ones_like(image, dtype=np.uint8)
image[2, 2] = 255
image[2, 3] = 128
image[1, 2] = 16
elem = np.array([[1]], dtype=np.uint8)
rank.mean(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
rank.minimum(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
rank.maximum(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
def test_smallest_selem16():
    # check that min, max and mean return identity if the structuring element
    # is a single (central) pixel
image = np.zeros((5, 5), dtype=np.uint16)
out = np.zeros_like(image)
mask = np.ones_like(image, dtype=np.uint8)
image[2, 2] = 255
image[2, 3] = 128
image[1, 2] = 16
elem = np.array([[1]], dtype=np.uint8)
rank.mean(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
rank.minimum(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
rank.maximum(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
def test_empty_selem():
    # check that min, max and mean return zeros if the structuring element is
    # empty
image = np.zeros((5, 5), dtype=np.uint16)
out = np.zeros_like(image)
mask = np.ones_like(image, dtype=np.uint8)
res = np.zeros_like(image)
image[2, 2] = 255
image[2, 3] = 128
image[1, 2] = 16
elem = np.array([[0, 0, 0], [0, 0, 0]], dtype=np.uint8)
rank.mean(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(res, out)
rank.minimum(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(res, out)
rank.maximum(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(res, out)
def test_otsu():
# test the local Otsu segmentation on a synthetic image
    # (left-to-right ramp * sine)
test = np.tile([128, 145, 103, 127, 165, 83, 127, 185, 63, 127, 205, 43,
127, 225, 23, 127],
(16, 1))
test = test.astype(np.uint8)
res = np.tile([1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1], (16, 1))
selem = np.ones((6, 6), dtype=np.uint8)
th = 1 * (test >= rank.otsu(test, selem))
assert_array_equal(th, res)
def test_entropy():
    # verify that entropy is consistent with the bit depth of the input data:
    # a uniform distribution over 2**b gray levels has an entropy of b bits
selem = np.ones((16, 16), dtype=np.uint8)
# 1 bit per pixel
data = np.tile(np.asarray([0, 1]), (100, 100)).astype(np.uint8)
assert(np.max(rank.entropy(data, selem)) == 1)
# 2 bit per pixel
data = np.tile(np.asarray([[0, 1], [2, 3]]), (10, 10)).astype(np.uint8)
assert(np.max(rank.entropy(data, selem)) == 2)
# 3 bit per pixel
data = np.tile(
np.asarray([[0, 1, 2, 3], [4, 5, 6, 7]]), (10, 10)).astype(np.uint8)
assert(np.max(rank.entropy(data, selem)) == 3)
# 4 bit per pixel
data = np.tile(
np.reshape(np.arange(16), (4, 4)), (10, 10)).astype(np.uint8)
assert(np.max(rank.entropy(data, selem)) == 4)
# 6 bit per pixel
data = np.tile(
np.reshape(np.arange(64), (8, 8)), (10, 10)).astype(np.uint8)
assert(np.max(rank.entropy(data, selem)) == 6)
# 8-bit per pixel
data = np.tile(
np.reshape(np.arange(256), (16, 16)), (10, 10)).astype(np.uint8)
assert(np.max(rank.entropy(data, selem)) == 8)
# 12 bit per pixel
selem = np.ones((64, 64), dtype=np.uint8)
data = np.tile(
np.reshape(np.arange(4096), (64, 64)), (2, 2)).astype(np.uint16)
assert(np.max(rank.entropy(data, selem)) == 12)
# make sure output is of dtype double
out = rank.entropy(data, np.ones((16, 16), dtype=np.uint8))
assert out.dtype == np.double
def test_selem_dtypes():
image = np.zeros((5, 5), dtype=np.uint8)
out = np.zeros_like(image)
mask = np.ones_like(image, dtype=np.uint8)
image[2, 2] = 255
image[2, 3] = 128
image[1, 2] = 16
for dtype in (np.uint8, np.uint16, np.int32, np.int64,
np.float32, np.float64):
        elem = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=dtype)
import numpy as np
from sklearn.base import BaseEstimator
class LogisticRegressionBC:
"""
Binary classifier logistic regression model has been implemented here.
For complete explanation take a look at the article link that is available in the README of repo.
"""
def __init__(self, lr=0.01,random_state=42,epochs=50):
"""
:param lr: learning rate.
:param random_state: seed for random initialization of weights.
:param epochs: number of times the model sees the training data.
"""
self.lr = lr
self.random_state = random_state
self.epochs = epochs
def fit(self,X,y):
X_b = np.c_[np.ones((len(X),1)),X]
rgen = np.random.RandomState(self.random_state)
        self.weights = rgen.normal(loc=0.0, size=X_b.shape[1], scale=0.01) # small random init; a scale of 0.0 would make the seed meaningless
self.cost = []
for _ in range(self.epochs):
np.seterr(divide='ignore')
input = self.net_input(X_b)
            output = self.activation(input) # activate the net input computed above, not the raw features
errors = y-output
self.weights += self.lr*X_b.T.dot(errors)
c = (-y.dot(np.log(output)))-((1-y).dot(np.log(1-output)))
self.cost.append(c)
return self
def predict(self,X):
X_b = np.c_[np.ones((len(X),1)),X]
inp = self.net_input(X_b)
output = self.activation(inp)
return np.where(output>=0.5,1,0)
def net_input(self,X):
return X.dot(self.weights[:,np.newaxis])
def activation(self,X):
X_arr = 1/(1+np.exp(-np.clip(X,-250,250)))
return np.mean(X_arr,axis=1)
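# Illustrative usage sketch (synthetic, linearly separable data; not from the
# original repo): train the binary classifier above and check training accuracy.
def _logistic_regression_usage_sketch():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(50, 2) + 2.0, rng.randn(50, 2) - 2.0])
    y = np.hstack([np.ones(50), np.zeros(50)])
    clf = LogisticRegressionBC(lr=0.05, epochs=100).fit(X, y)
    return np.mean(clf.predict(X) == y)  # should be close to 1.0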
class LinearRegressionGD(BaseEstimator):
"""
Linear regression using Gradient Descent optimization algorithm.
    The important part of this script is the three lines in the 'for loop' that implement Gradient Descent.
"""
def __init__(self, lr=0.01, epochs=100, random_state=42):
"""
:param lr: learning rate, specifies the magnitude of GD's step
:param epochs: the number of times you want the model to see the training data
        :param random_state: seed for initialization of weights. Used for reproducibility.
"""
self.lr = lr
self.epochs = epochs
self.random_state = random_state
def fit(self, X, y):
np.random.seed(self.random_state)
X_b = np.c_[np.ones((X.shape[0], 1)), X]
self.theta = np.random.randn(X_b.shape[1], 1) # random weight initialization
for _ in range(self.epochs):
output = X_b.dot(self.theta) # the hypothesis or net input
gradient = (2 / X_b.shape[0]) * X_b.T.dot(output - y) # calculating the gradient
self.theta = self.theta - self.lr * gradient
self.intercept_ = self.theta[0][0]
self.coef_ = np.ravel(self.theta[1:]) # these are the params that you can look at after training/
return self
def predict(self, X):
X_b = np.c_[np.ones((X.shape[0], 1)), X]
return X_b.dot(self.theta)
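# --- Sanity-check sketch (added; not part of the original repo). With a suitable
# --- learning rate and enough epochs, the GD solution above should approach the
# --- closed-form normal-equation solution theta = pinv(X_b' X_b) X_b' y.
# --- `y` is expected as a column vector of shape (n, 1), as in LinearRegressionGD.fit.
def _compare_gd_to_normal_equation(X, y):
    gd = LinearRegressionGD(epochs=2000).fit(X, y)
    X_b = np.c_[np.ones((X.shape[0], 1)), X]
    theta_closed = np.linalg.pinv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)
    return gd.theta, theta_closed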
class LinearRegressionSGD(BaseEstimator):
"""
Linear regression using SGD optimization algorithm.
SGD improves on the execution time of GD by introducing randomness in the training process.
    Please find the full explanation of this algorithm in the link provided in the README of the repo.
    You can find a useful implementation of this model in GD.ipynb
"""
def __init__(self, lr=0.01, epochs=100, random_state=42):
"""
:param lr: learning rate.
:param epochs: number of times the model can see the training data.
:param random_state: seed for random initialization of weights.
"""
self.lr = lr
self.epochs = epochs
self.random_state = random_state
def fit(self, X, y):
np.random.seed(self.random_state)
X_b = np.c_[np.ones((X.shape[0], 1)), X] # adding the bias column
self.theta = np.random.randn(X_b.shape[1], 1) # random initialization.
for _ in range(self.epochs):
for i in range(len(X_b)):
random_ind = np.random.randint(len(X_b)) # random index.
X_random = X_b[random_ind:random_ind + 1] # random training index.
y_random = y[random_ind:random_ind + 1]
output = X_random.dot(self.theta) # hypothesis or net input.
gradient = 2 * X_random.T.dot(output - y_random) # the gradient
self.theta = self.theta - self.lr * gradient # descent step.
self.intercept_ = self.theta[0][0]
self.coef_ = np.ravel(self.theta[1:])
return self
def predict(self, X):
X_b = np.c_[np.ones((X.shape[0], 1)), X]
return X_b.dot(self.theta)
class LinearRegressionMBGD(BaseEstimator):
"""
    Linear regression using the Mini-Batch GD optimization algorithm.
MBGD uses the best characteristics of gradient descent and stochastic gradient descent.
MBGD learns in batches which speeds up the program when the dataset is really large.
"""
def __init__(self,lr=0.01,epochs=100,random_state=42,batch_size=32):
"""
:param lr: learning rate, specifies the magnitude of GD's step
:param epochs: the number of times you want the model to see the training data
        :param random_state: seed for initialization of weights. Used for reproducibility.
:param batch_size: the size of the batch that the model will train on a particular instance.
"""
self.lr = lr
self.epochs = epochs
self.random_state = random_state
self.batch_size = batch_size
def fit(self,X,y):
np.random.seed(self.random_state)
X_b = np.c_[np.ones((X.shape[0],1)),X]
self.theta = np.random.randn(X_b.shape[1],1)
for _ in range(self.epochs):
for i in range(len(X_b)):
random_ind = np.random.randint(len(X_b)-self.batch_size)
X_random = X_b[random_ind:random_ind+self.batch_size]
y_random = y[random_ind:random_ind+self.batch_size]
output = X_random.dot(self.theta)
gradient = (2/self.batch_size)*X_random.T.dot(output-y_random)
self.theta = self.theta - self.lr*gradient
self.intercept_ = self.theta[0][0]
        self.coef_ = np.ravel(self.theta[1:])
        return self
    # assumed continuation, mirroring LinearRegressionGD/SGD above
    def predict(self,X):
        X_b = np.c_[np.ones((X.shape[0],1)),X]
        return X_b.dot(self.theta)
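# --- Illustrative comparison sketch (added; not part of the original repo). It fits
# --- the three gradient-descent variants above on the same synthetic 1-D regression
# --- problem; names are hypothetical and y is passed as a column vector.
def _demo_gd_variants(seed=42, n=200):
    rng_demo = np.random.RandomState(seed)
    X_demo = 2 * rng_demo.rand(n, 1)
    y_demo = 4 + 3 * X_demo + rng_demo.randn(n, 1)   # true intercept 4, slope 3
    models = [LinearRegressionGD(), LinearRegressionSGD(), LinearRegressionMBGD()]
    return [(m.__class__.__name__, m.fit(X_demo, y_demo).coef_) for m in models]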
import tclab
import time
import numpy as np
import sys
import first_principles_model as fp
def doublet_test(data_file='step_test.csv', show_plot=True):
'''doublet test the system and save data to given file path'''
import Adafruit_DHT # Only importable on the Pi itself
tc1 = tclab.TCLab()
tc1.LED(100)
# Bogus data row added to make concatenation work, never goes anywhere
data = [1, 1, 1, 1, 1, 1, 1, 1]
    csv_file_header = 'time,control output,box humidity,box temp,outside humidity,outside temp,heater 1 temp,heater 2 temp'  # matches the 8 columns logged below
start_time = time.time()
u = 0
tc1.Q1(u)
tc1.Q2(u)
current_time = 0
while current_time < 1200:
try:
# read temp, humidity and time
humid_in, temp_in = Adafruit_DHT.read_retry(
11, 4, retries=5, delay_seconds=1)
humid_out, temp_out = Adafruit_DHT.read_retry(
11, 17, retries=5, delay_seconds=1)
current_time = time.time() - start_time
if humid_in is None:
# Rejects failed readings
continue
if humid_in > 100:
# Corrupted data, so ignore it
continue
if current_time > 60:
u = 100
if current_time > 800:
u = 50
tc1.Q1(u)
tc1.Q2(u)
# print current values
print('time: {:.1f}, u: {}, h_in: {}, t_in: {}, h1: {}, h2: {}, h_out: {}, t_out: {}'
.format(current_time, u, humid_in, temp_in, tc1.T1, tc1.T2, humid_out, temp_out))
data = np.vstack([data, [current_time, u, humid_in,
temp_in, humid_out, temp_out, tc1.T1, tc1.T2]])
np.savetxt(data_file, data[1:],
delimiter=',', header=csv_file_header)
except KeyboardInterrupt:
print('Exiting...')
tc1.LED(0)
return
except ValueError as error:
# Handles cases when the heater overheats
print(error)
def run_controller(run_time, PID_parameters, show_plot=True):
'''
Run the main loop
run_time total run time in minutes
show_plot whether to show the dynamic plot of the system
'''
Kc, tau_I, tau_D = PID_parameters
import Adafruit_DHT # Only importable on the Pi itself
tc1 = tclab.TCLab()
tc1.LED(100)
# Bogus data row added to make concatenation work, never goes anywhere
data = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
csv_file_header = 'time,control output,box humidity,box temp,outside humidity,outside temp,heater 1 temp,heater 2 temp,P,I,D,SP,Err'
start_time = time.time()
u = 0
Qss = 0 # 0% heater to start
err = np.zeros(run_time*60)
    sp = np.ones(run_time*60)
# model.distributions.py
# copyright 2021 <NAME>
import numpy as np
import pymc3 as pm
from scipy import stats, special
import theano.tensor as tt
from pymc3.distributions.dist_math import bound, logpow, alltrue_elemwise
from pymc3.distributions.continuous import assert_negative_support, PositiveContinuous
from pymc3.distributions.distribution import draw_values, generate_samples
from pymc3.theanof import floatX
RANDOM_SEED = 42
rng = np.random.default_rng(seed=RANDOM_SEED)
# NOTE hack to clip values away from {0, 1} for invcdfs
# Whilst value = {0, 1} is theoretically allowed, it seems to cause a
# numerical computational issue somewhere in tt.erfcinv which throws infs.
# This screws up the downstream, so clip slightly away from {0, 1}
CLIP_U_AWAY_FROM_ZERO_ONE_FOR_INVCDFS = 1e-15 #1e-18 too small
def boundzero_numpy(vals, *conditions):
""" Bound natural unit distribution params, return 0 for out-of-bounds
Copy from pymc.bound pymc3.distributions.dist_math.py
"""
return np.where(alltrue_elemwise(conditions), vals, 0.)
def boundzero_theano(vals, *conditions):
""" Bound natural unit distribution params, return 0 for out-of-bounds
Copy from pymc.bound pymc3.distributions.dist_math.py
"""
return tt.switch(alltrue_elemwise(conditions), vals, 0.)
def boundlog_numpy(vals, *conditions):
""" Bound log unit distribution params, return -inf for out-of-bounds
Copy from pymc.bound pymc3.distributions.dist_math.py
"""
return np.where(alltrue_elemwise(conditions), vals, -np.inf)
def logpow_numpy(x, m):
""" Copy from pymc3
Safe calc log(x**m) since m*log(x) will fail when m, x = 0.
"""
return np.where(x == 0, np.where(m == 0, 0.0, -np.inf), m * np.log(x))
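# Quick numeric illustration of the helpers above (added for clarity; the values
# follow directly from the definitions):
#   logpow_numpy(0., 0.) -> 0.0        (convention: log(0**0) = 0)
#   logpow_numpy(0., 2.) -> -inf       (log of zero)
#   logpow_numpy(3., 2.) -> 2*log(3)   (~2.197)
#   boundzero_numpy(np.array([1., 2.]), np.array([True, False])) -> array([1., 0.])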
class Gamma(pm.Gamma):
"""Inherit the pymc class, add cdf and invcdf """
def __init__(self):
raise NotImplementedError(
"""Consider that InvCDF is hard to calculate: even scipy uses C functions
            Recommend using a different dist in practice""")
class GammaNumpy():
"""Gamma PDF, CDF, InvCDF and logPDF, logCDF, logInvCDF
Manual implementations used in pymc3 custom distributions
Helpful to compare these to scipy to confirm my correct implementation
Ref: https://en.wikipedia.org/wiki/Gamma_distribution
Params: x > 0, u in [0, 1], a (shape) > 0, b (rate) > 0
"""
def __init__(self):
self.name = 'Gamma'
self.notation = {'notation': r'x \sim Gamma(\alpha, \beta)'}
self.dist_natural = {
'pdf': r'f(x \mid \alpha, \beta) = \frac{1}{\Gamma(\alpha)} \beta^{\alpha} x^{\alpha-1} e^{- \beta x}',
'cdf': r'F(x \mid \alpha, \beta) = \frac{1}{\Gamma(\alpha)} \gamma(\alpha, \beta x)',
'invcdf': r'F^{-1}(u \mid \alpha, \beta) = '}
self.dist_log = {
'logpdf': r'\log f(x \mid \alpha, \beta) = -\log \Gamma(\alpha) + \log \beta^{\alpha} + \log x^{\alpha-1} - \beta x',
'logcdf': r'\log F(x \mid \alpha, \beta) = -\log \Gamma(\alpha) + \log \gamma(\alpha, \beta x)',
'loginvcdf': r'\log F^{-1}(u \mid \alpha, \beta) = '}
self.conditions = {
'parameters': r'\alpha > 0 \, \text{(shape)}, \; \beta > 0 \, \text{(rate)}',
'support': r'x \in (0, \infty), \; u \sim \text{Uniform([0, 1])}'}
self.summary_stats = {
'mean': r'\frac{\alpha}{\beta}',
'mode': r'\frac{\alpha - 1}{\beta}, \; \text{for} \alpha \geq 1',
'variance': r'\frac{\alpha}{\beta^{2}}'
}
def pdf(self, x, a, b):
"""Gamma PDF
compare to https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L2595
"""
fn = (1 / special.gamma(a)) * np.power(b, a) * np.power(x, a-1) * np.exp(-b * x)
return boundzero_numpy(fn, a > 0, b > 0, x >= 0)
def cdf(self, x, a, b):
"""Gamma CDF:
where $\gamma(a, bx)$ is lower incomplete gamma function [0, lim)
compare to https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L2602
"""
# fn = (1 / special.gamma(a)) * special.gammainc(a, b * x)
fn = special.gammainc(a, b * x)
return boundzero_numpy(fn, a > 0, b > 0, x >= 0)
def invcdf(self, u, a, b):
"""Gamma Inverse CDF aka PPF:
compare to https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L2608
see sc.gammainc()
"""
raise NotImplementedError('TODO gamma inverse CDF')
def logpdf(self, x, a, b):
"""Gamma log PDF
compare to https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L2599
"""
fn = -special.gammaln(a) + logpow_numpy(b, a) + logpow_numpy(x, a-1) - b * x
return boundlog_numpy(fn, a > 0, b > 0, x > 0)
def logcdf(self, x, a, b):
"""Gamma log CDF:
where $\gamma(a, bx)$ is lower incomplete gamma function [0, lim)
compare to https://github.com/pymc-devs/pymc3/blob/41a25d561b3aa40c75039955bf071b9632064a66/pymc3/distributions/continuous.py#L2614
"""
return boundlog_numpy((-special.gammaln(a)) + special.gammainc(a, b * x),
a > 0, b > 0, x > 0)
def loginvcdf(self, u, a, b):
"""Gamma log Inverse CDF aka log PPF:
see sc.gammaincinv()
"""
raise NotImplementedError('TODO gamma log inverse CDF')
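# --- Cross-check sketch (added; not part of the original repo). The manual Gamma
# --- implementations above can be compared against scipy.stats.gamma, which uses
# --- shape a and scale = 1/b for rate b. The parameter values below are arbitrary.
def _check_gamma_numpy(a=2.0, b=3.0, x=1.5):
    from scipy import stats as _stats
    g = GammaNumpy()
    ref = _stats.gamma(a, scale=1.0 / b)
    return (np.isclose(g.pdf(x, a, b), ref.pdf(x)),
            np.isclose(g.cdf(x, a, b), ref.cdf(x)),
            np.isclose(g.logpdf(x, a, b), ref.logpdf(x)))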
class Gumbel(pm.Gumbel):
"""Inherit the pymc class, add cdf, logcdf and invcdf, loginvcdf
Also clobber logp (!)
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, defaults=("mode",), **kwargs)
def logp(self, value):
"""
JS patch refactored code to align with other distributions
Calculate log-probability of Gumbel distribution at specified value.
z = (x - mu) / b
pdf = (1 / b) * exp(-z - exp(-z))
logpdf = -log(b) - z - exp(-z)
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the
log probabilities for multiple values are desired the values must
be provided in a numpy array or theano tensor
Returns
-------
TensorVariable
"""
mu = self.mu
beta = self.beta
z = (value - mu) / beta
logp = -tt.log(beta) - z - tt.exp(-z)
return bound(logp, beta > 0)
def logcdf(self, value):
"""
JS patch refactored code to align with other distributions
cdf = exp(-exp(-(X - mu) / b))
logcdf = -exp(-(X-mu)/b)
Compute the log of the cumulative distribution function for
Gumbel distribution at the specified value.
Parameters
----------
value: numeric
Value(s) for which log CDF is calculated. If the log CDF for
multiple values are desired the values must be provided in a
numpy array or theano tensor.
Returns
-------
TensorVariable
"""
beta = self.beta
mu = self.mu
logcdf = -tt.exp(-(value - mu)/beta)
return bound(logcdf, beta > 0)
def loginvcdf(self, value):
"""
JS new function
invcdf = mu - b * log(-log(u))
loginvcdf = log(mu) + log(1 - (b * log(-log(u))/mu))
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the
log probabilities for multiple values are desired the values must
be provided in a numpy array or theano tensor
Returns
-------
TensorVariable
"""
beta = self.beta
mu = self.mu
loginvcdf = tt.log(mu) + tt.log(1 - (beta * tt.log(-tt.log(value))/mu))
return bound(loginvcdf, beta > 0)
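# Reference identities for the methods above (added for clarity): with location mu
# and scale beta, the right-skewed Gumbel distribution has
#   CDF:    F(x)      = exp(-exp(-(x - mu) / beta))
#   InvCDF: F^{-1}(u) = mu - beta * log(-log(u))
# i.e. scipy.stats.gumbel_r(loc=mu, scale=beta).cdf / .ppf; the logcdf and
# loginvcdf expressions above are the log-transforms of these formulas.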
class InverseWeibull(PositiveContinuous):
r"""
Inverse Weibull log-likelihood, the reciprocal of the Weibull distribution,
also known as the Fréchet distribution, a special case of the generalized
extreme value distribution.
See scipy for reference
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.invweibull.html
https://github.com/scipy/scipy/blob/v1.6.0/scipy/stats/_continuous_distns.py
The pdf of this distribution is
.. math::
f(x \mid \alpha, s, m) =
            \frac{\alpha}{s} \; \left( \frac{x-m}{s} \right)^{-1-\alpha} \; e^{-\left( \frac{x-m}{s} \right)^{-\alpha}}
.. plot::
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
plt.style.use('seaborn-darkgrid')
x = np.linspace(0, 3, 500)
alphas = [1., 2., 3., 3.]
betas = [1., 1., 1., .5]
for a, b in zip(alphas, betas):
            pdf = st.invweibull.pdf(x, a, scale=b)  # invweibull (Fréchet), matching the class documented here
plt.plot(x, pdf, label=r'$\alpha$ = {}, $\beta$ = {}'.format(a, b))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.legend(loc=1)
plt.show()
======== ======================================================
    Support   :math:`x \in (m, \infty)`
Mean :math:`{\begin{cases}\ m+s\Gamma \left(1-{\frac {1}{\alpha }}\right)&{\text{for }}\alpha >1\\\ \infty &{\text{otherwise}}\end{cases}}`
Variance :math:`{\begin{cases}\ s^{2}\left(\Gamma \left(1-{\frac {2}{\alpha }}\right)-\left(\Gamma \left(1-{\frac{1}{\alpha }}\right)\right)^{2}\right)&{\text{for }}\alpha >2\\\ \infty &{\text{otherwise}}\end{cases}}`
======== ======================================================
Parameters
----------
alpha: float
Shape parameter (alpha > 0).
s: float
Scale parameter (s > 0), default = 1
## m: float
## Location parameter (mu in (-inf, inf)), default = 0
"""
def __init__(self, alpha=None, s=1., *args, **kwargs):
super().__init__(*args, defaults=("mode",), **kwargs)
self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
self.s = s = tt.as_tensor_variable(floatX(s))
self.mode = s * tt.power(alpha / (1. + alpha), 1. / alpha)
assert_negative_support(alpha, "alpha", "InverseWeibull")
assert_negative_support(s, "s", "InverseWeibull")
def _distr_parameters_for_repr(self):
return ["alpha", 's']
def random(self, point=None, size=None):
"""
Draw random values from InverseWeibull PDF distribution.
Parameters
----------
point: dict, optional
Dict of variable values on which random values are to be
conditioned (uses default point if not specified).
size: int, optional
Desired size of random sample (returns one sample if not
specified).
Returns
-------
array
"""
alpha, s = draw_values([self.alpha, self.s], point=point, size=size)
return generate_samples(stats.invweibull.rvs, c=alpha, scale=s, loc=0.,
dist_shape=self.shape, size=size)
def logp(self, value):
"""
Calculate log-probability of InverseWeibull distribution at specified value.
pdf: https://www.wolframalpha.com/input/?i=%28a%2Fs%29+*+%28x%2Fs%29**%28-1-a%29+*+exp%28-%28x%2Fs%29**-a%29
alt form according to WA: a e^(-(s/x)^a) s^a x^(-1 - a)
Parameters
----------
value: numeric
Value(s) for which log-probability is calculated. If the log probabilities for multiple
values are desired the values must be provided in a numpy array or theano tensor
Returns
-------
TensorVariable
"""
alpha = self.alpha
s = self.s
return bound(
(
tt.log(alpha) -
tt.log(s) +
logpow(s / value, 1. + alpha) -
tt.power(s / value, alpha) # this term grossly dominates if alpha >> 2
),
value > 0.,
alpha > 0.,
s > 0.
)
def cdf(self, value):
"""InverseWeibull CDF"""
alpha = self.alpha
s = self.s
fn = tt.exp(-tt.power(value / s, -alpha))
return boundzero_theano(fn, alpha > 0, s > 0, value > 0)
def logcdf(self, value):
"""InverseWeibull log CDF
ref: ? manually calced and confirmed vs scipy
"""
alpha = self.alpha
s = self.s
fn = -tt.power(value / s, -alpha)
return bound(fn, alpha > 0, s > 0, value > 0)
def invcdf(self, value):
"""InverseWeibull Inverse CDF aka PPF"""
alpha = self.alpha
s = self.s
value = tt.clip(value, CLIP_U_AWAY_FROM_ZERO_ONE_FOR_INVCDFS,
1-CLIP_U_AWAY_FROM_ZERO_ONE_FOR_INVCDFS)
fn = s * tt.power(-tt.log(value), -1. / alpha)
return boundzero_theano(fn, alpha > 0, s > 0, value >= 0, value <= 1)
def loginvcdf(self, value):
"""InverseWeibull log Inverse CDF aka log PPF
ref: ? manually calced and confirmed vs scipy
"""
alpha = self.alpha
s = self.s
fn = tt.log(s) - (1./ alpha ) * tt.log(-tt.log(value))
return bound(fn, alpha > 0, s > 0, value >= 0, value <= 1)
class InverseWeibullNumpy():
"""Inverse Weibull PDF, CDF, InvCDF and logPDF, logCDF, logInvCDF
Manual implementations potentially used if needed in pymc3 custom distributions
Helpful to compare these to scipy to confirm my correct implementation
NOTE: I'm lazy and have set m=0 throughout: this suits my usecase anyhow
Ref: https://en.wikipedia.org/wiki/Fréchet_distribution
Ref: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.invweibull.html?highlight=inverse%20weibull
Params: alpha (shape) > 0, s (scale) > 0, m (location of minimum) = 0
Support: x > 0, u in [0, 1]
"""
def __init__(self):
self.name = 'InverseWeibull'
self.notation = {'notation': r'x \sim InverseWeibull(\alpha, s, m=0)'}
self.dist_natural = {
'pdf': r"""f(x \mid \alpha, s, m=0) = \frac{\alpha}{s} \;
\left( \frac{x}{s} \right)^{-1-\alpha} \;
\exp \left( -\left( \frac{x}{s} \right)^{-\alpha} \right)""",
'cdf': r'F(x \mid \alpha, s, m=0) = \exp \left( -\left( \frac{x}{s} \right)^{-\alpha} \right)',
'invcdf': r"""F^{-1}(u \mid \alpha, s, m=0) = s \log(u)^{-\frac{1}{\alpha}}"""}
self.dist_log = {
'logpdf': r"""\log f(x \mid \alpha, s, m=0) = \log{\alpha} - (1+\alpha)\log{x} +
\alpha \log{s} - \left( \frac{x}{s} \right)^{-\alpha}""",
'logcdf': r'\log F(x \mid \alpha, s, m=0) = - \left( \frac{x}{s} \right)^{-\alpha}',
'loginvcdf': r'\log F^{-1}(u \mid \alpha, s, m=0) = \log(s) - \frac{1}{\alpha} * \log(-\log(u))'}
self.conditions = {
'parameters': r"""\alpha > 0 \, \text{(shape)}, \;
s > 0 \, \text{(scale, default } s=1 \text{)}, \;
m \in (-\infty, \infty) \, \text{(location of minimum, default } m=0 \text{)}""",
'support': r'x \in (m, \infty), \; u \sim \text{Uniform([0, 1])}'}
self.summary_stats = {
'mean': r"""
\begin{cases}
m + s \Gamma \left( 1 - \frac{1}{\alpha} \right) & \text{for } \alpha > 1 \\
\infty & \text{otherwise} \\
\end{cases}""",
'mode': r'm + s \left( \frac{\alpha}{1+\alpha} \right)^{1/\alpha}',
'variance': r"""
\begin{cases}
s^{2} \left( \Gamma \left( 1-\frac{2}{\alpha} \right) -
\left( \Gamma \left( 1-\frac{1}{\alpha} \right) \right)^{2}
\right) & \text{for } \alpha > 2 \\
\infty & \text{otherwise}
\end{cases}"""
}
def pdf(self, x, a, s):
"""InverseWeibull PDF
ref: https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L3919
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = (
(a/s) *
np.power(x/s, -1.-a) *
np.exp(-np.power(x/s, -a))
)
return boundzero_numpy(fn, a > 0, s > 0, x > 0)
def cdf(self, x, a, s):
"""InverseWeibull CDF
ref: https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L3926
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = np.exp(-np.power(x/s, -a))
return boundzero_numpy(fn, a > 0, s > 0, x > 0)
def invcdf(self, u, a, s):
"""InverseWeibull Inverse CDF aka PPF:
ref: https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L3930
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = s * np.power(-np.log(u), -1./a)
return boundzero_numpy(fn, a > 0, s > 0, u >= 0, u <= 1)
def logpdf(self, x, a, s):
"""InverseWeibull log PDF
ref: ? manually calced and confirmed vs scipy
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = (
np.log(a) - np.log(s) +
logpow_numpy(x/s, -1.-a) -
np.power(x/s, -a) # this term grossly dominates if a >> 2
)
return boundlog_numpy(fn, a > 0, s > 0, x >= 0)
def logcdf(self, x, a, s):
"""InverseWeibull log CDF
ref: ? manually calced and confirmed vs scipy
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = -np.power(x/s, -a)
return boundlog_numpy(fn, a > 0, s > 0, x >= 0)
def loginvcdf(self, u, a, s):
"""InverseWeibull log Inverse CDF aka log PPF
ref: ? manually calced and confirmed vs scipy
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = np.log(s) - (1./a) * np.log(-np.log(u))
return boundlog_numpy(fn, a > 0, s > 0, u >= 0, u <= 1)
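# --- Cross-check sketch (added; not part of the original repo). The manual
# --- implementations above should agree with scipy.stats.invweibull(c=alpha, scale=s),
# --- which the docstrings reference. Note the np.float alias used in the class is
# --- deprecated in newer NumPy releases.
def _check_invweibull_numpy(alpha=2.5, s=1.3, x=0.8, u=0.35):
    from scipy import stats as _stats
    iw = InverseWeibullNumpy()
    ref = _stats.invweibull(c=alpha, scale=s)
    return (np.isclose(iw.pdf(x, alpha, s), ref.pdf(x)),
            np.isclose(iw.cdf(x, alpha, s), ref.cdf(x)),
            np.isclose(iw.invcdf(u, alpha, s), ref.ppf(u)))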
class ZeroInflatedInverseWeibull(PositiveContinuous):
r"""
    ZeroInflatedInverseWeibull log-likelihood
WIP! Mixture model to allow for observations dominated by zeros such as sev
also see
+ McElreath 2014, http://xcelab.net/rmpubs/Mcelreath%20Koster%202014.pdf,
https://github.com/rmcelreath/mcelreath-koster-human-nature-2014
+ Jones 2013, https://royalsocietypublishing.org/doi/10.1098/rspb.2013.1210
+ https://stackoverflow.com/questions/42409761/pymc3-nuts-has-difficulty-sampling-from-a-hierarchical-zero-inflated-gamma-mode
The pmf of this distribution is
.. math::
f(x \mid \psi, \alpha, s) = \left\{
\begin{array}{l}
(1 - \psi), & \text{if } x = 0 \\
\psi \, \text{InverseWeibull}(\alpha, s), & \text{if } x > 0
\end{array}
\right.
======== ==========================
Support :math:`x \in \mathbb{N}_0`
Mean :math:`\psi \, \text{InverseWeibull}(\mu, \sigma)`
Variance :math: TODO
======== ==========================
Parameters
----------
psi: float
Expected proportion of InverseWeibull variates (0 <= psi <= 1)
alpha: float
s: float
"""
def __init__(self, psi, alpha, s, *args, **kwargs):
super().__init__(*args, defaults=("mode",), **kwargs)
self.psi = psi = tt.as_tensor_variable(floatX(psi))
self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
self.s = s = tt.as_tensor_variable(floatX(s))
self.invweibull = InverseWeibull.dist(alpha=alpha, s=s)
# TODO
#self.mean = self.psi * self.invweibull.mean
self.mode = self.psi * self.invweibull.mode
assert_negative_support(alpha, "alpha", "ZeroInflatedInverseWeibull")
assert_negative_support(s, "s", "ZeroInflatedInverseWeibull")
# def _random(self, psi, size=None):
# """Note by definition any rvs_ from invweibull that are zero will
# correctly remain zero, covering the case x = 0"""
# rvs_ = self.invweibull.random(size=size)
# return rvs_ * psi
def _random(self, psi, size=None):
"""Inputs are numpy arrays"""
rvs_ = self.invweibull.random(size=size)
pi = stats.binom(n=np.repeat([1], len(psi)), p=psi).rvs(len(psi))
return rvs_ * pi
def random(self, point=None, size=None):
"""
Draw random values from ZeroInflatedInverseWeibull PDF distribution.
Parameters
----------
point: dict, optional
Dict of variable values on which random values are to be
conditioned (uses default point if not specified).
size: int, optional
Desired size of random sample (returns one sample if not
specified).
Returns
-------
array
"""
psi, alpha, s = draw_values([self.psi, self.alpha, self.s],
point=point, size=size)
return generate_samples(self._random, psi,
dist_shape=self.shape, size=size)
def logp(self, value):
"""LogPDF"""
psi = self.psi
logp_ = tt.switch(tt.neq(value, 0), # or use tt.gt(value, 0), dunno which faster
tt.log(psi) + self.invweibull.logp(value),
tt.log1p(-psi))
return bound(logp_, value >=0, psi > 0, psi < 1)
def cdf(self, value):
"""CDF"""
psi = self.psi
cdf_ = (1. - psi) * 1 + psi * self.invweibull.cdf(value)
return boundzero_theano(cdf_, value >=0, psi > 0, psi < 1)
def invcdf(self, value):
"""InvCDF aka PPF"""
psi = self.psi
invcdf_ = self.invweibull.invcdf((value + psi - 1) / psi)
return boundzero_theano(invcdf_, value>=0, value<=1, psi > 0, psi < 1)
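# Note on the inverse CDF above (added for clarity): the zero-inflated mixture has
#   F(x) = (1 - psi) + psi * F_iw(x)   for x >= 0,
# so for u > 1 - psi the quantile solves u = (1 - psi) + psi * F_iw(x), giving
#   x = F_iw^{-1}((u + psi - 1) / psi),
# which is exactly the argument passed to self.invweibull.invcdf.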
class ZeroInflatedInverseWeibullNumpy():
"""Zero-inflated Inverse Weibull PDF, CDF, InvCDF and logPDF, logCDF, logInvCDF
Manual implementations potentially used if needed in pymc3 custom distributions
    Helpful to compare these to scipy, though this seems rarely needed
NOTE: I'm lazy and have set m=0 throughout: this suits my usecase anyhow
Ref: https://en.wikipedia.org/wiki/Fréchet_distribution
Ref: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.invweibull.html?highlight=inverse%20weibull
Params: 0 < psi < 1 (prop invweibull), alpha (shape) > 0, s (scale) > 0, m (location of minimum) = 0
Support: x > 0, u in [0, 1]
"""
def __init__(self):
self.name = 'InverseWeibull'
self.notation = {'notation': r'x \sim InverseWeibull(\alpha, s, m=0)'}
self.dist_natural = {
'pdf': r"""f(x \mid \alpha, s, m=0) = \frac{\alpha}{s} \;
\left( \frac{x}{s} \right)^{-1-\alpha} \;
\exp \left( -\left( \frac{x}{s} \right)^{-\alpha} \right)""",
'cdf': r'F(x \mid \alpha, s, m=0) = \exp \left( -\left( \frac{x}{s} \right)^{-\alpha} \right)',
'invcdf': r"""F^{-1}(u \mid \alpha, s, m=0) = s \log(u)^{-\frac{1}{\alpha}}"""}
self.dist_log = {
'logpdf': r"""\log f(x \mid \alpha, s, m=0) = \log{\alpha} - (1+\alpha)\log{x} +
\alpha \log{s} - \left( \frac{x}{s} \right)^{-\alpha}""",
'logcdf': r'\log F(x \mid \alpha, s, m=0) = - \left( \frac{x}{s} \right)^{-\alpha}',
'loginvcdf': r'\log F^{-1}(u \mid \alpha, s, m=0) = \log(s) - \frac{1}{\alpha} * \log(-\log(u))'}
self.conditions = {
'parameters': r"""\alpha > 0 \, \text{(shape)}, \;
s > 0 \, \text{(scale, default } s=1 \text{)}, \;
m \in (-\infty, \infty) \, \text{(location of minimum, default } m=0 \text{)}""",
'support': r'x \in (m, \infty), \; u \sim \text{Uniform([0, 1])}'}
self.summary_stats = {
'mean': r"""
\begin{cases}
m + s \Gamma \left( 1 - \frac{1}{\alpha} \right) & \text{for } \alpha > 1 \\
\infty & \text{otherwise} \\
\end{cases}""",
'mode': r'm + s \left( \frac{\alpha}{1+\alpha} \right)^{1/\alpha}',
'variance': r"""
\begin{cases}
s^{2} \left( \Gamma \left( 1-\frac{2}{\alpha} \right) -
\left( \Gamma \left( 1-\frac{1}{\alpha} \right) \right)^{2}
\right) & \text{for } \alpha > 2 \\
\infty & \text{otherwise}
\end{cases}"""
}
def pdf(self, x, a, s):
"""InverseWeibull PDF
ref: https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L3919
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = (
(a/s) *
np.power(x/s, -1.-a) *
np.exp(-np.power(x/s, -a))
)
return boundzero_numpy(fn, a > 0, s > 0, x > 0)
def cdf(self, x, a, s):
"""InverseWeibull CDF
ref: https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L3926
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = np.exp(-np.power(x/s, -a))
return boundzero_numpy(fn, a > 0, s > 0, x > 0)
def invcdf(self, u, a, s):
"""InverseWeibull Inverse CDF aka PPF:
ref: https://github.com/scipy/scipy/blob/ab1c0907fe9255582397db04592d6066745018d3/scipy/stats/_continuous_distns.py#L3930
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = s * np.power(-np.log(u), -1./a)
return boundzero_numpy(fn, a > 0, s > 0, u >= 0, u <= 1)
def logpdf(self, x, a, s):
"""InverseWeibull log PDF
ref: ? manually calced and confirmed vs scipy
"""
a = np.array(a).astype(np.float) #, casting='no')
s = np.array(s).astype(np.float) #, casting='no')
fn = (
np.log(a) - np.log(s) +
logpow_numpy(x/s, -1.-a) -
np.power(x/s, -a) # this term grossly dominates if a >> 2
)
return boundlog_numpy(fn, a > 0, s > 0, x >= 0)
def logcdf(self, x, a, s):
"""InverseWeibull log CDF
ref: ? manually calced and confirmed vs scipy
"""
        # (continuation assumed to mirror InverseWeibullNumpy.logcdf above)
        a = np.array(a).astype(np.float)  #, casting='no')
        s = np.array(s).astype(np.float)  #, casting='no')
        fn = -np.power(x/s, -a)
        return boundlog_numpy(fn, a > 0, s > 0, x >= 0)
from sklearn.linear_model import LassoCV
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
from xgboost import XGBRegressor
from sklearn.utils import resample
import numpy as np
from FeatureSelection import get_features
from scipy.stats import pearsonr
import tensorflow as tf
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.python.keras.applications.densenet import layers
from data_helper import get_intersecting_gene_ids_and_data
from abc import ABC, abstractmethod
import argparse
import datetime
import sys
import warnings
def get_rmse(pred, true):
return (sum((pred - true) ** 2) / len(true)) ** 0.5
class KNNFeatureModel(ABC):
use_knn = None
knn_model = None
sclr_knn = None
def __init__(self):
super().__init__()
@abstractmethod
def train_inner(self, X, y):
pass
@abstractmethod
def predict_inner(self, X, use_std):
pass
def predict(self, X):
if self.use_knn:
X = self.enrich_with_knn(X)
return self.predict_inner(X)
def predict_with_std(self, X):
if self.use_knn:
X = self.enrich_with_knn(X)
return self.predict_inner(X, use_std=True)
def train(self, X, y, use_knn):
self.use_knn = use_knn
if use_knn:
X = self.add_knn_model(X, y)
return self.train_inner(X, y)
def enrich_with_knn(self, X):
knn_out = self.knn_model.predict(X)
# A = self.knn_model.model.kneighbors_graph(X)
X = np.hstack((X, np.array([knn_out]).T))
return X
def add_knn_model(self, x_train, train_y):
knn_model = train_model(x_train, train_y, 'knn', False)
self.knn_model = knn_model
x_train = self.enrich_with_knn(x_train)
return x_train
class XgBoost:
model = None
def __init__(self):
self.model = None
def train(self, X, y):
x_train, x_validation, y_train, y_validation = train_test_split(X, y, test_size=0.1, random_state=0)
max_depth_l = [5]
n_estimators_l = [500]
learning_rate_l = [0.1, 0.2, 0.05]
min_rmse = 10000
min_m = -1
min_n_est = -1
min_lr = -1
eval_set = [(x_train, y_train), (x_validation, y_validation)]
for m in max_depth_l:
for n_estimator in n_estimators_l:
for lr in learning_rate_l:
model = XGBRegressor(max_depth=m, seed=0, n_estimators=n_estimator, learning_rate=lr, n_jobs=1)
model = model.fit(x_train, y_train, eval_set=eval_set, early_stopping_rounds=40, verbose=False)
val_pred = model.predict(x_validation).flatten()
rmse = get_rmse(val_pred, y_validation)
if rmse < min_rmse:
min_m = m
min_lr = lr
min_n_est = n_estimator
min_rmse = rmse
model = XGBRegressor(max_depth=min_m, seed=0, n_estimators=min_n_est, learning_rate=min_lr, n_jobs=1)
model = model.fit(x_train, y_train, eval_set=eval_set, early_stopping_rounds=40, verbose=False)
self.model = model
def predict(self, X):
return self.model.predict(X).flatten()
class LinearModel:
model = None
sclr = None
def __init__(self):
self.model = None
self.sclr = None
def train(self, X, y):
self.sclr = StandardScaler()
self.sclr = self.sclr.fit(X)
X = self.sclr.transform(X)
model = LassoCV(cv=3, random_state=0)
self.model = model.fit(X, y)
def predict(self, X):
X = self.sclr.transform(X)
return self.model.predict(X).flatten()
class GaussianProcessRegressionModel(KNNFeatureModel):
model = None
sclr = None
def train_inner(self, X, y):
self.sclr = StandardScaler()
kernel = RBF() # + WhiteKernel()
self.sclr = self.sclr.fit(X)
X = self.sclr.transform(X)
model = GaussianProcessRegressor(kernel=kernel, random_state=0)
self.model = model.fit(X, y)
# def train(self, X, y, use_knn=False):
# if use_knn:
# X = self.add_knn_model(X, y)
# self.sclr = StandardScaler()
# kernel = RBF()# + WhiteKernel()
# self.sclr = self.sclr.fit(X)
# X = self.sclr.transform(X)
# model = GaussianProcessRegressor(kernel=kernel, random_state=0)
# self.model = model.fit(X, y)
def predict_inner(self, X, use_std=False):
X = self.sclr.transform(X)
return self.model.predict(X, return_std=use_std)
# def enrich_with_knn(self, X):
# knn_out = self.knn_model.predict(X)
# X = np.hstack((X, np.array([knn_out]).T))
# return X
class DeepLearning:
model = None
def __init__(self):
self.model = None
def build_and_compile_model(self, norm, l2_reg=0.0001):
regularizer = tf.keras.regularizers.L2(
l2=l2_reg
)
model = tf.keras.Sequential([
norm,
layers.Dense(50, activation='relu', kernel_regularizer=regularizer),
layers.Dropout(0.4),
layers.Dense(20, activation='relu', kernel_regularizer=regularizer),
layers.Dropout(0.2),
layers.Dense(15, activation='relu', kernel_regularizer=regularizer),
layers.Dropout(0.1),
layers.Dense(12, activation='relu', kernel_regularizer=regularizer),
layers.Dense(1, activation='linear')
])
model.compile(loss='mean_squared_error',
optimizer=tf.keras.optimizers.Adam(0.001))
return model
def train(self, X, y):
x_train, x_validation, y_train, y_validation = train_test_split(X, y, test_size=0.1, random_state=0)
normalizer = preprocessing.Normalization(input_shape=[x_train.shape[1], ])
normalizer.adapt(np.array(X))
dnn_model = self.build_and_compile_model(normalizer)
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=100)
validation_data = (x_validation, y_validation)
history = dnn_model.fit(
x_train, y_train,
validation_split=0.1, validation_data=validation_data,
verbose=0, epochs=1500, callbacks=[callback])
self.model = dnn_model
def predict(self, X):
return self.model.predict(X).flatten()
class KNNModel:
model = None
def train(self, X, y, k=3):
model = KNeighborsRegressor(n_neighbors=k, weights='distance')
self.model = model.fit(X, y)
def predict(self, X):
return self.model.predict(X).flatten()
class Ensemble:
members = []
def __init__(self):
self.members = []
def train(self, X, y):
n_splits = 4
num_rows = X.shape[0]
train_rows = int(num_rows * 0.9)
is_xg_boost = True
for _ in range(n_splits):
# select indexes
ix = [i for i in range(num_rows)]
train_ix = resample(ix, replace=True, n_samples=train_rows)
test_ix = [x for x in ix if x not in train_ix]
train_ix = sorted(list(set(train_ix)))
# select data
trainX = X[train_ix, :]
trainy = y[train_ix]
testX = X[test_ix, :]
testy = y[test_ix]
# evaluate model
if is_xg_boost:
cur_model = XgBoost()
cur_model.train(trainX, trainy)
is_xg_boost = False
else:
cur_model = DeepLearning()
cur_model.train(trainX, trainy)
is_xg_boost = True
# print('>%.3f' % test_acc)
# scores.append(test_acc)
self.members.append(cur_model)
def predict(self, X):
yhats = [model.predict(X).flatten() for model in self.members]
yhats = np.array(yhats)
# sum across ensemble members
summed = np.sum(yhats, axis=0)
# argmax across classes
result = summed / len(self.members)
return result
class ChooseBest:
model = None
def __init__(self):
self.model = None
def train(self, X, y):
x_train, x_validation, y_train, y_validation = train_test_split(X, y, test_size=0.1, random_state=0)
min_rmse = 10000
min_model = ""
for model_name, model_method in model_train_method_for_choose_best.items():
m = model_method(x_train, y_train)
vals_pred = m.predict(x_validation)
val_rmse = get_rmse(vals_pred, y_validation)
if val_rmse < min_rmse:
min_rmse = val_rmse
min_model = model_name
self.model = model_train_method[min_model](X, y)
def predict(self, X):
return self.model.predict(X)
def train_linear(X, y, use_knn=False):
m = LinearModel()
m.train(X, y)
return m
def train_deep_learning(X, y, use_knn=False):
m = DeepLearning()
m.train(X, y)
return m
def train_xgboost(X, y, use_knn=False):
m = XgBoost()
m.train(X, y)
return m
def train_gp(X, y, use_knn=True):
m = GaussianProcessRegressionModel()
m.train(X, y, use_knn)
return m
def train_knn(X, y, use_knn=False):
m = KNNModel()
m.train(X, y)
return m
def train_ensemble(X, y, use_knn=False):
m = Ensemble()
m.train(X, y)
return m
def train_best_using_validation(X, y, use_knn=False):
m = ChooseBest()
m.train(X, y)
return m
model_train_method = {
'linear': train_linear,
'xg_boost': train_xgboost,
'deep': train_deep_learning,
'ensemble': train_ensemble,
'GP': train_gp,
'choose_best': train_best_using_validation,
'knn': train_knn
}
model_train_method_for_choose_best = {
'linear': train_linear,
'xg_boost': train_xgboost,
'ensemble': train_ensemble,
'GP': train_gp,
}
def train_model(X, y, model_name, use_knn=False):
"""Trains a ML model to predict y based on X input.
Parameters
----------
X : pd.DataFrame
input data used for training
y : np.array with shape (1, n)
the target variable
model_name : string
The name of the type of model desired to train.
Options are 'linear', 'xg_boost', 'deep', 'ensemble', 'choose_best', 'GP', 'knn'
"""
return model_train_method[model_name](X, y, use_knn)
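# --- Illustrative usage sketch (added; not part of the original repo). X_demo and
# --- y_demo are hypothetical synthetic data; any key of model_train_method can be
# --- passed as model_name.
def _demo_train_model(seed=0):
    rng_demo = np.random.RandomState(seed)
    X_demo = rng_demo.normal(size=(100, 5))
    y_demo = 2.0 * X_demo[:, 0] + rng_demo.normal(scale=0.1, size=100)
    model = train_model(X_demo, y_demo, 'linear')
    return get_rmse(model.predict(X_demo), y_demo)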
def cross_validation_eval(achilles_effect, expression_dat, target_gene_name, cross_validation_df, model_name,
achilles_id_name='DepMap_ID', expression_id_name='Unnamed: 0'):
"""Trains a ML model to predict y based on X input using cross validation
and prints the final cross validated pearson correlations and RMSE.
Parameters
----------
achilles_effect : pd.DataFrame
contains at least two columns, cell id column and target gene achilles scores
expression_dat : pd.DataFrame
expression data of all genes to be used for input to ML
target_gene_name: String
name of target gene column in achilles_effect dataframe
cross_validation_df : pd.DataFrame
columns represent cell ids except for the first column which represents which rows
are train and which rows are test
model_name : string
The name of the type of model desired to train.
Options are 'linear', 'xg_boost', 'deep', 'ensemble', 'choose_best', 'GP'
achilles_id_name : string
The column name of cell line id column in the achilles data set
expression_id_name : string
The column name of cell line id column in the expression data set
"""
test_start_idx = 0
for state in list(cross_validation_df.state):
if state == "test":
break
test_start_idx += 1
rmse_sum = 0
fold_count = 0
pearson_corr_pred_sum = 0
model_failed = False
for fold_col in cross_validation_df.columns[1:]:
fold_count += 1
cur_ids = list(cross_validation_df[fold_col])
train_ids = set(cur_ids[0:test_start_idx])
test_ids = set(cur_ids[test_start_idx:])
train_achilles = achilles_effect.loc[achilles_effect[achilles_id_name].isin(train_ids)]
test_achilles = achilles_effect.loc[achilles_effect[achilles_id_name].isin(test_ids)]
train_achilles = train_achilles.sort_values(by=['DepMap_ID'])
test_achilles = test_achilles.sort_values(by=['DepMap_ID'])
train_y = train_achilles[target_gene_name]
test_y = test_achilles[target_gene_name]
train_expression = expression_dat.loc[expression_dat[expression_id_name].isin(train_ids)]
test_expression = expression_dat.loc[expression_dat[expression_id_name].isin(test_ids)]
train_expression = train_expression.sort_values(by=['Unnamed: 0'])
test_expression = test_expression.sort_values(by=['Unnamed: 0'])
expression_feature_indices = get_features(train_y, train_expression, 20)
in_use_gene_names = train_expression.columns[expression_feature_indices]
x_train = train_expression[in_use_gene_names]
x_train = np.array(x_train)
train_y = np.array(train_y)
x_test = test_expression[in_use_gene_names]
        x_test = np.array(x_test)
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
import gen_data
import qp_solver
import util
import parametric_si
def compute_c_d(X, a, b, p, lamda):
dim_beta = p
dim_z = p - 1
no_vars = p + 2 * dim_z
e_1 = lamda * np.hstack((np.zeros(dim_beta), np.ones(2 * dim_z)))
e_2 = np.hstack((np.dot(X.T, a).flatten(), np.zeros(2 * dim_z)))
c = e_1 - e_2
d = np.hstack((-np.dot(X.T, b).flatten(), np.zeros(2 * dim_z)))
c = c.reshape((no_vars, 1))
d = d.reshape((no_vars, 1))
return c, d
def construct_P_q_G_h_A_b(X, y, D, n, p, lamda):
dim_beta = p
dim_z = p - 1
no_vars = p + 2 * dim_z
# construct P
e_1 = np.hstack((X, np.zeros((n, 2 * dim_z))))
P = np.dot(e_1.T, e_1)
# construct q
e_1 = lamda * np.hstack((np.zeros(dim_beta), np.ones(2 * dim_z)))
e_2 = np.hstack((np.dot(X.T, y).flatten(), np.zeros(2 * dim_z)))
q = e_1 - e_2
q = q.reshape((no_vars, 1))
# construct G
G = np.zeros((no_vars, no_vars))
G[dim_beta:, dim_beta:] = np.zeros((2 * dim_z, 2 * dim_z)) - np.identity(2 * dim_z)
# construct h
h = np.zeros((no_vars, 1))
# construct A
e_1 = np.hstack((np.identity(dim_z), np.zeros((dim_z, dim_z)) - np.identity(dim_z)))
A = np.hstack((-D, e_1))
# construct b
b = np.zeros((D.shape[0], 1))
return P, q, G, h, A, b
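# Note on the construction above (added for clarity): it encodes the generalized
# lasso / fused-lasso problem
#     min_beta  0.5 * ||y - X beta||^2 + lamda * ||D beta||_1
# as a QP over x = [beta, z_pos, z_neg] by splitting D beta = z_pos - z_neg with
# z_pos, z_neg >= 0 (enforced by G, h) and the equality A x = b. P and q give the
# quadratic and linear parts of the objective, up to an additive constant and
# assuming the solver minimises 0.5 * x'P x + q'x.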
def run():
n = 30
p = n
lamda = 5
z_threshold = 20
X = np.identity(n)
D = (np.diag([-1] * n, k=0) + np.diag([1] * (n - 1), k=1))[:-1]
true_y = np.zeros(n)
true_y[10:20] += 2
y = true_y + np.random.normal(0, 1, n)
y = y.reshape((n, 1))
true_y = true_y.reshape((n, 1))
P, q, G, h, A, b = construct_P_q_G_h_A_b(X, y, D, n, p, lamda)
dim_x = p + 2 * (p - 1)
x, prob = qp_solver.run(P, q, G, h, A, b, dim_x)
x = x.value
beta = x[0:p]
list_cp = util.find_list_cp(beta, p)
if len(list_cp) == 2:
return None
cp_selected_idx = np.random.randint(1, len(list_cp) - 1)
cp_selected = list_cp[cp_selected_idx]
pre_cp = list_cp[cp_selected_idx - 1]
next_cp = list_cp[cp_selected_idx + 1]
n_s_1 = cp_selected - pre_cp
n_s_2 = next_cp - cp_selected
etaj = np.zeros(n)
etaj[pre_cp + 1 : cp_selected + 1] = np.ones(n_s_1) / n_s_1
    etaj[cp_selected + 1 : next_cp + 1] = -np.ones(n_s_2) / n_s_2  # contrast of segment means (mirrors the n_s_1 term above)
from anntools import Collection
from pathlib import Path
from ner_utils import load_training_entities, load_testing_entities, postprocessing_labels1, get_char2idx, \
train_by_shape, predict_by_shape, convert_to_str_label
from base_clsf import BaseClassifier
import score
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, LSTM, TimeDistributed, Bidirectional, Input, Embedding, concatenate, \
Masking
from tensorflow.keras.losses import categorical_crossentropy
from utils import weighted_loss, detect_language, nlp_es, nlp_en
# from keras_crf import CRF
# from keras_crf import CRF
import numpy as np
import fasttext
import time
import json, pickle
class NERClassifier(BaseClassifier):
"""Classifier for the name entity resolution task"""
def __init__(self):
BaseClassifier.__init__(self)
self.n_tags = 6
self.n_entities = 4
self.encoder_tags = LabelEncoder()
self.encoder_entities = LabelEncoder()
# self.ScieloSku = fasttext.load_model("./Scielo_cbow_cased.bin")
def train(self, collection: Collection):
"""
Wrapper function where of the process of training is done
"""
features, X_char, tags, entities = self.get_sentences(collection)
X, (y_tags, y_entities) = self.preprocessing(features, (tags, entities))
self.get_model()
return self.fit_model((X, X_char), (y_tags, y_entities))
def get_model(self):
"""
Construct the neural network architecture using the keras functional api.
        `mode` is the merge mode used to join the LSTMs in the bidirectional layer (it is not currently used)
"""
# input for words
inputs = Input(shape=(None, self.n_features))
# outputs = Embedding(input_dim=35179, output_dim=20,
# emb_in = Input(shape=(None, 300))
# # input_length=self.X_shape[1], mask_zero=True)(inputs) # 20-dim embedding
# emb_mask = Masking(mask_value=0, input_shape=(None, 10))(emb_in)
# input for characters
char_in = Input(shape=(None, 10))
# inputs of the embeddings
emb_char = TimeDistributed(Embedding(input_dim=254, output_dim=10,
input_length=10, mask_zero=True))(char_in)
# character LSTM to get word encoding by characters
char_enc = TimeDistributed(LSTM(units=20, return_sequences=False, recurrent_dropout=0.5))(emb_char)
# main LSTM
x = concatenate((inputs, char_enc))
x = Bidirectional(LSTM(units=32, return_sequences=True,
recurrent_dropout=0.1))(x) # variational biLSTM
x = Bidirectional(LSTM(units=32, return_sequences=True,
recurrent_dropout=0.2, dropout=0.2))(x)
# x = MaxPooling1D()(x)
out1 = TimeDistributed(Dense(self.n_tags, activation="softmax"))(x) # a dense layer as suggested by neuralNer
out2 = TimeDistributed(Dense(self.n_entities, activation="softmax"))(
x) # a dense layer as suggested by neuralNer
# crf = CRF(self.n_labels) # CRF layer
# outputs = crf(outputs) # output
model = Model(inputs=(inputs, char_in), outputs=(out1, out2))
model.compile(optimizer="adam", metrics=self.metrics,
# loss=weighted_loss(categorical_crossentropy, self.weights))
loss=categorical_crossentropy)
model.summary()
self.model = model
def preprocessing(self, features, labels):
"""
        Handles the preprocessing step. The features and labels are converted into vectors
        and their shapes are adjusted.
"""
tags, entities = labels
X = self.preprocess_features(features)
y_tags = self.preprocess_labels(tags, self.encoder_tags)
self.n_tags = y_tags[0].shape[-1]
y_entities = self.preprocess_labels(entities, self.encoder_entities)
self.n_entities = y_entities[0].shape[-1]
# self.get_weights(labels)
return X, (y_tags, y_entities)
def get_sentences(self, collection: Collection):
"""
        Given a collection, the features and labels of its sentences are returned
"""
features = []
tags = []
entities = []
X_char = []
self.char2idx = get_char2idx(collection)
# embedding_vec = []
for sentence in collection:
feat, chars, tag, entity = load_training_entities(sentence, self.char2idx)
features.append(feat)
tags.append(tag)
entities.append(entity)
X_char.append(np.array(chars))
# embedding_vec.append(embedding)
return features, X_char, tags, entities
def get_features(self, collection: Collection):
"""Giving a collection, the features of its sentences are returned"""
features = []
X_char = []
# embedding_vec = []
for sentence in collection:
feat, chars = load_testing_entities(sentence, self.char2idx)
features.append(feat)
X_char.append(chars)
# embedding_vec.append(embedding)
return features, X_char
def fit_model(self, X, y, plot=False):
"""
The model is fitted. The training begins
"""
# hist = self.model.fit(X, y, batch_size=32, epochs=5,
# validation_split=0.2, verbose=1)
# hist = self.model.fit(MyBatchGenerator(X, y, batch_size=30), epochs=5)
X, X_char = X
y_tags, y_entities = y
num_examples = len(X)
# self.model.fit(self.generator(X, y), steps_per_epoch=steps_per_epoch, epochs=5)
x_shapes, x_char_shapes, yt_shapes, ye_shapes = train_by_shape(X, y_tags, y_entities,
X_char)
for shape in x_shapes:
self.model.fit(
#(np.asarray(x_shapes[shape]), np.asarray(x_char_shapes[shape]), np.asarray(my_Embedding_shapes[shape])),
(np.asarray(x_shapes[shape]), np.asarray(x_char_shapes[shape])),
(np.asarray(yt_shapes[shape]), np.asarray(ye_shapes[shape])),
epochs=10)
def test_model(self, collection: Collection) -> Collection:
collection = collection.clone()
features, X_char = self.get_features(collection)
X = self.preprocess_features(features, train=False)
x_shapes, x_char_shapes, indices = predict_by_shape(X, X_char)
pred_tags = []
pred_entities = []
for x_items, x_chars in zip(x_shapes, x_char_shapes):
# pt, pe = self.model.predict((np.asarray(x_items), np.asarray(x_chars), np.asarray(z_items)))
            pt, pe = self.model.predict((np.asarray(x_items), np.asarray(x_chars)))  # assumed to take the same (words, chars) inputs as fit()
import numpy as np
from src.network_elements.network_element import NetworkElement
class LayersLinker(NetworkElement):
def __init__(self, previous_layer_dimension, next_layer_dimension) -> None:
self.previous_layer_dimension = previous_layer_dimension
self.next_layer_dimension = next_layer_dimension
self.W = self.init_random_uniform_matrix(size=(previous_layer_dimension, next_layer_dimension))
self.B = self.init_random_uniform_matrix(size=(1, next_layer_dimension))
self.previous_layer_activated_output = None
self.dLdW = None
self.dLdB = None
def init_random_uniform_matrix(self, size):
low = - np.sqrt(1 / np.sum(size))
        high = np.sqrt(1 / np.sum(size))
        return np.random.uniform(low=low, high=high, size=size)  # assumed continuation: uniform init in [low, high]
"""Draw predicted or ground truth boxes on input image."""
import imghdr
import colorsys
import random
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from tensorflow.keras import backend as K
from functools import reduce
def preprocess_image(img_path, model_image_size):
image_type = imghdr.what(img_path)
image = Image.open(img_path)
resized_image = image.resize(tuple(reversed(model_image_size)), Image.BICUBIC)
image_data = np.array(resized_image, dtype='float32')
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
return image, image_data
def compose(*funcs):
"""Compose arbitrarily many functions, evaluated left to right.
Reference: https://mathieularose.com/function-composition-in-python/
"""
# return lambda x: reduce(lambda v, f: f(v), funcs, x)
if funcs:
return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
else:
raise ValueError('Composition of empty sequence not supported.')
def read_classes(classes_path):
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def read_anchors(anchors_path):
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
anchors = np.array(anchors).reshape(-1, 2)
return anchors
def scale_boxes(boxes, image_shape):
""" Scales the predicted boxes in order to be drawable on the image"""
height = image_shape[0]
width = image_shape[1]
image_dims = K.stack([height, width, height, width])
image_dims = K.reshape(image_dims, [1, 4])
boxes = boxes * image_dims
return boxes
def get_colors_for_classes(num_classes):
"""Return list of random colors for number of classes given."""
# Use previously generated colors if num_classes is the same.
if (hasattr(get_colors_for_classes, "colors") and
len(get_colors_for_classes.colors) == num_classes):
return get_colors_for_classes.colors
hsv_tuples = [(x / num_classes, 1., 1.) for x in range(num_classes)]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
colors))
random.seed(10101) # Fixed seed for consistent colors across runs.
random.shuffle(colors) # Shuffle colors to decorrelate adjacent classes.
random.seed(None) # Reset seed to default.
get_colors_for_classes.colors = colors # Save colors for future calls.
return colors
def draw_boxes(image, boxes, box_classes, class_names, scores=None):
"""Draw bounding boxes on image.
Draw bounding boxes with class name and optional box score on image.
Args:
image: An `array` of shape (width, height, 3) with values in [0, 1].
boxes: An `array` of shape (num_boxes, 4) containing box corners as
(y_min, x_min, y_max, x_max).
box_classes: A `list` of indicies into `class_names`.
class_names: A `list` of `string` class names.
`scores`: A `list` of scores for each box.
Returns:
A copy of `image` modified with given bounding boxes.
"""
#image = Image.fromarray(np.floor(image * 255 + 0.5).astype('uint8'))
font = ImageFont.truetype(
font='font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
colors = get_colors_for_classes(len(class_names))
for i, c in list(enumerate(box_classes)):
box_class = class_names[c]
box = boxes[i]
if isinstance(scores, np.ndarray):
score = scores[i]
label = '{} {:.2f}'.format(box_class, score)
else:
label = '{}'.format(box_class)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
        right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
import numpy as np
import csv
import matplotlib.pyplot as pyplot
# Generates and saves a grouped bar graph for the data provided in rtCycles paramter
def gBarGraph(rtCycles=[],
xLabel=['W1', 'W2', 'W3', 'W4', 'W5', 'W6', 'W7'],
figName='outputs/figures/cycles.png',
dfList=["os", "ws", "is"],
adList=[[128,128], [64,64], [32,32], [16,16], [8,8]],
debug=False):
rtcShape = rtCycles.shape
barWidth = 0.25
barColor = ['#4F81BD', '#9F4C7C', '#9BBB59']
fig, axes = pyplot.subplots(1,rtcShape[0], figsize=(rtcShape[0]*3+1,3))
# sfLable = [chr(alph) for alph in range(ord('a'),ord('a')+rtcShape[0])]
x = np.arange(rtcShape[2])
for spIndex in range(rtcShape[0]) :
for i in range(rtcShape[1]) :
axes[spIndex].bar(x+i*barWidth, rtCycles[spIndex][i], color =barColor[i], width = barWidth, zorder=3)
axes[spIndex].grid(True, axis='y', zorder=0)
# axes[spIndex].set_title('Cycles for array of size 128*128')
axes[spIndex].set_xlabel(adList[spIndex])
# axes[spIndex].set_ylabel('Runtime in million cycles')
axes[spIndex].set_xticks([x1+barWidth for x1 in x])
axes[spIndex].set_xticklabels(xLabel)
axes[0].set_ylabel('Runtime in million cycles')
pyplot.legend(labels = dfList, loc = (1,0.7))
pyplot.tight_layout()
pyplot.savefig(figName, transparent = False, format='png', orientation = 'landscape', dpi=300)
if debug:
pyplot.show()
pyplot.close(fig=None)
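# Illustrative call sketch (added; not in the original repo): rtCycles is expected
# with shape (len(adList), len(dfList), len(xLabel)); the random array below is
# purely for demonstration and the default figName path must exist before saving.
# gBarGraph(rtCycles=np.random.rand(5, 3, 7), debug=False)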
# Summation of results of different layers in a NN
# Returns 2 variables:
# 1. rtCycles: An nd array. Each element is the sum corresponding to one ScaleSIM run
# 2. mdfList: A list of files, which represents ScaleSIM runs
# that are either not ran properly or the results are not stored properly
def sum_gen(adList=[], dfList=[], nnList=[], layerCount=[], rootFolder='./', typeOfData='cycles', scaleFac=10**6, debug=False):
adCount = len(adList)
dfCount = len(dfList)
nnCount = len(nnList)
rtCycles = np.zeros((adCount, dfCount, nnCount)) # Array of runtime cycles for each execution.
mdfList = [] # List of Missing data files
for adIndex, ad in enumerate(adList) :
for dfIndex, df in enumerate(dfList) :
for nnIndex, nn in enumerate(nnList) :
runFolder = nn + "_" + str(ad[0]) + "_" + str(ad[0]) + "_" + df + "/"
fileName = rootFolder + runFolder + "outputs/" + runFolder + nn + "_" + typeOfData + ".csv"
#File Parsing and data Processing
with open(fileName, mode = 'r') as file :
fileContent = csv.DictReader(file)
lineCount = 0
totalCycles = 0
for line in fileContent:
if (lineCount == 0) :
lineCount += 1 #Added due to dictReader. To count the line with keys
totalCycles += int(line[" Cycles"])
lineCount += 1
if (lineCount != (layerCount[nnIndex] + 1)) :
mdfList.append(fileName)
if (debug) :
print(f'Total Cycles\t: {totalCycles}')
print(f'Lines read\t: {lineCount}')
rtCycles[adIndex][dfIndex][nnIndex] = totalCycles/scaleFac
if debug:
print(f'{rtCycles}')
return rtCycles, mdfList
#Generates and saves a scatter plot for the data provided in ratioSuSo
def scatterPlot(ratioSUSO=[],
xLabel=[],
legendList=[],
figName='outputs/figures/ratioSUSO.png',
dfList=["os", "ws", "is"],
debug=False
):
rtcShape = ratioSUSO.shape
lineColor = ['#4F81BD', '#9F4C7C', '#9BBB59','#C0504D', '#FF9933', '#006666', '#404040']
x = np.arange(rtcShape[0])
# sfLable = [chr(alph) for alph in range(ord('a'),ord('a')+rtcShape[1])]
# Generate Connected scatter plot
fig, axes = pyplot.subplots(1,rtcShape[1], figsize=(rtcShape[1]*3+1,3))
for dfIndex in range(rtcShape[1]):
for i in range(rtcShape[2]):
            axes[dfIndex].plot(np.flipud(x), ratioSUSO[:, dfIndex, i],
                               color=lineColor[i], marker='o')  # assumed data slice and styling
# -*- coding: utf-8 -*-
"""
Example: Vedo visualisation
Example of using vedo to visualise results from a structured 3D problem
"""
import sys
import numpy as np
from vedo import *
import vtk
import calfem.geometry as cfg
import calfem.mesh as cfm
import calfem.vis as cfv
import calfem.core as cfc
import calfem.utils as cfu
import calfem.vis_vedo_utils as cfvu
# ---- Define geometry ------------------------------------------------------
print("Defining geometry...")
g = cfg.geometry()
# Add Points
l = 5.0
h = 0.5
w = 0.3
n_el_x = 5
n_el_y = 5
n_el_z = 50
marker_fixed_left = 45
marker_fixed_right = 46
marker_top = 47
right_support = 48
g.point([0, 0, 0], 0)
g.point([0.0, 0.0, w/2.0], 1)
g.point([0, 0, w], 2)
g.point([l, 0, w], 3)
g.point([l, 0, 0], 4, marker=11) # Set some markers no reason.
g.point([0, h, 0], 5, marker=11) # (markers can be given to points as well
# as curves and surfaces)
g.point([0, h, w], 6, marker=11)
g.point([l, h, w], 7)
g.point([l, h, 0], 8)
# Add splines
g.spline([0, 1, 2], 0, marker=33, el_on_curve=n_el_x)
g.spline([2, 3], 1, marker=23, el_on_curve=n_el_z)
g.spline([3, 4], 2, marker=right_support, el_on_curve=n_el_x)
g.spline([4, 0], 3, el_on_curve=n_el_z)
g.spline([0, 5], 4, el_on_curve=n_el_y)
g.spline([2, 6], 5, el_on_curve=n_el_y)
g.spline([3, 7], 6, el_on_curve=n_el_y)
g.spline([4, 8], 7, el_on_curve=n_el_y)
g.spline([5, 6], 8, el_on_curve=n_el_x)
g.spline([6, 7], 9, el_on_curve=n_el_z)
g.spline([7, 8], 10, el_on_curve=n_el_x)
g.spline([8, 5], 11, el_on_curve=n_el_z)
# Add surfaces
g.structuredSurface([0, 1, 2, 3], 0)
g.structuredSurface([8, 9, 10, 11], 1, marker=marker_top)
g.structuredSurface([0, 4, 8, 5], 2, marker=marker_fixed_left)
g.structuredSurface([1, 5, 9, 6], 3)
g.structuredSurface([2, 6, 10, 7], 4, marker=marker_fixed_right)
g.structuredSurface([3, 4, 11, 7], 5)
g.structuredVolume([0, 1, 2, 3, 4, 5], 0, marker=90)
# ---- Create mesh ----------------------------------------------------------
# Element type 5 is hexahedron. (See user manual for more element types)
el_type = 5
dofs_per_node = 3
el_nodes = 8
# Create mesh
print("Generating mesh...")
coords, edof, dofs, bdofs, elementmarkers = cfm.mesh(
g, el_type, 1, dofs_per_node)
print("Extracting element coordinates...")
ex, ey, ez = cfc.coord_extract(edof, coords, dofs)
t = 0.2
v = 0.35
E = 2e9
ep = [3]
D = cfc.hooke(4, E, v)
# ---- Solve problem --------------------------------------------------------
print("Assembling stiffness matrix...")
n_dofs = np.size(dofs)
K = np.zeros((n_dofs, n_dofs))
for eltopo, elx, ely, elz, el_marker in zip(edof, ex, ey, ez, elementmarkers):
Ke = cfc.soli8e(elx, ely, elz, [2], D)
cfc.assem(eltopo, K, Ke)
bc = np.array([], 'i')
bcVal = np.array([], 'i')
bc, bcVal = cfu.apply_bc_3d(bdofs, bc, bcVal, marker_fixed_left, 0.0)
#bc, bcVal = cfu.apply_bc_3d(bdofs, bc, bcVal, marker_fixed_right, 0.0)
#bc, bcVal = cfu.apply_bc_3d(bdofs, bc, bcVal, right_support, 0.0, dimension=2)
#bc, bcVal = cfu.apply_bc_3d(bdofs, bc, bcVal, right_support, 0.0, dimension=3)
f = np.zeros([n_dofs, 1])
cfu.apply_force_total_3d(bdofs, f, marker_top, value=-10e5, dimension=2)
print("Solving equation system")
a, r = cfc.solveq(K, f, bc, bcVal)
print("Extracting element displacements")
ed = cfc.extract_eldisp(edof, a)
# ---- Extract element forces -----------------------------------------------
print("Calculating element forces")
# We can visualise stresses per element or per node
#
# sigv     - von Mises stress per element (mean over the integration points)
# sigv_hex - von Mises stresses per element node
# sigv_el  - all von Mises stresses at the integration points
#
# cfvu.von_mises_3d(...) calculates the von Mises stresses at the
# integration points
#
# cfvu.sigv_to_hex(...) extracts the von Mises stresses at the nodes
# of each element
n_ip = ep[0]*ep[0]*ep[0]
sigv = np.zeros(ed.shape[0])
sigv_hex = np.zeros((ed.shape[0], el_nodes))
sigv_el = np.zeros((ed.shape[0], n_ip))
i = 0
for elx, ely, elz, eld in zip(ex, ey, ez, ed):
et, es, eci = cfc.soli8s(elx, ely, elz, ep, D, eld)
sigv[i] = np.mean(cfvu.von_mises_3d(es))
sigv_hex[i] = cfvu.sigv_to_hex(cfvu.von_mises_3d(es))
sigv_el[i] = cfvu.von_mises_3d(es)
i += 1
print("Max mises stress = ", np.max(sigv_el))
print("Min mises stress = ", np.min(sigv_el))
# ---- Visualisation --------------------------------------------------------
print("Visualising results...")
nodes, topo, node_dofs, node_displ, node_scalars = cfvu.convert_to_node_topo(
edof, ex, ey, ez, ed, sigv_hex, dofs_per_node=3)
npoint = nodes.shape[0]
nel = topo.shape[0]
nnd = topo.shape[1]
if nnd == 4:
ct = vtk.VTK_TETRA
elif nnd == 8:
ct = vtk.VTK_HEXAHEDRON
else:
print("Topology not supported.")
sys.exit(1)
celltypes = [ct] * nel
max_deflection = | np.max(node_displ) | numpy.max |
# -*- coding: utf-8 -*-
from __future__ import print_function
import random
import time
import numpy as np
from collections import defaultdict, deque
from quoridor import Quoridor
from policy_value_net import PolicyValueNet
from mcts import MCTSPlayer
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from constant import *
iter_count = 0
writer = SummaryWriter()
class TrainPipeline(object):
def __init__(self, init_model=None):
self.game = Quoridor()
self.learn_rate = 2e-3
self.lr_multiplier = 1.0
self.temp = 1.0
self.n_playout = 200
self.c_puct = 5
self.buffer_size = 10000
self.data_buffer = deque(maxlen=self.buffer_size)
self.play_batch_size = 1
self.kl_targ = 0.02
self.check_freq = 10
self.game_batch_num = 1000
self.best_win_ratio = 0.0
self.pure_mcts_playout_num = 1000
self.old_probs = 0
self.new_probs = 0
self.first_trained = False
if init_model:
self.policy_value_net = PolicyValueNet(model_file=init_model)
else:
self.policy_value_net = PolicyValueNet()
self.mcts_player = MCTSPlayer(self.policy_value_net.policy_value_fn, c_puct=self.c_puct,
n_playout=self.n_playout, is_selfplay=1)
def get_equi_data(self, play_data):
extend_data = []
for i, (state, mcts_prob, winner) in enumerate(play_data):
wall_state = state[:3,:BOARD_SIZE - 1,:BOARD_SIZE - 1]
dist_state1 = np.reshape(state[(6 + (WALL_NUM + 1) * 2), :BOARD_SIZE, :BOARD_SIZE], (1, BOARD_SIZE, BOARD_SIZE))
dist_state2 = np.reshape(state[(7 + (WALL_NUM + 1) * 2), :BOARD_SIZE, :BOARD_SIZE], (1, BOARD_SIZE, BOARD_SIZE))
# horizontally flipped game
flipped_wall_state = []
for i in range(3):
wall_padded = np.fliplr(wall_state[i])
wall_padded = np.pad(wall_padded, (0,1), mode='constant', constant_values=0)
flipped_wall_state.append(wall_padded)
flipped_wall_state = np.array(flipped_wall_state)
player_position = state[3:5, :,:]
flipped_player_position = []
for i in range(2):
flipped_player_position.append(np.fliplr(player_position[i]))
flipped_player_position = np.array(flipped_player_position)
h_equi_state = np.vstack([flipped_wall_state, flipped_player_position, state[5:, :,:]])
h_equi_mcts_prob = np.copy(mcts_prob)
h_equi_mcts_prob[11] = mcts_prob[10] # SE to SW
h_equi_mcts_prob[10] = mcts_prob[11] # SW to SE
h_equi_mcts_prob[9] = mcts_prob[8] # NE to NW
h_equi_mcts_prob[8] = mcts_prob[9] # NW to NE
h_equi_mcts_prob[7] = mcts_prob[6] # EE to WW
h_equi_mcts_prob[6] = mcts_prob[7] # WW to EE
h_equi_mcts_prob[3] = mcts_prob[2] # E to W
h_equi_mcts_prob[2] = mcts_prob[3] # W to E
h_wall_actions = h_equi_mcts_prob[12:12 + (BOARD_SIZE-1) ** 2].reshape(BOARD_SIZE-1, BOARD_SIZE-1)
v_wall_actions = h_equi_mcts_prob[12 + (BOARD_SIZE-1) ** 2:].reshape(BOARD_SIZE-1, BOARD_SIZE -1)
flipped_h_wall_actions = np.fliplr(h_wall_actions)
flipped_v_wall_actions = np.fliplr(v_wall_actions)
h_equi_mcts_prob[12:] = np.hstack([flipped_h_wall_actions.flatten(), flipped_v_wall_actions.flatten()])
# Vertically flipped game
flipped_wall_state = []
for i in range(3):
wall_padded = np.flipud(wall_state[i])
wall_padded = np.pad(wall_padded, (0,1), mode='constant', constant_values=0)
flipped_wall_state.append(wall_padded)
flipped_wall_state = np.array(flipped_wall_state)
flipped_player_position = []
for i in range(2):
flipped_player_position.append(np.flipud(player_position[1-i]))
flipped_player_position = | np.array(flipped_player_position) | numpy.array |
# coding: utf-8
# ### Autoencoders and Neural Network for Place recognition with WiFi fingerprints
# Implementation of the algorithm discussed in "Low-effort place recognition with WiFi fingerprints using Deep Learning" (https://arxiv.org/pdf/1611.02049v1.pdf)
# In[13]:
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.preprocessing import scale
import copy
# In[14]:
# Read the data into a pandas DataFrame; the first line holds the column labels
dataset = pd.read_csv("trainingData.csv",header=0)
# iloc indexing: rows first, then columns
features = np.asarray(dataset.iloc[:,0:520])
# Replace the "no signal" placeholder value (100) with -110
features[features == 100] = -110
# Normalizing the data
features = (features - features.mean()) / features.var()
# Extracting the two columns as a concatenated row string
labels = np.asarray(dataset["BUILDINGID"].map(str) + dataset["FLOOR"].map(str))
# One-hot encode the categorical label representation
labels = np.asarray(pd.get_dummies(labels))
#labels = labels[0:1]
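# Example of the label encoding above (values are illustrative): a sample with
# BUILDINGID=1 and FLOOR=2 becomes the combined string label "12"; pd.get_dummies
# then gives every distinct building/floor string its own column, so each row of
# `labels` is a one-hot vector and the network classifies building and floor jointly.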
# #### Dividing UJIndoorLoc training data set into training and validation set
# In[15]:
# Boolean mask: True for rows assigned to the training set, False otherwise
train_val_split = np.random.rand(len(features)) < 0.70
# Generating Training Features and Labels
train_x = features[train_val_split]
train_y = labels[train_val_split]
# Generating Validation Features and Labels
val_x = features[~train_val_split]
val_y = labels[~train_val_split]
# #### Using UJIndoorLoc validation data set as testing set
# In[16]:
# Repeat the above process for testing set
test_dataset = pd.read_csv("validationData.csv",header = 0)
test_features = np.asarray(test_dataset.iloc[:,0:520])
test_features[test_features == 100] = -110
test_features = (test_features - test_features.mean()) / test_features.var()
test_labels = np.asarray(test_dataset["BUILDINGID"].map(str) + test_dataset["FLOOR"].map(str))
test_labels = np.asarray(pd.get_dummies(test_labels))
# In[17]:
# Core Algorithm
# Functions to initialize the Weights between layers and their biases
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev = 0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.0, shape = shape)
return tf.Variable(initial)
# In[54]:
"""
n_input = 520
n_hidden_1 = 256
n_hidden_2 = 128
n_hidden_3 = 64
# Number of Output Classes
n_classes = labels.shape[1]
learning_rate = 0.00001
training_epochs = 30
batch_size = 15
# Number of training examples(rows)
total_batches = train_x.shape[0] // batch_size
# In[55]:
# X is input, random hence shape is none(#Rows),520features
# Y is output, depends on number of X fed
X = tf.placeholder(tf.float32, shape=[None,n_input])
Y = tf.placeholder(tf.float32,[None,n_classes])
# Neural Networks Variables
# Weight_variable and bias_variable are initialization fns
# First one is through truncated normal
# Second is zero constant
# --------------------- Encoder Variables --------------- #
e_weights_h1 = weight_variable([n_input, n_hidden_1])
e_biases_h1 = bias_variable([n_hidden_1])
e_weights_h2 = weight_variable([n_hidden_1, n_hidden_2])
e_biases_h2 = bias_variable([n_hidden_2])
e_weights_h3 = weight_variable([n_hidden_2, n_hidden_3])
e_biases_h3 = bias_variable([n_hidden_3])
# --------------------- Decoder Variables --------------- #
d_weights_h1 = weight_variable([n_hidden_3, n_hidden_2])
d_biases_h1 = bias_variable([n_hidden_2])
d_weights_h2 = weight_variable([n_hidden_2, n_hidden_1])
d_biases_h2 = bias_variable([n_hidden_1])
d_weights_h3 = weight_variable([n_hidden_1, n_input])
d_biases_h3 = bias_variable([n_input])
# --------------------- DNN Variables ------------------ #
dnn_weights_h1 = weight_variable([n_hidden_3, n_hidden_2])
dnn_biases_h1 = bias_variable([n_hidden_2])
dnn_weights_h2 = weight_variable([n_hidden_2, n_hidden_2])
dnn_biases_h2 = bias_variable([n_hidden_2])
dnn_weights_out = weight_variable([n_hidden_2, n_classes])
dnn_biases_out = bias_variable([n_classes])
# In[56]:
# Encoder, Decoder and DNN as in paper page-4
def encode(x):
l1 = tf.nn.tanh(tf.add(tf.matmul(x,e_weights_h1),e_biases_h1))
l2 = tf.nn.tanh(tf.add(tf.matmul(l1,e_weights_h2),e_biases_h2))
l3 = tf.nn.tanh(tf.add(tf.matmul(l2,e_weights_h3),e_biases_h3))
return l3
def decode(x):
l1 = tf.nn.tanh(tf.add(tf.matmul(x,d_weights_h1),d_biases_h1))
l2 = tf.nn.tanh(tf.add(tf.matmul(l1,d_weights_h2),d_biases_h2))
l3 = tf.nn.tanh(tf.add(tf.matmul(l2,d_weights_h3),d_biases_h3))
return l3
def dnn(x):
l1 = tf.nn.tanh(tf.add(tf.matmul(x,dnn_weights_h1),dnn_biases_h1))
l2 = tf.nn.tanh(tf.add(tf.matmul(l1,dnn_weights_h2),dnn_biases_h2))
out = tf.nn.softmax(tf.add(tf.matmul(l2,dnn_weights_out),dnn_biases_out))
return out
# In[57]:
# Nodes with operations created
encoded = encode(X)
decoded = decode(encoded)
y_ = dnn(encoded)
# In[58]:
# Two types of cost functions
# First one with decoder and encoder
# Next one with actual output
us_cost_function = tf.reduce_mean(tf.pow(X - decoded, 2))
s_cost_function = -tf.reduce_sum(Y * tf.log(y_))
us_optimizer = tf.train.AdamOptimizer(learning_rate).minimize(us_cost_function)
s_optimizer = tf.train.AdamOptimizer(learning_rate).minimize(s_cost_function)
# In[59]:
# Remember: the operations are on tensors
# Argmax is used to get max prob element from softmax output along --> axis
correct_prediction = tf.equal(tf.argmax(y_,1), tf.argmax(Y,1))
# Equal returns boolean, now accuracy measured after cast
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# #### Model architecture
# Image take from: https://arxiv.org/pdf/1611.02049v1.pdf
# <img src="AE.png">
# <img src="NN.png">
# In[60]:
with tf.Session() as session:
tf.global_variables_initializer().run()
# ------------ 1. Training Autoencoders - Unsupervised Learning ----------- #
for epoch in range(training_epochs):
epoch_costs = np.empty(0)
for b in range(total_batches):
offset = (b * batch_size) % (train_x.shape[0] - batch_size)
batch_x = train_x[offset:(offset + batch_size), :]
_, c = session.run([us_optimizer, us_cost_function],feed_dict={X: batch_x})
epoch_costs = np.append(epoch_costs,c)
print("Epoch: ",epoch," Loss: ",np.mean(epoch_costs))
print("Unsupervised pre-training finished...")
# ---------------- 2. Training NN - Supervised Learning ------------------ #
for epoch in range(training_epochs):
epoch_costs = np.empty(0)
for b in range(total_batches):
offset = (b * batch_size) % (train_x.shape[0] - batch_size)
batch_x = train_x[offset:(offset + batch_size), :]
batch_y = train_y[offset:(offset + batch_size), :]
_, c = session.run([s_optimizer, s_cost_function],feed_dict={X: batch_x, Y : batch_y})
epoch_costs = np.append(epoch_costs,c)
print("Epoch: ",epoch," Loss: ",np.mean(epoch_costs)," Training Accuracy: ", session.run(accuracy, feed_dict={X: train_x, Y: train_y}), "Validation Accuracy:", session.run(accuracy, feed_dict={X: val_x, Y: val_y}))
print("Supervised training finished...")
# Instead of setting number of iterations we can also find where validation error starts increasing for generalization
print("\nTesting Accuracy:", session.run(accuracy, feed_dict={X: test_features, Y: test_labels}))
# --------------------------------------------------------------------------------------------------------------------------
# Generating a random test2.csv
"""
####### #######
#######      Hierarchical Part      #######
####### #######
# Training data in dataset
# Now we have to extract features and labels
#### Hierarchical Features ####
# Creating the feature set for second NN
additionalfeatures = | np.asarray(dataset.iloc[:,522:528]) | numpy.asarray |
"""Test gates defined in `qibo/core/gates.py`."""
import pytest
import numpy as np
from qibo import gates, K
from qibo.config import raise_error
from qibo.tests.utils import random_state, random_density_matrix
def apply_gates(gatelist, nqubits=None, initial_state=None):
if initial_state is None:
state = K.qnp.zeros(2 ** nqubits)
state[0] = 1
elif isinstance(initial_state, np.ndarray):
state = np.copy(initial_state)
if nqubits is None:
nqubits = int(np.log2(len(state)))
else: # pragma: no cover
assert nqubits == int(np.log2(len(state)))
else: # pragma: no cover
raise_error(TypeError, "Invalid initial state type {}."
"".format(type(initial_state)))
state = K.cast(state)
for gate in gatelist:
state = gate(state)
return state
def test__control_unitary(backend):
matrix = K.cast(np.random.random((2, 2)))
gate = gates.Unitary(matrix, 0)
unitary = gate._control_unitary(matrix)
target_unitary = np.eye(4, dtype=K._dtypes.get('DTYPECPX'))
target_unitary[2:, 2:] = K.to_numpy(matrix)
K.assert_allclose(unitary, target_unitary)
with pytest.raises(ValueError):
unitary = gate._control_unitary(np.random.random((16, 16)))
def test_h(backend):
final_state = apply_gates([gates.H(0), gates.H(1)], nqubits=2)
target_state = np.ones_like(final_state) / 2
K.assert_allclose(final_state, target_state)
def test_x(backend):
final_state = apply_gates([gates.X(0)], nqubits=2)
target_state = np.zeros_like(final_state)
target_state[2] = 1.0
K.assert_allclose(final_state, target_state)
def test_y(backend):
final_state = apply_gates([gates.Y(1)], nqubits=2)
target_state = np.zeros_like(final_state)
target_state[1] = 1j
K.assert_allclose(final_state, target_state)
def test_z(backend):
final_state = apply_gates([gates.H(0), gates.H(1), gates.Z(0)], nqubits=2)
target_state = np.ones_like(final_state) / 2.0
target_state[2] *= -1.0
target_state[3] *= -1.0
K.assert_allclose(final_state, target_state)
def test_s(backend):
final_state = apply_gates([gates.H(0), gates.H(1), gates.S(1)], nqubits=2)
target_state = np.array([0.5, 0.5j, 0.5, 0.5j])
K.assert_allclose(final_state, target_state)
def test_sdg(backend):
final_state = apply_gates([gates.H(0), gates.H(1), gates.SDG(1)], nqubits=2)
target_state = np.array([0.5, -0.5j, 0.5, -0.5j])
K.assert_allclose(final_state, target_state)
def test_t(backend):
final_state = apply_gates([gates.H(0), gates.H(1), gates.T(1)], nqubits=2)
target_state = np.array([0.5, (1 + 1j) / np.sqrt(8),
0.5, (1 + 1j) / np.sqrt(8)])
K.assert_allclose(final_state, target_state)
def test_tdg(backend):
final_state = apply_gates([gates.H(0), gates.H(1), gates.TDG(1)], nqubits=2)
target_state = np.array([0.5, (1 - 1j) / np.sqrt(8),
0.5, (1 - 1j) / np.sqrt(8)])
K.assert_allclose(final_state, target_state)
def test_identity(backend):
gatelist = [gates.H(0), gates.H(1), gates.I(0), gates.I(1)]
final_state = apply_gates(gatelist, nqubits=2)
target_state = np.ones_like(final_state) / 2.0
K.assert_allclose(final_state, target_state)
gatelist = [gates.H(0), gates.H(1), gates.I(0, 1)]
final_state = apply_gates(gatelist, nqubits=2)
K.assert_allclose(final_state, target_state)
def test_align(backend):
gate = gates.Align(0, 1)
gatelist = [gates.H(0), gates.H(1), gate]
final_state = apply_gates(gatelist, nqubits=2)
target_state = np.ones_like(final_state) / 2.0
K.assert_allclose(final_state, target_state)
gate_matrix = gate._construct_unitary()
K.assert_allclose(gate_matrix, np.eye(4))
# :class:`qibo.core.cgates.M` is tested separately in `test_measurement_gate.py`
def test_rx(backend):
theta = 0.1234
final_state = apply_gates([gates.H(0), gates.RX(0, theta=theta)], nqubits=1)
phase = np.exp(1j * theta / 2.0)
gate = np.array([[phase.real, -1j * phase.imag],
[-1j * phase.imag, phase.real]])
target_state = gate.dot(np.ones(2)) / np.sqrt(2)
K.assert_allclose(final_state, target_state)
def test_ry(backend):
theta = 0.1234
final_state = apply_gates([gates.H(0), gates.RY(0, theta=theta)], nqubits=1)
phase = np.exp(1j * theta / 2.0)
gate = np.array([[phase.real, -phase.imag],
[phase.imag, phase.real]])
target_state = gate.dot(np.ones(2)) / np.sqrt(2)
K.assert_allclose(final_state, target_state)
@pytest.mark.parametrize("applyx", [True, False])
def test_rz(backend, applyx):
theta = 0.1234
if applyx:
gatelist = [gates.X(0)]
else:
gatelist = []
gatelist.append(gates.RZ(0, theta))
final_state = apply_gates(gatelist, nqubits=1)
target_state = np.zeros_like(final_state)
p = int(applyx)
target_state[p] = np.exp((2 * p - 1) * 1j * theta / 2.0)
K.assert_allclose(final_state, target_state)
def test_u1(backend):
theta = 0.1234
final_state = apply_gates([gates.X(0), gates.U1(0, theta)], nqubits=1)
target_state = np.zeros_like(final_state)
target_state[1] = np.exp(1j * theta)
K.assert_allclose(final_state, target_state)
def test_u2(backend):
phi = 0.1234
lam = 0.4321
initial_state = random_state(1)
final_state = apply_gates([gates.U2(0, phi, lam)], initial_state=initial_state)
matrix = np.array([[np.exp(-1j * (phi + lam) / 2), -np.exp(-1j * (phi - lam) / 2)],
[np.exp(1j * (phi - lam) / 2), np.exp(1j * (phi + lam) / 2)]])
target_state = matrix.dot(initial_state) / np.sqrt(2)
K.assert_allclose(final_state, target_state)
def test_u3(backend):
theta = 0.1111
phi = 0.1234
lam = 0.4321
initial_state = random_state(1)
final_state = apply_gates([gates.U3(0, theta, phi, lam)],
initial_state=initial_state)
cost, sint = np.cos(theta / 2), np.sin(theta / 2)
ep = np.exp(1j * (phi + lam) / 2)
em = np.exp(1j * (phi - lam) / 2)
matrix = np.array([[ep.conj() * cost, - em.conj() * sint],
[em * sint, ep * cost]])
target_state = matrix.dot(initial_state)
K.assert_allclose(final_state, target_state)
@pytest.mark.parametrize("applyx", [False, True])
def test_cnot(backend, applyx):
if applyx:
gatelist = [gates.X(0)]
else:
gatelist = []
gatelist.append(gates.CNOT(0, 1))
final_state = apply_gates(gatelist, nqubits=2)
target_state = np.zeros_like(final_state)
target_state[3 * int(applyx)] = 1.0
K.assert_allclose(final_state, target_state)
@pytest.mark.parametrize("controlled_by", [False, True])
def test_cz(backend, controlled_by):
initial_state = random_state(2)
matrix = np.eye(4)
matrix[3, 3] = -1
target_state = matrix.dot(initial_state)
if controlled_by:
gate = gates.Z(1).controlled_by(0)
else:
gate = gates.CZ(0, 1)
final_state = apply_gates([gate], initial_state=initial_state)
assert gate.name == "cz"
K.assert_allclose(final_state, target_state)
@pytest.mark.parametrize("name,params",
[("CRX", {"theta": 0.1}),
("CRY", {"theta": 0.2}),
("CRZ", {"theta": 0.3}),
("CU1", {"theta": 0.1}),
("CU2", {"phi": 0.1, "lam": 0.2}),
("CU3", {"theta": 0.1, "phi": 0.2, "lam": 0.3})])
def test_cun(backend, name, params):
initial_state = random_state(2)
gate = getattr(gates, name)(0, 1, **params)
final_state = apply_gates([gate], initial_state=initial_state)
target_state = np.dot(K.to_numpy(gate.matrix), initial_state)
K.assert_allclose(final_state, target_state)
def test_swap(backend):
final_state = apply_gates([gates.X(1), gates.SWAP(0, 1)], nqubits=2)
target_state = np.zeros_like(final_state)
target_state[2] = 1.0
K.assert_allclose(final_state, target_state)
def test_multiple_swap(backend):
gatelist = [gates.X(0), gates.X(2), gates.SWAP(0, 1), gates.SWAP(2, 3)]
final_state = apply_gates(gatelist, nqubits=4)
gatelist = [gates.X(1), gates.X(3)]
target_state = apply_gates(gatelist, nqubits=4)
K.assert_allclose(final_state, target_state)
def test_fsim(backend):
theta = 0.1234
phi = 0.4321
gatelist = [gates.H(0), gates.H(1), gates.fSim(0, 1, theta, phi)]
final_state = apply_gates(gatelist, nqubits=2)
target_state = np.ones_like(K.to_numpy(final_state)) / 2.0
rotation = np.array([[np.cos(theta), -1j * np.sin(theta)],
[-1j * np.sin(theta), np.cos(theta)]])
matrix = np.eye(4, dtype=target_state.dtype)
matrix[1:3, 1:3] = rotation
matrix[3, 3] = np.exp(-1j * phi)
target_state = matrix.dot(target_state)
K.assert_allclose(final_state, target_state)
def test_generalized_fsim(backend):
phi = np.random.random()
rotation = np.random.random((2, 2)) + 1j * np.random.random((2, 2))
gatelist = [gates.H(0), gates.H(1), gates.H(2)]
gatelist.append(gates.GeneralizedfSim(1, 2, rotation, phi))
final_state = apply_gates(gatelist, nqubits=3)
target_state = np.ones_like(K.to_numpy(final_state)) / np.sqrt(8)
matrix = np.eye(4, dtype=target_state.dtype)
matrix[1:3, 1:3] = rotation
matrix[3, 3] = np.exp(-1j * phi)
target_state[:4] = matrix.dot(target_state[:4])
target_state[4:] = matrix.dot(target_state[4:])
K.assert_allclose(final_state, target_state)
def test_generalized_fsim_parameter_setter(backend):
phi = np.random.random()
matrix = np.random.random((2, 2))
gate = gates.GeneralizedfSim(0, 1, matrix, phi)
K.assert_allclose(gate.parameters[0], matrix)
assert gate.parameters[1] == phi
matrix = np.random.random((4, 4))
with pytest.raises(ValueError):
gate = gates.GeneralizedfSim(0, 1, matrix, phi)
@pytest.mark.parametrize("applyx", [False, True])
def test_toffoli(backend, applyx):
if applyx:
gatelist = [gates.X(0), gates.X(1), gates.TOFFOLI(0, 1, 2)]
else:
gatelist = [gates.X(1), gates.TOFFOLI(0, 1, 2)]
final_state = apply_gates(gatelist, nqubits=3)
target_state = np.zeros_like(final_state)
if applyx:
target_state[-1] = 1
else:
target_state[2] = 1
K.assert_allclose(final_state, target_state)
@pytest.mark.parametrize("nqubits", [2, 3])
def test_unitary(backend, nqubits):
initial_state = np.ones(2 ** nqubits) / np.sqrt(2 ** nqubits)
matrix = np.random.random(2 * (2 ** (nqubits - 1),))
target_state = np.kron(np.eye(2), matrix).dot(initial_state)
gatelist = [gates.H(i) for i in range(nqubits)]
gatelist.append(gates.Unitary(matrix, *range(1, nqubits), name="random"))
final_state = apply_gates(gatelist, nqubits=nqubits)
K.assert_allclose(final_state, target_state)
def test_unitary_initialization(backend):
matrix = np.random.random((4, 4))
gate = gates.Unitary(matrix, 0, 1)
K.assert_allclose(gate.parameters, matrix)
matrix = np.random.random((8, 8))
with pytest.raises(ValueError):
gate = gates.Unitary(matrix, 0, 1)
with pytest.raises(TypeError):
gate = gates.Unitary("abc", 0, 1)
def test_unitary_common_gates(backend):
target_state = apply_gates([gates.X(0), gates.H(1)], nqubits=2)
gatelist = [gates.Unitary(np.array([[0, 1], [1, 0]]), 0),
gates.Unitary(np.array([[1, 1], [1, -1]]) / np.sqrt(2), 1)]
final_state = apply_gates(gatelist, nqubits=2)
K.assert_allclose(final_state, target_state)
thetax = 0.1234
thetay = 0.4321
gatelist = [gates.RX(0, theta=thetax), gates.RY(1, theta=thetay),
gates.CNOT(0, 1)]
target_state = apply_gates(gatelist, nqubits=2)
rx = np.array([[np.cos(thetax / 2), -1j * np.sin(thetax / 2)],
[-1j * np.sin(thetax / 2), np.cos(thetax / 2)]])
ry = np.array([[np.cos(thetay / 2), -np.sin(thetay / 2)],
[np.sin(thetay / 2), | np.cos(thetay / 2) | numpy.cos |
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
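# Usage sketch (added for illustration; the Miller index below is arbitrary):
# other modules are expected to look space groups up through the space_groups
# dictionary and expand a reflection into its symmetry equivalents, e.g.
#
#   sg_p1 = space_groups['P 1']
#   hkls, phases = sg_p1.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
#
# P 1 contains only the identity operation, so this returns the single
# reflection (1, 2, 3) with a phase factor of 1.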
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
import warnings, copy
from astropy.time import Time, TimeDelta
from astropy.coordinates import SkyCoord, AltAz, ICRS, FK5, EarthLocation, Longitude
from astropy import units as U
import numpy as NP
import constants as CNST
# Perform some IERS adjustments
from astropy.utils import iers
tnow = Time.now()
try:
print('Checking if some IERS related adjustments are required...')
tnow_ut1 = tnow.ut1
except iers.IERSRangeError as exception:
default_iers_auto_url = 'http://maia.usno.navy.mil/ser7/finals2000A.all'
secondary_iers_auto_url = 'https://datacenter.iers.org/data/9/finals2000A.all'
tertiary_iers_auto_url = 'ftp://cddis.gsfc.nasa.gov/pub/products/iers/finals2000A.all'
try:
# iers.conf.iers_auto_url = default_iers_auto_url
iers.conf.remote_timeout = 120.0
iers.IERS_A.open(iers.IERS_A_URL)
except Exception as err:
if ('url' in str(err).lower()) or (('connection' in str(err).lower())):
print(err)
print('Original source URL for IERS_A: {0} FAILED!'.format(iers.conf.iers_auto_url))
print('Original IERS Configuration:')
print(iers.conf.__class__.__dict__)
print('Modifying the source URL for IERS_A table to {0}'.format(secondary_iers_auto_url))
# iers.IERS_A_URL = 'ftp://cddis.gsfc.nasa.gov/pub/products/iers/finals2000A.all'
iers.conf.auto_download = True
iers.conf.iers_auto_url = secondary_iers_auto_url
# iers.conf.iers_auto_url = 'ftp://cddis.gsfc.nasa.gov/pub/products/iers/finals2000A.all'
try:
print('Now testing {0}'.format(secondary_iers_auto_url))
iers_a = iers.IERS_A.open(secondary_iers_auto_url)
except Exception as newerr:
if ('url' in str(err).lower()):
print(newerr)
print('Modified URL also did not work. Computation of LST may be affected or will completely fail.')
# raise newerr
else:
print('Updated source URL {0} worked!'.format(secondary_iers_auto_url))
print('Modified IERS Configuration:')
print(iers.conf.__class__.__dict__)
try:
tnow_ut1 = tnow.ut1
except iers.IERSRangeError as exception:
print(exception)
warnings.warn('Ephemeris predictions will be unreliable despite a successful download of IERS tables')
################################################################################
def equation_of_equinoxes(jd):
"""
----------------------------------------------------------------------------
Estimate the equation of the equinoxes
Inputs:
jd [scalar or numpy array] Julian date at which nutation is to be
estimated and the equation of equinoxes is returned.
Output:
Equation of the equinoxes (in hours) that should be used to correct the
Greenwich Mean Sidereal Time to obtain the Greenwich Apparent Sidereal Time
Notes: Adopted from https://aa.usno.navy.mil/faq/docs/GAST.php
----------------------------------------------------------------------------
"""
if not isinstance(jd, (int, float, NP.ndarray)):
raise TypeError('Input julian date(s) must be a scalar or numpy array')
d = jd - 2451545.0 # Days since 2000 January 1, 12h UT, Julian date 2451545.0
omega = 125.04 - 0.052954 * d # Longitude of the ascending node of the Moon in degrees
l = 280.47 + 0.98565 * d # Mean Longitude of the Sun in degrees
obliquity = 23.4393 - 0.0000004 * d # in degrees
nutation = -0.000319 * NP.sin(NP.radians(omega)) - 0.000024 * NP.sin(NP.radians(2*l)) # in hours
eqeq = nutation * NP.cos(NP.radians(obliquity)) # Equation of the equinoxes in hours
# t = d / 36525 # number of centuries since 2000 January 1, 12h UT, Julian date 2451545.0
return eqeq
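# Illustrative usage (not part of the original module): the correction returned here is
# added to the Greenwich Mean Sidereal Time (in hours) to obtain the apparent value, e.g.
#     eqeq = equation_of_equinoxes(2451545.0)   # J2000.0 epoch
#     gast_hours = gmst_hours + eqeq            # 'gmst_hours' assumed computed elsewhere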
################################################################################
def julian_date_from_LAST(last, jd0, longitude, tol=1e-6):
"""
----------------------------------------------------------------------------
Inputs:
last [scalar or numpy array] Local Apparent Sidereal Time (in hours)
jd0 [scalar or numpy array] Julian date at previous midnight. Same
size as input 'last' or numpy broadcastable to that shape
longitude [scalar or numpy array] Longitude of observing site (in hours).
Same size as input 'last' or numpy broadcastable to that shape
tol [scalar] Tolerance for convergence since these calculations
are iteratively solved
Output:
Julian date(s) as a numpy array correspoding to the input apparent sidereal
time and longitude on given starting Julian dates.
Notes: Adopted from https://aa.usno.navy.mil/faq/docs/GAST.php
----------------------------------------------------------------------------
"""
if not isinstance(jd0, (int, float, NP.ndarray)):
raise TypeError('Input starting julian date(s) must be a scalar or numpy array')
jd0 = NP.asarray(jd0).ravel()
if not isinstance(last, (int, float, NP.ndarray)):
raise TypeError('Input local apparent sidereal time(s) must be a scalar or numpy array')
last = NP.asarray(last).ravel()
if not isinstance(longitude, (int, float, NP.ndarray)):
raise TypeError('Input longitude(s) must be a scalar or numpy array')
longitude = NP.asarray(longitude)
import numpy as np
from open_spiel.python.algorithms.nash_solver import subproc
import os
import pickle
import itertools
import logging
import math
from open_spiel.python.algorithms.psro_v2.eval_utils import dev_regret
"""
This script connects meta-games with gambit. It translates a meta-game to am EFG format
that gambit-logit could recognize to find the QRE.
Gambit file format: http://www.gambit-project.org/gambit16/16.0.0/formats.html
"""
def isExist(path):
"""
Check if a path exists.
:param path: path to check.
:return: bool
"""
return os.path.exists(path)
def mkdir(path):
path = path.strip()
path = path.rstrip("\\")
isExists = os.path.exists(path)
if isExists:
raise ValueError(path + " already exists.")
else:
os.makedirs(path)
print(path + " has been created successfully.")
def save_pkl(obj,path):
"""
Pickle a object to path.
:param obj: object to be pickled.
:param path: path to save the object
"""
with open(path,'wb') as f:
pickle.dump(obj,f)
def load_pkl(path):
"""
Load a pickled object from path
:param path: path to the pickled object.
:return: object
"""
if not isExist(path):
raise ValueError(path + " does not exist.")
with open(path,'rb') as f:
result = pickle.load(f)
return result
# These functions help translate meta_games into gambit nfg format.
def product(shape, axes):
prod_trans = tuple(zip(*itertools.product(*(range(shape[axis]) for axis in axes))))
prod_trans_ordered = [None] * len(axes)
for i, axis in enumerate(axes):
prod_trans_ordered[axis] = prod_trans[i]
return zip(*prod_trans_ordered)
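# Illustrative behaviour (inferred, not from the original source): for shape (2, 3) and
# axes (0, 1), product() yields the index tuples (0,0), (0,1), (0,2), (1,0), (1,1), (1,2),
# i.e. every strategy profile of the payoff tensor with the first listed axis varying slowest.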
def encode_gambit_file_qre(meta_games, checkpoint_dir=None):
"""
Encode a meta-game to nfg file that gambit can recognize.
:param meta_games: A meta-game (payoff tensor) in PSRO.
"""
num_players = len(meta_games)
num_strategies = np.shape(meta_games[0])
import sys
sys.path.append('../../pySuStaIn/')
sys.path.append('../evaluation/')
import os
import shutil
import numpy as np
import matplotlib.pyplot as plt
import pickle
from pathlib import Path
import sklearn.model_selection
import pandas as pd
import pylab
# import the simulation functions from pySuStaIn needed to generate simulated data
from simfuncs import generate_random_sustain_model, generate_data_sustain
# import the functions for z-score SuStaIn
from ZscoreSustain import ZscoreSustain
from eval_utils import get_cluster_pear_metric, get_cluster_swap_metric
from sklearn.metrics import adjusted_rand_score
def run_sustain_baseline(epochs, optim_epochs, trials, ppmi=False, data_num=1, how_impute='mice'):
sustain_trial_results = np.zeros((trials, 3))
for trial_num in range(trials):
ari, swaps, pear = run_sustain_notebook(epochs, optim_epochs, trial_num, ppmi, data_num=data_num, how_impute=how_impute)
sustain_trial_results[trial_num] = [ari, swaps, pear]
if trials == 1:
print('%s, %s, %.3f, %.3f, %.3f' % ('SuStaIn', '--', ari, swaps, pear))
else:
line_str = list()
for i,j in zip(sustain_trial_results.mean(axis=0), sustain_trial_results.std(axis=0)):
line_str.append('%.3f $\\pm$ %.3f' % (i,j))
print(' & '.join(['SuStaIn'] + line_str) + '\\\\')
trials_fname = '../runs/sustain%d_experiments.txt' % trials
f = open(trials_fname, 'wb')
pickle.dump(sustain_trial_results, f)
f.close()
print(sustain_trial_results)
def run_sustain_notebook(epochs=100, optim_epochs=1000, trial_num=1,ppmi=False, data_num=1, how_impute='mice'):
epochs = int(epochs)
optim_epochs = int(optim_epochs)
N = 5 # number of biomarkers
M = 500 # number of observations ( e.g. subjects )
M_control = 100 # number of these that are control subjects
N_S_gt = 2 # number of ground truth subtypes
N_startpoints = 10
N_S_max = 2
n_iterations_MCMC_optimisation = optim_epochs # replace to 1e4
N_iterations_MCMC = epochs
output_folder = 'data1'
dataset_name = 'data1'
# LOAD SUBLIGN SYNTHETIC DATA
import sys
sys.path.append('../data/')
from load import load_data_format
from data_utils import parse_data
if ppmi:
from load import parkinsons
data = parkinsons()
max_visits = 17
else:
data = load_data_format(data_num,trial_num=trial_num, cache=True)
max_visits = 4 if data_num < 10 else 17
_, train_data_dict, _, test_data_dict, _, _ = parse_data(data.values, max_visits=max_visits, test_per=0.2)
if data_num == 11 or data_num == 12 or data_num == 14:
X = train_data_dict['obs_t_collect']
Y = train_data_dict['Y_collect']
M = train_data_dict['mask_collect']
X[X == -1000] = np.nan
Y[Y == -1000] = np.nan
sys.path.append('../model')
from utils import interpolate
if how_impute == 'mrnn':
Y[np.isnan(Y)] = 0.
X[np.isnan(X)] = 0.
Y_impute = interpolate(Y, m=M, t=X, how=how_impute)
train_data_dict['Y_collect'] = Y_impute
X = test_data_dict['obs_t_collect']
Y = test_data_dict['Y_collect']
M = test_data_dict['mask_collect']
X[X == -1000] = np.nan
Y[Y == -1000] = np.nan
sys.path.append('../model')
from utils import interpolate
if how_impute == 'mrnn':
Y[np.isnan(Y)] = 0.
X[np.isnan(X)] = 0.
Y_impute = interpolate(Y, m=M, t=X, how=how_impute)
test_data_dict['Y_collect'] = Y_impute
N_patients, N_visits, N_dims = train_data_dict['Y_collect'].shape
gt_stages = train_data_dict['t_collect'].reshape((N_patients * N_visits, 1))
gt_subtypes = [int(i) for i in train_data_dict['s_collect'].flatten()]
gt_subtypes = np.concatenate([[i] * max_visits for i in gt_subtypes])
import os
import numpy as np
import librosa
import librosa.display
import soundfile as sf
from keras.models import load_model
from util import ispecgram, fix_specgram_shape, plot_specgrams
from generate import generate_z, generate_specgram, audio_from_specgram
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
# loaded trained models
encoder = load_model("models/encoder.hdf5")
decoder = load_model("models/decoder.hdf5")
spec = np.loadtxt('spectrograms/ir_00x00y_16000_5.txt')
# -*- coding: utf-8 -*-
import numpy as np
import json
class BatchTableHeader(object):
def __init__(self):
self.properties = {}
def add_property_from_array(self, propertyName, array):
self.properties[propertyName] = array
def to_array(self):
# convert dict to json string
bt_json = json.dumps(self.properties, separators=(',', ':'))
# header must be 4-byte aligned (refer to batch table documentation)
#bt_json += ' '*(4 - len(bt_json) % 4)
# returns an array of binaries representing the batch table
# np.fromstring is deprecated for binary data; frombuffer over the encoded JSON is equivalent
return np.frombuffer(bt_json.encode('utf-8'), dtype=np.uint8)
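# Minimal usage sketch (illustrative; the property name is made up):
# bth = BatchTableHeader()
# bth.add_property_from_array("height", [10.0, 12.5, 7.3])
# payload = bth.to_array()   # uint8 array holding the JSON batch table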
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import math
from data_generator import DataGenerator
from scipy.io import wavfile
import numpy as np
from numpy.lib.stride_tricks import as_strided
import tensorflow as tf
# tf.config.run_functions_eagerly(True)
# tf.compat.v1.disable_eager_execution()
from tensorflow.keras.layers import Input, Reshape, Conv2D, BatchNormalization, Softmax, Conv1D, Bidirectional
from tensorflow.keras.layers import MaxPool2D, Dropout, Permute, Flatten, Dense, MaxPool1D, AvgPool1D
from tensorflow.keras.models import Model
import librosa
import librosa.display
from encoder_2 import Encoder
import pyhocon
import os
import json
import pandas as pd
from collections import defaultdict
from tensorflow.keras.callbacks import ModelCheckpoint
import encoder_3
import matplotlib.pyplot as plt
# store as a global variable, since we only support a few models for now
from data_generator import DataGenerator
models = {
'tiny': None,
'small': None,
'medium': None,
'large': None,
'full': None
}
# the model is trained on 16kHz audio
# model_srate = 16000
# max_batch_size = 3000
# sequence_length = 200
# n_labels = 30
# config = pyhocon.ConfigFactory.parse_file("crepe/experiments.conf")['test']
def build_and_load_model(config, task ='raga'):
"""
Build the CNN model and load the weights
Parameters
----------
config : dict
Experiment configuration (e.g. parsed from experiments.conf). Keys read below include
'model_capacity', 'model_srate', 'hop_size', 'sequence_length', 'encoder_layers',
'drop_rate', 'cutoff', 'n_labels', 'note_dim', 'tonic_mask' and 'loss_weights'.
'model_capacity' is one of 'tiny', 'small', 'medium', 'large' or 'full' and sets the
capacity multiplier to 4, 8, 16, 24 or 32; 'full' uses the model size specified in the
CREPE paper, and the others use fewer filters per convolutional layer, trading a little
pitch-estimation accuracy for speed.
task : str
'tonic' returns only the tonic-estimation model; any other value returns the combined
tonic + raga model.
Returns
-------
model : tensorflow.keras.models.Model
The compiled keras model for the requested task.
"""
model_capacity = config['model_capacity']
model_srate = config['model_srate']
hop_size = int(config['hop_size']*model_srate)
sequence_length = int((config['sequence_length']*model_srate - 1024)/hop_size) + 1
N = config['encoder_layers']
drop_rate = config['drop_rate']
tonic_filter = config['tonic_filter']
tonic_emb_size = config['tonic_emb_size']
tonic_cnn_filters = config['tonic_cnn_filters']
cutoff = config['cutoff']
n_frames = 1 + int((model_srate * cutoff - 1024) / hop_size)
n_seq = int(n_frames // sequence_length)
#print('n_seq', n_seq)
#input()
n_labels = config['n_labels']
note_dim = config['note_dim']
tonic_mask_flag = config['tonic_mask']
if models[model_capacity] is None:
capacity_multiplier = {
'tiny': 4, 'small': 8, 'medium': 16, 'large': 24, 'full': 32
}[model_capacity]
layers = [1, 2, 3, 4, 5, 6]
filters = [n * capacity_multiplier for n in [32, 4, 4, 4, 8, 16]]
widths = [512, 64, 64, 64, 64, 64]
strides = [(4, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)]
# x = Input(shape=(1024,), name='input2', dtype='float32')
x_batch = Input(shape=(None, 1024), name='x_input', dtype='float32')
chroma_batch = Input(shape=(60, None), name='chroma_input', dtype='float32')
energy_batch = Input(shape=(None,), name='energy_input', dtype='float32')
x = x_batch[0]
chroma = chroma_batch[0]
energy = energy_batch[0]
x_pitch = x
# x_pitch = x - tf.reduce_mean(x, axis=1, keepdims=True)
# x_pitch = x_pitch/tf.math.reduce_std(x_pitch, axis=1, keepdims=True)
# x_energy_reshape = tf.reshape(x, [-1])
# x_energy = x - tf.reduce_mean(x_energy_reshape)
# x_energy = x_energy/tf.math.reduce_std(x_energy_reshape)
y = Reshape(target_shape=(1024, 1, 1), name='input-reshape')(x_pitch)
for l, f, w, s in zip(layers, filters, widths, strides):
y = Conv2D(f, (w, 1), strides=s, padding='same',
activation='relu', name="conv%d" % l, trainable=False)(y)
y = BatchNormalization(name="conv%d-BN" % l)(y)
y = MaxPool2D(pool_size=(2, 1), strides=None, padding='valid',
name="conv%d-maxpool" % l, trainable=False)(y)
y = Dropout(0.25, name="conv%d-dropout" % l)(y)
y = Permute((2, 1, 3), name="transpose")(y)
y = Flatten(name="flatten")(y)
den = Dense(360, activation='sigmoid', name="classifier", trainable=False)
y = den(y)
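# The convolutional stack and the 360-unit sigmoid "classifier" above follow the CREPE
# pitch-salience front-end; these layers are frozen (trainable=False) and are expected to
# be initialised from a pretrained CREPE checkpoint (see the commented-out load_weights
# calls further down). The 360 pitch bins (roughly six octaves at 5 bins per semitone)
# are later folded into 60 per-octave bins via note_emb and red_y below.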
#model = Model(inputs=[x_batch, mask_batch, chroma_batch, energy_batch], outputs=y)
#model.load_weights('E:\\Vishwaas\\Anaconda3\\envs\\env_tf2\\Lib\\site-packages\\crepe\\model-full.h5', by_name=True)
# return model
# pitch_model = Model(inputs=[x, chroma, mask], outputs=y)
# pitch_model.summary()
note_emb = den.weights[0]
note_emb = tf.reduce_mean(tf.reshape(note_emb, [-1, 6, 60]), axis=1)
note_emb = tf.reduce_mean(tf.reshape(note_emb, [note_dim, -1, 60]), axis=1)
note_emb = tf.transpose(note_emb, name='note_emb') #60,note_emb
# note_emb_12 = tf.reduce_mean(tf.reshape(note_emb, [12,5,note_dim]), axis=1)
# enc = Encoder(note_emb_12,sequence_length=sequence_length, N=N,size=note_dim)
tonic_chroma = get_tonic_from_cnn(chroma, int(note_dim*3/4), int(note_dim/2), drop_rate) # (60, 32); assumes get_tonic_from_cnn is defined elsewhere in this module
tonic_hist = get_tonic_from_hist(y, int(note_dim*3/4), int(note_dim/2), drop_rate) #(60, 32)
tonic_sil = get_tonic_from_silence(y, energy, int(note_dim*1/2), drop_rate) # (60, 32)
print(tonic_chroma, tonic_hist, tonic_sil)
tonic_chs = tf.concat([tonic_chroma, tonic_hist, tonic_sil], axis=1)
tonic_chs_scores_emb = ffnn(tonic_chs, [note_dim], drop_rate=drop_rate)
# tonic_chs_scores_emb = ffnn(tonic_chs, [note_dim], drop_rate=drop_rate)
# tonic_chs_scores_emb = tonic_chs
red_y = tf.reshape(y, [-1, 6, 60])
red_y = tf.reduce_mean(red_y, axis=1) # (None, 60)
red_y_seq = tf.reshape(red_y, [n_seq, sequence_length, 60]) # (n_seq, sequence_length, 60)
notes_prob = tf.reduce_max(red_y, axis=1)
notes_prob_seq = tf.reshape(notes_prob, [n_seq, sequence_length]) #n_seq, sequence_length
notes_n = tf.reduce_mean(red_y_seq, axis=1)
notes_n_seq_red = tf.reduce_mean(notes_n, axis=1)
energy_seq = tf.reshape(energy, [n_seq, sequence_length]) #n_seq, sequence_length
energy_seq_red = tf.reduce_mean(energy_seq, axis=1) # n_seq
entropy_seq = get_entropy(red_y, sequence_length) #n_seq, sequence_length
entropy_seq_red = tf.reduce_mean(entropy_seq, axis=1) # n_seq
notes_prob_seq = min_max_scale(notes_prob_seq, sequence_length)
energy_seq = min_max_scale(energy_seq, sequence_length)
entropy_seq = min_max_scale(entropy_seq, sequence_length)
sequence_strength = tf.stack([notes_prob_seq, energy_seq, entropy_seq]) #3, n_seq, sequence_length
sequence_strength = tf.transpose(sequence_strength, [1,2,0])
note_strength = tf.reduce_mean(sequence_strength, axis=2) #n_seq, sequence_length
sequence_strength = tf.reduce_mean(sequence_strength, axis=1)
sequence_strength = tf.reduce_mean(sequence_strength, axis=1, keepdims=True)
sequence_strength_sum = tf.reduce_sum(sequence_strength)
# seq_str = tf.tile(seq_str, [1, note_dim])
# strength_seq = get_note_strength(energy_seq, entropy_seq, notes_prob_seq)
# un_tr_den = Dense(1, activation='relu')
# seq_str_den = (Dense(4, activation='relu'), Dense(1, activation='relu'))
# notes_strength_seq = tf.tile(tf.expand_dims(strength_seq, 2), multiples=[1,1,note_dim])
# tonic_f4 = []
# mask_seq = tf.reshape(mask, [n_seq, sequence_length])
# seq_str = get_seq_strength(energy_seq, entropy_seq, notes_prob_seq, seq_str_den, notes_n, drop_rate)
# seq_str = tf.tile(tf.expand_dims(seq_str, 1), [1, note_dim])
# for i in range(12):
# rolled_notes = tf.roll(red_y_12, -i, axis=1) # (None, 12)
# rolled_notes_id = tf.argmax(rolled_notes, axis=1)
# rolled_notes_id_seq = tf.reshape(rolled_notes_id, [n_seq, sequence_length])
# raga_enc = enc.encode(rolled_notes_id_seq, mask_seq, None, True) # (None, 200, note_dim)
# raga_enc = tf.reduce_mean(raga_enc, axis=1)
#
# raga_enc = tf.multiply(raga_enc, seq_str)
# raga_enc = tf.reduce_sum(raga_enc, 0)
# raga_enc = tf.tile(tf.expand_dims(raga_enc,0), [5,1])
# tonic_f4.append(raga_enc)
#
# tonic_f4 = tf.concat(tonic_f4, axis=0)
# notes_id_seq_all_tonics = []
# notes_prob_seq_all = []
# seq_str = get_seq_strength(energy_seq, entropy_seq, notes_prob_seq, seq_str_den, notes_n, drop_rate)
# # seq_str = tf.nn.softmax(seq_str)
# for i in range(12):
# rolled_notes = tf.roll(red_y_12, -i, axis=1) # (None, 60)
# rolled_notes_id = tf.argmax(rolled_notes, axis=1)
# rolled_notes_prob_seq = tf.reduce_max(rolled_notes, axis=1)
# rolled_notes_id_seq = tf.reshape(rolled_notes_id, [n_seq, sequence_length])
# rolled_notes_prob_seq = tf.reshape(rolled_notes_prob_seq, [n_seq, sequence_length])
# notes_prob_seq_all.append(rolled_notes_prob_seq)
# notes_id_seq_all_tonics.append(rolled_notes_id_seq)
#
# notes_id_seq_all_tonics = tf.concat(notes_id_seq_all_tonics, axis=0)
# notes_prob_seq_all = tf.concat(notes_prob_seq_all, axis=0)
# notes_prob_seq_all = tf.tile(tf.expand_dims(notes_prob_seq_all,2), [1,1,note_dim])
# mask_seq_all_tonics = tf.ones((1, 1, sequence_length), dtype=tf.float32)
# # mask_seq_all_tonics = tf.tile(mask_seq, [12,1])
# # mask_seq_all_tonics = tf.cast(mask_seq_all_tonics, tf.float32)
# tonic_f4 = encoder_3.encode(notes_id_seq_all_tonics, note_emb_12, mask_seq_all_tonics, N, d_model=note_dim)
# # tonic_f4 = enc.encode(notes_id_seq_all_tonics, mask_seq_all_tonics, None, True) # (None, 200, note_dim)
#
# tonic_f4 = tf.reduce_mean(tonic_f4, axis=1)
# # tonic_f4 = tonic_f4[:,-1,:]
# tonic_f4 = tf.reshape(tonic_f4, [12, n_seq, note_dim])
# seq_str = tf.expand_dims(tf.expand_dims(seq_str, 0),2)
# seq_str = tf.tile(seq_str, [12,1,note_dim])
# tonic_f4 = tf.multiply(tonic_f4, seq_str)
# tonic_f4 = tf.reduce_sum(tonic_f4,1)
# tonic_f4 = tf.reduce_mean(tonic_f4, 1)
# tonic_f4 = tf.expand_dims(tonic_f4, 0)
# tonic_f4 = tf.tile(tonic_f4, [5,1,1])
# tonic_f4 = tf.transpose(tonic_f4, [1,0,2])
# tonic_f4 = tf.reshape(tonic_f4, [60, note_dim])
# tonic_f4 = tf.reduce_mean(tonic_f4, axis=1)
# tonic_f4 = tf.reshape(tonic_f4, [60, n_seq, note_dim])
# seq_str = tf.expand_dims(tf.expand_dims(seq_str, 0),2)
# seq_str = tf.tile(seq_str, [60,1,note_dim])
# tonic_f4 = tf.multiply(tonic_f4, seq_str)
# tonic_f4 = tf.reduce_sum(tonic_f4,1)
# un_tr_list = []
# for i in range(60):
# rolled_notes = tf.roll(red_y, -i, axis=1)
# un_tr = unlikely_transitions(rolled_notes, sequence_length, un_tr_den, drop_rate)
# un_tr_list.append(un_tr)
# un_tr_list = tf.stack(un_tr_list)
#tonic_emb = tf.concat([tonic_f4, tonic_chs_scores_emb], axis=1)
tonic_emb = tonic_chs_scores_emb
# tonic_emb = ffnn(tonic_emb, [note_dim, note_dim//2, note_dim//4])
tonic_logit_den = Dense(1)
tonic_emb = tf.squeeze(tonic_logit_den(tonic_emb))
if tonic_mask_flag:
tonic_logits = tf.nn.sigmoid(tonic_emb)
else:
tonic_logits = tf.nn.softmax(tonic_emb)
tonic_logits = tf.expand_dims(tonic_logits, 0, name='tonic')
tonic_model = Model(inputs=[x_batch, chroma_batch, energy_batch], outputs=tonic_logits)
tonic_model.summary()
tonic_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['categorical_accuracy'])
if task== 'tonic':
return tonic_model
for layer in tonic_model.layers:
layer.trainable = False
if tonic_mask_flag:
tonic_logits_masked = tonic_logits[0]
tonic_logits_pad = tf.pad(tonic_logits_masked, [[5,5]])
tonic_logits_argmax = tf.cast(tf.argmax(tonic_logits_pad), tf.int32)
tonic_indices = tf.range(70)
lower_limit = tf.less(tonic_indices, tonic_logits_argmax-4)
upper_limit = tf.greater(tonic_indices, tonic_logits_argmax + 5)
tonic_logits_mask = 1 - tf.cast(tf.logical_or(lower_limit, upper_limit), tf.float32)
tonic_logits_mask = tonic_logits_mask[5:-5]
tonic_logits_masked = tf.multiply(tonic_logits_masked, tonic_logits_mask)
tonic_logits_masked = tonic_logits_masked/tf.reduce_sum(tonic_logits_masked)
tonic_logits_masked = tf.expand_dims(tonic_logits_masked, 0)
else:
tonic_logits_masked = tonic_logits
tdms_rag_emb = get_tdms(red_y, note_dim, model_srate, hop_size, drop_rate=drop_rate)
mask_seq = tf.ones([1,1,sequence_length], tf.float32)
notes_id = tf.argmax(red_y, axis=1)
notes_id_seq = tf.reshape(notes_id, [n_seq, sequence_length])
encoding = encoder_3.encode(notes_id_seq, note_emb, mask_seq, N, d_model=note_dim)
encoding = tf.reduce_mean(encoding, axis=1)
# encoding = tf.multiply(encoding, sequence_strength)
# encoding = encoding/sequence_strength_sum
# encoding = tf.reduce_sum(encoding, axis=0, keepdims=True)
encoding = tf.reduce_mean(encoding, axis=0, keepdims=True)
# encoding_tdms = tf.concat([encoding, tdms_rag_emb], axis=1)
encoding_tdms = tdms_rag_emb
encoding_tdms = tf.tile(encoding_tdms, [60,1])
# encoding = tf.transpose(tf.concat([encoding, sequence_strength], axis=1))
# encoding = tf.transpose(Dense(1)(encoding)) #(1, note_dim)
# encoding = tf.tile(encoding, [60, 1])
c_note_emb = tf.tile(tf.expand_dims(note_emb[0],0), [60,1])
rel_note_emb = note_emb - c_note_emb
rel_note_f1 = tf.cast(tf.expand_dims(tf.cast(tf.range(0,120,2)/2, tf.int32),1), tf.float32)/10
rel_note_corr = tf.expand_dims(tf.reduce_sum(tf.multiply(note_emb, c_note_emb), axis=1), axis=1)
tonic_rag_emb = tf.concat([rel_note_corr, rel_note_f1, rel_note_emb, encoding_tdms], axis=1)
tonic_rag_emb = ffnn(tonic_rag_emb, [2*note_dim, 2*note_dim, 2*note_dim, note_dim])
# tonic_rag_emb = Dense(n_labels, activation='elu')(tonic_rag_emb)
tonic_logits_masked = tf.transpose(tonic_logits_masked)
tonic_rag_emb = tf.multiply(tonic_rag_emb, tonic_logits_masked)
tonic_rag_emb = tf.reduce_sum(tonic_rag_emb, axis=0, keepdims=True)
tonic_rag_emb = Dense(n_labels, activation='elu')(tonic_rag_emb)
rag_logits = tf.nn.softmax(tonic_rag_emb, axis=1, name='raga')
# best_tonic = tf.argmax(tonic_logits[0])
# rolled_notes = tf.roll(red_y, -best_tonic, axis=1) # (None, 60)
# rolled_notes_id = tf.argmax(rolled_notes, axis=1)
# rolled_notes_id_seq = tf.reshape(rolled_notes_id, [n_seq, sequence_length])
#
# # tonic_f4 = enc.encode(rolled_notes_id_seq, mask_seq, None, True) # (None, 200, note_dim)
# mask_seq = tf.cast(mask_seq, tf.float32)
# tonic_f4 = encoder_3.encode(rolled_notes_id_seq, note_emb, mask_seq, N, d_model=note_dim)
# tonic_f4 = tf.reduce_mean(tonic_f4, axis=1)
# tonic_f4 = tf.reduce_mean(tonic_f4, axis=0, keepdims=True)
# seq_str = tf.expand_dims(seq_str, 1)
# seq_str = tf.tile(seq_str, [1, note_dim])
# tonic_f4 = tf.multiply(tonic_f4, seq_str)
# tonic_f4 = tf.reduce_sum(tonic_f4, 0, keepdims=True)
# raga_enc = Encoder(note_emb, enc_num=2, sequence_length=sequence_length, N=N, size=note_dim)
# raga_encoded = raga_enc.encode(notes_id_seq, mask_seq, None, True) # (None, 200, note_dim)
# raga_usms = []
# for m in range(n_seq):
# raga_usm = tf.math.unsorted_segment_mean(raga_encoded[m], notes_id_seq[m], 60)
# raga_usms.append(raga_usm)
# raga_usms = tf.stack(raga_usms)
# raga_usms = tf.multiply(raga_usms, seq_str)
# raga_usms = tf.reduce_sum(raga_usms, 0)
# tdms_rag_embs = Dense(n_labels, name='raga')(raga_usms)
#
# tonic_logits_tr = tf.transpose(tonic_logits_masked, [1,0])
# tonic_logits_tr = tf.tile(tonic_logits_tr, [1, n_labels])
#
# tdms_rag_embs = tf.multiply(tdms_rag_embs, tonic_logits_tr)
# tdms_rag_embs = tf.reduce_sum(tdms_rag_embs, 0, keepdims=True)
# tdms_rag_emb = get_tdms(red_y, note_dim, model_srate, hop_size, drop_rate) # (60,?)
# tdms_rag_emb = ffnn(tdms_rag_emb, [2*note_dim, note_dim])
# tdms_rag_emb = Dense(n_labels, activation='elu')(tdms_rag_emb)
#
# tonic_logits_tr = tf.transpose(tonic_logits_masked, [1,0])
#
# tonic_logits_tr = tf.tile(tonic_logits_tr, [1, n_labels])
# tdms_rag_emb = tf.multiply(tdms_rag_emb, tonic_logits_tr)
# tdms_rag_emb = tf.reduce_sum(tdms_rag_emb, 0, keepdims=True)
# print(tdms_rag_emb)
# tdms_rag_emb = tf.multiply(tdms_rag_emb, tonic_logits_tr)
# tdms_rag_emb = tf.reduce_sum(tdms_rag_emb, 0, keepdims=True)
# tdms_rag_emb = Dense(n_labels, name='raga')(tdms_rag_emb)
# tdms_rag_emb = ffnn(tdms_rag_emb, [2*note_dim, note_dim])
loss_weights = config['loss_weights']
# tdms_rag_embs = tf.concat([tdms_rag_emb, usms], axis=1)
# tdms_rag_embs_ffnn = ffnn(tdms_rag_embs, [note_dim], drop_rate=0)
# tdms_rag_embs_ffnn = Dense(n_labels)(tdms_rag_embs_ffnn)
# tdms_rag_embs_ffnn = Dense(n_labels)(usms)
# tdms_rag_embs = ffnn(tdms_rag_embs, [2*note_dim, note_dim], drop_rate=0)
# tdms_rag_embs = Dense(n_labels, name='raga')(tdms_rag_embs)
# tdms_rag_embs_ffnn = tf.multiply(tdms_rag_embs, tonic_logits_tr)
# tdms_rag_embs_ffnn = tf.reduce_sum(tdms_rag_embs_ffnn, 0, keepdims=True)
# rag_logits = Dense(n_labels, name='raga')(tdms_rag_embs_ffnn)
# tonic_logits_masked = tf.transpose(tonic_logits_masked)
# tonic_f4 = Dense(n_labels)(tonic_f4)
# tonic_f4 = tf.multiply(tonic_f4, tf.tile(tonic_logits_masked, [1,n_labels]))
# tonic_f4 = tf.reduce_sum(tonic_f4, 0, keepdims=True)
# rag_logits = Dense(n_labels, activation='softmax', name='raga')(tdms_rag_emb)
# rag_logits = tf.nn.softmax(tdms_rag_emb, axis=1, name='raga')
rag_model = Model(inputs=[x_batch, chroma_batch, energy_batch], outputs=[tonic_logits, rag_logits])
rag_model.compile(loss={'tf_op_layer_tonic': 'binary_crossentropy', 'tf_op_layer_raga': 'categorical_crossentropy'},
optimizer='adam', metrics={'tf_op_layer_tonic': 'categorical_accuracy', 'tf_op_layer_raga': 'accuracy'}, loss_weights={'tf_op_layer_tonic': loss_weights[0], 'tf_op_layer_raga': loss_weights[1]})
rag_model.summary()
# rag_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return rag_model
# package_dir = os.path.dirname(os.path.realpath(__file__))
# filename = "model-{}.h5".format(model_capacity)
# model.load_weights(os.path.join(package_dir, filename))
# rag_model = Model(inputs=x, outputs=logits)
# model.load_weights('E:\\Vishwaas\\Anaconda3\\envs\\env_tf2\\Lib\\site-packages\\crepe\\model-full.h5', by_name=True)
# w1 = den.weights[0] #2048,360
# w1 = tf.reduce_mean(tf.reshape(w1, [-1, 6, 60]), axis=1)
# w1 = tf.reduce_mean(tf.reshape(w1, [512, 4, 60]), axis=1)
# # MaxPool1D()
# # w1 = tf.reduce_mean(tf.reshape(w1, [-1, 12, 5]), axis=2)
# w1 = tf.transpose(w1)
# mat = np.zeros([60,60])
# for i in range(60):
# for j in range(60):
#
# a = float(tf.reduce_sum(tf.multiply(w1[i], w1[j])))
# mat[i, j] = a
#
# plt.imshow(mat, cmap='hot', interpolation='nearest')
# plt.show()
# model.compile('adam', 'binary_crossentropy')
# models[model_capacity] = model
#
# return models[model_capacity], tonic_model, rag_model
# return models[model_capacity], energy_model, tonic_model, sil_model
def min_max_scale(val_seq, sequence_length):
val_max = tf.reduce_max(val_seq, axis=1, keepdims=True)
val_min = tf.reduce_min(val_seq, axis=1, keepdims=True)
val_max = tf.tile(val_max, [1, sequence_length])
val_min = tf.tile(val_min, [1, sequence_length])
return (val_seq - val_min)/(val_max-val_min)
def get_note_strength(energy_seq, entropy_seq, notes_prob_seq, drop_rate=0.2):
stacked = tf.concat([energy_seq, entropy_seq, notes_prob_seq], 1)
stacked = tf.reduce_mean(stacked, 0)
return stacked
# Alternative learned variant (unused):
# stacked = Dense(3, activation='relu')(stacked)
# d1 = Dense(1, activation='relu')(stacked)
# return tf.squeeze(d1, 2)
def get_seq_strength(energy_seq, entropy_seq, notes_prob_seq, seq_str_den, notes_n, drop_rate=0.2):
energy_seq = tf.reduce_max(energy_seq, 1, keepdims=True)
entropy_seq = tf.reduce_max(entropy_seq, 1, keepdims=True)
notes_prob_seq = tf.reduce_max(notes_prob_seq, 1, keepdims=True)
notes_n= tf.expand_dims(notes_n, 1)
stacked = tf.concat([energy_seq, entropy_seq, notes_prob_seq, notes_n],1)
d1= seq_str_den[0](stacked)
d2 = seq_str_den[1](d1)
return tf.squeeze(d2, 1)
def get_tdms(y, note_dim, model_srate=16000, hop_length=160, cuttoff=60, max_time_delay=4, step=0.1, drop_rate=0.2):
n_frames = 1 + int((model_srate - 1024) / hop_length)
tdms = []
    for i in range(0, int(max_time_delay / step), 1):
        frames_2_shift = int(n_frames * i * step)
        shifted_y = tf.roll(y, -frames_2_shift, axis=0)  # (None, 60)
        # Guard the zero-delay term: slicing with [:-0] would select an empty tensor,
        # so only truncate when the shift is non-zero.
        if frames_2_shift > 0:
            y_truc = tf.transpose(y[:-frames_2_shift, :])
            y_shifted_truc = shifted_y[:-frames_2_shift, :]
        else:
            y_truc = tf.transpose(y)
            y_shifted_truc = shifted_y
        norm = tf.cast(tf.shape(y_truc)[0], tf.float32)
        # norm = 1
        # y = tf.roll(z,-j,axis=1)
        matmul = tf.matmul(y_truc, y_shifted_truc)
        matmul = matmul / norm
        tdms.append(matmul)
tdms = tf.stack(tdms)
tdms = tf.expand_dims(tdms, 3) #(int(max_time_delay/step), 60, 60, 1)
tdms = tf.transpose(tdms, [3,1,2,0])
z = Conv2D(filters=64, kernel_size=(5, 5), strides=(2, 2), activation='relu', padding='same',
name='tdms_conv2d_1')(tdms)
z = BatchNormalization()(z)
z = MaxPool2D(pool_size=(2, 2), strides=None, padding='valid')(z)
z = Dropout(drop_rate)(z)
z = Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='valid',
name='tdms_conv2d_2')(z)
z = MaxPool2D(pool_size=(2, 2), strides=None, padding='valid')(z)
z = BatchNormalization()(z)
z = Dropout(drop_rate)(z)
z = Flatten(name='tdms_flatten')(z)
z = Dense(note_dim, activation='relu', name='tdms_dense')(z)
# base_tdms = tf.transpose(base_tdms, [1,2,0])
# # base_tdms = tf.squeeze(den(base_tdms),2)
# base_tdms = tf.squeeze(base_tdms, 2)
# base_tdms = tf.pow(base_tdms, 0.75)
# base_tdms = base_tdms/tf.reduce_sum(base_tdms)
# for i in range(60):
# rot_tdms = tf.roll(base_tdms, -i, axis=1)
# rot_tdms= tf.roll(rot_tdms, -i, axis=0)
# tdms.append(rot_tdms)
# tdms = tf.stack(tdms)
# tdms = tf.reshape(tdms, [60, -1])
return z
def get_number_of_notes(notes_seq):
notes_seq_red = tf.reduce_mean(notes_seq, axis=1) #(None, 60)
return tf.reduce_mean(notes_seq_red, axis=1)
# d1 = Dense(60, activation='relu')(notes_seq_red)
# return tf.reduce_mean(d1, axis=1)
def ffnn(inputs, hidden_size, drop_rate=0.2):
x = inputs
for hs in hidden_size:
den = Dense(hs, activation='relu')(x)
x = Dropout(drop_rate)(den)
return x
def unlikely_transitions(rolled_notes, sequence_length, den_weight, drop_rate=0.2):
# (None, 60)
rolled_notes_seq = tf.reshape(rolled_notes, [-1, sequence_length, 60])
rolled_notes_seq = tf.reduce_mean(rolled_notes_seq, 1) #(-1, 60)
r_R = rolled_notes_seq[:, 5:15] #(None, 10)
g_G =rolled_notes_seq[:, 15:25]
# m_M = den_weights[2](rolled_notes_seq[:, 25:35]) #try?
d_D = rolled_notes_seq[:, 40:50]
n_N = rolled_notes_seq[:, 50:60]
adj_notes = tf.stack([r_R, g_G, d_D, n_N], 0) #(4, None, 10)
adj_notes = tf.transpose(adj_notes, [1,0,2]) #(None, 4,10)
return 1-tf.squeeze(den_weight(adj_notes),2)
def get_entropy(red_y, sequence_length):
sum_y = tf.reduce_sum(red_y, axis=1, keepdims=True)
sum_y = tf.tile(sum_y, [1,60])
y = red_y/sum_y
entropy = tf.multiply(y, tf.math.log(y))
entropy = tf.reduce_sum(entropy, 1)
entropy_seq = tf.reshape(entropy, [-1, sequence_length])
return entropy_seq
# @tf.function
def get_tonic_from_silence(y, energy, tonic_emb_size=32, drop_rate=0.2):
#y, energy, int(note_dim*1/16), drop_rate
# zarg = tf.constant([1, 1, 5, 3, 5, 2, 2, 7, 6, 6, 11, 3, 3, 3])
# energy = tf.constant([0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0])
# z = tf.nn.softmax(y, axis=1) #try this
z = tf.reshape(y, [-1, 6, 60])
z = tf.reduce_mean(z, axis=1) #(None, 60)
# z = tf.reshape(z, [-1,12,5])
# z = tf.reduce_mean(z, axis=2) # (None, 12)
zarg = tf.argmax(z, axis=1)
energy = (energy - tf.reduce_min(energy))/(tf.reduce_max(energy)- tf.reduce_min(energy))
energy = tf.cast(tf.math.round(energy), tf.int32)
# energy = energy - tf.re
# return energy
# energy = tf.cast(energy>0.01, tf.int32)
# energy = tf.cast(tf.round(tf.sigmoid(energy)), tf.int32)
energy_r = tf.roll(energy, 1, axis=-1)
energy_r = tf.concat([[0], energy_r[1:]], axis=-1)
delta_en = energy - energy_r
delta_en_abs = tf.abs(delta_en)
delta_en_csum = tf.math.cumsum(delta_en_abs)
delta_en_csum_pos = tf.squeeze(tf.where(tf.greater(delta_en_csum, 0)), axis=1)
zarg = tf.gather(zarg, delta_en_csum_pos)
delta_en_csum = tf.gather(delta_en_csum, delta_en_csum_pos)
energy = tf.gather(energy, delta_en_csum_pos)
delta_en = tf.gather(delta_en, delta_en_csum_pos)
seg_sum_1 = tf.math.segment_sum(1 - energy, delta_en_csum)
seg_sum_where = tf.squeeze(tf.where(tf.greater(seg_sum_1, 0)), axis=1)
seg_sum_1 = tf.gather(seg_sum_1, seg_sum_where)
delta_en_mo = tf.squeeze(tf.where(tf.equal(delta_en, -1)), axis=1) - 1
zarg_deta = tf.gather(zarg, delta_en_mo)
seg_sum_usm = tf.math.unsorted_segment_mean(seg_sum_1, zarg_deta, 60)
logspace_idx = tf.cast(tf.floor(tf.math.log1p(seg_sum_usm) / math.log(2)), tf.int32) + 3
use_identity = tf.cast(seg_sum_usm <= 4, tf.int32)
seg_sum_usm = tf.cast(seg_sum_usm, tf.int32)
combined_idx = use_identity * seg_sum_usm + (1 - use_identity) * logspace_idx
clipped = tf.clip_by_value(combined_idx, 0, 9)
emb = tf.keras.layers.Embedding(10, tonic_emb_size, input_length=60)
return emb(clipped)
# emb = tf.Variable(initializer([10, 128]), dtype=tf.float32)
# return tf.gather(emb, clipped)
# return clipped
# @tf.function
def get_tonic_from_hist(y, tonic_emb_size=32, tonic_cnn_filters=128, drop_rate=0.2):
#y, int(note_dim*3/32), int(note_dim/2), drop_rate
#z = tf.nn.softmax(y, axis=1) #try this
z = tf.reduce_mean(y, axis=0)
z = tf.reshape(z, [6,60])
z = tf.reduce_mean(z, axis=0)
kernel_size = [5, 10, 15, 20]
outputs = []
for ks in kernel_size:
bz = tf.concat([z, z[:ks]], axis=0)
bz = tf.reshape(bz, [1, -1, 1]) #
conv = Conv1D(filters=tonic_cnn_filters, kernel_size=ks, strides=1, activation='relu', padding='valid')(bz) ##((60+ks)/ks, ks, 1)
conv = tf.squeeze(conv, axis=0)
conv = conv[:-1, :]
conv = Dropout(drop_rate)(conv)
conv = Dense(tonic_emb_size, activation='relu')(conv)
outputs.append(conv)
outputs = tf.concat(outputs, 1)
outputs = ffnn(outputs, [tonic_emb_size], drop_rate=drop_rate)
return outputs
# @tf.function
def get_tonic_from_cnn(chroma, tonic_emb_size=32, tonic_cnn_filters=128, drop_rate=0.1):
#chroma, int(note_dim*3/32), int(note_dim/2), drop_rate
chroma = tf.expand_dims(chroma,0)
chroma = tf.expand_dims(chroma, 3)
y = Conv2D(tonic_cnn_filters, (5, 5), strides=1, padding='same', activation='relu', name='tonic_cnn_1')(chroma)
y = Conv2D(tonic_cnn_filters, (5, 5), strides=1, padding='same', activation='relu', name='tonic_cnn_2')(y)
y = Conv2D(tonic_cnn_filters, (5, 5), strides=1, padding='same', activation='relu', name='tonic_cnn_3')(y)
y = Dropout(drop_rate)(y)
y = Conv2D(tonic_cnn_filters, (5, 5), strides=1, padding='same', activation='relu', name='tonic_cnn_4')(y)
y = Conv2D(tonic_cnn_filters, (5, 5), strides=1, padding='same', activation='relu', name='tonic_cnn_5')(y) #(1, 60, -1, 128)
y = Dropout(drop_rate)(y)
y = tf.squeeze(y, 0)
y = tf.reduce_mean(y, 1)
y = Dense(tonic_emb_size, activation='relu', name = 'tonic_cnn_dense_1')(y)
return y #(60,32)
def output_path(file, suffix, output_dir):
"""
return the output path of an output file corresponding to a wav file
"""
    path = re.sub(r"(?i)\.wav$", suffix, file)
if output_dir is not None:
path = os.path.join(output_dir, os.path.basename(path))
return path
def to_local_average_cents(salience, center=None):
"""
find the weighted average cents near the argmax bin
"""
if not hasattr(to_local_average_cents, 'cents_mapping'):
# the bin number-to-cents mapping
to_local_average_cents.cents_mapping = (
np.linspace(0, 7180, 360)[:60] + 1997.3794084376191)
if salience.ndim == 1:
if center is None:
center = int(np.argmax(salience))
start = max(0, center - 4)
end = min(len(salience), center + 5)
salience = salience[start:end]
product_sum = np.sum(
salience * to_local_average_cents.cents_mapping[start:end])
weight_sum = np.sum(salience)
return product_sum / weight_sum
# product_sum = np.sum(
# salience * to_local_average_cents.cents_mapping)
# return product_sum
if salience.ndim == 2:
return np.array([to_local_average_cents(salience[i, :]) for i in
range(salience.shape[0])])
raise Exception("label should be either 1d or 2d ndarray")
def to_viterbi_cents(salience):
"""
Find the Viterbi path using a transition prior that induces pitch
continuity.
"""
from hmmlearn import hmm
# uniform prior on the starting pitch
starting = np.ones(360) / 360
# transition probabilities inducing continuous pitch
xx, yy = np.meshgrid(range(360), range(360))
transition = np.maximum(12 - abs(xx - yy), 0)
transition = transition / np.sum(transition, axis=1)[:, None]
# emission probability = fixed probability for self, evenly distribute the
# others
self_emission = 0.1
emission = (np.eye(360) * self_emission + np.ones(shape=(360, 360)) *
((1 - self_emission) / 360))
# fix the model parameters because we are not optimizing the model
model = hmm.MultinomialHMM(360, starting, transition)
model.startprob_, model.transmat_, model.emissionprob_ = \
starting, transition, emission
# find the Viterbi path
observations = np.argmax(salience, axis=1)
path = model.predict(observations.reshape(-1, 1), [len(observations)])
return np.array([to_local_average_cents(salience[i, :], path[i]) for i in
range(len(observations))])
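# Sketch (hypothetical helper, not used by the pipeline): builds the same pitch-continuity
# transition prior as above on a tiny grid so its banded, triangular structure is easy to
# inspect without constructing the full 360x360 matrix.
def _example_transition_row(n_bins=5, half_width=2):
    xx, yy = np.meshgrid(range(n_bins), range(n_bins))
    transition = np.maximum(half_width - abs(xx - yy), 0)
    transition = transition / np.sum(transition, axis=1)[:, None]
    return transition[0]  # array([2/3, 1/3, 0., 0., 0.]) for the first bin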
def get_activation(audio, sr, model_capacity='full', center=True, step_size=10,
verbose=1):
"""
Parameters
----------
audio : np.ndarray [shape=(N,) or (N, C)]
The audio samples. Multichannel audio will be downmixed.
sr : int
Sample rate of the audio samples. The audio will be resampled if
the sample rate is not 16 kHz, which is expected by the model.
model_capacity : 'tiny', 'small', 'medium', 'large', or 'full'
String specifying the model capacity; see the docstring of
:func:`~crepe.core.build_and_load_model`
center : boolean
- If `True` (default), the signal `audio` is padded so that frame
`D[:, t]` is centered at `audio[t * hop_length]`.
- If `False`, then `D[:, t]` begins at `audio[t * hop_length]`
step_size : int
The step size in milliseconds for running pitch estimation.
verbose : int
Set the keras verbosity mode: 1 (default) will print out a progress bar
during prediction, 0 will suppress all non-error printouts.
Returns
-------
activation : np.ndarray [shape=(T, 360)]
The raw activation matrix
"""
config = pyhocon.ConfigFactory.parse_file("crepe/experiments.conf")['raga']
model = build_and_load_model(config)
if len(audio.shape) == 2:
audio = audio.mean(1) # make mono
audio = audio.astype(np.float32)
if sr != model_srate:
# resample audio if necessary
from resampy import resample
audio = resample(audio, sr, model_srate)
chroma = get_chroma(audio, model_srate)
# pad so that frames are centered around their timestamps (i.e. first frame
# is zero centered).
if center:
audio = np.pad(audio, 512, mode='constant', constant_values=0)
# make 1024-sample frames of the audio with hop length of 10 milliseconds
hop_length = int(model_srate * step_size / 1000)
n_frames = 1 + int((len(audio) - 1024) / hop_length)
frames = as_strided(audio, shape=(1024, n_frames),
strides=(audio.itemsize, hop_length * audio.itemsize))
frames = frames.transpose().copy()
# frames = np.expand_dims(1, frames)
energy = (audio-np.mean(audio))/np.std(audio)
energy = np.square(energy)
energy_frames = as_strided(energy, shape=(1024, n_frames),
strides=(energy.itemsize, hop_length * energy.itemsize))
energy_frames = energy_frames.transpose().copy()
energy_frames = np.mean(energy_frames, axis=1)
energy_frames = (energy_frames-np.mean(energy_frames))/np.std(energy_frames)
    frames = (frames - np.mean(frames, axis=1, keepdims=True)) / np.std(frames, axis=1, keepdims=True)
frames, energy_frames, mask = pad_frames(frames, energy_frames, sequence_length)
frames = np.array([frames])
mask = | np.array([mask]) | numpy.array |
"""
A module that provides some specializations and utilities for all NumPy based backends
that are using FFT for performing convolutions.
"""
from typing import Tuple, Optional, Union
import numpy as np
from scipy.fft import next_fast_len
from opt_einsum import contract_expression
from ._NumPyBackend import NumPyBackend
class NumPyFFTBackend(NumPyBackend):
r"""
The parent class for all NumPy based backends that are using FFT for performing convolutions.
They provide the functionality to evaluate the analytic gradients of the factorization model.
"""
def __init__(
self,
**kwargs,
):
super().__init__(**kwargs)
self.fft_params = {}
def _initialize_matrices(
self,
V: np.ndarray,
atom_shape: Tuple[int, ...],
n_atoms: int,
W: Optional[np.ndarray] = None,
axes_W_normalization: Optional[Union[int, Tuple[int, ...]]] = None,
) -> Tuple[np.ndarray, np.ndarray]:
# this sets pad_mode and pad_width properly
w, h = super()._initialize_matrices(V, atom_shape, n_atoms, W, axes_W_normalization)
# shorthand for unpadded and unsliced axes
unpadded = ((0, 0), ) * (V.ndim - len(self._shift_dimensions))
unsliced = (slice(None), ) * (V.ndim - len(self._shift_dimensions))
# shape for fft
fft_shape = tuple(next_fast_len(s) for s in np.array(self._sample_shape) + np.array(self._transform_shape) - 1)
# shorthands for shape of FFT fields
H_f_shape = (self.n_samples, n_atoms) + fft_shape
W_f_shape = (n_atoms, self.n_channels) + fft_shape
V_f_shape = (self.n_samples, self.n_channels) + fft_shape
# fft details: reconstruction
lower_idx = np.array(atom_shape) - 1
self.fft_params['reconstruct'] = {
'fft_axes': self._shift_dimensions,
'pad_mode': self._pad_mode,
'pad_width': unpadded + self._padding_left,
'correlate': False,
'slices': unsliced + tuple(slice(f, f + s) for f, s in zip(lower_idx, self._sample_shape)),
'fft_shape': fft_shape,
# sum_c V|R[n, c, ... ] * W[m , c, ...] --> dR / dH[n, m, ...]
'contraction': contract_expression('nm...,mc...->nc...', H_f_shape, W_f_shape)
}
# fft details: gradient H computation
lower_idx = np.zeros_like(self._transform_shape)
if self._pad_mode is not None:
lower_idx += np.asarray(self._padding_right)[:, 1]
self.fft_params['grad_H'] = {
'fft_axes': self._shift_dimensions,
'pad_mode': self._pad_mode,
'pad_width': unpadded + self._padding_right,
'correlate': True,
'slices': unsliced + tuple(slice(f, f + s) for f, s in zip(lower_idx, self._transform_shape)),
'fft_shape': fft_shape,
# sum_c V|R[n, c, ... ] * W[m , c, ...] --> dR / dH[n, m, ...]
'contraction': contract_expression('nc...,mc...->nm...', V_f_shape, W_f_shape),
}
# fft details: gradient W computation
lower_idx = np.minimum(np.array(self._sample_shape), | np.array(self._transform_shape) | numpy.array |
# Copyright 2021 The WAX-ML Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as onp
import pytest
from wax.compile import jit_init_apply
from wax.modules.ewma import EWMA
from wax.modules.ewmvar import EWMVar
from wax.unroll import unroll
# Another implementation for checking
class EWMVar_v2(hk.Module):
"""Exponentially weighted variance.
To calculate the variance we use the fact that Var(X) = Mean(x^2) - Mean(x)^2 and internally
    we use exponentially weighted means of x and x^2 to calculate this.
Arguments:
alpha : The closer `alpha` is to 1 the more the statistic will adapt to recent values.
Attributes:
variance : The running exponentially weighted variance.
References
----------
[^1]: [<NAME>., 2009. Incremental calculation of weighted mean and variance. University of Cambridge, 4(11-5), pp.41-42.](https://fanf2.user.srcf.net/hermes/doc/antiforgery/stats.pdf) # noqa
"""
def __init__(self, alpha=0.5, adjust=True, name=None):
super().__init__(name=name)
self.alpha = alpha
self.adjust = adjust
def __call__(self, x):
mean = EWMA(self.alpha, self.adjust, initial_value=jnp.nan, name="mean")(x)
mean_square = EWMA(
self.alpha, self.adjust, initial_value=jnp.nan, name="mean_square"
)(x * x)
var = mean_square - mean ** 2
var = jnp.where(var < 0, 0.0, var)
return var
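# A minimal numeric check (added for illustration; it is not one of the original tests) of
# the moment identity Var(X) = E[X^2] - E[X]^2 that EWMVar_v2 relies on, using plain numpy.
def test_variance_moment_identity_plain_numpy():
    x = onp.array([1.0, 2.0, 4.0, 7.0])
    assert onp.allclose(onp.var(x), onp.mean(x ** 2) - onp.mean(x) ** 2)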
@pytest.mark.parametrize("dtype", ["float32", "float64"])
def test_init_and_first_step_var_float64(dtype):
from jax.config import config
if dtype == "float64":
config.update("jax_enable_x64", True)
else:
config.update("jax_enable_x64", False)
seq = hk.PRNGSequence(42)
x = jax.random.normal(shape=(3,), key=next(seq), dtype=jnp.float64)
@jit_init_apply
@hk.transform_with_state
def model(x):
return EWMVar(0.1, adjust=True)(x)
params, state = model.init(next(seq), x)
var, state = model.apply(params, state, next(seq), x)
assert var.dtype == jnp.dtype(dtype)
def test_run_var_vs_pandas_not_adjust():
from jax.config import config
config.update("jax_enable_x64", True)
import pandas as pd
seq = hk.PRNGSequence(42)
x = jax.random.normal(shape=(10, 3), key=next(seq), dtype=jnp.float64)
@jit_init_apply
@hk.transform_with_state
def model(x):
return EWMVar(0.1, adjust=False)(x)
var, state = unroll(model, return_final_state=True)(x)
var = pd.DataFrame(var)
@jit_init_apply
@hk.transform_with_state
def model2(x):
return EWMVar_v2(0.1, adjust=False)(x)
var2, state2 = unroll(model2, return_final_state=True)(x)
var2 = pd.DataFrame(var2)
assert onp.allclose(var, var2)
pandas_var = pd.DataFrame(x).ewm(alpha=0.1, adjust=False).var()
assert not onp.allclose(var, pandas_var.values)
def test_run_var_vs_pandas_adjust():
from jax.config import config
config.update("jax_enable_x64", True)
import pandas as pd
seq = hk.PRNGSequence(42)
x = jax.random.normal(shape=(10, 3), key=next(seq), dtype=jnp.float64)
@jit_init_apply
@hk.transform_with_state
def model(x):
return EWMVar(0.1, adjust=True)(x)
var, state = unroll(model, return_final_state=True)(x)
var = pd.DataFrame(var)
@jit_init_apply
@hk.transform_with_state
def model2(x):
return EWMVar_v2(0.1, adjust=True)(x)
var2, state2 = unroll(model2, return_final_state=True)(x)
var2 = pd.DataFrame(var2)
assert | onp.allclose(var, var2) | numpy.allclose |
"""
Tests heat_2d
"""
import pytest
import numpy as np
from pymgrit.heat.heat_2d import Heat2D
from pymgrit.heat.heat_2d import VectorHeat2D
def test_heat_2d_constructor():
"""
Test constructor
"""
x_start = 0
x_end = 1
y_start = 3
y_end = 4
nx = 5
ny = 5
a = 1
heat_2d = Heat2D(a=a, x_start=x_start, x_end=x_end, y_start=y_start, y_end=y_end, nx=nx, ny=ny,
t_start=0, t_stop=1, nt=11)
np.testing.assert_equal(heat_2d.x_start, x_start)
np.testing.assert_equal(heat_2d.x_end, x_end)
np.testing.assert_equal(heat_2d.y_start, y_start)
np.testing.assert_equal(heat_2d.y_end, y_end)
np.testing.assert_equal(heat_2d.nx, nx)
np.testing.assert_equal(heat_2d.ny, ny)
np.testing.assert_almost_equal(heat_2d.dx, 0.25)
np.testing.assert_almost_equal(heat_2d.dy, 0.25)
np.testing.assert_equal(heat_2d.x, np.linspace(x_start, x_end, nx))
np.testing.assert_equal(heat_2d.y, np.linspace(y_start, y_end, ny))
np.testing.assert_equal(heat_2d.x_2d, np.linspace(x_start, x_end, nx)[:, np.newaxis])
np.testing.assert_equal(heat_2d.y_2d, np.linspace(y_start, y_end, ny)[np.newaxis, :])
np.testing.assert_equal(heat_2d.a, a)
np.testing.assert_equal(True, isinstance(heat_2d.vector_template, VectorHeat2D))
np.testing.assert_equal(True, isinstance(heat_2d.vector_t_start, VectorHeat2D))
np.testing.assert_equal(heat_2d.vector_t_start.get_values(), np.zeros((5, 5)))
matrix = [[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., - 16., 0., 0., 0., - 16., 64., - 16., 0., 0., 0., - 16., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0.],
[0., 0., - 16., 0., 0., 0., - 16., 64., - 16., 0., 0., 0., - 16., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0.],
[0., 0., 0., - 16., 0., 0., 0., - 16., 64., - 16., 0., 0., 0., - 16., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., - 16., 0., 0., 0., - 16., 64., - 16., 0., 0., 0., - 16., 0., 0., 0., 0.,
0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., - 16., 0., 0., 0., - 16., 64., - 16., 0., 0., 0., - 16., 0., 0., 0.,
0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., - 16., 0., 0., 0., - 16., 64., -16., 0., 0., 0., - 16., 0.,
0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., - 16., 0., 0., 0., - 16., 64., - 16., 0., 0.,
0., - 16., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., - 16., 0., 0., 0., - 16., 64., - 16., 0., 0.,
0., - 16., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., - 16., 0., 0., 0., - 16., 64., - 16., 0.,
0., 0., - 16., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]]
np.testing.assert_equal(heat_2d.space_disc.toarray(), matrix)
def test_heat_2d_constructor_be():
"""
Test constructor
"""
x_start = 0
x_end = 1
y_start = 3
y_end = 4
nx = 5
ny = 5
a = 1
heat_2d = Heat2D(a=a, x_start=x_start, x_end=x_end, y_start=y_start, y_end=y_end, nx=nx, ny=ny, method='BE',
rhs=lambda x, y, t: 2 * x * y, t_start=0, t_stop=1, nt=11)
np.testing.assert_almost_equal(heat_2d.compute_rhs(u_start=4 * np.ones((5, 5)), t_start=0.2, t_stop=0.3), np.array(
[0., 0., 0., 0., 0., 0., 4.1625, 4.175, 4.1875, 0., 0., 4.325, 4.35, 4.375, 0., 0., 4.4875, 4.525, 4.5625, 0.,
0., 0., 0., 0., 0.]))
def test_heat_2d_constructor_fe():
"""
Test constructor
"""
x_start = 0
x_end = 1
y_start = 3
y_end = 4
nx = 5
ny = 5
a = 1
heat_2d = Heat2D(a=a, x_start=x_start, x_end=x_end, y_start=y_start, y_end=y_end, nx=nx, ny=ny, method='FE',
rhs=lambda x, y, t: 2 * x * y, t_start=0, t_stop=1, nt=11)
np.testing.assert_almost_equal(heat_2d.compute_rhs(u_start=4 * np.ones((5, 5)), t_start=0.2, t_stop=0.3), np.array(
[0., 0., 0., 0., 0., 0., 4.1625, 4.175, 4.1875, 0., 0., 4.325, 4.35, 4.375, 0., 0., 4.4875, 4.525, 4.5625, 0.,
0., 0., 0., 0., 0.]))
def test_heat_2d_constructor_cn():
"""
Test constructor
"""
x_start = 0
x_end = 1
y_start = 3
y_end = 4
nx = 5
ny = 5
a = 1
heat_2d = Heat2D(a=a, x_start=x_start, x_end=x_end, y_start=y_start, y_end=y_end, nx=nx, ny=ny, method='CN',
rhs=lambda x, y, t: 2 * x * y, t_start=0, t_stop=1, nt=11)
np.testing.assert_almost_equal(heat_2d.compute_rhs(u_start=4 * np.ones((5, 5)), t_start=0.2, t_stop=0.3), np.array(
[0., 0., 0., 0., 0., 0., 4.1625, 4.175, 4.1875, 0., 0., 4.325, 4.35, 4.375, 0., 0., 4.4875, 4.525, 4.5625, 0.,
0., 0., 0., 0., 0.]))
def test_heat_2d_constructor_exception_method():
"""
Test constructor
"""
x_start = 0
x_end = 1
y_start = 3
y_end = 4
nx = 5
ny = 5
a = 1
with pytest.raises(Exception):
Heat2D(a=a, x_start=x_start, x_end=x_end, y_start=y_start, y_end=y_end, nx=nx, ny=ny, method='DE',
rhs=lambda x, y, t: 2 * x * y, t_start=0, t_stop=1, nt=11)
def test_heat_2d_constructor_exception_boundary_left():
"""
Test constructor
"""
x_start = 0
x_end = 1
y_start = 3
y_end = 4
nx = 5
ny = 5
a = 1
with pytest.raises(Exception):
Heat2D(a=a, x_start=x_start, x_end=x_end, y_start=y_start, y_end=y_end, nx=nx, ny=ny, bc_left='2',
rhs=lambda x, y, t: 2 * x * y, t_start=0, t_stop=1, nt=11)
def test_heat_2d_constructor_exception_boundary_bottom():
"""
Test constructor
"""
x_start = 0
x_end = 1
y_start = 3
y_end = 4
nx = 5
ny = 5
a = 1
with pytest.raises(Exception):
Heat2D(a=a, x_start=x_start, x_end=x_end, y_start=y_start, y_end=y_end, nx=nx, ny=ny, bc_bottom='2',
rhs=lambda x, y, t: 2 * x * y, t_start=0, t_stop=1, nt=11)
def test_heat_2d_constructor_exception_boundary_right():
"""
Test constructor
"""
x_start = 0
x_end = 1
y_start = 3
y_end = 4
nx = 5
ny = 5
a = 1
with pytest.raises(Exception):
Heat2D(a=a, x_start=x_start, x_end=x_end, y_start=y_start, y_end=y_end, nx=nx, ny=ny, bc_right='2',
rhs=lambda x, y, t: 2 * x * y, t_start=0, t_stop=1, nt=11)
def test_heat_2d_constructor_exception_boundary_top():
"""
Test constructor
"""
x_start = 0
x_end = 1
y_start = 3
y_end = 4
nx = 5
ny = 5
a = 1
with pytest.raises(Exception):
Heat2D(a=a, x_start=x_start, x_end=x_end, y_start=y_start, y_end=y_end, nx=nx, ny=ny, bc_top='2',
rhs=lambda x, y, t: 2 * x * y, t_start=0, t_stop=1, nt=11)
def test_heat_2d_constructor_boundary():
"""
Test constructor
"""
x_start = 0
x_end = 1
y_start = 3
y_end = 4
nx = 5
ny = 5
a = 1
Heat2D(a=a, x_start=x_start, x_end=x_end, y_start=y_start, y_end=y_end, nx=nx, ny=ny, bc_top=lambda y: 2,
bc_left=lambda y: 2, bc_right=lambda y: 2, bc_bottom=lambda y: 2, rhs=lambda x, y, t: 2 * x * y,
t_start=0, t_stop=1, nt=11)
def test_heat_2d_step_be():
"""
Test step()
"""
x_start = 0
x_end = 1
y_start = 3
y_end = 4
nx = 5
ny = 5
a = 1
heat_2d = Heat2D(a=a, x_start=x_start, x_end=x_end, y_start=y_start, y_end=y_end, nx=nx, ny=ny, method='BE',
rhs=lambda x, y, t: 2 * x * y, t_start=0, t_stop=1, nt=11)
heat_2d_res = heat_2d.step(u_start=heat_2d.vector_t_start, t_start=0, t_stop=0.1)
np.testing.assert_almost_equal(heat_2d_res.get_values(), np.array(
[[0., 0., 0., 0., 0.], [0., 0.06659024, 0.08719337, 0.07227713, 0.],
[0., 0.11922399, 0.15502696, 0.12990086, 0.], [0., 0.12666875, 0.16193148, 0.1391124, 0.],
[0., 0., 0., 0., 0.]]))
def test_heat_2d_step_cn():
"""
Test step()
"""
x_start = 0
x_end = 1
y_start = 3
y_end = 4
nx = 5
ny = 5
a = 1
heat_2d = Heat2D(a=a, x_start=x_start, x_end=x_end, y_start=y_start, y_end=y_end, nx=nx, ny=ny, method='CN',
rhs=lambda x, y, t: 2 * x * y, t_start=0, t_stop=1, nt=11)
heat_2d_res = heat_2d.step(u_start=heat_2d.vector_t_start, t_start=0, t_stop=0.1)
np.testing.assert_almost_equal(heat_2d_res.get_values(), np.array(
[[0., 0., 0., 0., 0.], [0., 0.09547237, 0.12246323, 0.10480116, 0.],
[0., 0.17564171, 0.22390841, 0.19336787, 0.], [0., 0.1964882, 0.24654636, 0.21772176, 0.],
[0., 0., 0., 0., 0.]]))
def test_heat_2d_step_fe():
"""
Test step()
"""
x_start = 0
x_end = 1
y_start = 3
y_end = 4
nx = 5
ny = 5
a = 1
heat_2d = Heat2D(a=a, x_start=x_start, x_end=x_end, y_start=y_start, y_end=y_end, nx=nx, ny=ny, method='FE',
rhs=lambda x, y, t: 2 * x * y, t_start=0, t_stop=1, nt=11)
heat_2d_res = heat_2d.step(u_start=heat_2d.vector_t_start, t_start=0, t_stop=0.1)
np.testing.assert_almost_equal(heat_2d_res.get_values(), np.array(
[[0., 0., 0., 0., 0.], [0., 0.1625, 0.175, 0.1875, 0.], [0., 0.325, 0.35, 0.375, 0.],
[0., 0.4875, 0.525, 0.5625, 0.], [0., 0., 0., 0., 0.]]))
def test_vector_heat_2d_constructor():
"""
Test constructor
"""
vector_heat_2d = VectorHeat2D(nx=3, ny=3)
np.testing.assert_equal(vector_heat_2d.nx, 3)
np.testing.assert_equal(vector_heat_2d.ny, 3)
np.testing.assert_equal(vector_heat_2d.values, np.zeros((3, 3)))
def test_vector_heat_2d_add():
"""
Test __add__
"""
vector_heat_2d_1 = VectorHeat2D(nx=3, ny=3)
vector_heat_2d_1.values = np.ones((3, 3))
vector_heat_2d_2 = VectorHeat2D(nx=3, ny=3)
vector_heat_2d_2.values = 2 * np.ones((3, 3))
vector_heat_2d_res = vector_heat_2d_1 + vector_heat_2d_2
np.testing.assert_equal(vector_heat_2d_res.values, 3 * | np.ones((3, 3)) | numpy.ones |
"""
This module contains our thermodynamic calculations. Calculation of pressure, fugacity coefficient, and max density are handled by an Eos object so that these functions can be used with any EOS. The thermodynamics module contains a series of wrapper to handle the inputs and outputs of these functions.
"""
import numpy as np
from scipy import interpolate
import scipy.optimize as spo
from scipy.ndimage.filters import gaussian_filter1d
import copy
import logging
import despasito.utils.general_toolbox as gtb
from despasito import fundamental_constants as constants
logger = logging.getLogger(__name__)
def pressure_vs_volume_arrays(
T,
xi,
Eos,
min_density_fraction=(1.0 / 500000.0),
density_increment=5.0,
max_volume_increment=1.0e-4,
pressure_min=100,
maxiter=25,
multfactor=2,
extended_npts=20,
max_density=None,
density_max_opts={},
**kwargs
):
r"""
Output arrays with specific volume and pressure arrays calculated from the given EOS.
This function is fundamental to every calculation, the options of which are passed through higher level calculation with the keyword variable ``density_opts``.
Parameters
----------
T : float
[K] Temperature of the system
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
min_density_fraction : float, Optional, default=(1.0/500000.0)
Fraction of the maximum density used to calculate, and is equal to, the minimum density of the density array. The minimum density is the reciprocal of the maximum specific volume used to calculate the roots.
density_increment : float, Optional, default=5.0
The increment between density values in the density array.
max_volume_increment : float, Optional, default=1.0E-4
Maximum increment between specific volume array values. After conversion from density to specific volume, the increment values are compared to this value.
pressure_min : float, Optional, default=100
Ensure pressure curve reaches down to this value
multfactor : int, Optional, default=2
Multiplication factor to extend range
extended_npts : int, Optional, default=20
Number of points in extended range
maxiter : int, Optional, default=25
Number of times to multiply range by to obtain full pressure vs. specific volume curve
max_density : float, Optional, default=None
[mol/:math:`m^3`] Maximum molar density defined, if default of None is used then the Eos object method, density_max is used.
density_max_opts : dict, Optional, default={}
Keyword arguments for density_max method for EOS object
Returns
-------
vlist : numpy.ndarray
[:math:`m^3`/mol] Specific volume array.
Plist : numpy.ndarray
[Pa] Pressure associated with specific volume of system with given temperature and composition
"""
if len(kwargs) > 0:
logger.debug(
" 'pressure_vs_volume_arrays' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
if np.any(np.isnan(xi)):
raise ValueError("Given mole fractions are NaN")
if isinstance(xi, list):
xi = np.array(xi)
# estimate the maximum density based on the hard sphere packing fraction, part of EOS
if not max_density:
max_density = Eos.density_max(xi, T, **density_max_opts)
elif gtb.isiterable(max_density):
logger.error(
" Maxrho should be type float. Given value: {}".format(max_density)
)
max_density = max_density[0]
if max_density > 1e5:
raise ValueError(
"Max density of {} mol/m^3 is not feasible, check parameters.".format(
max_density
)
)
# min rho is a fraction of max rho, such that minrho << rhogassat
minrho = max_density * min_density_fraction
# list of densities for P,rho and P,v
if (max_density - minrho) < density_increment:
raise ValueError(
"Density range, {}, is less than increment, {}. Check parameters used in Eos.density_max().".format(
(max_density - minrho), density_increment
)
)
rholist = np.arange(minrho, max_density, density_increment)
# check rholist to see when the spacing
vspace = (1.0 / rholist[:-1]) - (1.0 / rholist[1:])
if np.amax(vspace) > max_volume_increment:
vspaceswitch = np.where(vspace > max_volume_increment)[0][-1]
rholist_2 = (
1.0
/ np.arange(
1.0 / rholist[vspaceswitch + 1], 1.0 / minrho, max_volume_increment
)[::-1]
)
rholist = np.append(rholist_2, rholist[vspaceswitch + 2 :])
# compute Pressures (Plist) for rholist
Plist = Eos.pressure(rholist, T, xi)
# Make sure enough of the pressure curve is obtained
for i in range(maxiter):
if Plist[0] > pressure_min:
rhotmp = np.linspace(rholist[0] / 2, rholist[0], extended_npts)[:-1]
Ptmp = Eos.pressure(rhotmp, T, xi)
Plist = np.append(Ptmp, Plist)
rholist = np.append(rhotmp, rholist)
else:
break
# Flip Plist and rholist arrays
Plist = Plist[:][::-1]
rholist = rholist[:][::-1]
vlist = 1.0 / rholist
return vlist, Plist
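# Hedged usage sketch (illustrative only; ``eos`` stands for any instantiated EOS object
# and the temperature, composition and pressure values are placeholders, not defaults):
#
#     vlist, Plist = pressure_vs_volume_arrays(300.0, np.array([1.0]), eos)
#     Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Plist - 101325.0)
#     # ``roots`` are the specific volumes at which the EOS pressure equals 101325 Pa,
#     # liquid root first and vapor root last.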
def pressure_vs_volume_spline(vlist, Plist):
r"""
Fit arrays of specific volume and pressure values to a cubic Univariate Spline.
Parameters
----------
vlist : numpy.ndarray
[:math:`m^3`/mol] Specific volume array.
Plist : numpy.ndarray
[Pa] Pressure associated with specific volume of system with given temperature and composition
Returns
-------
Pvspline : obj
Function object of pressure vs. specific volume
roots : list
List of specific volume roots. Subtract a system pressure from the output of Pvsrho to find density of vapor and/or liquid densities.
extrema : list
List of specific volume values corresponding to local minima and maxima.
"""
# Larger sigma value
Psmoothed = gaussian_filter1d(Plist, sigma=1.0e-2)
Pvspline = interpolate.InterpolatedUnivariateSpline(vlist, Psmoothed)
roots = Pvspline.roots().tolist()
Pvspline = interpolate.InterpolatedUnivariateSpline(vlist, Psmoothed, k=4)
extrema = Pvspline.derivative().roots().tolist()
if extrema:
if len(extrema) > 2:
extrema = extrema[0:2]
# pressure_vs_volume_plot(vlist, Plist, Pvspline, markers=extrema)
if np.any(np.isnan(Plist)):
roots = [np.nan]
return Pvspline, roots, extrema
def pressure_vs_volume_plot(vlist, Plist, Pvspline, markers=[], **kwargs):
r"""
Plot pressure vs. specific volume.
Parameters
----------
vlist : numpy.ndarray
[:math:`m^3`/mol] Specific volume array.
Plist : numpy.ndarray
[Pa] Pressure associated with specific volume of system with given temperature and composition
Pvspline : obj
Function object of pressure vs. specific volume
markers : list, Optional, default=[]
List of plot markers used in plot
"""
if len(kwargs) > 0:
logger.debug(
" 'pressure_vs_volume_plot' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
try:
import matplotlib.pyplot as plt
plt.figure(1)
plt.plot(vlist, Plist, label="Orig.")
plt.plot(vlist, Pvspline(vlist), label="Smoothed")
plt.plot([vlist[0], vlist[-1]], [0, 0], "k")
for k in range(len(markers)):
plt.plot([markers[k], markers[k]], [min(Plist), max(Plist)], "k")
plt.xlabel("Specific Volume [$m^3$/mol]"), plt.ylabel("Pressure [Pa]")
# plt.ylim(min(Plist)/2,np.abs(min(Plist))/2)
plt.legend(loc="best")
plt.tight_layout()
plt.show()
except Exception:
logger.error("Matplotlib package is not installed, could not plot")
def calc_saturation_properties(
T, xi, Eos, density_opts={}, tol=1e-6, Pconverged=1, **kwargs
):
r"""
Computes the saturated pressure, gas and liquid densities for a single component system.
Parameters
----------
T : float
[K] Temperature of the system
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
tol : float, Optional, default=1e-6
Tolerance to accept pressure value
Pconverged : float, Optional, default=1.0
If the pressure is negative (under tension), we search from a value just above vacuum
Returns
-------
Psat : float
[Pa] Saturation pressure given system information
rhov : float
[mol/:math:`m^3`] Density of vapor at saturation pressure
rhol : float
[mol/:math:`m^3`] Density of liquid at saturation pressure
"""
if len(kwargs) > 0:
logger.debug(
" 'calc_saturation_properties' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
if np.count_nonzero(xi) != 1:
if np.count_nonzero(xi > 0.1) != 1:
raise ValueError(
"Multiple components have compositions greater than 10%, check code for source"
)
else:
ind = np.where((xi > 0.1) == True)[0]
raise ValueError(
"Multiple components have compositions greater than 0. Do you mean to obtain the saturation pressure of {} with a mole fraction of {}?".format(
Eos.beads[ind], xi[ind]
)
)
vlist, Plist = pressure_vs_volume_arrays(T, xi, Eos, **density_opts)
Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Plist)
if not extrema or len(extrema) < 2 or np.any(np.isnan(roots)):
logger.warning(" The component is above its critical point")
Psat, rhol, rhov = np.nan, np.nan, np.nan
else:
ind_Pmin1 = np.argwhere(np.diff(Plist) > 0)[0][0]
ind_Pmax1 = np.argmax(Plist[ind_Pmin1:]) + ind_Pmin1
Pmaxsearch = Plist[ind_Pmax1]
Pminsearch = max(Pconverged, np.amin(Plist[ind_Pmin1:ind_Pmax1]))
# Using computed Psat find the roots in the maxwell construction to give liquid (first root) and vapor (last root) densities
Psat = spo.minimize_scalar(
objective_saturation_pressure,
args=(Plist, vlist),
bounds=(Pminsearch, Pmaxsearch),
method="bounded",
)
Psat = Psat.x
obj_value = objective_saturation_pressure(Psat, Plist, vlist)
Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Plist - Psat)
# pressure_vs_volume_plot(vlist, Plist, Pvspline, markers=extrema)
if obj_value < tol:
logger.debug(
" Psat found: {} Pa, obj value: {}, with {} roots and {} extrema".format(
Psat, obj_value, np.size(roots), np.size(extrema)
)
)
if len(roots) == 2:
slope, yroot = np.polyfit(vlist[-4:], Plist[-4:] - Psat, 1)
vroot = -yroot / slope
if vroot < 0.0:
vroot = np.finfo(float).eps
rho_tmp = spo.minimize(
pressure_spline_error,
1.0 / vroot,
args=(Psat, T, xi, Eos),
bounds=[(1.0 / (vroot * 1e2), 1.0 / (1.1 * roots[-1]))],
)
roots = np.append(roots, [1.0 / rho_tmp.x])
rhol = 1.0 / roots[0]
rhov = 1.0 / roots[2]
else:
logger.warning(
" Psat NOT found: {} Pa, obj value: {}, consider decreasing 'pressure_min' option in density_opts".format(
Psat, obj_value
)
)
Psat, rhol, rhov = np.nan, np.nan, np.nan
tmpv, _, _ = calc_vapor_fugacity_coefficient(
Psat, T, xi, Eos, density_opts=density_opts
)
tmpl, _, _ = calc_liquid_fugacity_coefficient(
Psat, T, xi, Eos, density_opts=density_opts
)
logger.debug(" phiv: {}, phil: {}".format(tmpv, tmpl))
return Psat, rhol, rhov
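# Hedged usage sketch (illustrative; ``eos`` is a placeholder for an instantiated EOS
# object, and the composition must be essentially a pure component for this routine):
#
#     Psat, rhol, rhov = calc_saturation_properties(300.0, np.array([1.0]), eos)
#     # Psat in Pa, rhol and rhov in mol/m^3; all three are NaN above the critical point.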
def objective_saturation_pressure(shift, Pv, vlist):
r"""
Objective function used to calculate the saturation pressure.
Parameters
----------
shift : float
[Pa] Guess in Psat value used to translate the pressure vs. specific volume curve
Pv : numpy.ndarray
[Pa] Pressure associated with specific volume of system with given temperature and composition
vlist : numpy.ndarray
[mol/:math:`m^3`] Specific volume array. Length depends on values in density_opts passed to :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
Returns
-------
obj_value : float
Output of objective function, the addition of the positive area between first two roots, and negative area between second and third roots, quantity squared.
"""
Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Pv - shift)
if len(roots) >= 3:
a = Pvspline.integral(roots[0], roots[1])
b = Pvspline.integral(roots[1], roots[2])
elif len(roots) == 2:
a = Pvspline.integral(roots[0], roots[1])
# If the curve hasn't decayed to 0 yet, estimate the remaining area as a triangle. This isn't super accurate but we are just using the saturation pressure to get started.
slope, yroot = np.polyfit(vlist[-4:], Pv[-4:] - shift, 1)
b = (
Pvspline.integral(roots[1], vlist[-1])
+ (Pv[-1] - shift) * (-yroot / slope - vlist[-1]) / 2
)
# raise ValueError("Pressure curve only has two roots. If the curve hasn't fully decayed, either increase maximum specific volume or decrease 'pressure_min' in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`.")
elif np.any(np.isnan(roots)):
raise ValueError(
"Pressure curve without cubic properties has wrongly been accepted. Try decreasing pressure."
)
else:
raise ValueError(
"Pressure curve without cubic properties has wrongly been accepted. Try decreasing min_density_fraction"
)
# pressure_vs_volume_plot(vlist, Pv-shift, Pvspline, markers=extrema)
return (a + b) ** 2
def calc_vapor_density(P, T, xi, Eos, density_opts={}, **kwargs):
r"""
Computes vapor density under system conditions.
Parameters
----------
P : float
[Pa] Pressure of the system
T : float
[K] Temperature of the system
xi : numpy.ndarray
Mole fraction of each component, sum(xi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
Returns
-------
rhov : float
[mol/:math:`m^3`] Density of vapor at system pressure
flag : int
A value of 0 is vapor, 1 is liquid, 2 mean a critical fluid, 3 means that neither is true, 4 means we should assume ideal gas
"""
if len(kwargs) > 0:
logger.debug(
" 'calc_vapor_density' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
vlist, Plist = pressure_vs_volume_arrays(T, xi, Eos, **density_opts)
Plist = Plist - P
Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Plist)
logger.debug(" Find rhov: P {} Pa, roots {} m^3/mol".format(P, roots))
flag_NoOpt = False
l_roots = len(roots)
if np.any(np.isnan(roots)):
rho_tmp = np.nan
flag = 3
logger.warning(
" Flag 3: The T and yi, {} {}, won't produce a fluid (vapor or liquid) at this pressure".format(
T, xi
)
)
elif l_roots == 0:
if Pvspline(1 / vlist[-1]) < 0:
try:
rho_tmp = spo.least_squares(
pressure_spline_error,
1 / vlist[0],
args=(P, T, xi, Eos),
bounds=(
np.finfo("float").eps,
Eos.density_max(xi, T, maxpack=0.99),
),
)
rho_tmp = rho_tmp.x
if not len(extrema):
flag = 2
logger.debug(
" Flag 2: The T and yi, {} {}, combination produces a critical fluid at this pressure".format(
T, xi
)
)
else:
flag = 1
logger.debug(
" Flag 1: The T and yi, {} {}, combination produces a liquid at this pressure".format(
T, xi
)
)
except Exception:
rho_tmp = np.nan
flag = 3
logger.warning(
" Flag 3: The T and xi, {} {}, won't produce a fluid (vapor or liquid) at this pressure, without density greater than max, {}".format(
T, xi, Eos.density_max(xi, T, maxpack=0.99)
)
)
flag_NoOpt = True
elif min(Plist) + P > 0:
slope, yroot = np.polyfit(vlist[-4:], Plist[-4:], 1)
vroot = -yroot / slope
try:
rho_tmp = spo.least_squares(
pressure_spline_error,
1 / vroot,
args=(P, T, xi, Eos),
bounds=(np.finfo("float").eps, 1.0 / (1.1 * roots[-1])),
)
rho_tmp = rho_tmp.x
flag = 0
except Exception:
rho_tmp = np.nan
flag = 4
if not len(extrema):
logger.debug(
" Flag 2: The T and yi, {} {}, combination produces a critical fluid at this pressure".format(
T, xi
)
)
else:
logger.debug(
" Flag 0: This T and yi, {} {}, combination produces a vapor at this pressure. Warning! approaching critical fluid".format(
T, xi
)
)
else:
logger.warning(
" Flag 3: The T and yi, {} {}, won't produce a fluid (vapor or liquid) at this pressure".format(
T, xi
)
)
flag = 3
rho_tmp = np.nan
elif l_roots == 1:
if not len(extrema):
flag = 2
rho_tmp = 1.0 / roots[0]
logger.debug(
" Flag 2: The T and yi, {} {}, combination produces a critical fluid at this pressure".format(
T, xi
)
)
elif (Pvspline(roots[0]) + P) > (Pvspline(max(extrema)) + P):
flag = 1
rho_tmp = 1.0 / roots[0]
logger.debug(
" Flag 1: The T and yi, {} {}, combination produces a liquid at this pressure".format(
T, xi
)
)
elif len(extrema) > 1:
flag = 0
rho_tmp = 1.0 / roots[0]
logger.debug(
" Flag 0: This T and yi, {} {}, combination produces a vapor at this pressure. Warning! approaching critical fluid".format(
T, xi
)
)
elif l_roots == 2:
if (Pvspline(roots[0]) + P) < 0.0:
flag = 1
rho_tmp = 1.0 / roots[0]
logger.debug(
" Flag 1: This T and yi, {} {}, combination produces a liquid under tension at this pressure".format(
T, xi
)
)
else:
slope, yroot = | np.polyfit(vlist[-4:], Plist[-4:], 1) | numpy.polyfit |
import os
import copy
import time
import joblib
import numpy as np
import pandas as pd
from datetime import datetime
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from ..Classifiers.CatBoostClassifier import CatBoostClassifier
from ..Classifiers.XGBClassifier import XGBClassifier
from ..Classifiers.LGBMClassifier import LGBMClassifier
from ..Regressors.CatBoostRegressor import CatBoostRegressor
from ..Regressors.XGBRegressor import XGBRegressor
from ..Regressors.LGBMRegressor import LGBMRegressor
from sklearn import linear_model
from sklearn import ensemble
from sklearn import svm
from sklearn import metrics
class Modeller:
def __init__(self,
mode='regression',
shuffle=False,
n_splits=3,
objective='accuracy',
samples=None,
folder='',
dataset='set_0',
store_models=False,
store_results=True):
"""
Runs various regression or classification models.
Includes:
- Scikit's Linear Model
- Scikit's Random Forest
- Scikit's Bagging
- Scikit's GradientBoosting
- Scikit's HistGradientBoosting
- DMLC's XGBoost
- Catboost's Catboost
- Microsoft's LightGBM
Parameters
----------
mode str: 'regression' or 'classification'
shuffle bool: Whether to shuffle samples for training / validation
n_splits int: Number of cross-validation splits
objective str: Performance metric from SciKit Scorers*
samples int: Samples in dataset, does not need to be specified but useful for calling return_models()
folder str: Folder to store models and / or results
dataset str: Name of feature set, documentation purpose
store_models bool: Whether to store the trained models
        store_results bool: Whether to store the results
* https://scikit-learn.org/stable/modules/model_evaluation.html
"""
# Test
assert mode in ['classification', 'regression'], 'Unsupported mode'
assert isinstance(shuffle, bool)
assert isinstance(n_splits, int)
assert 2 < n_splits < 10, 'Reconsider your number of splits'
assert isinstance(objective, str)
assert objective in metrics.SCORERS.keys(), \
'Pick scorer from sklearn.metrics.SCORERS: \n{}'.format(list(metrics.SCORERS.keys()))
assert isinstance(samples, int) or samples is None
assert isinstance(folder, str)
assert isinstance(dataset, str)
assert isinstance(store_models, bool)
assert isinstance(store_results, bool)
# Parameters
self.objective = objective
self.scoring = metrics.SCORERS[objective]
self.mode = mode
self.shuffle = shuffle
self.cvSplits = n_splits
self.samples = samples
self.dataset = str(dataset)
self.storeResults = store_results
self.storeModels = store_models
self.results = pd.DataFrame(columns=['date', 'model', 'dataset', 'params', 'mean_objective', 'std_objective',
'mean_time', 'std_time'])
# Folder
self.folder = folder if len(folder) == 0 or folder[-1] == '/' else folder + '/'
if (store_results or store_models) and self.folder != '':
if not os.path.exists(self.folder):
os.makedirs(self.folder)
self.needsProba = False # Whether scorer requires needs_proba attr
if 'True' in self.scoring._factory_args():
self.needsProba = True
def fit(self, x, y):
# Copy number of samples
self.samples = len(y)
# Regression
if self.mode == 'regression':
cv = KFold(n_splits=self.cvSplits, shuffle=self.shuffle)
return self._fit(x, y, cv)
# Classification
if self.mode == 'classification':
cv = StratifiedKFold(n_splits=self.cvSplits, shuffle=self.shuffle)
return self._fit(x, y, cv)
def return_models(self):
models = []
# All classifiers
if self.mode == 'classification':
# The thorough ones
if self.samples < 25000:
if not self.needsProba:
models.append(svm.SVC(kernel='rbf'))
models.append(ensemble.BaggingClassifier())
# models.append(ensemble.GradientBoostingClassifier()) == XG Boost
models.append(XGBClassifier())
# The efficient ones
else:
# models.append(ensemble.HistGradientBoostingClassifier()) == LGBM
models.append(LGBMClassifier())
# And the multifaceted ones
if not self.needsProba:
models.append(linear_model.RidgeClassifier())
models.append(CatBoostClassifier())
models.append(ensemble.RandomForestClassifier())
elif self.mode == 'regression':
# The thorough ones
if self.samples < 25000:
models.append(linear_model.LinearRegression())
models.append(svm.SVR(kernel='rbf'))
models.append(ensemble.BaggingRegressor())
# models.append(ensemble.GradientBoostingRegressor()) == XG Boost
models.append(XGBRegressor())
# The efficient ones
else:
models.append(linear_model.LinearRegression())
# models.append(ensemble.HistGradientBoostingRegressor()) == LGBM
models.append(LGBMRegressor())
# And the multifaceted ones
models.append(CatBoostRegressor())
models.append(ensemble.RandomForestRegressor())
return models
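    # Hedged usage sketch (illustrative; ``x`` and ``y`` are placeholders for the caller's
    # feature matrix and target vector, and the keyword values are examples, not defaults):
    #
    #     modeller = Modeller(mode='classification', objective='accuracy', folder='models/')
    #     modeller.fit(x, y)              # cross-validates every candidate model
    #     candidates = modeller.return_models()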
def _fit(self, x, y, cross_val):
# Convert to NumPy
x = np.array(x)
y = | np.array(y) | numpy.array |
import numpy as np
class Event(object):
def __init__(self, data):
self.name, self.sample, self.var_type = data[0:3]
self.qualA, self.qualB, self.qual_res = [float(x) for x in data[3:6]]
self.uniqA, self.uniqB, self.uniq_res = [float(x) for x in data[6:9]]
self.spanA, self.spanB, self.span_res = [float(x) for x in data[9:12]]
self.global_cov = float(data[12])
self.localA, self.localB, self.local_res = [float(x) for x in data[13:16]]
self.gcA, self.gcB, self.gc_res = [float(x) for x in data[16:19]]
self.alignA, self.alignB, self.align_res = [float(x) for x in data[19:22]]
@classmethod
def init_link(cls, clusters, name):
qualA = np.mean([x.qualA for x in clusters])
qualB = np.mean([x.qualB for x in clusters])
qual_res = np.mean([x.qual_res for x in clusters])
uniqA = np.mean([x.uniqA for x in clusters])
uniqB = np.mean([x.uniqB for x in clusters])
uniq_res = np.mean([x.uniq_res for x in clusters])
spanA = np.mean([abs(x.spanA) for x in clusters])
spanB = np.mean([abs(x.spanB) for x in clusters])
span_res = np.mean([x.span_res for x in clusters])
global_cov = np.mean([x.global_cov for x in clusters])
localA = np.mean([x.localA for x in clusters])
localB = np.mean([x.localB for x in clusters])
local_res = np.mean([x.local_res for x in clusters])
gcA = np.mean([x.gcA for x in clusters])
gcB = np.mean([x.gcB for x in clusters])
gc_res = np.mean([x.gc_res for x in clusters])
alignA = np.mean([x.alignA for x in clusters])
alignB = | np.mean([x.alignB for x in clusters]) | numpy.mean |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 27 15:35:57 2021
@author: OTPS
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy as scipy
from scipy import optimize
from matplotlib.ticker import AutoMinorLocator
from matplotlib import gridspec
import matplotlib.ticker as ticker
from colormap import shortest_dist
from colormap import colorMap
"""
### Data
"""
### DATA 211026 1014
I = np.loadtxt('211029_1009_QUHFLICh0cav_spec.dat', unpack = True, skiprows=1)
R = np.loadtxt('211029_1009_IUHFLICh0cav_spec.dat', unpack = True, skiprows=1)
mod = np.sqrt(R**2 + I**2)
mod_mat = mod.reshape(17,301)
nr_of_volt_measurements = np.arange(6.6, 7.4, 0.05).size
volt_measurements = np.arange(6.6, 7.4, 0.05)
# ### DATA 211026 1016
# I = np.loadtxt('211026_1016_QUHFLICh0cav_spec.dat', unpack = True, skiprows=1)
# R = np.loadtxt('211026_1016_IUHFLICh0cav_spec.dat', unpack = True, skiprows=1)
# mod = np.sqrt(R**2 + I**2)
# mod_mat = mod.reshape(17,301)
# nr_of_volt_measurements = np.arange(6.6, 7.4, 0.05).size
# volt_measurements = np.arange(6.6, 7.4, 0.05)
# colorMap()
"""
### Run function
"""
good_start = 4
good_end = 3
pairs, g_approx = shortest_dist(mod_mat, 6.2e9, 6.8e9, 301, 6.6, 7.4, 0.05, 4, 3)
### Order pairs --> smallest freq left
def order_tuple(tuple_list):
    # Order each index pair so that the smaller (lower-frequency) index comes first.
    ordered_tuple_list = []
    for tupel in tuple_list:
        if tupel[0] > tupel[1]:
            ordered_tuple_list.append((tupel[1], tupel[0]))
        else:
            ordered_tuple_list.append((tupel[0], tupel[1]))
    return ordered_tuple_list
new_pairs = order_tuple(pairs)
### Get upper/lower freq
upper_freqs = np.zeros(volt_measurements.size-(good_start + good_end))
lower_freqs = np.zeros(volt_measurements.size-(good_start + good_end))
upper_freqs_index = []
lower_freqs_index = []
for i, tupel in enumerate(new_pairs):
# upper_freqs_index[i] = tupel[1]
# lower_freqs_index[i] = tupel[0]
upper_freqs[i] = np.linspace(6.2e9, 6.8e9, 301)[tupel[1]]
lower_freqs[i] = np.linspace(6.2e9, 6.8e9, 301)[tupel[0]]
"""
### Model
"""
def avoided_crossing_direct_coupling(flux, f_center1, f_center2,
c1, c2, g, flux_state=0):
"""
Calculates the frequencies of an avoided crossing for the following model.
[f_1, g ]
[g, f_2]
f1 = c1*flux + f_center1
f2 = c2*flux + f_center2
flux: is the array of voltages
    Data that you want to fit is the array of shape (len(voltages), 2) of frequencies corresponding to these voltages
g: the coupling strength, beware to relabel your variable if using this
model to fit J1 or J2.
flux_state: this is a switch used for fitting. It determines which
transition to return
"""
if type(flux_state) == int:
flux_state = [flux_state] * len(flux)
frequencies = np.zeros([len(flux), 2])
for kk, dac in enumerate(flux):
f_1 = dac * c1 + f_center1
f_2 = dac * c2 + f_center2
# f_1 = dac**(2) + f_center1
# f_2 = dac**(0.5) + f_center2
matrix = [[f_1, g],
[g, f_2]]
frequencies[kk, :] = np.linalg.eigvalsh(matrix)[:2]
# result = np.where(flux_state, frequencies[:, 0], frequencies[:, 1])
return frequencies
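# Quick sanity check of the two-level model (illustrative numbers, not measured data): at
# the degeneracy point f_1 = f_2 the eigenvalues are f -/+ g, so the minimum splitting
# between the two branches equals 2*g.
check_freqs = avoided_crossing_direct_coupling([0.0], 6.5e9, 6.5e9, 0, 0, 0.126e9)
# check_freqs[0] is approximately [6.374e9, 6.626e9], i.e. a splitting of 2*g = 0.252 GHz.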
##################
### Estimate c1, c2
# def linear_func(x, a, b, c):
# return a * x**c + b
# def fitting_c1():
# x = lower_freqs
# y = flux
# params, err = scipy.optimize.curve_fit(linear_func, x, y, p0=[1, f1_guess, 2])
# return [params[0], params[1], params[2]]
# def fitting_c2():
# x = upper_freqs
# y = flux
# params, err = scipy.optimize.curve_fit(linear_func, x, y, p0=[3, f1_guess, 0.5])
# return [params[0], params[1], params[2]]
### Parameter
good_start = 4
good_end = 3
flux = volt_measurements[good_start:-good_end]
upper_flux = flux[6:]
lower_flux = flux[:6] ### What is upper, lower_flux?
g_guess = g_approx * (10**9)
f1_guess = np.mean(lower_freqs) - 2.2e9
f2_guess = np.mean(upper_freqs) - 2.2e9
c1_guess=(upper_freqs[-1]-upper_freqs[0])/\
(upper_flux[-1]-upper_flux[0])
c2_guess=(lower_freqs[-1]-lower_freqs[0])/\
(lower_flux[-1]-lower_flux[0])
# c1_guess = -0.2e9/(7.4-6.8)
# c2_guess = -0.18e9/(7.4-6.8)
# c1_fit = fitting_c1()
# c2_fit = fitting_c2()
# c1_guess = c1_fit
# c2_guess = c2_fit
freqs = avoided_crossing_direct_coupling(flux, f1_guess, f2_guess, 0.2e9/(7.4-6.8), 0.18e9/(7.4-6.8), 0.126e9, flux_state=0)
# result_fit, freqs_fit = avoided_crossing_direct_coupling(flux, f1_guess, f2_guess, c1_fit, c2_fit, g_guess)
print('Estimated freqs: ', freqs)
freq_dist = freqs[:,1] - freqs[:,0]
print('Estimated g from minimum splitting [GHz]: ', 0.5*min(freq_dist)/10**9)
### Figure
# plt.figure()
# for n, tupel in enumerate(new_pairs):
# plt.plot(np.linspace(6.2e9, 6.8e9, 301)[tupel[0]], volt_measurements[n+4], '-o', color='red')
# plt.plot(np.linspace(6.2e9, 6.8e9, 301)[tupel[1]], volt_measurements[n+4], '-o', color='blue')
# plt.title('Data of lower and upper frequencies')
# plt.ylabel('Absolute value of flux [V]')
# plt.xlabel('Frequencies [GHz]')
### Plot estimated freqs
# for n, tupel in enumerate(freqs):
# plt.plot(tupel[0], volt_measurements[n+4], '-+', color='red')
# plt.plot(tupel[1], volt_measurements[n+4], '-+', color='blue')
# plt.title('Fitting results of lower and upper frequencies')
# plt.ylabel('Absolute value of flux [V]')
# plt.xlabel('Frequencies [GHz]')
### Fitting function
'''
x-data: volt_measurements[4:]
y-data: lower_freqs
'''
def avoided_crossing_direct_coupling_lower(flux, f_center1, f_center2,
c1, c2, g, flux_state=0):
"""
Calculates the frequencies of an avoided crossing for the following model.
[f_1, g ]
[g, f_2]
f1 = c1*flux + f_center1
f2 = c2*flux + f_center2
flux: is the array of voltages
    Data that you want to fit is the array of shape (len(voltages), 2) of frequencies corresponding to these voltages
g: the coupling strength, beware to relabel your variable if using this
model to fit J1 or J2.
flux_state: this is a switch used for fitting. It determines which
transition to return
"""
if type(flux_state) == int:
flux_state = [flux_state] * len(flux)
frequencies = np.zeros([len(flux), 2])
for kk, dac in enumerate(flux):
f_1 = dac * c1 + f_center1
f_2 = dac * c2 + f_center2
# f_1 = dac**(2) + f_center1
# f_2 = dac**(0.5) + f_center2
matrix = [[f_1, g],
[g, f_2]]
frequencies[kk, :] = np.linalg.eigvalsh(matrix)[:2]
# result = np.where(flux_state, frequencies[:, 0], frequencies[:, 1])
return frequencies[:,0]
def avoided_crossing_direct_coupling_upper(flux, f_center1, f_center2,
c1, c2, g, flux_state=0):
"""
Calculates the frequencies of an avoided crossing for the following model.
[f_1, g ]
[g, f_2]
f1 = c1*flux + f_center1
f2 = c2*flux + f_center2
flux: is the array of voltages
Data that you want to fit is the array (len(voltages, 2)) of frequencies corresponding to these voltages
g: the coupling strength, beware to relabel your variable if using this
model to fit J1 or J2.
flux_state: this is a switch used for fitting. It determines which
transition to return
"""
if type(flux_state) == int:
flux_state = [flux_state] * len(flux)
frequencies = np.zeros([len(flux), 2])
for kk, dac in enumerate(flux):
f_1 = dac * c1 + f_center1
f_2 = dac * c2 + f_center2
# f_1 = dac**(2) + f_center1
# f_2 = dac**(0.5) + f_center2
matrix = [[f_1, g],
[g, f_2]]
frequencies[kk, :] = np.linalg.eigvalsh(matrix)[:2]
# result = np.where(flux_state, frequencies[:, 0], frequencies[:, 1])
return frequencies[:,1]
def avoided_crossing_direct_coupling_flat(flux, f_center1, f_center2,
c1, c2, g, flux_state=0):
"""
Calculates the frequencies of an avoided crossing for the following model.
[f_1, g ]
[g, f_2]
f1 = c1*flux + f_center1
f2 = c2*flux + f_center2
flux: is the array of voltages
Data that you want to fit is the array (len(voltages, 2)) of frequencies corresponding to these voltages
g: the coupling strength, beware to relabel your variable if using this
model to fit J1 or J2.
flux_state: this is a switch used for fitting. It determines which
transition to return
"""
if type(flux_state) == int:
flux_state = [flux_state] * len(flux)
frequencies = np.zeros([len(flux), 2])
for kk, dac in enumerate(flux):
f_1 = dac * c1 + f_center1
f_2 = dac * c2 + f_center2
# f_1 = dac**(2) + f_center1
# f_2 = dac**(0.5) + f_center2
matrix = [[f_1, g],
[g, f_2]]
frequencies[kk, :] = np.linalg.eigvalsh(matrix)[:2]
# result = np.where(flux_state, frequencies[:, 0], frequencies[:, 1])
return frequencies.flatten()
x = flux
f_data = np.zeros([len(x), 2])
f_data[:,0] = lower_freqs
f_data[:,1] = upper_freqs
def fit():
x = flux
# print(f_data)
# popt_lower, pcov_lower = scipy.optimize.curve_fit(avoided_crossing_direct_coupling_lower, x, lower_freqs, p0=[f1_guess, f2_guess, c1_guess, c2_guess, 0.126e9], maxfev=5000)
# popt_upper, pcov_upper = scipy.optimize.curve_fit(avoided_crossing_direct_coupling_upper, x, upper_freqs, p0=[f1_guess, f2_guess, c1_guess, c2_guess, 0.126e9], maxfev=5000)
# print(popt_lower)
# print(popt_upper)
### Flatten
freqs_flat = freqs.flatten()
popt, pcov = scipy.optimize.curve_fit(avoided_crossing_direct_coupling_flat, x, f_data.flatten(), p0=[f1_guess, f2_guess, c1_guess, c2_guess, 0.126e9], maxfev=5000)
# plt.plot(x, lower_freqs, '-o', color='lightgrey', label = 'Observed frequency sweep')
# plt.plot(x, upper_freqs, '-o', color='lightgrey')
plt.plot(lower_freqs, x, '-o', color='lightgrey', label = 'Observed frequency sweep')
plt.plot(upper_freqs, x, '-o', color='lightgrey')
plt.plot(avoided_crossing_direct_coupling(x, popt[0], popt[1], popt[2], popt[3],
popt[4])[:,0], x, 'red', label='Fit lower frequencies $f_-$')
plt.plot(avoided_crossing_direct_coupling(x, popt[0], popt[1], popt[2], popt[3],
popt[4])[:,1], x, 'blue', label='Fit upper frequencies $f_+$')
lower_fit = avoided_crossing_direct_coupling(x, popt[0], popt[1], popt[2], popt[3],
popt[4])[:,0]
upper_fit = avoided_crossing_direct_coupling(x, popt[0], popt[1], popt[2], popt[3],
popt[4])[:,1]
# plt.plot(x, avoided_crossing_direct_coupling(x, popt[0], popt[1], popt[2], popt[3],
# popt[4])[:,0], 'red', label='Fit upper frequencies')
# plt.plot(x, avoided_crossing_direct_coupling(x, popt[0], popt[1], popt[2], popt[3],
# popt[4])[:,1], 'blue', label='Fit lower frequencies')
plt.legend(loc='upper center')
print("The absolute value of the estimated g is: ", np.abs(popt[4]) / 10**6, "[MHz]")
plt.title("Avoided crossing fit for input power -23.979 dB")
plt.ylabel("Abs. value voltage [V]")
plt.xlabel("Resonance frequency [GHz]")
plt.grid(True)
# plt.axis('off')
plt.show()
return popt
fit()
### Plot fit
popt = fit()
lower_fit = np.round(avoided_crossing_direct_coupling(x, popt[0], popt[1], popt[2], popt[3],
popt[4])[:,0] / 10**9, 2)
upper_fit = np.round(avoided_crossing_direct_coupling(x, popt[0], popt[1], popt[2], popt[3],
popt[4])[:,1] / 10**9, 2)
freq_axis = np.round(np.linspace(6.2, 6.8, 301), 3)
x_array_lower = np.zeros(10)
x_array_upper = np.zeros(10)
for i in range(0, 10):
index_lower = np.where(freq_axis == lower_fit[i])
index_upper = np.where(freq_axis == upper_fit[i])
# print(index_lower[0][0])
x_array_lower[i] = index_lower[0][0]
x_array_upper[i] = index_upper[0][0]
def colorMap_fit(data, dbm):
trans = data.transpose()
fig, ax0 = plt.subplots()
# c = ax0.pcolor(trans)
c = ax0.pcolor(data)
ax0.set_title(f"Coil sweep for input power {dbm} dB")
ax0.set_yticks(np.arange(0,10))
# # ax0.set_xticks(np.linspace(6.8, 7.25, 10))
ax0.set_yticklabels(np.round( | np.linspace(6.8,7.25, 10) | numpy.linspace |
import numpy
from scipy.optimize import fsolve
def euler(simulation, cons, prim, aux):
    """Forward (explicit) Euler step."""
dt = simulation.dt
rhs = simulation.rhs
return cons + dt * rhs(cons, prim, aux, simulation)
def rk2(simulation, cons, prim, aux):
    """Second order SSP Runge-Kutta (Heun) step."""
dt = simulation.dt
rhs = simulation.rhs
cons1 = cons + dt * rhs(cons, prim, aux, simulation)
cons1 = simulation.bcs(cons1, simulation.grid.Npoints, simulation.grid.Ngz)
prim1, aux1 = simulation.model.cons2all(cons1, prim)
return 0.5 * (cons + cons1 + dt * rhs(cons1, prim1, aux1, simulation))
def rk3(simulation, cons, prim, aux):
    """Third order SSP Runge-Kutta (Shu-Osher) step."""
dt = simulation.dt
rhs = simulation.rhs
cons1 = cons + dt * rhs(cons, prim, aux, simulation)
cons1 = simulation.bcs(cons1, simulation.grid.Npoints, simulation.grid.Ngz)
if simulation.fix_cons:
cons1 = simulation.model.fix_cons(cons1)
prim1, aux1 = simulation.model.cons2all(cons1, prim)
cons2 = (3 * cons + cons1 + dt * rhs(cons1, prim1, aux1, simulation)) / 4
cons2 = simulation.bcs(cons2, simulation.grid.Npoints, simulation.grid.Ngz)
if simulation.fix_cons:
cons2 = simulation.model.fix_cons(cons2)
prim2, aux2 = simulation.model.cons2all(cons2, prim1)
return (cons + 2 * cons2 + 2 * dt * rhs(cons2, prim2, aux2, simulation)) / 3
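# rk3 above is the three-stage Shu-Osher SSP-RK3 update, interleaved with the
# simulation's boundary conditions and cons-to-prim conversion. Stripped of
# that machinery it reduces to the standard scheme; a self-contained sketch on
# the scalar ODE dy/dt = -y (toy example, not the simulation interface used
# in this module):
def _ssp_rk3_scalar_demo(y0=1.0, dt=0.01, nsteps=100):
    f = lambda y: -y
    y = y0
    for _ in range(nsteps):
        y1 = y + dt * f(y)
        y2 = (3 * y + y1 + dt * f(y1)) / 4
        y = (y + 2 * y2 + 2 * dt * f(y2)) / 3
    return y  # approaches exp(-dt * nsteps) with third-order accuracy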
def rk_euler_split(rk_method, source):
    """Operator-split step: explicit RK step for the flux terms followed by a
    forward Euler step for the source terms."""
def timestepper(simulation, cons, prim, aux):
consstar = rk_method(simulation, cons, prim, aux)
primstar, auxstar = simulation.model.cons2all(consstar, prim)
return consstar + simulation.dt * source(consstar, primstar, auxstar)
return timestepper
def rk_backward_euler_split(rk_method, source):
    """Operator-split step: explicit RK step for the flux terms followed by an
    implicit (backward Euler) solve for the source terms."""
def timestepper(simulation, cons, prim, aux):
consstar = rk_method(simulation, cons, prim, aux)
primstar, auxstar = simulation.model.cons2all(consstar, prim)
def residual(consguess, cons_star, prim_old):
consguess = consguess.reshape(consguess.shape[0], 1)
prim_old = prim_old.reshape(prim_old.shape[0], 1)
cons_star = cons_star.reshape(cons_star.shape[0], 1)
primguess, auxguess = simulation.model.cons2all(consguess, prim_old)
return (consguess - cons_star - simulation.dt*source(consguess, primguess, auxguess)).ravel()
consnext = numpy.zeros_like(cons)
cons_initial_guess = consstar + \
0.5*simulation.dt*source(consstar,
primstar,
auxstar)
for i in range(cons.shape[1]):
consnext[:, i] = fsolve(residual, cons_initial_guess[:,i].ravel(),
args=(consstar[:, i].ravel(), prim[:, i].ravel()))
return numpy.reshape(consnext, cons.shape)
return timestepper
def imex222(source, source_fprime=None, source_guess=None):
    """IMEX(2,2,2) timestepper (gamma = 1 - 1/sqrt(2)): source terms are
    treated implicitly, flux terms explicitly."""
gamma = 1 - 1/numpy.sqrt(2)
def residual1(consguess, dt, cons, prim, simulation):
consguess = consguess.reshape((cons.shape[0], 1))
cons = cons.reshape((cons.shape[0], 1))
prim = prim.reshape((prim.shape[0], 1))
try:
primguess, auxguess = simulation.model.cons2all(consguess, prim)
except ValueError:
res = 1e6 * numpy.ones_like(consguess)
return res.ravel()
res = consguess - cons - dt * gamma * source(consguess,
primguess, auxguess)
if numpy.any(numpy.isnan(res)):
res = 1e6 * numpy.ones_like(consguess)
return res.ravel()
def residual2(consguess, dt, cons, prim, k1, source1, simulation):
consguess = consguess.reshape((cons.shape[0], 1))
cons = cons.reshape((cons.shape[0], 1))
prim = prim.reshape((prim.shape[0], 1))
k1 = k1.reshape((cons.shape[0], 1))
source1 = source1.reshape((cons.shape[0], 1))
try:
primguess, auxguess = simulation.model.cons2all(consguess, prim)
except ValueError:
res = 1e6 * numpy.ones_like(consguess)
return res.ravel()
res = (consguess - cons - dt * (k1 + (1 - 2*gamma)*source1 + \
gamma*source(consguess, primguess, auxguess))).ravel()
if numpy.any(numpy.isnan(res)):
res = 1e6 * numpy.ones_like(consguess).ravel()
return res
def residual2_noflux(consguess, dt, cons, prim, source1, simulation):
consguess = consguess.reshape((cons.shape[0], 1))
cons = cons.reshape((cons.shape[0], 1))
prim = prim.reshape((prim.shape[0], 1))
source1 = source1.reshape((cons.shape[0], 1))
try:
primguess, auxguess = simulation.model.cons2all(consguess, prim)
except ValueError:
res = 1e6 * numpy.ones_like(consguess)
return res.ravel()
res = (consguess - cons - dt * ((1 - 2*gamma)*source1 + \
gamma*source(consguess, primguess, auxguess))).ravel()
if numpy.any(numpy.isnan(res)):
res = 1e6 * numpy.ones_like(consguess).ravel()
return res
# def residual1_prime(consguess, dt, cons, prim, simulation):
# consguess = consguess.reshape((cons.shape[0], 1))
# jac = numpy.eye(cons.shape[0])
# primguess, auxguess = simulation.model.cons2all(consguess, prim)
# jac -= dt * gamma * source_fprime(consguess, primguess, auxguess)
# return jac
# def residual2_prime(consguess, dt, cons, prim, k1, source1, simulation):
# """
    #     Whilst the result is identical to residual1_prime, the argument list
# is of course different
# """
# consguess = consguess.reshape((cons.shape[0], 1))
# jac = numpy.eye(cons.shape[0])
# primguess, auxguess = simulation.model.cons2all(consguess, prim)
# jac -= dt * gamma * source_fprime(consguess, primguess, auxguess)
# return jac
residual1_prime = None
def timestepper(simulation, cons, prim, aux):
Np = cons.shape[1]
dt = simulation.dt
rhs = simulation.rhs
consguess = cons.copy()
if source_guess:
primguess, auxguess = simulation.model.cons2all(consguess, prim)
consguess = source_guess(consguess, primguess, auxguess)
cons1 = numpy.zeros_like(cons)
for i in range(Np):
cons1[:,i] = fsolve(residual1, consguess[:,i],
fprime=residual1_prime,
args=(dt, cons[:,i], prim[:,i], simulation),
xtol = 1e-12)
cons1 = simulation.bcs(cons1, simulation.grid.Npoints, simulation.grid.Ngz)
prim1, aux1 = simulation.model.cons2all(cons1, prim)
k1 = rhs(cons1, prim1, aux1, simulation)
source1 = source(cons1, prim1, aux1)
cons2 = numpy.zeros_like(cons)
for i in range(Np):
consguess_source = fsolve(residual2_noflux, cons1[:,i],
fprime=residual1_prime,
args=(dt, cons[:,i], prim1[:,i], source1[:,i], simulation),
xtol = 1e-12)
consguess_flux = cons1[:,i] + dt * k1[:, i]
consguess = 0.5 * (consguess_source + consguess_flux)
cons2[:,i] = fsolve(residual2, consguess,
fprime=residual1_prime,
args=(dt, cons[:,i], prim1[:,i], k1[:,i], source1[:,i], simulation),
xtol = 1e-12)
cons2 = simulation.bcs(cons2, simulation.grid.Npoints, simulation.grid.Ngz)
prim2, aux2 = simulation.model.cons2all(cons2, prim1)
k2 = rhs(cons2, prim2, aux2, simulation)
source2 = source(cons2, prim2, aux2)
return cons + simulation.dt * (k1 + k2 + source1 + source2) / 2
return timestepper
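# The scheme above is the two-stage IMEX(2,2,2) method: each step performs two
# implicit solves for the stiff source plus explicit flux evaluations, combined
# as cons_{n+1} = cons_n + dt*(k1 + k2 + source1 + source2)/2. A standalone
# sketch for the linear test problem y' = lam_E*y + lam_I*y, where the implicit
# stages can be solved in closed form (toy example only, not the simulation
# interface used above):
def _imex222_linear_demo(y0=1.0, lam_E=-1.0, lam_I=-50.0, dt=0.02, nsteps=50):
    gamma = 1 - 1 / numpy.sqrt(2)
    y = y0
    for _ in range(nsteps):
        y1 = y / (1 - dt * gamma * lam_I)            # stage 1: implicit solve
        k1, s1 = lam_E * y1, lam_I * y1
        rhs = y + dt * (k1 + (1 - 2 * gamma) * s1)
        y2 = rhs / (1 - dt * gamma * lam_I)          # stage 2: implicit solve
        k2, s2 = lam_E * y2, lam_I * y2
        y = y + dt * (k1 + k2 + s1 + s2) / 2
    return y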
def imex433(source):
    """IMEX(4,3,3) timestepper: source terms treated implicitly, flux terms
    explicitly."""
alpha = 0.24169426078821
beta = 0.06042356519705
eta = 0.12915286960590
def residual1(consguess, dt, cons, prim, simulation):
consguess = consguess.reshape((cons.shape[0], 1))
cons = cons.reshape((cons.shape[0], 1))
prim = prim.reshape((prim.shape[0], 1))
try:
primguess, auxguess = simulation.model.cons2all(consguess, prim)
except ValueError:
res = 1e6 * numpy.ones_like(consguess)
return res.ravel()
res = consguess - cons - dt * alpha * source(consguess,
primguess, auxguess)
if numpy.any( | numpy.isnan(res) | numpy.isnan |
import numpy as np
from sklearn.neighbors import NearestNeighbors
def best_fit_transform(A, B):
'''
Calculates the least-squares best-fit transform that maps corresponding points A to B in m spatial dimensions
Input:
A: Nxm numpy array of corresponding points
B: Nxm numpy array of corresponding points
Returns:
T: (m+1)x(m+1) homogeneous transformation matrix that maps A on to B
R: mxm rotation matrix
t: mx1 translation vector
'''
assert A.shape == B.shape
# get number of dimensions
m = A.shape[1]
# translate points to their centroids
centroid_A = np.mean(A, axis=0)
centroid_B = np.mean(B, axis=0)
AA = A - centroid_A
BB = B - centroid_B
# rotation matrix
H = np.dot(AA.T, BB)
U, S, Vt = np.linalg.svd(H)
R = np.dot(Vt.T, U.T)
# special reflection case
if | np.linalg.det(R) | numpy.linalg.det |
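# In the SVD-based (Kabsch) fit above, the reflection case is handled by
# flipping the sign of the last row of Vt and recomputing R; the translation is
# then t = centroid_B - R @ centroid_A. A compact self-contained sketch of the
# whole computation (an illustrative reconstruction, not the original
# function):
def _rigid_transform_sketch(A, B):
    m = A.shape[1]
    cA, cB = A.mean(axis=0), B.mean(axis=0)
    H = (A - cA).T @ (B - cB)
    U, S, Vt = np.linalg.svd(H)
    R = Vt.T @ U.T
    if np.linalg.det(R) < 0:        # reflection: force a proper rotation
        Vt[m - 1, :] *= -1
        R = Vt.T @ U.T
    t = cB - R @ cA
    T = np.eye(m + 1)               # homogeneous (m+1)x(m+1) transform
    T[:m, :m], T[:m, m] = R, t
    return T, R, t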
import numpy as np
from numpy.lib import recfunctions
import root_numpy as rnp
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_raises
from . import load
def test_rec2array():
# scalar fields
a = np.array([
(12345, 2., 2.1, True),
(3, 4., 4.2, False),],
dtype=[
('x', np.int32),
('y', np.float32),
('z', np.float64),
('w', np.bool)])
arr = rnp.rec2array(a)
assert_array_equal(arr,
np.array([
[12345, 2, 2.1, 1],
[3, 4, 4.2, 0]]))
arr = rnp.rec2array(a, fields=['x', 'y'])
assert_array_equal(arr,
np.array([
[12345, 2],
[3, 4]]))
# single scalar field
arr = rnp.rec2array(a, fields=['x'])
assert_array_equal(arr, np.array([[12345], [3]], dtype=np.int32))
# single scalar field simplified
arr = rnp.rec2array(a, fields='x')
assert_array_equal(arr, np.array([12345, 3], dtype=np.int32))
# case where array has single record
assert_equal(rnp.rec2array(a[:1]).shape, (1, 4))
assert_equal(rnp.rec2array(a[:1], fields=['x']).shape, (1, 1))
assert_equal(rnp.rec2array(a[:1], fields='x').shape, (1,))
# array fields
a = np.array([
([1, 2, 3], [4.5, 6, 9.5],),
([4, 5, 6], [3.3, 7.5, 8.4],),],
dtype=[
('x', np.int32, (3,)),
('y', np.float32, (3,))])
arr = rnp.rec2array(a)
assert_array_almost_equal(arr,
np.array([[[1, 4.5],
[2, 6],
[3, 9.5]],
[[4, 3.3],
[5, 7.5],
[6, 8.4]]]))
# single array field
arr = rnp.rec2array(a, fields=['y'])
assert_array_almost_equal(arr,
np.array([[[4.5], [6], [9.5]],
[[3.3], [7.5], [8.4]]]))
# single array field simplified
arr = rnp.rec2array(a, fields='y')
assert_array_almost_equal(arr,
np.array([[4.5, 6, 9.5],
[3.3, 7.5, 8.4]]))
# case where array has single record
assert_equal(rnp.rec2array(a[:1], fields=['y']).shape, (1, 3, 1))
assert_equal(rnp.rec2array(a[:1], fields='y').shape, (1, 3))
# lengths mismatch
a = np.array([
([1, 2], [4.5, 6, 9.5],),
([4, 5], [3.3, 7.5, 8.4],),],
dtype=[
('x', np.int32, (2,)),
('y', np.float32, (3,))])
assert_raises(ValueError, rnp.rec2array, a)
# mix of scalar and array fields should fail
a = np.array([
(1, [4.5, 6, 9.5],),
(4, [3.3, 7.5, 8.4],),],
dtype=[
('x', np.int32),
('y', np.float32, (3,))])
assert_raises(ValueError, rnp.rec2array, a)
def test_stack():
rec = rnp.root2array(load('test.root'))
s = rnp.stack([rec, rec])
assert_equal(s.shape[0], 2 * rec.shape[0])
assert_equal(s.dtype.names, rec.dtype.names)
s = rnp.stack([rec, rec], fields=['x', 'y'])
assert_equal(s.shape[0], 2 * rec.shape[0])
assert_equal(s.dtype.names, ('x', 'y'))
# recs don't have identical fields
rec2 = recfunctions.drop_fields(rec, ['i', 'x'])
s = rnp.stack([rec, rec2])
assert_equal(set(s.dtype.names), set(['y', 'z']))
def test_dup_idx():
a = [1, 2, 3, 4, 3, 2]
assert_array_equal(rnp.dup_idx(a), [1, 2, 4, 5])
def test_stretch():
arr = np.empty(5,
dtype=[
('scalar', np.int),
('vl1', 'O'),
('vl2', 'O'),
('vl3', 'O'),
('fl1', np.int, (2, 2)),
('fl2', np.float, (2, 3)),
('fl3', np.double, (3, 2))])
for i in range(arr.shape[0]):
vl1 = np.array(range(i + 1), dtype=np.int)
vl2 = np.array(range(i + 2), dtype=np.float) * 2
vl3 = np.array(range(2), dtype=np.double) * 3
fl1 = np.array(range(4), dtype=np.int).reshape((2, 2))
fl2 = np.array(range(6), dtype=np.float).reshape((2, 3))
fl3 = np.array(range(6), dtype=np.double).reshape((3, 2))
arr[i] = (i, vl1, vl2, vl3, fl1, fl2, fl3)
# no array columns included
assert_raises(RuntimeError, rnp.stretch, arr, ['scalar',])
# lengths don't match
assert_raises(ValueError, rnp.stretch, arr, ['scalar', 'vl1', 'vl2',])
assert_raises(ValueError, rnp.stretch, arr, ['scalar', 'fl1', 'fl3',])
assert_raises(ValueError, rnp.stretch, arr)
# variable-length stretch
stretched = rnp.stretch(arr, ['scalar', 'vl1',])
assert_equal(stretched.dtype,
[('scalar', np.int),
('vl1', np.int)])
assert_equal(stretched.shape[0], 15)
assert_array_equal(
stretched['scalar'],
np.repeat(arr['scalar'], np.vectorize(len)(arr['vl1'])))
# fixed-length stretch
stretched = rnp.stretch(arr, ['scalar', 'vl3', 'fl1', 'fl2',])
assert_equal(stretched.dtype,
[('scalar', np.int),
('vl3', np.double),
('fl1', np.int, (2,)),
('fl2', np.float, (3,))])
assert_equal(stretched.shape[0], 10)
assert_array_equal(
stretched['scalar'], np.repeat(arr['scalar'], 2))
# optional argument return_indices
stretched, idx = rnp.stretch(arr, ['scalar', 'vl1'], return_indices=True)
assert_equal(stretched.shape[0], idx.shape[0])
from_arr = list(map(lambda x: x['vl1'][0], arr))
from_stretched = stretched[idx == 0]['vl1']
assert_array_equal(from_arr, from_stretched)
# stretch single field and produce unstructured output
stretched = rnp.stretch(arr, 'vl1')
assert_equal(stretched.dtype, np.int)
def test_blockwise_inner_join():
test_data = np.array([
(1.0, np.array([11,12,13]), np.array([1,0,1]), 0, np.array([1,2,3])),
(2.0, np.array([21,22,23]), np.array([-1,2,-1]), 1, np.array([31,32,33]))],
dtype=[
('sl', np.float),
('al', 'O'),
('fk', 'O'),
('s_fk', np.int),
('ar', 'O')])
# vector join
a1 = rnp.blockwise_inner_join(
test_data, ['sl', 'al'], test_data['fk'], ['ar'])
# specify fk with string
a1 = rnp.blockwise_inner_join(
test_data, ['sl', 'al'], 'fk', ['ar'])
exp1 = np.array([
(1.0, 11, 2, 1),
(1.0, 12, 1, 0),
(1.0, 13, 2, 1),
(2.0, 22, 33, 2)],
dtype=[
('sl', '<f8'),
('al', '<i8'),
('ar', '<i8'),
('fk', '<i8')])
assert_array_equal(a1, exp1, verbose=True)
# vector join with force repeat
a2 = rnp.blockwise_inner_join(
test_data, ['sl','al'], test_data['fk'], ['ar'], force_repeat=['al'])
exp2 = np.array([
(1.0, np.array([11, 12, 13]), 2, 1),
(1.0, np.array([11, 12, 13]), 1, 0),
(1.0, np.array([11, 12, 13]), 2, 1),
(2.0, | np.array([21, 22, 23]) | numpy.array |
import tensorflow as tf
import numpy as np
import os
import matplotlib.pyplot as plt
from data_loader import load_3D, load_2D
def create_model():
model = tf.keras.Sequential([
tf.keras.layers.Dense(units=1200, activation=tf.keras.activations.relu, input_shape=[441]),
tf.keras.layers.Dense(units=1200, activation=tf.keras.activations.relu),
tf.keras.layers.Dense(units=441)
])
model.compile(loss='mean_squared_error', optimizer=tf.keras.optimizers.Adam(0.0001), metrics=['accuracy'])
return model
def main():
map_size = 21
features_train, labels_train = load_2D(100000, map_size, map_size)
features_test, labels_test = load_2D(1000, map_size, map_size, "test_created_data_")
checkpoint_path = "output_dense"
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, verbose = 1, period = 5)
model = create_model()
model.load_weights(checkpoint_path)
#model.fit(features_train, labels_train, epochs = 10, callbacks = [cp_callback], validation_split=0.1)
size = 69
test = np.array(features_test[size]).reshape(map_size, map_size)
prediction = model.predict(features_test)
    ax = plt.subplot(1, 3, 1)
    plt.imshow(test)
    ax.axis('off')
    ax.set_title('Map')
pred = np.array(prediction[size]).reshape(map_size, map_size) * features_test[size].reshape(map_size, map_size)
array = | np.clip(pred, -0.25, 0.25) | numpy.clip |
import six
import numbers
import logging
import numpy as np
from sklearn.metrics import log_loss
from sklearn.utils import check_consistent_length, check_array
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import LabelBinarizer
import mxnet as mx
from mxnet.metric import EvalMetric
__all__ = ['Activation', 'Dense', 'SoftmaxOutput', 'Variable',
'BatchNormalization', 'Dropout', 'Sequential', 'Adam']
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
class LogLoss(object):
def __init__(self):
self.lb_ = None
@property
def __name__(self):
return 'log_loss'
def __call__(self, y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
if self.lb_ is None:
self.lb_ = LabelBinarizer()
T = self.lb_.fit_transform(y_true)
else:
T = self.lb_.transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
Y = np.clip(y_pred, eps, 1 - eps)
if not isinstance(Y, np.ndarray):
raise ValueError('y_pred should be an array of floats.')
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError('y_true and y_pred have different number of classes '
'%d, %d' % (T.shape[1], Y.shape[1]))
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * | np.log(Y) | numpy.log |
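# For reference, the per-sample cross-entropy assembled above is
# loss_i = -sum_k T[i, k] * log(Y[i, k]); the aggregation then typically uses
# the _weighted_sum helper defined earlier. A minimal sketch of that final
# step (an illustrative reconstruction, not the original method body):
def _log_loss_finalize(T, Y, sample_weight=None, normalize=True):
    loss = -(T * np.log(Y)).sum(axis=1)
    return _weighted_sum(loss, sample_weight, normalize)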
import random
import time
import math
import os.path
import numpy as np
import pandas as pd
from pysc2.agents import base_agent
from pysc2.env import sc2_env, run_loop
from pysc2.lib import actions, features, units
from absl import app
import torch
from torch.utils.tensorboard import SummaryWriter
from baseline.skdrl.pytorch.model.mlp import NaiveMultiLayerPerceptron
from baseline.skdrl.common.memory.memory import ExperienceReplayMemory
from baseline.skdrl.pytorch.model.dqn import DQN, prepare_training_inputs
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
writer = SummaryWriter()
class TerranAgentWithRawActsAndRawObs(base_agent.BaseAgent):
actions = ("do_nothing",
"harvest_minerals",
"build_supply_depot",
"build_barracks",
"train_marine",
"attack")
def get_my_units_by_type(self, obs, unit_type):
return [unit for unit in obs.observation.raw_units
if unit.unit_type == unit_type
and unit.alliance == features.PlayerRelative.SELF]
def get_enemy_units_by_type(self, obs, unit_type):
return [unit for unit in obs.observation.raw_units
if unit.unit_type == unit_type
and unit.alliance == features.PlayerRelative.ENEMY]
def get_my_completed_units_by_type(self, obs, unit_type):
return [unit for unit in obs.observation.raw_units
if unit.unit_type == unit_type
and unit.build_progress == 100
and unit.alliance == features.PlayerRelative.SELF]
def get_enemy_completed_units_by_type(self, obs, unit_type):
return [unit for unit in obs.observation.raw_units
if unit.unit_type == unit_type
and unit.build_progress == 100
and unit.alliance == features.PlayerRelative.ENEMY]
def get_distances(self, obs, units, xy):
units_xy = [(unit.x, unit.y) for unit in units]
return np.linalg.norm(np.array(units_xy) - np.array(xy), axis=1)
def step(self, obs):
super(TerranAgentWithRawActsAndRawObs, self).step(obs)
if obs.first():
command_center = self.get_my_units_by_type(
obs, units.Terran.CommandCenter)[0]
self.base_top_left = (command_center.x < 32)
def do_nothing(self, obs):
return actions.RAW_FUNCTIONS.no_op()
def harvest_minerals(self, obs):
scvs = self.get_my_units_by_type(obs, units.Terran.SCV)
idle_scvs = [scv for scv in scvs if scv.order_length == 0]
if len(idle_scvs) > 0:
mineral_patches = [unit for unit in obs.observation.raw_units
if unit.unit_type in [
units.Neutral.BattleStationMineralField,
units.Neutral.BattleStationMineralField750,
units.Neutral.LabMineralField,
units.Neutral.LabMineralField750,
units.Neutral.MineralField,
units.Neutral.MineralField750,
units.Neutral.PurifierMineralField,
units.Neutral.PurifierMineralField750,
units.Neutral.PurifierRichMineralField,
units.Neutral.PurifierRichMineralField750,
units.Neutral.RichMineralField,
units.Neutral.RichMineralField750
]]
scv = random.choice(idle_scvs)
distances = self.get_distances(obs, mineral_patches, (scv.x, scv.y))
mineral_patch = mineral_patches[ | np.argmin(distances) | numpy.argmin |
# -*- coding: utf-8 -*-
"""File for base geometry class built using the Geomdl class"""
import numpy as np
class Geometry2D:
'''
Base class for 2D domains
    Input: geomData - dictionary containing the geometry information
Keys: degree_u, degree_v: polynomial degree in the u and v directions
ctrlpts_size_u, ctrlpts_size_v: number of control points in u,v directions
ctrlpts: weighted control points (in a list with
ctrlpts_size_u*ctrlpts_size_v rows and 3 columns for x,y,z coordinates)
            weights: corresponding weights (list with ctrlpts_size_u*ctrlpts_size_v entries)
knotvector_u, knotvector_v: knot vectors in the u and v directions
'''
def __init__(self, geomData):
self.degree_u = geomData['degree_u']
self.degree_v = geomData['degree_v']
self.ctrlpts_size_u = geomData['ctrlpts_size_u']
self.ctrlpts_size_v = geomData['ctrlpts_size_v']
self.ctrlpts = self.getUnweightedCpts(geomData['ctrlpts'], geomData['weights'])
self.weights = geomData['weights']
self.knotvector_u = geomData['knotvector_u']
self.knotvector_v = geomData['knotvector_v']
def getUnweightedCpts(self, ctrlpts, weights):
numCtrlPts = np.shape(ctrlpts)[0]
PctrlPts = np.zeros_like(ctrlpts)
for i in range(2):
for j in range(numCtrlPts):
PctrlPts[j,i]=ctrlpts[j][i]/weights[j]
# PctrlPts = PctrlPts.tolist()
return PctrlPts
def mapPoints(self, uPar, vPar):
'''
        Map points from the parameter domain [0,1]x[0,1] to the quadrilateral domain
Input: uPar - array containing the u-coordinates in the parameter space
vPar - array containing the v-coordinates in the parameter space
Note: the arrays uPar and vPar must be of the same size
Output: xPhys - array containing the x-coordinates in the physical space
yPhys - array containing the y-coordinates in the physical space
'''
        gpParamUV = np.array([uPar, vPar])
        evalList = tuple(map(tuple, gpParamUV.transpose()))
        # NOTE: self.surf is assumed to be provided elsewhere (e.g. a geomdl
        # NURBS surface built from this geometry data); it is not set in this
        # class.
        res = np.array(self.surf.evaluate_list(evalList))
        return res
def bezierExtraction(self, knot, deg):
'''
Bezier extraction
Based on Algorithm 1, from Borden - Isogeometric finite element data
structures based on Bezier extraction
'''
m = len(knot)-deg-1
a = deg + 1
b = a + 1
# Initialize C with the number of non-zero knotspans in the 3rd dimension
nb_final = len(np.unique(knot))-1
C = np.zeros((deg+1,deg+1,nb_final))
nb = 1
C[:,:,0] = np.eye(deg + 1)
while b <= m:
C[:,:,nb] = np.eye(deg + 1)
i = b
while (b <= m) and (knot[b] == knot[b-1]):
b = b+1
multiplicity = b-i+1
alphas = np.zeros(deg-multiplicity)
if (multiplicity < deg):
numerator = knot[b-1] - knot[a-1]
for j in range(deg,multiplicity,-1):
alphas[j-multiplicity-1] = numerator/(knot[a+j-1]-knot[a-1])
r = deg - multiplicity
for j in range(1,r+1):
save = r-j+1
s = multiplicity + j
for k in range(deg+1,s,-1):
alpha = alphas[k-s-1]
C[:,k-1,nb-1] = alpha*C[:,k-1,nb-1] + (1-alpha)*C[:,k-2,nb-1]
if b <= m:
C[save-1:save+j,save-1,nb] = C[deg-j:deg+1,deg,nb-1]
nb=nb+1
if b <= m:
a=b
b=b+1
elif multiplicity==deg:
if b <= m:
nb = nb + 1
a = b
b = b + 1
assert(nb==nb_final)
return C, nb
def computeC(self):
knotU = self.knotvector_u
knotV = self.knotvector_v
degU = self.degree_u
degV = self.degree_v
C_u, nb = self.bezierExtraction(knotU, degU)
C_v, nb = self.bezierExtraction(knotV, degV)
numElemU = len(np.unique(knotU)) - 1
numElemV = len(np.unique(knotV)) - 1
basisU = len(knotU) - degU - 1
nument = (degU+1)*(degV+1)
elemInfo = dict()
elemInfo['vertex'] = []
elemInfo['nodes'] = []
elemInfo['C'] = []
for j in range (0, len(knotV)-1):
for i in range (0, len(knotU)-1):
if ((knotU[i+1] > knotU[i]) and (knotV[j+1] > knotV[j])):
vertices = np.array([knotU[i], knotV[j], knotU[i+1], knotV[j+1]])
elemInfo['vertex'].append(vertices)
currow = np.array([np.zeros(nument)])
tcount = 0
for t2 in range(j+1-degV,j+2):
for t1 in range(i+1-degU,i+2):
currow[0,tcount] = t1 + (t2-1)*basisU
tcount = tcount + 1
elemInfo['nodes'].append(currow)
for j in range (0, numElemV):
for i in range (0, numElemU):
cElem = np.kron(C_v[:,:,j],C_u[:,:,i])
elemInfo['C'].append(cElem)
return elemInfo
def bernsteinBasis(self,xi, deg):
'''
Algorithm A1.3 in Piegl & Tiller
xi is a 1D array
'''
B = np.zeros((len(xi),deg+1))
B[:,0] = 1.0
u1 = 1-xi
u2 = 1+xi
for j in range(1,deg+1):
saved = 0.0
for k in range(0,j):
temp = B[:,k].copy()
B[:,k] = saved + u1*temp
saved = u2*temp
B[:,j] = saved
B = B/np.power(2,deg)
dB = np.zeros((len(xi),deg))
dB[:,0] = 1.0
for j in range(1,deg):
saved = 0.0
for k in range(0,j):
temp = dB[:,k].copy()
dB[:,k] = saved + u1*temp
saved = u2*temp
dB[:,j] = saved
dB = dB/np.power(2,deg)
dB0 = np.transpose(np.array([np.zeros(len(xi))]))
dB = np.concatenate((dB0, dB, dB0), axis=1)
dB = (dB[:,0:-1] - dB[:,1:])*deg
return B, dB
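    # Sanity check for the basis above: the (scaled) Bernstein polynomials on
    # [-1, 1] form a partition of unity and their derivatives sum to zero.
    # Small illustrative helper, not part of the original class:
    def _check_bernstein_partition_of_unity(self, deg=3, npts=7):
        xi = np.linspace(-1, 1, npts)
        B, dB = self.bernsteinBasis(xi, deg)
        assert np.allclose(B.sum(axis=1), 1.0)
        assert np.allclose(dB.sum(axis=1), 0.0)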
def findspan(self, uCoord, vCoord):
'''Generates the element number on which the co-ordinate is located'''
knotU = self.knotvector_u
knotV = self.knotvector_v
counter = 0
for j in range (0, len(knotV)-1):
for i in range (0, len(knotU)-1):
if ((knotU[i+1] > knotU[i]) and (knotV[j+1] > knotV[j])):
if ((uCoord > knotU[i]) and (uCoord < knotU[i+1]) and (vCoord > knotV[j]) and (vCoord < knotV[j+1])):
elmtNum = counter
break
counter = counter + 1
return elmtNum
def getDerivatives(self, uCoord, vCoord, elmtNo):
'''
Generate physical points and jacobians for parameter points inside the domain
Assume there is one element in the parameter space
        Input: uCoord, vCoord - co-ordinates of the Gauss points in the parameter space.
        Output: xPhys, yPhys, ptJac - co-ordinates in the physical space and the Jacobian
'''
curVertex = self.vertex[elmtNo]
cElem = self.C[elmtNo]
curNodes = np.int32(self.nodes[elmtNo])-1 # Python indexing starts from 0
curPts = np.squeeze(self.ctrlpts[curNodes,0:2])
wgts = np.transpose(np.array([np.squeeze(self.weights[curNodes,0:1])]))
# Get the Gauss points on the reference interval [-1,1]
uMax = curVertex[2]
uMin = curVertex[0]
vMax = curVertex[3]
vMin = curVertex[1]
uHatCoord = (2*uCoord - (uMax+uMin))/(uMax-uMin)
vHatCoord = (2*vCoord - (vMax+vMin))/(vMax-vMin)
degU = self.degree_u
degV = self.degree_v
        B_u, dB_u = self.bernsteinBasis(uHatCoord,degU)
        B_v, dB_v = self.bernsteinBasis(vHatCoord,degV)
        numGauss = len(uCoord)
# Computing the Bernstein polynomials in 2D
dBdu = np.zeros((numGauss, numGauss, (degU+1)*(degV+1)))
dBdv = np.zeros((numGauss, numGauss, (degU+1)*(degV+1)))
R = np.zeros((numGauss, numGauss, (degU+1)*(degV+1)))
counter = 0
for j in range(0,degV+1):
for i in range(0,degU+1):
R[:,:,counter] = np.outer(B_u[:,i], B_v[:,j])
dBdu[:,:,counter] = np.outer(dB_u[:,i],B_v[:,j])
dBdv[:,:,counter] = np.outer(B_u[:,i],dB_v[:,j])
counter = counter + 1
quadPts = np.zeros((3))
# Map the points to the physical space
for jPt in range(0,numGauss):
for iPt in range(0,numGauss):
dRdx = np.matmul(cElem,np.transpose(np.array([dBdu[iPt,jPt,:]])))*2/(uMax-uMin)
dRdy = np.matmul(cElem,np.transpose(np.array([dBdv[iPt,jPt,:]])))*2/(vMax-vMin)
RR = np.matmul(cElem,np.transpose(np.array([R[iPt,jPt,:]])))
RR = RR*wgts
dRdx = dRdx*wgts
dRdy = dRdy*wgts
w_sum = np.sum(RR, axis=0)
dw_xi = np.sum(dRdx, axis=0)
dw_eta = np.sum(dRdy, axis=0)
dRdx = dRdx/w_sum - RR*dw_xi/np.power(w_sum,2)
dRdy = dRdy/w_sum - RR*dw_eta/np.power(w_sum,2)
RR = RR/w_sum;
dR = np.concatenate((dRdx.T,dRdy.T),axis=0)
dxdxi = np.matmul(dR,curPts)
coord = np.matmul(np.array([R[iPt,jPt,:]]),curPts)
detJac = np.absolute(np.linalg.det(dxdxi))
quadPts[0] = coord[0,0]
quadPts[1] = coord[0,1]
quadPts[2] = detJac
xPhys = quadPts[0]
yPhys = quadPts[1]
ptJac = quadPts[2]
return xPhys, yPhys, ptJac
def genElemList(self, numElemU, numElemV):
'''
Generate the element (vertex) list for an initial (uniform)
subdivision mesh
Input: numElemU, numElemV - number of subdivisions in the u and v
directions in the parameter space
Output: vertex - arrays containing the element vertices + initial level (=0)
'''
vertex = np.zeros((numElemU*numElemV, 5))
#generate the knots on the interval [0,1]
uEdge = np.linspace(0, 1, numElemU+1)
vEdge = np.linspace(0, 1, numElemV+1)
uPar, vPar = np.meshgrid(uEdge, vEdge)
counterElem = 0
initalLevel = 0
# Generate points for each element
for iV in range(numElemV):
for iU in range(numElemU):
uMin = uPar[iV, iU]
uMax = uPar[iV, iU+1]
vMin = vPar[iV, iU]
vMax = vPar[iV+1, iU]
vertex[counterElem, 0] = uMin
vertex[counterElem, 1] = vMin
vertex[counterElem, 2] = uMax
vertex[counterElem, 3] = vMax
vertex[counterElem, 4] = initalLevel
counterElem = counterElem + 1
return vertex
def getElemIntPts(self, elemList, numGauss):
'''
Generate quadrature points inside the domain
Input: elemList - contains the vertices of the elements the refined elements
numGauss - number of Gauss quadrature points for each subdivision
Output: xPhys, yPhys, wgtPhy - arrays containing the x and y coordinates
of the points and the corresponding weights
'''
# Allocate quadPts array
quadPts = np.zeros((elemList.shape[0]*numGauss**2, 3))
# Get the Gauss points on the reference interval [-1,1]
gp, gw = | np.polynomial.legendre.leggauss(numGauss) | numpy.polynomial.legendre.leggauss |
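# getElemIntPts above obtains Gauss-Legendre points on the reference interval
# [-1, 1]; mapping them onto an element [a, b] uses the affine change of
# variables, with weights rescaled by (b - a)/2. A minimal 1D illustration
# (standalone sketch, separate from the class above):
def _gauss_on_interval(a, b, numGauss=4):
    gp, gw = np.polynomial.legendre.leggauss(numGauss)
    pts = 0.5 * (b - a) * gp + 0.5 * (a + b)
    wts = 0.5 * (b - a) * gw
    return pts, wts
# Example: a 4-point rule integrates x**3 exactly on [0, 2] (exact value 4):
#     pts, wts = _gauss_on_interval(0.0, 2.0)
#     assert abs(np.sum(wts * pts ** 3) - 4.0) < 1e-12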
import os, glob
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from tensorflow import keras
from scipy.interpolate import interp1d
from astropy.table import Table, Column
__all__ = ['ConvNN']
class ConvNN(object):
"""
Creates and trains the convolutional
neural network.
"""
def __init__(self, output_dir, ds=None,
layers=None, optimizer='adam',
loss='binary_crossentropy',
metrics=None):
"""
Creates and trains a Tensorflow keras model
with either layers that have been passed in
by the user or with default layers used in
Feinstein et al. (2020; in prep.).
Parameters
----------
ds : stella.DataSet object
output_dir : str
Path to a given output directory for files.
training : float, optional
Assigns the percentage of training set data for training.
Default is 80%.
validation : float, optional
Assigns the percentage of training set data for validation.
Default is 10%.
layers : np.array, optional
An array of keras.layers for the ConvNN.
optimizer : str, optional
Optimizer used to compile keras model. Default is 'adam'.
loss : str, optional
Loss function used to compile keras model. Default is
'binary_crossentropy'.
metrics: np.array, optional
Metrics used to train the keras model on. If None, metrics are
[accuracy, precision, recall].
epochs : int, optional
Number of epochs to train the keras model on. Default is 15.
seed : int, optional
Sets random seed for reproducable results. Default is 2.
output_dir : path, optional
The path to save models/histories/predictions to. Default is
to create a hidden ~/.stella directory.
Attributes
----------
layers : np.array
optimizer : str
loss : str
metrics : np.array
training_matrix : stella.TrainingSet.training_matrix
labels : stella.TrainingSet.labels
image_fmt : stella.TrainingSet.cadences
"""
self.ds = ds
self.layers = layers
self.optimizer = optimizer
self.loss = loss
self.metrics = metrics
if ds is not None:
self.training_matrix = np.copy(ds.training_matrix)
self.labels = np.copy(ds.labels)
self.cadences = np.copy(ds.cadences)
self.frac_balance = ds.frac_balance + 0.0
self.tpeaks = ds.training_peaks
self.training_ids = ds.training_ids
else:
print("WARNING: No stella.DataSet object passed in.")
print("Can only use stella.ConvNN.predict().")
self.prec_recall_curve = None
self.history = None
self.history_table = None
self.output_dir = output_dir
def create_model(self, seed):
"""
Creates the Tensorflow keras model with appropriate layers.
Attributes
----------
model : tensorflow.python.keras.engine.sequential.Sequential
"""
# SETS RANDOM SEED FOR REPRODUCABLE RESULTS
| np.random.seed(seed) | numpy.random.seed |
# -*- coding: utf-8 -*-
"""Tests for `codex-africanus` package."""
import numpy as np
import pytest
def test_fit_spi_components_vs_scipy():
"""
Here we just test the per component spi fitter against
a looped version of scipy's curve_fit
:return:
"""
from africanus.model.spi import fit_spi_components
curve_fit = pytest.importorskip("scipy.optimize").curve_fit
np.random.seed(123)
ncomps = 25
alphas = -0.7 + 0.25 * np.random.randn(ncomps, 1)
i0s = 5.0 + np.random.randn(ncomps, 1)
nfreqs = 100
freqs = np.linspace(0.5, 1.5, nfreqs).reshape(1, nfreqs)
freq0 = 0.7
model = i0s * (freqs / freq0) ** alphas
sigma = np.abs(0.25 + 0.1 * np.random.randn(nfreqs))
data = model + sigma[None, :] * np.random.randn(ncomps, nfreqs)
weights = 1.0/sigma**2
alpha1, alphavar1, I01, I0var1 = fit_spi_components(
data, weights, freqs.squeeze(), freq0, tol=1e-8)
def spi_func(nu, I0, alpha):
return I0 * nu ** alpha
I02 = np.zeros(ncomps)
I0var2 = np.zeros(ncomps)
alpha2 = | np.zeros(ncomps) | numpy.zeros |