import sys
import numpy as np
from matplotlib import pyplot
sys.path.append('..')
from submission import SubmissionBase
def displayData(X, example_width=None, figsize=(10, 10)):
"""
Displays 2D data stored in X in a nice grid.
"""
# Compute rows, cols
if X.ndim == 2:
m, n = X.shape
elif X.ndim == 1:
n = X.size
m = 1
X = X[None] # Promote to a 2 dimensional array
else:
raise IndexError('Input X should be 1 or 2 dimensional.')
example_width = example_width or int(np.round(np.sqrt(n)))
    example_height = n // example_width
# Compute number of items to display
display_rows = int(np.floor(np.sqrt(m)))
display_cols = int(np.ceil(m / display_rows))
fig, ax_array = pyplot.subplots(display_rows, display_cols, figsize=figsize)
fig.subplots_adjust(wspace=0.025, hspace=0.025)
ax_array = [ax_array] if m == 1 else ax_array.ravel()
for i, ax in enumerate(ax_array):
        ax.imshow(X[i].reshape(example_height, example_width, order='F'),
                  cmap='Greys', extent=[0, 1, 0, 1])
ax.axis('off')
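# Illustrative usage (hypothetical data, not part of the course code): rows of
# X are assumed to be square images, e.g. 20x20 digits flattened to 400 values.
#
#     X_demo = np.random.rand(25, 400)
#     displayData(X_demo)
#     pyplot.show()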
def sigmoid(z):
"""
Computes the sigmoid of z.
"""
return 1.0 / (1.0 + np.exp(-z))
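# Quick sanity checks (illustrative, not part of the grader): sigmoid maps 0
# to 0.5 and saturates toward 0 and 1 for large negative/positive inputs.
#
#     >>> sigmoid(0.0)
#     0.5
#     >>> np.round(sigmoid(np.array([-10.0, 10.0])), 4)
#     array([0., 1.])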
class Grader(SubmissionBase):
# Random Test Cases
X = np.stack([np.ones(20),
np.exp(1) * np.sin(np.arange(1, 21)),
np.exp(0.5) * np.cos(np.arange(1, 21))], axis=1)
y = (np.sin(X[:, 0] + X[:, 1]) > 0).astype(float)
Xm = np.array([[-1, -1],
[-1, -2],
[-2, -1],
[-2, -2],
[1, 1],
[1, 2],
[2, 1],
[2, 2],
[-1, 1],
[-1, 2],
[-2, 1],
[-2, 2],
[1, -1],
[1, -2],
[-2, -1],
[-2, -2]])
ym = np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3])
t1 = np.sin(np.reshape(np.arange(1, 25, 2), (4, 3), order='F'))
t2 = np.cos(np.reshape(np.arange(1, 41, 2), (4, 5), order='F'))
def __init__(self):
part_names = ['Regularized Logistic Regression',
'One-vs-All Classifier Training',
'One-vs-All Classifier Prediction',
'Neural Network Prediction Function']
super().__init__('multi-class-classification-and-neural-networks', part_names)
def __iter__(self):
for part_id in range(1, 5):
try:
func = self.functions[part_id]
                # Each part takes different arguments and grades a different function
if part_id == 1:
res = func(np.array([0.25, 0.5, -0.5]), self.X, self.y, 0.1)
                    res = np.hstack(res)
import os
import tempfile
import numpy as np
import scipy.ndimage.measurements as meas
from functools import reduce
import warnings
import sys
sys.path.append(os.path.abspath(r'../lib'))
import NumCppPy as NumCpp # noqa E402
####################################################################################
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
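# Example (illustrative): factors(12) -> {1, 2, 3, 4, 6, 12}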
####################################################################################
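# Seeding the global RNG once makes every subsequent np.random draw in this
# module deterministic, so test failures are reproducible from run to run.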
def test_seed():
np.random.seed(1)
####################################################################################
def test_abs():
randValue = np.random.randint(-100, -1, [1, ]).astype(np.double).item()
assert NumCpp.absScaler(randValue) == np.abs(randValue)
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.absScaler(value), 9) == np.round(np.abs(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.absArray(cArray), np.abs(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.absArray(cArray), 9), np.round(np.abs(data), 9))
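# The 9-decimal rounding above (used throughout this file) tolerates
# last-digit floating point differences between the C++ and NumPy results.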
####################################################################################
def test_add():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
####################################################################################
def test_alen():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.alen(cArray) == shape.rows
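# (np.alen, which NumCpp.alen mirrors, returned the length of the first axis
# and has since been removed from NumPy, hence the comparison against
# shape.rows rather than a NumPy call.)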
####################################################################################
def test_all():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
####################################################################################
def test_allclose():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
tolerance = 1e-5
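    # data2 below perturbs data1 by a tenth of the tolerance (should compare
    # close), while data3 is off by 1.0 (should not)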
data1 = np.random.randn(shape.rows, shape.cols)
data2 = data1 + tolerance / 10
data3 = data1 + 1
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
assert NumCpp.allclose(cArray1, cArray2, tolerance) and not NumCpp.allclose(cArray1, cArray3, tolerance)
####################################################################################
def test_amax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
####################################################################################
def test_amin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
####################################################################################
def test_angle():
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.angleScaler(value), 9) == np.round(np.angle(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.angleArray(cArray), 9), np.round(np.angle(data), 9))
####################################################################################
def test_any():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
####################################################################################
def test_append():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.append(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
numRows = np.random.randint(1, 100, [1, ]).item()
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + numRows, shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.append(data1, data2, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
    numCols = np.random.randint(1, 100, [1, ]).item()
    shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + numCols)
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.append(data1, data2, axis=1))
####################################################################################
def test_arange():
start = np.random.randn(1).item()
stop = np.random.randn(1).item() * 100
step = np.abs(np.random.randn(1).item())
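    # np.arange counts down only with a negative step, so flip the sign
    # whenever the random stop falls below start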
if stop < start:
step *= -1
data = np.arange(start, stop, step)
assert np.array_equal(np.round(NumCpp.arange(start, stop, step).flatten(), 9), np.round(data, 9))
####################################################################################
def test_arccos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
####################################################################################
def test_arccosh():
value = np.abs(np.random.rand(1).item()) + 1
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) + 1
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
####################################################################################
def test_arcsin():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
####################################################################################
def test_arcsinh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
####################################################################################
def test_arctan():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
####################################################################################
def test_arctan2():
xy = np.random.rand(2) * 2 - 1
assert np.round(NumCpp.arctan2Scaler(xy[1], xy[0]), 9) == np.round(np.arctan2(xy[1], xy[0]), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayX = NumCpp.NdArray(shape)
cArrayY = NumCpp.NdArray(shape)
xy = np.random.rand(*shapeInput, 2) * 2 - 1
xData = xy[:, :, 0].reshape(shapeInput)
yData = xy[:, :, 1].reshape(shapeInput)
cArrayX.setArray(xData)
cArrayY.setArray(yData)
assert np.array_equal(np.round(NumCpp.arctan2Array(cArrayY, cArrayX), 9), np.round(np.arctan2(yData, xData), 9))
####################################################################################
def test_arctanh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
####################################################################################
def test_argmax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
####################################################################################
def test_argmin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
####################################################################################
def test_argsort():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
dataFlat = data.flatten()
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
dataFlat = data.flatten()
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
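    # Compare the gathered values rather than the raw index arrays: columns
    # with ties may order equal elements differently in the two sorts, but
    # the sorted values themselves must agree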
allPass = True
for idx, row in enumerate(data.T):
if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
allPass = True
for idx, row in enumerate(data.T):
if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]): # noqa
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]):
allPass = False
break
assert allPass
####################################################################################
def test_argwhere():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
####################################################################################
def test_around():
value = np.abs(np.random.rand(1).item()) * np.random.randint(1, 10, [1, ]).item()
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert NumCpp.aroundScaler(value, numDecimalsRound) == np.round(value, numDecimalsRound)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert np.array_equal(NumCpp.aroundArray(cArray, numDecimalsRound), np.round(data, numDecimalsRound))
####################################################################################
def test_array_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, shapeInput)
data2 = np.random.randint(1, 100, shapeInput)
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
cArray3 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
####################################################################################
def test_array_equiv():
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
data1 = np.random.randint(1, 100, shapeInput1)
data3 = np.random.randint(1, 100, shapeInput3)
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
cArray3 = NumCpp.NdArrayComplexDouble(shape3)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
imag3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data3 = real3 + 1j * imag3
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
####################################################################################
def test_asarray():
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
####################################################################################
def test_astype():
    shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToUint32(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.uint32))
assert cArrayCast.dtype == np.uint32
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToComplex(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.complex128))
assert cArrayCast.dtype == np.complex128
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
cArrayCast = NumCpp.astypeComplexToComplex(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.complex64))
assert cArrayCast.dtype == np.complex64
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
cArrayCast = NumCpp.astypeComplexToDouble(cArray).getNumpyArray()
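    # casting complex128 -> double keeps only the real part; NumPy flags this
    # with a ComplexWarning, which is silenced for the comparison below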
warnings.filterwarnings('ignore', category=np.ComplexWarning)
assert np.array_equal(cArrayCast, data.astype(np.double))
warnings.filters.pop() # noqa
assert cArrayCast.dtype == np.double
####################################################################################
def test_average():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, axis=1), 9))
####################################################################################
def test_averageWeighted():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [shape.rows, shape.cols])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.NONE).item(), 9) == \
np.round(np.average(data, weights=weights), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [shape.rows, shape.cols])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.NONE).item(), 9) == \
np.round(np.average(data, weights=weights), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(1, shape.cols)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [1, shape.rows])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(1, shape.cols)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [1, shape.rows])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(1, shape.rows)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [1, shape.cols])
cWeights.setArray(weights)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(1, shape.rows)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [1, shape.cols])
cWeights.setArray(weights)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=1), 9))
####################################################################################
def test_binaryRepr():
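    """Verify NumCpp.binaryRepr pads to the dtype's full bit width, matching
    np.binary_repr(value, np.iinfo(np.uint64).bits).
    """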
value = np.random.randint(0, np.iinfo(np.uint64).max, [1, ], dtype=np.uint64).item()
assert NumCpp.binaryRepr(np.uint64(value)) == np.binary_repr(value, np.iinfo(np.uint64).bits)
####################################################################################
def test_bincount():
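    """Verify NumCpp.bincount with the default minimum length, with a
    minLength larger than max(data), and with per-element weights.
    """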
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
assert np.array_equal(NumCpp.bincount(cArray, 0).flatten(), np.bincount(data.flatten(), minlength=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
minLength = int(np.max(data) + 10)
assert np.array_equal(NumCpp.bincount(cArray, minLength).flatten(),
np.bincount(data.flatten(), minlength=minLength))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
cWeights = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
weights = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(NumCpp.bincountWeighted(cArray, cWeights, 0).flatten(),
np.bincount(data.flatten(), minlength=0, weights=weights.flatten()))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
cWeights = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
weights = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
cWeights.setArray(weights)
minLength = int(np.max(data) + 10)
assert np.array_equal(NumCpp.bincountWeighted(cArray, cWeights, minLength).flatten(),
np.bincount(data.flatten(), minlength=minLength, weights=weights.flatten()))
####################################################################################
def test_bitwise_and():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_and(cArray1, cArray2), np.bitwise_and(data1, data2))
####################################################################################
def test_bitwise_not():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt64(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray.setArray(data)
assert np.array_equal(NumCpp.bitwise_not(cArray), np.bitwise_not(data))
####################################################################################
def test_bitwise_or():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_or(cArray1, cArray2), np.bitwise_or(data1, data2))
####################################################################################
def test_bitwise_xor():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_xor(cArray1, cArray2), np.bitwise_xor(data1, data2))
####################################################################################
def test_byteswap():
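    """Only the shape of the byteswapped array is checked here; the swapped
    byte values themselves are not compared against numpy.
    """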
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt64(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray.setArray(data)
assert np.array_equal(NumCpp.byteswap(cArray).shape, shapeInput)
####################################################################################
def test_cbrt():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cbrtArray(cArray), 9), np.round(np.cbrt(data), 9))
####################################################################################
def test_ceil():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.ceilArray(cArray), 9), np.round(np.ceil(data), 9))
####################################################################################
def test_center_of_mass():
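    """Verify NumCpp.centerOfMass against meas.center_of_mass: the whole-array
    case directly, and the Axis.ROW/COL cases by computing the center of mass
    of each column/row one at a time.
    """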
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.NONE).flatten(), 9),
np.round(meas.center_of_mass(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
coms = list()
for col in range(data.shape[1]):
coms.append(np.round(meas.center_of_mass(data[:, col])[0], 9))
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.ROW).flatten(), 9), np.round(coms, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
coms = list()
for row in range(data.shape[0]):
coms.append(np.round(meas.center_of_mass(data[row, :])[0], 9))
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.COL).flatten(), 9), np.round(coms, 9))
####################################################################################
def test_clip():
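    """Verify NumCpp.clip for scalars and arrays. The complex cases rely on
    numpy's ordering of complex numbers (real part first, then imaginary).
    """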
value = np.random.randint(0, 100, [1, ]).item()
minValue = np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item()
assert NumCpp.clipScaler(value, minValue, maxValue) == np.clip(value, minValue, maxValue)
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
minValue = np.random.randint(0, 10, [1, ]).item() + 1j * np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
assert NumCpp.clipScaler(value, minValue, maxValue) == np.clip(value, minValue, maxValue) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
minValue = np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item()
assert np.array_equal(NumCpp.clipArray(cArray, minValue, maxValue), np.clip(data, minValue, maxValue))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
minValue = np.random.randint(0, 10, [1, ]).item() + 1j * np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
assert np.array_equal(NumCpp.clipArray(cArray, minValue, maxValue), np.clip(data, minValue, maxValue)) # noqa
####################################################################################
def test_column_stack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.column_stack(cArray1, cArray2, cArray3, cArray4),
np.column_stack([data1, data2, data3, data4]))
####################################################################################
def test_complex():
real = np.random.rand(1).astype(np.double).item()
value = complex(real)
assert np.round(NumCpp.complexScaler(real), 9) == np.round(value, 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.complexScaler(components[0], components[1]), 9) == np.round(value, 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
realArray = NumCpp.NdArray(shape)
real = np.random.rand(shape.rows, shape.cols)
realArray.setArray(real)
assert np.array_equal(np.round(NumCpp.complexArray(realArray), 9), np.round(real + 1j * np.zeros_like(real), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
realArray = NumCpp.NdArray(shape)
imagArray = NumCpp.NdArray(shape)
real = np.random.rand(shape.rows, shape.cols)
imag = np.random.rand(shape.rows, shape.cols)
realArray.setArray(real)
imagArray.setArray(imag)
assert np.array_equal(np.round(NumCpp.complexArray(realArray, imagArray), 9), np.round(real + 1j * imag, 9))
####################################################################################
def test_concatenate():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.NONE).flatten(),
np.concatenate([data1.flatten(), data2.flatten(), data3.flatten(), data4.flatten()]))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape3 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape4 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.ROW),
np.concatenate([data1, data2, data3, data4], axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.COL),
np.concatenate([data1, data2, data3, data4], axis=1))
####################################################################################
def test_conj():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.conjScaler(value), 9) == np.round(np.conj(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.conjArray(cArray), 9), np.round(np.conj(data), 9))
####################################################################################
def test_contains():
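    """Verify NumCpp.contains: Axis.NONE tests membership over the whole
    array, Axis.COL tests each row, and Axis.ROW tests each column (the
    reference truth lists loop over data and data.T respectively).
    """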
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert NumCpp.contains(cArray, value, NumCpp.Axis.NONE).getNumpyArray().item() == (value in data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert NumCpp.contains(cArray, value, NumCpp.Axis.NONE).getNumpyArray().item() == (value in data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.COL).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.COL).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data.T:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data.T:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.asarray(truth))
####################################################################################
def test_copy():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.copy(cArray), data)
####################################################################################
def test_copysign():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.copysign(cArray1, cArray2), np.copysign(data1, data2))
####################################################################################
def test_copyto():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray()
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
assert np.array_equal(NumCpp.copyto(cArray2, cArray1), data1)
####################################################################################
def test_cos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.cosScaler(value), 9) == np.round(np.cos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.cosScaler(value), 9) == np.round(np.cos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cosArray(cArray), 9), np.round(np.cos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cosArray(cArray), 9), np.round(np.cos(data), 9))
####################################################################################
def test_cosh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.coshScaler(value), 9) == np.round(np.cosh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.coshScaler(value), 9) == np.round(np.cosh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.coshArray(cArray), 9), np.round(np.cosh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.coshArray(cArray), 9), np.round(np.cosh(data), 9))
####################################################################################
def test_count_nonzero():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.count_nonzero(cArray, NumCpp.Axis.NONE) == np.count_nonzero(data)
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.count_nonzero(cArray, NumCpp.Axis.NONE) == np.count_nonzero(data)
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.ROW).flatten(), np.count_nonzero(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.ROW).flatten(), np.count_nonzero(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.COL).flatten(), np.count_nonzero(data, axis=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.COL).flatten(), np.count_nonzero(data, axis=1))
####################################################################################
def test_cross():
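    """Verify NumCpp.cross: 2-component vectors yield the scalar z-component,
    3-component vectors yield full cross-product vectors, and Axis.ROW/COL
    pair up column-wise/row-wise vectors as in np.cross with axis=0/1.
    """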
shape = NumCpp.Shape(1, 2)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).item() == np.cross(data1, data2).item()
shape = NumCpp.Shape(1, 2)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).item() == np.cross(data1, data2).item()
shape = NumCpp.Shape(2, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(2, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 2)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 2)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(1, 3)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.cross(data1, data2).flatten())
shape = NumCpp.Shape(1, 3)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.cross(data1, data2).flatten())
shape = NumCpp.Shape(3, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(3, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 3)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 3)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.cross(data1, data2, axis=1))
####################################################################################
def test_cube():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cube(cArray), 9), np.round(data * data * data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cube(cArray), 9), np.round(data * data * data, 9))
####################################################################################
def test_cumprod():
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.NONE).flatten(), data.cumprod())
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.NONE).flatten(), data.cumprod())
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.ROW), data.cumprod(axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.ROW), data.cumprod(axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.COL), data.cumprod(axis=1))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.COL), data.cumprod(axis=1))
####################################################################################
def test_cumsum():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.NONE).flatten(), data.cumsum())
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.NONE).flatten(), data.cumsum())
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.ROW), data.cumsum(axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.ROW), data.cumsum(axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.COL), data.cumsum(axis=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.COL), data.cumsum(axis=1))
####################################################################################
def test_deg2rad():
value = np.abs(np.random.rand(1).item()) * 360
assert np.round(NumCpp.deg2radScaler(value), 9) == np.round(np.deg2rad(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 360
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.deg2radArray(cArray), 9), np.round(np.deg2rad(data), 9))
####################################################################################
def test_degrees():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert np.round(NumCpp.degreesScaler(value), 9) == np.round(np.degrees(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.degreesArray(cArray), 9), np.round(np.degrees(data), 9))
####################################################################################
def test_deleteIndices():
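    """Verify NumCpp.deleteIndicesSlice and deleteIndicesScaler against
    np.delete. NumCpp.Slice(0, 100, 4) and Python slice(0, 99, 4) select the
    same indices (0, 4, ..., 96) here, so the two references agree.
    """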
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.NONE).flatten(),
np.delete(data, indicesPy, axis=None))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.ROW),
np.delete(data, indicesPy, axis=0))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.COL),
np.delete(data, indicesPy, axis=1))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, shape.size(), [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.NONE).flatten(),
np.delete(data, index, axis=None))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.ROW), np.delete(data, index, axis=0))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.COL), np.delete(data, index, axis=1))
####################################################################################
def test_diag():
shapeInput = np.random.randint(2, 25, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
k = np.random.randint(0, np.min(shapeInput), [1, ]).item()
elements = np.random.randint(1, 100, shapeInput)
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diag(cElements, k).flatten(), np.diag(elements, k))
shapeInput = np.random.randint(2, 25, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
k = np.random.randint(0, np.min(shapeInput), [1, ]).item()
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diag(cElements, k).flatten(), np.diag(elements, k))
####################################################################################
def test_diagflat():
numElements = np.random.randint(2, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
elements = np.random.randint(1, 100, [numElements, ])
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(2, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
real = np.random.randint(1, 100, [numElements, ])
imag = np.random.randint(1, 100, [numElements, ])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(1, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
elements = np.random.randint(1, 100, [numElements, ])
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(1, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
real = np.random.randint(1, 100, [numElements, ])
imag = np.random.randint(1, 100, [numElements, ])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
####################################################################################
def test_diagonal():
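    """Verify NumCpp.diagonal: Axis.ROW matches np.diagonal(axis1=0, axis2=1)
    and Axis.COL matches the transposed call (axis1=1, axis2=0).
    """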
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.ROW).flatten(),
np.diagonal(data, offset, axis1=0, axis2=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.ROW).flatten(),
np.diagonal(data, offset, axis1=0, axis2=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.COL).flatten(),
np.diagonal(data, offset, axis1=1, axis2=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.COL).flatten(),
np.diagonal(data, offset, axis1=1, axis2=0))
####################################################################################
def test_diff():
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.NONE).flatten(),
np.diff(data.flatten()))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.NONE).flatten(),
np.diff(data.flatten()))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.ROW), np.diff(data, axis=0))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.ROW), np.diff(data, axis=0))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.COL).astype(np.uint32), np.diff(data, axis=1))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.COL), np.diff(data, axis=1))
####################################################################################
def test_divide():
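    """Verify NumCpp.divide over real/complex array and scalar combinations.
    Divisors are kept nonzero by replacing zeros with 1 or redrawing scalars.
    """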
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2[data2 == 0] = 1
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = 0
while value == 0:
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
data[data == 0] = 1
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
data2[data2 == complex(0)] = complex(1)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = 0
while value == complex(0):
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[data == complex(0)] = complex(1)
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2[data2 == 0] = 1
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
data2[data2 == complex(0)] = complex(1)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
    value = 0
    while value == complex(0):
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
data[data == 0] = 1
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = 0
while value == 0:
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[data == complex(0)] = complex(1)
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
                          np.round(value / data, 9))
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 9 14:43:48 2020
@author: arnou
"""
import os
import errno
import numpy as np
import librosa
import pandas as pd
import more_itertools
from sklearn.preprocessing import StandardScaler
def segment_audio(signal, fs, win_size, win_step):
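    """Split `signal` into overlapping windows of win_size samples taken every
    win_step samples (fs is accepted but unused here). Note that
    more_itertools.windowed pads the final window with None; the label helpers
    below map those fill values to 0.
    """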
print('segment')
win_data = list(more_itertools.windowed(signal, n=win_size, step=win_step))
return(win_data)
def obtain_win_label_single(seg_label):
    seg_win_label = np.zeros((len(seg_label), 1))
for iSeg in range(len(seg_label)):
win_label_value = np.asarray(seg_label[iSeg])
win_label_value[win_label_value == None] = 0
print(win_label_value)
if np.sum(win_label_value) / len(win_label_value) >= 0.5:
seg_win_label[iSeg] = 1
return(seg_win_label)
def obtain_win_label(seg_label):
    print('windowed label array to label value')
seg_win_label_exist = np.zeros((len(seg_label), 1))
seg_win_label_strong = np.zeros((len(seg_label), 1))
seg_win_label_mid = np.zeros((len(seg_label), 1))
seg_win_label_weak = np.zeros((len(seg_label), 1))
for iSeg in range(len(seg_label)):
win_label_value = np.asarray(seg_label[iSeg])
win_label_value[win_label_value == None] = 0
#print(win_label_value)
if np.sum(win_label_value) > 0:
seg_win_label_exist[iSeg] = 1
if np.sum(win_label_value) / len(win_label_value) == 1:
seg_win_label_strong[iSeg] = 1
if np.sum(win_label_value) / len(win_label_value) >= 0.75:
seg_win_label_mid[iSeg] = 1
        # Weak-label threshold reconstructed by analogy with the 0.5 rule in
        # obtain_win_label_single (an assumption; the original line was garbled).
        if np.sum(win_label_value) / len(win_label_value) >= 0.5:
            seg_win_label_weak[iSeg] = 1
    return(seg_win_label_exist, seg_win_label_strong, seg_win_label_mid, seg_win_label_weak)
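
# Illustrative usage (the file name and window parameters are assumptions):
#   signal, fs = librosa.load('clip.wav', sr=None)
#   labels = ...                       # per-sample 0/1 annotations for signal
#   win_labels = segment_audio(labels, fs, win_size=fs, win_step=fs // 2)
#   exist, strong, mid, weak = obtain_win_label(win_labels)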
from flask import Flask, render_template, request, redirect, url_for, session, Response
import re
import mysql.connector
from numpy.linalg import norm
import cv2
import sys
import os
import math
import numpy as np
import json
from datetime import datetime  # used for the timestamped recording filename below
from threading import Thread
import VideoEnhancement
import fishpredictor
import detector
import kmeancluster
import preproccesing
import randomforst
class fishs:
def __init__(self):
self.mylist = []
def addfish(self,data):
x=[data]
self.mylist.append(x)
def addframe(self,id,data):
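        # Fish ids are treated as 1-based: the frame is appended to an existing
        # track when one exists, otherwise a new track is started. This assumes
        # ids are issued sequentially, since addfish ignores the id value.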
# print(len(self.mylist))
if len(self.mylist)>(id-1):
self.mylist[id-1].append(data)
else:
self.addfish(data)
app = Flask(__name__)
app.secret_key = "abc"
# # Enter your database connection details below
# mydb = mysql.connector.connect(
# host = "localhost",
# user = "root",
# password = "",
# database = "samak"
# )
# mycursor = mydb.cursor()
waterIsToxic = "Clear"
isFinished = False
currentBehavior = "Normal"
cap = cv2.VideoCapture('chaos1.avi')
def yolo():
cluster = kmeancluster.kmeans()
classifier = randomforst.randomforst()
fishs = []
framenum = 0
sum = 0
max = 0
mylist = [[]]
yolo = detector.detector()
ret, frame = cap.read()
fheight, fwidth, channels = frame.shape
resize = False
if (fheight > 352 or fwidth > 640):
resize = True
fwidth = 640
fheight = 352
frame = cv2.resize(frame, (640, 352))
mask = np.zeros_like(frame)
# Needed for saving video
fps = cap.get(cv2.CAP_PROP_FPS)
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
dt_string = datetime.now().strftime("%H_%M_%S_%d_%m_%y")
num_seconds = 10
video = cv2.VideoWriter('videonormal/' +str(num_seconds*round(fps))+'_'+str(dt_string)+'.avi', fourcc, fps, (fwidth, fheight))
# Read until video is completed
buffer = [[]]
apperance = [[]]
last_changed = []
top = 0
frms = 0
# Needed to track objects
n_frame = 8
ref_n_frame_axies = []
ref_n_frame_label = []
ref_n_frame_axies_flatten = []
ref_n_frame_label_flatten = []
frm_num = 1
coloredLine = np.random.randint(0, 255, (10000, 3))
label_cnt = 1
min_distance = 50
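    # Nearest-neighbour data association: each new detection centre is compared
    # against the centres seen over the last n_frame frames, and a detection
    # closer than min_distance pixels to a previous centre inherits that
    # track's label; label_cnt supplies labels for unmatched detections.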
while (cap.isOpened()):
ret, img = cap.read()
if ret == True:
if frms % 2 == 0:
img = VideoEnhancement.enhanceVideo(img, resize)
cur_frame_axies = []
cur_frame_label = []
boxes, confidences, centers, colors = yolo.detect(img)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.1, 0.4)
font = cv2.FONT_HERSHEY_PLAIN
for i in range(len(boxes)):
if i in indexes:
lbl = float('nan')
x, y, w, h, = boxes[i]
center_x, center_y = centers[i]
color = colors[0]
if (len(ref_n_frame_label_flatten) > 0):
b = np.array([(center_x, center_y)])
a = np.array(ref_n_frame_axies_flatten)
distance = norm(a - b, axis=1)
min_value = distance.min()
if (min_value < min_distance):
                                idx = np.where(distance == min_value)
#!/usr/bin/env python3
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'data'))
import data_tools
import pandas
#import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patches
from sklearn.linear_model import LinearRegression, Lasso, Ridge
data_dir = '../../HuGaDB/HuGaDB/Data.parsed/'
def plot_zplane(zeros, k, ax):
if len(zeros) == 0:
return
uc = patches.Circle((0,0), radius=1, fill=False, color='black', ls='dashed')
ax.add_patch(uc)
p = plt.plot(zeros.real, zeros.imag, 'bo', ms=10)
plt.setp( p, markersize=12.0, markeredgewidth=3.0,
markeredgecolor='b', markerfacecolor='b')
ax.spines['left'].set_position('center')
ax.spines['bottom'].set_position('center')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
r = max(1.5, 1.3*max(abs(zeros))); plt.axis('scaled'); plt.axis([-r, r, -r, r])
ticks = [-1, -.5, .5, 1]; plt.xticks(ticks); plt.yticks(ticks)
ax.title.set_text("z={}, K={}".format(sum(zeros != 0.0), k))
def compute_zeros(b_vec):
"""Compute the zeros of the FIR filter given the coefficents b_vec
y[n] = \summation b_k*x[n-k]
"""
if (b_vec==0.0).all():
return np.array([]), 0
zeros = np.roots(b_vec)
poly = np.poly(zeros)
print("computed zeros:")
print(zeros)
return zeros, b_vec[0]/poly[0]
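# Quick check (illustrative): a 3-tap moving-average filter b = [1/3, 1/3, 1/3]
# has zeros at the two complex cube roots of unity, both on the unit circle.
#
#     zeros, k = compute_zeros(np.array([1/3, 1/3, 1/3]))
#     fig, ax = plt.subplots()
#     plot_zplane(zeros, k, ax)        # k ~= 1/3; zeros ~= -0.5 +/- 0.866j
#     plt.show()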
HISTORY = 3
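# build_features turns the raw sensor stream into an autoregressive dataset:
# each example is the previous HISTORY samples of the selected column(s) and
# the label is the current sample of that column.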
def build_features(file_name, axis, is_cross=False):
#file_name='HuGaDB_v1_walking_14_02.txt'
#ACCEL_FIX = 2.0/32765
#GYRO_FIX = 2000/32768
#start = 12
#cols = range(start, start+6)
dataset = pandas.read_csv(file_name, sep=',')
data_length = np.shape(dataset)[0]
    data_mat = dataset.to_numpy()  # .as_matrix() was removed in pandas 1.0
features, labels = [], []
#for start in range(0, 36, 6):
col_set = [axis]
#if axis < 3:
# col_set = [0,1,2]
#else:
# col_set = [3,4,5]
for start in col_set:
if is_cross:
cols = range(6)
else:
cols = [start]
for time in range(HISTORY, data_length):
example = []
for col in cols:
example.extend(data_mat[:,col][time-HISTORY:time])
features.append(example)
labels.append(data_mat[:,start][time]) # only train x_accel for now
return np.asarray(features), np.asarray(labels)
def test():
fil_coef = np.zeros((6,6,1))
for thing in range(6):
fil_coef[thing][thing][0] = 1
fil_coef[0][0][0]=.9182
dir_name = 'cross_fir'
data_tools.save_array(fil_coef, os.path.join(dir_name, 'test_coefs'))
print(fil_coef)
def data_train():
#X, y = None, None
#iteration = 0
#for data_file in os.listdir(data_dir):
# data_path = os.path.join(data_dir, data_file)
# iteration +=1
# if iteration < 1:
# continue
# if iteration > 21:
# break
# print("loading: {}".format(data_file))
# if X is not None:
# Xnew, ynew = build_features(data_path)
# X = np.concatenate((X, Xnew))
# y = np.concatenate((y, ynew))
# else:
# X, y = build_features(data_path)
data_file = '/home/chiasson/Documents/David/research/HuGaDB/HuGaDB/Data.parsed/processed/training/all.csv'
all_coeffs = []
#for stream in [0, 3]:
for stream in range(6):
X, y = build_features(data_file, stream, is_cross=False)
print("features built for stream {}".format(stream))
#reg = LinearRegression().fit(X,y)
reg = Lasso(alpha=.1, fit_intercept=False, max_iter=200000).fit(X,y)
reg.coef_[np.abs(reg.coef_) < (1.0/16)] = 0
print(reg.coef_)
#all_coeffs.append(reg.coef_)
#all_coeffs.append(reg.coef_)
all_coeffs.append(reg.coef_)
all_coeffs = np.asarray(all_coeffs)
#print(reg.score(X,y))
#print(np.linalg.norm(reg.coef_, ord=1))
#print(np.linalg.norm(reg.coef_, ord=2))
dir_name = 'cross_fir'
try:
os.makedirs(dir_name)
except OSError:
pass
print("FINISHED")
print(all_coeffs)
data_tools.save_array(all_coeffs, os.path.join(dir_name, 'test_coefs'))
return
# plot the poles and zeros!
for var in range(6):
        b_vec = np.flip(reg.coef_[var*HISTORY:var*HISTORY+HISTORY])
'''
Date: 9/28/20
Commit: <PASSWORD>
'''
import numpy as np
# import torch
# from torch.autograd import Variable
import sys
import os
import matplotlib.pyplot as plt
np.set_printoptions(threshold=sys.maxsize)
# Author: <NAME>
#
# License: BSD 3 clause
import logging
import numpy as np
import pandas as pd
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import strip_tags
import umap
import hdbscan
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from joblib import dump, load
from sklearn.cluster import dbscan
import tempfile
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import normalize
from scipy.special import softmax
try:
import hnswlib
_HAVE_HNSWLIB = True
except ImportError:
_HAVE_HNSWLIB = False
try:
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text
_HAVE_TENSORFLOW = True
except ImportError:
_HAVE_TENSORFLOW = False
try:
from sentence_transformers import SentenceTransformer
_HAVE_TORCH = True
except ImportError:
_HAVE_TORCH = False
logger = logging.getLogger('top2vec')
logger.setLevel(logging.WARNING)
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(sh)
def default_tokenizer(doc):
"""Tokenize documents for training and remove too long/short words"""
return simple_preprocess(strip_tags(doc), deacc=True)
class Top2Vec:
"""
Top2Vec
Creates jointly embedded topic, document and word vectors.
Parameters
----------
embedding_model: string
This will determine which model is used to generate the document and
word embeddings. The valid string options are:
* doc2vec
* universal-sentence-encoder
* universal-sentence-encoder-multilingual
* distiluse-base-multilingual-cased
For large data sets and data sets with very unique vocabulary doc2vec
could produce better results. This will train a doc2vec model from
scratch. This method is language agnostic. However multiple languages
will not be aligned.
Using the universal sentence encoder options will be much faster since
those are pre-trained and efficient models. The universal sentence
encoder options are suggested for smaller data sets. They are also
good options for large data sets that are in English or in languages
covered by the multilingual model. It is also suggested for data sets
that are multilingual.
For more information on universal-sentence-encoder visit:
https://tfhub.dev/google/universal-sentence-encoder/4
For more information on universal-sentence-encoder-multilingual visit:
https://tfhub.dev/google/universal-sentence-encoder-multilingual/3
The distiluse-base-multilingual-cased pre-trained sentence transformer
is suggested for multilingual datasets and languages that are not
covered by the multilingual universal sentence encoder. The
transformer is significantly slower than the universal sentence
encoder options.
        For more information on distiluse-base-multilingual-cased visit:
https://www.sbert.net/docs/pretrained_models.html
embedding_model_path: string (Optional)
Pre-trained embedding models will be downloaded automatically by
default. However they can also be uploaded from a file that is in the
location of embedding_model_path.
        Warning: the model at embedding_model_path must match the
embedding_model parameter type.
documents: List of str
Input corpus, should be a list of strings.
min_count: int (Optional, default 50)
Ignores all words with total frequency lower than this. For smaller
corpora a smaller min_count will be necessary.
speed: string (Optional, default 'learn')
This parameter is only used when using doc2vec as embedding_model.
It will determine how fast the model takes to train. The
fast-learn option is the fastest and will generate the lowest quality
vectors. The learn option will learn better quality vectors but take
a longer time to train. The deep-learn option will learn the best
quality vectors but will take significant time to train. The valid
string speed options are:
* fast-learn
* learn
* deep-learn
use_corpus_file: bool (Optional, default False)
This parameter is only used when using doc2vec as embedding_model.
Setting use_corpus_file to True can sometimes provide speedup for
large datasets when multiple worker threads are available. Documents
are still passed to the model as a list of str, the model will create
a temporary corpus file for training.
document_ids: List of str, int (Optional)
A unique value per document that will be used for referring to
documents in search results. If ids are not given to the model, the
index of each document in the original corpus will become the id.
keep_documents: bool (Optional, default True)
If set to False documents will only be used for training and not saved
as part of the model. This will reduce model size. When using search
functions only document ids will be returned, not the actual
documents.
workers: int (Optional)
The amount of worker threads to be used in training the model. Larger
amount will lead to faster training.
tokenizer: callable (Optional, default None)
Override the default tokenization method. If None then
gensim.utils.simple_preprocess will be used.
use_embedding_model_tokenizer: bool (Optional, default False)
If using an embedding model other than doc2vec, use the model's
tokenizer for document embedding. If set to True the tokenizer, either
default or passed callable will be used to tokenize the text to
extract the vocabulary for word embedding.
verbose: bool (Optional, default True)
Whether to print status data during training.
"""
def __init__(self,
documents,
min_count=50,
embedding_model='doc2vec',
embedding_model_path=None,
speed='learn',
use_corpus_file=False,
document_ids=None,
keep_documents=True,
workers=None,
tokenizer=None,
use_embedding_model_tokenizer=False,
verbose=True,
umap_args=None,
hdbscan_args=None):
if verbose:
logger.setLevel(logging.DEBUG)
self.verbose = True
else:
logger.setLevel(logging.WARNING)
self.verbose = False
if tokenizer is not None:
self._tokenizer = tokenizer
else:
self._tokenizer = default_tokenizer
# validate documents
if not (isinstance(documents, list) or isinstance(documents, np.ndarray)):
raise ValueError("Documents need to be a list of strings")
if not all((isinstance(doc, str) or isinstance(doc, np.str_)) for doc in documents):
raise ValueError("Documents need to be a list of strings")
if keep_documents:
self.documents = np.array(documents, dtype="object")
else:
self.documents = None
# validate document ids
if document_ids is not None:
if not (isinstance(document_ids, list) or isinstance(document_ids, np.ndarray)):
raise ValueError("Documents ids need to be a list of str or int")
if len(documents) != len(document_ids):
raise ValueError("Document ids need to match number of documents")
elif len(document_ids) != len(set(document_ids)):
raise ValueError("Document ids need to be unique")
if all((isinstance(doc_id, str) or isinstance(doc_id, np.str_)) for doc_id in document_ids):
self.doc_id_type = np.str_
elif all((isinstance(doc_id, int) or isinstance(doc_id, np.int_)) for doc_id in document_ids):
self.doc_id_type = np.int_
else:
raise ValueError("Document ids need to be str or int")
self.document_ids_provided = True
self.document_ids = np.array(document_ids)
self.doc_id2index = dict(zip(document_ids, list(range(0, len(document_ids)))))
else:
self.document_ids_provided = False
self.document_ids = np.array(range(0, len(documents)))
self.doc_id2index = dict(zip(self.document_ids, list(range(0, len(self.document_ids)))))
self.doc_id_type = np.int_
acceptable_embedding_models = ["universal-sentence-encoder-multilingual",
"universal-sentence-encoder",
"distiluse-base-multilingual-cased"]
self.embedding_model_path = embedding_model_path
if embedding_model == 'doc2vec':
# validate training inputs
if speed == "fast-learn":
hs = 0
negative = 5
epochs = 40
elif speed == "learn":
hs = 1
negative = 0
epochs = 40
elif speed == "deep-learn":
hs = 1
negative = 0
epochs = 400
elif speed == "test-learn":
hs = 0
negative = 5
epochs = 1
else:
raise ValueError("speed parameter needs to be one of: fast-learn, learn or deep-learn")
            if workers is not None and not isinstance(workers, int):
                raise ValueError("workers needs to be an int")
doc2vec_args = {"vector_size": 300,
"min_count": min_count,
"window": 15,
"sample": 1e-5,
"negative": negative,
"hs": hs,
"epochs": epochs,
"dm": 0,
"dbow_words": 1}
if workers is not None:
doc2vec_args["workers"] = workers
logger.info('Pre-processing documents for training')
if use_corpus_file:
processed = [' '.join(self._tokenizer(doc)) for doc in documents]
lines = "\n".join(processed)
temp = tempfile.NamedTemporaryFile(mode='w+t')
temp.write(lines)
doc2vec_args["corpus_file"] = temp.name
else:
train_corpus = [TaggedDocument(self._tokenizer(doc), [i]) for i, doc in enumerate(documents)]
doc2vec_args["documents"] = train_corpus
logger.info('Creating joint document/word embedding')
self.embedding_model = 'doc2vec'
self.model = Doc2Vec(**doc2vec_args)
if use_corpus_file:
temp.close()
elif embedding_model in acceptable_embedding_models:
self.embed = None
self.embedding_model = embedding_model
self._check_import_status()
logger.info('Pre-processing documents for training')
# preprocess documents
train_corpus = [' '.join(self._tokenizer(doc)) for doc in documents]
# preprocess vocabulary
vectorizer = CountVectorizer()
doc_word_counts = vectorizer.fit_transform(train_corpus)
words = vectorizer.get_feature_names()
word_counts = np.array(np.sum(doc_word_counts, axis=0).tolist()[0])
vocab_inds = np.where(word_counts > min_count)[0]
if len(vocab_inds) == 0:
raise ValueError(f"A min_count of {min_count} results in "
f"all words being ignored, choose a lower value.")
self.vocab = [words[ind] for ind in vocab_inds]
self._check_model_status()
logger.info('Creating joint document/word embedding')
# embed words
self.word_indexes = dict(zip(self.vocab, range(len(self.vocab))))
self.word_vectors = self._l2_normalize(np.array(self.embed(self.vocab)))
# embed documents
if use_embedding_model_tokenizer:
self.document_vectors = self._embed_documents(documents)
else:
self.document_vectors = self._embed_documents(train_corpus)
else:
raise ValueError(f"{embedding_model} is an invalid embedding model.")
# create 5D embeddings of documents
logger.info('Creating lower dimension embedding of documents')
self.umap_args = {'n_neighbors': 15,
'n_components': 5,
'metric': 'cosine'} if umap_args is None else umap_args
umap_model = umap.UMAP(**self.umap_args).fit(self._get_document_vectors(norm=False))
# find dense areas of document vectors
logger.info('Finding dense areas of documents')
self.hdbscan_args = {'min_cluster_size': 15,
'metric': 'euclidean',
'cluster_selection_method': 'eom'} if hdbscan_args is None else hdbscan_args
cluster = hdbscan.HDBSCAN(**self.hdbscan_args).fit(umap_model.embedding_)
# calculate topic vectors from dense areas of documents
logger.info('Finding topics')
# create topic vectors
self._create_topic_vectors(cluster.labels_)
# deduplicate topics
self._deduplicate_topics()
# find topic words and scores
self.topic_words, self.topic_word_scores = self._find_topic_words_and_scores(topic_vectors=self.topic_vectors)
# assign documents to topic
self.doc_top, self.doc_dist = self._calculate_documents_topic(self.topic_vectors,
self._get_document_vectors())
# calculate topic sizes
self.topic_sizes = self._calculate_topic_sizes(hierarchy=False)
# re-order topics
self._reorder_topics(hierarchy=False)
# initialize variables for hierarchical topic reduction
self.topic_vectors_reduced = None
self.doc_top_reduced = None
self.doc_dist_reduced = None
self.topic_sizes_reduced = None
self.topic_words_reduced = None
self.topic_word_scores_reduced = None
self.hierarchy = None
# initialize document indexing variables
self.document_index = None
self.serialized_document_index = None
self.documents_indexed = False
self.index_id2doc_id = None
self.doc_id2index_id = None
# initialize word indexing variables
self.word_index = None
self.serialized_word_index = None
self.words_indexed = False
def save(self, file):
"""
Saves the current model to the specified file.
Parameters
----------
file: str
File where model will be saved.
"""
# do not save sentence encoders and sentence transformers
if self.embedding_model != "doc2vec":
self.embed = None
# serialize document index so that it can be saved
if self.documents_indexed:
temp = tempfile.NamedTemporaryFile(mode='w+b')
self.document_index.save_index(temp.name)
self.serialized_document_index = temp.read()
temp.close()
self.document_index = None
# serialize word index so that it can be saved
if self.words_indexed:
temp = tempfile.NamedTemporaryFile(mode='w+b')
self.word_index.save_index(temp.name)
self.serialized_word_index = temp.read()
temp.close()
self.word_index = None
dump(self, file)
@classmethod
def load(cls, file):
"""
Load a pre-trained model from the specified file.
Parameters
----------
file: str
File where model will be loaded from.
"""
top2vec_model = load(file)
# load document index
if top2vec_model.documents_indexed:
if not _HAVE_HNSWLIB:
raise ImportError(f"Cannot load document index.\n\n"
"Try: pip install top2vec[indexing]\n\n"
"Alternatively try: pip install hnswlib")
temp = tempfile.NamedTemporaryFile(mode='w+b')
temp.write(top2vec_model.serialized_document_index)
if top2vec_model.embedding_model == 'doc2vec':
document_vectors = top2vec_model.model.docvecs.vectors_docs
else:
document_vectors = top2vec_model.document_vectors
top2vec_model.document_index = hnswlib.Index(space='ip',
dim=document_vectors.shape[1])
top2vec_model.document_index.load_index(temp.name, max_elements=document_vectors.shape[0])
temp.close()
top2vec_model.serialized_document_index = None
# load word index
if top2vec_model.words_indexed:
if not _HAVE_HNSWLIB:
raise ImportError(f"Cannot load word index.\n\n"
"Try: pip install top2vec[indexing]\n\n"
"Alternatively try: pip install hnswlib")
temp = tempfile.NamedTemporaryFile(mode='w+b')
temp.write(top2vec_model.serialized_word_index)
if top2vec_model.embedding_model == 'doc2vec':
word_vectors = top2vec_model.model.wv.vectors
else:
word_vectors = top2vec_model.word_vectors
top2vec_model.word_index = hnswlib.Index(space='ip',
dim=word_vectors.shape[1])
top2vec_model.word_index.load_index(temp.name, max_elements=word_vectors.shape[0])
temp.close()
top2vec_model.serialized_word_index = None
return top2vec_model
@staticmethod
def _l2_normalize(vectors):
if vectors.ndim == 2:
return normalize(vectors)
else:
return normalize(vectors.reshape(1, -1))[0]
def _embed_documents(self, train_corpus):
self._check_import_status()
self._check_model_status()
# embed documents
batch_size = 500
document_vectors = []
current = 0
batches = int(len(train_corpus) / batch_size)
extra = len(train_corpus) % batch_size
for ind in range(0, batches):
document_vectors.append(self.embed(train_corpus[current:current + batch_size]))
current += batch_size
if extra > 0:
document_vectors.append(self.embed(train_corpus[current:current + extra]))
document_vectors = self._l2_normalize(np.array(np.vstack(document_vectors)))
return document_vectors
def _set_document_vectors(self, document_vectors):
if self.embedding_model == 'doc2vec':
self.model.docvecs.vectors_docs = document_vectors
else:
self.document_vectors = document_vectors
def _get_document_vectors(self, norm=True):
if self.embedding_model == 'doc2vec':
if norm:
self.model.docvecs.init_sims()
return self.model.docvecs.vectors_docs_norm
else:
return self.model.docvecs.vectors_docs
else:
return self.document_vectors
def _index2word(self, index):
if self.embedding_model == 'doc2vec':
return self.model.wv.index2word[index]
else:
return self.vocab[index]
def _get_word_vectors(self):
if self.embedding_model == 'doc2vec':
self.model.wv.init_sims()
return self.model.wv.vectors_norm
else:
return self.word_vectors
def _create_topic_vectors(self, cluster_labels):
unique_labels = set(cluster_labels)
if -1 in unique_labels:
unique_labels.remove(-1)
self.topic_vectors = self._l2_normalize(
np.vstack([self._get_document_vectors(norm=False)[np.where(cluster_labels == label)[0]]
.mean(axis=0) for label in unique_labels]))
def _deduplicate_topics(self):
core_samples, labels = dbscan(X=self.topic_vectors,
eps=0.1,
min_samples=2,
metric="cosine")
duplicate_clusters = set(labels)
if len(duplicate_clusters) > 1 or -1 not in duplicate_clusters:
# unique topics
unique_topics = self.topic_vectors[np.where(labels == -1)[0]]
if -1 in duplicate_clusters:
duplicate_clusters.remove(-1)
# merge duplicate topics
for unique_label in duplicate_clusters:
unique_topics = np.vstack(
[unique_topics, self._l2_normalize(self.topic_vectors[np.where(labels == unique_label)[0]]
.mean(axis=0))])
self.topic_vectors = unique_topics
def _calculate_topic_sizes(self, hierarchy=False):
if hierarchy:
topic_sizes = pd.Series(self.doc_top_reduced).value_counts()
else:
topic_sizes = pd.Series(self.doc_top).value_counts()
return topic_sizes
def _reorder_topics(self, hierarchy=False):
if hierarchy:
self.topic_vectors_reduced = self.topic_vectors_reduced[self.topic_sizes_reduced.index]
self.topic_words_reduced = self.topic_words_reduced[self.topic_sizes_reduced.index]
self.topic_word_scores_reduced = self.topic_word_scores_reduced[self.topic_sizes_reduced.index]
old2new = dict(zip(self.topic_sizes_reduced.index, range(self.topic_sizes_reduced.index.shape[0])))
self.doc_top_reduced = np.array([old2new[i] for i in self.doc_top_reduced])
self.hierarchy = [self.hierarchy[i] for i in self.topic_sizes_reduced.index]
self.topic_sizes_reduced.reset_index(drop=True, inplace=True)
else:
self.topic_vectors = self.topic_vectors[self.topic_sizes.index]
self.topic_words = self.topic_words[self.topic_sizes.index]
self.topic_word_scores = self.topic_word_scores[self.topic_sizes.index]
old2new = dict(zip(self.topic_sizes.index, range(self.topic_sizes.index.shape[0])))
self.doc_top = np.array([old2new[i] for i in self.doc_top])
self.topic_sizes.reset_index(drop=True, inplace=True)
@staticmethod
def _calculate_documents_topic(topic_vectors, document_vectors, dist=True):
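        # Assign each document to the topic vector with the largest inner
        # product; documents are processed in batches so that the
        # (batch_size x n_topics) similarity matrix stays bounded in memory.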
batch_size = 10000
doc_top = []
if dist:
doc_dist = []
if document_vectors.shape[0] > batch_size:
current = 0
batches = int(document_vectors.shape[0] / batch_size)
extra = document_vectors.shape[0] % batch_size
for ind in range(0, batches):
res = np.inner(document_vectors[current:current + batch_size], topic_vectors)
doc_top.extend(np.argmax(res, axis=1))
if dist:
doc_dist.extend(np.max(res, axis=1))
current += batch_size
if extra > 0:
res = np.inner(document_vectors[current:current + extra], topic_vectors)
doc_top.extend(np.argmax(res, axis=1))
if dist:
doc_dist.extend(np.max(res, axis=1))
if dist:
doc_dist = np.array(doc_dist)
else:
res = np.inner(document_vectors, topic_vectors)
doc_top = np.argmax(res, axis=1)
if dist:
doc_dist = np.max(res, axis=1)
if dist:
return doc_top, doc_dist
else:
return doc_top
def _find_topic_words_and_scores(self, topic_vectors):
topic_words = []
topic_word_scores = []
res = np.inner(topic_vectors, self._get_word_vectors())
top_words = np.flip(np.argsort(res, axis=1), axis=1)
top_scores = np.flip(np.sort(res, axis=1), axis=1)
for words, scores in zip(top_words, top_scores):
topic_words.append([self._index2word(i) for i in words[0:50]])
topic_word_scores.append(scores[0:50])
topic_words = np.array(topic_words)
topic_word_scores = np.array(topic_word_scores)
return topic_words, topic_word_scores
def _assign_documents_to_topic(self, document_vectors, hierarchy=False):
if hierarchy:
doc_top_new, doc_dist_new = self._calculate_documents_topic(self.topic_vectors_reduced,
document_vectors,
dist=True)
self.doc_top_reduced = np.append(self.doc_top_reduced, doc_top_new)
self.doc_dist_reduced = np.append(self.doc_dist_reduced, doc_dist_new)
topic_sizes_new = pd.Series(doc_top_new).value_counts()
for top in topic_sizes_new.index.tolist():
self.topic_sizes_reduced[top] += topic_sizes_new[top]
self.topic_sizes_reduced.sort_values(ascending=False, inplace=True)
self._reorder_topics(hierarchy)
else:
doc_top_new, doc_dist_new = self._calculate_documents_topic(self.topic_vectors, document_vectors, dist=True)
self.doc_top = np.append(self.doc_top, doc_top_new)
self.doc_dist = np.append(self.doc_dist, doc_dist_new)
topic_sizes_new = pd.Series(doc_top_new).value_counts()
for top in topic_sizes_new.index.tolist():
self.topic_sizes[top] += topic_sizes_new[top]
self.topic_sizes.sort_values(ascending=False, inplace=True)
self._reorder_topics(hierarchy)
def _unassign_documents_from_topic(self, doc_indexes, hierarchy=False):
if hierarchy:
doc_top_remove = self.doc_top_reduced[doc_indexes]
            self.doc_top_reduced = np.delete(self.doc_top_reduced, doc_indexes, 0)
from . import DATA_DIR
import sys
import glob
from .background_systems import BackgroundSystemModel
from .export import ExportInventory
from inspect import currentframe, getframeinfo
from pathlib import Path
from scipy import sparse
import csv
import itertools
import numexpr as ne
import numpy as np
import xarray as xr
REMIND_FILES_DIR = DATA_DIR / "IAM"
class InventoryCalculation:
"""
Build and solve the inventory for results characterization and inventory export
Vehicles to be analyzed can be filtered by passing a `scope` dictionary.
Some assumptions in the background system can also be adjusted by passing a `background_configuration` dictionary.
.. code-block:: python
scope = {
'powertrain':['BEV', 'FCEV', 'ICEV-p'],
}
background_configuration = {
'country' : 'DE', # will use the network electricity losses of Germany
'custom electricity mix' : [[1,0,0,0,0,0,0,0,0,0], # in this case, 100% hydropower for the first year
[0.5,0.5,0,0,0,0,0,0,0,0]], # in this case, 50% hydro, 50% nuclear for the second year
'hydrogen technology' : 'Electrolysis',
'petrol technology': 'bioethanol - wheat straw',
'alternative petrol share':[0.1,0.2],
'battery technology': 'LFP',
'battery origin': 'NO'
}
InventoryCalculation(CarModel.array,
background_configuration=background_configuration,
scope=scope,
scenario="RCP26")
The `custom electricity mix` key in the background_configuration dictionary defines an electricity mix to apply,
    in the form of one or several arrays, depending on the number of years to analyze,
that should total 1, of which the indices correspond to:
- [0]: hydro-power
- [1]: nuclear
- [2]: natural gas
- [3]: solar power
- [4]: wind power
- [5]: biomass
- [6]: coal
- [7]: oil
- [8]: geothermal
- [9]: waste incineration
If none is given, the electricity mix corresponding to the country specified in `country` will be selected.
If no country is specified, Europe applies.
The `alternative petrol share` key contains an array with shares of alternative petrol fuel for each year, to create a custom blend.
If none is provided, a blend provided by the Integrated Assessment model REMIND is used, which will depend on the REMIND energy scenario selected.
:ivar array: array from the CarModel class
:vartype array: CarModel.array
:ivar scope: dictionary that contains filters for narrowing the analysis
:ivar background_configuration: dictionary that contains choices for background system
:ivar scenario: REMIND energy scenario to use ("BAU": business-as-usual or "RCP26": limits radiative forcing to 2.6 W/m^2.).
"BAU" selected by default.
.. code-block:: python
"""
def __init__(
self, array, scope=None, background_configuration=None, scenario="SSP2-Base"
):
if scope is None:
scope = {}
scope["size"] = array.coords["size"].values.tolist()
scope["powertrain"] = array.coords["powertrain"].values.tolist()
scope["year"] = array.coords["year"].values.tolist()
else:
scope["size"] = scope.get("size", array.coords["size"].values.tolist())
scope["powertrain"] = scope.get(
"powertrain", array.coords["powertrain"].values.tolist()
)
scope["year"] = scope.get("year", array.coords["year"].values.tolist())
self.scope = scope
self.scenario = scenario
if background_configuration is None:
self.background_configuration = {"country": "RER"}
else:
self.background_configuration = background_configuration
if "country" not in self.background_configuration:
self.background_configuration["country"] = "RER"
if "energy storage" not in self.background_configuration:
self.background_configuration["energy storage"] = {
"electric": {"type":"NMC",
"origin":"CN"}
}
else:
if "electric" not in self.background_configuration["energy storage"]:
self.background_configuration["energy storage"]["electric"] = {"type":"NMC",
"origin":"CN"}
else:
if "origin" not in self.background_configuration["energy storage"]["electric"]:
self.background_configuration["energy storage"]["electric"]["origin"] = "CN"
if "type" not in self.background_configuration["energy storage"]["electric"]:
self.background_configuration["energy storage"]["electric"]["type"] = "NMC"
array = array.sel(
powertrain=self.scope["powertrain"],
year=self.scope["year"],
size=self.scope["size"],
)
self.array = array.stack(desired=["size", "powertrain", "year"])
self.iterations = len(array.value.values)
self.number_of_cars = (
len(self.scope["size"])
* len(self.scope["powertrain"])
* len(self.scope["year"])
)
self.array_inputs = {
x: i for i, x in enumerate(list(self.array.parameter.values), 0)
}
self.array_powertrains = {
x: i for i, x in enumerate(list(self.array.powertrain.values), 0)
}
self.A = self.get_A_matrix()
self.inputs = self.get_dict_input()
self.add_additional_activities()
self.rev_inputs = self.get_rev_dict_input()
self.index_cng = [self.inputs[i] for i in self.inputs if "ICEV-g" in i[0]]
self.index_combustion_wo_cng = [
self.inputs[i]
for i in self.inputs
if any(
ele in i[0]
for ele in ["ICEV-p", "HEV-p", "PHEV-p", "ICEV-d", "PHEV-d", "HEV-d"]
)
]
self.index_diesel = [self.inputs[i] for i in self.inputs if "ICEV-d" in i[0]]
self.index_all_petrol = [
self.inputs[i]
for i in self.inputs
if any(ele in i[0] for ele in ["ICEV-p", "HEV-p", "PHEV-p"])
]
self.index_petrol = [self.inputs[i] for i in self.inputs if "ICEV-p" in i[0]]
self.index_hybrid = [
self.inputs[i]
for i in self.inputs
if any(ele in i[0] for ele in ["HEV-p", "HEV-d"])
]
self.index_plugin_hybrid = [
self.inputs[i] for i in self.inputs if "PHEV" in i[0]
]
self.index_fuel_cell = [self.inputs[i] for i in self.inputs if "FCEV" in i[0]]
self.index_emissions = [
self.inputs[i]
for i in self.inputs
if "air" in i[1][0]
and len(i[1]) > 1
and i[0]
not in [
"Carbon dioxide, fossil",
"Carbon monoxide, non-fossil",
"Methane, non-fossil",
"Particulates, > 10 um",
]
]
self.map_non_fuel_emissions = {
(
"Methane, fossil",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Methane direct emissions, suburban",
(
"Methane, fossil",
("air", "low population density, long-term"),
"kilogram",
): "Methane direct emissions, rural",
(
"Lead",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Lead direct emissions, suburban",
(
"Ammonia",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Ammonia direct emissions, suburban",
(
"NMVOC, non-methane volatile organic compounds, unspecified origin",
("air", "urban air close to ground"),
"kilogram",
): "NMVOC direct emissions, urban",
(
"PAH, polycyclic aromatic hydrocarbons",
("air", "urban air close to ground"),
"kilogram",
): "Hydrocarbons direct emissions, urban",
(
"Dinitrogen monoxide",
("air", "low population density, long-term"),
"kilogram",
): "Dinitrogen oxide direct emissions, rural",
(
"Nitrogen oxides",
("air", "urban air close to ground"),
"kilogram",
): "Nitrogen oxides direct emissions, urban",
(
"Ammonia",
("air", "urban air close to ground"),
"kilogram",
): "Ammonia direct emissions, urban",
(
"Particulates, < 2.5 um",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Particulate matters direct emissions, suburban",
(
"Carbon monoxide, fossil",
("air", "urban air close to ground"),
"kilogram",
): "Carbon monoxide direct emissions, urban",
(
"Nitrogen oxides",
("air", "low population density, long-term"),
"kilogram",
): "Nitrogen oxides direct emissions, rural",
(
"NMVOC, non-methane volatile organic compounds, unspecified origin",
("air", "non-urban air or from high stacks"),
"kilogram",
): "NMVOC direct emissions, suburban",
(
"Benzene",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Benzene direct emissions, suburban",
(
"Ammonia",
("air", "low population density, long-term"),
"kilogram",
): "Ammonia direct emissions, rural",
(
"Sulfur dioxide",
("air", "low population density, long-term"),
"kilogram",
): "Sulfur dioxide direct emissions, rural",
(
"NMVOC, non-methane volatile organic compounds, unspecified origin",
("air", "low population density, long-term"),
"kilogram",
): "NMVOC direct emissions, rural",
(
"Particulates, < 2.5 um",
("air", "urban air close to ground"),
"kilogram",
): "Particulate matters direct emissions, urban",
(
"Sulfur dioxide",
("air", "urban air close to ground"),
"kilogram",
): "Sulfur dioxide direct emissions, urban",
(
"Dinitrogen monoxide",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Dinitrogen oxide direct emissions, suburban",
(
"Carbon monoxide, fossil",
("air", "low population density, long-term"),
"kilogram",
): "Carbon monoxide direct emissions, rural",
(
"Methane, fossil",
("air", "urban air close to ground"),
"kilogram",
): "Methane direct emissions, urban",
(
"Carbon monoxide, fossil",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Carbon monoxide direct emissions, suburban",
(
"Lead",
("air", "urban air close to ground"),
"kilogram",
): "Lead direct emissions, urban",
(
"Particulates, < 2.5 um",
("air", "low population density, long-term"),
"kilogram",
): "Particulate matters direct emissions, rural",
(
"Sulfur dioxide",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Sulfur dioxide direct emissions, suburban",
(
"Benzene",
("air", "low population density, long-term"),
"kilogram",
): "Benzene direct emissions, rural",
(
"Nitrogen oxides",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Nitrogen oxides direct emissions, suburban",
(
"Lead",
("air", "low population density, long-term"),
"kilogram",
): "Lead direct emissions, rural",
(
"Benzene",
("air", "urban air close to ground"),
"kilogram",
): "Benzene direct emissions, urban",
(
"PAH, polycyclic aromatic hydrocarbons",
("air", "low population density, long-term"),
"kilogram",
): "Hydrocarbons direct emissions, rural",
(
"PAH, polycyclic aromatic hydrocarbons",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Hydrocarbons direct emissions, suburban",
(
"Dinitrogen monoxide",
("air", "urban air close to ground"),
"kilogram",
): "Dinitrogen oxide direct emissions, urban",
}
self.index_noise = [self.inputs[i] for i in self.inputs if "noise" in i[0]]
self.list_cat, self.split_indices = self.get_split_indices()
self.bs = BackgroundSystemModel()
def __getitem__(self, key):
"""
Make class['foo'] automatically filter for the parameter 'foo'
Makes the model code much cleaner
:param key: Parameter name
:type key: str
:return: `array` filtered after the parameter selected
"""
return self.temp_array.sel(parameter=key)
def get_results_table(self, method, level, split, sensitivity=False):
"""
Format an xarray.DataArray array to receive the results.
:param method: impact assessment method. Only "ReCiPe" method available at the moment.
:param level: "midpoint" or "endpoint" impact assessment level. Only "midpoint" available at the moment.
:param split: "components" or "impact categories". Split by impact categories only applicable when "endpoint" level is applied.
:return: xarrray.DataArray
"""
if split == "components":
cat = [
"direct",
"energy chain",
"maintenance",
"glider",
"EoL",
"powertrain",
"energy storage",
"road",
]
dict_impact_cat = self.get_dict_impact_categories()
        if not sensitivity:
response = xr.DataArray(
np.zeros(
(
self.B.shape[1],
len(self.scope["size"]),
len(self.scope["powertrain"]),
len(self.scope["year"]),
len(cat),
self.iterations,
)
),
coords=[
dict_impact_cat[method][level],
self.scope["size"],
self.scope["powertrain"],
self.scope["year"],
cat,
np.arange(0, self.iterations),
],
dims=[
"impact_category",
"size",
"powertrain",
"year",
"impact",
"value",
],
)
else:
params = ["reference"]
params.extend([a for a in self.array_inputs])
response = xr.DataArray(
np.zeros(
(
self.B.shape[1],
len(self.scope["size"]),
len(self.scope["powertrain"]),
len(self.scope["year"]),
self.iterations,
)
),
coords=[
dict_impact_cat[method][level],
self.scope["size"],
self.scope["powertrain"],
self.scope["year"],
params,
],
dims=["impact_category",
"size",
"powertrain",
"year",
"parameter"],
)
return response
def get_split_indices(self):
"""
Return list of indices to split the results into categories.
:return: list of indices
:rtype: list
"""
filename = "dict_split.csv"
filepath = DATA_DIR / filename
if not filepath.is_file():
raise FileNotFoundError("The dictionary of splits could not be found.")
with open(filepath) as f:
csv_list = [[val.strip() for val in r.split(";")] for r in f.readlines()]
(_, _, *header), *data = csv_list
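        # Each data row is expected to be:
        # category;sub_category;<search by>;<search for>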
csv_dict = {}
for row in data:
key, sub_key, *values = row
if key in csv_dict:
if sub_key in csv_dict[key]:
csv_dict[key][sub_key].append(
{"search by": values[0], "search for": values[1]}
)
else:
csv_dict[key][sub_key] = [
{"search by": values[0], "search for": values[1]}
]
else:
csv_dict[key] = {
sub_key: [{"search by": values[0], "search for": values[1]}]
}
flatten = itertools.chain.from_iterable
d = {}
l = []
for cat in csv_dict["components"]:
d[cat] = list(
flatten(
[
self.get_index_of_flows([l["search for"]], l["search by"])
for l in csv_dict["components"][cat]
]
)
)
l.append(d[cat])
list_ind = [d[x] for x in d]
maxLen = max(map(len, list_ind))
for row in list_ind:
while len(row) < maxLen:
row.extend([len(self.inputs) - 1])
return list(d.keys()), list_ind
def calculate_impacts(
self, method="recipe", level="midpoint", split="components", sensitivity=False
):
# Load the B matrix
self.B = self.get_B_matrix()
# Prepare an array to store the results
results = self.get_results_table(method, level, split, sensitivity=sensitivity)
# Fill in the A matrix with car parameters
self.set_inputs_in_A_matrix(self.array.values)
# Collect indices of activities contributing to the first level
arr = self.A[0, : -self.number_of_cars, -self.number_of_cars :].sum(axis=1)
        ind = np.nonzero(arr)
import numba as nb
import numpy as np
import warnings
from scipy import optimize
from .utils import (
preprocess_trajs,
get_nfeatures,
trajs_matmul,
symeig,
solve_stationary,
compute_ic,
compute_c0,
batch_compute_ic,
batch_compute_c0,
is_cutlag,
)
# -----------------------------------------------------------------------------
# linear VAC and IVAC
class LinearVAC:
r"""Solve linear VAC at a given lag time.
Linear VAC solves the equation
.. math::
C(\tau) v_i = \lambda_i C(0) v_i
for eigenvalues :math:`\lambda_i`
and eigenvector coefficients :math:`v_i`.
The correlation matrices are given by
.. math::
C_{ij}(\tau) = E[\phi_i(x_t) \phi_j(x_{t+\tau})]
C_{ij}(0) = E[\phi_i(x_t) \phi_j(x_t)]
where :math:`\phi_i` are the input features
and :math:`\tau` is the lag time parameter.
This implementation assumes that the constant feature can be
represented by a linear combination of the other features.
If this is not the case, addones=True will augment the input
features with the constant feature.
Parameters
----------
lag : int
Lag time, in units of frames.
nevecs : int, optional
Number of eigenvectors (including the trivial eigenvector)
to compute.
If None, use the maximum possible number of eigenvectors
(n_features).
addones : bool, optional
If True, add a feature of ones before solving VAC.
This increases n_features by 1.
This should only be set to True if the constant feature
is not contained within the span of the input features.
reweight : bool, optional
If True, reweight trajectories to equilibrium.
adjust : bool, optional
If True, adjust :math:`C(0)` to ensure that the trivial
eigenvector is exactly solved.
Attributes
----------
lag : int
VAC lag time in units of frames.
evals : (n_evecs,) ndarray
VAC eigenvalues in decreasing order.
This includes the trivial eigenvalue.
its : (n_evecs,) ndarray
Implied timescales corresponding to the eigenvalues,
in units of frames.
evecs : (n_features, n_evecs) ndarray
Coefficients of the VAC eigenvectors
corresponding to the eigenvalues.
cov : (n_features, n_features) ndarray
Covariance matrix of the fitted data.
trajs : list of (n_frames[i], n_features) ndarray
List of featurized trajectories used to solve VAC.
weights : list of (n_frames[i],) ndarray
Equilibrium weight of trajectories starting at each configuration.
"""
def __init__(
self,
lag,
nevecs=None,
addones=False,
reweight=False,
adjust=True,
):
self.lag = lag
self.nevecs = nevecs
self.addones = addones
self.reweight = reweight
self.adjust = adjust
self._isfit = False
def fit(self, trajs, weights=None):
"""Compute VAC results from input trajectories.
Calculate and store VAC eigenvalues, eigenvector coefficients,
and implied timescales from the input trajectories.
Parameters
----------
trajs : list of (n_frames[i], n_features) ndarray
List of featurized trajectories.
weights : int or list of (n_frames[i],) ndarray, optional
If int, the number of frames to drop from the end of each
trajectory, which must be greater than or equal to the VAC
lag time. This is equivalent to passing a list of uniform
weights but with the last int frames having zero weight.
If a list of ndarray, the weight of the trajectory starting
at each configuration. Note that the last frames of each
trajectory must have zero weight. This number of ending
frames with zero weight must be at least the VAC lag time.
"""
trajs = preprocess_trajs(trajs, addones=self.addones)
if self.reweight:
if weights is None:
weights = _ivac_weights(trajs, self.lag)
else:
if weights is not None:
raise ValueError("weights provided but not reweighting")
c0, evals, evecs = _solve_ivac(
trajs,
self.lag,
weights=weights,
adjust=self.adjust,
)
its = _vac_its(evals, self.lag)
self._set_fit_data(c0, evals, evecs, its, trajs, weights)
def transform(self, trajs):
"""Compute VAC eigenvectors on the input trajectories.
Use the fitted VAC eigenvector coefficients to calculate
the values of the VAC eigenvectors on the input trajectories.
Parameters
----------
trajs : list of (traj_len[i], n_features) ndarray
List of featurized trajectories.
Returns
-------
list of (traj_len[i], n_evecs) ndarray
VAC eigenvectors at each frame of the input trajectories.
"""
trajs = preprocess_trajs(trajs, addones=self.addones)
return trajs_matmul(trajs, self.evecs[:, : self.nevecs])
def _set_fit_data(self, cov, evals, evecs, its, trajs, weights):
"""Set fields computed by the fit method."""
self._isfit = True
self._cov = cov
self._evals = evals
self._evecs = evecs
self._its = its
self._trajs = trajs
self._weights = weights
@property
def cov(self):
if self._isfit:
return self._cov
raise ValueError("object has not been fit to data")
@property
def evals(self):
if self._isfit:
return self._evals
raise ValueError("object has not been fit to data")
@property
def evecs(self):
if self._isfit:
return self._evecs
raise ValueError("object has not been fit to data")
@property
def its(self):
if self._isfit:
return self._its
raise ValueError("object has not been fit to data")
@property
def trajs(self):
if self._isfit:
return self._trajs
raise ValueError("object has not been fit to data")
@property
def weights(self):
if self._isfit:
return self._weights
raise ValueError("object has not been fit to data")
class LinearIVAC:
r"""Solve linear IVAC for a given range of lag times.
Linear IVAC solves the equation
.. math::
\sum_\tau C(\tau) v_i = \lambda_i C(0) v_i
for eigenvalues :math:`\lambda_i`
and eigenvector coefficients :math:`v_i`.
The covariance matrices are given by
.. math::
C_{ij}(\tau) = E[\phi_i(x_t) \phi_j(x_{t+\tau})]
C_{ij}(0) = E[\phi_i(x_t) \phi_j(x_t)]
where :math:`\phi_i` are the input features
and :math:`\tau` is the lag time parameter.
This implementation assumes that the constant feature can be
represented by a linear combination of the other features.
If this is not the case, addones=True will augment the input
features with the constant feature.
Parameters
----------
minlag : int
Minimum lag time in units of frames.
maxlag : int
Maximum lag time (inclusive) in units of frames.
If minlag == maxlag, this is equivalent to VAC.
lagstep : int, optional
Number of frames between each lag time.
This must evenly divide maxlag - minlag.
The integrated covariance matrix is computed using lag times
(minlag, minlag + lagstep, ..., maxlag)
nevecs : int, optional
Number of eigenvectors (including the trivial eigenvector)
to compute.
If None, use the maximum possible number of eigenvectors
(n_features).
addones : bool, optional
If True, add a feature of ones before solving VAC.
This increases n_features by 1.
reweight : bool, optional
If True, reweight trajectories to equilibrium.
adjust : bool, optional
If True, adjust :math:`C(0)` to ensure that the trivial
eigenvector is exactly solved.
method : str, optional
Method to compute the integrated covariance matrix.
Currently, 'direct', 'fft' are supported.
Both 'direct' and 'fft' integrate features over lag times before
computing the correlation matrix.
Method 'direct' does so by summing the time-lagged features.
Its runtime increases linearly with the number of lag times.
Method 'fft' does so by performing an FFT convolution.
It takes around the same amount of time to run regardless
of the number of lag times, and is faster than 'direct' when
there is more than around 100 lag times.
Attributes
----------
minlag : int
Minimum IVAC lag time in units of frames.
maxlag : int
Maximum IVAC lag time in units of frames.
lagstep : int
Interval between IVAC lag times, in units of frames.
evals : (n_evecs,) ndarray
IVAC eigenvalues in decreasing order.
This includes the trivial eigenvalue.
its : (n_evecs,) ndarray
Implied timescales corresponding to the eigenvalues,
in units of frames.
evecs : (n_features, n_evecs) ndarray
Coefficients of the IVAC eigenvectors
corresponding to the eigenvalues.
cov : (n_features, n_features) ndarray
Covariance matrix of the fitted data.
trajs : list of (n_frames[i], n_features) ndarray
List of featurized trajectories used to solve IVAC.
weights : list of (n_frames[i],) ndarray
Equilibrium weight of trajectories starting at each configuration.
"""
def __init__(
self,
minlag,
maxlag,
lagstep=1,
nevecs=None,
addones=False,
reweight=False,
adjust=True,
method="fft",
):
if minlag > maxlag:
raise ValueError("minlag must be less than or equal to maxlag")
if (maxlag - minlag) % lagstep != 0:
raise ValueError("lag time interval must be a multiple of lagstep")
if method not in ["direct", "fft"]:
raise ValueError("method must be 'direct', or 'fft'")
self.minlag = minlag
self.maxlag = maxlag
self.lagstep = lagstep
self.lags = np.arange(self.minlag, self.maxlag + 1, self.lagstep)
self.nevecs = nevecs
self.addones = addones
self.reweight = reweight
self.adjust = adjust
self.method = method
self._isfit = False
def fit(self, trajs, weights=None):
"""Compute IVAC results from input trajectories.
Calculate and store IVAC eigenvalues, eigenvector coefficients,
and implied timescales from the input trajectories.
Parameters
----------
trajs : list of (n_frames[i], n_features) ndarray
List of featurized trajectories.
weights : int or list of (n_frames[i],) ndarray, optional
If int, the number of frames to drop from the end of each
trajectory, which must be greater than or equal to the
maximum IVAC lag time. This is equivalent to passing a list
of uniform weights but with the last int frames having zero
weight.
If a list of ndarray, the weight of the trajectory starting
at each configuration. Note that the last frames of each
trajectory must have zero weight. This number of ending
frames with zero weight must be at least the maximum IVAC
lag time.
"""
trajs = preprocess_trajs(trajs, addones=self.addones)
if self.reweight:
if weights is None:
weights = _ivac_weights(trajs, self.lags, method=self.method)
else:
if weights is not None:
raise ValueError("weights provided but not reweighting")
c0, evals, evecs = _solve_ivac(
trajs,
self.lags,
weights=weights,
adjust=self.adjust,
method=self.method,
)
its = _ivac_its(evals, self.minlag, self.maxlag, self.lagstep)
self._set_fit_data(c0, evals, evecs, its, trajs, weights)
def transform(self, trajs):
"""Compute IVAC eigenvectors on the input trajectories.
Use the fitted IVAC eigenvector coefficients to calculate
the values of the IVAC eigenvectors on the input trajectories.
Parameters
----------
trajs : list of (traj_len[i], n_features) ndarray
List of featurized trajectories.
Returns
-------
list of (traj_len[i], n_evecs) ndarray
IVAC eigenvectors at each frame of the input trajectories.
"""
trajs = preprocess_trajs(trajs, addones=self.addones)
return trajs_matmul(trajs, self.evecs[:, : self.nevecs])
def _set_fit_data(self, cov, evals, evecs, its, trajs, weights):
"""Set fields computed by the fit method."""
self._isfit = True
self._cov = cov
self._evals = evals
self._evecs = evecs
self._its = its
self._trajs = trajs
self._weights = weights
@property
def cov(self):
if self._isfit:
return self._cov
raise ValueError("object has not been fit to data")
@property
def evals(self):
if self._isfit:
return self._evals
raise ValueError("object has not been fit to data")
@property
def evecs(self):
if self._isfit:
return self._evecs
raise ValueError("object has not been fit to data")
@property
def its(self):
if self._isfit:
return self._its
raise ValueError("object has not been fit to data")
@property
def trajs(self):
if self._isfit:
return self._trajs
raise ValueError("object has not been fit to data")
@property
def weights(self):
if self._isfit:
return self._weights
raise ValueError("object has not been fit to data")
def _solve_ivac(
trajs,
lags,
*,
weights=None,
adjust=True,
method="fft",
):
"""Solve IVAC with the given parameters.
Parameters
----------
trajs : list of (n_frames[i], n_features) ndarray
List of featurized trajectories.
lags : int or 1d array-like of int
VAC lag time or IVAC lag times, in units of frames.
For IVAC, this should be a list of lag times that will be used,
not the 2 or 3 values specifying the range.
weights : int or list of (n_frames[i],) ndarray, optional
If int, the number of frames to drop from the end of each
trajectory, which must be greater than or equal to the maximum
IVAC lag time. This is equivalent to passing a list of uniform
weights but with the last int frames having zero weight.
If a list of ndarray, the weight of the trajectory starting at
each configuration. Note that the last frames of each trajectory
must have zero weight. This number of ending frames with zero
weight must be at least the maximum IVAC lag time.
adjust : bool, optional
If True, adjust :math:`C(0)` to ensure that the trivial
eigenvector is exactly solved.
method : str, optional
Method to compute the integrated covariance matrix.
Currently, 'direct', 'fft' are supported.
Both 'direct' and 'fft' integrate features over lag times before
computing the correlation matrix.
Method 'direct' does so by summing the time-lagged features.
Its runtime increases linearly with the number of lag times.
Method 'fft' does so by performing an FFT convolution.
It takes around the same amount of time to run regardless
of the number of lag times, and is faster than 'direct' when
there is more than around 100 lag times.
"""
ic = compute_ic(trajs, lags, weights=weights, method=method)
if adjust:
c0 = compute_c0(trajs, lags=lags, weights=weights, method=method)
else:
c0 = compute_c0(trajs, weights=weights, method=method)
evals, evecs = symeig(ic, c0)
return c0, evals, evecs
# -----------------------------------------------------------------------------
# linear VAC and IVAC scans
class LinearVACScan:
"""Solve linear VAC at each given lag time.
This class provides a more optimized way of solving linear VAC at a
set of lag times with the same input trajectories. The code
.. code-block:: python
scan = LinearVACScan(lags)
vac = scan[lags[i]]
is equivalent to
.. code-block:: python
vac = LinearVAC(lags[i])
Parameters
----------
lag : int
Lag time, in units of frames.
nevecs : int, optional
Number of eigenvectors (including the trivial eigenvector)
to compute.
If None, use the maximum possible number of eigenvectors
(n_features).
addones : bool, optional
If True, add a feature of ones before solving VAC.
This increases n_features by 1.
This should only be set to True if the constant feature
is not contained within the span of the input features.
reweight : bool, optional
If True, reweight trajectories to equilibrium.
adjust : bool, optional
If True, adjust :math:`C(0)` to ensure that the trivial
eigenvector is exactly solved.
method : str, optional
Method used to compute the time lagged covariance matrices.
Currently supported methods are 'direct',
which computes each time lagged covariance matrix separately,
and 'fft-all', which computes all time-lagged correlation
matrices at once by convolving each pair of features.
The runtime of 'fft-all' is almost independent of the number
of lag times, and is faster then 'direct' when scanning a
large number of lag times.
Attributes
----------
lags : 1d array-like of int
VAC lag time, in units of frames.
cov : (n_features, n_features) ndarray
Covariance matrix of the fitted data.
"""
def __init__(
self,
lags,
nevecs=None,
addones=False,
reweight=False,
adjust=True,
method="direct",
):
maxlag = np.max(lags)
if method not in ["direct", "fft-all"]:
raise ValueError("method must be 'direct' or 'fft-all'")
self.lags = lags
self.nevecs = nevecs
self.addones = addones
self.reweight = reweight
self.adjust = adjust
self.method = method
def fit(self, trajs, weights=None):
"""Compute VAC results from input trajectories.
Calculate and store VAC eigenvalues, eigenvector coefficients,
and implied timescales from the input trajectories.
Parameters
----------
trajs : list of (n_frames[i], n_features) ndarray
List of featurized trajectories.
weights : int or list of (n_frames[i],) ndarray, optional
If int, the number of frames to drop from the end of each
trajectory, which must be greater than or equal to the VAC
lag time. This is equivalent to passing a list of uniform
weights but with the last int frames having zero weight.
If a list of ndarray, the weight of the trajectory starting
at each configuration. Note that the last frames of each
trajectory must have zero weight. This number of ending
frames with zero weight must be at least the VAC lag time.
"""
trajs = preprocess_trajs(trajs, addones=self.addones)
nfeatures = get_nfeatures(trajs)
nlags = len(self.lags)
nevecs = self.nevecs
if nevecs is None:
nevecs = nfeatures
cts = batch_compute_ic(
trajs,
self.lags,
weights=weights,
method=self.method,
)
if self.adjust:
c0s = batch_compute_c0(
trajs,
lags=self.lags,
weights=weights,
method=self.method,
)
else:
c0s = batch_compute_c0(
trajs,
weights=weights,
method=self.method,
)
self.evals = np.empty((nlags, nevecs))
self.evecs = np.empty((nlags, nfeatures, nevecs))
self.its = np.empty((nlags, nevecs))
for n, (ct, c0, lag) in enumerate(zip(cts, c0s, self.lags)):
evals, evecs = symeig(ct, c0, nevecs)
self.evals[n] = evals
self.evecs[n] = evecs
self.its[n] = _vac_its(evals, lag)
if self.adjust:
self.cov = None
else:
self.cov = c0
self.trajs = trajs
self.weights = weights
def __getitem__(self, lag):
"""Get a fitted LinearVAC with the specified lag time.
Parameters
----------
lag : int
Lag time, in units of frames.
Returns
-------
LinearVAC
Fitted LinearVAC instance.
"""
i = np.argwhere(self.lags == lag)[0, 0]
vac = LinearVAC(lag, nevecs=self.nevecs, addones=self.addones)
vac._set_fit_data(
self.cov,
self.evals[i],
self.evecs[i],
self.its[i],
self.trajs,
self.weights,
)
return vac
class LinearIVACScan:
"""Solve linear IVAC for each pair of lag times.
This class provides a more optimized way of solving linear IVAC
with the same input trajectories
for all intervals within a set of lag times,
The code
.. code-block:: python
scan = LinearIVACScan(lags)
ivac = scan[lags[i], lags[j]]
is equivalent to
.. code-block:: python
ivac = LinearVAC(lags[i], lags[j])
Parameters
----------
lags : int
Lag times, in units of frames.
lagstep : int, optional
Number of frames between each lag time.
This must evenly divide maxlag - minlag.
The integrated covariance matrix is computed using lag times
(minlag, minlag + lagstep, ..., maxlag)
nevecs : int, optional
Number of eigenvectors (including the trivial eigenvector)
to compute.
If None, use the maximum possible number of eigenvectors
(n_features).
addones : bool, optional
If True, add a feature of ones before solving VAC.
This increases n_features by 1.
reweight : bool, optional
If True, reweight trajectories to equilibrium.
adjust : bool, optional
If True, adjust :math:`C(0)` to ensure that the trivial
eigenvector is exactly solved.
method : str, optional
Method to compute the integrated covariance matrix.
Currently, 'direct', 'fft', and 'fft-all' are supported.
Both 'direct' and 'fft' integrate features over lag times before
computing the correlation matrix. They scale linearly with
the number of parameter sets.
Method 'direct' does so by summing the time-lagged features.
Its runtime increases linearly with the number of lag times.
Method 'fft' does so by performing an FFT convolution.
It takes around the same amount of time to run regardless
of the number of lag times, and is faster than 'direct' when
        there are more than around 100 lag times.
Method 'fft-all' computes all time-lagged correlation matrices
at once by convolving each pair of features, before summing
up those correlation matrices to obtain integrated correlation
matrices. It is the slowest of these methods for calculating
a few sets of parameters, but is almost independent of the
number of lag times or parameter sets.
Attributes
----------
lags : 1d array-like of int
        IVAC lag times, in units of frames.
cov : (n_features, n_features) ndarray
Covariance matrix of the fitted data.
"""
def __init__(
self,
lags,
lagstep=1,
nevecs=None,
addones=False,
reweight=False,
adjust=True,
method="fft",
):
if np.any(lags[1:] < lags[:-1]):
raise ValueError("lags must be nondecreasing")
if np.any((lags[1:] - lags[:-1]) % lagstep != 0):
raise ValueError(
"lags time intervals must be multiples of lagstep"
)
maxlag = np.max(lags)
if method not in ["direct", "fft", "fft-all"]:
raise ValueError("method must be 'direct', 'fft', or 'fft-all")
self.lags = lags
self.lagstep = lagstep
self.nevecs = nevecs
self.addones = addones
self.reweight = reweight
self.adjust = adjust
self.method = method
def fit(self, trajs, weights=None):
"""Compute IVAC results from input trajectories.
Calculate and store IVAC eigenvalues, eigenvector coefficients,
and implied timescales from the input trajectories.
Parameters
----------
trajs : list of (n_frames[i], n_features) ndarray
List of featurized trajectories.
weights : int or list of (n_frames[i],) ndarray, optional
If int, the number of frames to drop from the end of each
trajectory, which must be greater than or equal to the
maximum IVAC lag time. This is equivalent to passing a list
of uniform weights but with the last int frames having zero
weight.
If a list of ndarray, the weight of the trajectory starting
at each configuration. Note that the last frames of each
trajectory must have zero weight. This number of ending
frames with zero weight must be at least the maximum IVAC
lag time.
"""
trajs = preprocess_trajs(trajs, addones=self.addones)
nfeatures = get_nfeatures(trajs)
nlags = len(self.lags)
nevecs = self.nevecs
if nevecs is None:
nevecs = nfeatures
params = [
np.arange(start + self.lagstep, end + 1, self.lagstep)
for start, end in zip(self.lags[:-1], self.lags[1:])
]
ics = list(
batch_compute_ic(
trajs,
params,
weights=weights,
method=self.method,
)
)
if self.adjust:
c0s = list(
batch_compute_c0(
trajs,
params,
weights=weights,
method=self.method,
)
)
else:
c0 = compute_c0(trajs, weights=weights, method=self.method)
denom = 1
self.evals = np.full((nlags, nlags, nevecs), np.nan)
self.evecs = np.full((nlags, nlags, nfeatures, nevecs), np.nan)
self.its = np.full((nlags, nlags, nevecs), np.nan)
for i in range(nlags):
ic = compute_ic(
trajs,
self.lags[i],
weights=weights,
method=self.method,
)
if self.adjust:
c0 = compute_c0(
trajs,
lags=self.lags[i],
weights=weights,
method=self.method,
)
denom = 1
evals, evecs = symeig(ic, c0, nevecs)
if self.lags[i] > 0:
self.evals[i, i] = evals
self.evecs[i, i] = evecs
self.its[i, i] = _ivac_its(
evals, self.lags[i], self.lags[i], self.lagstep
)
for j in range(i + 1, nlags):
ic += ics[j - 1]
if self.adjust:
count = (self.lags[j] - self.lags[j - 1]) // self.lagstep
c0 += c0s[j - 1] * count
denom += count
evals, evecs = symeig(ic, c0 / denom, nevecs)
self.evals[i, j] = evals
self.evecs[i, j] = evecs
self.its[i, j] = _ivac_its(
evals, self.lags[i], self.lags[j], self.lagstep
)
        if self.adjust:
            self.cov = None
        else:
            self.cov = c0
self.trajs = trajs
self.weights = weights
def __getitem__(self, lags):
"""Get a fitted LinearIVAC with the specified lag times.
Parameters
----------
lags : Tuple[int, int]
Minimum and maximum lag times, in units of frames.
Returns
-------
LinearIVAC
Fitted LinearIVAC instance.
"""
minlag, maxlag = lags
i = np.argwhere(self.lags == minlag)[0, 0]
j = np.argwhere(self.lags == maxlag)[0, 0]
ivac = LinearIVAC(
minlag,
maxlag,
lagstep=self.lagstep,
nevecs=self.nevecs,
addones=self.addones,
)
ivac._set_fit_data(
self.cov,
self.evals[i, j],
self.evecs[i, j],
self.its[i, j],
self.trajs,
self.weights,
)
return ivac
# -----------------------------------------------------------------------------
# reweighting
def _ivac_weights(trajs, lags, weights=None, method="fft"):
"""Estimate weights for IVAC.
Parameters
----------
trajs : list of (n_frames[i], n_features) ndarray
List of featurized trajectories.
The features must be able to represent constant features.
lags : array-like of int
Lag times at which to evaluate IVAC, in units of frames.
weights : int or list of (n_frames[i],) ndarray, optional
If int, the number of frames to drop from the end of each
trajectory, which must be greater than or equal to the maximum
IVAC lag time. This is equivalent to passing a list of uniform
weights but with the last int frames having zero weight.
If a list of ndarray, the weight of the trajectory starting at
each configuration. Note that the last frames of each trajectory
must have zero weight. This number of ending frames with zero
weight must be at least the maximum IVAC lag time.
method : string, optional
Method to use for calculating the integrated correlation matrix.
        Currently, 'direct' and 'fft' are supported. Method 'direct' is
usually faster for smaller numbers of lag times. The speed of
method 'fft' is mostly independent of the number of lag times
used.
Returns
-------
list of (n_frames[i],) ndarray
Weight of trajectory starting at each configuration.
"""
lags = np.atleast_1d(lags)
assert lags.ndim == 1
if weights is None:
weights = np.max(lags)
elif is_cutlag(weights):
assert weights >= np.max(lags)
ic = compute_ic(trajs, lags, weights=weights, method=method)
c0 = compute_c0(trajs, weights=weights)
w = solve_stationary(ic / len(lags), c0)
return _build_weights(trajs, w, weights)
def _build_weights(trajs, coeffs, old_weights):
"""Build weights from reweighting coefficients.
Parameters
----------
trajs : list of (n_frames[i], n_features) ndarray
List of featurized trajectories.
coeffs : (n_features,) ndarray
Expansion coefficients of the new weights.
old_weights : list of (n_frames[i],) ndarray
Initial weight of trajectory starting at each configuration,
which was used to estimate the expansion coefficients.
Returns
-------
list of (n_frames[i],) ndarray
Weight of trajectory starting at each configuration.
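    Examples
    --------
    A minimal sketch with one short trajectory, assuming the integer
    (cut-lag) convention for ``old_weights`` (values are illustrative):
    >>> trajs = [np.ones((5, 1))]
    >>> _build_weights(trajs, np.array([2.0]), 2)[0]
    array([0.33333333, 0.33333333, 0.33333333, 0.        , 0.        ])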
"""
weights = []
total = 0.0
if is_cutlag(old_weights):
for traj in trajs:
weight = traj @ coeffs
weight[len(traj) - old_weights :] = 0.0
total += np.sum(weight)
weights.append(weight)
else:
for traj, old_weight in zip(trajs, old_weights):
weight = traj @ coeffs
weight *= old_weight
total += np.sum(weight)
weights.append(weight)
# normalize weights so that their sum is 1
for weight in weights:
weight /= total
return weights
# -----------------------------------------------------------------------------
# implied timescales
def _vac_its(evals, lag):
"""Calculate implied timescales from VAC eigenvalues.
Parameters
----------
evals : (n_evecs,) array-like
VAC eigenvalues.
lag : int
VAC lag time in units of frames.
Returns
-------
(n_evecs,) ndarray
Estimated implied timescales.
This is NaN when the VAC eigenvalues are negative.
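    Examples
    --------
    A quick sanity check (illustrative, not from the original module): an
    eigenvalue of exp(-1) at lag 10 implies a timescale of 10 frames,
    since t = -lag / log(eval).
    >>> _vac_its(np.array([np.exp(-1.0)]), 10)
    array([10.])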
"""
its = np.full(len(evals), np.nan)
its[evals >= 1.0] = np.inf
mask = np.logical_and(0.0 < evals, evals < 1.0)
its[mask] = -lag / np.log(evals[mask])
return its
def _ivac_its(evals, minlag, maxlag, lagstep=1):
"""Calculate implied timescales from IVAC eigenvalues.
Parameters
----------
evals : (n_evecs,) array-like
IVAC eigenvalues.
minlag, maxlag : int
Minimum and maximum lag times (inclusive) in units of frames.
lagstep : int, optional
Number of frames between adjacent lag times.
Lag times are given by minlag, minlag + lagstep, ..., maxlag.
Returns
-------
(n_evecs,) ndarray
Estimated implied timescales.
This is NaN when the IVAC eigenvalues are negative
or when the calculation did not converge.
"""
its = np.full(len(evals), np.nan)
if minlag == 0:
# remove component corresponding to zero lag time
evals = evals - 1.0
minlag = lagstep
for i, val in enumerate(evals):
dlag = maxlag - minlag + lagstep
nlags = dlag / lagstep
assert nlags > 0
avg = val / nlags
if avg >= 1.0:
its[i] = np.inf
elif avg > 0.0:
            # eigenvalues are bounded by
# exp(-sigma * tmin) <= eval
# and
# nlags * exp(-sigma * tmax) <= eval <= nlags * exp(-sigma * tmin)
lower = max(
0.0,
-np.log(val) / minlag,
                -np.log(avg) / maxlag,
            )
#!/usr/bin/env python
'''
Created on April 9 2020
@authors: <NAME> & <NAME> & <NAME>
'''
import numpy as np
from scipy import ndimage
from numpy import linalg as LA
from scipy.special import erf
from pycs.sparsity.sparse2d.starlet import *
from pycs.misc.cosmostat_init import *
from pycs.misc.mr_prog import *
from pycs.misc.utilHSS import *
from pycs.misc.im1d_tend import *
from pycs.misc.stats import *
from pycs.sparsity.sparse2d.dct import dct2d, idct2d
from pycs.sparsity.sparse2d.dct_inpainting import dct_inpainting
from pycs.misc.im_isospec import *
def get_ima_spectrum_map(Px, nx, ny):
"""
Create an isotropic image from a power spectrum
    Ima[i+nx/2, j+ny/2] = Px[ sqrt(i^2 + j^2) ]
Parameters
----------
    Px : np.ndarray
        1D power spectrum.
nx,ny : int
image size to be created.
Returns
-------
power_map : np.ndarray
2D image.
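    Examples
    --------
    A minimal illustration with a flat input spectrum (hypothetical
    values, shown for shape checking only):
    >>> ima = get_ima_spectrum_map(np.ones(64), 16, 16)
    >>> ima.shape
    (16, 16)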
"""
Np = Px.shape[0]
# print("nx = ", nx, ", ny = ", ny, ", np = ", Np)
k_map = np.zeros((nx, ny))
power_map = np.zeros((nx, ny) )
# info(k_map)
for (i,j), val in np.ndenumerate(power_map):
k1 = i - nx/2.0
k2 = j - ny/2.0
k_map[i, j] = (np.sqrt(k1*k1 + k2*k2))
if k_map[i,j]==0:
power_map[i, j] = 0.
else:
ip = int(k_map[i, j])
if ip < Np:
power_map[i, j] = Px[ip]
return power_map
class shear_data():
'''
Class for input data, containing the shear components g1,g2, the covariance matrix,
    and the theoretical convergence power spectrum.
'''
g1=0 # shear 1st component
def __init__(self): # __init__ is the constructor
self.g1=0
g2=0 # shear 2nd component
Ncov=0 # diagonal noise cov mat of g = g1 + 1j g2, of same size as g1 and g2
# the noise cov mat relative to g1 alone is Ncov /2. (same for g2)
mask=0 # mask
ktr=0 # true kappa (...for simulations)
g1t=0 # true g1
g2t=0 # true g2
ps1d=0 # theoretical convergence power spectrum
nx=0
ny=0
# file names
DIR_Input=0 # dir input data
g1_fn=0 # g1 file name
g2_fn=0 # g2 file name
    ktr_fn=0     # for simulation only, true convergence map
ps1d_fn=0 # Convergence map 1D theoretical power spectrum used for Wiener filtering
ncov_fn=0 # covariance filename
def get_shear_noise(self,FillMask=False):
"""
Return a noise realisation using the covariance matrix.
        If FillMask is True, the unobserved area, where the covariance is infinite,
        will be filled with random values whose variance equals the maximum
        variance in the observed area, i.e. where the mask is 1.
        Parameters
        ----------
        FillMask : bool, optional
            If True, fill the masked area with noise. The default is False.
Returns
-------
n1 : np.ndarray
noise realisation for g1.
n2 : np.ndarray
noise realisation for g2.
"""
Mat = np.sqrt(self.Ncov / 2.)
if FillMask == True:
ind = np.where(self.mask == 1)
MaxCov = np.max(Mat[ind])
ind = np.where(self.mask == 0)
Mat[ind] = MaxCov
#info(Mat, name="cov")
#info(Mat*self.mask, name="cov")
#print("MaxCov = ", MaxCov)
#tvima(self.mask)
n1 = np.random.normal(loc=0.0, scale=Mat)
n2 = np.random.normal(loc=0.0, scale=Mat)
return n1,n2
# class shear_simu():
# def __init__(self):
# a=0
# a = np.random.normal(loc=0.0, scale=10.0, size=[200])
class massmap2d():
""" Mass Mapping class
This class contains the tools to reconstruct mass maps from shear measurements
"""
kernel1 = 0 # internal variable for wiener filtering
kernel2 = 0 # internal variable for wiener filtering
nx=0 # image size (number of lines)
ny=0 # image size (number of column)
# WT=0 # Starlet wavelet Class defined in starlet.py
Verbose = False # Parameter to switch on/off print
    DEF_niter=12     # Default number of iterations in the iterative methods.
DEF_Nrea=10 # Default number of realizations.
DEF_Nsigma=3. # Default detection level in wavelet space
niter_debias =0 # For space recovery using soft thresholding, a final
                     # debiasing step could be useful. By default we don't apply any
# debiasing.
DEF_FirstDetectScale=1 # default first detection scale in wavelet space.
                           # very often, the noise strongly dominates
                           # the signal, and the first scale or the first
                           # few scales can be removed.
# DEF_FirstDetectScale=2 => the two finest scales
# are removed
WT_Sigma = 0 # Noise standard deviation in the wavelet space
WT_ActiveCoef=0 # Active wavelet coefficients
SigmaNoise = 0 # Noise standard deviation in case of Gaussian white noise
def __init__(self, name='mass'): # __init__ is the constructor
self.WT = starlet2d() # Starlet wavelet Class defined in starlet.py
def init_massmap(self,nx,ny,ns=0):
"""
Initialize the class for a given image size and a number of scales ns
to be used in the wavelet decomposition.
If ns ==0, the number of scales is automatically calculated in the
starlet initialization (see init_starlet, field self.WT.ns).
Parameters
----------
nx, ny : int
Image size
ns : int, optional
Number of scales. The default is 0.
Returns
-------
None.
"""
self.nx=nx
self.ny=ny
self.WT = starlet2d(gen2=True,l2norm=True, bord=1, verb=False)
self.WT.init_starlet(nx, ny, nscale=ns)
self.WT.name="WT-MassMap"
k1, k2 = np.meshgrid(np.fft.fftfreq(nx), np.fft.fftfreq(ny))
denom = k1*k1 + k2*k2
denom[0, 0] = 1 # avoid division by 0
self.kernel1 = (k1**2 - k2**2)/denom
self.kernel2 = (2*k1*k2)/denom
if self.Verbose:
print("Init Mass Mapping: Nx = ", nx, ", Ny = ", ny, ", Nscales = ", self.WT.ns)
def inpaint(self, kappa, mask, niter=DEF_niter):
"""
Apply the sparse inpainting recovery technique to an image using the
Discrete Cosine Transform.
Parameters
----------
kappa : np.ndarray
Input data array
        mask : np.ndarray
            Mask of observed pixels (1 = observed, 0 = missing).
        niter : int, optional
            Number of iterations. The default is self.DEF_niter.
        Returns
        -------
        np.ndarray
            Inpainted image.
"""
return dct_inpainting(kappa, mask, niter=niter)
def get_theo_kappa_power_spectum(self, d, niter=None, PowSpecNoise=None, FirstFreqNoNoise=1):
"""
        Estimate the theoretical convergence power spectrum from the data themselves.
Two methods are available:
        Method 1: Estimate inpainted ke and kb using the iterative Kaiser-Squires method.
            If PowSpecNoise is None, assume that there is no B-mode,
            and the B-mode is used as a noise power spectrum estimation.
powspec_Theo_E = powspec(ke) - powspec(kb)
powspec_Theo_B = 0
powspec_Theo_Noise = powspec(kb)
Method 2: Use the input noise power spectrum
Then:
powspec_Theo_E = powspec(ke) - PowSpecNoise
powspec_Theo_B = powspec(kb) - PowSpecNoise
Parameters
----------
d : Class shear_data
            Input class describing the observations.
niter : int, optional
Number of iterations in the iKS method. The default is None.
PowSpecNoise : np.ndarray, optional
            Noise power spectrum. The default is None.
FirstFreqNoNoise : int, optional
            At very low frequencies, the signal dominates and we generally prefer not
            to apply any denoising correction. So we will have:
powspec_Theo_Noise[0:FirstFreqNoNoise] = 0
The default is 1.
Returns
-------
        pke : 1D np.ndarray
            Estimated E-mode convergence power spectrum.
        pkb : 1D np.ndarray
            Estimated B-mode power spectrum.
        pn : 1D np.ndarray
            Noise power spectrum used for the correction.
"""
k = self.iks(d.g1, d.g2, d.mask, niter=niter)
ke = k.real
kb = k.imag
pe = im_isospec(ke)
pb = im_isospec(kb)
#fsky = mask.sum()/mask.size
if PowSpecNoise is None:
pn = pb
else:
pn = PowSpecNoise
pn[0:FirstFreqNoNoise] = 0.
pke = pe - pn
pkb = pb - pn
        # Denoise the estimated powspec
UseTendancyFiltering=False
if UseTendancyFiltering is True:
e1 = reverse(pke)
fe1 = im1d_tend(e1)
pke = reverse(fe1)
b1 = reverse(pkb)
fb1 = im1d_tend(b1) # , opt='-T50'))
pkb = reverse(fb1)
pke[pke < 0] = 0
        # the smoothing does not work very well above nx/2
        # because of the increase of the variance (frequencies at the corner),
        # so we apodize the corner
npix = pke.shape
npix=npix[0]
fp = int(npix / np.sqrt(2.))
min_end = pke[fp]
        pke[fp::] = min_end
# pke[pkb < 0] = 0
pkb[pkb < 0] = 0
#pe = pe - pn/fsky
#pb = pb - pn/fsky
#pef=mr_prog(pe, prog="mr1d_filter -m5 -P ")
#pbf=mr_prog(pb, prog="mr1d_filter -m5 -P ")
tv=0
if tv:
plot(pke)
plot(pkb)
plot(pn)
plot(d.ps1d)
return pke, pkb, pn
def get_tps(self, d, niter=None, Nrea=None):
return self.get_theo_kappa_power_spectum(d,niter=niter)
def kappa_to_gamma(self, kappa):
"""
        This routine computes the shear field from the convergence
map (no B-mode).
Parameters
----------
kappa: np.ndarray
Input convergence data array
Returns
-------
g1,g2: np.ndarray
            output shear field components
Notes
-----
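        Examples
        --------
        Round-trip consistency with the inverse operator (an illustrative
        sketch; a random map stands in for real data, and the image mean
        is lost because the k=0 mode is zeroed):
        >>> m = massmap2d()
        >>> kappa = np.random.randn(64, 64)
        >>> g1, g2 = m.kappa_to_gamma(kappa)
        >>> krec = m.gamma_to_kappa(g1, g2)
        >>> bool(np.allclose(krec, kappa - kappa.mean(), atol=1e-10))
        True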
"""
(Nx,Ny) = np.shape(kappa)
if self.nx != Nx or self.ny != Ny:
self.init_massmap(Nx,Ny)
k = np.fft.fft2(kappa)
g1 = np.fft.ifft2(self.kernel1 * k)
g2 = np.fft.ifft2(self.kernel2 * k)
return g1.real - g2.imag, g2.real + g1.imag
# Fast call
def k2g(self, kappa):
return self.kappa_to_gamma(kappa)
def gamma_to_cf_kappa (self, g1, g2):
"""
This routine performs a direct inversion from shear to convergence,
        it returns a complex field, with the real part being the convergence (E mode),
the imaginary part being the B mode.
Parameters
----------
g1, g2: np.ndarray
Input shear field
Returns
-------
kappa: np.ndarray
output complex convergence field
Notes
-----
"""
if self.WT.nx == 0 or self.WT.ny == 0:
(nx,ny) = np.shape(g1)
self.WT.init_starlet(nx,ny,gen2=1,l2norm=1, name="WT-MassMap")
g = g1 + 1j*g2
return np.fft.ifft2((self.kernel1 - 1j*self.kernel2)* np.fft.fft2(g))
def gamma_to_kappa (self, g1, g2):
"""
Same as gamma_to_cf_kappa, but returns only the E mode (convergence)
Parameters
----------
g1, g2: np.ndarray
Input shear field
Returns
-------
kappa: np.ndarray
output convergence field
Notes
-----
"""
k = self.gamma_to_cf_kappa (g1, g2)
return k.real
# Fast interactive call to gamma_to_kappa
def g2k(self, gam1, gam2):
return self.gamma_to_kappa(gam1, gam2)
def smooth(self, map, sigma=2.):
"""
Gaussian smoothing of an image.
Parameters
----------
map : 2D np.ndarray
input image.
sigma : float, optional
Standard deviation of the used Gaussian kernel. The default is 2..
Returns
-------
np.ndarray
Smoother array.
"""
return ndimage.filters.gaussian_filter(map,sigma=sigma)
def kaiser_squires(self, gam1, gam2, sigma=2.):
"""
This routine performs a direct inversion from shear to convergence,
followed by a Gaussian filtering.
This is the standard Kaiser-Squires method.
Parameters
----------
gam1, gam2: np.ndarray
Input shear field
sigma: float, optional
Default is 2.
Returns
-------
kappa: np.ndarray
output convergence field
Notes
-----
"""
ks = self.gamma_to_cf_kappa(gam1, gam2)
ksg = ndimage.filters.gaussian_filter(ks.real,sigma=sigma)
return ksg
# Fast interactive call to kaiser_squires
def ks(self, gam1,gam2,sigma=2.):
        return self.kaiser_squires(gam1, gam2, sigma=sigma)
def eb_kaiser_squires(self, gam1, gam2, sigma=2.):
"""
        Same as kaiser_squires, but also returns the B-mode.
Parameters
----------
gam1, gam2: np.ndarray
Input shear field
Returns
-------
E_kappa: np.ndarray
output convergence field (E mode)
B_kappa: np.ndarray
output convergence field (B mode)
Notes
-----
"""
ks = self.gamma_to_cf_kappa(gam1, gam2)
ksg = ndimage.filters.gaussian_filter(ks.real,sigma=sigma)
ksbg = ndimage.filters.gaussian_filter(ks.imag,sigma=sigma)
return ksg, ksbg
def H_operator_eb2g(self, ka_map, kb_map):
"""
This routine converts (E,B) modes to shear
Parameters
----------
ka_map, kb_map : np.ndarray
(E,B) mode
Returns
-------
        (g1,g2): np.ndarray
            output shear field
"""
# ka_map and kb_map should be of the same size
[nx,ny] = ka_map.shape
g1_map = np.zeros((nx,ny))
g2_map = np.zeros((nx,ny))
ka_map_fft = np.fft.fft2(ka_map)
kb_map_fft = np.fft.fft2(kb_map)
f1, f2 = np.meshgrid(np.fft.fftfreq(nx),np.fft.fftfreq(ny))
p1 = f1 * f1 - f2 * f2
p2 = 2 * f1 * f2
f2 = f1 * f1 + f2 * f2
        f2[0,0] = 1  # avoid division by zero
kafc = (p1 * ka_map_fft - p2 * kb_map_fft) / f2
kbfc = (p1 * kb_map_fft + p2 * ka_map_fft) / f2
g1_map[:,:] = np.fft.ifft2(kafc).real
g2_map[:,:] = np.fft.ifft2(kbfc).real
return g1_map, g2_map
    # Fast interactive call to H_operator_eb2g
def eb2g(self, ka_map, kb_map):
return self.H_operator_eb2g(ka_map, kb_map)
def H_adjoint_g2eb(self, g1_map, g2_map):
"""
        This routine reconstructs the (E,B) modes from the shear field.
Parameters
----------
g1_map, g2_map : 2D np.ndarray
shear field.
Returns
-------
        kappa1, kappa2 : np.ndarray
            output (E,B) convergence modes
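        Examples
        --------
        Up to the (zeroed) k=0 mode, this operator inverts H_operator_eb2g,
        which can be checked numerically (illustrative sketch with random
        fields standing in for real data):
        >>> m = massmap2d()
        >>> ka, kb = np.random.randn(32, 32), np.random.randn(32, 32)
        >>> g1, g2 = m.eb2g(ka, kb)
        >>> e, b = m.g2eb(g1, g2)
        >>> bool(np.allclose(e, ka - ka.mean(), atol=1e-10))
        True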
"""
[nx,ny] = g1_map.shape
kappa1 = np.zeros((nx,ny))
kappa2 = np.zeros((nx,ny))
g1_map_ifft = np.fft.ifft2(g1_map)
g2_map_ifft = np.fft.ifft2(g2_map)
f1, f2 = np.meshgrid(np.fft.fftfreq(nx),np.fft.fftfreq(ny))
p1 = f1 * f1 - f2 * f2
p2 = 2 * f1 * f2
f2 = f1 * f1 + f2 * f2
f2[0,0] = 1
g1fc = (p1 * g1_map_ifft + p2 * g2_map_ifft) / f2
g2fc = (p1 * g2_map_ifft - p2 * g1_map_ifft) / f2
kappa1[:,:] = np.fft.fft2(g1fc).real
kappa2[:,:] = np.fft.fft2(g2fc).real
return kappa1, kappa2
    # Fast interactive call to H_adjoint_g2eb
def g2eb(self, g1_map, g2_map):
return self.H_adjoint_g2eb(g1_map, g2_map)
def get_wt_noise_level(self, InshearData,Nrea=DEF_Nrea):
"""
        Computes the noise standard deviation for each wavelet coefficient of
the convergence map, using Nrea noise realisations of the shear field
Parameters
----------
InshearData : Class shear_data
            Input class describing the observations.
        Nrea : int, optional
            Number of noise realisations. The default is 10.
Returns
-------
WT_Sigma : 3D np.ndarray
WT_Sigma[s,i,j] is the noise standard deviation at scale s and position
(i,j) of the convergence.
"""
mask = InshearData.mask
Ncov = InshearData.Ncov
for i in np.arange(Nrea):
n1,n2 = InshearData.get_shear_noise(FillMask=True)
ke, kb = self.g2eb(n1,n2)
self.WT.transform(ke)
if i == 0:
WT_Sigma = np.zeros((self.WT.ns,self.WT.nx,self.WT.ny))
WT_Sigma += (self.WT.coef)** 2. # by definition the mean of wt
# is zero.
WT_Sigma = np.sqrt(WT_Sigma / Nrea)
# info(WT_Sigma)
return WT_Sigma
def get_active_wt_coef(self, InshearData, UseRea=False, SigmaNoise=1., Nsigma=None, Nrea=None, WT_Sigma=None, FirstDetectScale=DEF_FirstDetectScale, OnlyPos=False, ComputeWTCoef=True):
"""
        Estimate the active set of coefficients, i.e. the coefficients of the
        convergence map with an absolute value larger than Nsigma * NoiseStandardDeviation.
It returns a cube A[s,i,j] containing 0 or 1.
If A[s,i,j] == 1 then we consider we have a detection at scale s and position (i,j).
Parameters
----------
InshearData : Class shear_data
            Input class describing the observations.
UseRea : bool, optional
If true, make noise realisation to estimate the detection level in
wavelet space. The default is False.
Nrea : int, optional
Number of noise realisations. The default is None.
WT_Sigma : 3D np.ndarray, optional
WT_Sigma[s,i,j] is the noise standard deviation at scale s and position
            (i,j) of the convergence. If it is not given, the function get_wt_noise_level
is used to calculate it.
SigmaNoise: int, optional
            When UseRea==False, assume Gaussian noise with standard deviation equal to SigmaNoise.
Default is 1
Nsigma : int, optional
level of detection (Nsigma * noise_std). The default is None.
FirstDetectScale: int, optional
            do not detect coefficients at scales < FirstDetectScale
OnlyPos: Bool, optional
            Detect only positive wavelet coefficients. Default is False.
ComputeWTCoef: bool, optional
if true, recompute the wavelet coefficient from the shear data.
Default is true.
Returns
-------
WT_Active : 3D np.ndarray
            WT_Active[s,i,j] = 1 if a coefficient of the convergence map is larger
            than Nsigma * noise std
"""
if ComputeWTCoef:
e,b = self.g2eb(InshearData.g1, InshearData.g2)
self.WT.transform(e)
WT_Support = self.WT.coef * 0.
Last = self.WT.ns - 1
        if Nsigma is None:
            Nsigma = self.DEF_Nsigma
        if Nrea is None:
            Nrea = self.DEF_Nrea
        if FirstDetectScale is None:
            FirstDetectScale = self.DEF_FirstDetectScale
        if UseRea and WT_Sigma is None:
            WT_Sigma = self.get_wt_noise_level(InshearData, Nrea=Nrea)
for j in range(Last):
wtscale=self.WT.get_scale(j)
#if j == 2:
# tvilut(wtscale,title='scale2')
if j == 0:
Nsig= Nsigma + 1
else:
Nsig = Nsigma
#vThres = WT_Sigma * Nsigma * self.WT.TabNorm[j]
if OnlyPos is False:
if UseRea :
wsigma=WT_Sigma[j,:,:]
ind= np.where( np.abs(wtscale) > wsigma * Nsig * self.WT.TabNorm[j])
# WT_Support[j,:,:] = np.where( np.abs(self.WT.coef[j,:,:]) > WT_Sigma[j,:,:] * Nsig * self.WT.TabNorm[j], 1, 0)
else:
ind= np.where( np.abs(wtscale) > SigmaNoise * Nsig * self.WT.TabNorm[j])
#WT_Support[j,:,:] = np.where( np.abs(self.WT.coef[j,:,:]) > SigmaNoise * Nsig * self.WT.TabNorm[j], 1, 0)
else:
if UseRea :
wsigma=WT_Sigma[j,:,:]
# WT_Support[j,:,:] = np.where( self.WT.coef[j,:,:] > WT_Sigma[j,:,:] * Nsig * self.WT.TabNorm[j], 1, 0)
ind = np.where( wtscale > wsigma * Nsig * self.WT.TabNorm[j])
else:
T = SigmaNoise * Nsig * self.WT.TabNorm[j]
ind = np.where( wtscale > T)
# WT_Support[j,:,:] = np.where( self.WT.coef[j,:,:] > SigmaNoise * Nsig * self.WT.TabNorm[j], 1, 0)
wtscale[:,:]=0
wtscale[ind]=1
#if j == 2:
# tvilut(wtscale,title='sup2')
WT_Support[j,:,:]= wtscale
if FirstDetectScale > 0:
WT_Support[0:FirstDetectScale,:,:] = 0
WT_Support[Last,:,:]=1
self.WT_ActiveCoef=WT_Support
return WT_Support
def get_noise_powspec(self, CovMat,mask=None,nsimu=100, inpaint=False):
"""
        Build the noise power spectrum from the covariance map of the gamma field.
Parameters
----------
        CovMat : 2D np.ndarray
            covariance matrix of the shear field.
mask : 2D np.ndarray, optional
Apply a mask to the simulated noise realisation. The default is None.
nsimu : int, optional
            Number of realisations to estimate the noise power spectrum. The default is 100.
inpaint: Bool, optional
Compute the power spectrum on inpainted Kaiser-Squires maps rather than on the masked
            maps. If inpaint==False, the estimated noise power spectrum is biased and
            should be corrected for the fraction of sky not observed (i.e. fsky).
            The default is False.
Returns
-------
px : 1D np.ndarray
Estimated Power spectrum from noise realizations.
"""
if mask is None:
m = 1.
else:
m = mask
for i in np.arange(nsimu):
n1 = np.random.normal(loc=0.0, scale=np.sqrt(CovMat/2.))*m
n2 = np.random.normal(loc=0.0, scale=np.sqrt(CovMat/2.))*m
if mask is not None and inpaint is True:
k = self.iks(n1,n2,mask)
else:
k = self.gamma_to_cf_kappa(n1,n2)
p = im_isospec(k.real)
if i==0:
Np= p.shape[0]
TabP = np.zeros([nsimu,Np], dtype = float)
TabP[i,:] = p
px = np.mean(TabP, axis=0)
return px
def mult_wiener(self, map, WienerFilterMap):
"""" apply one wiener step in the iterative wiener filtering """
return np.fft.ifft2(np.fft.fftshift(WienerFilterMap * np.fft.fftshift(np.fft.fft2(map))))
def wiener(self, gamma1, gamma2, PowSpecSignal, PowSpecNoise):
"""
        Compute the standard Wiener mass map.
Parameters
----------
        gamma1, gamma2: 2D np.ndarray
            shear field.
        PowSpecSignal : 1D np.ndarray
            Signal theoretical power spectrum.
        PowSpecNoise: 1D np.ndarray, optional
            noise theoretical power spectrum.
Returns
-------
        2D np.ndarray (complex)
            (E,B) reconstructed modes; the convergence is the E mode (real part).
"""
(nx,ny) = gamma1.shape
if self.Verbose:
print("Wiener filtering: ", nx, ny)
#if mask is None:
# print("Wiener NO MASK")
# info(gamma1, name="Wiener g1: ")
# info(gamma2, name="Wiener g2: ")
# info(PowSpecSignal, name="Wiener PowSpecSignal: ")
# info(Ncv, name="Wiener Ncv: ")
# if isinstance(PowSpecNoise, int):
Ps_map = get_ima_spectrum_map(PowSpecSignal,nx,ny)
Pn_map = get_ima_spectrum_map(PowSpecNoise,nx,ny)
Den = (Ps_map + Pn_map)
ind = np.where(Den !=0)
Wfc = np.zeros((nx,ny))
Wfc[ind] = Ps_map[ind] / Den[ind]
t= self.gamma_to_cf_kappa (gamma1, gamma2) # xg + H^T(eta / Sn * (y- H * xg))
kw = self.mult_wiener(t,Wfc)
retr = np.zeros((nx,ny))
        reti = np.zeros((nx,ny))
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 22 20:04:52 2020
@author: takashi-154
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import tifffile as tiff
import astropy.io.fits as iofits
from scipy import optimize
from skimage import exposure
class DynamicBackgroundEstimation:
"""
    Collection of the main processing functions.
"""
def __init__(self):
"""
Parameters
----------
name : str
            Program name.
"""
self.name = 'DynamicBackgroundEstimation'
def initialize_image(self):
"""
        Initialize the image to be processed.
        Returns
        -------
        img_array : np.ndarray
            Image as a numpy array (float, 32-bit)
"""
img_array = np.full((200,300,3), np.nan, dtype=np.float32)
print('Fin initializing image.')
return(img_array)
def read_image(self, name:str):
"""
        Read the image to be processed.
        Parameters
        ----------
        name : str
            File path of the target image (TIFF and FITS supported)
        Returns
        -------
        img_array : np.ndarray
            Image as a numpy array (float, 32-bit)
"""
img_array = None
if os.path.isfile(name):
path, ext = os.path.splitext(name)
ext_lower = str.lower(ext)
if ext_lower in ('.tif', '.tiff'):
print('reading tif image...')
img_array = tiff.imread(name).astype(np.float32)
elif ext_lower in ('.fits', '.fts', '.fit'):
print('reading fits image...')
with iofits.open(name) as f:
img_array = np.fliplr(np.rot90(f[0].data.T, -1)).astype(np.float32)
else:
print('cannot read image.')
else:
print('No such file.')
print('Fin reading image.')
return(img_array)
def save_image(self, name:str, image:np.ndarray, dtype:str='float32'):
"""
        Save a numpy-array image in the specified format.
        Parameters
        ----------
        name : str
            Destination file path (TIFF and FITS supported)
        image : np.ndarray
            Numpy array of the image to save
        dtype : str, default 'float32'
            Output data type (the default is float32)
"""
path, ext = os.path.splitext(name)
ext_lower = str.lower(ext)
image_cast = image.astype('float32')
round_int = lambda x: np.round((x * 2 + 1) // 2)
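        # round_int rounds half up via floor((2x + 1) / 2) rather than
        # numpy's default round-half-to-even.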
if dtype == 'float32':
pass
elif dtype == 'uint32':
image_cast = ((image_cast - np.min(image_cast)) / (np.max(image_cast) - np.min(image_cast))) * np.iinfo(np.uint32).max
image_cast = round_int(image_cast).astype(np.uint32)
elif dtype == 'uint16':
image_cast = ((image_cast - np.min(image_cast)) / (np.max(image_cast) - np.min(image_cast))) * np.iinfo(np.uint16).max
image_cast = round_int(image_cast).astype(np.uint16)
else:
pass
if ext_lower in ('.tif', '.tiff'):
print('saving tif image...')
tiff.imsave(name, image_cast)
elif ext_lower in ('.fits', '.fts', '.fit'):
print('saving fits image...')
hdu = iofits.PrimaryHDU(np.rot90(np.fliplr(image_cast), 1).T)
hdulist = iofits.HDUList([hdu])
hdulist.writeto(name, overwrite=True)
else:
print('cannot save image.')
print('Fin saving image.')
def initialize_list(self):
"""
        Create a new list of selected points.
        Returns
        -------
        target : np.ndarray
            Numpy array of the selected points (uint, 16-bit)
"""
print('making new list...')
target = np.empty((0,2)).astype(np.uint16)
print('Fin reading list.')
return(target)
def read_list(self, name:str):
"""
        Read a list of selected points.
        Parameters
        ----------
        name : str
            File path of the file storing the XY coordinates of the selected points (.npy format)
        Returns
        -------
        target : np.ndarray
            Numpy array of the selected points (uint, 16-bit)
"""
print('reading old list...')
target = np.load(name)
print('Fin reading list.')
return(target)
def save_list(self, name:str, target:np.ndarray, is_overwrite:bool=True):
"""
        Save the selected points.
        Parameters
        ----------
        name : str
            Destination file path
        target : np.ndarray
            Numpy array storing the XY coordinates of the selected points
        is_overwrite : bool, default True
            Whether overwriting is allowed (the default is True)
"""
if not os.path.isfile(name):
print('saving target list...')
np.save(name, target)
elif os.path.isfile(name) and is_overwrite:
print('overwriting target list...')
np.save(name, target)
else:
print('cannot overwrite list.')
        print('Fin saving list.')
def prepare_plot_point(self, img_array:np.ndarray, target:np.ndarray,
box_window:int=20, img_color:int=0, img_scaled:bool=False):
"""
        Output the initial plotting state for placing the points used to
        estimate the background.
        Parameters
        ----------
        img_array : np.ndarray
            Image as a numpy array (float, 32-bit)
        target : np.ndarray
            Numpy array storing the XY coordinates of the selected points
        box_window : int, default 20
            Half-size of the window around each point used to estimate the
            background (the default is 20)
        img_color : int, default 0
            Color channel setting of the displayed image (the default is 0)
        img_scaled : bool, default False
            Scaling of the displayed image (the default is False)
        Returns
        -------
        fig :
        point :
            Return value for the PointSetter class
        img_comp :
            Return value for the PointSetter class
        img_display :
            Return value for the PointSetter class
        img_show :
            Return value for the PointSetter class
        mouse_show :
            Return value for the PointSetter class
        box_show :
            Return value for the PointSetter class
        med_show :
            Return value for the PointSetter class
        box_window :
            Return value for the PointSetter class
        ax0 :
        ax1 :
        ax2 :
        ax3 :
"""
img_comp = (img_array/np.max(img_array)*255).astype('uint8')
img_display = img_comp
if img_color == 0:
pass
elif img_color == 1:
img_display[:,:,1] = img_display[:,:,2] = 0
elif img_color == 2:
img_display[:,:,0] = img_display[:,:,2] = 0
elif img_color == 3:
img_display[:,:,0] = img_display[:,:,1] = 0
else:
pass
if target.size == 0:
box_comp_array = np.full((box_window*2, box_window*2, img_display.shape[2]), np.nan, dtype='uint8')
else:
box_comp_array = img_display[target[-1,1]-box_window:target[-1,1]+box_window,
target[-1,0]-box_window:target[-1,0]+box_window]
fig = plt.figure()
fig.subplots_adjust(hspace=0.6)
gs = gridspec.GridSpec(3,3)
ax0 = fig.add_subplot(gs[:,:2])
ax0.set_title('left-click: add, right-click: remove')
img_show = ax0.imshow(img_display)
point, = ax0.plot(target[...,0].tolist(), target[...,1].tolist(), marker="o", linestyle='None', color="#FFFF00")
point.set_picker(True)
point.set_pickradius(10)
ax1 = fig.add_subplot(gs[0,-1])
ax1.set_title('mouse window box')
mouse_show = ax1.imshow(box_comp_array)
ax2 = fig.add_subplot(gs[1,-1])
ax2.set_title('point window box')
box_show = ax2.imshow(box_comp_array)
ax3 = fig.add_subplot(gs[2,-1])
ax3.set_title('box median')
ax3.axis('off')
med_show = ax3.imshow(np.nanmedian(box_comp_array, axis=(0,1), keepdims=True).astype('uint8'))
return(fig, point, img_comp, img_display, img_show, mouse_show, box_show, med_show, ax0, ax1, ax2, ax3)
def postprocess_plot_point(self, pointlist):
"""
        Store the selected point information in a numpy array.
        Parameters
        ----------
        pointlist : PointSetter
            PointSetter instance holding the selected points
        Returns
        -------
        target : np.ndarray
            Numpy array storing the XY coordinates of the selected points
"""
target = np.vstack((pointlist.xs, pointlist.ys)).T.astype(np.uint16)
return(target)
def check_point_window(self, img_array:np.ndarray, target:np.ndarray, box_window:int=20):
"""
        Display image patches around the selected points.
        Parameters
        ----------
        img_array : np.ndarray
            Image as a numpy array (float, 32-bit)
        target : np.ndarray
            Numpy array storing the XY coordinates of the selected points
        box_window : int, default 20
            Half-size of the window around each point used to estimate the
            background (the default is 20)
"""
img_comp = (img_array/np.max(img_array)*255).astype('uint8')
box = np.empty((box_window*2, box_window*2, img_comp.shape[2], len(target)), dtype='uint8')
for i in range(len(target)):
t = target[i]
x = np.arange(int(t[0])-box_window, int(t[0])+box_window)
x = x[(0 <= x) & (x < img_comp.shape[1])]
y = np.arange(int(t[1])-box_window, int(t[1])+box_window)
y = y[(0 <= y) & (y < img_comp.shape[0])]
_box = np.full((box_window*2, box_window*2, img_comp.shape[2]), np.nan, dtype='uint8')
_box[np.ix_(y-np.min(y),x-np.min(x))] = img_comp[np.ix_(y,x)]
box[:,:,:,i] = _box
target_length = box.shape[3]
axes = []
fig = plt.figure()
for n in range(target_length):
axes.append(fig.add_subplot(int(np.ceil(np.sqrt(target_length))),
int(np.ceil(np.sqrt(target_length))),
n+1))
plt.imshow(box[...,n])
plt.axis('off')
fig.tight_layout()
plt.show()
def estimate_background(self, img_array:np.ndarray, target:np.ndarray, box_window:int=20, func_order:int=4):
"""
        Estimate the background based on the selected points.
        Parameters
        ----------
        img_array : np.ndarray
            Image as a numpy array (float, 32-bit)
        target : np.ndarray
            Numpy array storing the XY coordinates of the selected points
        box_window : int, default 20
            Half-size of the window around each point used to estimate the
            background (the default is 20)
        func_order : int, default 4
            Order of the fitting polynomial (the default is 4)
        Returns
        -------
        model : np.ndarray
            Estimated background as a numpy array (float, 32-bit)
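        Examples
        --------
        A minimal end-to-end sketch on synthetic data (shapes and values
        are illustrative only; a random image stands in for real data):
        >>> dbe = DynamicBackgroundEstimation()
        >>> img = np.random.rand(50, 60, 3).astype(np.float32)
        >>> target = np.array([[10, 10], [40, 15], [20, 35], [50, 40]], dtype=np.uint16)
        >>> model = dbe.estimate_background(img, target, box_window=5, func_order=1)
        >>> model.shape
        (50, 60, 3)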
"""
def fit_func(mesh, p, *args):
x, y = mesh
pn = args
func = p
cum = 0
if func_order > 0:
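                # For each total order i, add every monomial x**a * y**b with
                # a + b == i (e.g. order 2 contributes x**2, x*y and y**2).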
for i in range(1, (func_order+1)):
x_array = np.arange(i+1)[::-1].tolist()
y_array = np.arange(i+1).tolist()
for n in range(i+1):
func = func + pn[cum+n]*(x**x_array[n])*(y**y_array[n])
cum = cum + i + 1
return func
if np.all(np.isnan(img_array)):
model = None
else:
box = np.empty((box_window*2, box_window*2, img_array.shape[2], len(target)), dtype='float32')
for i in range(len(target)):
t = target[i]
x = np.arange(int(t[0])-box_window, int(t[0])+box_window)
x = x[(0 <= x) & (x < img_array.shape[1])]
y = np.arange(int(t[1])-box_window, int(t[1])+box_window)
y = y[(0 <= y) & (y < img_array.shape[0])]
_box = np.full((box_window*2, box_window*2, img_array.shape[2]), np.nan, dtype='float32')
_box[np.ix_(y-np.min(y),x-np.min(x))] = img_array[np.ix_(y,x)]
box[:,:,:,i] = _box
target_median = np.nanmedian(box, axis=(0,1))
p_array = []
if func_order > 0:
for i in range(1, (func_order+1)):
_p_array = np.repeat(0, i+1).tolist()
p_array = np.append(p_array, _p_array)
model = np.empty_like(img_array)
for i in range(model.shape[2]):
if func_order == 0:
initial = np.array([np.mean(target_median[i,...])])
else:
initial = np.append(np.mean(target_median[i,...]), p_array)
popt, _ = optimize.curve_fit(fit_func, target.T, target_median[i,...], p0=initial)
mesh = np.meshgrid(np.linspace(1,img_array.shape[1],img_array.shape[1]),np.linspace(1,img_array.shape[0],img_array.shape[0]))
model[...,i] = fit_func(mesh, *popt)
return(model)
def check_background(self, img_array:np.ndarray, model:np.ndarray):
"""
        Display the estimated background model.
        Parameters
        ----------
        img_array : np.ndarray
            Image as a numpy array (float, 32-bit)
        model : np.ndarray
            Estimated background as a numpy array (float, 32-bit)
"""
fig=plt.figure()
ax1=fig.add_subplot(131)
ax1.set_title('Base image')
ax1.imshow((img_array/np.max(img_array)*255).astype('uint8'))
ax2=fig.add_subplot(132)
ax2.set_title('Background model')
ax2.imshow((model/np.max(img_array)*255).astype('uint8'))
ax3=fig.add_subplot(133)
ax3.set_title('Heatmap')
ax3.imshow((((model-np.min(model))/(np.max(model)-np.min(model)))*255).astype('uint8'),
interpolation='nearest',vmin=0,vmax=255,cmap='inferno')
fig.tight_layout()
plt.show()
def subtract_background(self, img_array:np.ndarray, model:np.ndarray):
"""
        Subtract the background from the original image.
        Parameters
        ----------
        img_array : np.ndarray
            Image as a numpy array (float, 32-bit)
        model : np.ndarray
            Estimated background as a numpy array (float, 32-bit)
        Returns
        -------
        output : np.ndarray
            Background-subtracted image as a numpy array (float, 32-bit)
"""
output = img_array - model - np.min(img_array - model)
return(output)
def divide_background(self, img_array:np.ndarray, model:np.ndarray):
"""
        Divide the original image by the background.
        Parameters
        ----------
        img_array : np.ndarray
            Image as a numpy array (float, 32-bit)
        model : np.ndarray
            Estimated background as a numpy array (float, 32-bit)
        Returns
        -------
        output : np.ndarray
            Background-divided image as a numpy array (float, 32-bit)
"""
output = np.zeros(img_array.shape)
not_zero = model != 0
output[not_zero] = img_array[not_zero] / model[not_zero]
output[~not_zero] = 1
return(output)
class PointSetter:
"""
    Holds helper callbacks for adding selected points on a matplotlib figure.
    Attributes
    ----------
    fig :
    line :
        Array of selected points.
    img :
        Image.
    img_display :
        Displayed image.
    img_show :
        Matplotlib artist for the main image.
    mouse_show :
        Matplotlib artist for the mouse window box.
    box_show :
        Matplotlib artist for the point window box.
    med_show :
        Matplotlib artist for the window-box median.
    window :
        Window size.
    img_color :
        Color channel setting of the displayed image.
    img_scaled :
        Scaling of the displayed image.
    ax0 :
    ax1 :
    ax2 :
    ax3 :
    xs :
        X coordinates of the selected points
    ys :
        Y coordinates of the selected points
    cidadd :
        Callback connection for adding a point
    cidhandle :
        Callback connection for picking and removing a point
"""
def __init__(self, fig, line, img, img_display, img_show, mouse_show, box_show, med_show,
window, img_color, img_scaled, ax0, ax1, ax2, ax3):
"""
Parameters
----------
fig :
        line :
            Array of selected points.
        img :
            Image.
        img_display :
            Displayed image.
        img_show :
            Matplotlib artist for the main image.
        mouse_show :
            Matplotlib artist for the mouse window box.
        box_show :
            Matplotlib artist for the point window box.
        med_show :
            Matplotlib artist for the window-box median.
        window :
            Window size.
        img_color :
            Color channel setting of the displayed image.
        img_scaled :
            Scaling of the displayed image.
        ax0 :
        ax1 :
        ax2 :
        ax3 :
"""
self.fig = fig
self.line = line
self.img = img
self.img_display = img_display
self.img_show = img_show
self.mouse_show = mouse_show
self.box_show = box_show
self.med_show = med_show
self.window = window
self.img_color = img_color
self.img_scaled = img_scaled
self.ax0 = ax0
self.ax1 = ax1
self.ax2 = ax2
self.ax3 = ax3
self.xs = list(line.get_xdata())
self.ys = list(line.get_ydata())
self.cidadd = line.figure.canvas.mpl_connect('button_press_event', self.on_add)
self.cidhandle = line.figure.canvas.mpl_connect('pick_event', self.on_handle)
self.cidmotion = line.figure.canvas.mpl_connect('motion_notify_event', self.on_motion)
def set_image(self, img_array):
"""
        Update the image.
        Parameters
        ----------
        img_array : np.ndarray
            Image as a numpy array (float, 32-bit)
"""
img_comp = (img_array/np.max(img_array)*255).astype('uint8')
self.img = img_comp
display = self.img.copy()
if self.img_color == 0:
pass
elif self.img_color == 1:
display[:,:,1] = display[:,:,2] = 0
elif self.img_color == 2:
display[:,:,0] = display[:,:,2] = 0
elif self.img_color == 3:
display[:,:,0] = display[:,:,1] = 0
else:
pass
self.img_display = display
self.img_show.set_data(display)
self.img_show.set_extent((0, display.shape[1], display.shape[0], 0))
bg = self.fig.canvas.copy_from_bbox(self.ax0.bbox)
self.fig.canvas.restore_region(bg)
self.ax0.draw_artist(self.img_show)
self.fig.canvas.blit(self.ax0.bbox)
new_box = np.full((self.window*2, self.window*2, display.shape[2]), 0, dtype='uint8')
new_box = new_box.astype('uint8')
new_med = np.nanmedian(new_box, axis=(0,1), keepdims=True).astype('uint8')
self.box_show.set_data(new_box)
self.med_show.set_data(new_med)
self.ax2.draw_artist(self.ax2.patch)
self.ax2.draw_artist(self.box_show)
self.ax3.draw_artist(self.ax3.patch)
self.ax3.draw_artist(self.med_show)
self.fig.canvas.blit(self.ax2.bbox)
self.fig.canvas.blit(self.ax3.bbox)
def set_target(self, target):
"""
        Update the selected points.
        Parameters
        ----------
        target : np.ndarray
            Numpy array storing the XY coordinates of the selected points
"""
self.xs = target[...,0].tolist()
self.ys = target[...,1].tolist()
self.line.set_data(self.xs, self.ys)
bg = self.fig.canvas.copy_from_bbox(self.ax0.bbox)
self.fig.canvas.restore_region(bg)
self.ax0.draw_artist(self.line)
self.fig.canvas.blit(self.ax0.bbox)
def set_window(self, new_window):
"""
        Update the window size.
        Parameters
        ----------
        new_window : int
            New window size
"""
self.window = new_window
new_box = np.full((self.window*2, self.window*2, self.img_display.shape[2]), np.nan, dtype='uint8')
if len(self.xs) > 0:
_x = np.arange(int(self.xs[-1])-self.window, int(self.xs[-1])+self.window)
_x = _x[(0 <= _x) & (_x < self.img_display.shape[1])]
_y = np.arange(int(self.ys[-1])-self.window, int(self.ys[-1])+self.window)
_y = _y[(0 <= _y) & (_y < self.img_display.shape[0])]
new_box[np.ix_(_y-np.min(_y),_x-np.min(_x))] = self.img_display[np.ix_(_y,_x)]
new_box = new_box.astype('uint8')
new_med = np.nanmedian(new_box, axis=(0,1), keepdims=True).astype('uint8')
self.box_show.set_data(new_box)
self.box_show.set_extent((0, new_box.shape[1], new_box.shape[0], 0))
self.med_show.set_data(new_med)
self.ax2.draw_artist(self.ax2.patch)
        self.ax2.draw_artist(self.box_show)
self.ax3.draw_artist(self.ax3.patch)
self.ax3.draw_artist(self.med_show)
self.fig.canvas.blit(self.ax2.bbox)
self.fig.canvas.blit(self.ax3.bbox)
def set_img_scaled(self, new_scaled):
"""
        Change the scaling of the displayed image.
        Parameters
        ----------
        new_scaled : bool
            New scaling setting; if True, apply histogram equalization to the display
"""
self.img_scaled = new_scaled
display = self.img.copy()
if self.img_color == 0:
pass
elif self.img_color == 1:
display[:,:,1] = display[:,:,2] = 0
elif self.img_color == 2:
display[:,:,0] = display[:,:,2] = 0
elif self.img_color == 3:
display[:,:,0] = display[:,:,1] = 0
else:
pass
if new_scaled == True:
display = display.astype(np.float32)
if (np.max(display[:,:,0]) - np.min(display[:,:,0])) > 0:
display[:,:,0] = ((display[:,:,0] - np.min(display[:,:,0])) / (np.max(display[:,:,0]) - np.min(display[:,:,0])))
display[:,:,0] = exposure.equalize_hist(display[:,:,0]) * np.iinfo(np.uint8).max
if (np.max(display[:,:,1]) - np.min(display[:,:,1])) > 0:
display[:,:,1] = ((display[:,:,1] - np.min(display[:,:,1])) / (np.max(display[:,:,1]) - np.min(display[:,:,1])))
display[:,:,1] = exposure.equalize_hist(display[:,:,1]) * np.iinfo(np.uint8).max
if (np.max(display[:,:,2]) - np.min(display[:,:,2])) > 0:
display[:,:,2] = ((display[:,:,2] - np.min(display[:,:,2])) / (np.max(display[:,:,2]) - np.min(display[:,:,2])))
display[:,:,2] = exposure.equalize_hist(display[:,:,2]) * np.iinfo(np.uint8).max
display = display.astype(np.uint8)
elif new_scaled == False:
pass
else:
pass
self.img_display = display
self.img_show.set_data(display)
bg = self.fig.canvas.copy_from_bbox(self.ax0.bbox)
self.fig.canvas.restore_region(bg)
self.ax0.draw_artist(self.img_show)
self.ax0.draw_artist(self.line)
self.fig.canvas.blit(self.ax0.bbox)
new_box = np.full((self.window*2, self.window*2, self.img_display.shape[2]), np.nan, dtype='uint8')
if len(self.xs) > 0:
_x = np.arange(int(self.xs[-1])-self.window, int(self.xs[-1])+self.window)
_x = _x[(0 <= _x) & (_x < self.img_display.shape[1])]
_y = np.arange(int(self.ys[-1])-self.window, int(self.ys[-1])+self.window)
_y = _y[(0 <= _y) & (_y < self.img_display.shape[0])]
new_box[np.ix_(_y-np.min(_y),_x-np.min(_x))] = self.img_display[np.ix_(_y,_x)]
new_box = new_box.astype('uint8')
        new_med = np.nanmedian(new_box, axis=(0,1), keepdims=True).astype('uint8')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 10 11:15:49 2018
# Congo basin tree fraction using 2018 dataset with gain
@author: earjba
"""
import numpy as np
import importlib
import iris
import iris.quickplot as qplt
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from netCDF4 import Dataset, num2date
from mpl_toolkits import basemap
from jpros import readfiles
from jpros import harmonised
importlib.reload(readfiles)
importlib.reload(harmonised)
from iris.experimental.equalise_cubes import equalise_attributes
from iris.util import unify_time_units
import pandas as pd
import tifffile as tiff
import h5py
import glob
import math
import gdal
from netCDF4 import Dataset as NetCDFFile
from PIL import Image
from pyhdf.SD import SD, SDC
def get_coords(gt, width, height):
minx = gt[0]
miny = gt[3] + width*gt[4] + height*gt[5]
resx = gt[1]
maxx = gt[0] + width*gt[1] + height*gt[2]
maxy = gt[3]
resy = gt[5]
lon = np.arange(minx, maxx, resx)
lat = np.arange(miny, maxy, -resy)
return(lat, lon)
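# Note: get_coords assumes a north-up GDAL geotransform
# gt = (minx, xres, 0, maxy, 0, -yres) with zero rotation terms, and
# returns ascending 1D longitude and latitude axes.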
def regrid_data(var, lat, lon, target_res=2):
if lat[0] > lat[-1]:
#flip lat and associated index
lat = lat[::-1]
var = var[:, :, ::-1, :]
new_lat = np.arange(lat[0], lat[-1]+(abs(lat[1]-lat[0])), target_res)
new_lon = np.arange(lon[0], lon[-1]+(abs(lat[1]-lat[0])), target_res)
lon_sub, lat_sub = np.meshgrid(new_lon, new_lat)
var_rescale = np.empty((var.shape[0], var.shape[1],
len(new_lat), len(new_lon)))
for yr in range(var.shape[0]):
for mn in range(12):
var_rescale[yr, mn, :, :] = basemap.interp(var[yr, mn, :, :],
lon, lat,
lon_sub, lat_sub,
order=1)
return(var_rescale, new_lat, new_lon)
def get_lat_bounds(array1d):
div = abs(array1d[1] - array1d[0])
if array1d[0] < 0:
extra_val = array1d[0] - div
bounds1d = np.concatenate(([extra_val], array1d))
else:
extra_val = array1d[-1] - div
bounds1d = np.concatenate((array1d, [extra_val]))
bounds2d = np.hstack((bounds1d[:-1, np.newaxis], bounds1d[1:, np.newaxis]))
bounds2d = bounds2d.astype('float')
return(bounds2d)
def get_lon_bounds(array1d):
div = abs(array1d[1] - array1d[0])
extra_val = array1d[-1] + div
bounds1d = np.concatenate((array1d, [extra_val]))
bounds2d = np.hstack((bounds1d[:-1, np.newaxis], bounds1d[1:, np.newaxis]))
bounds2d = bounds2d.astype('float')
return(bounds2d)
def minus180_to_plus180(var, lon):
if len(var.shape) < 4:
raise TypeError('Variable not in correct format')
else:
l = int(var.shape[-1]/2)
temp1 = var[:, :, :, 0:l]
temp2 = var[:, :, :, l:]
new_var = np.concatenate((temp2, temp1), axis=3)
new_lon = np.arange(-180, 180, (abs(lon[1]-lon[0])))
return(new_var, new_lon)
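# Quick illustration of minus180_to_plus180 (hypothetical values): with
# lon = [0, 90, 180, 270] the two array halves are swapped and the
# returned longitudes become [-180, -90, 0, 90].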
path = ('/nfs/a68/gyjcab/datasets/lapse_data_harmonised/Jan_2018/mon_1.0deg/')
# read in surface air temperture 2001- 2018
one_deg_cube = path+'tas_airs_mon_1.0deg_2003_2018.nc'
path = ('/nfs/a68/gyjcab/datasets/lapse_data_harmonised/Jan_2018/mon_0.25deg/')
# read in surface albedo
pt25_cube = path+'sal_clara_mon_0.25deg_1982_2015_direct_from_netcdf.nc'
# read in Global Precipitation Climatology Centre monthly total of precipitation
pt5_cube = ('/nfs/a68/gyjcab/datasets/lapse_data_harmonised/Jan_2018/Final/'
            '0.5deg/pr_gpcc_mon_0.5deg_1983_2018.nc')
regrid_cube = one_deg_cube
#%%
def get_forest_cover_2018(res=0.05):
# read canopy cover data
forest_2000_path = '/nfs/a68/gyjcab/datasets/GFC_Hansen/v1.6/treecover2000/'
# read forest loss year data
year_loss_path = '/nfs/a68/gyjcab/datasets/GFC_Hansen/v1.6/lossYear/'
# read each tile and down-scale data over Central Africa
scale = int(res/abs(0.00025))
ydim = (int(40/res))
xdim1 = (int(10/res))
xdim2 = (int(40/res))
vdat = np.empty((ydim, xdim1))
    hdat = np.empty((ydim, xdim2))
from unittest import TestCase
from recolo import kinematic_fields_from_deflections
import numpy as np
class TestConstantDeflection(TestCase):
def setUp(self):
self.tol = 1e-6
self.acceleration = 1.7
n_pts_x = 400
n_pts_y = 400
pixel_size = 1.5
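        # Deflection grows as 0.5 * a * t**2 at every pixel, so its second
        # time derivative recovers the constant acceleration a.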
time_ramp = 0.5 * self.acceleration * np.arange(0, 100, 1) ** 2.
sampling_rate = 1
deflection_field = np.ones((n_pts_x, n_pts_y))
deflection_fields = deflection_field[np.newaxis, :, :] * time_ramp[:, np.newaxis, np.newaxis]
self.field_stack = kinematic_fields_from_deflections(deflection_fields, pixel_size, sampling_rate=sampling_rate)
def test_acceleration(self):
for i, field in enumerate(self.field_stack):
# We need to skip the first and last values as they are affected by the edges
if i > 5 and i < 95:
if np.max(np.abs(field.acceleration - self.acceleration)) > self.tol:
self.fail("For frame %i, the acceleration calculation is wrong by %f" % (
i, np.max(np.abs(field.acceleration - self.acceleration))))
def test_curvature_xx(self):
for i, field in enumerate(self.field_stack):
if np.max(np.abs(field.curv_xx)) > self.tol:
self.fail("Curvature was not zero")
def test_curvature_xy(self):
for i, field in enumerate(self.field_stack):
if np.max(np.abs(field.curv_xy)) > self.tol:
self.fail("Curvature was not zero")
def test_curvature_yy(self):
for i, field in enumerate(self.field_stack):
if np.max(np.abs(field.curv_yy)) > self.tol:
self.fail("Curvature was not zero")
def test_slope_x(self):
for i, field in enumerate(self.field_stack):
if np.max(np.abs(field.slope_x)) > self.tol:
self.fail("Curvature was not zero")
def test_slope_y(self):
for i, field in enumerate(self.field_stack):
if np.max(np.abs(field.slope_y)) > self.tol:
self.fail("Curvature was not zero")
class TestHalfSineDeflection(TestCase):
def setUp(self):
self.tol = 1e-6
self.curv_rel_tol = 1e-2
self.acceleration = 1.7
n_pts_x = 200
n_pts_y = 200
pixel_size = 1
        time_ramp = 0.5 * self.acceleration * np.arange(0, 100, 1) ** 2.
import numpy as np
import pytest
import snc.agents.hedgehog.strategic_idling.strategic_idling_utils
from snc.agents.hedgehog.asymptotic_workload_cov.\
compute_asymptotic_cov_bernoulli_service_and_arrivals \
import ComputeAsymptoticCovBernoulliServiceAndArrivals
import snc.agents.hedgehog.strategic_idling.hedging_utils as hedging_utils
import snc.agents.hedgehog.workload.workload as wl
from snc.agents.hedgehog.params import StrategicIdlingParams
from snc.agents.hedgehog.strategic_idling.strategic_idling import StrategicIdlingCore
from snc.agents.hedgehog.strategic_idling.strategic_idling_hedgehog_gto import \
StrategicIdlingGTO, StrategicIdlingHedgehogGTO
from snc.agents.hedgehog.strategic_idling.strategic_idling_hedging import StrategicIdlingHedging
from snc.agents.hedgehog.strategic_idling.strategic_idling_utils import get_dynamic_bottlenecks
import snc.environments.examples as examples
import snc.utils.alt_methods_test as alt_methods_test
import snc.utils.exceptions as exceptions
def test_create_strategic_idling_get_dynamic_bottlenecks():
neg_log_discount_factor = - np.log(0.99999)
env = examples.simple_reentrant_line_model(alpha1=0.33, mu1=0.69, mu2=0.35, mu3=0.69,
cost_per_buffer=np.array([1, 1, 1])[:, None])
num_wl_vec = 2
load, workload_mat, _ = wl.compute_load_workload_matrix(env, num_wl_vec)
strategic_idling_params = StrategicIdlingParams()
gto_object = StrategicIdlingGTO(workload_mat=workload_mat,
load=load,
cost_per_buffer=env.cost_per_buffer,
model_type=env.model_type,
strategic_idling_params=strategic_idling_params)
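    # Each case below maps a buffer state x into workload space
    # (w = workload_mat @ x) and checks which resources are dynamic
    # bottlenecks, i.e. not allowed to idle.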
x = np.array([[158], [856], [0]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([1])
assert set(gto_object.get_allowed_idling_directions(x).k_idling_set) == set([0])
x = np.array([[493], [476], [0]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0,1])
assert set(gto_object.get_allowed_idling_directions(x).k_idling_set) == set([])
x = np.array([[631], [338], [0]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0])
assert set(gto_object.get_allowed_idling_directions(x).k_idling_set) == set([1])
def test_create_strategic_idling_hedgehog_gto_normal_hedging():
neg_log_discount_factor = - np.log(0.99999)
env = examples.simple_reentrant_line_model(alpha1=0.33, mu1=0.69, mu2=0.35, mu3=0.69,
cost_per_buffer=np.array([1.5, 1, 2])[:, None])
num_wl_vec = 2
load, workload_mat, _ = wl.compute_load_workload_matrix(env, num_wl_vec)
strategic_idling_params = StrategicIdlingParams()
workload_cov = np.array([[2, 0.5], [0.5, 3]])
hgto_object = StrategicIdlingHedgehogGTO(workload_mat=workload_mat,
neg_log_discount_factor=neg_log_discount_factor,
load=load,
cost_per_buffer=env.cost_per_buffer,
model_type=env.model_type,
strategic_idling_params=strategic_idling_params,
workload_cov=workload_cov)
# this case corresponds to normal hedging regime below hedging threshold
x = np.array([[631], [338], [0]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([])
# this case corresponds to normal hedging regime above hedging threshold
x = np.array([[969],
[ 0],
[351]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([1])
# this case corresponds to monotone region
x = np.array([[493],
[476],
[ 0]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0,1])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([])
# this case corresponds to monotone region
x = np.array([[100],
[476],
[ 0]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([1])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([])
assert hgto_object._min_drain_lp is None
def test_create_strategic_idling_hedgehog_gto_switching_curve():
neg_log_discount_factor = - np.log(0.99999)
env = examples.simple_reentrant_line_model(alpha1=0.33, mu1=0.7, mu2=0.345, mu3=0.7,
cost_per_buffer=np.array([1.5, 1, 2])[:, None])
num_wl_vec = 2
load, workload_mat, _ = wl.compute_load_workload_matrix(env, num_wl_vec)
strategic_idling_params = StrategicIdlingParams()
workload_cov = np.array([[2, 0.5], [0.5, 3]])
h_object = StrategicIdlingHedging(workload_mat=workload_mat,
neg_log_discount_factor=neg_log_discount_factor,
load=load,
cost_per_buffer=env.cost_per_buffer,
model_type=env.model_type,
strategic_idling_params=strategic_idling_params,
workload_cov=workload_cov)
hgto_object = StrategicIdlingHedgehogGTO(workload_mat=workload_mat,
neg_log_discount_factor=neg_log_discount_factor,
load=load,
cost_per_buffer=env.cost_per_buffer,
model_type=env.model_type,
strategic_idling_params=strategic_idling_params,
workload_cov=workload_cov)
# This case corresponds to switching curve regime, i.e. minimum cost
# effective state can only be reached by extending the minimum draining time.
# `w` is below the hedging threshold so standard Hedgehog would allow one
# resource to idle, but it turns out that this resource is a dynamic
# bottleneck for the current `w`.
x = np.array(([[955],
[ 0],
[202]]))
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([])
assert set(h_object.get_allowed_idling_directions(x).k_idling_set) == set([0])
# This case corresponds to switching curve regime (i.e., drift @ psi_plus < 0),
# `w` is below the hedging threshold so standard Hedgehog would allow one resource to idle.
# Since this resource is not a dynamic bottleneck the GTO constraint also allows it to idle.
x = np.array([[ 955],
[ 0],
[1112]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([1])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([0])
assert set(h_object.get_allowed_idling_directions(x).k_idling_set) == set([0])
# This case corresponds to switching curve regime (i.e., drift @ psi_plus < 0),
# `w` is below the hedging threshold so standard Hedgehog would allow the
# less loaded resource to idle. This is similar to the first case, but when both
# resources are dynamic bottlenecks for the current `w`.
x = np.array([[759],
[ 0],
[595]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0,1])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([])
assert set(h_object.get_allowed_idling_directions(x).k_idling_set) == set([0])
    # This case corresponds to the monotone region, so neither bottleneck is
    # allowed to idle under either the standard Hedgehog or the GTO policy.
x = np.array([[283],
[672],
[ 0]])
w = workload_mat @ x
assert get_dynamic_bottlenecks(w, workload_mat, load) == set([0])
assert set(hgto_object.get_allowed_idling_directions(x).k_idling_set) == set([])
assert set(h_object.get_allowed_idling_directions(x).k_idling_set) == set([])
assert hgto_object._min_drain_lp is not None
def test_create_strategic_idling_no_hedging_object_with_no_asymptotic_covariance():
"""
    Check that idling decisions can be queried even though the asymptotic
    covariance has not been provided.
"""
env = examples.simple_reentrant_line_model(alpha1=9, mu1=22, mu2=10, mu3=22,
cost_per_buffer=np.ones((3, 1)))
num_wl_vec = 2
load, workload_mat, nu = wl.compute_load_workload_matrix(env, num_wl_vec)
strategic_idling_params = StrategicIdlingParams()
x = np.array([[413],
[ 0],
[100]])
si_object = StrategicIdlingCore(workload_mat=workload_mat, load=load,
cost_per_buffer=env.cost_per_buffer,
model_type=env.model_type,
strategic_idling_params=strategic_idling_params)
# these methods should not fail
si_object.get_allowed_idling_directions(x)
def test_create_strategic_idling_object_with_no_asymptotic_covariance():
"""
    Check that the asymptotic covariance has been passed before the idling
    decision is queried.
"""
neg_log_discount_factor = - np.log(0.95)
env = examples.simple_reentrant_line_model(alpha1=9, mu1=22, mu2=10, mu3=22,
cost_per_buffer=np.ones((3, 1)))
num_wl_vec = 2
load, workload_mat, nu = wl.compute_load_workload_matrix(env, num_wl_vec)
strategic_idling_params = StrategicIdlingParams()
x = np.array([[413],
[ 0],
[100]])
si_object = StrategicIdlingHedging(workload_mat=workload_mat,
neg_log_discount_factor=neg_log_discount_factor, load=load,
cost_per_buffer=env.cost_per_buffer,
model_type=env.model_type,
strategic_idling_params=strategic_idling_params)
with pytest.raises(AssertionError):
si_object._verify_offline_preliminaries()
with pytest.raises(AssertionError):
si_object.get_allowed_idling_directions(x)
def create_strategic_idling_object(
workload_mat=np.ones((2, 2)),
workload_cov=None,
neg_log_discount_factor=None,
load=None,
cost_per_buffer=np.ones((2, 1)),
model_type='push',
strategic_idling_params=None):
if strategic_idling_params is None:
strategic_idling_params = StrategicIdlingParams()
return StrategicIdlingHedging(workload_mat=workload_mat,
workload_cov=workload_cov,
neg_log_discount_factor=neg_log_discount_factor,
load=load,
cost_per_buffer=cost_per_buffer,
model_type=model_type,
strategic_idling_params=strategic_idling_params)
def test_create_strategic_idling_object_without_strategic_idling_params():
"""
Check assert `strategic_idling_params is not None` in constructor.
"""
neg_log_discount_factor = - np.log(0.95)
env = examples.simple_reentrant_line_model(alpha1=9, mu1=22, mu2=10, mu3=22,
cost_per_buffer=np.ones((3, 1)))
num_wl_vec = 2
load, workload_mat, nu = wl.compute_load_workload_matrix(env, num_wl_vec)
with pytest.raises(AssertionError):
_ = StrategicIdlingHedging(workload_mat=workload_mat,
neg_log_discount_factor=neg_log_discount_factor, load=load,
cost_per_buffer=env.cost_per_buffer,
model_type=env.model_type)
def test_is_negative_orthant_true():
w = np.zeros((3, 1))
w[0] = -1
assert StrategicIdlingHedging._is_negative_orthant(w)
def test_is_negative_orthant_false():
w = np.zeros((3, 1))
w[0] = 1
assert not StrategicIdlingHedging._is_negative_orthant(w)
def test_is_negative_orthant_false_since_zero_w():
w = np.zeros((3, 1))
assert not StrategicIdlingHedging._is_negative_orthant(w)
def check_equal_effective_cost_multiple_methods(w, workload_mat, cost_per_buffer):
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
barc_a, _, eff_cost_a_1 = si_object.c_bar_solver.solve(w)
_, x_a, eff_cost_a_2 = alt_methods_test.compute_effective_cost_scipy(w, workload_mat,
cost_per_buffer)
barc_b, x_b, eff_cost_b = alt_methods_test.compute_effective_cost_cvxpy(w, workload_mat,
cost_per_buffer)
barc_c, x_c, eff_cost_c = alt_methods_test.compute_dual_effective_cost_cvxpy(w, workload_mat,
cost_per_buffer)
np.testing.assert_almost_equal(barc_a, barc_b)
np.testing.assert_almost_equal(barc_a, barc_c)
np.testing.assert_almost_equal(x_a, x_b)
np.testing.assert_almost_equal(x_a, x_c)
np.testing.assert_almost_equal(eff_cost_a_1, eff_cost_b)
np.testing.assert_almost_equal(eff_cost_a_1, eff_cost_c)
np.testing.assert_almost_equal(eff_cost_a_1, eff_cost_a_2)
return barc_a
def test_effective_cost_superfluous_inequalities():
"""We check that Scipy linprog() used in compute_dual_effective_cost() does not return a status
4 (encountered numerical difficulties)"""
# This example was known to return this status 4 before the fix
env = examples.simple_reentrant_line_with_demand_model(alpha_d=2, mu1=3, mu2=2.5, mu3=3,
mus=1e3, mud=1e3,
cost_per_buffer=np.ones((5, 1)),
initial_state=np.array([10, 25,
55, 0,
100])[:, None],
capacity=np.ones((5, 1)) * np.inf,
job_conservation_flag=True)
load, workload_mat, _ = wl.compute_load_workload_matrix(env, num_wl_vec=2,
load_threshold=None)
w = np.array([[1.], [0.]])
try:
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=env.cost_per_buffer,)
c_bar, _, eff_cost = si_object.c_bar_solver.solve(w)
except exceptions.ScipyLinprogStatusError:
pytest.fail()
def test_effective_cost_ksrs_network_model_case_1():
"""Example 5.3.3 case 1 from CTCN book (online version)."""
mu1 = 1
mu3 = 1
mu2 = 1 / 3
mu4 = 1 / 3
alpha1 = 0.9
alpha3 = 0.9
cost_per_buffer = np.ones((4, 1))
env = examples.ksrs_network_model(alpha1, alpha3, mu1, mu2, mu3, mu4, cost_per_buffer)
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
# Region 1 = {0 < 3 * w1 < w2 < inf}
w1 = 1
w2 = 4
w = np.array([[w1], [w2]])
barc_1 = check_equal_effective_cost_multiple_methods(w, workload_mat, cost_per_buffer)
# Different from CTCN book, [1, 0]
np.testing.assert_almost_equal(barc_1, 1 / 3 * np.array([[0], [1]]))
    # Region 2 = {0 < w1 < 3 * w2 < 9 * w1}
w1 = 2
w2 = 1
w = np.array([[w1], [w2]])
barc_2 = check_equal_effective_cost_multiple_methods(w, workload_mat, cost_per_buffer)
np.testing.assert_almost_equal(barc_2, 1 / 4 * np.ones((2, 1)))
# Region 3 = {0 < 3 * w2 < w1}
w1 = 4
w2 = 1
w = np.array([[w1], [w2]])
barc_3 = check_equal_effective_cost_multiple_methods(w, workload_mat, cost_per_buffer)
# Different from CTCN book, [1, 0]
np.testing.assert_almost_equal(barc_3, 1 / 3 * np.array([[1], [0]]))
def test_effective_cost_ksrs_network_model_case_2():
"""Example 5.3.3 case 2 from CTCN book (online version)."""
mu1 = 1 / 3
mu3 = 1 / 3
mu2 = 1
mu4 = 1
alpha1 = 0.9
alpha3 = 0.9
cost_per_buffer = np.ones((4, 1))
env = examples.ksrs_network_model(alpha1, alpha3, mu1, mu2, mu3, mu4, cost_per_buffer)
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
# Region 1 = {0 < 3 * w1 < w2 < inf}
w1 = 1
w2 = 4
w = np.array([[w1], [w2]])
barc_1 = check_equal_effective_cost_multiple_methods(w, workload_mat, cost_per_buffer)
# Different from CTCN book, [1, -2]
np.testing.assert_almost_equal(barc_1, np.array([[-2], [1]]))
    # Region 2 = {0 < w1 < 3 * w2 < 9 * w1}
w1 = 2
w2 = 1
w = np.array([[w1], [w2]])
barc_2 = check_equal_effective_cost_multiple_methods(w, workload_mat, cost_per_buffer)
np.testing.assert_almost_equal(barc_2, 1 / 4 * np.ones((2, 1)))
# Region 3 = {0 < 3 * w2 < w1}
w1 = 4
w2 = 1
w = np.array([[w1], [w2]])
barc_3 = check_equal_effective_cost_multiple_methods(w, workload_mat, cost_per_buffer)
# Different from CTCN book, [-2, 1]
np.testing.assert_almost_equal(barc_3, np.array([[1], [-2]]))
def test_all_effective_cost_vectors_ksrs_network_model_case_1():
"""Example 5.3.3 from CTCN book (online version)."""
mu1 = 1
mu3 = 1
mu2 = 1 / 3
mu4 = 1 / 3
alpha1 = 0.9
alpha3 = 0.9
cost_per_buffer = np.ones((4, 1))
env = examples.ksrs_network_model(alpha1, alpha3, mu1, mu2, mu3, mu4, cost_per_buffer)
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
# Compute cost vectors.
barc_vectors = alt_methods_test.get_all_effective_cost_linear_vectors(workload_mat,
cost_per_buffer)
barc_vectors_theory = np.array([[1 / 3, 0],
[0, 1 / 3],
[0.25, 0.25]])
# Due to numerical noise, different computers can obtain the barc vectors in different order.
# So we will compare sets instead of ndarrays.
np.around(barc_vectors, decimals=7, out=barc_vectors)
np.around(barc_vectors_theory, decimals=7, out=barc_vectors_theory)
barc_vectors_set = set(map(tuple, barc_vectors))
barc_vectors_theory_set = set(map(tuple, barc_vectors_theory))
assert barc_vectors_set == barc_vectors_theory_set
def test_all_effective_cost_vectors_ksrs_network_model_case_2():
"""Example 5.3.3 case 2 from CTCN book (online version)."""
mu1 = 1 / 3
mu3 = 1 / 3
mu2 = 1
mu4 = 1
alpha1 = 0.9
alpha3 = 0.9
cost_per_buffer = np.ones((4, 1))
env = examples.ksrs_network_model(alpha1, alpha3, mu1, mu2, mu3, mu4, cost_per_buffer)
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
# Compute cost vectors.
barc_vectors = alt_methods_test.get_all_effective_cost_linear_vectors(workload_mat,
cost_per_buffer)
    # The order of the vectors is not relevant; it is fixed here just for easy comparison.
barc_vectors_theory = np.array([[1, -2],
[-2, 1],
[0.25, 0.25]])
# Due to numerical noise, different computers can obtain the barc vectors in different order.
# So we will compare sets instead of ndarrays.
np.around(barc_vectors, decimals=7, out=barc_vectors)
np.around(barc_vectors_theory, decimals=7, out=barc_vectors_theory)
barc_vectors_set = set(map(tuple, barc_vectors))
barc_vectors_theory_set = set(map(tuple, barc_vectors_theory))
assert barc_vectors_set == barc_vectors_theory_set
def test_get_vector_defining_possible_idling_direction_1():
w = np.array([[1], [0]])
w_star = np.array([[1], [1]])
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
np.testing.assert_almost_equal(v_star, np.array([[0], [1]]))
def test_get_vector_defining_possible_idling_direction_2():
w = np.array([[0], [1]])
w_star = np.array([[1], [1]])
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
np.testing.assert_almost_equal(v_star, np.array([[1], [0]]))
def test_get_vector_defining_possible_idling_direction_3():
# Although this w_star is impossible since w_star >= w, we can still calculate v_star.
w = np.array([[1], [1]])
w_star = np.array([[1], [0]])
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
np.testing.assert_almost_equal(v_star, np.array([[0], [-1]]))
def test_get_vector_defining_possible_idling_direction_4():
# Although this w_star is impossible since w_star >= w, we can still calculate v_star.
w = np.array([[1], [1]])
w_star = np.array([[0], [1]])
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
np.testing.assert_almost_equal(v_star, np.array([[-1], [0]]))
def test_project_workload_on_monotone_region_along_minimal_cost_negative_w():
"""We use the single server queue with demand model. The expected result when we project
negative workload with the effective cost LP is zero."""
env = examples.single_station_demand_model(alpha_d=9, mu=10, mus=1e3, mud=1e2)
_, workload_mat, _ = wl.compute_load_workload_matrix(env)
num_wl = workload_mat.shape[0]
w = - np.ones((num_wl, 1))
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=env.cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
np.testing.assert_almost_equal(w_star, np.zeros((num_wl, 1)))
def test_project_workload_on_monotone_region_along_minimal_cost_w_equal_w_star_ksrs_region_2():
"""We use the KSRS model, for which we know the boundary of the monotone region. Therefore, if
we set w in the boundary, we should get w_star = w."""
mu1 = 1 / 3
mu3 = 1 / 3
mu2 = 1
mu4 = 1
workload_mat = np.array([[1 / mu1, 0, 1 / mu4, 1 / mu4], [1 / mu2, 1 / mu2, 1 / mu3, 0]])
cost_per_buffer = np.ones((4, 1))
    # Region 1 = {0 < 3 * w1 < w2 < inf} and Region 2 = {0 < w1 < 3 * w2 < 9 * w1}, so w = (1, 3)
    # is already right on the boundary.
w1 = 1
w2 = 3
w = np.array([[w1], [w2]])
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
np.testing.assert_almost_equal(w, w_star)
def test_project_workload_on_monotone_region_along_minimal_cost_ksrs_region_1():
"""We use the KSRS model, for which we know the boundary of the monotone region."""
mu1 = 1 / 3
mu3 = 1 / 3
mu2 = 1
mu4 = 1
workload_mat = np.array([[1 / mu1, 0, 1 / mu4, 1 / mu4], [1 / mu2, 1 / mu2, 1 / mu3, 0]])
cost_per_buffer = np.ones((4, 1))
# Region 1 = {0 < 3 * w1 < w2 < inf}, so w = (0.5, 3) should be projected to w_star = (1, 3)
w1 = 0.5
w2 = 3
w = np.array([[w1], [w2]])
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
np.testing.assert_almost_equal(w_star, np.array([[1], [3]]))
def test_project_workload_on_monotone_region_along_minimal_cost_ksrs_region_3():
"""We use the KSRS model, for which we know the boundary of the monotone region."""
mu1 = 1 / 3
mu3 = 1 / 3
mu2 = 1
mu4 = 1
workload_mat = np.array([[1 / mu1, 0, 1 / mu4, 1 / mu4], [1 / mu2, 1 / mu2, 1 / mu3, 0]])
cost_per_buffer = np.ones((4, 1))
# Region 3 = {0 < 3 * w2 < w1}, so w = (3, 0.5) should be projected to w_star = (3, 1)
w1 = 3
w2 = 0.5
w = np.array([[w1], [w2]])
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
np.testing.assert_almost_equal(w_star, np.array([[3], [1]]))
def test_project_workload_on_monotone_region_along_minimal_cost_pseudorandom_values():
"""Since this uses random values, it could happen that the simplex (SciPy-LinProg) and SCS (CVX)
solvers give different solutions. This is uncommon, but possible."""
np.random.seed(42)
num_buffers = 4
num_wl = 3
num_tests = 1e3
strategic_idling_params = StrategicIdlingParams()
discrepancy = 0
for i in range(int(num_tests)):
w = np.random.random_sample((num_wl, 1))
cost_per_buffer = np.random.random_sample((num_buffers, 1))
workload_mat = np.random.random_sample((num_wl, num_buffers))
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
w_star_b = alt_methods_test.find_workload_with_min_eff_cost_by_idling_scipy(
w, workload_mat, cost_per_buffer, "revised simplex")
if not np.allclose(w_star, w_star_b):
discrepancy += 1
assert discrepancy < 5
def test_project_workload_when_monotone_region_is_a_ray():
"""We use the simple re-entrant line model."""
c_1 = 1
c_2 = 2
c_3 = 3
cost_per_buffer = np.array([[c_1], [c_2], [c_3]])
mu_1 = 2
mu_2 = 1
mu_3 = 2
workload_mat = np.array([[1 / mu_1 + 1 / mu_3, 1 / mu_3, 1 / mu_3],
[1 / mu_2, 1 / mu_2, 0]])
c_plus = np.array([[mu_1 * (c_1 - c_2)],
[mu_2 * c_2 + (mu_1 * mu_2) / mu_3 * (c_2 - c_1)]])
c_minus = np.array([[c_3 * mu_3],
[mu_2 * c_1 - c_3 * mu_2 * (mu_3 / mu_1 + 1)]])
psi_plus = c_plus - c_minus
w = np.array([[1], [0.]]) # Got from x = np.array([[0.9], [0], [0.2]])
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
w_star_theory = np.array([w[0], - w[0] * psi_plus[0] / psi_plus[1]])
np.testing.assert_almost_equal(w_star, w_star_theory)
def test_project_workload_when_idling_direction_lies_in_c_plus_level_set_zero_penalty():
"""We use the simple re-entrant line model."""
c_1 = 2
c_2 = 1
c_3 = 2
cost_per_buffer = np.array([[c_1], [c_2], [c_3]])
mu_1 = 2
mu_2 = 1
mu_3 = 2
workload_mat = np.array([[1 / mu_1 + 1 / mu_3, 1 / mu_3, 1 / mu_3],
[1 / mu_2, 1 / mu_2, 0]])
c_plus = np.array([[mu_1 * (c_1 - c_2)], [mu_2 * (c_2 * (1 + mu_1/mu_3) - c_1 * mu_1 / mu_3)]])
c_minus = np.array([[mu_3 * c_3], [mu_2 * (c_1 - c_3 * (1 + mu_3/mu_1))]])
psi_plus = c_plus - c_minus
w = np.array([[1], [0.]]) # Got from x = np.array([[0.9], [0], [0.2]])
strategic_idling_params = StrategicIdlingParams(penalty_coeff_w_star=0)
si_object = create_strategic_idling_object(
workload_mat=workload_mat, cost_per_buffer=cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
w_star_theory = np.array([w[0], - w[0] * psi_plus[0] / psi_plus[1]])
with pytest.raises(AssertionError):
np.testing.assert_almost_equal(w_star, w_star_theory)
def test_project_workload_when_idling_direction_lies_in_c_plus_level_set():
"""We use the simple re-entrant line model."""
c_1 = 2
c_2 = 1
c_3 = 2
cost_per_buffer = np.array([[c_1], [c_2], [c_3]])
mu_1 = 2
mu_2 = 1
mu_3 = 2
workload_mat = np.array([[1 / mu_1 + 1 / mu_3, 1 / mu_3, 1 / mu_3],
[1 / mu_2, 1 / mu_2, 0]])
c_plus = np.array([[mu_1 * (c_1 - c_2)], [mu_2 * (c_2 * (1 + mu_1/mu_3) - c_1 * mu_1 / mu_3)]])
c_minus = np.array([[mu_3 * c_3], [mu_2 * (c_1 - c_3 * (1 + mu_3/mu_1))]])
psi_plus = c_plus - c_minus
w = np.array([[1], [0.]]) # Got from x = np.array([[0.9], [0], [0.2]])
si_object = create_strategic_idling_object(
workload_mat=workload_mat, cost_per_buffer=cost_per_buffer,
strategic_idling_params=StrategicIdlingParams(penalty_coeff_w_star=1e-5))
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
w_star_theory = np.array([w[0], - w[0] * psi_plus[0] / psi_plus[1]])
np.testing.assert_almost_equal(w_star, w_star_theory, decimal=5)
def test_is_w_inside_monotone_region_ksrs_network_model_case_1():
"""Example 5.3.3 case 1 from CTCN book (online version)."""
mu1 = 1
mu3 = 1
mu2 = 1 / 3
mu4 = 1 / 3
alpha1 = 0.3
alpha3 = 0.3
cost_per_buffer = np.ones((4, 1))
env = examples.ksrs_network_model(alpha1, alpha3, mu1, mu2, mu3, mu4, cost_per_buffer)
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
# Since w is already in \W^+ in any of the 3 regions, any increment in w will increase the cost,
# so w_star should equal w. Thus, v_star should be a vector of nan, in every case.
# Region 1 = {0 < 3 * w1 < w2 < inf}
w1_1 = 1
w1_2 = 4
w_1 = np.array([[w1_1], [w1_2]])
si_object_1 = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
w_star_1 = si_object_1._find_workload_with_min_eff_cost_by_idling(w_1)
c_bar_1 = si_object_1._get_level_set_for_current_workload(w_1)
assert StrategicIdlingHedging._is_w_inside_monotone_region(w_1, w_star_1, c_bar_1)
    # Region 2 = {0 < w1 < 3 * w2 < 9 * w1}
w2_1 = 2
w2_2 = 1
w_2 = np.array([[w2_1], [w2_2]])
si_object_2 = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
w_star_2 = si_object_2._find_workload_with_min_eff_cost_by_idling(w_2)
c_bar_2 = si_object_2._get_level_set_for_current_workload(w_2)
assert StrategicIdlingHedging._is_w_inside_monotone_region(w_2, w_star_2, c_bar_2)
# Region 3 = {0 < 3 * w2 < w1}
w3_1 = 4
w3_2 = 0.05
w_3 = np.array([[w3_1], [w3_2]])
si_object_3 = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer)
w_star_3 = si_object_3._find_workload_with_min_eff_cost_by_idling(w_3)
c_bar_3 = si_object_3._get_level_set_for_current_workload(w_3)
assert StrategicIdlingHedging._is_w_inside_monotone_region(w_3, w_star_3, c_bar_3)
def test_closest_face_ksrs_network_model_case_2():
"""Example 5.3.3 case 2 from CTCN book (online version)."""
mu1 = 1 / 3
mu3 = 1 / 3
mu2 = 1
mu4 = 1
alpha1 = 0.3
alpha3 = 0.3
cost_per_buffer = np.ones((4, 1))
env = examples.ksrs_network_model(alpha1, alpha3, mu1, mu2, mu3, mu4, cost_per_buffer)
strategic_idling_params = StrategicIdlingParams()
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
# Region 1 = {0 < 3 * w1 < w2 < inf}
w1_1 = 1
w1_2 = 4
w_1 = np.array([[w1_1], [w1_2]])
si_object_1 = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star_1 = si_object_1._find_workload_with_min_eff_cost_by_idling(w_1)
w_star_1b = alt_methods_test.find_workload_with_min_eff_cost_by_idling_scipy(
w_1, workload_mat, cost_per_buffer, "revised simplex")
np.testing.assert_almost_equal(w_star_1, w_star_1b)
v_star_1 = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star_1, w_1)
psi_plus_1, c_plus_1, c_minus_1 = si_object_1._get_closest_face_and_level_sets(w_star_1,
v_star_1)
np.testing.assert_almost_equal(c_minus_1, np.array([[-2], [1]]), decimal=5)
np.testing.assert_almost_equal(c_plus_1, np.array([[0.25], [0.25]]), decimal=5)
    # Region 2 = {0 < w1 < 3 * w2 < 9 * w1}
w2_1 = 2
w2_2 = 1
w_2 = np.array([[w2_1], [w2_2]])
si_object_2 = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star_2 = si_object_2._find_workload_with_min_eff_cost_by_idling(w_2)
w_star_2b = alt_methods_test.find_workload_with_min_eff_cost_by_idling_scipy(
w_2, workload_mat, cost_per_buffer, "revised simplex")
np.testing.assert_almost_equal(w_star_2, w_star_2b)
# Region 2 is in the monotone region W^+
c_bar_2 = si_object_2._get_level_set_for_current_workload(w_2)
assert StrategicIdlingHedging._is_w_inside_monotone_region(w_2, w_star_2, c_bar_2)
# Region 3 = {0 < 3 * w2 < w1}
w3_1 = 4
w3_2 = 0.05
w_3 = np.array([[w3_1], [w3_2]])
si_object_3 = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star_3 = si_object_3._find_workload_with_min_eff_cost_by_idling(w_3)
w_star_3b = alt_methods_test.find_workload_with_min_eff_cost_by_idling_scipy(
w_3, workload_mat, cost_per_buffer, "revised simplex")
np.testing.assert_almost_equal(w_star_3, w_star_3b)
v_star_3 = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star_3, w_3)
psi_plus_3, c_plus_3, c_minus_3 = si_object_3._get_closest_face_and_level_sets(w_star_3,
v_star_3)
np.testing.assert_almost_equal(c_minus_3, np.array([[1], [-2]]), decimal=5)
np.testing.assert_almost_equal(c_plus_3, np.array([[0.25], [0.25]]), decimal=5)
def test_is_monotone_region_a_ray_negative_c_plus():
c_plus = - np.ones((3, 1))
assert not StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
def test_is_monotone_region_a_ray_nonpositive_c_plus():
c_plus = np.array([[-1], [-1], [0]])
assert not StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
def test_is_monotone_region_a_ray_zero_c_plus():
c_plus = np.zeros((3, 1))
assert not StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
def test_is_monotone_region_a_ray_positive_c_plus():
c_plus = np.ones((3, 1))
assert not StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
def test_is_monotone_region_a_ray_c_plus_with_positive_negative_and_zero_components():
c_plus = np.array([[1], [-1], [0]])
assert StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
def test_is_monotone_region_a_ray_c_plus_with_positive_and_negative_components():
c_plus = np.array([[1], [-1], [-1]])
assert StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
def test_is_monotone_region_a_ray_simple_reentrant_line():
"""We use the simple re-entrant line with parameters that make monotone region to be a ray."""
w = np.array([[1], [0]])
env = examples.simple_reentrant_line_model(mu1=2, mu2=1, mu3=2,
cost_per_buffer=np.array([[1], [2], [3]]))
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=env.cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
psi_plus, c_plus, c_minus = si_object._get_closest_face_and_level_sets(w_star, v_star)
assert StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
def test_is_monotone_region_infeasible_with_real_c_plus():
c_plus = np.array([[1], [-1], [-1]])
assert not StrategicIdlingHedging._is_infeasible(c_plus)
def test_is_monotone_region_infeasible():
c_plus = None
assert StrategicIdlingHedging._is_infeasible(c_plus)
def test_is_w_inside_monotone_region_when_small_tolerance():
w = np.random.random_sample((3, 1))
w_star = w + 1e-4
c_bar = np.ones((3, 1))
assert StrategicIdlingHedging._is_w_inside_monotone_region(w, w_star, c_bar)
def test_is_w_inside_monotone_region_false():
w = np.random.random_sample((3, 1))
w_star = w + 1e-2
c_bar = np.ones((3, 1))
assert not StrategicIdlingHedging._is_w_inside_monotone_region(w, w_star, c_bar)
def check_lambda_star(w, c_plus, psi_plus, w_star, test_strong_duality_flag=True):
lambda_star = StrategicIdlingHedging._get_price_lambda_star(c_plus, psi_plus)
lambda_star_b = alt_methods_test.get_price_lambda_star_lp_1_cvxpy(w, c_plus, psi_plus)
lambda_star_c = alt_methods_test.get_price_lambda_star_lp_2_cvxpy(w, c_plus, psi_plus)
lambda_star_d = alt_methods_test.get_price_lambda_star_lp_scipy(w, c_plus, psi_plus)
if test_strong_duality_flag:
lambda_star_a = alt_methods_test.get_price_lambda_star_strong_duality(w, w_star, c_plus,
psi_plus)
np.testing.assert_almost_equal(lambda_star, lambda_star_a, decimal=5)
if lambda_star_b is not None: # If primal is not accurately solved with CVX
np.testing.assert_almost_equal(lambda_star, lambda_star_b, decimal=5)
if lambda_star_c is not None:
np.testing.assert_almost_equal(lambda_star, lambda_star_c, decimal=5)
np.testing.assert_almost_equal(lambda_star, lambda_star_d)
return lambda_star
def test_get_price_lambda_star_when_c_plus_is_positive():
"""lambda_star depends on the ratio over the positive components of psi_plus."""
c_plus = np.array([[1], [1]])
w = np.array([[3], [0.1]])
psi_plus = np.array([[-.1], [0.5]])
check_lambda_star(w, c_plus, psi_plus, None, False)
def test_get_price_lambda_star_when_c_plus_is_negative():
"""c_plus should always be nonnegative"""
c_plus = np.array([[-1], [1]])
psi_plus = np.array([[-1], [0.5]])
with pytest.raises(exceptions.ArraySignError) as excinfo:
_ = StrategicIdlingHedging._get_price_lambda_star(c_plus, psi_plus)
assert (excinfo.value.array_name == "c_plus" and excinfo.value.all_components and
excinfo.value.positive and not excinfo.value.strictly)
def test_get_price_lambda_star_when_c_plus_is_zero():
"""c_plus should always have at least one strictly positive component"""
c_plus = np.array([[0], [0]])
psi_plus = np.array([[-1], [0.5]])
with pytest.raises(exceptions.ArraySignError) as excinfo:
_ = StrategicIdlingHedging._get_price_lambda_star(c_plus, psi_plus)
assert (excinfo.value.array_name == "c_plus" and not excinfo.value.all_components and
excinfo.value.positive and excinfo.value.strictly)
def test_get_price_lambda_star_when_c_plus_has_zero_components():
"""lambda_star only depends on the ratio over the positive components of psi_plus."""
c_plus = np.array([[0], [1]])
w = np.array([[3], [0.1]])
psi_plus = np.array([[-.1], [0.5]])
check_lambda_star(w, c_plus, psi_plus, None, False)
def test_get_price_lambda_star_when_c_plus_has_zero_components_with_positive_psi_plus():
"""lambda_star only depends on the ratio over the positive components of psi_plus."""
c_plus = np.array([[0], [1]])
w = np.array([[-3], [0.1]])
psi_plus = np.array([[0.5], [0.5]])
check_lambda_star(w, c_plus, psi_plus, None, False)
def test_get_price_lambda_star_when_psi_plus_is_negative():
c_plus = np.array([[1], [1]])
psi_plus = - np.ones((2, 1))
with pytest.raises(exceptions.EmptyArrayError) as excinfo:
_ = StrategicIdlingHedging._get_price_lambda_star(c_plus, psi_plus)
assert excinfo.value.array_name == "ratio"
def test_get_price_lambda_star_when_psi_plus_has_zero_and_positive_components():
c_plus = np.array([[1], [1]])
psi_plus = np.array([[0], [1]])
lambda_star = StrategicIdlingHedging._get_price_lambda_star(c_plus, psi_plus)
assert lambda_star == 1
def test_get_price_lambda_star_when_psi_plus_has_zero_and_negative_components():
c_plus = np.array([[1], [1]])
psi_plus = np.array([[0], [-1]])
with pytest.raises(exceptions.EmptyArrayError) as excinfo:
_ = StrategicIdlingHedging._get_price_lambda_star(c_plus, psi_plus)
assert excinfo.value.array_name == "ratio"
def test_get_price_lambda_star_simple_reentrant_line():
env = examples.simple_reentrant_line_model(alpha1=0.5, mu1=1.1, mu2=1.2, mu3=1.3)
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
strategic_idling_params = StrategicIdlingParams()
for i in range(100):
# Set w such that a path-wise optimal solution starting from w cannot exist (p. 187,
# CTCN online ed).
w1 = i + 1
w2 = load[1] / load[0] * w1 * 0.9
w = np.array([[w1], [w2]])
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=env.cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
w_star_b = alt_methods_test.find_workload_with_min_eff_cost_by_idling_scipy(
w, workload_mat, env.cost_per_buffer, "revised simplex")
np.testing.assert_almost_equal(w_star, w_star_b, decimal=5)
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
psi_plus, c_plus, c_minus = si_object._get_closest_face_and_level_sets(w_star, v_star)
check_lambda_star(w, c_plus, psi_plus, w_star)
def test_get_price_lambda_star_when_monotone_region_is_a_ray_other_workload_value_using_new_cplus():
"""We use the simple re-entrant line with parameters that make monotone region to be a ray."""
state = np.array([[302], [297], [300]])
env = examples.simple_reentrant_line_model(alpha1=9, mu1=22, mu2=10, mu3=22,
cost_per_buffer=np.array([[1], [2], [3]]))
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
w = workload_mat @ state # = np.array([[59.9], [54.59090909]])
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=env.cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
psi_plus, c_plus, c_minus = si_object._get_closest_face_and_level_sets(w_star, v_star)
assert StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
# Set near zero epsilon to get same result with all lambda_star methods.
psi_plus, c_plus \
= StrategicIdlingHedging._get_closest_face_and_level_sets_for_ray_or_feasibility_boundary(
c_minus, w_star, epsilon=1e-10)
check_lambda_star(w, c_plus, psi_plus, w_star)
# Positive epsilon makes the strong duality method for lambda_star give different solution.
psi_plus, c_plus \
= StrategicIdlingHedging._get_closest_face_and_level_sets_for_ray_or_feasibility_boundary(
c_minus, w_star, epsilon=0.01)
with pytest.raises(AssertionError):
check_lambda_star(w, c_plus, psi_plus, w_star)
def test_get_price_lambda_star_when_monotone_region_is_a_ray_with_high_epsilon():
"""We use the simple re-entrant line with parameters that make monotone region to be a ray.
This test shows that if the artificial cone is very wide, w will be inside, so that we should
not compute lambda_star."""
state = np.array([[302], [297], [300]])
env = examples.simple_reentrant_line_model(alpha1=9, mu1=22, mu2=10, mu3=22,
cost_per_buffer=np.array([[1], [2], [3]]))
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
w = workload_mat @ state # = np.array([[59.9], [54.59090909]])
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=env.cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
psi_plus, c_plus, c_minus = si_object._get_closest_face_and_level_sets(w_star, v_star)
assert StrategicIdlingHedging._is_monotone_region_a_ray(c_plus)
# Positive epsilon makes the strong duality method for lambda_star give different solution.
psi_plus, c_plus \
= StrategicIdlingHedging._get_closest_face_and_level_sets_for_ray_or_feasibility_boundary(
c_minus, w_star, epsilon=0.3)
assert psi_plus.T @ w >= 0
with pytest.raises(AssertionError):
_ = alt_methods_test.get_price_lambda_star_strong_duality(w, w_star, c_plus, psi_plus)
with pytest.raises(AssertionError):
_ = alt_methods_test.get_price_lambda_star_lp_1_cvxpy(w, c_plus, psi_plus)
with pytest.raises(AssertionError):
_ = alt_methods_test.get_price_lambda_star_lp_2_cvxpy(w, c_plus, psi_plus)
with pytest.raises(AssertionError):
_ = alt_methods_test.get_price_lambda_star_lp_scipy(w, c_plus, psi_plus)
def test_get_price_lambda_star_with_infeasible_workload_space():
"""We use the single server queue with demand for which we know that there is always nonempty
infeasible region."""
env = examples.single_station_demand_model(alpha_d=9, mu=10, mus=1e3, mud=1e2,
initial_state=np.array(([300, 0, 1000])))
load, workload_mat, nu = wl.compute_load_workload_matrix(env, num_wl_vec=2)
w = np.array([[100], [10.01]])
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=env.cost_per_buffer)
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
psi_plus, c_plus, c_minus = si_object._get_closest_face_and_level_sets(w_star, v_star)
assert StrategicIdlingHedging._is_infeasible(c_plus)
psi_plus, c_plus = \
StrategicIdlingHedging._get_closest_face_and_level_sets_for_ray_or_feasibility_boundary(
c_minus, w_star, epsilon=0)
check_lambda_star(w, c_plus, psi_plus, w_star)
def test_lambda_star_in_ksrs_network_model_case_2():
"""Example 5.3.3 case 2 from CTCN book (online version)."""
mu1 = 1 / 3
mu3 = 1 / 3
mu2 = 1
mu4 = 1
alpha1 = 0.3
alpha3 = 0.3
cost_per_buffer = np.ones((4, 1))
env = examples.ksrs_network_model(alpha1, alpha3, mu1, mu2, mu3, mu4, cost_per_buffer)
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
strategic_idling_params = StrategicIdlingParams()
# Region 1 = {0 < 3 * w1 < w2 < inf}
w1 = 1
w2 = 4
w = np.array([[w1], [w2]])
si_object_1 = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=env.cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star_1 = si_object_1._find_workload_with_min_eff_cost_by_idling(w)
w_star_1b = alt_methods_test.find_workload_with_min_eff_cost_by_idling_scipy(
w, workload_mat, cost_per_buffer, "revised simplex")
np.testing.assert_almost_equal(w_star_1, w_star_1b)
v_star_1 = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star_1, w)
psi_plus_1, c_plus_1, c_minus_1 = si_object_1._get_closest_face_and_level_sets(w_star_1,
v_star_1)
check_lambda_star(w, c_plus_1, psi_plus_1, w_star_1)
# Region 3 = {0 < 3 * w2 < w1}
w1 = 4
w2 = 0.05
w = np.array([[w1], [w2]])
si_object_3 = create_strategic_idling_object(
workload_mat=workload_mat, cost_per_buffer=env.cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star_3 = si_object_3._find_workload_with_min_eff_cost_by_idling(w)
w_star_3b = alt_methods_test.find_workload_with_min_eff_cost_by_idling_scipy(
w, workload_mat, cost_per_buffer, "revised simplex")
np.testing.assert_almost_equal(w_star_3, w_star_3b)
v_star_3 = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star_3, w)
psi_plus_3, c_plus_3, c_minus_3 = si_object_3._get_closest_face_and_level_sets(w_star_3,
v_star_3)
check_lambda_star(w, c_plus_3, psi_plus_3, w_star_3)
def test_compute_height_process_case_1():
psi_plus = -np.ones((3, 1))
w = np.ones((3, 1))
height = StrategicIdlingHedging._compute_height_process(psi_plus, w)
assert height == 3
def test_compute_height_process_case_2():
psi_plus = np.array([[-1], [-0.4], [-0.3]])
w = np.array([[0.2], [1], [2]])
height = StrategicIdlingHedging._compute_height_process(psi_plus, w)
np.testing.assert_almost_equal(height, 1.2)
def test_get_possible_idling_directions_single_min_no_threshold():
w = np.array([[-1], [1]])
psi_plus = np.array([[1], [-0.5]])
beta_star = 0
v_star = np.array([[0.25], [0]])
k_idling_set = StrategicIdlingHedging._get_possible_idling_directions(w, beta_star, psi_plus,
v_star)
assert np.all(k_idling_set == np.array([0]))
def test_get_possible_idling_directions_single_min_with_high_threshold():
w = np.array([[-1], [1]])
psi_plus = np.array([[1], [-0.5]])
    beta_star = 1.5  # Right on the boundary
v_star = np.array([[0.25], [0]])
k_idling_set = StrategicIdlingHedging._get_possible_idling_directions(w, beta_star, psi_plus,
v_star)
assert k_idling_set.size == 0
def test_get_possible_idling_directions_multiple_min():
w = np.array([[-1], [1]])
psi_plus = np.array([[1], [-0.5]])
beta_star = 0
v_star = np.array([[0.25], [0.25]])
k_idling_set = StrategicIdlingHedging._get_possible_idling_directions(w, beta_star, psi_plus,
v_star)
assert np.all(k_idling_set == np.array([0, 1]))
def test_get_possible_idling_directions_very_small_value_below_tolerance():
eps = 1e-6
v_star = np.array([[9e-7], [0]])
w = np.array([[-1], [1]])
psi_plus = np.array([[1], [-0.5]])
beta_star = 0
k_idling_set = StrategicIdlingHedging._get_possible_idling_directions(w, beta_star, psi_plus,
v_star, eps)
assert k_idling_set.size == 0
def test_get_possible_idling_directions_in_ksrs_network_model_case_2():
"""Example 5.3.3 case 2 from CTCN book (online version)."""
beta_star = 0 # Set to zero to verify directions of v_star
mu1 = 1 / 3
mu3 = 1 / 3
mu2 = 1
mu4 = 1
alpha1 = 0.3
alpha3 = 0.3
cost_per_buffer = np.ones((4, 1))
env = examples.ksrs_network_model(alpha1, alpha3, mu1, mu2, mu3, mu4, cost_per_buffer)
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
strategic_idling_params = StrategicIdlingParams()
# Region 1 = {0 < 3 * w1 < w2 < inf}
w1 = 1
w2 = 4
w = np.array([[w1], [w2]])
si_object_1 = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=env.cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star_1 = si_object_1._find_workload_with_min_eff_cost_by_idling(w)
w_star_1b = alt_methods_test.find_workload_with_min_eff_cost_by_idling_scipy(
w, workload_mat, cost_per_buffer, "revised simplex")
np.testing.assert_almost_equal(w_star_1, w_star_1b)
v_star_1 = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star_1, w)
psi_plus_1, c_plus_1, c_minus_1 = si_object_1._get_closest_face_and_level_sets(w_star_1,
v_star_1)
k_idling_set_1 = StrategicIdlingHedging._get_possible_idling_directions(w, beta_star,
psi_plus_1, v_star_1)
assert np.all(k_idling_set_1 == np.array([0]))
# Region 2 = {0 < w1 < 3 * w2 < 9 * w1} ==> Already in the monotone region W^+
w1_2 = 2
w2_2 = 1
w_2 = np.array([[w1_2], [w2_2]])
si_object_2 = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=env.cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star_2 = si_object_2._find_workload_with_min_eff_cost_by_idling(w_2)
w_star_2b = alt_methods_test.find_workload_with_min_eff_cost_by_idling_scipy(
w_2, workload_mat, cost_per_buffer, "revised simplex")
np.testing.assert_almost_equal(w_star_2, w_star_2b)
# Region 2 is in the monotone region W^+
c_bar_2 = si_object_2._get_level_set_for_current_workload(w_2)
assert StrategicIdlingHedging._is_w_inside_monotone_region(w_2, w_star_2, c_bar_2)
# Region 3 = {0 < 3 * w2 < w1}
w1 = 4
w2 = 0.05
w = np.array([[w1], [w2]])
si_object_3 = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=env.cost_per_buffer,
strategic_idling_params=strategic_idling_params)
w_star_3 = si_object_3._find_workload_with_min_eff_cost_by_idling(w)
w_star_3b = alt_methods_test.find_workload_with_min_eff_cost_by_idling_scipy(
w, workload_mat, cost_per_buffer, "revised simplex")
np.testing.assert_almost_equal(w_star_3, w_star_3b)
v_star_3 = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star_3, w)
psi_plus_3, c_plus_3, c_minus_3 = si_object_3._get_closest_face_and_level_sets(w_star_3,
v_star_3)
k_idling_set_3 = StrategicIdlingHedging._get_possible_idling_directions(w, beta_star,
psi_plus_3, v_star_3)
assert np.all(k_idling_set_3 == np.array([1]))
def test_get_possible_idling_directions_simple_reentrant_line():
env = examples.simple_reentrant_line_model(alpha1=0.5, mu1=1.1, mu2=1.2, mu3=1.3)
load, workload_mat, nu = wl.compute_load_workload_matrix(env)
# Find all c_bar vectors.
v = alt_methods_test.get_all_effective_cost_linear_vectors(workload_mat, env.cost_per_buffer)
strategic_idling_params = StrategicIdlingParams()
si_object = create_strategic_idling_object(workload_mat=workload_mat,
cost_per_buffer=env.cost_per_buffer,
strategic_idling_params=strategic_idling_params)
for i in range(100):
# Set w such that a path-wise optimal solution starting from w cannot exist (p. 187,
# CTCN online ed).
w1 = i + 1
w2 = load[1] / load[0] * w1 * 0.9
w = np.array([[w1], [w2]])
w_star = si_object._find_workload_with_min_eff_cost_by_idling(w)
w_star_b = alt_methods_test.find_workload_with_min_eff_cost_by_idling_scipy(
w, workload_mat, env.cost_per_buffer, "revised simplex")
np.testing.assert_almost_equal(w_star, w_star_b, decimal=5)
v_star = StrategicIdlingHedging._get_vector_defining_possible_idling_direction(w_star, w)
psi_plus, c_plus, c_minus = si_object._get_closest_face_and_level_sets(w_star, v_star)
np.testing.assert_almost_equal(np.hstack((c_plus, c_minus)).T, v, decimal=4)
def test_null_strategic_idling_values():
si_object = create_strategic_idling_object()
si_tuple = si_object._get_null_strategic_idling_output(w=np.array([[0]]))
assert si_tuple.beta_star == 0
assert si_tuple.k_idling_set.size == 0
assert si_tuple.sigma_2_h == 0
assert si_tuple.psi_plus is None
assert si_tuple.w_star is None
assert si_tuple.c_plus is None
assert si_tuple.c_bar is None
def test_is_pull_model_false():
assert not snc.agents.hedgehog.strategic_idling.strategic_idling_utils.is_pull_model('push')
def test_is_pull_model_true():
assert snc.agents.hedgehog.strategic_idling.strategic_idling_utils.is_pull_model('pull')
def test_get_index_deficit_buffers():
workload_mat = np.triu(- np.ones((3, 3)))
workload_mat = np.hstack((workload_mat, np.ones((3, 1))))
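    # workload_mat is now:
    #   [[-1, -1, -1,  1],
    #    [ 0, -1, -1,  1],
    #    [ 0,  0, -1,  1]]
    # Only column 3 is all-positive, so it is the one flagged as a deficit buffer.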
assert hedging_utils.get_index_deficit_buffers(workload_mat) == [3]
def test_get_index_deficit_buffers_3_buffers():
workload_mat = np.triu(- np.ones((3, 3)))
workload_mat = np.hstack((workload_mat, np.ones((3, 1)), np.ones((3, 1)), np.ones((3, 1))))
assert hedging_utils.get_index_deficit_buffers(workload_mat) == [3, 4, 5]
def test_get_index_deficit_buffers_2_buffers_not_consecutive():
workload_mat = np.triu(- np.ones((3, 3)))
workload_mat = np.hstack((np.ones((3, 1)), workload_mat, np.ones((3, 1))))
assert hedging_utils.get_index_deficit_buffers(workload_mat) == [0, 4]
def test_get_index_deficit_buffers_inconsistent_column():
workload_mat = np.hstack((np.triu(- np.ones((2, 2))), np.array([[-1], [1]]), np.ones((2, 1))))
with pytest.raises(AssertionError):
_ = hedging_utils.get_index_deficit_buffers(workload_mat)
def test_build_state_list_for_computing_cone_envelope_0_dim():
num_buffers = 0
init_x = 10
with pytest.raises(AssertionError):
_ = StrategicIdlingHedging._build_state_list_for_computing_cone_envelope(num_buffers, init_x)
def test_build_state_list_for_computing_cone_envelope_null_initx():
num_buffers = 1
init_x = 0
with pytest.raises(AssertionError):
_ = StrategicIdlingHedging._build_state_list_for_computing_cone_envelope(num_buffers, init_x)
def test_build_state_list_for_computing_cone_envelope_1_dim():
num_buffers = 1
init_x = 10
state_list = StrategicIdlingHedging._build_state_list_for_computing_cone_envelope(num_buffers,
init_x)
assert state_list == [[init_x]]
def test_build_state_list_for_computing_cone_envelope_2_dim():
num_buffers = 2
init_x = 10.
state_list = StrategicIdlingHedging._build_state_list_for_computing_cone_envelope(num_buffers,
init_x)
assert len(state_list) == 3
assert np.any(np.where(np.array([[init_x], [0]]) == state_list))
assert np.any(np.where(np.array([[0], [init_x]]) == state_list))
assert np.any(np.where(np.array([[init_x], [init_x]]) == state_list))
def test_build_state_list_for_computing_cone_envelope_3_dim():
num_buffers = 3
init_x = 10.
state_list = StrategicIdlingHedging._build_state_list_for_computing_cone_envelope(num_buffers,
init_x)
assert len(state_list) == 7
assert np.any(np.where(state_list == np.array([[init_x], [0], [0]])))
assert np.any(np.where(state_list == np.array([[0], [init_x], [0]])))
assert np.any(np.where(state_list == np.array([[0], [0], [init_x]])))
assert np.any(np.where(state_list == np.array([[init_x], [init_x], [0]])))
assert np.any(np.where(state_list == np.array([[init_x], [0], [init_x]])))
assert np.any(np.where(state_list == np.array([[0], [init_x], [init_x]])))
assert np.any(np.where(state_list == np.array([[init_x], [init_x], [init_x]])))
@pytest.mark.parametrize("max_points", [1, 3, 5])
def test_build_state_list_for_computing_cone_envelope_3_dim_max_points(max_points):
num_buffers = 3
init_x = 10.
state_list = StrategicIdlingHedging._build_state_list_for_computing_cone_envelope(
num_buffers, init_x, max_points)
assert len(state_list) == max_points
def test_build_workloads_for_computing_cone_envelope_1():
workload_mat = np.array([[-1, -2, 1], [0, -1, 0.5]])
w_list = StrategicIdlingHedging._build_workloads_for_computing_cone_envelope(workload_mat)
assert len(w_list) == 7
assert np.any(np.where(w_list == np.array([[-10], [0]])))
assert np.any(np.where(w_list == np.array([[-20], [-10]])))
    assert np.any(np.where(w_list == np.array([[10], [5]])))
import unittest
import numpy as np
from PCAfold import preprocess
from PCAfold import reduction
from PCAfold import analysis
class Preprocess(unittest.TestCase):
def test_preprocess__outlier_detection__allowed_calls(self):
X = np.random.rand(100,10)
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='auto', method='MULTIVARIATE TRIMMING', trimming_threshold=0.6)
self.assertTrue(not np.any(np.in1d(idx_outliers_removed, idx_outliers)))
except Exception:
self.assertTrue(False)
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='none', method='MULTIVARIATE TRIMMING', trimming_threshold=0.6)
self.assertTrue(not np.any(np.in1d(idx_outliers_removed, idx_outliers)))
except Exception:
self.assertTrue(False)
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='auto', method='MULTIVARIATE TRIMMING', trimming_threshold=0.2)
self.assertTrue(not np.any(np.in1d(idx_outliers_removed, idx_outliers)))
except Exception:
self.assertTrue(False)
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='auto', method='MULTIVARIATE TRIMMING', trimming_threshold=0.1)
self.assertTrue(not np.any(np.in1d(idx_outliers_removed, idx_outliers)))
except Exception:
self.assertTrue(False)
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='auto', method='PC CLASSIFIER')
self.assertTrue(not np.any(np.in1d(idx_outliers_removed, idx_outliers)))
except Exception:
self.assertTrue(False)
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='range', method='PC CLASSIFIER')
self.assertTrue(not np.any(np.in1d(idx_outliers_removed, idx_outliers)))
except Exception:
self.assertTrue(False)
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='pareto', method='PC CLASSIFIER')
self.assertTrue(not np.any(np.in1d(idx_outliers_removed, idx_outliers)))
except Exception:
self.assertTrue(False)
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='none', method='PC CLASSIFIER', trimming_threshold=0.0)
self.assertTrue(not np.any(np.in1d(idx_outliers_removed, idx_outliers)))
except Exception:
self.assertTrue(False)
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='none', method='PC CLASSIFIER', trimming_threshold=1.0)
self.assertTrue(not np.any(np.in1d(idx_outliers_removed, idx_outliers)))
except Exception:
self.assertTrue(False)
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='auto', method='PC CLASSIFIER', quantile_threshold=0.9)
self.assertTrue(not np.any(np.in1d(idx_outliers_removed, idx_outliers)))
except Exception:
self.assertTrue(False)
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='range', method='PC CLASSIFIER', quantile_threshold=0.99)
self.assertTrue(not np.any(np.in1d(idx_outliers_removed, idx_outliers)))
except Exception:
self.assertTrue(False)
try:
(idx_outliers_removed, idx_outliers) = preprocess.outlier_detection(X, scaling='pareto', method='PC CLASSIFIER', quantile_threshold=0.8)
            self.assertTrue(not np.any(np.in1d(idx_outliers_removed, idx_outliers)))
        except Exception:
            self.assertTrue(False)
import numpy as np
def unittest():
from PuzzleLib.Cuda import Backend
backendTest(Backend)
def backendTest(Backend):
for deviceIdx in range(Backend.getDeviceCount()):
bnd = Backend.getBackend(deviceIdx, initmode=1)
vectorTest(bnd)
for dtype, atol in bnd.dtypesSupported():
matrixTest(bnd, dtype, atol)
gbpGbpTest(bnd, dtype, atol)
gbpBgpTest(bnd, dtype, atol)
bgpGbpTest(bnd, dtype, atol)
bgpBgpTest(bnd, dtype, atol)
def vectorTest(bnd):
hostX, hostY = np.random.randn(5).astype(np.float32), np.random.randn(5).astype(np.float32)
x, y = bnd.GPUArray.toGpu(hostX), bnd.GPUArray.toGpu(hostY)
assert np.isclose(bnd.blas.dot(x, y), np.dot(hostX, hostY))
assert np.isclose(bnd.blas.l1norm(x), np.linalg.norm(hostX, ord=1))
assert np.isclose(bnd.blas.l2norm(x), np.linalg.norm(hostX, ord=2))
def matrixTest(bnd, dtype, atol):
hostA, hostB = np.random.randn(5, 3).astype(dtype), np.random.randn(3, 4).astype(dtype)
A, B = bnd.GPUArray.toGpu(hostA), bnd.GPUArray.toGpu(hostB)
C = bnd.blas.gemm(A, B)
hostC = C.get()
assert np.allclose(np.dot(hostA, hostB), hostC)
D = bnd.blas.gemm(B, C, transpB=True)
hostD = D.get()
assert np.allclose(np.dot(hostB, hostC.T), hostD)
E = bnd.blas.gemm(D, B, transpA=True)
assert np.allclose(np.dot(hostD.T, hostB), E.get(), atol=atol)
def gbpGbpTest(bnd, dtype, atol):
formatA, formatB, formatOut = bnd.GroupFormat.gbp.value, bnd.GroupFormat.gbp.value, bnd.GroupFormat.gbp.value
groups = 3
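	# Layout conventions inferred from the shapes used below: gbp puts the group
	# axis first, (groups, rows, cols); bgp puts it in the middle, (rows, groups, cols).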
hostA = np.random.randn(groups, 4, 3).astype(dtype)
hostB = np.random.randn(groups, hostA.shape[2], 5).astype(dtype)
hostC = np.random.randn(groups, hostA.shape[1], 6).astype(dtype)
hostD = np.random.randn(groups, 8, hostC.shape[2]).astype(dtype)
A, B = bnd.GPUArray.toGpu(hostA), bnd.GPUArray.toGpu(hostB)
C, D = bnd.GPUArray.toGpu(hostC), bnd.GPUArray.toGpu(hostD)
out = bnd.blas.gemmBatched(A, B, formatA=formatA, formatB=formatB, formatOut=formatOut)
hostOut = np.empty(out.shape, dtype=dtype)
for i in range(groups):
np.dot(hostA[i], hostB[i], out=hostOut[i])
assert np.allclose(hostOut, out.get(), atol=atol)
out = bnd.blas.gemmBatched(C, A, formatA=formatA, formatB=formatB, formatOut=formatOut, transpA=True)
hostOut = np.empty(out.shape, dtype=dtype)
for i in range(groups):
np.dot(hostC[i].T, hostA[i], out=hostOut[i])
assert np.allclose(hostOut, out.get(), atol=atol)
out = bnd.blas.gemmBatched(C, D, formatA=formatA, formatB=formatB, formatOut=formatOut, transpB=True)
hostOut = np.empty(out.shape, dtype=dtype)
for i in range(groups):
np.dot(hostC[i], hostD[i].T, out=hostOut[i])
assert np.allclose(hostOut, out.get(), atol=atol)
def gbpBgpTest(bnd, dtype, atol):
formatA, formatB, formatOut = bnd.GroupFormat.gbp.value, bnd.GroupFormat.bgp.value, bnd.GroupFormat.bgp.value
groups = 3
hostA = np.random.randn(groups, 4, 7).astype(dtype)
hostB = np.random.randn(hostA.shape[2], groups, 5).astype(dtype)
hostC = np.random.randn(hostA.shape[1], groups, 8).astype(dtype)
hostD = np.random.randn(6, groups, hostA.shape[2]).astype(dtype)
A, B = bnd.GPUArray.toGpu(hostA), bnd.GPUArray.toGpu(hostB)
C, D = bnd.GPUArray.toGpu(hostC), bnd.GPUArray.toGpu(hostD)
out = bnd.blas.gemmBatched(A, B, formatA=formatA, formatB=formatB, formatOut=formatOut)
hostOut = np.empty(out.shape, dtype=dtype)
for i in range(groups):
hostOut[:, i, :] = np.dot(hostA[i], hostB[:, i, :])
assert np.allclose(hostOut, out.get(), atol=atol)
out = bnd.blas.gemmBatched(A, C, formatA=formatA, formatB=formatB, formatOut=formatOut, transpA=True)
hostOut = np.empty(out.shape, dtype=dtype)
for i in range(groups):
hostOut[:, i, :] = np.dot(hostA[i].T, hostC[:, i, :])
assert np.allclose(hostOut, out.get(), atol=atol)
out = bnd.blas.gemmBatched(A, D, formatA=formatA, formatB=formatB, formatOut=formatOut, transpB=True)
hostOut = np.empty(out.shape, dtype=dtype)
for i in range(groups):
hostOut[:, i, :] = np.dot(hostA[i], hostD[:, i, :].T)
assert np.allclose(hostOut, out.get(), atol=atol)
def bgpGbpTest(bnd, dtype, atol):
formatA, formatB, formatOut = bnd.GroupFormat.bgp.value, bnd.GroupFormat.gbp.value, bnd.GroupFormat.bgp.value
groups = 3
hostA = np.random.randn(4, groups, 7).astype(dtype)
hostB = np.random.randn(groups, hostA.shape[2], 5).astype(dtype)
hostC = np.random.randn(groups, hostA.shape[0], 8).astype(dtype)
hostD = np.random.randn(groups, 6, hostA.shape[2]).astype(dtype)
A, B = bnd.GPUArray.toGpu(hostA), bnd.GPUArray.toGpu(hostB)
C, D = bnd.GPUArray.toGpu(hostC), bnd.GPUArray.toGpu(hostD)
out = bnd.blas.gemmBatched(A, B, formatA=formatA, formatB=formatB, formatOut=formatOut)
hostOut = np.empty(out.shape, dtype=dtype)
for i in range(groups):
hostOut[:, i, :] = np.dot(hostA[:, i, :], hostB[i])
assert np.allclose(hostOut, out.get(), atol=atol)
out = bnd.blas.gemmBatched(A, C, formatA=formatA, formatB=formatB, formatOut=formatOut, transpA=True)
hostOut = np.empty(out.shape, dtype=dtype)
for i in range(groups):
hostOut[:, i, :] = np.dot(hostA[:, i, :].T, hostC[i])
assert np.allclose(hostOut, out.get(), atol=atol)
out = bnd.blas.gemmBatched(A, D, formatA=formatA, formatB=formatB, formatOut=formatOut, transpB=True)
hostOut = np.empty(out.shape, dtype=dtype)
for i in range(groups):
		hostOut[:, i, :] = np.dot(hostA[:, i, :], hostD[i].T)
	assert np.allclose(hostOut, out.get(), atol=atol)
# Copyright (c) 2019, <NAME>
# See LICENSE file for details: <https://github.com/moble/scri/blob/master/LICENSE>
import functools
import numpy as np
from . import jit
maxexp = np.finfo(float).maxexp * np.log(2) * 0.99  # largest argument np.exp can take without overflowing, with a small safety margin
@jit
def _transition_function(x, x0, x1, y0, y1):
transition = np.empty_like(x)
ydiff = y1 - y0
i = 0
while x[i] <= x0:
i += 1
i0 = i
transition[:i0] = y0
while x[i] < x1:
tau = (x[i] - x0) / (x1 - x0)
exponent = 1.0 / tau - 1.0 / (1.0 - tau)
if exponent >= maxexp:
transition[i] = y0
else:
transition[i] = y0 + ydiff / (1.0 + np.exp(exponent))
i += 1
i1 = i
transition[i1:] = y1
return transition, i0, i1
def transition_function(x, x0, x1, y0=0.0, y1=1.0, return_indices=False):
"""Return a smooth function that is constant outside (x0, x1).
This uses the standard smooth (C^infinity) function with derivatives of compact support to
transition between the two values, being constant outside of the transition region (x0, x1).
Parameters
==========
x: array_like
One-dimensional monotonic array of floats.
x0: float
Value before which the output will equal `y0`.
x1: float
Value after which the output will equal `y1`.
y0: float [defaults to 0.0]
Value of the output before `x0`.
y1: float [defaults to 1.0]
Value of the output after `x1`.
return_indices: bool [defaults to False]
If True, return the array and the indices (i0, i1) at which the transition occurs, such that
t[:i0]==y0 and t[i1:]==y1.
"""
if return_indices:
return _transition_function(x, x0, x1, y0, y1)
return _transition_function(x, x0, x1, y0, y1)[0]
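# Example usage (a sketch with illustrative values; `x` must be monotonic and
# the defaults y0=0.0, y1=1.0 are used here):
#
#     t = np.linspace(0.0, 10.0, 1001)
#     f, i0, i1 = transition_function(t, 2.0, 8.0, return_indices=True)
#     assert np.all(f[:i0] == 0.0) and np.all(f[i1:] == 1.0)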
@jit
def transition_function_derivative(x, x0, x1, y0=0.0, y1=1.0):
"""Return derivative of the transition function
This function simply returns the derivative of `transition_function` with respect to the `x`
parameter. The parameters to this function are identical to those of that function.
Parameters
==========
x: array_like
One-dimensional monotonic array of floats.
x0: float
Value before which the output will equal `y0`.
x1: float
Value after which the output will equal `y1`.
y0: float [defaults to 0.0]
Value of the output before `x0`.
y1: float [defaults to 1.0]
Value of the output after `x1`.
"""
transition_prime = np.zeros_like(x)
ydiff = y1 - y0
i = 0
while x[i] <= x0:
i += 1
while x[i] < x1:
tau = (x[i] - x0) / (x1 - x0)
exponent = 1.0 / tau - 1.0 / (1.0 - tau)
if exponent >= maxexp:
transition_prime[i] = 0.0
else:
exponential = np.exp(1.0 / tau - 1.0 / (1.0 - tau))
transition_prime[i] = (
-ydiff
* exponential
* (-1.0 / tau ** 2 - 1.0 / (1.0 - tau) ** 2)
* (1 / (x1 - x0))
/ (1.0 + exponential) ** 2
)
i += 1
return transition_prime
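# Quick numerical sanity check (a sketch; the grid size and tolerance are illustrative):
#
#     t = np.linspace(0.0, 10.0, 100001)
#     f = transition_function(t, 2.0, 8.0)
#     df = transition_function_derivative(t, 2.0, 8.0)
#     assert np.allclose(df, np.gradient(f, t), atol=1e-3)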
@jit
def bump_function(x, x0, x1, x2, x3, y0=0.0, y12=1.0, y3=0.0):
"""Return a smooth bump function that is constant outside (x0, x3) and inside (x1, x2).
This uses the standard C^infinity function with derivatives of compact support to transition
between the the given values. By default, this is a standard bump function that is 0 outside of
(x0, x3), and is 1 inside (x1, x2), but the constant values can all be adjusted optionally.
Parameters
==========
x: array_like
One-dimensional monotonic array of floats.
x0: float
Value before which the output will equal `y0`.
x1, x2: float
Values between which the output will equal `y12`.
x3: float
Value after which the output will equal `y3`.
y0: float [defaults to 0.0]
Value of the output before `x0`.
y12: float [defaults to 1.0]
Value of the output after `x1` but before `x2`.
y3: float [defaults to 0.0]
Value of the output after `x3`.
"""
bump = np.empty_like(x)
ydiff01 = y12 - y0
ydiff23 = y3 - y12
i = 0
while x[i] <= x0:
i += 1
bump[:i] = y0
while x[i] < x1:
tau = (x[i] - x0) / (x1 - x0)
exponent = 1.0 / tau - 1.0 / (1.0 - tau)
if exponent >= maxexp:
bump[i] = y0
else:
bump[i] = y0 + ydiff01 / (1.0 + | np.exp(exponent) | numpy.exp |
import numpy as np
class IBM:
def __init__(self, config):
self.D = config["ibm"].get('vertical_mixing', 0) # Vertical mixing [m*2/s]
self.dt = config['dt']
self.x = np.array([])
self.y = np.array([])
self.pid = | np.array([]) | numpy.array |
"""
Dsacalib/MS_IO.PY
<NAME>, <EMAIL>, 10/2019
Routines to interact with CASA measurement sets and calibration tables.
"""
# To do:
# Replace to_deg w/ astropy versions
# Always import scipy before importing casatools.
import shutil
import os
import glob
import traceback
#from scipy.interpolate import interp1d
import numpy as np
#from pkg_resources import resource_filename
import yaml
import scipy # pylint: disable=unused-import
import astropy.units as u
import astropy.constants as c
import casatools as cc
from casatasks import importuvfits, virtualconcat
from casacore.tables import addImagingColumns, table
from pyuvdata import UVData
from dsautils import dsa_store
from dsautils import calstatus as cs
import dsautils.cnf as dsc
from dsamfs.fringestopping import calc_uvw_blt
from dsacalib import constants as ct
import dsacalib.utils as du
from dsacalib.fringestopping import calc_uvw, amplitude_sky_model
from antpos.utils import get_itrf # pylint: disable=wrong-import-order
from astropy.utils import iers # pylint: disable=wrong-import-order
iers.conf.iers_auto_url_mirror = ct.IERS_TABLE
iers.conf.auto_max_age = None
from astropy.time import Time # pylint: disable=wrong-import-position wrong-import-order
de = dsa_store.DsaStore()
CONF = dsc.Conf()
CORR_PARAMS = CONF.get('corr')
REFMJD = CONF.get('fringe')['refmjd']
def simulate_ms(ofile, tname, anum, xx, yy, zz, diam, mount, pos_obs, spwname,
freq, deltafreq, freqresolution, nchannels, integrationtime,
obstm, dt, source, stoptime, autocorr, fullpol):
"""Simulates a measurement set with cross-correlations only.
WARNING: Not simulating autocorrelations correctly regardless of inclusion
of autocorr parameter.
Parameters
----------
ofile : str
The full path to which the measurement set will be written.
tname : str
The telescope name.
xx, yy, zz : arrays
The X, Y and Z ITRF coordinates of the antennas, in meters.
diam : float
The dish diameter in meters.
mount : str
The mount type, e.g. 'alt-az'.
pos_obs : CASA measure instance
The location of the observatory.
spwname : str
The name of the spectral window, e.g. 'L-band'.
freq : str
The central frequency, as a CASA-recognized string, e.g. '1.4GHz'.
deltafreq : str
The size of each channel, as a CASA-recognized string, e.g. '1.24kHz'.
freqresolution : str
The frequency resolution, as a CASA-recognized string, e.g. '1.24kHz'.
nchannels : int
The number of frequency channels.
integrationtime : str
The subintegration time, i.e. the width of each time bin, e.g. '1.4s'.
obstm : float
The start time of the observation in MJD.
dt : float
The offset between the CASA start time and the true start time in days.
source : dsacalib.utils.source instance
The source observed (or the phase-center).
    stoptime : str
        The end time of the observation relative to the start, as a
        CASA-recognized string in seconds (e.g. '300s').
autocorr : boolean
Set to ``True`` if the visibilities include autocorrelations, ``False``
        if they only include crosscorrelations.
"""
me = cc.measures()
qa = cc.quanta()
sm = cc.simulator()
sm.open(ofile)
sm.setconfig(
telescopename=tname,
x=xx,
y=yy,
z=zz,
dishdiameter=diam,
mount=mount,
antname=anum,
coordsystem='global',
referencelocation=pos_obs
)
sm.setspwindow(
spwname=spwname,
freq=freq,
deltafreq=deltafreq,
freqresolution=freqresolution,
nchannels=nchannels,
stokes='XX XY YX YY' if fullpol else 'XX YY'
)
# TODO: use hourangle instead
sm.settimes(
integrationtime=integrationtime,
usehourangle=False,
referencetime=me.epoch('utc', qa.quantity(obstm-dt, 'd'))
)
sm.setfield(
sourcename=source.name,
sourcedirection=me.direction(
source.epoch,
qa.quantity(source.ra.to_value(u.rad), 'rad'),
qa.quantity(source.dec.to_value(u.rad), 'rad')
)
)
sm.setauto(autocorrwt=1.0 if autocorr else 0.0)
sm.observe(source.name, spwname, starttime='0s', stoptime=stoptime)
sm.close()
def convert_to_ms(source, vis, obstm, ofile, bname, antenna_order,
tsamp=ct.TSAMP*ct.NINT, nint=1, antpos=None, model=None,
dt=ct.CASA_TIME_OFFSET, dsa10=True):
""" Writes visibilities to an ms.
Uses the casa simulator tool to write the metadata to an ms, then uses the
casa ms tool to replace the visibilities with the observed data.
Parameters
----------
source : source class instance
The calibrator (or position) used for fringestopping.
vis : ndarray
The complex visibilities, dimensions (baseline, time, channel,
polarization).
obstm : float
The start time of the observation in MJD.
ofile : str
The name for the created ms. Writes to `ofile`.ms.
bname : list
The list of baselines names in the form [[ant1, ant2],...].
antenna_order: list
The list of the antennas, in CASA ordering.
tsamp : float
The sampling time of the input visibilities in seconds. Defaults to
the value `tsamp`*`nint` as defined in `dsacalib.constants`.
nint : int
The number of time bins to integrate by before saving to a measurement
set. Defaults 1.
antpos : str
The full path to the text file containing ITRF antenna positions or the
csv file containing the station positions in longitude and latitude.
Defaults `dsacalib.constants.PKG_DATA_PATH`/antpos_ITRF.txt.
model : ndarray
The visibility model to write to the measurement set (and against which
gain calibration will be done). Must have the same shape as the
visibilities `vis`. If given a value of ``None``, an array of ones will
be used as the model. Defaults ``None``.
dt : float
The offset between the CASA start time and the data start time in days.
Defaults to the value of `casa_time_offset` in `dsacalib.constants`.
dsa10 : boolean
Set to ``True`` if the data are from the dsa10 correlator. Defaults
``True``.
"""
if antpos is None:
antpos = '{0}/antpos_ITRF.txt'.format(ct.PKG_DATA_PATH)
vis = vis.astype(np.complex128)
if model is not None:
model = model.astype(np.complex128)
nant = len(antenna_order)
me = cc.measures()
# Observatory parameters
tname = 'OVRO_MMA'
diam = 4.5 # m
obs = 'OVRO_MMA'
mount = 'alt-az'
pos_obs = me.observatory(obs)
# Backend
if dsa10:
spwname = 'L_BAND'
freq = '1.4871533196875GHz'
deltafreq = '-0.244140625MHz'
freqresolution = deltafreq
else:
spwname = 'L_BAND'
freq = '1.28GHz'
deltafreq = '40.6901041666667kHz'
freqresolution = deltafreq
(_, _, nchannels, npol) = vis.shape
# Rebin visibilities
integrationtime = '{0}s'.format(tsamp*nint)
if nint != 1:
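        # Pad the time axis with NaNs up to a multiple of nint, then average
        # every nint samples; nanmean ignores the padded entries, so a partial
        # final bin is still averaged correctly.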
npad = nint-vis.shape[1]%nint
if npad == nint:
npad = 0
vis = np.nanmean(np.pad(vis, ((0, 0), (0, npad), (0, 0), (0, 0)),
mode='constant',
constant_values=(np.nan, )).reshape(
vis.shape[0], -1, nint, vis.shape[2],
vis.shape[3]), axis=2)
if model is not None:
model = np.nanmean(np.pad(model,
((0, 0), (0, npad), (0, 0), (0, 0)),
mode='constant',
constant_values=(np.nan, )).reshape(
model.shape[0], -1, nint,
model.shape[2], model.shape[3]),
axis=2)
stoptime = '{0}s'.format(vis.shape[1]*tsamp*nint)
anum, xx, yy, zz = du.get_antpos_itrf(antpos)
# Sort the antenna positions
idx_order = sorted([int(a)-1 for a in antenna_order])
anum = np.array(anum)[idx_order]
xx = np.array(xx)
yy = np.array(yy)
zz = np.array(zz)
xx = xx[idx_order]
yy = yy[idx_order]
zz = zz[idx_order]
nints = np.zeros(nant, dtype=int)
for i, an in enumerate(anum):
nints[i] = np.sum(np.array(bname)[:, 0] == an)
nints, anum, xx, yy, zz = zip(*sorted(zip(nints, anum, xx, yy, zz),
reverse=True))
# Check that the visibilities are ordered correctly by checking the order
# of baselines in bname
idx_order = []
autocorr = bname[0][0] == bname[0][1]
for i in range(nant):
for j in range(i if autocorr else i+1, nant):
idx_order += [bname.index([anum[i], anum[j]])]
assert idx_order == list(np.arange(len(bname), dtype=int)), \
'Visibilities not ordered by baseline'
anum = [str(a) for a in anum]
simulate_ms(
'{0}.ms'.format(ofile), tname, anum, xx, yy, zz, diam, mount, pos_obs,
spwname, freq, deltafreq, freqresolution, nchannels, integrationtime,
obstm, dt, source, stoptime, autocorr, fullpol=False
)
# Check that the time is correct
ms = cc.ms()
ms.open('{0}.ms'.format(ofile))
tstart_ms = ms.summary()['BeginTime']
ms.close()
print('autocorr :', autocorr)
if np.abs(tstart_ms-obstm) > 1e-10:
dt = dt+(tstart_ms-obstm)
print('Updating casa time offset to {0}s'.format(
dt*ct.SECONDS_PER_DAY))
print('Rerunning simulator')
simulate_ms(
'{0}.ms'.format(ofile), tname, anum, xx, yy, zz, diam, mount,
pos_obs, spwname, freq, deltafreq, freqresolution, nchannels,
integrationtime, obstm, dt, source, stoptime, autocorr,
fullpol=False
)
# Reopen the measurement set and write the observed visibilities
ms = cc.ms()
ms.open('{0}.ms'.format(ofile), nomodify=False)
ms.selectinit(datadescid=0)
rec = ms.getdata(["data"])
# rec['data'] has shape [scan, channel, [time*baseline]]
vis = vis.T.reshape((npol, nchannels, -1))
rec['data'] = vis
ms.putdata(rec)
ms.close()
ms = cc.ms()
ms.open('{0}.ms'.format(ofile), nomodify=False)
if model is None:
model = np.ones(vis.shape, dtype=complex)
else:
model = model.T.reshape((npol, nchannels, -1))
rec = ms.getdata(["model_data"])
rec['model_data'] = model
ms.putdata(rec)
ms.close()
# Check that the time is correct
ms = cc.ms()
ms.open('{0}.ms'.format(ofile))
tstart_ms = ms.summary()['BeginTime']
tstart_ms2 = ms.getdata('TIME')['time'][0]/ct.SECONDS_PER_DAY
ms.close()
assert np.abs(tstart_ms-(tstart_ms2-tsamp*nint/ct.SECONDS_PER_DAY/2)) \
< 1e-10, 'Data start time does not agree with MS start time'
assert np.abs(tstart_ms - obstm) < 1e-10, \
'Measurement set start time does not agree with input tstart'
    print('Visibilities written to ms {0}.ms'.format(ofile))
def extract_vis_from_ms(msname, data='data', swapaxes=True):
"""Extracts visibilities from a CASA measurement set.
Parameters
----------
msname : str
The measurement set. Opens `msname`.ms
data : str
The visibilities to extract. Can be `data`, `model` or `corrected`.
Returns
-------
vals : ndarray
The visibilities, dimensions (baseline, time, spw, freq, pol).
time : array
The time of each integration in days.
fobs : array
The frequency of each channel in GHz.
flags : ndarray
Flags for the visibilities, same shape as vals. True if flagged.
ant1, ant2 : array
The antenna indices for each baselines in the visibilities.
pt_dec : float
The pointing declination of the array. (Note: Not the phase center, but
the physical pointing of the antennas.)
spw : array
The spectral window indices.
orig_shape : list
The order of the first three axes in the ms.
"""
with table('{0}.ms'.format(msname)) as tb:
ant1 = np.array(tb.ANTENNA1[:])
ant2 = np.array(tb.ANTENNA2[:])
vals = np.array(tb.getcol(data.upper())[:])
flags = np.array(tb.FLAG[:])
time = np.array(tb.TIME[:])
spw = np.array(tb.DATA_DESC_ID[:])
with table('{0}.ms/SPECTRAL_WINDOW'.format(msname)) as tb:
fobs = (np.array(tb.col('CHAN_FREQ')[:])/1e9).reshape(-1)
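    # Encode each (ant1, ant2) pair as a single unique baseline ID (this looks
    # like the extended UVFITS convention, 2048*a1 + a2 + 2**16); only
    # uniqueness per pair matters for the grouping done downstream.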
baseline = 2048*(ant1+1)+(ant2+1)+2**16
time, vals, flags, ant1, ant2, spw, orig_shape = reshape_calibration_data(
vals, flags, ant1, ant2, baseline, time, spw, swapaxes)
with table('{0}.ms/FIELD'.format(msname)) as tb:
pt_dec = tb.PHASE_DIR[:][0][0][1]
return vals, time/ct.SECONDS_PER_DAY, fobs, flags, ant1, ant2, pt_dec, \
spw, orig_shape
def read_caltable(tablename, cparam=False, reshape=True):
"""Requires that each spw has the same number of frequency channels.
Parameters
----------
tablename : str
The full path to the calibration table.
cparam : bool
        If True, reads the column CPARAM in the calibration table. Otherwise
reads FPARAM.
Returns
-------
vals : ndarray
The visibilities, dimensions (baseline, time, spw, freq, pol).
time : array
The time of each integration in days.
flags : ndarray
Flags for the visibilities, same shape as vals. True if flagged.
ant1, ant2 : array
The antenna indices for each baselines in the visibilities.
"""
with table(tablename) as tb:
try:
spw = np.array(tb.SPECTRAL_WINDOW_ID[:])
except AttributeError:
spw = np.array([0])
time = np.array(tb.TIME[:])
if cparam:
vals = np.array(tb.CPARAM[:])
else:
vals = np.array(tb.FPARAM[:])
flags = np.array(tb.FLAG[:])
ant1 = np.array(tb.ANTENNA1[:])
ant2 = np.array(tb.ANTENNA2[:])
baseline = 2048*(ant1+1)+(ant2+1)+2**16
if reshape:
time, vals, flags, ant1, ant2, _, _ = reshape_calibration_data(
vals, flags, ant1, ant2, baseline, time, spw)
return vals, time/ct.SECONDS_PER_DAY, flags, ant1, ant2
def reshape_calibration_data(
vals, flags, ant1, ant2, baseline, time, spw, swapaxes=True
):
"""Reshape calibration or measurement set data.
Reshapes the 0th axis of the input data `vals` and `flags` from a
combined (baseline-time-spw) axis into 3 axes (baseline, time, spw).
Parameters
----------
vals : ndarray
The input values, shape (baseline-time-spw, freq, pol).
flags : ndarray
Flag array, same shape as vals.
ant1, ant2 : array
The antennas in the baseline, same length as the 0th axis of `vals`.
baseline : array
The baseline index, same length as the 0th axis of `vals`.
time : array
The time of each integration, same length as the 0th axis of `vals`.
spw : array
The spectral window index of each integration, same length as the 0th
axis of `vals`.
Returns
-------
time : array
Unique times, same length as the time axis of the output `vals`.
vals, flags : ndarray
The reshaped input arrays, dimensions (baseline, time, spw, freq, pol)
ant1, ant2 : array
ant1 and ant2 for unique baselines, same length as the baseline axis of
the output `vals`.
orig_shape : list
The original order of the time, baseline and spw axes in the ms.
"""
    if len(np.unique(ant1)) == len(np.unique(ant2)):
nbl = len(np.unique(baseline))
else:
nbl = max([len(np.unique(ant1)), len(np.unique(ant2))])
nspw = len(np.unique(spw))
ntime = len(time)//nbl//nspw
nfreq = vals.shape[-2]
npol = vals.shape[-1]
if np.all(baseline[:ntime*nspw] == baseline[0]):
if np.all(time[:nspw] == time[0]):
orig_shape = ['baseline', 'time', 'spw']
# baseline, time, spw
time = time.reshape(nbl, ntime, nspw)[0, :, 0]
vals = vals.reshape(nbl, ntime, nspw, nfreq, npol)
flags = flags.reshape(nbl, ntime, nspw, nfreq, npol)
ant1 = ant1.reshape(nbl, ntime, nspw)[:, 0, 0]
ant2 = ant2.reshape(nbl, ntime, nspw)[:, 0, 0]
spw = spw.reshape(nbl, ntime, nspw)[0, 0, :]
else:
# baseline, spw, time
orig_shape = ['baseline', 'spw', 'time']
assert np.all(spw[:ntime] == spw[0])
time = time.reshape(nbl, nspw, ntime)[0, 0, :]
vals = vals.reshape(nbl, nspw, ntime, nfreq, npol)
flags = flags.reshape(nbl, nspw, ntime, nfreq, npol)
if swapaxes:
vals = vals.swapaxes(1, 2)
flags = flags.swapaxes(1, 2)
ant1 = ant1.reshape(nbl, nspw, ntime)[:, 0, 0]
ant2 = ant2.reshape(nbl, nspw, ntime)[:, 0, 0]
spw = spw.reshape(nbl, nspw, ntime)[0, :, 0]
elif np.all(time[:nspw*nbl] == time[0]):
if np.all(baseline[:nspw] == baseline[0]):
# time, baseline, spw
orig_shape = ['time', 'baseline', 'spw']
time = time.reshape(ntime, nbl, nspw)[:, 0, 0]
vals = vals.reshape(ntime, nbl, nspw, nfreq, npol)
flags = flags.reshape(ntime, nbl, nspw, nfreq, npol)
if swapaxes:
vals = vals.swapaxes(0, 1)
flags = flags.swapaxes(0, 1)
ant1 = ant1.reshape(ntime, nbl, nspw)[0, :, 0]
ant2 = ant2.reshape(ntime, nbl, nspw)[0, :, 0]
spw = spw.reshape(ntime, nbl, nspw)[0, 0, :]
else:
orig_shape = ['time', 'spw', 'baseline']
assert np.all(spw[:nbl] == spw[0])
time = time.reshape(ntime, nspw, nbl)[:, 0, 0]
vals = vals.reshape(ntime, nspw, nbl, nfreq, npol)
flags = flags.reshape(ntime, nspw, nbl, nfreq, npol)
if swapaxes:
vals = vals.swapaxes(1, 2).swapaxes(0, 1)
flags = flags.swapaxes(1, 2).swapaxes(0, 1)
ant1 = ant1.reshape(ntime, nspw, nbl)[0, 0, :]
ant2 = ant2.reshape(ntime, nspw, nbl)[0, 0, :]
spw = spw.reshape(ntime, nspw, nbl)[0, :, 0]
else:
assert np.all(spw[:nbl*ntime] == spw[0])
if np.all(baseline[:ntime] == baseline[0]):
# spw, baseline, time
orig_shape = ['spw', 'baseline', 'time']
time = time.reshape(nspw, nbl, ntime)[0, 0, :]
vals = vals.reshape(nspw, nbl, ntime, nfreq, npol)
flags = flags.reshape(nspw, nbl, ntime, nfreq, npol)
if swapaxes:
vals = vals.swapaxes(0, 1).swapaxes(1, 2)
flags = flags.swapaxes(0, 1).swapaxes(1, 2)
ant1 = ant1.reshape(nspw, nbl, ntime)[0, :, 0]
ant2 = ant2.reshape(nspw, nbl, ntime)[0, :, 0]
spw = spw.reshape(nspw, nbl, ntime)[:, 0, 0]
else:
assert np.all(time[:nbl] == time[0])
# spw, time, bl
orig_shape = ['spw', 'time', 'baseline']
time = time.reshape(nspw, ntime, nbl)[0, :, 0]
vals = vals.reshape(nspw, ntime, nbl, nfreq, npol)
flags = flags.reshape(nspw, ntime, nbl, nfreq, npol)
if swapaxes:
vals = vals.swapaxes(0, 2)
flags = flags.swapaxes(0, 2)
ant1 = ant1.reshape(nspw, ntime, nbl)[0, 0, :]
ant2 = ant2.reshape(nspw, ntime, nbl)[0, 0, :]
spw = spw.reshape(nspw, ntime, nbl)[:, 0, 0]
return time, vals, flags, ant1, ant2, spw, orig_shape
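# Shape round-trip sketch (hypothetical sizes): an ms written in
# (time, baseline, spw) order with ntime=10, nbl=45, nspw=2, nfreq=384 and
# npol=2 arrives here with vals.shape == (900, 384, 2) and, with
# swapaxes=True, leaves as (45, 10, 2, 384, 2) alongside
# orig_shape == ['time', 'baseline', 'spw'].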
def caltable_to_etcd(
msname, calname, caltime, status, pols=None, logger=None
):
r"""Copies calibration values from delay and gain tables to etcd.
The dictionary passed to etcd should look like: {"ant_num": <i>,
"time": <d>, "pol", [<s>, <s>], "gainamp": [<d>, <d>],
"gainphase": [<d>, <d>], "delay": [<i>, <i>], "calsource": <s>,
"gaincaltime_offset": <d>, "delaycaltime_offset": <d>, 'sim': <b>,
'status': <i>}
Parameters
----------
msname : str
The measurement set name, will use solutions created from the
measurement set `msname`.ms.
calname : str
The calibrator name. Will open the calibration tables
`msname`\_`calname`\_kcal and `msname`\_`calname`\_gcal_ant.
caltime : float
The time of calibration transit in mjd.
status : int
The status of the calibration. Decode with dsautils.calstatus.
pols : list
The names of the polarizations. If ``None``, will be set to
``['B', 'A']``. Defaults ``None``.
logger : dsautils.dsa_syslog.DsaSyslogger() instance
        Logger to write messages to. If None, messages are printed.
"""
if pols is None:
pols = ['B', 'A']
try:
# Complex gains for each antenna.
amps, tamp, flags, ant1, ant2 = read_caltable(
'{0}_{1}_gacal'.format(msname, calname),
cparam=True
)
mask = np.ones(flags.shape)
mask[flags == 1] = np.nan
amps = amps*mask
phase, _tphase, flags, ant1, ant2 = read_caltable(
'{0}_{1}_gpcal'.format(msname, calname),
cparam=True
)
mask = np.ones(flags.shape)
mask[flags == 1] = np.nan
phase = phase*mask
if | np.all(ant2 == ant2[0]) | numpy.all |
import sys
import time
from multiprocessing import Pool
from unittest import TestCase, main
import numpy as np
import pandas as pd
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_series_equal, assert_frame_equal
sys.path.append("../")
from valhalla.extract import DataExtractor
"""
test.h5
The data consists of four groups (bcateid, price, model, pid), in the same
format as the data provided by Kakao. It exists only to test that the
DataLoader behaves correctly; the values themselves carry no meaning.
bcateid price model pid
0 24 -1 Q4081781803
1 17 -1 W4203425504
2 24 84750 인터파크/오피스메인/프린터/라벨/도트/바코드/기타/프린터 기타 G4453903364
3 35 -1 중성펜/젤러펜 필기구 볼펜류 볼펜심제브라 Refill SK U4418629259
4 40 -1 무형광아기세탁망원형[45cm] I4066071748
5 54 87210 근조화환 J4586931195
6 35 -1 기타 F4662379886
7 34 -1 O3764058858
8 14 966000 인터파크/에트로/여성가방/숄더백(천연가죽) J3959473240
9 3 16620 인터파크/얀케이스/스마트폰/태블릿케이스/태블릿케이스/파우치/갤럭시용케이스/파우치 K4487826783
"""
class DataLoaderSimpleTest(TestCase):
def setUp(self):
self.dl = DataExtractor("test.h5", 'train')
def tearDown(self):
del self.dl
def test_init_dataloader(self):
pass
def test_length_of_dataloader(self):
self.assertEqual(len(self.dl), 10)
def test_columns_of_dataloader(self):
answer = ['bcateid', 'price', 'model', 'pid']
        self.assertEqual(len(answer), len(self.dl.columns))  # check lengths match
        self.assertListEqual(list(set(answer)), list(
            set(self.dl.columns)))  # check elements match
def test_get_item_by_column_name_bcateid(self):
pred = self.dl['bcateid']
answer = pd.Series([24, 17, 24, 35, 40, 54, 35, 34, 14, 3],
dtype='int32', name='bcateid')
assert_series_equal(pred, answer)
def test_get_item_by_coumn_name_model(self):
pred = self.dl['model']
answer = pd.Series(["",
"",
"인터파크/오피스메인/프린터/라벨/도트/바코드/기타/프린터 기타",
'중성펜/젤러펜 필기구 볼펜류 볼펜심제브라 Refill SK',
'무형광아기세탁망원형[45cm]',
'근조화환',
'기타',
'',
'인터파크/에트로/여성가방/숄더백(천연가죽)',
'인터파크/얀케이스/스마트폰/태블릿케이스/태블릿케이스/파우치/갤럭시용케이스/파우치'],
name='model')
assert_series_equal(pred, answer)
def test_get_item_by_multiple_column(self):
pred = self.dl[['bcateid', 'price']]
answer = pd.DataFrame([[24, -1],
[17, -1],
[24, 84750],
[35, -1],
[40, -1],
[54, 87210],
[35, -1],
[34, -1],
[14, 966000],
[3, 16620]],
columns=['bcateid', 'price'], dtype='int32')
assert_frame_equal(pred, answer)
def test_get_item_by_column_and_index(self):
pred = self.dl['bcateid', 0]
answer = pd.Series([24], name='bcateid')
assert_series_equal(pred, answer)
def test_get_item_by_column_and_slice(self):
pred = self.dl['bcateid', 0:3]
answer = pd.Series([24, 17, 24], name='bcateid', dtype='int32')
assert_series_equal(pred, answer)
def test_get_item_by_multiple_column_and_slice(self):
pred = self.dl[['bcateid', 'price'], 0:3]
answer = pd.DataFrame([[24, -1],
[17, -1],
[24, 84750]],
columns=['bcateid', 'price'], dtype='int32')
assert_frame_equal(pred, answer)
def test_get_item_by_multiple_column_and_list(self):
pred = self.dl[['bcateid', 'price'], [0, 3]]
answer = pd.DataFrame([[24, -1],
[35, -1]],
columns=['bcateid', 'price'], dtype='int32')
assert_frame_equal(pred, answer)
def test_get_item_by_multiple_column_and_list_2(self):
pred = self.dl[['bcateid', 'price'], [5, 3, 4, 6, 0]]
answer = pd.DataFrame([
[54, 87210],
[35, -1],
[40, -1],
[35, -1],
[24, -1]],
columns=['bcateid', 'price'], dtype='int32')
assert_frame_equal(pred, answer)
def test_get_value_by_multiprocessing(self):
pool = Pool(100)
rc = pool.map_async(get_add_value, range(0, 1000))
result = rc.get()
self.assertEqual(sum(result), 0)
def get_add_value(i):
dex = DataExtractor("test.h5", 'train')
for j in range(0, 10):
time.sleep(0.0001)
try:
x = dex['model', j]
        except Exception:
return True
return False
class DataLoaderNumpyOutTest(TestCase):
def setUp(self):
self.dl = DataExtractor("test.h5", 'train', df_format=False)
def tearDown(self):
del self.dl
def test_init_dataloader(self):
pass
def test_length_of_dataloader(self):
self.assertEqual(len(self.dl), 10)
def test_columns_of_dataloader(self):
answer = ['bcateid', 'price', 'model', 'pid']
        self.assertEqual(len(answer), len(self.dl.columns))  # check lengths match
        self.assertListEqual(list(set(answer)), list(
            set(self.dl.columns)))  # check elements match
def test_get_item_by_column_name_bcateid(self):
pred = self.dl['bcateid']
answer = np.array([24, 17, 24, 35, 40, 54, 35, 34, 14, 3],
dtype='int32')
assert_array_equal(pred, answer)
def test_get_item_by_coumn_name_model(self):
pred = self.dl['model']
answer = np.array(["",
"",
"인터파크/오피스메인/프린터/라벨/도트/바코드/기타/프린터 기타",
'중성펜/젤러펜 필기구 볼펜류 볼펜심제브라 Refill SK',
'무형광아기세탁망원형[45cm]',
'근조화환',
'기타',
'',
'인터파크/에트로/여성가방/숄더백(천연가죽)',
'인터파크/얀케이스/스마트폰/태블릿케이스/태블릿케이스/파우치/갤럭시용케이스/파우치'])
assert_array_equal(pred, answer)
def test_get_item_by_multiple_column(self):
pred = self.dl[['bcateid', 'price']]
answer = np.array([[24, -1],
[17, -1],
[24, 84750],
[35, -1],
[40, -1],
[54, 87210],
[35, -1],
[34, -1],
[14, 966000],
[3, 16620]],
dtype='int32')
assert_array_equal(pred, answer)
def test_get_item_by_column_and_index(self):
pred = self.dl['bcateid', 0]
answer = np.array([24])
assert_array_equal(pred, answer)
def test_get_item_by_column_and_slice(self):
pred = self.dl['bcateid', 0:3]
answer = np.array([24, 17, 24], dtype='int32')
assert_array_equal(pred, answer)
def test_get_item_by_multiple_column_and_slice(self):
pred = self.dl[['bcateid', 'price'], 0:3]
answer = np.array([[24, -1],
[17, -1],
[24, 84750]],
dtype='int32')
assert_array_equal(pred, answer)
def test_get_item_by_multiple_column_and_list(self):
pred = self.dl[['bcateid', 'price'], [0, 3]]
answer = np.array([[24, -1],
[35, -1]],
dtype='int32')
assert_array_equal(pred, answer)
def test_get_item_by_multiple_column_and_list_2(self):
pred = self.dl[['bcateid', 'price'], [5, 3, 4, 6, 0]]
answer = np.array([
[54, 87210],
[35, -1],
[40, -1],
[35, -1],
[24, -1]],
dtype='int32')
| assert_array_equal(pred, answer) | numpy.testing.assert_array_equal |
import argparse
import torch
import numpy as np
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
import json
import sys
from PIL import Image
parser = argparse.ArgumentParser(description='Predict image class with a trained model')
parser.add_argument('image_path', action = 'store',help = 'Path for Image data')
parser.add_argument('checkpoint', action = 'store',help = 'Checkpoint file to load the model from')
parser.add_argument('--top_k', action='store',dest = 'topk_val', default = 3,help= 'Number of top probabilities to be returned.')
parser.add_argument('--category_names', action = 'store',
dest = 'category_names', default = 'cat_to_name.json',help = 'Category Mapping')
parser.add_argument('--gpu', action = "store_true", default = False,
help = 'Turn GPU mode on or off.')
results = parser.parse_args()
image_path = results.image_path
checkpoint = results.checkpoint
topk_val = results.topk_val
category_names = results.category_names
def load_checkpoint(filepath):
checkpoint = torch.load(filepath,map_location=lambda storage, loc: storage)
arch = checkpoint['structure'].lower()
print(arch)
if arch == 'alexnet':
model_chk = models.alexnet(pretrained=True)
elif arch == 'vgg19':
model_chk = models.vgg19(pretrained=True)
elif arch == 'densenet121':
model_chk = models.densenet121(pretrained=True)
else:
        print('Model not recognized.')
sys.exit()
model_chk.classifier = nn.Sequential(nn.Linear(checkpoint['input_size'], checkpoint['hidden_layer1']),
nn.ReLU(),
nn.Linear(checkpoint['hidden_layer1'] ,512),
nn.ReLU(),
nn.Linear(512,256),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(256, checkpoint['output_size']),
nn.LogSoftmax(dim=1))
model_chk.class_to_idx = checkpoint['class_to_idx']
model_chk.load_state_dict(checkpoint['state_dict'])
return model_chk
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns a Numpy array
'''
img_loader = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor()])
pil_image = Image.open(image)
pil_image = img_loader(pil_image).float()
np_image = np.array(pil_image)
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
np_image = ( | np.transpose(np_image, (1, 2, 0)) | numpy.transpose |
import io
import time
import csv
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from timm.loss import SoftTargetCrossEntropy, LabelSmoothingCrossEntropy
import hdvw.ops.meters as meters
@torch.no_grad()
def test(model, n_ff, dataset,
transform=None, smoothing=0.0,
cutoffs=(0.0, 0.9), bins= | np.linspace(0.0, 1.0, 11) | numpy.linspace |
import sys
import os
import numpy as np
import unittest
sys.path.append(os.path.abspath('..'))
from ols import ols
from numpy.linalg import matrix_rank
import numpy as np
class TestRandomStuff(unittest.TestCase):
def test_singular(self):
N = 1000
K = 10
y = np.random.random((N, 1))
X = | np.ones((N, K)) | numpy.ones |
from boids import Boids
from nose.tools import assert_almost_equal
import os
import yaml
import numpy as np
def test_bad_boids_regression():
    regression_data=yaml.safe_load(open(os.path.join(os.path.dirname(__file__),'fixtures','fixture.yml')))
boid_data=np.array(regression_data["before"])
Boids().update_boids(boid_data)
after_data = np.array(regression_data["after"])
for after,before in zip(after_data,boid_data):
for after_value,before_value in zip(after,before):
| np.testing.assert_almost_equal(after_value,before_value) | numpy.testing.assert_almost_equal |
#!/usr/bin/python
from __future__ import division
import numpy as np
import math
from scipy.special import *
from numpy.matlib import repmat
from scipy.signal import lfilter
from scikits.audiolab import Sndfile, Format
import argparse
import sys
np.seterr('ignore')
def MMSESTSA(signal, fs, IS=0.25, W=1024, NoiseMargin=3, saved_params=None):
SP = 0.4
wnd = np.hamming(W)
y = segment(signal, W, SP, wnd)
Y = np.fft.fft(y, axis=0)
YPhase = np.angle(Y[0:int(np.fix(len(Y)/2))+1,:])
Y = np.abs(Y[0:int(np.fix(len(Y)/2))+1,:])
numberOfFrames = Y.shape[1]
NoiseLength = 9
NoiseCounter = 0
alpha = 0.99
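    # Number of leading frames assumed to be noise-only, derived from the
    # initial-silence duration IS (seconds) given the frame hop of SP*W samples.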
NIS = int(np.fix(((IS * fs - W) / (SP * W) + 1)))
    # Initial noise estimates: per-frequency means over the first NIS frames.
    N = np.mean(Y[:, 0:NIS].T, axis=0).T
    LambdaD = np.mean((Y[:, 0:NIS].T) ** 2, axis=0).T
if saved_params != None:
NIS = 0
N = saved_params['N']
LambdaD = saved_params['LambdaD']
NoiseCounter = saved_params['NoiseCounter']
G = np.ones(N.shape)
Gamma = G
Gamma1p5 = math.gamma(1.5)
X = np.zeros(Y.shape)
for i in range(numberOfFrames):
Y_i = Y[:,i]
if i < NIS:
SpeechFlag = 0
NoiseCounter = 100
else:
SpeechFlag, NoiseCounter = vad(Y_i, N, NoiseCounter, NoiseMargin)
if SpeechFlag == 0:
N = (NoiseLength * N + Y_i) / (NoiseLength + 1)
LambdaD = (NoiseLength * LambdaD + (Y_i ** 2)) / (1 + NoiseLength)
gammaNew = (Y_i ** 2) / LambdaD
xi = alpha * (G ** 2) * Gamma + (1 - alpha) * np.maximum(gammaNew - 1, 0)
Gamma = gammaNew
nu = Gamma * xi / (1 + xi)
# log MMSE algo
#G = (xi/(1 + xi)) * np.exp(0.5 * expn(1, nu))
# MMSE STSA algo
G = (Gamma1p5 * | np.sqrt(nu) | numpy.sqrt |
"""Run Demonstration Image Classification Experiments.
"""
import sys,os
sys.path.append('..')
import numpy as np
from models.BrokenModel import BrokenModel
import glob
import tensorflow as tf
import pandas as pd
from timeit import default_timer as timer
from .calloc import loadChannel,quantInit
from .simmods import *
from errConceal.caltec import *
from errConceal.altec import *
from errConceal.tc_algos import *
import cv2
from PIL import Image
# ---------------------------------------------------------------------------- #
def fnRunImgClassDemo(modelDict,splitLayerDict,ecDict,batch_size,path_base,transDict,outputDir):
print('TensorFlow version')
print(tf.__version__)
model_path = modelDict['fullModel']
customObjects = modelDict['customObjects']
task = modelDict['task']
normalize = modelDict['normalize']
reshapeDims = modelDict['reshapeDims']
splitLayer = splitLayerDict['split']
mobile_model_path = splitLayerDict['MobileModel']
cloud_model_path = splitLayerDict['CloudModel']
rowsPerPacket = transDict['rowsperpacket']
quantization = transDict['quantization']
numberOfBits_1 = quantization[1]['numberOfBits']
numberOfBits_2 = quantization[2]['numberOfBits']
channel = transDict['channel']
res_data_dir = outputDir['resDataDir'] # directory for loss maps.
sim_data_dir = outputDir['simDataDir'] # directory for simulation results.
# ------------------------------------------------------------------------ #
# tensorflow.keras deep model loading.
loaded_model = tf.keras.models.load_model(os.path.join(model_path))
loaded_model_config = loaded_model.get_config()
loaded_model_name = loaded_model_config['name']
# Check if mobile and cloud sub-models are already available:
if os.path.isfile(mobile_model_path) and os.path.isfile(cloud_model_path):
print(f'Sub-models of {loaded_model_name} split at {splitLayer} are available.')
mobile_model = tf.keras.models.load_model(os.path.join(mobile_model_path))
cloud_model = tf.keras.models.load_model(os.path.join(cloud_model_path))
else:
# if not, split the deep model.
# Object for splitting a tf.keras model into a mobile sub-model and a cloud
# sub-model at the chosen split layer 'splitLayer'.
testModel = BrokenModel(loaded_model, splitLayer, customObjects)
testModel.splitModel()
mobile_model = testModel.deviceModel
cloud_model = testModel.remoteModel
# Save the mobile and cloud sub-model
mobile_model.save(mobile_model_path)
cloud_model.save(cloud_model_path)
# ---------------------------------------------------------------------------- #
# Create results directory
if 'GilbertChannel' in channel:
lossProbability = channel['GilbertChannel']['lossProbability']
burstLength = channel['GilbertChannel']['burstLength']
results_dir = os.path.join(sim_data_dir,path_base,loaded_model_name,'demo',splitLayer+'_lp_'+str(lossProbability)+'_Bl_'+str(burstLength))
channel_flag = 'GC'
elif 'RandomLossChannel' in channel:
lossProbability = channel['RandomLossChannel']['lossProbability']
results_dir = os.path.join(sim_data_dir,path_base,loaded_model_name,'demo',splitLayer+'_lp_'+str(lossProbability))
channel_flag = 'RL'
elif 'ExternalChannel' in channel:
print('External packet traces imported')
results_dir = os.path.join(sim_data_dir,path_base,loaded_model_name,'demo',splitLayer+'_ext_trace')
channel_flag = 'EX'
num_channels = transDict['channel']['ExternalChannel']['num_channels']
ext_dir = os.path.join(res_data_dir,path_base,loaded_model_name,splitLayer)
else:
# No lossy channel. This means we are doing a quantization experiment.
channel_flag = 'NC'
results_dir = os.path.join(sim_data_dir,path_base,loaded_model_name,'demo',splitLayer+'_NoChannel')
MC_runs = [0,1] # with no lossy channel, there's no need to do monte carlo runs because each monte carlo run would give the same results.
if channel_flag in ['GC','RL','EX']:
# Only load altec weights if we will be doing error concealment.
tc_weights_path = ecDict['ALTeC']['weightspath']
altec_w_path = os.path.join(tc_weights_path,loaded_model_name,splitLayer,splitLayer+'_rpp_'+str(rowsPerPacket)+'_'+str(numberOfBits_1)+'Bits_tensor_weights.npy')
altec_pkt_w = np.load(altec_w_path)
print(f'Loaded ALTeC weights for splitLayer {splitLayer} and {rowsPerPacket} rows per packet. Shape {np.shape(altec_pkt_w)}')
halrtc_iters = ecDict['HaLRTC']['numiters']
silrtc_iters = ecDict['SiLRTC']['numiters']
inpaint_radius = ecDict['InpaintNS']['radius']
os.makedirs(results_dir,exist_ok=True)
res_filename = '_'+str(numberOfBits_1)+'Bits_'+str(numberOfBits_2)+'Bits_'
# ------------------------------------------------------------------------ #
# Objects for the channel, quantization.
if channel_flag != 'EX':
channel = loadChannel(channel)
quant_tensor1 = quantInit(quantization,tensor_id = 1)
quant_tensor2 = quantInit(quantization,tensor_id = 2)
# ------------------------------------------------------------------------ #
# Load the dataset
dataset_x_files,dataset_y_labels,file_names = fn_Data_PreProcessing_ImgClass(path_base,reshapeDims,normalize)
# ------------------------------------------------------------------------ #
# Process the dataset.
batched_y_labels = [dataset_y_labels[i:i + batch_size] for i in range(0, len(dataset_y_labels), batch_size)]
batched_x_files = [dataset_x_files[i: i + batch_size] for i in range(0,len(dataset_x_files),batch_size)]
if channel_flag == 'EX':
loss_matrix_mc = []
print('Loading external packet traces')
for i_mc in range(MC_runs[0],MC_runs[1]):
# Load external packet traces as loss matrices.
lossMap_list = []
for i_c in range(num_channels):
df = pd.read_excel(os.path.join(ext_dir,'Rpp_'+str(rowsPerPacket)+'_MC_'+str(i_mc)+'.xlsx'),sheet_name=[str(i_c)],engine='openpyxl')
                lossMap_channel = (df[str(i_c)].to_numpy())[:,1:].astype(bool)
lossMap_list.append(lossMap_channel)
loss_matrix_all = np.dstack(lossMap_list)
loss_matrix_ex = [loss_matrix_all[k_batch:k_batch+batch_size,:,:] for k_batch in range(0,np.shape(loss_matrix_all)[0],batch_size)]
loss_matrix_mc.append(loss_matrix_ex)
# lists to store results.
true_labels = []
top1_pred_full_model = []
top1_pred_split_model = []
top5_pred_full_model = []
top5_pred_split_model = []
top1_pred_caltec = []
top5_pred_caltec = []
top1_pred_altec = []
top5_pred_altec = []
top1_pred_halrtc = []
top5_pred_halrtc = []
top1_pred_silrtc = []
top5_pred_silrtc = []
top1_pred_inpaint = []
top5_pred_inpaint = []
top1_conf_full = []
top1_conf_split = []
top1_conf_caltec = []
top1_conf_altec = []
top1_conf_halrtc = []
top1_conf_silrtc = []
top1_conf_inpaint = []
for i_b in range(len(batched_y_labels)):
# Run through Monte Carlo experiments through each batch.
print(f"Batch {i_b}")
batch_labels = np.asarray(batched_y_labels[i_b],dtype=np.int64)
true_labels.extend(batch_labels)
batch_imgs = batched_x_files[i_b]
batch_imgs_stacked = | np.vstack([i[np.newaxis,...] for i in batch_imgs]) | numpy.vstack |
# implementation of DQN with experience replay and target networks to play Atari breakout
import gym
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from gym.core import ObservationWrapper
from gym.spaces import Box
import cv2
import gym
from framebuffer import FrameBuffer
from replay_buffer import ReplayBuffer
import tensorflow as tf
from keras.layers import Conv2D, Dense, Flatten
import keras
from tqdm import trange
from pandas import DataFrame
# Processing game image
class PreprocessAtari(ObservationWrapper):
def __init__(self, env):
"""A gym wrapper that crops, scales image into the desired shapes and optionally grayscales it."""
ObservationWrapper.__init__(self, env)
self.img_size = (64, 64)
self.observation_space = Box(0.0, 1.0, (self.img_size[0], self.img_size[1], 1))
def _observation(self, img):
"""what happens to each observation"""
# crop image (top and bottom, top from 34, bottom remove last 16)
img = img[34:-16, :, :]
# resize image
img = cv2.resize(img, self.img_size)
# grayscale
img = img.mean(-1, keepdims=True)
# convert pixels to range (0,1)
img = img.astype('float32') / 255.
return img
class DQNAgent:
def __init__(self, name, state_shape, n_actions, epsilon=0, reuse=False):
"""A simple DQN agent"""
with tf.variable_scope(name, reuse=reuse):
self.network = keras.models.Sequential()
self.network.add(Conv2D(16, (3, 3), strides=2, activation='relu', input_shape=state_shape))
self.network.add(Conv2D(32, (3, 3), strides=2, activation='relu'))
self.network.add(Conv2D(64, (3, 3), strides=2, activation='relu'))
self.network.add(Flatten())
self.network.add(Dense(256, activation='relu'))
self.network.add(Dense(n_actions, activation='linear'))
# prepare a graph for agent step
self.state_t = tf.placeholder('float32', [None, ] + list(state_shape))
self.qvalues_t = self.get_symbolic_qvalues(self.state_t)
self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name)
self.epsilon = epsilon
def get_symbolic_qvalues(self, state_t):
"""takes agent's observation, returns qvalues. Both are tf Tensors"""
qvalues = self.network(state_t)
assert tf.is_numeric_tensor(qvalues) and qvalues.shape.ndims == 2, \
"please return 2d tf tensor of qvalues [you got %s]" % repr(qvalues)
assert int(qvalues.shape[1]) == n_actions
return qvalues
def get_qvalues(self, state_t):
"""Same as symbolic step except it operates on numpy arrays"""
sess = tf.get_default_session()
return sess.run(self.qvalues_t, {self.state_t: state_t})
def sample_actions(self, qvalues):
"""pick actions given qvalues. Uses epsilon-greedy exploration strategy. """
epsilon = self.epsilon
batch_size, n_actions = qvalues.shape
random_actions = np.random.choice(n_actions, size=batch_size)
best_actions = qvalues.argmax(axis=-1)
should_explore = np.random.choice([0, 1], batch_size, p=[1 - epsilon, epsilon])
return np.where(should_explore, random_actions, best_actions)
def make_env():
env = gym.make("BreakoutDeterministic-v4")
env = PreprocessAtari(env)
env = FrameBuffer(env, n_frames=4, dim_order='tensorflow')
return env
def evaluate(env, agent, n_games=1, greedy=False, t_max=10000):
""" Plays n_games full games. If greedy, picks actions as argmax(qvalues). Returns mean reward. """
rewards = []
for _ in range(n_games):
s = env.reset()
reward = 0
for _ in range(t_max):
qvalues = agent.get_qvalues([s])
action = qvalues.argmax(axis=-1)[0] if greedy else agent.sample_actions(qvalues)[0]
s, r, done, _ = env.step(action)
reward += r
if done: break
rewards.append(reward)
return | np.mean(rewards) | numpy.mean |
"""
Contains a wrapper around the TEOBResum code which satisfies the interface requirements of Bilby.
Assumes that EOBResum is installed in the current python environment, and that libconfig is also available.
This file doesn't do everything EOBResum can; for instance, it's possible to return all of the modes along with the strain (using the 'output_hpc' keyword), but we just return the full strain.
EOBResum takes several keyword arguments which are passed along:
'use_mode_lm': Array of which modes to use. The model uses a 1D representation, k, of (l,m) modes with m>0 in all cases such that increasing k corresponds to increasing l. The conversion formula is k = l*(l-1)/2 + m -2.
E.g. 'use_mode_lm': [0, 1, 2]
will use the (2,1), (2,2) and (3,1) modes.
'ode_abstol': Absolute error tolerance for ode integrator
'ode_reltol': Relative error tolerance for ode integrator
"""
import numpy as np
import EOBRun_module
from scipy.fft import fft
from pycbc.types import TimeSeries
from pycbc.waveform.utils import taper_timeseries
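# Illustrative helper (added for clarity; not part of EOBRun_module or used
# below) implementing the mode-index formula quoted in the module docstring.
def _mode_index(l, m):
    """Return the 1D mode index k = l*(l-1)/2 + m - 2 for an (l, m) mode.

    E.g. (2, 1) -> 0, (2, 2) -> 1 and (3, 1) -> 2, matching the
    'use_mode_lm' example above.
    """
    assert l >= 2 and 0 < m <= l, "modes require l >= 2 and 0 < m <= l"
    return l * (l - 1) // 2 + m - 2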
def eccentric_binary_black_hole_eob_resum_nonspinning(
frequency_array,
mass_1,
mass_2,
eccentricity,
chi_1,
chi_2,
luminosity_distance,
theta_jn,
phase,
**kwargs
):
domain = None
if "FrequencyDomain" in kwargs and not "TimeDomain" in kwargs:
domain = 0
return native_frequency_domain(
frequency_array,
mass_1,
mass_2,
eccentricity,
0.,
0.,
luminosity_distance,
theta_jn,
phase,
**kwargs
)
elif "TimeDomain" in kwargs:
domain = 1
return fourier_transform_time_domain(
frequency_array,
mass_1,
mass_2,
eccentricity,
0.,
0.,
luminosity_distance,
theta_jn,
phase,
**kwargs
)
else:
raise RuntimeError("Either TimeDomain or FrequencyDomain should be true")
def eccentric_binary_black_hole_eob_resum_aligned_spins(
frequency_array,
mass_1,
mass_2,
eccentricity,
chi_1,
chi_2,
luminosity_distance,
theta_jn,
phase,
**kwargs
):
domain = None
if "FrequencyDomain" in kwargs and not "TimeDomain" in kwargs:
domain = 0
return native_frequency_domain(
frequency_array,
mass_1,
mass_2,
eccentricity,
chi_1,
chi_2,
luminosity_distance,
theta_jn,
phase,
**kwargs
)
elif "TimeDomain" in kwargs:
domain = 1
return fourier_transform_time_domain(
frequency_array,
mass_1,
mass_2,
eccentricity,
chi_1,
chi_2,
luminosity_distance,
theta_jn,
phase,
**kwargs
)
else:
raise RuntimeError("Either TimeDomain or FrequencyDomain should be true")
def native_frequency_domain(
frequency_array,
mass_1,
mass_2,
eccentricity,
chi_1,
chi_2,
luminosity_distance,
theta_jn,
phase,
**kwargs
):
if "maximum_frequency" not in kwargs:
maximum_frequency = frequency_array[-1]
else:
maximum_frequency = kwargs["maximum_frequency"]
if "minimum_frequency" not in kwargs:
minimum_frequency = frequency_array[0]
else:
minimum_frequency = kwargs["minimum_frequency"]
frequency_bounds = (frequency_array >= minimum_frequency) * (
frequency_array <= maximum_frequency
)
df = frequency_array[1] - frequency_array[0]
pars = {
"M": mass_1 + mass_2,
"q": mass_1 / mass_2,
"ecc": eccentricity,
"Lambda1": 0.0,
"Lambda2": 0.0,
"chi1": chi_1,
"chi2": chi_2,
"domain": 1, # 1 for TD, 1 for FD
"arg_out": 0, # Output hlm/hflm. Default = 0
# "use_mode_lm": kwargs['use_mode_lm'], # List of modes to use/output through EOBRunPy
# "srate_interp": kwargs['sampling_rate'], # srate at which to interpolate. Default = 4096.
"srate_interp": int(max(frequency_array) * 2),
"use_geometric_units": 0, # Output quantities in geometric units. Default = 1
"initial_frequency": kwargs[
"minimum_frequency"
], # in Hz if use_geometric_units = 0, else in geometric units
"interp_uniform_grid": 1, # Interpolate mode by mode on a uniform grid. Default = 0 (no interpolation)
"distance": luminosity_distance, # Mpc,
"inclination": theta_jn,
"output_hpc": 0,
"df": df,
**kwargs
}
f, hp_real, hp_imag, hc_real, hc_imag = EOBRun_module.EOBRunPy(pars)
    h_plus = np.zeros_like(frequency_array, dtype=complex)
    h_cross = np.zeros_like(frequency_array, dtype=complex)
h_plus *= frequency_bounds
h_cross *= frequency_bounds
nonzero_mask = (frequency_array >= minimum_frequency) & (
frequency_array <= maximum_frequency
)
h_plus.real[nonzero_mask] = hp_real
h_plus.imag[nonzero_mask] = hp_imag
h_cross.real[nonzero_mask] = hc_real
h_cross.imag[nonzero_mask] = hc_imag
return dict(plus=h_plus, cross=h_cross)
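# Usage sketch (every number below is illustrative, not a fiducial choice;
# requires EOBRun_module to be importable):
#
#     farr = np.linspace(20.0, 1024.0, 2 ** 15)
#     pols = native_frequency_domain(farr, 30.0, 25.0, 0.1, 0.0, 0.0, 400.0,
#                                    0.4, 0.0, minimum_frequency=20.0)
#     hp, hc = pols['plus'], pols['cross']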
def fourier_transform_time_domain(
frequency_array,
mass_1,
mass_2,
eccentricity,
chi_1,
chi_2,
luminosity_distance,
theta_jn,
phase,
**kwargs
):
    # srate_interp is derived below from the maximum frequency of frequency_array.
if "maximum_frequency" not in kwargs:
maximum_frequency = frequency_array[-1]
else:
maximum_frequency = kwargs["maximum_frequency"]
if "minimum_frequency" not in kwargs:
minimum_frequency = frequency_array[0]
else:
minimum_frequency = kwargs["minimum_frequency"]
frequency_bounds = (frequency_array >= minimum_frequency) * (
frequency_array <= maximum_frequency
)
df = frequency_array[1] - frequency_array[0]
srate_interp = int(max(frequency_array) * 2)
dt = 1.0 / (srate_interp)
pars = {
"M": mass_1 + mass_2,
"q": mass_1 / mass_2,
"ecc": eccentricity,
"Lambda1": 0.0,
"Lambda2": 0.0,
"chi1": chi_1,
"chi2": chi_2,
"domain": 0, # 0 for TD, 1 for FD
"arg_out": 0, # Output hlm/hflm. Default = 0
# "use_mode_lm": kwargs['use_mode_lm'], # List of modes to use/output through EOBRunPy
"srate_interp": srate_interp,
"use_geometric_units": 0, # Output quantities in geometric units. Default = 1
"initial_frequency": kwargs.pop(
"minimum_frequency"
), # in Hz if use_geometric_units = 0, else in geometric units
"interp_uniform_grid": 1, # Interpolate mode by mode on a uniform grid. Default = 0 (no interpolation)
"distance": luminosity_distance, # Mpc,
"inclination": theta_jn,
"output_hpc": 0,
"dt_interp": dt,
**kwargs,
}
    # Since EOB performs an ODE integration, we can't control the actual length of the result, which means the df of the FT won't match the df of frequency_array. It's possible to interpolate, but I noticed spurious behaviour at the lower frequency. So we fix df by shortening the time-domain waveform. (This might not be a good idea...)
T_max = 1.0 / df
N_max = int(T_max / dt)
t, hp, hc = EOBRun_module.EOBRunPy(pars)
if len(t) > N_max:
t = t[-N_max:]
hp = hp[-N_max:]
hc = hc[-N_max:]
h_plus = taper_timeseries(
TimeSeries(hp, delta_t=dt), tapermethod="TAPER_START"
).to_frequencyseries()
h_cross = taper_timeseries(
TimeSeries(hc, delta_t=dt), tapermethod="TAPER_START"
).to_frequencyseries()
    else:  # len(t) <= N_max: left-pad with zeros up to N_max
deficit = N_max - len(t)
new_hp = np.zeros(N_max)
new_hc = np.zeros(N_max)
new_hp[deficit:N_max] = taper_timeseries(TimeSeries(hp, delta_t=dt), tapermethod="TAPER_START")
new_hc[deficit:N_max] = taper_timeseries(TimeSeries(hc, delta_t=dt), tapermethod="TAPER_START")
hp, hc = new_hp, new_hc
h_plus = TimeSeries(new_hp, delta_t=dt).to_frequencyseries()
h_cross = TimeSeries(new_hc, delta_t=dt).to_frequencyseries()
h_plus *= frequency_bounds
h_cross *= frequency_bounds
# nonzero_mask = (frequency_array >= minimum_frequency) \
# & (frequency_array <= maximum_frequency)
# h_plus.real[nonzero_mask] = hp_real
# h_plus.imag[nonzero_mask] = hp_imag
# h_cross.real[nonzero_mask] = hc_real
# h_cross.imag[nonzero_mask] = hc_imag
assert np.all(np.array(h_plus.sample_frequencies) == frequency_array)
return dict(plus= | np.array(h_plus) | numpy.array |
#!/usr/bin/env python
"""
BSD 2-Clause License
Copyright (c) 2021 (<EMAIL>)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
import os
import sys
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision
import pickle
import random
import time
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.backends.backend_pdf import PdfPages
import math
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from torch.utils.tensorboard import SummaryWriter
import itertools
import seaborn as sns
import pandas as pd
import argparse
from distutils.util import strtobool
import json
import math
sys.path.insert(0, '/hopfield-layers/')
from modules.transformer import HopfieldEncoderLayer, HopfieldDecoderLayer
from modules import Hopfield, HopfieldPooling
sys.path.insert(0, '/basecaller-modules')
from read_config import read_configuration_file
from cnn import SimpleCNN, BasicBlock, SimpleCNN_res
from cnn import outputLen_Conv, outputLen_AvgPool, outputLen_MaxPool
from hopfield_encoder_nosqrt import Embedder, PositionalEncoding, Encoder
from hopfield_decoder import Decoder
from early_stopping import EarlyStopping
from lr_scheduler2 import NoamOpt
from plot_performance import plot_error_accuarcy, plot_error_accuarcy_iterations_train, plot_error_accuarcy_iterations_val, plot_activations, plot_heatmap, bestPerformance2File
from plot_softmax import calculate_k_patterns, plot_softmax_head
plt.switch_backend('agg')
# in torch.Size([256, 1, 250])
#after layer 1 torch.Size([256, 100, 210])
#after layer 2 torch.Size([256, 100, 180])
#after layer 3 torch.Size([256, 100, 85])
class Range(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __eq__(self, other):
return self.start <= other <= self.end
def make_argparser():
parser = argparse.ArgumentParser(description='Nanopore Basecaller')
parser.add_argument('-i', '--input', required = True,
help="File path to the pickle input file.")
parser.add_argument('-o', '--output', required = True,
help="Output folder name")
parser.add_argument('-g', '--gpu_port', default="None",
help="Port on GPU mode")
parser.add_argument('-s', '--set_seed', type=int, default=1234,
help="Set seed")
parser.add_argument('-b', '--batch_size', type=int, default=256,
help="Batch size")
parser.add_argument('-e', '--epochs', type=int, default=500,
help="Number of epochs")
parser.add_argument('-v', '--make_validation', type=int, default=1000,
help="Make every n updates evaluation on the validation set")
parser.add_argument('-max_w', '--max_window_size', type=int, default=1000,
help="Maximum window size")
parser.add_argument('-max_t', '--max_target_size', type=int, default=200,
help="Maximum target size")
# CNN arguments
parser.add_argument("--input_bias_cnn", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True)
parser.add_argument('-c', '--channel_number', nargs='+', type=int, default=[256, 256, 256],
help="Number of output channels in Encoder-CNN")
parser.add_argument('-l', '--cnn_layers', type=int, default=2,
help="Number of layers in Encoder-CNN")
parser.add_argument('--pooling_type', default="None",
help="Pooling type in Encoder-CNN")
parser.add_argument('--strides', nargs='+', type=int, default=[1, 2, 1],
help="Strides in Encoder-CNN")
parser.add_argument('--kernel', nargs='+', type=int, default=[11, 11, 11],
help="Kernel sizes in Encoder-CNN")
parser.add_argument('--padding', nargs='+', type=int, default=[0, 0, 0],
help="Padding in Encoder-CNN")
parser.add_argument("--dropout_cnn", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
parser.add_argument("--dropout_input", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
parser.add_argument('--drop_prob', type=float, default=Range(0.0, 1.0),
help="Dropout probability Encoder-CNN")
parser.add_argument("--batch_norm", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
parser.add_argument('--src_emb', default="cnn",
help="Embedding type of input. Options: 'cnn', 'residual_blocks', 'hopfield_pooling'")
parser.add_argument('--nhead_embedding', type=int, default=6,
help="number of heads in the multiheadattention models")
# Hopfield arguments
parser.add_argument("--input_bias_hopfield", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True)
parser.add_argument('-u', '--hidden_units', type=int, default=256,
help="Number of hidden units in the Transformer")
parser.add_argument('--dff', type=int, default=1024,
help="Number of hidden units in the Feed-Forward Layer of the Transformer")
parser.add_argument('--lstm_layers', type=int, default=5,
help="Number of layers in the Transformer")
parser.add_argument('--nhead', type=int, default=6,
help="number of heads in the multiheadattention models")
parser.add_argument('--drop_transf', type=float, default=Range(0.0, 1.0),
help="Dropout probability Transformer")
parser.add_argument('--dropout_pos', type=float, default=Range(0.0, 1.0),
help="Positional Dropout probability")
parser.add_argument('--scaling', default="None",
help="Gradient clipping")
parser.add_argument("--pattern_projection_as_connected", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
parser.add_argument("--normalize_stored_pattern", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
parser.add_argument("--normalize_stored_pattern_affine", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
parser.add_argument("--normalize_state_pattern", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
parser.add_argument("--normalize_state_pattern_affine", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
parser.add_argument("--normalize_pattern_projection", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
parser.add_argument("--normalize_pattern_projection_affine", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
parser.add_argument("--stored_pattern_as_static", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
parser.add_argument("--state_pattern_as_static", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
parser.add_argument("--pattern_projection_as_static", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
parser.add_argument('--weight_decay', type=float, default=0,
help="Weight decay")
parser.add_argument('--learning_rate', type=float, default=0.001,
help="Learning rate")
parser.add_argument("--decrease_lr", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
parser.add_argument("--xavier_init", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=True)
parser.add_argument('--warmup_steps', type=int, default=2000,
help="Weight decay")
parser.add_argument('--gradient_clip', default="None",
help="Gradient clipping")
# early stopping
parser.add_argument("--early_stop", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
parser.add_argument('--patience', type=int, default=25,
help="Patience in early stopping")
parser.add_argument("--editD", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
parser.add_argument("--plot_weights", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
parser.add_argument("--continue_training", type=lambda x:bool(strtobool(x)), nargs='?', const=True, default=False)
parser.add_argument('--model_file', help="File path to model file.")
parser.add_argument('--config_file', default="None", help="Path to config file")
return parser
# Network
# -----------
# * CNN-Encoder
# * Hopfield-Transformer-Encoder
# * Hopfield-Transformer-Decoder
class Transformer(nn.Module):
def __init__(self, cnn_encoder, ntoken, d_model, nhead, nhid, dff, nlayers, dropout=0.5, dropout_pos=0.5,
max_len=250, max_len_trg=250, port=1, pattern_projection_as_connected=False, scaling=None,
normalize_stored_pattern=False, normalize_stored_pattern_affine=False,
normalize_state_pattern=False, normalize_state_pattern_affine=False,
normalize_pattern_projection=False, normalize_pattern_projection_affine=False, input_bias_hopfield=True):
super().__init__()
self.cnn_encoder = cnn_encoder
self.d_model = d_model
hopfield_self_src = Hopfield(input_size=d_model, hidden_size=nhid, num_heads=nhead,
batch_first=False, scaling=scaling, dropout=dropout, pattern_projection_as_connected=pattern_projection_as_connected,
disable_out_projection=False,
normalize_stored_pattern=normalize_stored_pattern,
normalize_stored_pattern_affine= normalize_stored_pattern_affine,
normalize_state_pattern=normalize_state_pattern,
normalize_state_pattern_affine=normalize_state_pattern_affine,
normalize_pattern_projection=normalize_pattern_projection,
normalize_pattern_projection_affine=normalize_pattern_projection_affine,
stored_pattern_as_static=False, state_pattern_as_static=False,
pattern_projection_as_static=False, input_bias=input_bias_hopfield)
hopfield_self_target = Hopfield(input_size=d_model, hidden_size=nhid, num_heads=nhead,
batch_first=False, scaling=scaling, dropout=dropout, pattern_projection_as_connected=pattern_projection_as_connected,
disable_out_projection=False,
normalize_stored_pattern=normalize_stored_pattern,
normalize_stored_pattern_affine= normalize_stored_pattern_affine,
normalize_state_pattern=normalize_state_pattern,
normalize_state_pattern_affine=normalize_state_pattern_affine,
normalize_pattern_projection=normalize_pattern_projection,
normalize_pattern_projection_affine=normalize_pattern_projection_affine,
stored_pattern_as_static=False, state_pattern_as_static=False,
pattern_projection_as_static=False, input_bias=input_bias_hopfield)
self.pos_enc_encoder = PositionalEncoding(d_model, dropout_pos, max_len) #, 115)
self.pos_enc_decoder = PositionalEncoding(d_model, dropout_pos, max_len_trg) #, max_len_trg)
        self.embed_target = nn.Embedding(7, d_model, padding_idx=5)  # vocab of 7: A=0, C=1, G=2, T=3, <EOS>=4, <PAD>=5, <SOS>=6
self.encoder = Encoder(hopfield_self_src, d_model, nhead, nhid, dff, nlayers, dropout, port)
self.decoder = Decoder(hopfield_self_target, hopfield_self_src, d_model, nhead, nhid, dff, nlayers, dropout, port)
self.fc = nn.Linear(hopfield_self_target.output_size, ntoken) # 7
#self.fc = nn.Linear(d_model, ntoken)
self.port = port
def _generate_square_subsequent_mask(self, sz):
mask = torch.triu(torch.ones(sz, sz), 1)
mask = mask.masked_fill(mask==1, float('-inf')).to(self.port)
mask = Variable(mask)
return mask
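    # Example: _generate_square_subsequent_mask(3) produces
    #   [[0., -inf, -inf],
    #    [0.,   0., -inf],
    #    [0.,   0.,   0.]]
    # i.e. decoder position i may only attend to positions j <= i.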
def forward(self, src, trg, seq_len, trg_len, src_emb="cnn", update=None):
#if src_emb == "cnn" or src_emb == "residual_blocks":
src = src.detach()
seq_len = seq_len.detach()
trg = trg.detach()
trg_len = trg_len.detach()
src, seq_len_cnn = self.cnn_encoder(src, seq_len)
src = src.transpose(1, 2).transpose(0, 1)
trg_mask = ((trg == 5) | (trg == 4))
trg_mask = Variable(trg_mask)
nopeak_mask = self._generate_square_subsequent_mask(trg.size(1))
trg = self.embed_target(trg).transpose(0, 1)
src = self.pos_enc_encoder(src)
trg = self.pos_enc_decoder(trg)
e_outputs, src_mask = self.encoder(src, seq_len_cnn, src_mask=None, src_emb=src_emb) #, update=update)
output = self.decoder(target=trg, encoder_output=e_outputs, encoder_output_mask=src_mask, target_mask=trg_mask, nopeak_mask=nopeak_mask) #, update=update)
output = output.transpose(0, 1)
output = self.fc(output)
output = F.log_softmax(output, dim=2)
if update == 0 or update == 55000 or update == 150000 or update == 250000:
fig = None
fig2 = None
else:
fig = None
fig2 = None
return output, fig, fig2
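# Hedged construction sketch (Encoder/Decoder/Hopfield come from elsewhere in this
# code base; `CNNEncoder` and all hyperparameter values below are illustrative):
# cnn = CNNEncoder(...)  # hypothetical signal-embedding module
# model = Transformer(cnn, ntoken=7, d_model=256, nhead=8, nhid=256, dff=1024,
#                     nlayers=6, dropout=0.1, dropout_pos=0.1, port=device)
# out, _, _ = model(src, trg[:, :-1], seq_len, trg_len)  # out: (batch, trg_len, 7) log-probs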
def get_train_loader_trainVal(tensor_x, sig_len, tensor_y, label_len, label10, batch_size, shuffle=True):
print(tensor_x.size(), tensor_y.size(), sig_len.size(), label_len.size(), label10.size())
    my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len, label10)  # create your dataset
    train_loader = torch.utils.data.DataLoader(my_dataset, batch_size=batch_size,
                                               num_workers=0, pin_memory=False, shuffle=shuffle)  # create your dataloader
    return train_loader
def get_train_loader(tensor_x, sig_len, tensor_y, label_len, label10, read_idx, batch_size, shuffle=False):
    my_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y, sig_len, label_len, label10, read_idx)  # create your dataset
    train_loader = torch.utils.data.DataLoader(my_dataset, batch_size=batch_size,
                                               num_workers=0, pin_memory=False, shuffle=shuffle)  # create your dataloader
    return train_loader
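# Smoke-test sketch for the loader helpers (all shapes below are illustrative):
# x = torch.randn(8, 1, 2048)                       # raw signal windows
# y = torch.randint(0, 6, (8, 250))                 # encoded base labels
# lens = torch.full((8,), 2048); lab_lens = torch.full((8,), 250)
# loader = get_train_loader_trainVal(x, lens, y, lab_lens, y.clone(), batch_size=4)
# batch_x, batch_y, seq_len, lab_len, y10 = next(iter(loader))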
def convert_to_string(pred, target, target_lengths):
import editdistance
#vocab = {0: "A", 1: "C", 2: "G", 3: "T", 4: "_"}
vocab = {0: "A", 1: "C", 2: "G", 3: "T", 4: "<EOS>", 5: "<PAD>", 6: "<SOS>"}
editd = 0
num_chars = 0
for idx, length in enumerate(target_lengths):
length = int(length.item())
seq = pred[idx]
seq_target = target[idx]
encoded_pred = []
for p in seq:
if p == 4:
break
encoded_pred.append(vocab[int(p.item())])
encoded_pred = ''.join(encoded_pred)
encoded_target = ''.join([vocab[int(x.item())] for x in seq_target[0:length]])
result = editdistance.eval(encoded_pred, encoded_target)
editd += result
num_chars += len(encoded_target)
return editd, num_chars
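# Worked example: a prediction row [0, 2, 4, 5, 5] decodes to "AG" (decoding stops
# at <EOS>=4) and a target row [0, 1, 2] of length 3 decodes to "ACG";
# editdistance.eval("AG", "ACG") == 1, so this pair contributes editd=1, num_chars=3.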
def trainNet(model, train_ds, optimizer, criterion, clipping_value=None, val_ds=None,
test_ds=None, batch_size=256, n_epochs=500,
make_validation=1000, mode="train", shuffle=True, patience = 25,
file_name="model", earlyStopping=False, writer="",
device=0, editD=True, decrease_lr=True, src_emb="cnn", plot_weights=True, last_checkpoint=None, file_path=None):
#Print all of the hyperparameters of the training iteration:
print("===== HYPERPARAMETERS =====")
print("batch_size=", batch_size)
print("epochs=", n_epochs)
print("gradient clipping=", clipping_value)
print("shuffle=", shuffle)
print("device=", device)
if val_ds is not None:
input_x_val = val_ds[0]
input_y_val = val_ds[1]
input_y10_val = val_ds[2]
signal_len_val = val_ds[3]
label_len_val = val_ds[4]
read_val = val_ds[5]
input_x = train_ds[0]
input_y = train_ds[1]
input_y10 = train_ds[2]
signal_len = train_ds[3]
label_len = train_ds[4]
read_train = train_ds[5]
#Get training data
train_loader = get_train_loader_trainVal(input_x, signal_len,
input_y, label_len,
input_y10, batch_size=batch_size, shuffle=True)
    if val_ds is not None:
val_loader = get_train_loader_trainVal(input_x_val, signal_len_val,
input_y_val, label_len_val,
input_y10_val, batch_size=batch_size, shuffle=True)
if earlyStopping:
# initialize the early_stopping object
early_stopping = EarlyStopping(patience=patience, verbose=True, delta=0.01, name=file_name, relative=True, decrease_lr_scheduler=decrease_lr)
dict_activations_in, dict_activations_forget, dict_activations_cell, dict_activations_out = {}, {}, {}, {}
dict_activations_in_decoder, dict_activations_forget_decoder, dict_activations_cell_decoder, dict_activations_out_decoder = {}, {}, {}, {}
dict_training_loss, dict_validation_loss, dict_training_acc, dict_validation_acc, dict_training_editd, dict_validation_editd = {}, {}, {}, {}, {}, {}
dict_training_loss2, dict_validation_loss2, dict_training_acc2, dict_validation_acc2, dict_training_editd2, dict_validation_editd2 = {}, {}, {}, {}, {}, {}
dict_weights, dict_gradients = {}, {}
running_loss_train, running_loss_val, running_acc_train, running_acc_val, running_editd_train, running_editd_val= 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
if last_checkpoint is not None:
updates = last_checkpoint #+ 1
else:
updates = 0
updates_newTraining = 0
heatmap_g = None
heatmap_w = None
heatmap_g_b = None
heatmap_w_b = None
counter_updates_teacherForcing = 0
old_ed = 0
#Loop for n_epochs
#scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 0.01, gamma=0.95)
for epoch in range(n_epochs):
if earlyStopping and early_stopping.early_stop: # break epoch loop
print("Early stopping")
break
model.train()
epoch_loss, epoch_acc, epoch_loss_val, epoch_acc_val, epoch_editd_val, epoch_editd = 0, 0, 0, 0, 0, 0
print("=" * 30)
print("epoch {}/{}".format(epoch+1, n_epochs))
print("=" * 30)
total_train_loss = 0
loss_iteration = []
acc_iteration = []
editd_iteration = []
for iteration, data in enumerate(train_loader):
model.train()
#Set the parameter gradients to zero
optimizer.zero_grad()
batch_x = data[0]
batch_y = data[1]
seq_len = data[2]
lab_len = data[3]
batch_y10 = data[4]
#Wrap them in a Variable object
inputs, labels, labels10 = Variable(batch_x, requires_grad=False), Variable(batch_y, requires_grad=False), Variable(batch_y10, requires_grad=False) # batch_size x out_size x seq_length
cnn_before = True
if cnn_before:
inputs = inputs
else:
inputs = inputs.transpose(1,2) #.squeeze(1)#.long()
if str(device) == "cpu":
labels = labels.cpu()
trg = torch.cat((torch.Tensor([6]).to(device).repeat(labels.size(0), 1), labels.float()), 1).type(torch.LongTensor)
labels = labels.type(torch.LongTensor)
else:
trg = torch.cat((torch.Tensor([6]).to(device).repeat(labels.size(0), 1), labels.float()), 1).type(torch.cuda.LongTensor)
labels = labels.type(torch.cuda.LongTensor)
if updates == 0:
print("labels", labels.size())
output, figure_softmax_enc, figure_softmax_dec = model(src=inputs, trg=trg[:, :-1], seq_len=seq_len, trg_len=lab_len, src_emb=src_emb, update=updates) #, src_mask=None, trg_mask=None)
output = output.contiguous()
# Calculate cross entropy loss
# output = (seq*batch, out dim), target = (seq*batch)
            # do not one-hot encode the target
reshaped_output = output.view(-1, output.size(2))
reshaped_sorted_labels = labels.view(-1)
notpadded_index = reshaped_sorted_labels != 5 # indices of not padded elements
loss = criterion(reshaped_output, reshaped_sorted_labels.long())
# Backward pass
loss.backward()
#clipping_value = 1 #arbitrary number of your choosing
if clipping_value != "None":
nn.utils.clip_grad_norm_(model.parameters(), float(clipping_value))
# Update encoder and decoder
optimizer.step()
loss_iteration.append(loss.detach().cpu().item()) # detach.item
epoch_loss += loss.item()
running_loss_train += loss.item()
acc = (reshaped_output[notpadded_index, :].argmax(1) ==
reshaped_sorted_labels[notpadded_index]
).sum().item() / reshaped_sorted_labels[notpadded_index].size(0)
epoch_acc += acc
running_acc_train += acc
acc_iteration.append(acc) # acc
#if editD:
# if updates % make_validation == 0:
# ed = np.mean(np.array(convert_to_string(output.argmax(2), labels, lab_len)))
# ed2 = ed
# else:
# ed = 0
# ed2 = old_ed
#
# old_ed = ed2
# epoch_editd += ed
# running_editd_train += ed
# editd_iteration.append(ed2) #ed2
if updates % make_validation == 0:
print("=" * 30)
print("batch {} in epoch {}/{}".format(iteration+1, epoch+1, n_epochs))
print("=" * 30)
print("loss= {0:.4f}".format(epoch_loss / float(iteration + 1)))
print("acc= {0:.4f} %".format((epoch_acc / float(iteration + 1)) * 100))
print("update= ", updates, ", half of updates= ", int((len(train_loader) * n_epochs)*0.5))
if decrease_lr:
print("lr= " + str(optimizer._optimizer.param_groups[0]['lr']))
else:
print("lr= " + str(optimizer.param_groups[0]['lr']))
#if editD and (updates % make_validation == 0):
# print("edit distance= {0:.4f}".format((epoch_editd / float(iteration + 1))))
#data_heads_enc = []
#data_heads_dec = []
#
#if figure_softmax_enc is not None:
# data_heads_enc.append(figure_softmax_enc)
# del figure_softmax_enc
# #data_heads_dec.append(figure_softmax_dec)
# #del figure_softmax_dec
#
# plot_enc = calculate_k_patterns(data_heads_enc)
# plot_enc.savefig(file_path + "softmax_training_encoder_update{}.pdf".format(str(updates)))
# del data_heads_enc[:]
# del data_heads_enc
# #plot_dec = calculate_k_patterns(data_heads_dec)
# #plot_dec.savefig(file_path + "softmax_training_decoder_update{}.pdf".format(str(updates)))
# #del data_heads_dec[:]
# #del data_heads_dec
            if (val_ds is not None) and (updates % make_validation == 0): # or updates == int((len(train_loader) * n_epochs))-1: # or (updates == n_epochs-1)):
val_losses = []
val_acc = []
val_editd = []
# Evaluation on the validation set
model.eval()
data_heads_val_enc = []
data_heads_val_dec = []
samples_softmax = 0
real_samples = 0
total_ed = 0
total_num_chars = 0
with torch.no_grad():
for iteration_val, data_val in enumerate(val_loader):
batch_x_val = data_val[0]
batch_y_val = data_val[1]
seq_len_val = data_val[2]
lab_len_val = data_val[3]
batch_y10_val = data_val[4]
inputs_val, labels_val, labels10_val = Variable(batch_x_val, requires_grad=False), Variable(batch_y_val, requires_grad=False), Variable(batch_y10_val, requires_grad=False)
# batch_size x out_size x seq_length
cnn_before = True
if cnn_before:
inputs_val = inputs_val
else:
inputs_val = inputs_val.transpose(1,2) #.squeeze(1)#.long()
if str(device) == "cpu":
labels_val = labels_val.cpu()
trg_val = torch.cat((torch.Tensor([6]).to(device).repeat(labels_val.size(0), 1), labels_val.float()), 1).type(torch.LongTensor)
labels_val = labels_val.type(torch.LongTensor)
else:
trg_val = torch.cat((torch.Tensor([6]).to(device).repeat(labels_val.size(0), 1), labels_val.float()), 1).type(torch.cuda.LongTensor)
labels_val = labels_val.type(torch.cuda.LongTensor)
                        if iteration_val == 0 or iteration_val % 10 == 0: # get softmax of heads only for every 10th batch, otherwise too memory intensive
updates_val = updates - 1
samples_softmax += int(inputs_val.size(0))
                            if samples_softmax <= 2592: # accumulate softmax over at most 2592 samples (81 batches of 32), afterwards stop
real_samples += int(inputs_val.size(0))
updates_val = updates
else:
updates_val = updates - 1
output_val, figure_softmax_enc, figure_softmax_dec = model(src=inputs_val, trg=trg_val[:, :-1],
seq_len=seq_len_val, trg_len=lab_len_val, src_emb=src_emb, update=updates_val)
output_val = output_val.contiguous()
# Calculate cross entropy loss
# output = (seq*batch, out dim), target = (seq*batch)
                        # do not one-hot encode the target
reshaped_output_val = output_val.view(-1, output_val.size(2))
reshaped_sorted_labels_val = labels_val.view(-1)
notpadded_index_val = reshaped_sorted_labels_val != 5 # indices of not padded elements
loss_val = criterion(reshaped_output_val, reshaped_sorted_labels_val.long())
if torch.any(reshaped_output_val.isnan()).item():
sys.exit()
val_losses.append(loss_val.detach().cpu().item()) # detach.item
epoch_loss_val += loss_val.item()
running_loss_val += loss_val.item()
acc_val = (reshaped_output_val[notpadded_index_val, :].argmax(1) ==
reshaped_sorted_labels_val[notpadded_index_val]
).sum().item() / reshaped_sorted_labels_val[notpadded_index_val].size(0)
epoch_acc_val += acc_val
running_acc_val += acc_val
val_acc.append(acc_val) # acc_val
#if figure_softmax_enc is not None:
# data_heads_val_enc.append(figure_softmax_enc)
# del figure_softmax_enc
# #data_heads_val_dec.append(figure_softmax_dec)
# #del figure_softmax_dec
if editD:
ed_val, num_char_ref = convert_to_string(output_val.argmax(2), labels_val, lab_len_val)
epoch_editd_val += ed_val
running_editd_val += ed_val
val_editd.append(ed_val) # ed val
total_ed += ed_val
total_num_chars += num_char_ref
#if len(data_heads_val_enc) > 0:
# print("nr of batches in softmax plots", len(data_heads_val_enc), "nr of samples", real_samples)
# plot_enc = calculate_k_patterns(data_heads_val_enc)
# plot_enc.savefig(file_path + "softmax_validation_encoder_update{}.pdf".format(str(updates)))
# del data_heads_val_enc[:]
# del data_heads_val_enc
#
# #plot_dec = calculate_k_patterns(data_heads_val_dec)
# #plot_dec.savefig(file_path + "softmax_validation_decoder_update{}.pdf".format(str(updates)))
# #del data_heads_val_dec[:]
# #del data_heads_val_dec
if editD:
cer = float(total_ed) / total_num_chars
if updates == 0 or updates_newTraining == 0:
writer.add_scalar('Loss/train', np.mean(loss_iteration), updates)
writer.add_scalar('Loss/validation', np.mean(val_losses), updates)
writer.add_scalar('Accuracy/train', np.mean(acc_iteration), updates)
writer.add_scalar('Accuracy/validation', np.mean(val_acc), updates)
if editD:
#writer.add_scalar('Edit Distance/train', running_editd_train, updates)
writer.add_scalar('Edit Distance/validation', cer, updates)
#dict_training_editd2[updates] = running_editd_train
dict_validation_editd2[updates] = (cer)
dict_training_loss2[updates] = np.mean(loss_iteration)
dict_training_acc2[updates] = np.mean(acc_iteration)
dict_validation_loss2[updates] = (np.mean(val_losses))
dict_validation_acc2[updates] = (np.mean(val_acc))
else:
writer.add_scalar('Loss/train', np.mean(loss_iteration), updates)
writer.add_scalar('Loss/validation', np.mean(val_losses), updates)
writer.add_scalar('Accuracy/train', np.mean(acc_iteration), updates)
writer.add_scalar('Accuracy/validation', np.mean(val_acc), updates)
if editD:
#writer.add_scalar('Edit Distance/train', running_editd_train, updates) #/ float(make_validation), updates)
writer.add_scalar('Edit Distance/validation', cer, updates)
#dict_training_editd2[updates] = running_editd_train #/ float(make_validation)
dict_validation_editd2[updates] = (cer)
                    dict_training_loss2[updates] = (np.mean(loss_iteration))
dict_training_acc2[updates] = (np.mean(acc_iteration))
dict_validation_loss2[updates] = (np.mean(val_losses))
                    dict_validation_acc2[updates] = (np.mean(val_acc))
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved
# ============================================================================
""" Graph Generator """
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import xai.constants as Const
from wordcloud import WordCloud
import shap
from xai.graphs.basic_graph import Graph
from typing import List
from collections import Counter
import operator
from xai.data.exceptions import NoItemsError
class ReliabilityDiagram(Graph):
def __init__(self, figure_path, data, title):
super(ReliabilityDiagram, self).__init__(file_path=figure_path, data=data, title=title, figure_size=(5, 5),
x_label="Accuracy",
y_label="Confidence")
def draw_core(self):
prob = np.array(self.data[Const.KEY_PROBABILITY])
gt = np.array(self.data[Const.KEY_GROUNDTRUTH])
m = Const.RELIABILITY_BINSIZE
# process input
conf = np.max(prob, axis=1)
pred = np.argmax(prob, axis=1)
accuracy = np.zeros((prob.shape[0], 1))
accuracy[np.where(gt == pred)] = 1
# generate confidence/accuracy
reliability = []
for i in range(m):
lower = 1 / m * i
upper = 1 / m * (1 + i)
condition = (conf >= lower) & (conf < upper)
sample_num = accuracy[condition].shape[0]
ave_acc = np.sum(accuracy[condition]) / sample_num
ave_conf = np.mean(conf[condition])
reliability.append((lower, upper, ave_conf, ave_acc, sample_num))
for item in reliability:
lower, upper, conf, acc, sample_num = item
x = plt.bar(lower, height=acc, width=upper - lower, bottom=0, align='edge', color='b')
ece = conf - acc
if ece > 0:
y = plt.bar(lower, height=conf - acc, width=upper - lower, bottom=acc, align='edge', color='r',
alpha=0.5)
else:
y = plt.bar(lower, height=acc - conf, width=upper - lower, bottom=conf, align='edge', color='r',
alpha=0.5)
plt.legend((x, y), ('accuracy', 'gap'))
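# Aside: the per-bin (confidence - accuracy) gaps drawn above are exactly the
# ingredients of the expected calibration error (ECE). A minimal standalone
# sketch (not part of the original classes), assuming `conf` and `correct` are
# 1-D numpy arrays of confidences and 0/1 correctness flags; empty bins are
# skipped, which also sidesteps the division-by-zero the plotting code risks:
def _ece_sketch(conf, correct, m=Const.RELIABILITY_BINSIZE):
    """Weighted average of |accuracy - confidence| over m equal-width bins."""
    total, n = 0.0, len(conf)
    for i in range(m):
        lower, upper = i / m, (i + 1) / m
        mask = (conf >= lower) & (conf < upper)
        if mask.sum() == 0:  # skip empty bins
            continue
        total += (mask.sum() / n) * abs(correct[mask].mean() - conf[mask].mean())
    return total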
class ReliabilityDiagramForMultiClass(Graph):
def __init__(self, data, title):
super(ReliabilityDiagramForMultiClass, self).__init__(data, title, figure_size=(5, 5),
x_label="Accuracy",
y_label="Confidence")
def draw_core(self, current_class_label):
conf = np.array(self.data[Const.KEY_PROBABILITY])
gt = np.array(self.data[Const.KEY_GROUNDTRUTH])
m = Const.RELIABILITY_BINSIZE
# process input
accuracy = np.zeros(conf.shape)
accuracy[np.where(gt == current_class_label)] = 1
# generate confidence/accuracy
reliability = []
for i in range(m):
lower = 1 / m * i
upper = 1 / m * (1 + i)
condition = (conf >= lower) & (conf < upper)
sample_num = accuracy[condition].shape[0]
ave_acc = np.sum(accuracy[condition]) / sample_num
ave_conf = np.mean(conf[condition])
reliability.append((lower, upper, ave_conf, ave_acc, sample_num))
for item in reliability:
lower, upper, conf, acc, sample_num = item
x = plt.bar(lower, height=acc, width=upper - lower, bottom=0, align='edge', color='b')
ece = conf - acc
if ece > 0:
y = plt.bar(lower, height=conf - acc, width=upper - lower, bottom=acc, align='edge', color='r',
alpha=0.5)
else:
y = plt.bar(lower, height=acc - conf, width=upper - lower, bottom=conf, align='edge', color='r',
alpha=0.5)
plt.legend((x, y), ('accuracy', 'gap'))
plt.title('Reliability for Class %s' % current_class_label)
class HeatMap(Graph):
def __init__(self, figure_path, data, title, x_label=None, y_label=None):
if len(data) < 3:
fig_size = (5, 5)
else:
fig_size = (10, 10)
super(HeatMap, self).__init__(file_path=figure_path, data=data, title=title, figure_size=fig_size,
x_label=x_label, y_label=y_label)
def draw_core(self, x_tick: List[str] = None, y_tick: List[str] = None, color_bar=False, grey_scale=False):
data = np.array(self.data)
df_data = pd.DataFrame(data, x_tick, y_tick)
sns.set(font_scale=1.5) # label size
if len(x_tick) > 30:
annot = False
annot_kws = None
elif len(x_tick) > 10:
annot = True
annot_kws = {}
else:
annot = True
annot_kws = {"size": 25}
if grey_scale:
self.label_ax = sns.heatmap(df_data, annot=annot, annot_kws=annot_kws, fmt='g', cbar=color_bar,
cmap='Greys') # font size
else:
self.label_ax = sns.heatmap(df_data, annot=annot, annot_kws=annot_kws, fmt='g', cbar=color_bar) # font size
class ResultProbability(Graph):
def __init__(self, figure_path, data, title):
super(ResultProbability, self).__init__(file_path=figure_path, data=data, title=title,
figure_size=(6, 6),
x_label='Class',
y_label='Probability')
def draw_core(self, limit_size=Const.DEFAULT_LIMIT_SIZE):
prob = np.array(self.data['probability'])
gt = np.array(self.data['gt'])
num_sample = len(prob)
if num_sample > limit_size:
idx = np.random.rand(num_sample) < limit_size / num_sample
prob = prob[idx, 1]
gt = gt[idx]
else:
prob = prob[:, 1]
data_frame = {'predict_prob': prob, 'gt': gt}
df = pd.DataFrame(data_frame)
self.label_ax = sns.violinplot(x="gt", y="predict_prob", data=df)
class ResultProbabilityForMultiClass(Graph):
def __init__(self, figure_path, data, title):
super(ResultProbabilityForMultiClass, self).__init__(file_path=figure_path, data=data, title=title,
figure_size=(12, 6),
x_label='Ground Truth Class',
y_label='Confidence')
def draw_core(self, limit_size=Const.DEFAULT_LIMIT_SIZE, TOP_K_CLASS=10):
conf = np.array(self.data[Const.KEY_PROBABILITY])
        gt = np.array(self.data[Const.KEY_GROUNDTRUTH])
import cv2
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import re
# STEP0. CALIBRATION BOARD BEFORE CAMERA CALIBRATION
images = glob.glob("camera_cal/calibration*.jpg")
# Chess board (9,6)
objpoints = []
imgpoints = []
objp = np.zeros((6*9, 3), np.float32)
objp[:,:2] = np.mgrid[0:9, 0:6].T.reshape(-1,2)
for fname in images:
img = cv2.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (9,6), None)
if ret == True:
imgpoints.append(corners)
objpoints.append(objp)
img = cv2.drawChessboardCorners(img, (9,6), corners, ret)
def cal_undistort(img, objpoints, imgpoints):
img_size = (img.shape[1], img.shape[0]) # x, y
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
undist = cv2.undistort(img, mtx, dist, None, mtx)
return undist
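# Example use on a distorted road image (the file name is illustrative):
# img = mpimg.imread('test_images/straight_lines1.jpg')
# undistorted = cal_undistort(img, objpoints, imgpoints)
# plt.imshow(undistorted)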
def color_thresholding(img, threshold=(0,255), opt=("rgb")):
# read using mpimg as R.G.B
img_in = np.copy(img)
if (opt == "rgb"):
rgb = img_in
r_channel = rgb[:,:,0]
g_channel = rgb[:,:,1]
b_channel = rgb[:,:,2]
r_binary = np.zeros_like(r_channel)
r_channel = cv2.equalizeHist(r_channel)
r_binary[(r_channel >= threshold[0]) & (r_channel <= threshold[1])]=1
return r_binary
elif (opt == "hls"):
hls = cv2.cvtColor(img_in, cv2.COLOR_RGB2HLS)
h_channel = hls[:,:,0]
l_channel = hls[:,:,1]
s_channel = hls[:,:,2]
s_binary = np.zeros_like(s_channel)
s_channel = cv2.equalizeHist(s_channel)
s_binary[(s_channel >= threshold[0]) & (s_channel <= threshold[1])]=1
return s_binary
else:
return img_in
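# Example: binary mask of strongly saturated pixels via the HLS S-channel
# (the threshold values are illustrative):
# s_binary = color_thresholding(img, threshold=(170, 255), opt="hls")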
def gradient_thresholding(img, threshold=(0,255), opt=("comb")):
# read using mpimg as R.G.B
img_in = np.copy(img)
gray= cv2.cvtColor(img_in, cv2.COLOR_RGB2GRAY)
gray = cv2.equalizeHist(gray)
img_sobel_x = cv2.Sobel(gray, cv2.CV_64F, 1,0, ksize=3)
img_sobel_y = cv2.Sobel(gray, cv2.CV_64F, 0,1, ksize=3)
abs_sobelx = np.absolute(img_sobel_x)
abs_sobely = np.absolute(img_sobel_y)
scaled_sobelx = np.uint8(
255*abs_sobelx / np.max(abs_sobelx)
)
scaled_sobely = np.uint8(
255*abs_sobely / np.max(abs_sobely)
)
    img_sobel_xy = np.sqrt(img_sobel_x**2 + img_sobel_y**2)
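    # Assumed completion -- the tail of this function is truncated in the
    # original. Threshold either a single-axis gradient or the combined
    # magnitude, mirroring the binary-mask pattern of color_thresholding above.
    scaled_sobelxy = np.uint8(255*img_sobel_xy / np.max(img_sobel_xy))
    if opt == "x":
        binary = np.zeros_like(scaled_sobelx)
        binary[(scaled_sobelx >= threshold[0]) & (scaled_sobelx <= threshold[1])] = 1
    elif opt == "y":
        binary = np.zeros_like(scaled_sobely)
        binary[(scaled_sobely >= threshold[0]) & (scaled_sobely <= threshold[1])] = 1
    else:  # "comb": combined gradient magnitude
        binary = np.zeros_like(scaled_sobelxy)
        binary[(scaled_sobelxy >= threshold[0]) & (scaled_sobelxy <= threshold[1])] = 1
    return binary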
import itertools
import copy
import numpy as np
import numpy.testing as npt
import pytest
import quara.objects.composite_system as csys
import quara.objects.elemental_system as esys
from quara.objects.matrix_basis import (
get_comp_basis,
get_gell_mann_basis,
get_normalized_pauli_basis,
get_pauli_basis,
convert_vec,
)
from quara.objects.operators import tensor_product
from quara.objects.povm import (
Povm,
convert_var_index_to_povm_index,
convert_povm_index_to_var_index,
convert_var_to_povm,
convert_vecs_to_var,
calc_gradient_from_povm,
get_x_povm,
get_xx_povm,
get_xy_povm,
get_xz_povm,
get_y_povm,
get_yx_povm,
get_yy_povm,
get_yz_povm,
get_z_povm,
get_zx_povm,
get_zy_povm,
get_zz_povm,
)
from quara.objects.state import get_x0_1q
from quara.settings import Settings
from quara.objects.composite_system_typical import generate_composite_system
from quara.objects.qoperation_typical import generate_qoperation_object
from quara.objects.operators import tensor_product
class TestPovm:
def test_validate_dtype_ng(self):
p1 = np.array([1, 0, 0, 0], dtype=np.complex128)
p2 = np.array([0, 0, 0, 1], dtype=np.complex128)
vecs = [p1, p2]
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
# entries of vecs are not real numbers
with pytest.raises(ValueError):
Povm(c_sys=c_sys, vecs=vecs)
def test_validate_set_of_hermitian_matrices_ok(self):
# Arrange
p1 = np.array([1, 0, 0, 0], dtype=np.float64)
p2 = np.array([0, 0, 0, 1], dtype=np.float64)
vecs = [p1, p2]
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
# Act
povm = Povm(c_sys=c_sys, vecs=vecs)
# Assert
expected = [p1, p2]
assert (povm[0] == expected[0]).all()
assert (povm[1] == expected[1]).all()
assert povm.composite_system is c_sys
def test_validate_set_of_hermitian_matrices_ng(self):
# Arrange
p1 = np.array([1, 0, 0, 0], dtype=np.float64)
p2 = np.array([0, 1, 0, 0], dtype=np.float64)
vecs = [p1, p2]
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
# Act & Assert
with pytest.raises(ValueError):
# ValueError: povm must be a set of Hermitian matrices
_ = Povm(c_sys=c_sys, vecs=vecs)
def test_validate_set_of_hermitian_matrices_not_physical_ok(self):
# Arrange
p1 = np.array([1, 0, 0, 0], dtype=np.float64)
p2 = np.array([0, 1, 0, 0], dtype=np.float64)
vecs = [p1, p2]
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
# Act & Assert
# Test that no exceptions are raised.
_ = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
def test_validate_sum_is_identity_sum_ok(self):
# Arrange
p1 = np.array([1, 0, 0, 0], dtype=np.float64)
p2 = np.array([0, 0, 0, 1], dtype=np.float64)
vecs = [p1, p2]
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
# Act
povm = Povm(c_sys=c_sys, vecs=vecs)
actual = povm.is_identity_sum()
# Assert
assert actual is True
def test_validate_sum_is_identity_sum_ng(self):
# Arrange
p1 = np.array([1, 0, 0, 0], dtype=np.float64)
p2 = np.array([0, 1, 0, 0], dtype=np.float64)
vecs = [p1, p2]
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
# Act & Assert
with pytest.raises(ValueError):
# ValueError: The sum of the elements of POVM must be an identity matrix.
_ = Povm(c_sys=c_sys, vecs=vecs)
def test_validate_sum_is_identity_sum_not_physical_ok(self):
# Arrange
p1 = np.array([1, 0, 0, 1], dtype=np.float64)
p2 = np.array([1, 0, 0, 1], dtype=np.float64)
vecs = [p1, p2]
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
# Act & Assert
# Test that no exceptions are raised.
_ = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
def test_validate_is_positive_semidefinite_ok(self):
# Arrange
ps_1 = np.array([1, 0, 0, 0], dtype=np.float64)
ps_2 = np.array([0, 0, 0, 1], dtype=np.float64)
vecs = [ps_1, ps_2]
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
# Act
povm = Povm(c_sys=c_sys, vecs=vecs)
actual = povm.is_positive_semidefinite()
# Assert
assert actual is True
# Act
actual = povm.is_ineq_constraint_satisfied()
# Assert
assert actual is True
def test_validate_is_positive_semidefinite_ng(self):
# Arrange
ps = np.array([1, 0, 0, 2], dtype=np.float64)
not_ps = np.array([[0, 0, 0, -1]], dtype=np.float64)
vecs = [ps, not_ps]
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
# Act & Assert
with pytest.raises(ValueError):
_ = Povm(c_sys=c_sys, vecs=vecs)
def test_validate_is_positive_semidefinite_not_physical_ok(self):
# Arrange
ps = np.array([1, 0, 0, 2], dtype=np.float64)
not_ps = np.array([0, 0, 0, -1], dtype=np.float64)
vecs = [ps, not_ps]
e_sys = esys.ElementalSystem(1, get_pauli_basis())
c_sys = csys.CompositeSystem([e_sys])
# Act & Assert
# Test that no exceptions are raised.
povm = Povm(c_sys=c_sys, vecs=vecs, is_physicality_required=False)
actual = povm.is_positive_semidefinite()
# Assert
assert actual is False
# Act
actual = povm.is_ineq_constraint_satisfied()
# Assert
assert actual is False
def test_calc_eigenvalues_all(self):
# Arrange
vec_1 = np.array([1, 0, 0, 0], dtype=np.float64)
vec_2 = np.array([0, 0, 0, 1], dtype=np.float64)
vecs = [vec_1, vec_2]
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
# Act
povm = Povm(c_sys=c_sys, vecs=vecs)
actual = povm.calc_eigenvalues()
# Assert
expected = [
np.array([1, 0], dtype=np.float64),
np.array([1, 0], dtype=np.float64),
]
assert len(actual) == len(expected)
npt.assert_almost_equal(actual[0], expected[0], decimal=15)
npt.assert_almost_equal(actual[1], expected[1], decimal=15)
def test_calc_eigenvalues_one(self):
# Arrange
vec_1 = np.array([1, 0, 0, 0], dtype=np.float64)
vec_2 = np.array([0, 0, 0, 1], dtype=np.float64)
vecs = [vec_1, vec_2]
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
# Act
povm = Povm(c_sys=c_sys, vecs=vecs)
actual = povm.calc_eigenvalues(0)
# Assert
expected = np.array([1, 0], dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
# Act
povm = Povm(c_sys=c_sys, vecs=vecs)
actual = povm.calc_eigenvalues(1)
# Assert
expected = np.array([1, 0], dtype=np.float64)
npt.assert_almost_equal(actual, expected, decimal=15)
# def test_validate_dim_ng(self):
# # Arrange
# test_root_dir = Path(os.path.dirname(__file__)).parent.parent
# data_dir = test_root_dir / "data"
# dim = 2 ** 2 # 2 qubits
# num_state = 16
# num_povm = 9
# num_outcome = 4
# povms = s_io.load_povm_list(
# data_dir / "tester_2qubit_povm.csv",
# dim=dim,
# num_povm=num_povm,
# num_outcome=num_outcome,
# )
# vecs = list(povms[0]) # 2qubit
# e_sys = esys.ElementalSystem(1, get_pauli_basis()) # 1qubit
# c_sys = csys.CompositeSystem([e_sys])
# # Act & Assert
# with pytest.raises(ValueError):
# _ = Povm(c_sys=c_sys, vecs=vecs)
def test_convert_basis(self):
# Arrange
e_sys = esys.ElementalSystem(1, get_comp_basis())
c_sys = csys.CompositeSystem([e_sys])
ps_1 = np.array([1, 0, 0, 0], dtype=np.float64)
        ps_2 = np.array([0, 0, 0, 1], dtype=np.float64)
# Apache License Version 2.0
# Copyright 2022 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import allel
import gzip
import os
import numpy as np
import pandas as pd
from multiprocessing import Process, Queue
from sstar.utils import read_data, py2round, read_mapped_region_file, cal_match_pct
#@profile
def cal_pvalue(vcf, ref_ind_file, tgt_ind_file, src_ind_file, anc_allele_file, output, thread, score_file, ref_match_pct_file, mapped_region_file, low_memory, mapped_len_esp, len_esp, var_esp, sfs_esp):
"""
Description:
Calculate p-values for S* haplotypes in the target population with source genomes.
Arguments:
        vcf str: Name of the VCF file containing genotypes from the reference, target, and source populations.
ref_ind_file str: Name of the file containing sample information from reference populations.
tgt_ind_file str: Name of the file containing sample information from target populations.
src_ind_file str: Name of the file containing sample information from source populations.
anc_allele_file str: Name of the file containing ancestral allele information.
output str: Name of the output file.
thread int: Number of threads.
score_file str: Name of the file containing S* scores calculated by `s-star score`.
        ref_match_pct_file str: Name of the file containing match percents in reference populations calculated by `sstar rmatch`.
mapped_region_file str: Name of the BED file containing mapped regions.
mapped_len_esp float: Increment of the length of the mapped region.
len_esp float: Increment of the length of the haplotype.
var_esp float: Increment of the number of derived alleles on the haplotype.
sfs_esp float: Increment of mean site frequency spectrum.
"""
ref_data, ref_samples, tgt_data, tgt_samples, src_data, src_samples = read_data(vcf, ref_ind_file, tgt_ind_file, src_ind_file, anc_allele_file)
res = []
chr_names = ref_data.keys()
mapped_intervals = read_mapped_region_file(mapped_region_file)
data, windows, samples = _read_score_file(score_file, chr_names, tgt_samples)
sample_size = len(samples)
header = 'chrom\tstart\tend\tsample\tp-value\t'
header += 'src_sample\thap_index\tS*_start\tS*_end\tS*_SNP_num\t'
header += "hap_dSNV_num\thap_len\thap_mapped_len\thap_match_num\thap_tot_num\thap_dSNP_per_site_num\thap_S*_match(%)\thap_num_match_ref"
# Read match percents in reference populations from a file
# Use whole-genome match percents as the null distributions
if low_memory:
try:
ref_match_pct = pd.read_csv(ref_match_pct_file, compression="gzip", sep="\t")
except:
ref_match_pct = pd.read_csv(ref_match_pct_file, sep="\t")
query_ref_match_pct = _query_ref_match_pct_pandas
else:
ref_match_pct = _read_ref_match_pct_file(ref_match_pct_file)
query_ref_match_pct = _query_ref_match_pct_naive
#for s in samples[0:1]:
# i = samples.index(s)
# res = _cal_pvalue_ind(data[s], i, mapped_intervals, tgt_data, src_data, src_samples, ref_match_pct, sample_size, query_ref_match_pct, mapped_len_esp, len_esp, var_esp, sfs_esp)
if thread is None: thread = min(os.cpu_count()-1, sample_size)
res = _cal_tgt_match_pct_manager(data, mapped_intervals, samples, tgt_samples, src_samples, tgt_data, src_data, ref_match_pct, sample_size, query_ref_match_pct, thread, mapped_len_esp, len_esp, var_esp, sfs_esp)
with open(output, 'w') as o:
o.write(header+"\n")
o.write("\n".join(res)+"\n")
#@profile
def _read_score_file(score_file, chr_names, tgt_samples):
"""
Description:
Helper function for reading the file generated by `sstar score`.
Arguments:
score_file str: Name of the file containing S* scores generated by `sstar score`.
chr_names list: List containing names of chromosomes for analysis.
tgt_samples list: List containing names of samples from the target population for analysis.
Returns:
data dict: Dictionary containing S* for analysis.
windows dict: Dictionary containing windows for analysis.
header str: Header from the file generated by `sstar score`.
samples list: List containing names of samples in the target population for analysis.
"""
data = dict()
windows = dict()
for c in chr_names:
windows[c] = []
samples = []
with open(score_file, 'r') as f:
header = f.readline().rstrip()
for line in f.readlines():
line = line.rstrip()
elements = line.split("\t")
chr_name = elements[0]
win_start = elements[1]
win_end = elements[2]
sample = elements[3]
if sample not in tgt_samples: continue
if elements[6] == 'NA': continue
if sample not in data.keys():
data[sample] = []
samples.append(sample)
data[sample].append(line)
star_snps = elements[-1].split(",")
windows[c].append((int(win_start), int(win_end)))
windows[c].append((int(star_snps[0]), int(star_snps[-1])))
return data, windows, samples
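# Illustrative `sstar score` row (tab-separated; only the columns consumed above
# are certain): chrom, win_start, win_end, sample, ..., the S* score at
# elements[6], ..., and the S* SNP positions as a comma-separated list in the
# last column.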
#@profile
def _read_ref_match_pct_file(ref_match_pct_file):
"""
Description:
Helper function for reading match percents from the reference population.
Arguments:
ref_match_pct_file str: Name of the file containing match percents from the reference population.
Returns:
ref_match_pct dict: Dictionary containing match percents from the reference population.
"""
f = gzip.open(ref_match_pct_file, 'rt')
try:
f.readline()
except:
f.close()
f = open(ref_match_pct_file, 'r')
f.readline()
ref_match_pct = dict()
for line in f.readlines():
elements = line.rstrip().split("\t")
count = int(elements[0])
mapped_bases_bin = int(elements[1])
hap_len = int(elements[2])
mh_sites = int(elements[3])
tot_sites = int(elements[4])
sfs = float(elements[5])
match = float(elements[6])
if mapped_bases_bin not in ref_match_pct.keys(): ref_match_pct[mapped_bases_bin] = dict()
if hap_len not in ref_match_pct[mapped_bases_bin].keys(): ref_match_pct[mapped_bases_bin][hap_len] = dict()
if mh_sites not in ref_match_pct[mapped_bases_bin][hap_len].keys(): ref_match_pct[mapped_bases_bin][hap_len][mh_sites] = dict()
if sfs not in ref_match_pct[mapped_bases_bin][hap_len][mh_sites].keys():
ref_match_pct[mapped_bases_bin][hap_len][mh_sites][sfs] = dict()
ref_match_pct[mapped_bases_bin][hap_len][mh_sites][sfs]['count'] = []
ref_match_pct[mapped_bases_bin][hap_len][mh_sites][sfs]['match_pct'] = []
ref_match_pct[mapped_bases_bin][hap_len][mh_sites][sfs]['count'].append(count)
ref_match_pct[mapped_bases_bin][hap_len][mh_sites][sfs]['match_pct'].append(match / tot_sites)
f.close()
return ref_match_pct
def _cal_tgt_match_pct_manager(data, mapped_intervals, samples, tgt_samples, src_samples, tgt_data, src_data, ref_match_pct, sample_size, query_ref_match_pct, thread, mapped_len_esp, len_esp, var_esp, sfs_esp):
"""
Description:
Manager function to calculate match percents in target populations using multiprocessing.
Arguments:
data dict: Lines from the output file created by `sstar score`.
mapped_intervals dict: Dictionary of tuples containing mapped regions across the genome.
sample list: Sample information for individuals needed to be estimated match percents.
tgt_samples list: Sample information from target populations.
src_samples list: Sample information from source populations.
tgt_data dict: Genotype data from target populations.
src_data dict: Genotype data from source populations.
ref_match_pct dict: Match percents calculated from reference populations.
sample_size int: Number of individuals analyzed.
query_ref_match_pct func: Function used to query match percentage from reference populations.
thread int: Number of threads.
mapped_len_esp float: Increment of the length of the mapped region.
len_esp float: Increment of the length of the haplotype.
var_esp float: Increment of the number of derived alleles on the haplotype.
sfs_esp float: Increment of mean site frequency spectrum.
Returns:
res list: Match percents for target populations.
"""
try:
from pytest_cov.embed import cleanup_on_sigterm
except ImportError:
pass
else:
cleanup_on_sigterm()
res = []
in_queue, out_queue = Queue(), Queue()
workers = [Process(target=_cal_tgt_match_pct_worker, args=(in_queue, out_queue, mapped_intervals, tgt_data, src_data, src_samples, ref_match_pct, len(tgt_samples), query_ref_match_pct, mapped_len_esp, len_esp, var_esp, sfs_esp)) for ii in range(thread)]
for t in samples:
index = tgt_samples.index(t)
in_queue.put((index, data[t]))
try:
for worker in workers:
worker.start()
for s in range(sample_size):
item = out_queue.get()
if item != '': res.append(item)
for worker in workers:
worker.terminate()
finally:
for worker in workers:
worker.join()
return res
def _cal_tgt_match_pct_worker(in_queue, out_queue, mapped_intervals, tgt_data, src_data, src_samples, ref_match_pct, sample_size, query_ref_match_pct, mapped_len_esp, len_esp, var_esp, sfs_esp):
"""
Description:
Worker function to calculate match percents in target populations.
Arguments:
in_queue multiprocessing.Queue: multiprocessing.Queue instance to receive parameters from the manager.
out_queue multiprocessing.Queue: multiprocessing.Queue instance to send results back to the manager.
mapped_intervals dict: Dictionary of tuples containing mapped regions across the genome.
tgt_data dict: Genotype data from target populations.
src_data dict: Genotype data from source populations.
src_samples list: List containing sample information for source populations.
ref_match_pct dict: Match percents in reference populations as the null distribution.
sample_size int: Number of individuals analyzed.
query_ref_match_pct func: Function used to query match percentages from reference popualtions.
mapped_len_esp float: Increment of the length of the mapped region.
len_esp float: Increment of the length of the haplotype.
var_esp float: Increment of the number of derived alleles on the haplotype.
sfs_esp float: Increment of mean site frequency spectrum.
"""
while True:
index, data = in_queue.get()
res = _cal_pvalue_ind(data, index, mapped_intervals, tgt_data, src_data, src_samples, ref_match_pct, sample_size, query_ref_match_pct, mapped_len_esp, len_esp, var_esp, sfs_esp)
out_queue.put("\n".join(res))
#@profile
def _get_ssnps_range(chr_name, data, ind_index, hap_index, win_start, win_end, s_star_snps):
"""
Description:
Helper function to obtain the range of a haplotype containing S* SNPs.
If the haplotype contains less than two S* SNPs, it will be ignored.
Arguments:
chr_name str: Name of the chromosome.
data dict: Dictionary containing genotype data and position information.
ind_index int: Index of the individual carrying S* SNPs.
hap_index int: Index of the haplotype carrying S* SNPs.
win_start int: Start position of the local window containing S* SNPs.
wind_end int: End position of the local window containing S* SNPs.
s_star_snps list: List containing positions of S* SNPs.
Returns:
hap_pos_min int: Start position of the haplotype.
hap_pos_max int: End position of the haplotype.
"""
gt = data[chr_name]['GT']
pos = data[chr_name]['POS']
sub_snps = np.where((pos>=win_start) & (pos<=win_end))[0]
sub_gt = gt[sub_snps][:,ind_index]
sub_pos = pos[sub_snps]
hap = sub_gt[:,hap_index]
s_star_snps_pos = [int(s) for s in s_star_snps]
index = np.in1d(sub_pos, s_star_snps_pos)
hap_num = np.sum(hap[index])
if hap_num < 2:
hap_pos_max = 'NA'
hap_pos_min = 'NA'
else:
hap_pos_max = int(np.array(s_star_snps_pos)[np.equal(hap[index],1)][-1])
        hap_pos_min = int(np.array(s_star_snps_pos)[np.equal(hap[index], 1)][0])
    return hap_pos_min, hap_pos_max
# coding: utf-8
# Created by ay27 at 28/01/2018
import signal
import struct
from binary import ImageNet
import numpy as np
from tqdm import tqdm
import pickle
from multiprocessing import Queue, Process
import sys
def transform(p, q, dataset):
sums = np.zeros((dataset.C * dataset.H * dataset.W), np.float64)
cnt = 0
while True:
read_n, encoded = p.get(True)
if (read_n is None) or (encoded is None):
break
batch = dataset.unpack(read_n, encoded)
sums += np.sum(batch, axis=0, dtype=np.float64)
# mean = mean * (float(cnt) / float(cnt + read_n)) + \
# (np.sum(batch, axis=0, dtype=np.float32) / float(cnt + read_n))
cnt += batch.shape[0]
q.put((cnt, sums))
if __name__ == '__main__':
if len(sys.argv) != 5:
print('Argument Error!\n'
'python2.7 mean.py [bin-format] [output] [#split] [#workers]\n'
'\tbin-format: \ttrain binary file format, Example: train_data_{}.bin\n'
'\toutput: \toutput file, Example: train_mean.bin\n'
'\t#splits: \tnumber of original data split, Example: 1\n'
'\t#workers: \tcpu process to compute, Example: 4\n')
exit(0)
bin_format = sys.argv[1]
output_file = sys.argv[2]
splits = int(sys.argv[3])
workers = int(sys.argv[4])
dataset_sum = None
threads = []
def signal_handler(signal, frame):
print('You pressed Ctrl+C!')
q.close()
p.close()
for t in threads:
t.terminate()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
C, H, W = 0, 0, 0
cnt = 0.0
for split in range(splits):
print('processing split {}'.format(split))
dataset = ImageNet(bin_format.format(split), 'B')
dataset.read_meta()
if dataset_sum is None:
C = dataset.C
H = dataset.H
W = dataset.W
dataset_sum = np.zeros([C * H * W], np.float64)
p = Queue(1024)
q = Queue(1024)
for ii in range(workers):
t = Process(target=transform, name='T-{}'.format(ii),
args=(p, q, dataset))
threads.append(t)
t.start()
for ii in tqdm(range(dataset.N)):
p.put(dataset.read(10))
for t in threads:
p.put((None, None))
for ii in range(workers):
processed_n, sums = q.get(True)
dataset_sum += sums
# mean = mean * (float(cnt) / float(cnt + processed_n)) + means * (
# float(processed_n) / float(cnt + processed_n))
cnt += processed_n
print('split #{} total processed n: {}'.format(split, cnt))
for t in threads:
t.join()
t.terminate()
threads = []
mean = dataset_sum / float(cnt)
    mean = np.asarray(mean, np.float32)
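    # Assumed final step (truncated in the original): persist the per-pixel mean
    # so it can be subtracted during training. Raw float32 binary output is an
    # assumption; adjust to whatever format the training pipeline expects.
    mean.tofile(output_file)
    print('mean written to {}'.format(output_file))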
# write tests for bfs
import pytest
import numpy as np
from mst import Graph
from sklearn.metrics import pairwise_distances
def check_mst(adj_mat: np.ndarray,
mst: np.ndarray,
expected_weight: int,
allowed_error: float = 0.0001):
""" Helper function to check the correctness of the adjacency matrix encoding an MST.
Note that because the MST of a graph is not guaranteed to be unique, we cannot
simply check for equality against a known MST of a graph.
Arguments:
adj_mat: Adjacency matrix of full graph
mst: Adjacency matrix of proposed minimum spanning tree
expected_weight: weight of the minimum spanning tree of the full graph
allowed_error: Allowed difference between proposed MST weight and `expected_weight`
TODO:
Add additional assertions to ensure the correctness of your MST implementation
For example, how many edges should a minimum spanning tree have? Are minimum spanning trees
always connected? What else can you think of?
"""
def approx_equal(a, b):
return abs(a - b) < allowed_error
total = 0
for i in range(mst.shape[0]):
for j in range(i+1):
total += mst[i, j]
assert approx_equal(total, expected_weight), 'Proposed MST has incorrect expected weight'
## additional checks ##
assert np.allclose(mst, mst.T), "Proposed MST is not symmetric"
assert np.sum(adj_mat) >= np.sum(mst), "Proposed MST has more weight than original graph"
if np.min(np.sum(adj_mat, axis=1)) > 0:
        assert np.min(np.sum(mst, axis=1)) > 0, "Proposed MST is not fully connected but original graph is fully connected"
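    # Additional structural check (assumes strictly positive edge weights, which
    # holds for the distance graphs in these tests): a spanning tree on V
    # vertices has exactly V - 1 edges.
    num_edges = int(np.count_nonzero(np.triu(mst, k=1)))
    assert num_edges == mst.shape[0] - 1, "Proposed MST should have exactly V - 1 edges"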
def test_mst_small():
""" Unit test for the construction of a minimum spanning tree on a small graph """
file_path = './data/small.csv'
g = Graph(file_path)
g.construct_mst()
check_mst(g.adj_mat, g.mst, 8)
def test_mst_single_cell_data():
""" Unit test for the construction of a minimum spanning tree using
single cell data, taken from the Slingshot R package
(https://bioconductor.org/packages/release/bioc/html/slingshot.html)
"""
file_path = './data/slingshot_example.txt'
# load coordinates of single cells in low-dimensional subspace
coords = np.loadtxt(file_path)
# compute pairwise distances for all 140 cells to form an undirected weighted graph
dist_mat = pairwise_distances(coords)
g = Graph(dist_mat)
g.construct_mst()
check_mst(g.adj_mat, g.mst, 57.263561605571695)
def test_mst_student():
""" TODO: Write at least one unit test for MST construction """
# large adj mat
adj_mat = [[0, 40, 14, 52, 38, 79, 53, 66, 72, 55],
[40, 0, 44, 25, 81, 34, 54, 59, 56, 65],
[14, 44, 0, 64, 79, 50, 73, 71, 55, 44],
[52, 25, 64, 0, 43, 75, 49, 20, 65, 48],
[38, 81, 79, 43, 0, 71, 48, 38, 40, 33],
[79, 34, 50, 75, 71, 0, 13, 64, 34, 47],
[53, 54, 73, 49, 48, 13, 0, 81, 9, 73],
[66, 59, 71, 20, 38, 64, 81, 0, 19, 55],
[72, 56, 55, 65, 40, 34, 9, 19, 0, 28],
[55, 65, 44, 48, 33, 47, 73, 55, 28, 0]]
g = Graph(np.array(adj_mat))
g.construct_mst()
check_mst(g.adj_mat, g.mst, 199)
# small adj mat
adj_mat = [[1, 1, 3, 4, 4],
[1, 0, 2, 3, 1],
[3, 2, 2, 2, 1],
[4, 3, 2, 3, 2],
[4, 1, 1, 2, 1]]
    g = Graph(np.array(adj_mat))
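    g.construct_mst()
    # Expected weight worked out by hand with Kruskal on the matrix above:
    # edges (0,1)=1, (1,4)=1, (2,4)=1, (2,3)=2 -> total weight 5.
    check_mst(g.adj_mat, g.mst, 5)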
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 22 16:06:27 2020
@author: glatt
"""
import random
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import networkx as nx
# CONSTANTS
max_E=200 #Max value for interactions matrix
l=8 #Average preys for predator
b=5 #Energy gain for basal
d=2.5 #Energy dissipation for timestep
delta=20 #Energy for new born
P_death=0.002 #Death chance for timestep
E_rep=200 #Energy needed for reproducing
class IBM:
def __init__(self):
self.max_E=200
self.l=8
self.b=5
self.d=2.5
self.delta=20
self.P_death=0.002
self.E_rep=200
#self.default_par=[self.Area,E_rep,P_death,max_E,delta,l,d,b]
        # create an empty structure in which each row will hold
        # the energy and the species of the i-th individual
        # Individuals[individual_energy][species][ID]
        self.Individuals = np.empty((0, 3))
import unittest
import numpy.testing as npt
import numpy as np
from macromax.utils.display.hsv import hsv2rgb, rgb2hsv
class TestHsv2Rgb(unittest.TestCase):
def test_scalar(self):
npt.assert_array_equal(hsv2rgb(0, 0, 0), [0, 0, 0])
npt.assert_array_equal(hsv2rgb(0, 1, 0), [0, 0, 0])
npt.assert_array_equal(hsv2rgb(1, 0, 0), [0, 0, 0])
npt.assert_array_equal(hsv2rgb(0, 0, 1), [1, 1, 1])
npt.assert_array_equal(hsv2rgb(0, 0, 0.5), [0.5, 0.5, 0.5])
npt.assert_array_equal(hsv2rgb(0, 0, 1.5), [1.5, 1.5, 1.5])
npt.assert_array_equal(hsv2rgb(0, 1, 1), [1, 0, 0])
npt.assert_array_equal(hsv2rgb(0, 0.5, 1), [1, 0.5, 0.5])
npt.assert_array_equal(hsv2rgb(0, 1, 1), [1, 0, 0])
npt.assert_array_equal(hsv2rgb(1, 1, 1), [1, 0, 0])
npt.assert_array_equal(hsv2rgb(1/6, 1, 1), [1, 1, 0])
npt.assert_array_equal(hsv2rgb(2/6, 1, 1), [0, 1, 0])
npt.assert_array_equal(hsv2rgb(3/6, 1, 1), [0, 1, 1])
npt.assert_array_equal(hsv2rgb(4/6, 1, 1), [0, 0, 1])
npt.assert_array_equal(hsv2rgb(5/6, 1, 1), [1, 0, 1])
def test_vector(self):
npt.assert_array_equal(hsv2rgb(np.ones(4), np.ones(4), np.ones(4)),
np.concatenate((np.ones((4, 1)), np.zeros((4, 1)), np.zeros((4, 1))), axis=-1))
npt.assert_array_equal(hsv2rgb([0], [0], [0]), [[0, 0, 0]])
npt.assert_array_equal(hsv2rgb([0], [0], [1]), [[1, 1, 1]])
npt.assert_array_equal(hsv2rgb([0], [0], [0.5]), [[0.5, 0.5, 0.5]])
npt.assert_array_equal(hsv2rgb([0], [0], [1.5]), [[1.5, 1.5, 1.5]])
npt.assert_array_equal(hsv2rgb([0], [1], [1]), [[1, 0, 0]])
npt.assert_array_equal(hsv2rgb([0], [0.5], [1]), [[1, 0.5, 0.5]])
npt.assert_array_equal(hsv2rgb([0], [1], [1]), [[1, 0, 0]])
npt.assert_array_equal(hsv2rgb([1], [1], [1]), [[1, 0, 0]])
npt.assert_array_equal(hsv2rgb([1/6], [1], [1]), [[1, 1, 0]])
npt.assert_array_equal(hsv2rgb([2/6], [1], [1]), [[0, 1, 0]])
npt.assert_array_equal(hsv2rgb([3/6], [1], [1]), [[0, 1, 1]])
npt.assert_array_equal(hsv2rgb([4/6], [1], [1]), [[0, 0, 1]])
npt.assert_array_equal(hsv2rgb([5/6], [1], [1]), [[1, 0, 1]])
npt.assert_array_equal(hsv2rgb([0], [0], [0]), [[0, 0, 0]])
def test_matrix(self):
npt.assert_array_equal(hsv2rgb(np.ones((5, 4)), np.ones((5, 4)), np.ones((5, 4))),
np.concatenate((np.ones((5, 4, 1)), np.zeros((5, 4, 1)), np.zeros((5, 4, 1))), axis=-1))
npt.assert_array_equal(hsv2rgb([[0]], [[0]], [[0]]), [[[0, 0, 0]]])
npt.assert_array_equal(hsv2rgb([[0]], [[0]], [[1]]), [[[1, 1, 1]]])
npt.assert_array_equal(hsv2rgb([[0]], [[0]], [[0.5]]), [[[0.5, 0.5, 0.5]]])
npt.assert_array_equal(hsv2rgb([[0]], [[0]], [[1.5]]), [[[1.5, 1.5, 1.5]]])
npt.assert_array_equal(hsv2rgb([[0]], [[1]], [[1]]), [[[1, 0, 0]]])
npt.assert_array_equal(hsv2rgb([[0]], [[0.5]], [[1]]), [[[1, 0.5, 0.5]]])
npt.assert_array_equal(hsv2rgb([[0]], [[1]], [[1]]), [[[1, 0, 0]]])
npt.assert_array_equal(hsv2rgb([[1]], [[1]], [[1]]), [[[1, 0, 0]]])
npt.assert_array_equal(hsv2rgb([[1/6]], [[1]], [[1]]), [[[1, 1, 0]]])
npt.assert_array_equal(hsv2rgb([[2/6]], [[1]], [[1]]), [[[0, 1, 0]]])
npt.assert_array_equal(hsv2rgb([[3/6]], [[1]], [[1]]), [[[0, 1, 1]]])
npt.assert_array_equal(hsv2rgb([[4/6]], [[1]], [[1]]), [[[0, 0, 1]]])
npt.assert_array_equal(hsv2rgb([[5/6]], [[1]], [[1]]), [[[1, 0, 1]]])
npt.assert_array_equal(hsv2rgb([[0]], [[0]], [[0]]), [[[0, 0, 0]]])
def test_tensor(self):
npt.assert_array_equal(hsv2rgb(np.ones((6, 5, 4)), np.ones((6, 5, 4)), np.ones((6, 5, 4))),
np.concatenate((np.ones((6, 5, 4, 1)), np.zeros((6, 5, 4, 1)), np.zeros((6, 5, 4, 1))), axis=-1))
        npt.assert_array_equal(hsv2rgb(np.ones((6, 5, 4)), 1, np.ones((6, 5, 4))),
                               np.concatenate((np.ones((6, 5, 4, 1)), np.zeros((6, 5, 4, 1)), np.zeros((6, 5, 4, 1))), axis=-1))
import numpy as np
def power_method(A, error_tol):
    lim = 10000  # maximum number of iterations
    error = np.inf  # initialize the error to infinity
    n = A.shape[1]  # dimension of the square matrix A
    y0 = np.zeros(n)
    y0[0] = 1  # normalized initial guess
    for k in range(0, lim):
        xk = A.dot(y0)  # matrix product x^k = A * y^(k-1)
        yk = xk / np.linalg.norm(xk)  # normalize to obtain y^k
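        # Assumed completion -- the loop tail and return value are truncated in
        # the original. Track convergence of the normalized iterate and return a
        # Rayleigh-quotient estimate of the dominant eigenvalue. (Note: if the
        # dominant eigenvalue is negative, yk alternates sign; a production
        # version would compare yk against +/-y0.)
        error = np.linalg.norm(yk - y0)
        y0 = yk
        if error < error_tol:
            break
    eigenvalue = y0.dot(A.dot(y0))  # Rayleigh quotient y^T A y (with ||y|| = 1)
    return eigenvalue, y0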
"""Shuttle Reentry maximum crossrange optimal control problem."""
import functools
import numpy as np
import sympy
from sympy import sin, cos, exp, sqrt
from scipy import constants, integrate, interpolate
import sym2num.model
import sym2num.utils
from sym2num import var
from ceacoest import oc
from ceacoest.modelling import symoc
@symoc.collocate(order=3)
class ShuttleReentry:
"""Shuttle Reentry maximum crossrange optimal control model."""
@sym2num.utils.classproperty
@functools.lru_cache()
def variables(cls):
"""Model variables definition."""
consts = ['a0', 'a1', 'mu', 'b0', 'b1', 'b2', 'S', 'Re', 'rho0',
'hr', 'm', 'c0', 'c1', 'c2', 'c3', 'qU']
x = ['h', 'phi', 'theta', 'v', 'gamma', 'psi']
vars = [var.SymbolObject('self', var.SymbolArray('consts', consts)),
sym2num.var.SymbolArray('x', x),
sym2num.var.SymbolArray('u', ['alpha', 'beta']),
sym2num.var.SymbolArray('p', ['tf'])]
return sym2num.var.make_dict(vars)
@sym2num.model.collect_symbols
def f(self, x, u, p, *, s):
"""ODE function."""
rho = s.rho0 * exp(-s.h / s.hr)
qbar = 0.5 * rho * s.v**2
CL = s.a0 + s.a1 * s.alpha
CD = s.b0 + s.b1 * s.alpha + s.b2 * s.alpha ** 2
L = qbar * s.S * CL
D = qbar * s.S * CD
r = s.Re + s.h
g = s.mu / r**2
hd = s.v * sin(s.gamma)
phid = s.v / r * cos(s.gamma) * sin(s.psi) / cos(s.theta)
thetad = s.v / r * cos(s.gamma) * cos(s.psi)
vd = -D / s.m - g * sin(s.gamma)
gammad = L / (s.m * s.v) * cos(s.beta) + cos(s.gamma) * (s.v/r - g/s.v)
psid = (L * sin(s.beta) / (s.m * s.v * cos(s.gamma))
+ s.v / (r*cos(s.theta)) * cos(s.gamma)*sin(s.psi)*sin(s.theta))
return sympy.Array([hd, phid, thetad, vd, gammad, psid]) * s.tf
@sym2num.model.collect_symbols
def g(self, x, u, p, *, s):
"""Path constraints."""
rho = s.rho0 * exp(-s.h / s.hr)
qr = 17700 * sqrt(rho) * (0.0001 * s.v) ** 3.07
qa = s.c0 + s.c1 * s.alpha + s.c2 * s.alpha ** 2 + s.c3 * s.alpha ** 3
return [qa*qr / s.qU]
@sym2num.model.collect_symbols
def h(self, xe, p, *, s):
"""Endpoint constraints."""
return []
@sym2num.model.collect_symbols
def M(self, xe, p, *, s):
"""Mayer (endpoint) cost."""
return s.theta_final
@sym2num.model.collect_symbols
def L(self, x, u, p, *, s):
"""Lagrange (running) cost."""
return 0
def guess(problem):
def u(x):
ret = np.zeros(x.shape[:-1] + (2,))
ret[..., 0] = -2*constants.degree - x[..., 4]
ret[..., 1] = -5*constants.degree
return ret
def xdot(t, x):
return problem.model.f(x, u(x), [1])
tf = 80
x0 = [260e3, 0, 0, 25.6e3, -1*constants.degree, 0]
tspan = [0, tf]
sol = integrate.solve_ivp(xdot, tspan, x0, max_step=1)
x = interpolate.interp1d(sol.t / tf, sol.y)(problem.tc).T
u = interpolate.interp1d(sol.t / tf, u(sol.y.T).T)(problem.tc).T
dec0 = np.zeros(problem.ndec)
problem.set_decision_item('tf', tf, dec0)
problem.set_decision('u', u, dec0)
problem.set_decision('x', x, dec0)
return dec0
if __name__ == '__main__':
symbolic_model = ShuttleReentry()
GeneratedShuttleReentry = sym2num.model.compile_class(symbolic_model)
d2r = constants.degree
r2d = 1 / d2r
given = {
'rho0': 0.002378, 'hr': 23800, 'Re': 20902900, 'S': 2690,
'a0': -0.20704, 'a1': 0.029244 * r2d, 'mu': 0.14076539e17,
'b0': 0.07854, 'b1': -0.61592e-2 * r2d, 'b2': 0.621408 * r2d**2,
'c0': 1.0672181, 'c1': -0.19213774e-1 * r2d,
'c2': 0.21286289e-3 * r2d**2, 'c3': -0.10117249e-5 * r2d**3,
'qU': 70, 'm': 203e3 / 32.174,
}
model = GeneratedShuttleReentry(**given)
t = np.linspace(0, 1, 1000)
problem = oc.Problem(model, t)
ntc = problem.tc.size
dec_L, dec_U = np.repeat([[-np.inf], [np.inf]], problem.ndec, axis=-1)
problem.set_decision_item('tf', 0, dec_L)
problem.set_decision_item('h', 0, dec_L)
problem.set_decision_item('h', 300e3, dec_U)
problem.set_decision_item('v', 100, dec_L)
problem.set_decision_item('alpha', -70*d2r, dec_L)
problem.set_decision_item('alpha', 70*d2r, dec_U)
problem.set_decision_item('beta', -89*d2r, dec_L)
problem.set_decision_item('beta', 1*d2r, dec_U)
problem.set_decision_item('theta', 0*d2r, dec_L)
problem.set_decision_item('theta', 89*d2r, dec_U)
problem.set_decision_item('gamma', -89*d2r, dec_L)
problem.set_decision_item('gamma', 89*d2r, dec_U)
problem.set_decision_item('h_initial', 260e3, dec_L)
problem.set_decision_item('h_initial', 260e3, dec_U)
problem.set_decision_item('v_initial', 25.6e3, dec_L)
problem.set_decision_item('v_initial', 25.6e3, dec_U)
problem.set_decision_item('gamma_initial', -1*d2r, dec_L)
problem.set_decision_item('gamma_initial', -1*d2r, dec_U)
problem.set_decision_item('phi_initial', 0, dec_L)
problem.set_decision_item('phi_initial', 0, dec_U)
problem.set_decision_item('theta_initial', 0, dec_L)
problem.set_decision_item('theta_initial', 0, dec_U)
problem.set_decision_item('psi_initial', 90*d2r, dec_L)
problem.set_decision_item('psi_initial', 90*d2r, dec_U)
problem.set_decision_item('h_final', 80e3, dec_L)
problem.set_decision_item('h_final', 80e3, dec_U)
problem.set_decision_item('v_final', 2.5e3, dec_L)
problem.set_decision_item('v_final', 2.5e3, dec_U)
problem.set_decision_item('gamma_final', -5*d2r, dec_L)
problem.set_decision_item('gamma_final', -5*d2r, dec_U)
problem.set_decision_item('psi_final', 50*d2r, dec_U)
problem.set_decision_item('theta_final', 10*d2r, dec_L)
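    # Scale the decision variables toward O(1) magnitudes for the NLP solver.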
dec_scale = np.ones(problem.ndec)
problem.set_decision_item('tf', 1 / 1000, dec_scale)
problem.set_decision_item('h', 1 / 260e3, dec_scale)
problem.set_decision_item('v', 1 / 20e3, dec_scale)
problem.set_decision_item('gamma', 2, dec_scale)
problem.set_decision_item('alpha', 2, dec_scale)
    constr_scale = np.ones(problem.ncons)
import os
import pickle
import shutil
import hashlib
import numpy as np
from scipy.special import erf
def md5Hash(tree):
return hashlib.md5(str(tree).encode()).hexdigest()
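# Trees are keyed by the MD5 of their string representation, so trees that
# print identically share one archive entry and one save directory.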
class Entry:
"""A helper class for the Archive"""
def __init__(self, tree, savePath):
self.savePath = savePath
self._tree = md5Hash(tree)
self.tree = tree
self.cost = np.inf
# self.converged = False
self.bestParams = None
# self.bestErrors = None
# Use getters/setters to read/write objects from pickle files
@property
def tree(self):
path = os.path.join(self.savePath, self._tree, 'tree.pkl')
with open(path, 'rb') as saveFile:
t = pickle.load(saveFile)
return t
@tree.setter
def tree(self, tree):
self._tree = md5Hash(tree)
os.mkdir(os.path.join(self.savePath, self._tree))
path = os.path.join(self.savePath, self._tree, 'tree.pkl')
with open(path, 'wb') as saveFile:
pickle.dump(tree, saveFile)
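    # The tree property keeps trees on disk: the setter creates a directory
    # named after the tree's hash and pickles the tree into it, and the getter
    # loads the pickle back on demand, keeping Entry objects lightweight.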
@property
def optimizer(self):
path = os.path.join(self.savePath, self._tree, 'opt.pkl')
with open(path, 'rb') as saveFile:
o = pickle.load(saveFile)
return o
@optimizer.setter
def optimizer(self, opt):
path = os.path.join(self.savePath, self._tree, 'opt.pkl')
with open(path, 'wb') as saveFile:
pickle.dump(opt, saveFile)
def __getstate__(self):
self_dict = self.__dict__.copy()
del self_dict['_tree']
# TODO: I think this is saving the Optimizers still
return self_dict
class Archive(dict):
"""
A class for handling the archiving of tree objects and their optimizers to
log results and help avoid sampling duplicate trees during symbolic
regression.
"""
def __init__(self, savePath):
dict.__init__(self,)
self.savePath = savePath
if os.path.isdir(self.savePath):
shutil.rmtree(self.savePath)
os.mkdir(self.savePath)
self.hashLog = {}
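    # The constructor wipes any existing savePath, so an Archive always starts
    # from a clean directory; hashLog keeps a readable hash -> str(tree) map.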
def update(self, tree, cost, errors, params, optimizer):
# def update(self, tree, cost, params, optimizer):
# Check if tree in archive, otherwise create a new entry for it
key = md5Hash(tree)
# entry = self.get(key, Entry(tree, self.savePath))
if key in self:
raise RuntimeError("How did you re-run an existing tree? {}".format(key))
            # entry = self[key]  # unreachable: the raise above always fires
else:
entry = Entry(tree, self.savePath)
# Update optimizer
entry.optimizer = optimizer
# entry.converged = optimizer.stop()
entry.cost = cost
entry.bestParams = params
entry.bestErrors = errors
# # Update best cost and parameter set of entry
# bestIdx = np.argmin(cost)
# if cost[bestIdx] < entry.cost:
# entry.cost = cost[bestIdx]
# entry.bestParams = params[bestIdx]
# entry.bestErrors = errors[bestIdx]
self[key] = entry
self.hashLog[key] = str(tree)
# @property
# def convergences(self):
# return [entry.converged for entry in self]
@property
def fitnesses(self):
        # Iterate over values(): Archive is a dict, so bare iteration yields keys.
        return [entry.cost for entry in self.values()]
def sample(self, N):
"""Returns a sample of N unique trees and their optimizers"""
keys = list(self.keys())
costs = np.array([self[k].cost for k in keys])
costs = 1-erf(np.log(costs))
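        # Map costs to sampling weights in (0, 2): 1 - erf(log(cost)) gives
        # low-cost trees weights near 2 and lets high-cost trees decay to 0.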
        costs[np.where(np.isnan(costs))] = 0  # assumed continuation (original line truncated); NaN costs get zero weight
import os
import tempfile
import numpy as np
import scipy.ndimage.measurements as meas
from functools import reduce
import warnings
import sys
sys.path.append(os.path.abspath(r'../lib'))
import NumCppPy as NumCpp # noqa E402
####################################################################################
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
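# Trial division up to sqrt(n); each divisor i contributes the pair (i, n//i).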
####################################################################################
def test_seed():
np.random.seed(1)
####################################################################################
def test_abs():
randValue = np.random.randint(-100, -1, [1, ]).astype(np.double).item()
assert NumCpp.absScaler(randValue) == np.abs(randValue)
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.absScaler(value), 9) == np.round(np.abs(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.absArray(cArray), np.abs(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.absArray(cArray), 9), np.round(np.abs(data), 9))
####################################################################################
def test_add():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
####################################################################################
def test_alen():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.alen(cArray) == shape.rows
####################################################################################
def test_all():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
####################################################################################
def test_allclose():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
tolerance = 1e-5
data1 = np.random.randn(shape.rows, shape.cols)
data2 = data1 + tolerance / 10
data3 = data1 + 1
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
assert NumCpp.allclose(cArray1, cArray2, tolerance) and not NumCpp.allclose(cArray1, cArray3, tolerance)
####################################################################################
def test_amax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
####################################################################################
def test_amin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
####################################################################################
def test_angle():
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.angleScaler(value), 9) == np.round(np.angle(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.angleArray(cArray), 9), np.round(np.angle(data), 9))
####################################################################################
def test_any():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
####################################################################################
def test_append():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.append(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
numRows = np.random.randint(1, 100, [1, ]).item()
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + numRows, shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.append(data1, data2, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
    numCols = np.random.randint(1, 100, [1, ]).item()
    shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + numCols)
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.append(data1, data2, axis=1))
####################################################################################
def test_arange():
start = np.random.randn(1).item()
stop = np.random.randn(1).item() * 100
step = np.abs(np.random.randn(1).item())
if stop < start:
step *= -1
data = np.arange(start, stop, step)
assert np.array_equal(np.round(NumCpp.arange(start, stop, step).flatten(), 9), np.round(data, 9))
####################################################################################
def test_arccos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
####################################################################################
def test_arccosh():
value = np.abs(np.random.rand(1).item()) + 1
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) + 1
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
####################################################################################
def test_arcsin():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
####################################################################################
def test_arcsinh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
####################################################################################
def test_arctan():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
####################################################################################
def test_arctan2():
xy = np.random.rand(2) * 2 - 1
assert np.round(NumCpp.arctan2Scaler(xy[1], xy[0]), 9) == np.round(np.arctan2(xy[1], xy[0]), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayX = NumCpp.NdArray(shape)
cArrayY = NumCpp.NdArray(shape)
xy = np.random.rand(*shapeInput, 2) * 2 - 1
xData = xy[:, :, 0].reshape(shapeInput)
yData = xy[:, :, 1].reshape(shapeInput)
cArrayX.setArray(xData)
cArrayY.setArray(yData)
assert np.array_equal(np.round(NumCpp.arctan2Array(cArrayY, cArrayX), 9), np.round(np.arctan2(yData, xData), 9))
####################################################################################
def test_arctanh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
####################################################################################
def test_argmax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
####################################################################################
def test_argmin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
####################################################################################
def test_argsort():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
dataFlat = data.flatten()
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
dataFlat = data.flatten()
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
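    # Compare the values gathered by each index set rather than the raw index
    # arrays: argsort order is not unique when a column contains duplicates.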
allPass = True
for idx, row in enumerate(data.T):
if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
allPass = True
for idx, row in enumerate(data.T):
if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]): # noqa
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]):
allPass = False
break
assert allPass
####################################################################################
def test_argwhere():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
####################################################################################
def test_around():
value = np.abs(np.random.rand(1).item()) * np.random.randint(1, 10, [1, ]).item()
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert NumCpp.aroundScaler(value, numDecimalsRound) == np.round(value, numDecimalsRound)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert np.array_equal(NumCpp.aroundArray(cArray, numDecimalsRound), np.round(data, numDecimalsRound))
####################################################################################
def test_array_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, shapeInput)
data2 = np.random.randint(1, 100, shapeInput)
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
cArray3 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
####################################################################################
def test_array_equiv():
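    # NumCpp.array_equiv treats same-sized arrays with equal contents as
    # equivalent even when their shapes differ (cArray2 is cArray1 reshaped);
    # cArray3 holds different data and must compare unequal.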
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
data1 = np.random.randint(1, 100, shapeInput1)
data3 = np.random.randint(1, 100, shapeInput3)
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
cArray3 = NumCpp.NdArrayComplexDouble(shape3)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
imag3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data3 = real3 + 1j * imag3
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
####################################################################################
def test_asarray():
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
####################################################################################
def test_astype():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToUint32(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.uint32))
assert cArrayCast.dtype == np.uint32
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToComplex(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.complex128))
assert cArrayCast.dtype == np.complex128
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
cArrayCast = NumCpp.astypeComplexToComplex(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.complex64))
assert cArrayCast.dtype == np.complex64
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
cArrayCast = NumCpp.astypeComplexToDouble(cArray).getNumpyArray()
warnings.filterwarnings('ignore', category=np.ComplexWarning)
assert np.array_equal(cArrayCast, data.astype(np.double))
warnings.filters.pop() # noqa
assert cArrayCast.dtype == np.double
####################################################################################
def test_average():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, axis=1), 9))
####################################################################################
def test_averageWeighted():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [shape.rows, shape.cols])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.NONE).item(), 9) == \
np.round(np.average(data, weights=weights), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [shape.rows, shape.cols])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.NONE).item(), 9) == \
np.round(np.average(data, weights=weights), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
    cWeights = NumCpp.NdArray(1, shape.rows)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [1, shape.rows])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
    cWeights = NumCpp.NdArray(1, shape.rows)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [1, shape.rows])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
    cWeights = NumCpp.NdArray(1, shape.cols)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [1, shape.cols])
cWeights.setArray(weights)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
    cWeights = NumCpp.NdArray(1, shape.cols)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [1, shape.cols])
cWeights.setArray(weights)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=1), 9))
####################################################################################
def test_binaryRepr():
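    # np.binary_repr is given an explicit width so both sides produce the full
    # 64-bit representation, including leading zeros.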
value = np.random.randint(0, np.iinfo(np.uint64).max, [1, ], dtype=np.uint64).item()
assert NumCpp.binaryRepr(np.uint64(value)) == np.binary_repr(value, np.iinfo(np.uint64).bits)
####################################################################################
def test_bincount():
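    # Exercises bincount with the default minimum length, with an explicit
    # minLength larger than the data's maximum, and both with and without weights.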
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.bincount(cArray, 0).flatten(), np.bincount(data.flatten(), minlength=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
minLength = int(np.max(data) + 10)
assert np.array_equal(NumCpp.bincount(cArray, minLength).flatten(),
np.bincount(data.flatten(), minlength=minLength))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
cWeights = NumCpp.NdArrayUInt32(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint32)
    weights = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(NumCpp.bincountWeighted(cArray, cWeights, 0).flatten(),
np.bincount(data.flatten(), minlength=0, weights=weights.flatten()))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
cWeights = NumCpp.NdArrayUInt32(shape)
    data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint32)
    weights = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
cWeights.setArray(weights)
minLength = int(np.max(data) + 10)
assert np.array_equal(NumCpp.bincountWeighted(cArray, cWeights, minLength).flatten(),
np.bincount(data.flatten(), minlength=minLength, weights=weights.flatten()))
####################################################################################
def test_bitwise_and():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_and(cArray1, cArray2), np.bitwise_and(data1, data2))
####################################################################################
def test_bitwise_not():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt64(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray.setArray(data)
assert np.array_equal(NumCpp.bitwise_not(cArray), np.bitwise_not(data))
####################################################################################
def test_bitwise_or():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_or(cArray1, cArray2), np.bitwise_or(data1, data2))
####################################################################################
def test_bitwise_xor():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_xor(cArray1, cArray2), np.bitwise_xor(data1, data2))
####################################################################################
def test_byteswap():
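    # Only the shape of the result is checked here; a value-level comparison
    # (e.g. against data.byteswap()) is not part of this test.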
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt64(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray.setArray(data)
assert np.array_equal(NumCpp.byteswap(cArray).shape, shapeInput)
####################################################################################
def test_cbrt():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cbrtArray(cArray), 9), np.round(np.cbrt(data), 9))
####################################################################################
def test_ceil():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.ceilArray(cArray), 9), np.round(np.ceil(data), 9))
####################################################################################
def test_center_of_mass():
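    # For the axis cases, scipy's center_of_mass is applied to each 1-D
    # column/row slice to build the reference values.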
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.NONE).flatten(), 9),
np.round(meas.center_of_mass(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
coms = list()
for col in range(data.shape[1]):
coms.append(np.round(meas.center_of_mass(data[:, col])[0], 9))
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.ROW).flatten(), 9), np.round(coms, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
coms = list()
for row in range(data.shape[0]):
coms.append(np.round(meas.center_of_mass(data[row, :])[0], 9))
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.COL).flatten(), 9), np.round(coms, 9))
####################################################################################
def test_clip():
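    # Complex clipping follows NumPy's ordering of complex numbers
    # (compare real parts first, then imaginary parts).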
value = np.random.randint(0, 100, [1, ]).item()
minValue = np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item()
assert NumCpp.clipScaler(value, minValue, maxValue) == np.clip(value, minValue, maxValue)
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
minValue = np.random.randint(0, 10, [1, ]).item() + 1j * np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
assert NumCpp.clipScaler(value, minValue, maxValue) == np.clip(value, minValue, maxValue) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
minValue = np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item()
assert np.array_equal(NumCpp.clipArray(cArray, minValue, maxValue), np.clip(data, minValue, maxValue))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
minValue = np.random.randint(0, 10, [1, ]).item() + 1j * np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
assert np.array_equal(NumCpp.clipArray(cArray, minValue, maxValue), np.clip(data, minValue, maxValue)) # noqa
####################################################################################
def test_column_stack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.column_stack(cArray1, cArray2, cArray3, cArray4),
np.column_stack([data1, data2, data3, data4]))
####################################################################################
def test_complex():
real = np.random.rand(1).astype(np.double).item()
value = complex(real)
assert np.round(NumCpp.complexScaler(real), 9) == np.round(value, 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.complexScaler(components[0], components[1]), 9) == np.round(value, 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
realArray = NumCpp.NdArray(shape)
real = np.random.rand(shape.rows, shape.cols)
realArray.setArray(real)
assert np.array_equal(np.round(NumCpp.complexArray(realArray), 9), np.round(real + 1j * np.zeros_like(real), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
realArray = NumCpp.NdArray(shape)
imagArray = NumCpp.NdArray(shape)
real = np.random.rand(shape.rows, shape.cols)
imag = np.random.rand(shape.rows, shape.cols)
realArray.setArray(real)
imagArray.setArray(imag)
assert np.array_equal(np.round(NumCpp.complexArray(realArray, imagArray), 9), np.round(real + 1j * imag, 9))
####################################################################################
def test_concatenate():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.NONE).flatten(),
np.concatenate([data1.flatten(), data2.flatten(), data3.flatten(), data4.flatten()]))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape3 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape4 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.ROW),
np.concatenate([data1, data2, data3, data4], axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.COL),
np.concatenate([data1, data2, data3, data4], axis=1))
####################################################################################
def test_conj():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.conjScaler(value), 9) == np.round(np.conj(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.conjArray(cArray), 9), np.round(np.conj(data), 9))
####################################################################################
def test_contains():
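    # Axis.COL checks membership within each row; Axis.ROW checks membership
    # within each column (hence the iteration over data.T).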
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert NumCpp.contains(cArray, value, NumCpp.Axis.NONE).getNumpyArray().item() == (value in data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert NumCpp.contains(cArray, value, NumCpp.Axis.NONE).getNumpyArray().item() == (value in data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.COL).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.COL).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data.T:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data.T:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.asarray(truth))
####################################################################################
def test_copy():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.copy(cArray), data)
####################################################################################
def test_copysign():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.copysign(cArray1, cArray2), np.copysign(data1, data2))
####################################################################################
def test_copyto():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray()
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
assert np.array_equal(NumCpp.copyto(cArray2, cArray1), data1)
####################################################################################
def test_cos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.cosScaler(value), 9) == np.round(np.cos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.cosScaler(value), 9) == np.round(np.cos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cosArray(cArray), 9), np.round(np.cos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cosArray(cArray), 9), np.round(np.cos(data), 9))
####################################################################################
def test_cosh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.coshScaler(value), 9) == np.round(np.cosh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.coshScaler(value), 9) == np.round(np.cosh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.coshArray(cArray), 9), np.round(np.cosh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.coshArray(cArray), 9), np.round(np.cosh(data), 9))
####################################################################################
def test_count_nonzero():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.count_nonzero(cArray, NumCpp.Axis.NONE) == np.count_nonzero(data)
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.count_nonzero(cArray, NumCpp.Axis.NONE) == np.count_nonzero(data)
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.ROW).flatten(), np.count_nonzero(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.ROW).flatten(), np.count_nonzero(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.COL).flatten(), np.count_nonzero(data, axis=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.COL).flatten(), np.count_nonzero(data, axis=1))
####################################################################################
def test_cross():
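    # 2-element vectors produce the scalar z-component of the cross product,
    # while 3-element vectors produce full 3-D cross products; both row-wise
    # and column-wise layouts are exercised.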
shape = NumCpp.Shape(1, 2)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).item() == np.cross(data1, data2).item()
shape = NumCpp.Shape(1, 2)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).item() == np.cross(data1, data2).item()
shape = NumCpp.Shape(2, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(2, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 2)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 2)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(1, 3)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.cross(data1, data2).flatten())
shape = NumCpp.Shape(1, 3)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.cross(data1, data2).flatten())
shape = NumCpp.Shape(3, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(3, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 3)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 3)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.cross(data1, data2, axis=1))
####################################################################################
def test_cube():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cube(cArray), 9), np.round(data * data * data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cube(cArray), 9), np.round(data * data * data, 9))
####################################################################################
def test_cumprod():
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.NONE).flatten(), data.cumprod())
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.NONE).flatten(), data.cumprod())
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.ROW), data.cumprod(axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.ROW), data.cumprod(axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.COL), data.cumprod(axis=1))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.COL), data.cumprod(axis=1))
####################################################################################
def test_cumsum():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.NONE).flatten(), data.cumsum())
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.NONE).flatten(), data.cumsum())
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.ROW), data.cumsum(axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.ROW), data.cumsum(axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.COL), data.cumsum(axis=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.COL), data.cumsum(axis=1))
####################################################################################
def test_deg2rad():
value = np.abs(np.random.rand(1).item()) * 360
assert np.round(NumCpp.deg2radScaler(value), 9) == np.round(np.deg2rad(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 360
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.deg2radArray(cArray), 9), np.round(np.deg2rad(data), 9))
####################################################################################
def test_degrees():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert np.round(NumCpp.degreesScaler(value), 9) == np.round(np.degrees(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.degreesArray(cArray), 9), np.round(np.degrees(data), 9))
####################################################################################
def test_deleteIndices():
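    # NumCpp.Slice(0, 100, 4) has an exclusive stop, so it selects the same
    # indices (0, 4, ..., 96) as Python's slice(0, 99, 4) on a length-100 axis.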
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.NONE).flatten(),
np.delete(data, indicesPy, axis=None))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.ROW),
np.delete(data, indicesPy, axis=0))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.COL),
np.delete(data, indicesPy, axis=1))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, shape.size(), [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.NONE).flatten(),
np.delete(data, index, axis=None))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.ROW), np.delete(data, index, axis=0))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.COL), np.delete(data, index, axis=1))
####################################################################################
def test_diag():
shapeInput = np.random.randint(2, 25, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
k = np.random.randint(0, np.min(shapeInput), [1, ]).item()
elements = np.random.randint(1, 100, shapeInput)
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diag(cElements, k).flatten(), np.diag(elements, k))
shapeInput = np.random.randint(2, 25, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
k = np.random.randint(0, np.min(shapeInput), [1, ]).item()
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diag(cElements, k).flatten(), np.diag(elements, k))
####################################################################################
def test_diagflat():
numElements = np.random.randint(2, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
elements = np.random.randint(1, 100, [numElements, ])
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(2, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
real = np.random.randint(1, 100, [numElements, ])
imag = np.random.randint(1, 100, [numElements, ])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(1, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
elements = np.random.randint(1, 100, [numElements, ])
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(1, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
real = np.random.randint(1, 100, [numElements, ])
imag = np.random.randint(1, 100, [numElements, ])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
####################################################################################
def test_diagonal():
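    # For Axis.COL the NumPy reference swaps axis1 and axis2 so that the
    # offset is applied along the other dimension.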
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.ROW).flatten(),
np.diagonal(data, offset, axis1=0, axis2=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.ROW).flatten(),
np.diagonal(data, offset, axis1=0, axis2=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.COL).flatten(),
np.diagonal(data, offset, axis1=1, axis2=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.COL).flatten(),
np.diagonal(data, offset, axis1=1, axis2=0))
####################################################################################
def test_diff():
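    # The Axis.COL case stores uint32 data and casts the (possibly negative)
    # differences back to uint32 so both sides wrap identically.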
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.NONE).flatten(),
np.diff(data.flatten()))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.NONE).flatten(),
np.diff(data.flatten()))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.ROW), np.diff(data, axis=0))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.ROW), np.diff(data, axis=0))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.COL).astype(np.uint32), np.diff(data, axis=1))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.COL), np.diff(data, axis=1))
####################################################################################
def test_divide():
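    # Zero-valued divisors are replaced with one (or re-drawn in a loop) to
    # avoid division by zero; real/complex operand combinations are covered
    # in both orders.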
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2[data2 == 0] = 1
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = 0
while value == 0:
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
data[data == 0] = 1
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
data2[data2 == complex(0)] = complex(1)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = 0
while value == complex(0):
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[data == complex(0)] = complex(1)
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2[data2 == 0] = 1
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
data2[data2 == complex(0)] = complex(1)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
    value = 0
    while value == complex(0):
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
data[data == 0] = 1
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = 0
while value == 0:
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[data == complex(0)] = complex(1)
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
####################################################################################
def test_dot():
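    # The 1-by-N cases test the vector dot product (compared against
    # np.dot(data1, data2.T)); the later 2-D cases test matrix multiplication
    # with compatible inner dimensions.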
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols])
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
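    # 2D case: dot performs a full matrix product, so the inner dimensions must agree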
shapeInput = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.dot(cArray1, cArray2), np.dot(data1, data2))
shapeInput = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
real1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
imag2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.dot(cArray1, cArray2), np.dot(data1, data2))
####################################################################################
def test_empty():
shapeInput = np.random.randint(1, 100, [2, ])
cArray = NumCpp.emptyRowCol(shapeInput[0].item(), shapeInput[1].item())
assert cArray.shape[0] == shapeInput[0]
assert cArray.shape[1] == shapeInput[1]
assert cArray.size == shapeInput.prod()
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.emptyShape(shape)
assert cArray.shape[0] == shape.rows
assert cArray.shape[1] == shape.cols
assert cArray.size == shapeInput.prod()
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.empty_like(cArray1)
assert cArray2.shape().rows == shape.rows
assert cArray2.shape().cols == shape.cols
assert cArray2.size() == shapeInput.prod()
####################################################################################
def test_endianess():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
assert NumCpp.endianess(cArray) == NumCpp.Endian.NATIVE
####################################################################################
def test_equal():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 10, [shape.rows, shape.cols])
data2 = np.random.randint(0, 10, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.equal(cArray1, cArray2), np.equal(data1, data2))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.equal(cArray1, cArray2), np.equal(data1, data2))
####################################################################################
def test_exp2():
    value = np.abs(np.random.rand(1).item())
    assert np.round(NumCpp.exp2Scaler(value), 9) == np.round(np.exp2(value), 9)
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.rand(shape.rows, shape.cols)
    cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.exp2Array(cArray), 9), np.round(np.exp2(data), 9))
####################################################################################
def test_exp():
    value = np.abs(np.random.rand(1).item())
    assert np.round(NumCpp.expScaler(value), 9) == np.round(np.exp(value), 9)
    components = np.random.rand(2).astype(np.double)
    value = complex(components[0], components[1])
    assert np.round(NumCpp.expScaler(value), 9) == np.round(np.exp(value), 9)
    shapeInput = np.random.randint(20, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArray(shape)
    data = np.random.rand(shape.rows, shape.cols)
    cArray.setArray(data)
    assert np.array_equal(np.round(NumCpp.expArray(cArray), 9), np.round(np.exp(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expArray(cArray), 9), np.round(np.exp(data), 9))
####################################################################################
def test_expm1():
    value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.expm1Scaler(value), 9) == np.round(np.expm1(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.expm1Scaler(value), 9) == np.round(np.expm1(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expm1Array(cArray), 9), np.round(np.expm1(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.rand(shape.rows, shape.cols)
imag = np.random.rand(shape.rows, shape.cols)
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expm1Array(cArray), 9), np.round(np.expm1(data), 9))
####################################################################################
def test_eye():
shapeInput = np.random.randint(1, 100, [1, ]).item()
randK = np.random.randint(0, shapeInput, [1, ]).item()
assert np.array_equal(NumCpp.eye1D(shapeInput, randK), np.eye(shapeInput, k=randK))
shapeInput = np.random.randint(1, 100, [1, ]).item()
randK = np.random.randint(0, shapeInput, [1, ]).item()
assert np.array_equal(NumCpp.eye1DComplex(shapeInput, randK),
np.eye(shapeInput, k=randK) + 1j * np.zeros([shapeInput, shapeInput]))
shapeInput = np.random.randint(10, 100, [2, ])
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eye2D(shapeInput[0].item(), shapeInput[1].item(), randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK))
shapeInput = np.random.randint(10, 100, [2, ])
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eye2DComplex(shapeInput[0].item(), shapeInput[1].item(), randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK) +
1j * np.zeros(shapeInput))
shapeInput = np.random.randint(10, 100, [2, ])
cShape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eyeShape(cShape, randK), np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK))
shapeInput = np.random.randint(10, 100, [2, ])
cShape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eyeShapeComplex(cShape, randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK) +
1j * np.zeros(shapeInput))
####################################################################################
def test_fill_diagonal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
NumCpp.fillDiagonal(cArray, 666)
np.fill_diagonal(data, 666)
assert np.array_equal(cArray.getNumpyArray(), data)
####################################################################################
def test_find():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
value = data.mean()
cMask = NumCpp.operatorGreater(cArray, value)
cMaskArray = NumCpp.NdArrayBool(cMask.shape[0], cMask.shape[1])
cMaskArray.setArray(cMask)
idxs = NumCpp.find(cMaskArray).astype(np.int64)
idxsPy = np.nonzero((data > value).flatten())[0]
assert np.array_equal(idxs.flatten(), idxsPy)
####################################################################################
def test_findN():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
value = data.mean()
cMask = NumCpp.operatorGreater(cArray, value)
cMaskArray = NumCpp.NdArrayBool(cMask.shape[0], cMask.shape[1])
cMaskArray.setArray(cMask)
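    # findN returns only the first 8 flattened indices where the mask is true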
idxs = NumCpp.findN(cMaskArray, 8).astype(np.int64)
idxsPy = np.nonzero((data > value).flatten())[0]
assert np.array_equal(idxs.flatten(), idxsPy[:8])
####################################################################################
def test_fix():
value = np.random.randn(1).item() * 100
assert NumCpp.fixScaler(value) == np.fix(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.fixArray(cArray), np.fix(data))
####################################################################################
def test_flatten():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flatten(cArray).getNumpyArray(), np.resize(data, [1, data.size]))
####################################################################################
def test_flatnonzero():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flatnonzero(cArray).getNumpyArray().flatten(), np.flatnonzero(data))
####################################################################################
def test_flip():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flip(cArray, NumCpp.Axis.NONE).getNumpyArray(),
np.flip(data.reshape(1, data.size), axis=1).reshape(shapeInput))
####################################################################################
def test_fliplr():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.fliplr(cArray).getNumpyArray(), np.fliplr(data))
####################################################################################
def test_flipud():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flipud(cArray).getNumpyArray(), np.flipud(data))
####################################################################################
def test_floor():
value = np.random.randn(1).item() * 100
assert NumCpp.floorScaler(value) == np.floor(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.floorArray(cArray), np.floor(data))
####################################################################################
def test_floor_divide():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.floor_divideScaler(value1, value2) == np.floor_divide(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.floor_divideArray(cArray1, cArray2), np.floor_divide(data1, data2))
####################################################################################
def test_fmax():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.fmaxScaler(value1, value2) == np.fmax(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fmaxArray(cArray1, cArray2), np.fmax(data1, data2))
####################################################################################
def test_fmin():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.fminScaler(value1, value2) == np.fmin(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fminArray(cArray1, cArray2), np.fmin(data1, data2))
####################################################################################
def test_fmod():
value1 = np.random.randint(1, 100, [1, ]).item() * 100 + 1000
value2 = np.random.randint(1, 100, [1, ]).item() * 100 + 1000
assert NumCpp.fmodScaler(value1, value2) == np.fmod(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32) * 100 + 1000
data2 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fmodArray(cArray1, cArray2), np.fmod(data1, data2))
####################################################################################
def test_fromfile():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
    tempDir = tempfile.gettempdir()
tempFile = os.path.join(tempDir, 'NdArrayDump.bin')
NumCpp.dump(cArray, tempFile)
assert os.path.isfile(tempFile)
data2 = NumCpp.fromfile(tempFile, '').reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile)
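    # repeat the round trip through a text file using a newline separator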
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
tempFile = os.path.join(tempDir, 'NdArrayDump')
NumCpp.tofile(cArray, tempFile, '\n')
assert os.path.exists(tempFile + '.txt')
data2 = NumCpp.fromfile(tempFile + '.txt', '\n').reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile + '.txt')
####################################################################################
def test_fromiter():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.fromiter(cArray).flatten(), data.flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.fromiter(cArray).flatten(), data.flatten())
####################################################################################
def test_full():
shapeInput = np.random.randint(1, 100, [1, ]).item()
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullSquare(shapeInput, value)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput**2 and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [1, ]).item()
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullSquareComplex(shapeInput, value)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput**2 and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullRowCol(shapeInput[0].item(), shapeInput[1].item(), value)
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullRowColComplex(shapeInput[0].item(), shapeInput[1].item(), value)
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullShape(shape, value)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == value))
    shapeInput = np.random.randint(1, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
    # complex-valued counterpart of the fullShape case above
    cArray = NumCpp.fullShapeComplex(shape, value)
    assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
            cArray.size == shapeInput.prod() and np.all(cArray == value))
####################################################################################
def test_full_like():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
value = np.random.randint(1, 100, [1, ]).item()
cArray2 = NumCpp.full_like(cArray1, value)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == value))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray2 = NumCpp.full_likeComplex(cArray1, value)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == value))
####################################################################################
def test_gcd():
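    # scaler gcd needs boost or a C++17 STL std::gcd; the array reduction needs boost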
if not NumCpp.NUMCPP_NO_USE_BOOST or NumCpp.STL_GCD_LCM:
value1 = np.random.randint(1, 1000, [1, ]).item()
value2 = np.random.randint(1, 1000, [1, ]).item()
assert NumCpp.gcdScaler(value1, value2) == np.gcd(value1, value2)
if not NumCpp.NUMCPP_NO_USE_BOOST:
size = np.random.randint(20, 100, [1, ]).item()
cArray = NumCpp.NdArrayUInt32(1, size)
data = np.random.randint(1, 1000, [size, ], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.gcdArray(cArray) == np.gcd.reduce(data) # noqa
####################################################################################
def test_gradient():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.ROW), np.gradient(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.ROW), np.gradient(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.COL), np.gradient(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.COL), np.gradient(data, axis=1))
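    # Axis.NONE takes the gradient over the flattened array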
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.NONE).flatten(),
np.gradient(data.flatten(), axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.NONE).flatten(),
np.gradient(data.flatten(), axis=0))
####################################################################################
def test_greater():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater(cArray1, cArray2).getNumpyArray(),
np.greater(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater(cArray1, cArray2).getNumpyArray(),
np.greater(data1, data2))
####################################################################################
def test_greater_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater_equal(cArray1, cArray2).getNumpyArray(),
np.greater_equal(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater_equal(cArray1, cArray2).getNumpyArray(),
np.greater_equal(data1, data2))
####################################################################################
def test_histogram():
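    # gaussian data with random scale and offset; check counts and bin edges against numpy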
shape = NumCpp.Shape(1024, 1024)
cArray = NumCpp.NdArray(shape)
    data = np.random.randn(1024, 1024) * np.random.randint(1, 10, [1, ]).item() + \
        np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
numBins = np.random.randint(10, 30, [1, ]).item()
histogram, bins = NumCpp.histogram(cArray, numBins)
h, b = np.histogram(data, numBins)
assert np.array_equal(histogram.getNumpyArray().flatten().astype(np.int32), h)
assert np.array_equal(np.round(bins.getNumpyArray().flatten(), 9), np.round(b, 9))
shape = NumCpp.Shape(1024, 1024)
cArray = NumCpp.NdArray(shape)
    data = np.random.randn(1024, 1024) * np.random.randint(1, 10, [1, ]).item() + \
        np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
binEdges = np.linspace(data.min(), data.max(), 15, endpoint=True)
cBinEdges = NumCpp.NdArray(1, binEdges.size)
cBinEdges.setArray(binEdges)
histogram = NumCpp.histogram(cArray, cBinEdges)
h, _ = np.histogram(data, binEdges)
assert np.array_equal(histogram.flatten().astype(np.int32), h)
####################################################################################
def test_hstack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.hstack(cArray1, cArray2, cArray3, cArray4),
np.hstack([data1, data2, data3, data4]))
####################################################################################
def test_hypot():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.hypotScaler(value1, value2) == np.hypot(value1, value2)
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
value3 = np.random.randn(1).item() * 100 + 1000
assert (np.round(NumCpp.hypotScalerTriple(value1, value2, value3), 9) ==
np.round(np.sqrt(value1**2 + value2**2 + value3**2), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.hypotArray(cArray1, cArray2), 9),
np.round(np.hypot(data1, data2), 9))
####################################################################################
def test_identity():
squareSize = np.random.randint(10, 100, [1, ]).item()
assert np.array_equal(NumCpp.identity(squareSize).getNumpyArray(), np.identity(squareSize))
squareSize = np.random.randint(10, 100, [1, ]).item()
assert np.array_equal(NumCpp.identityComplex(squareSize).getNumpyArray(),
np.identity(squareSize) + 1j * np.zeros([squareSize, squareSize]))
####################################################################################
def test_imag():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.imagScaler(value), 9) == np.round(np.imag(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.imagArray(cArray), 9), np.round(np.imag(data), 9))
####################################################################################
def test_interp():
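    # sample a sine on a coarse grid and linearly interpolate onto a denser one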
endPoint = np.random.randint(10, 20, [1, ]).item()
numPoints = np.random.randint(50, 100, [1, ]).item()
resample = np.random.randint(2, 5, [1, ]).item()
xpData = np.linspace(0, endPoint, numPoints, endpoint=True)
fpData = np.sin(xpData)
xData = np.linspace(0, endPoint, numPoints * resample, endpoint=True)
cXp = NumCpp.NdArray(1, numPoints)
cFp = NumCpp.NdArray(1, numPoints)
cX = NumCpp.NdArray(1, numPoints * resample)
cXp.setArray(xpData)
cFp.setArray(fpData)
cX.setArray(xData)
assert np.array_equal(np.round(NumCpp.interp(cX, cXp, cFp).flatten(), 9),
np.round(np.interp(xData, xpData, fpData), 9))
####################################################################################
def test_intersect1d():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.uint32)
data2 = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
    assert np.array_equal(NumCpp.intersect1d(cArray1, cArray2).getNumpyArray().flatten(),
                          np.intersect1d(data1, data2))
####################################################################################
def test_invert():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.invert(cArray).getNumpyArray(), np.invert(data))
####################################################################################
def test_isclose():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.rand(shape.rows, shape.cols)
data2 = data1 + np.random.randn(shape.rows, shape.cols) * 1e-5
cArray1.setArray(data1)
cArray2.setArray(data2)
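    # the ~1e-5 perturbation is on the order of the tolerances below, so the mask mixes True and False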
rtol = 1e-5
atol = 1e-8
assert np.array_equal(NumCpp.isclose(cArray1, cArray2, rtol, atol).getNumpyArray(),
np.isclose(data1, data2, rtol=rtol, atol=atol))
####################################################################################
def test_isinf():
value = np.random.randn(1).item() * 100 + 1000
assert NumCpp.isinfScaler(value) == np.isinf(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data[data > 1000] = np.inf
cArray.setArray(data)
assert np.array_equal(NumCpp.isinfArray(cArray), np.isinf(data))
####################################################################################
def test_isnan():
value = np.random.randn(1).item() * 100 + 1000
assert NumCpp.isnanScaler(value) == np.isnan(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data[data > 1000] = np.nan
cArray.setArray(data)
assert np.array_equal(NumCpp.isnanArray(cArray), np.isnan(data))
####################################################################################
def test_lcm():
if not NumCpp.NUMCPP_NO_USE_BOOST or NumCpp.STL_GCD_LCM:
value1 = np.random.randint(1, 1000, [1, ]).item()
value2 = np.random.randint(1, 1000, [1, ]).item()
assert NumCpp.lcmScaler(value1, value2) == np.lcm(value1, value2)
if not NumCpp.NUMCPP_NO_USE_BOOST:
size = np.random.randint(2, 10, [1, ]).item()
cArray = NumCpp.NdArrayUInt32(1, size)
data = np.random.randint(1, 100, [size, ], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.lcmArray(cArray) == np.lcm.reduce(data) # noqa
####################################################################################
def test_ldexp():
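    # ldexp(x, n) computes x * 2**n; the exponents are kept small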
value1 = np.random.randn(1).item() * 100
value2 = np.random.randint(1, 20, [1, ]).item()
assert np.round(NumCpp.ldexpScaler(value1, value2), 9) == np.round(np.ldexp(value1, value2), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayUInt8(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100
data2 = np.random.randint(1, 20, [shape.rows, shape.cols], dtype=np.uint8)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.ldexpArray(cArray1, cArray2), 9), np.round(np.ldexp(data1, data2), 9))
####################################################################################
def test_left_shift():
shapeInput = np.random.randint(20, 100, [2, ])
    bitsToShift = np.random.randint(1, 32, [1, ]).item()
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(1, np.iinfo(np.uint32).max, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
    assert np.array_equal(NumCpp.left_shift(cArray, bitsToShift).getNumpyArray(),
                          np.left_shift(data, bitsToShift))
####################################################################################
def test_less():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less(cArray1, cArray2).getNumpyArray(),
np.less(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less(cArray1, cArray2).getNumpyArray(),
np.less(data1, data2))
####################################################################################
def test_less_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less_equal(cArray1, cArray2).getNumpyArray(),
np.less_equal(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less_equal(cArray1, cArray2).getNumpyArray(),
np.less_equal(data1, data2))
####################################################################################
def test_load():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
tempFile = os.path.join(tempDir, 'NdArrayDump.bin')
NumCpp.dump(cArray, tempFile)
assert os.path.isfile(tempFile)
data2 = NumCpp.load(tempFile).reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile)
####################################################################################
def test_linspace():
start = np.random.randint(1, 10, [1, ]).item()
end = np.random.randint(start + 10, 100, [1, ]).item()
numPoints = np.random.randint(1, 100, [1, ]).item()
assert np.array_equal(np.round(NumCpp.linspace(start, end, numPoints, True).getNumpyArray().flatten(), 9),
np.round(np.linspace(start, end, numPoints, endpoint=True), 9))
start = np.random.randint(1, 10, [1, ]).item()
end = np.random.randint(start + 10, 100, [1, ]).item()
numPoints = np.random.randint(1, 100, [1, ]).item()
assert np.array_equal(np.round(NumCpp.linspace(start, end, numPoints, False).getNumpyArray().flatten(), 9),
np.round(np.linspace(start, end, numPoints, endpoint=False), 9))
####################################################################################
def test_log():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.logScaler(value), 9) == np.round(np.log(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.logScaler(value), 9) == np.round(np.log(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.logArray(cArray), 9), np.round(np.log(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.logArray(cArray), 9), np.round(np.log(data), 9))
####################################################################################
def test_log10():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log10Scaler(value), 9) == np.round(np.log10(value), 9)
components = np.random.randn(2).astype(np.double) * 100 + 100
value = complex(components[0], components[1])
assert np.round(NumCpp.log10Scaler(value), 9) == np.round(np.log10(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log10Array(cArray), 9), np.round(np.log10(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log10Array(cArray), 9), np.round(np.log10(data), 9))
####################################################################################
def test_log1p():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log1pScaler(value), 9) == np.round(np.log1p(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log1pArray(cArray), 9), np.round(np.log1p(data), 9))
####################################################################################
def test_log2():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log2Scaler(value), 9) == np.round(np.log2(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log2Array(cArray), 9), np.round(np.log2(data), 9))
####################################################################################
def test_logical_and():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_and(cArray1, cArray2).getNumpyArray(), np.logical_and(data1, data2))
####################################################################################
def test_logical_not():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.logical_not(cArray).getNumpyArray(), np.logical_not(data))
####################################################################################
def test_logical_or():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_or(cArray1, cArray2).getNumpyArray(), np.logical_or(data1, data2))
####################################################################################
def test_logical_xor():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_xor(cArray1, cArray2).getNumpyArray(), np.logical_xor(data1, data2))
####################################################################################
def test_matmul():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 20, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 20, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
data1 = np.random.randint(0, 20, [shape1.rows, shape1.cols])
real2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
imag2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
imag2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArray(shape2)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(0, 20, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
####################################################################################
def test_max():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.max(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.max(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.ROW).flatten(),
np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.ROW).flatten(),
np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.COL).flatten(),
np.max(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.COL).flatten(),
np.max(data, axis=1))
####################################################################################
def test_maximum():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.maximum(cArray1, cArray2), np.maximum(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.maximum(cArray1, cArray2), np.maximum(data1, data2))
####################################################################################
def test_mean():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.mean(cArray, NumCpp.Axis.NONE).getNumpyArray().item(), 9) == \
np.round(np.mean(data, axis=None).item(), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.round(NumCpp.mean(cArray, NumCpp.Axis.NONE).getNumpyArray().item(), 9) == \
np.round(np.mean(data, axis=None).item(), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=1), 9))
####################################################################################
def test_median():
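    # keep the element count odd so the median is an exact array element and matches numpy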
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) # noqa
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
    assert (NumCpp.median(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item() ==
            np.median(data, axis=None).item())
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.median(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.median(data, axis=0))
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.median(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.median(data, axis=1))
####################################################################################
def test_meshgrid():
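    # build index grids from NumCpp slices and compare against numpy's meshgrid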
start = np.random.randint(0, 20, [1, ]).item()
end = np.random.randint(30, 100, [1, ]).item()
step = np.random.randint(1, 5, [1, ]).item()
dataI = np.arange(start, end, step)
iSlice = NumCpp.Slice(start, end, step)
start = np.random.randint(0, 20, [1, ]).item()
end = np.random.randint(30, 100, [1, ]).item()
step = np.random.randint(1, 5, [1, ]).item()
dataJ = np.arange(start, end, step)
jSlice = NumCpp.Slice(start, end, step)
iMesh, jMesh = np.meshgrid(dataI, dataJ)
iMeshC, jMeshC = NumCpp.meshgrid(iSlice, jSlice)
assert np.array_equal(iMeshC.getNumpyArray(), iMesh)
assert np.array_equal(jMeshC.getNumpyArray(), jMesh)
####################################################################################
def test_min():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.min(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.min(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.ROW).flatten(),
np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.ROW).flatten(),
np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.COL).flatten(),
np.min(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.COL).flatten(),
np.min(data, axis=1))
####################################################################################
def test_minimum():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.minimum(cArray1, cArray2), np.minimum(data1, data2))
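    # For the complex case below, NumPy orders complex values lexicographically
    # (real part first, then imaginary part); NumCpp is assumed to follow the
    # same convention for this comparison to hold.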
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.minimum(cArray1, cArray2), np.minimum(data1, data2))
####################################################################################
def test_mod():
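    # Restricted to positive uint32 operands: C++ % truncates toward zero while
    # np.mod floors, so the two only agree when both operands are non-negative
    # (presumably why no signed/negative case is exercised here).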
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
data2 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.mod(cArray1, cArray2).getNumpyArray(), np.mod(data1, data2))
####################################################################################
def test_multiply():
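    # Exercises the full overload matrix: array*array, array*scalar, and
    # scalar*array for real, complex, and mixed real/complex operands.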
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
####################################################################################
def test_nan_to_num():
shapeInput = np.random.randint(50, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.size(), ]).astype(np.double)
nan_idx = np.random.choice(range(data.size), 10, replace=False)
pos_inf_idx = np.random.choice(range(data.size), 10, replace=False)
neg_inf_idx = np.random.choice(range(data.size), 10, replace=False)
data[nan_idx] = np.nan
data[pos_inf_idx] = np.inf
data[neg_inf_idx] = -np.inf
data = data.reshape(shapeInput)
cArray.setArray(data)
nan_replace = float(np.random.randint(100))
pos_inf_replace = float(np.random.randint(100))
neg_inf_replace = float(np.random.randint(100))
assert np.array_equal(NumCpp.nan_to_num(cArray, nan_replace, pos_inf_replace, neg_inf_replace),
np.nan_to_num(data, nan=nan_replace, posinf=pos_inf_replace, neginf=neg_inf_replace))
####################################################################################
def test_nanargmax():
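    # NaN injection pattern used throughout the nan* tests: the random indices
    # are drawn with replacement, so *up to* size // 10 entries become NaN.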
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanargmax(cArray, NumCpp.Axis.NONE).item() == np.nanargmax(data)
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmax(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanargmax(data, axis=0))
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmax(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanargmax(data, axis=1))
####################################################################################
def test_nanargmin():
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanargmin(cArray, NumCpp.Axis.NONE).item() == np.nanargmin(data)
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmin(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanargmin(data, axis=0))
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmin(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanargmin(data, axis=1))
####################################################################################
def test_nancumprod():
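    # Shapes and values are kept tiny (at most 4x4 of values <= 3) so every
    # partial product stays exactly representable in a double, making exact
    # comparison with NumPy safe (presumably the intent of the small bounds).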
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumprod(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.nancumprod(data, axis=None))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumprod(cArray, NumCpp.Axis.ROW).getNumpyArray(), np.nancumprod(data, axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumprod(cArray, NumCpp.Axis.COL).getNumpyArray(), np.nancumprod(data, axis=1))
####################################################################################
def test_nancumsum():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumsum(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.nancumsum(data, axis=None))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumsum(cArray, NumCpp.Axis.ROW).getNumpyArray(), np.nancumsum(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumsum(cArray, NumCpp.Axis.COL).getNumpyArray(), np.nancumsum(data, axis=1))
####################################################################################
def test_nanmax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanmax(cArray, NumCpp.Axis.NONE).item() == np.nanmax(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmax(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmax(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanmax(data, axis=1))
####################################################################################
def test_nanmean():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanmean(cArray, NumCpp.Axis.NONE).item() == np.nanmean(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmean(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanmean(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmean(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanmean(data, axis=1))
####################################################################################
def test_nanmedian():
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) # noqa
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert (NumCpp.nanmedian(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item() ==
np.nanmedian(data, axis=None).item())
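    # The axis-wise nanmedian tests below are disabled. A plausible reason (not
    # stated by the original author): NaN injection leaves each row/column with
    # an unpredictable number of valid entries, so odd-length medians cannot be
    # guaranteed per axis and exact agreement with NumPy is not assured.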
# isEven = True
# while isEven:
# shapeInput = np.random.randint(20, 100, [2, ])
# isEven = shapeInput[0].item() % 2 == 0
# shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
# cArray = NumCpp.NdArray(shape)
# data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
# data = data.flatten()
# data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
# data = data.reshape(shapeInput)
# cArray.setArray(data)
# assert np.array_equal(NumCpp.nanmedian(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
# np.nanmedian(data, axis=0))
#
# isEven = True
# while isEven:
# shapeInput = np.random.randint(20, 100, [2, ])
# isEven = shapeInput[1].item() % 2 == 0
# shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
# cArray = NumCpp.NdArray(shape)
# data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
# data = data.flatten()
# data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
# data = data.reshape(shapeInput)
# cArray.setArray(data)
# assert np.array_equal(NumCpp.nanmedian(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
# np.nanmedian(data, axis=1))
####################################################################################
def test_nanmin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanmin(cArray, NumCpp.Axis.NONE).item() == np.nanmin(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmin(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmin(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanmin(data, axis=1))
####################################################################################
def test_nanpercentile():
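    # The 'lower'/'higher'/'nearest'/'midpoint'/'linear' strings map onto
    # NumPy's interpolation= keyword; NumPy 1.22+ renames it to method=, and
    # this suite targets the older spelling.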
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'lower').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'higher').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'nearest').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'midpoint').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='midpoint'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'linear').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='linear'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.ROW, 'lower').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=0, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.ROW, 'higher').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=0, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.ROW,
'nearest').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=0, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(
np.round(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.ROW,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=0, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.ROW,
'linear').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=0, interpolation='linear'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.COL, 'lower').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=1, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.COL, 'higher').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=1, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.COL,
'nearest').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=1, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(
np.round(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.COL,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=1, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(
np.round(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.COL, 'linear').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=1, interpolation='linear'), 9))
####################################################################################
def test_nanprod():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanprod(cArray, NumCpp.Axis.NONE).item() == np.nanprod(data, axis=None)
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanprod(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.nanprod(data, axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanprod(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.nanprod(data, axis=1))
####################################################################################
def test_nans():
shapeInput = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.nansSquare(shapeInput)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput ** 2 and np.all(np.isnan(cArray)))
shapeInput = np.random.randint(20, 100, [2, ])
cArray = NumCpp.nansRowCol(shapeInput[0].item(), shapeInput[1].item())
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(np.isnan(cArray)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.nansShape(shape)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(np.isnan(cArray)))
####################################################################################
def test_nans_like():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.nans_like(cArray1)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(np.isnan(cArray2.getNumpyArray())))
####################################################################################
def test_nanstd():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.round(NumCpp.nanstdev(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.nanstd(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanstdev(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.nanstd(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanstdev(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.nanstd(data, axis=1), 9))
####################################################################################
def test_nansum():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nansum(cArray, NumCpp.Axis.NONE).item() == np.nansum(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nansum(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.nansum(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nansum(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.nansum(data, axis=1))
####################################################################################
def test_nanvar():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.round(NumCpp.nanvar(cArray, NumCpp.Axis.NONE).item(), 8) == np.round(np.nanvar(data), 8)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanvar(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 8),
np.round(np.nanvar(data, axis=0), 8))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanvar(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 8),
np.round(np.nanvar(data, axis=1), 8))
####################################################################################
def test_nbytes():
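    # These assertions bake in sizeof(double) == 8 and
    # sizeof(std::complex<double>) == 16, which holds on mainstream platforms
    # but is formally an assumption about the build target.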
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.nbytes(cArray) == data.size * 8
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.nbytes(cArray) == data.size * 16
####################################################################################
def test_negative():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.negative(cArray).getNumpyArray(), 9),
np.round(np.negative(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.negative(cArray).getNumpyArray(), 9),
np.round(np.negative(data), 9))
####################################################################################
def test_newbyteorderArray():
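    # ndarray.newbyteorder() was removed in NumPy 2.0 (dtype.newbyteorder
    # remains); this test targets NumPy 1.x.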
value = np.random.randint(1, 100, [1, ]).item()
assert (NumCpp.newbyteorderScaler(value, NumCpp.Endian.BIG) ==
np.asarray([value], dtype=np.uint32).newbyteorder().item())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.newbyteorderArray(cArray, NumCpp.Endian.BIG),
data.newbyteorder())
####################################################################################
def test_none():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.none(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.logical_not(np.any(data).item())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.none(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.logical_not(np.any(data).item())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.ROW).flatten().astype(bool),
np.logical_not(np.any(data, axis=0)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.ROW).flatten().astype(bool),
np.logical_not(np.any(data, axis=0)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.COL).flatten().astype(bool),
np.logical_not(np.any(data, axis=1)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.COL).flatten().astype(bool),
np.logical_not(np.any(data, axis=1)))
####################################################################################
def test_nonzero():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
row, col = np.nonzero(data)
rowC, colC = NumCpp.nonzero(cArray)
assert (np.array_equal(rowC.getNumpyArray().flatten(), row) and
np.array_equal(colC.getNumpyArray().flatten(), col))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
row, col = np.nonzero(data)
rowC, colC = NumCpp.nonzero(cArray)
assert (np.array_equal(rowC.getNumpyArray().flatten(), row) and
np.array_equal(colC.getNumpyArray().flatten(), col))
####################################################################################
def test_norm():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
    assert NumCpp.norm(cArray, NumCpp.Axis.NONE).flatten().item() == np.linalg.norm(data.flatten())
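    # The complex cases below are only smoke-tested (asserting a non-None
    # result): np.linalg.norm and NumCpp may not share a convention for complex
    # norms, so no value comparison is attempted (a reading inferred from the
    # weak assertions).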
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.norm(cArray, NumCpp.Axis.NONE).item() is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
norms = NumCpp.norm(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten()
allPass = True
    for idx, column in enumerate(data.transpose()):  # Axis.ROW yields one norm per column
        if norms[idx] != np.linalg.norm(column):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
norms = NumCpp.norm(cArray, NumCpp.Axis.COL).getNumpyArray().flatten()
allPass = True
for idx, row in enumerate(data):
if norms[idx] != np.linalg.norm(row):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
norms = NumCpp.norm(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten()
assert norms is not None
####################################################################################
def test_not_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.not_equal(cArray1, cArray2).getNumpyArray(), np.not_equal(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.not_equal(cArray1, cArray2).getNumpyArray(), np.not_equal(data1, data2))
####################################################################################
def test_ones():
shapeInput = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.onesSquare(shapeInput)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput ** 2 and np.all(cArray == 1))
shapeInput = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.onesSquareComplex(shapeInput)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput ** 2 and np.all(cArray == complex(1, 0)))
shapeInput = np.random.randint(20, 100, [2, ])
cArray = NumCpp.onesRowCol(shapeInput[0].item(), shapeInput[1].item())
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == 1))
shapeInput = np.random.randint(20, 100, [2, ])
cArray = NumCpp.onesRowColComplex(shapeInput[0].item(), shapeInput[1].item())
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == complex(1, 0)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.onesShape(shape)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == 1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.onesShapeComplex(shape)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == complex(1, 0)))
####################################################################################
def test_ones_like():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.ones_like(cArray1)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == 1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.ones_likeComplex(cArray1)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == complex(1, 0)))
####################################################################################
def test_outer():
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
data2 = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.outer(cArray1, cArray2), np.outer(data1, data2))
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.outer(cArray1, cArray2), np.outer(data1, data2))
####################################################################################
def test_pad():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
padWidth = np.random.randint(1, 10, [1, ]).item()
padValue = np.random.randint(1, 100, [1, ]).item()
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray = NumCpp.NdArray(shape)
cArray.setArray(data)
assert np.array_equal(NumCpp.pad(cArray, padWidth, padValue).getNumpyArray(),
np.pad(data, padWidth, mode='constant', constant_values=padValue))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
padWidth = np.random.randint(1, 10, [1, ]).item()
padValue = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray = NumCpp.NdArrayComplexDouble(shape)
cArray.setArray(data)
assert np.array_equal(NumCpp.pad(cArray, padWidth, padValue).getNumpyArray(),
np.pad(data, padWidth, mode='constant', constant_values=padValue))
####################################################################################
def test_partition():
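    # Partition property being checked: after partitioning around kthElement,
    # every element before that index is <= the pivot value and every element
    # from it onward is >=. Complex values are assumed to compare
    # lexicographically, matching NumPy's sort order.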
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput.prod(), [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.NONE).getNumpyArray().flatten()
    assert (np.all(partitionedArray[:kthElement] <= partitionedArray[kthElement]) and
            np.all(partitionedArray[kthElement:] >= partitionedArray[kthElement]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput.prod(), [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.NONE).getNumpyArray().flatten()
    assert (np.all(partitionedArray[:kthElement] <= partitionedArray[kthElement]) and
            np.all(partitionedArray[kthElement:] >= partitionedArray[kthElement]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[0], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.ROW).getNumpyArray().transpose()
allPass = True
for row in partitionedArray:
        if not (np.all(row[:kthElement] <= row[kthElement]) and
                np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[0], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.ROW).getNumpyArray().transpose()
allPass = True
for row in partitionedArray:
        if not (np.all(row[:kthElement] <= row[kthElement]) and
                np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[1], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.COL).getNumpyArray()
allPass = True
for row in partitionedArray:
        if not (np.all(row[:kthElement] <= row[kthElement]) and
                np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[1], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.COL).getNumpyArray()
allPass = True
for row in partitionedArray:
        if not (np.all(row[:kthElement] <= row[kthElement]) and
                np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
####################################################################################
def test_percentile():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'lower').item() ==
np.percentile(data, percentile, axis=None, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'higher').item() ==
np.percentile(data, percentile, axis=None, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'nearest').item() ==
np.percentile(data, percentile, axis=None, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'midpoint').item() ==
np.percentile(data, percentile, axis=None, interpolation='midpoint'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'linear').item() ==
np.percentile(data, percentile, axis=None, interpolation='linear'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.ROW, 'lower').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=0, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.ROW, 'higher').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=0, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.ROW, 'nearest').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=0, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.ROW,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=0, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.ROW,
'linear').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=0, interpolation='linear'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.COL, 'lower').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=1, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.COL, 'higher').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=1, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.COL, 'nearest').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=1, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.COL,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=1, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.COL,
'linear').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=1, interpolation='linear'), 9))
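# The five interpolation strategies exercised above differ only in how a
# fractional rank between two order statistics is resolved. A hand-rolled
# sketch for 1-D input (illustrative only; `sorted_data` is assumed sorted):
def _percentile_sketch(sorted_data, q, method='linear'):
    rank = (len(sorted_data) - 1) * q / 100.0
    lo, hi = int(np.floor(rank)), int(np.ceil(rank))
    if method == 'lower':
        return sorted_data[lo]
    if method == 'higher':
        return sorted_data[hi]
    if method == 'nearest':
        return sorted_data[int(np.round(rank))]
    if method == 'midpoint':
        return (sorted_data[lo] + sorted_data[hi]) / 2.0
    # 'linear': interpolate between the two bracketing order statistics
    return sorted_data[lo] + (sorted_data[hi] - sorted_data[lo]) * (rank - lo)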
####################################################################################
def test_polar():
components = np.random.rand(2).astype(np.double)
assert NumCpp.polarScaler(components[0], components[1])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
magArray = NumCpp.NdArray(shape)
angleArray = NumCpp.NdArray(shape)
mag = np.random.rand(shape.rows, shape.cols)
angle = np.random.rand(shape.rows, shape.cols)
magArray.setArray(mag)
angleArray.setArray(angle)
assert NumCpp.polarArray(magArray, angleArray) is not None
####################################################################################
def test_power():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
exponent = np.random.randint(0, 5, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponent = np.random.randint(0, 5, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cExponents = NumCpp.NdArrayUInt8(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
exponents = np.random.randint(0, 5, [shape.rows, shape.cols]).astype(np.uint8)
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cExponents = NumCpp.NdArrayUInt8(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponents = np.random.randint(0, 5, [shape.rows, shape.cols]).astype(np.uint8)
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
####################################################################################
def test_powerf():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
exponent = np.random.rand(1).item() * 3
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerfArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponent = np.random.rand(1).item() * 3
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerfArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cExponents = NumCpp.NdArray(shape)
data = np.random.randint(0, 20, [shape.rows, shape.cols])
exponents = np.random.rand(shape.rows, shape.cols) * 3
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerfArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cExponents = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponents = np.random.rand(shape.rows, shape.cols) * 3 + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerfArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
####################################################################################
def test_prod():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert NumCpp.prod(cArray, NumCpp.Axis.NONE).item() == data.prod()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 15, [shape.rows, shape.cols])
imag = np.random.randint(1, 15, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.prod(cArray, NumCpp.Axis.NONE).item() == data.prod()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), data.prod(axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 15, [shape.rows, shape.cols])
imag = np.random.randint(1, 15, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), data.prod(axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), data.prod(axis=1))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 15, [shape.rows, shape.cols])
imag = np.random.randint(1, 15, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), data.prod(axis=1))
####################################################################################
def test_proj():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert NumCpp.projScaler(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cData = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cData.setArray(data)
assert NumCpp.projArray(cData) is not None
####################################################################################
def test_ptp():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.ptp(cArray, NumCpp.Axis.NONE).getNumpyArray().item() == data.ptp()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.ptp(cArray, NumCpp.Axis.NONE).getNumpyArray().item() == data.ptp()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.ptp(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten().astype(np.uint32),
data.ptp(axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.ptp(cArray, NumCpp.Axis.COL).getNumpyArray().flatten().astype(np.uint32),
data.ptp(axis=1))
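# ptp ("peak to peak") is simply max - min along the requested axis, which is
# why the uint32 casts above are safe for non-negative integer data:
# data.ptp(axis=0) == data.max(axis=0) - data.min(axis=0).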
####################################################################################
def test_put():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
numIndices = np.random.randint(0, shape.size())
indices = np.asarray(range(numIndices), np.uint32)
value = np.random.randint(1, 500)
cIndices = NumCpp.NdArrayUInt32(1, numIndices)
cIndices.setArray(indices)
NumCpp.put(cArray, cIndices, value)
data.put(indices, value)
assert np.array_equal(cArray.getNumpyArray(), data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
numIndices = np.random.randint(0, shape.size())
indices = np.asarray(range(numIndices), dtype=np.uint32)
values = np.random.randint(1, 500, [numIndices, ])
cIndices = NumCpp.NdArrayUInt32(1, numIndices)
cValues = NumCpp.NdArray(1, numIndices)
cIndices.setArray(indices)
cValues.setArray(values)
NumCpp.put(cArray, cIndices, cValues)
data.put(indices, values)
assert np.array_equal(cArray.getNumpyArray(), data)
####################################################################################
def test_rad2deg():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert np.round(NumCpp.rad2degScaler(value), 9) == np.round(np.rad2deg(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rad2degArray(cArray), 9), np.round(np.rad2deg(data), 9))
####################################################################################
def test_radians():
value = np.abs(np.random.rand(1).item()) * 360
assert np.round(NumCpp.radiansScaler(value), 9) == np.round(np.radians(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 360
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.radiansArray(cArray), 9), np.round(np.radians(data), 9))
####################################################################################
def test_ravel():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
cArray2 = NumCpp.ravel(cArray)
assert np.array_equal(cArray2.getNumpyArray().flatten(), np.ravel(data))
####################################################################################
def test_real():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.realScaler(value), 9) == np.round(np.real(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.realArray(cArray), 9), np.round(np.real(data), 9))
####################################################################################
def test_reciprocal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.reciprocal(cArray), 9), np.round(np.reciprocal(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.double)
imag = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.double)
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.reciprocal(cArray), 9), np.round(np.reciprocal(data), 9))
####################################################################################
def test_remainder():
# numpy and cmath remainders are calculated differently, so convert for testing purposes
values = np.random.rand(2) * 100
values = np.sort(values)
res = NumCpp.remainderScaler(values[1].item(), values[0].item())
if res < 0:
res += values[0].item()
assert np.round(res, 9) == np.round(np.remainder(values[1], values[0]), 9)
# numpy and cmath remainders are calculated differently, so convert for testing purposes
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.rand(shape.rows, shape.cols) * 100 + 10
data2 = data1 - np.random.rand(shape.rows, shape.cols) * 10
cArray1.setArray(data1)
cArray2.setArray(data2)
res = NumCpp.remainderArray(cArray1, cArray2)
res[res < 0] = res[res < 0] + data2[res < 0]
assert np.array_equal(np.round(res, 9), np.round(np.remainder(data1, data2), 9))
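# Why the sign fix-ups above are needed: C++ std::remainder uses the IEEE
# round-to-nearest convention, so its result lies in [-d/2, d/2], while
# np.remainder follows floor division, so its result lies in [0, d) for d > 0.
# Adding d to a negative result maps one convention onto the other.
# Illustrative check for scalar x and d > 0 (not part of the test suite):
def _remainder_conventions(x, d):
    ieee = x - np.round(x / d) * d       # what std::remainder computes
    floored = np.remainder(x, d)         # what numpy computes
    return (ieee + d if ieee < 0 else ieee), floored  # the two should agree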
####################################################################################
def test_replace():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
oldValue = np.random.randint(1, 100, 1).item()
newValue = np.random.randint(1, 100, 1).item()
dataCopy = data.copy()
dataCopy[dataCopy == oldValue] = newValue
assert np.array_equal(NumCpp.replace(cArray, oldValue, newValue), dataCopy)
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
oldValue = np.random.randint(1, 100, 1).item() + 1j * np.random.randint(1, 100, 1).item()
newValue = np.random.randint(1, 100, 1).item() + 1j * np.random.randint(1, 100, 1).item()
dataCopy = data.copy()
dataCopy[dataCopy == oldValue] = newValue
assert np.array_equal(NumCpp.replace(cArray, oldValue, newValue), dataCopy)
####################################################################################
def test_reshape():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newShape = data.size
NumCpp.reshape(cArray, newShape)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(1, newShape))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newShape = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
NumCpp.reshape(cArray, newShape)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(shapeInput[::-1]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newShape = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
NumCpp.reshapeList(cArray, newShape)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(shapeInput[::-1]))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newNumCols = np.random.choice(np.array(list(factors(data.size))), 1).item()
NumCpp.reshape(cArray, -1, newNumCols)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(-1, newNumCols))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newNumRows = np.random.choice(np.array(list(factors(data.size))), 1).item()
NumCpp.reshape(cArray, newNumRows, -1)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(newNumRows, -1))
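# Passing -1 for one dimension (as above) asks reshape to infer that dimension
# from the total size, so any factor of data.size yields a valid shape, e.g.
# np.arange(12).reshape(-1, 4).shape == (3, 4).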
####################################################################################
def test_resize():
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput2 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput2[0].item(), shapeInput2[1].item())
cArray = NumCpp.NdArray(shape1)
data = np.random.randint(1, 100, [shape1.rows, shape1.cols], dtype=np.uint32)
cArray.setArray(data)
NumCpp.resizeFast(cArray, shape2)
assert cArray.shape().rows == shape2.rows
assert cArray.shape().cols == shape2.cols
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput2 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput2[0].item(), shapeInput2[1].item())
cArray = NumCpp.NdArray(shape1)
data = np.random.randint(1, 100, [shape1.rows, shape1.cols], dtype=np.uint32)
cArray.setArray(data)
NumCpp.resizeSlow(cArray, shape2)
assert cArray.shape().rows == shape2.rows
assert cArray.shape().cols == shape2.cols
####################################################################################
def test_right_shift():
shapeInput = np.random.randint(20, 100, [2, ])
bitsToshift = np.random.randint(1, 32, [1, ]).item()
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(1, np.iinfo(np.uint32).max, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.right_shift(cArray, bitsToshift).getNumpyArray(),
np.right_shift(data, bitsToshift))
####################################################################################
def test_rint():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert NumCpp.rintScaler(value) == np.rint(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(NumCpp.rintArray(cArray), np.rint(data))
####################################################################################
def test_rms():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert (np.round(NumCpp.rms(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item(), 9) ==
np.round(np.sqrt(np.mean(np.square(data), axis=None)).item(), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert (np.round(NumCpp.rms(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item(), 9) ==
np.round(np.sqrt(np.mean(np.square(data), axis=None)).item(), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=0)), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=0)), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=1)), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=1)), 9))
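# The RMS reference used throughout these checks is sqrt(mean(x**2)). A
# one-line reference implementation (illustrative):
def _rms_reference(a, axis=None):
    return np.sqrt(np.mean(np.square(a), axis=axis))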
####################################################################################
def test_roll():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
amount = np.random.randint(0, data.size, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.roll(cArray, amount, NumCpp.Axis.NONE).getNumpyArray(),
np.roll(data, amount, axis=None))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
amount = np.random.randint(0, shape.cols, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.roll(cArray, amount, NumCpp.Axis.ROW).getNumpyArray(),
np.roll(data, amount, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
amount = np.random.randint(0, shape.rows, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.roll(cArray, amount, NumCpp.Axis.COL).getNumpyArray(),
np.roll(data, amount, axis=1))
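# np.roll shifts elements with wrap-around: entries pushed past the end
# reappear at the beginning. With Axis.NONE the array is rolled as if
# flattened; Axis.ROW/COL map to numpy's axis=0/axis=1 as asserted above.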
####################################################################################
def test_rot90():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
amount = np.random.randint(1, 4, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.rot90(cArray, amount).getNumpyArray(), np.rot90(data, amount))
####################################################################################
def test_round():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert NumCpp.roundScaler(value, 10) == np.round(value, 10)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(NumCpp.roundArray(cArray, 9), np.round(data, 9))
####################################################################################
def test_row_stack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape3 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape4 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.row_stack(cArray1, cArray2, cArray3, cArray4),
np.row_stack([data1, data2, data3, data4]))
####################################################################################
def test_setdiff1d():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
data2 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.setdiff1d(cArray1, cArray2).getNumpyArray().flatten(),
np.setdiff1d(data1, data2))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.setdiff1d(cArray1, cArray2).getNumpyArray().flatten(),
np.setdiff1d(data1, data2))
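# np.setdiff1d (matched by NumCpp.setdiff1d above) returns the sorted, unique
# values of the first array that are not present in the second, e.g.
# np.setdiff1d([3, 1, 2, 3], [2]) -> array([1, 3]).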
####################################################################################
def test_shape():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
assert cArray.shape().rows == shape.rows and cArray.shape().cols == shape.cols
####################################################################################
def test_sign():
value = np.random.randn(1).item() * 100
assert NumCpp.signScaler(value) == np.sign(value)
value = np.random.randn(1).item() * 100 + 1j * np.random.randn(1).item() * 100
assert NumCpp.signScaler(value) == np.sign(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.signArray(cArray), np.sign(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.signArray(cArray), np.sign(data))
####################################################################################
def test_signbit():
value = np.random.randn(1).item() * 100
assert NumCpp.signbitScaler(value) == np.signbit(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.signbitArray(cArray), np.signbit(data))
####################################################################################
def test_sin():
value = np.random.randn(1).item()
assert np.round(NumCpp.sinScaler(value), 9) == np.round(np.sin(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.sinScaler(value), 9) == np.round(np.sin(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinArray(cArray), 9), np.round(np.sin(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinArray(cArray), 9), np.round(np.sin(data), 9))
####################################################################################
def test_sinc():
value = np.random.randn(1)
assert np.round(NumCpp.sincScaler(value.item()), 9) == np.round(np.sinc(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sincArray(cArray), 9), np.round(np.sinc(data), 9))
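# numpy's sinc is the normalized form sin(pi*x)/(pi*x) with value 1 at x == 0;
# the direct comparison above implies NumCpp follows the same convention.
# Reference implementation (illustrative):
def _sinc_reference(x):
    x = np.asarray(x, dtype=np.double)
    with np.errstate(invalid='ignore', divide='ignore'):
        out = np.sin(np.pi * x) / (np.pi * x)
    return np.where(x == 0, 1.0, out)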
####################################################################################
def test_sinh():
value = np.random.randn(1).item()
assert np.round(NumCpp.sinhScaler(value), 9) == np.round(np.sinh(value), 9)
value = np.random.randn(1).item() + 1j * np.random.randn(1).item()
assert np.round(NumCpp.sinhScaler(value), 9) == np.round(np.sinh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinhArray(cArray), 9), np.round(np.sinh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randn(shape.rows, shape.cols) + 1j * np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinhArray(cArray), 9), np.round(np.sinh(data), 9))
####################################################################################
def test_size():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
assert cArray.size() == shapeInput.prod().item()
####################################################################################
def test_sort():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
d = data.flatten()
d.sort()
assert np.array_equal(NumCpp.sort(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(), d)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
d = data.flatten()
d.sort()
assert np.array_equal(NumCpp.sort(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(), d)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pSorted = np.sort(data, axis=0)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.ROW).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pSorted = np.sort(data, axis=0)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.ROW).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pSorted = np.sort(data, axis=1)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.COL).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pSorted = np.sort(data, axis=1)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.COL).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
####################################################################################
def test_sqrt():
value = np.random.randint(1, 100, [1, ]).item()
assert np.round(NumCpp.sqrtScaler(value), 9) == np.round(np.sqrt(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.sqrtScaler(value), 9) == np.round(np.sqrt(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sqrtArray(cArray), 9), np.round(np.sqrt(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sqrtArray(cArray), 9), np.round(np.sqrt(data), 9))
####################################################################################
def test_square():
value = np.random.randint(1, 100, [1, ]).item()
assert np.round(NumCpp.squareScaler(value), 9) == np.round(np.square(value), 9)
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
assert np.round(NumCpp.squareScaler(value), 9) == np.round(np.square(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.squareArray(cArray), 9), np.round(np.square(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.squareArray(cArray), 9), np.round(np.square(data), 9))
####################################################################################
def test_stack():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
cArray4 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data3 = np.random.randint(1, 100, [shape.rows, shape.cols])
data4 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.stack(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.ROW),
np.vstack([data1, data2, data3, data4]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
cArray4 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data3 = np.random.randint(1, 100, [shape.rows, shape.cols])
data4 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.stack(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.COL),
np.hstack([data1, data2, data3, data4]))
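# Axis mapping for stack, as exercised above: NumCpp.Axis.ROW stacks arrays
# vertically (numpy's vstack) and NumCpp.Axis.COL stacks them horizontally
# (numpy's hstack), which is why those are the reference calls.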
####################################################################################
def test_stdev():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.stdev(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.std(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.stdev(cArray, NumCpp.Axis.NONE).item() is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.stdev(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.std(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.stdev(cArray, NumCpp.Axis.ROW) is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.stdev(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.std(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.stdev(cArray, NumCpp.Axis.COL) is not None
####################################################################################
def test_subtract():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.subtract(cArray1, cArray2), data1 - data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(cArray, value), data - value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(value, cArray), value - data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.subtract(cArray1, cArray2), data1 - data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(cArray, value), data - value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(value, cArray), value - data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.subtract(cArray1, cArray2), data1 - data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.subtract(cArray1, cArray2), data1 - data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(cArray, value), data - value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(value, cArray), value - data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(cArray, value), data - value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(value, cArray), value - data)
####################################################################################
def test_sum():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.sum(cArray, NumCpp.Axis.NONE).item() == np.sum(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.sum(cArray, NumCpp.Axis.NONE).item() == np.sum(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.sum(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.sum(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.sum(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.sum(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.sum(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.sum(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.sum(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.sum(data, axis=1))
####################################################################################
def test_swap():
shapeInput1 = np.random.randint(20, 100, [2, ])
shapeInput2 = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput2[0].item(), shapeInput2[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols]).astype(np.double)
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
NumCpp.swap(cArray1, cArray2)
assert (np.array_equal(cArray1.getNumpyArray(), data2) and
np.array_equal(cArray2.getNumpyArray(), data1))
####################################################################################
def test_swapaxes():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.swapaxes(cArray).getNumpyArray(), data.T)
####################################################################################
def test_tan():
value = np.random.rand(1).item() * np.pi
assert np.round(NumCpp.tanScaler(value), 9) == np.round(np.tan(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.tanScaler(value), 9) == np.round(np.tan(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.tanArray(cArray), 9), np.round(np.tan(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.tanArray(cArray), 9), np.round(np.tan(data), 9))
####################################################################################
def test_tanh():
value = np.random.rand(1).item() * np.pi
assert np.round(NumCpp.tanhScaler(value), 9) == np.round(np.tanh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.tanhScaler(value), 9) == np.round(np.tanh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.tanhArray(cArray), 9), np.round(np.tanh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.tanhArray(cArray), 9), np.round(np.tanh(data), 9))
####################################################################################
def test_tile():
shapeInput = np.random.randint(1, 10, [2, ])
shapeRepeat = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shapeR = NumCpp.Shape(shapeRepeat[0].item(), shapeRepeat[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.tileRectangle(cArray, shapeR.rows, shapeR.cols), np.tile(data, shapeRepeat))
shapeInput = np.random.randint(1, 10, [2, ])
shapeRepeat = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shapeR = NumCpp.Shape(shapeRepeat[0].item(), shapeRepeat[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.tileShape(cArray, shapeR), np.tile(data, shapeRepeat))
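# Editor's note (illustration, not part of the original test suite): the
# np.tile semantics exercised above, assuming only numpy:
#     >>> np.tile(np.array([[1, 2], [3, 4]]), (2, 3)).shape
#     (4, 6)
# i.e. the block is repeated twice along the rows and three times along the
# columns, which is the behaviour NumCpp.tileRectangle/tileShape are checked
# against.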
####################################################################################
def test_tofile():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
filename = os.path.join(tempDir, 'temp.bin')
NumCpp.tofile(cArray, filename, '')
assert os.path.exists(filename)
data2 = np.fromfile(filename, np.double).reshape(shapeInput)
assert np.array_equal(data, data2)
os.remove(filename)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
filename = os.path.join(tempDir, 'temp.txt')
NumCpp.tofile(cArray, filename, '\n')
assert os.path.exists(filename)
data2 = | np.fromfile(filename, dtype=np.double, sep='\n') | numpy.fromfile |
"""Plot a snapshot of the particle distribution"""
# --------------------------------
# <NAME> <<EMAIL>>
# Institute of Marine Research
# November 2020
# --------------------------------
import numpy as np
import matplotlib.pyplot as plt
from netCDF4 import Dataset
from postladim import ParticleFile
# ---------------
# User settings
# ---------------
# Files
particle_file = "out.nc"
grid_file = "../data/ocean_avg_0014.nc"
# Subgrid definition
i0, i1 = 100, 136
j0, j1 = 90, 121
# timestamp
t = 176
# ----------------
# ROMS grid, plot domain
with Dataset(grid_file) as nc:
H = nc.variables["h"][j0:j1, i0:i1]
M = nc.variables["mask_rho"][j0:j1, i0:i1]
lon = nc.variables["lon_rho"][j0:j1, i0:i1]
lat = nc.variables["lat_rho"][j0:j1, i0:i1]
M[M > 0] = np.nan # Mask out sea cells
# particle_file
pf = ParticleFile(particle_file)
fig = plt.figure(figsize=(12, 10))
ax = fig.add_subplot(1, 1, 1)
# Center and boundary grid points
Xc = np.arange(i0, i1)
Yc = np.arange(j0, j1)
Xb = np.arange(i0 - 0.5, i1)
Yb = | np.arange(j0 - 0.5, j1) | numpy.arange |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for geometric operations in Numpy."""
import numpy as np
# LDIF is an internal package, should be imported last.
# pylint: disable=g-bad-import-order
from ldif.util import np_util
from ldif.util.file_util import log
# pylint: enable=g-bad-import-order
def apply_4x4(arr, m, are_points=True, feature_count=0):
"""Applies a 4x4 matrix to 3D points/vectors.
Args:
arr: Numpy array with shape [..., 3 + feature_count].
m: Matrix with shape [4, 4].
are_points: Boolean. Whether to treat arr as points or vectors.
feature_count: Int. The number of extra features after the points.
Returns:
    Numpy array with shape [..., 3 + feature_count].
"""
shape_in = arr.shape
if are_points:
hom = np.ones_like(arr[..., 0:1], dtype=np.float32)
else:
hom = np.zeros_like(arr[..., 0:1], dtype=np.float32)
assert arr.shape[-1] == 3 + feature_count
to_tx = np.concatenate([arr[..., :3], hom], axis=-1)
to_tx = np.reshape(to_tx, [-1, 4])
transformed = np.matmul(to_tx, m.T)[:, :3]
if feature_count:
flat_samples = np.reshape(arr[..., 3:], [-1, feature_count])
transformed = np.concatenate([transformed, flat_samples], axis=1)
return np.reshape(transformed, shape_in).astype(np.float32)
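# Editor's sketch (not part of the original module): minimal usage of
# apply_4x4 with a pure translation, assuming only numpy. With
# are_points=False the homogeneous coordinate is 0, so translation has no
# effect on vectors.
def _demo_apply_4x4():
  """Tiny self-check for apply_4x4 (editor's addition)."""
  translate = np.array([[1., 0., 0., 1.],
                        [0., 1., 0., 2.],
                        [0., 0., 1., 3.],
                        [0., 0., 0., 1.]], dtype=np.float32)
  pts = np.zeros([1, 3], dtype=np.float32)
  moved = apply_4x4(pts, translate, are_points=True)   # -> [[1., 2., 3.]]
  fixed = apply_4x4(pts, translate, are_points=False)  # -> [[0., 0., 0.]]
  return moved, fixed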
def batch_apply_4x4(arrs, ms, are_points=True):
"""Applies a batch of 4x4 matrices to a batch of 3D points/vectors.
Args:
arrs: Numpy array with shape [bs, ..., 3].
ms: Matrix with shape [bs, 4, 4].
are_points: Boolean. Whether to treat arr as points or vectors.
Returns:
Numpy array with shape [bs, ..., 3].
"""
log.info('Input shapes to batch_apply_4x4: %s and %s' %
(repr(arrs.shape), repr(ms.shape)))
bs = arrs.shape[0]
assert ms.shape[0] == bs
assert len(ms.shape) == 3
out = []
for i in range(bs):
out.append(apply_4x4(arrs[i, ...], ms[i, ...], are_points))
return np.stack(out)
def transform_normals(normals, tx):
"""Transforms normals to a new coordinate frame (applies inverse-transpose).
Args:
normals: Numpy array with shape [batch_size, ..., 3].
tx: Numpy array with shape [batch_size, 4, 4] or [4, 4]. Somewhat
inefficient for [4,4] inputs (tiles across the batch dimension).
Returns:
Numpy array with shape [batch_size, ..., 3]. The transformed normals.
"""
batch_size = normals.shape[0]
assert normals.shape[-1] == 3
normal_shape = list(normals.shape[1:-1])
flat_normal_len = int(np.prod(normal_shape)) # 1 if []
normals = np.reshape(normals, [batch_size, flat_normal_len, 3])
assert len(tx.shape) in [2, 3]
assert tx.shape[-1] == 4
assert tx.shape[-2] == 4
if len(tx.shape) == 2:
tx = np.tile(tx[np.newaxis, ...], [batch_size, 1, 1])
assert tx.shape[0] == batch_size
normals_invalid = np.all(np.equal(normals, 0.0), axis=-1)
tx_invt = np.linalg.inv(np.transpose(tx, axes=[0, 2, 1]))
transformed = batch_apply_4x4(normals, tx_invt)
transformed[normals_invalid, :] = 0.0
norm = np.linalg.norm(transformed, axis=-1, keepdims=True)
log.info('Norm shape, transformed shape: %s %s' %
(repr(norm.shape), repr(transformed.shape)))
transformed /= norm + 1e-8
return np.reshape(transformed, [batch_size] + normal_shape + [3])
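# Editor's note (illustration, not in the original file): normals transform by
# the inverse-transpose so they stay perpendicular under non-uniform scaling.
# With M = diag(2, 1, 1), the tangent (1, -1, 0) of the plane x + y = 0 maps to
# (2, -1, 0); the normal (1, 1, 0) must become a multiple of (1, 2, 0), which is
# exactly (M^-1)^T (1, 1, 0) = (0.5, 1, 0), while M (1, 1, 0) = (2, 1, 0) would
# no longer be perpendicular to the transformed surface.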
def world_xyzn_im_to_pts(world_xyz, world_n):
"""Makes a 10K long XYZN pointcloud from an XYZ image and a normal image."""
# world im + world normals -> world points+normals
is_valid = np.logical_not(np.all(world_xyz == 0.0, axis=-1))
world_xyzn = np.concatenate([world_xyz, world_n], axis=-1)
world_xyzn = world_xyzn[is_valid, :]
world_xyzn = np.reshape(world_xyzn, [-1, 6])
np.random.shuffle(world_xyzn)
point_count = world_xyzn.shape[0]
assert point_count > 0
log.info('The number of valid samples is: %i' % point_count)
while point_count < 10000:
world_xyzn = np.tile(world_xyzn, [2, 1])
point_count = world_xyzn.shape[0]
return world_xyzn[:10000, :]
def transform_r2n2_normal_cam_image_to_world_frame(normal_im, idx, e):
is_valid = np.all(normal_im == 0.0, axis=-1)
log.info(is_valid.shape)
is_valid = is_valid.reshape([224, 224])
world_im = apply_4x4(
normal_im, np.linalg.inv(e.r2n2_cam2world[idx, ...]).T, are_points=False)
world_im /= (np.linalg.norm(world_im, axis=-1, keepdims=True) + 1e-8)
# world_im = np_util.zero_by_mask(is_valid, world_im).astype(np.float32)
return world_im
def compute_argmax_image(xyz_image, decoder, embedding, k=1):
"""Uses the world space XYZ image to compute the maxblob influence image."""
mask = np_util.make_pixel_mask(xyz_image) # TODO(kgenova) Figure this out...
assert len(mask.shape) == 2
flat_xyz = np.reshape(xyz_image, [-1, 3])
influences = decoder.rbf_influence_at_samples(embedding, flat_xyz)
assert len(influences.shape) == 2
rbf_image = np.reshape(influences, list(mask.shape) + [-1])
# argmax_image = np.expand_dims(np.argmax(rbf_image, axis=-1), axis=-1)
argmax_image = np.flip(np.argsort(rbf_image, axis=-1), axis=-1)
argmax_image = argmax_image[..., :k]
# TODO(kgenova) Insert an equivalence class map here.
log.info(mask.shape)
log.info(argmax_image.shape)
argmax_image = np_util.zero_by_mask(mask, argmax_image, replace_with=-1)
log.info(argmax_image.shape)
return argmax_image.astype(np.int32)
def tile_world2local_frames(world2local, lyr):
"""Lifts from element_count world2local to effective_element_count frames."""
world2local = world2local.copy()
first_k = world2local[:lyr, :, :]
refl = np.array([1., 0., 0.0, 0.,
0., 1., 0.0, 0.,
0., 0., -1., 0.,
0., 0., 0.0, 1.], dtype=np.float32)
refl = np.tile(np.reshape(refl, [1, 4, 4]), [lyr, 1, 1])
# log.info(refl.shape)
first_k = np.matmul(first_k, refl)
all_bases = np.concatenate([world2local, first_k], axis=0)
# log.info(all_bases[1, ...])
# log.info(all_bases[31, ...])
# cand = np.array([1, 1, 1, 1], dtype=np.float32)
# log.info(np.matmul(all_bases[1, ...], cand))
# log.info(np.matmul(all_bases[31, ...], cand))
return all_bases
def extract_local_frame_images(world_xyz_im, world_normal_im, embedding,
decoder):
"""Computes local frame XYZ images for each of the world2local frames."""
world2local = np.squeeze(decoder.world2local(embedding))
log.info(world2local.shape)
world2local = tile_world2local_frames(world2local, 15)
log.info(world2local.shape)
xyz_ims = []
nrm_ims = []
is_invalid = | np.all(world_xyz_im == 0.0, axis=-1) | numpy.all |
#!/usr/bin/env python
import functools
import itertools
import sys
import time
import numpy as np
import pandas as pd
import scipy
import scipy as sp
import scipy.stats as stats
import seaborn as sns
from numpy import linalg as la
from scipy.stats import multivariate_normal, norm
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import train_test_split
__author__ = '<NAME>'
class IBO(object):
"""
    IBO: Intelligent Bayesian Optimization
A class to perform Bayesian Optimization on a 1D or 2D domain.
Can either have an objective function to maximize or a true function
to maximize"""
def __init__(self, kernel = 'squared_kernel'):
"""Define the parameters for the bayesian optimization.
The train points should be x,y coordinate that you already know about your
function"""
if kernel == 'squared_kernel':
self.kernel = self.__squared_kernel__
elif kernel == 'matern':
self.kernel = self.__matern_kernel__
def fit(self, train_points_x, train_points_y,
test_domain, train_y_func, y_func_type = 'real',
samples = 10 , test_points_x = None, test_points_y = None,
model_train_points_x = None, model_train_points_y = None,
covariance_noise = 5e-5, n_posteriors = 30, kernel_params = None,
model_obj = GradientBoostingRegressor,
verbose = True):
"""Define the parameters for the GP.
PARAMS:
train_points_x, - x coordinates to train on
train_points_y, - resulting output from the function, either objective or
true function
test_domain - the domain to test
        test_points_y - If using an objective function, this is from the
            train test split data
        test_points_x - if using an objective function, this is from the
            train test split
model - the model to fit for use with the objective function. Currently
works with Gradient Boosting
        y_func_type - either the real function or the objective function.
            The objective function is implemented as negative RMSE (since BO is
            a maximization procedure)
        verbose - whether to print out the points Bayesian Optimization is
            picking
train_y_func - This can either be an objective function or a true function
        kernel_params: dictionary of {'rbf_length': value} for the squared kernel
model_train_points: the training points for the objective function
"""
try:
            assert type(train_points_x).__module__ == np.__name__
            assert type(train_points_y).__module__ == np.__name__
except Exception as e:
print(e)
return ' You need to input numpy types'
# Store the training points
self.train_points_x = train_points_x
self.train_points_y = train_points_y
self.test_domain = test_domain
# setup the kernel parameters
if kernel_params != None:
self.squared_length = kernel_params['rbf_length']
else:
self.squared_length = None
# Y func can either be an objective function, or the true underlying func.
if y_func_type == 'real':
self.train_y_func = train_y_func
elif y_func_type == 'objective':
if model_obj == None:
return ' you need to pass in a model (GradientBoostingRegressor)'
# Only if using an objective function, from the 'test' split
self.test_points_x = test_points_x
self.test_points_y = test_points_y
self.model_train_points_x = model_train_points_x
self.model_train_points_y = model_train_points_y
# model to train and fit
self.model = model_obj
self.train_y_func = self.hyperparam_choice_function
# store the testing parameters
self.covariance_noise = covariance_noise
self.n_posteriors = n_posteriors
self.samples = samples
self.verbose = verbose
if self.train_points_x.shape[1] ==1: # one dimension
self.dimensions ='one'
elif self.train_points_x.shape[1] ==2:
self.dimensions = 'two'
else:
print('Either you entered more than two dimensions, \
or not a numpy array.')
print(type(self.train_points_x))
# create the generator
self.bo_gen = self.__sample_from_function__(verbose=self.verbose)
def predict(self):
"""returns x_sampled_points, y_sampled_points, best_x, best_y"""
x_sampled_points, y_sampled_points, sampled_var, \
best_x, best_y, improvements, domain, mus = next(self.bo_gen)
return x_sampled_points, y_sampled_points, best_x, best_y
def maximize(self, n_steps=10, verbose = None):
"""For the n_steps defined, find the best x and y coordinate
and return them.
Verbose controls whether to print out the points being sampled"""
verbose_ = self.verbose
self.samples = n_steps
bo_gen = self.__sample_from_function__(verbose = verbose_)
for _ in range(self.samples):
x_sampled_points, y_sampled_points, sampled_var, \
best_x, best_y, improvements, domain, mus = next(self.bo_gen)
self.best_x = best_x
self.best_y = best_y
# return the best PARAMS
return best_x, best_y
def __test_gaussian_process__(self, return_cov = False,
return_sample = False):
"""Test one new point in the Gaussian process or an array of points
Returns the mu, variance, as well as the posterior vector.
Improvements is the expected improvement for each potential test point.
Domain, is the domain over which you are searching.
        return_cov=True will return the full covariance matrix.
        If return_sample=True,
        returns samples (a vector) from the
        informed posterior and the uninformed prior distribution.
Covariance diagonal noise is used to help enforce positive definite matrices
"""
        # Update the covariance matrices
self.covariance_train_train = self.kernel(self.train_points_x,
self.train_points_x, train=True)
self.covariance_test_train = self.kernel(self.test_domain,
self.train_points_x)
self.covariance_test_test = self.kernel(self.test_domain,
self.test_domain)
        # Use Cholesky decomposition to increase speed when calculating the mean
try :# First try,
L_test_test = np.linalg.cholesky(self.covariance_test_test + \
self.covariance_noise * np.eye(len(self.covariance_test_test)))
L_train_train = np.linalg.cholesky(self.covariance_train_train + \
self.covariance_noise * np.eye(len(self.covariance_train_train)))
Lk = np.linalg.solve(L_train_train, self.covariance_test_train.T)
mus = np.dot(Lk.T, np.linalg.solve(L_train_train,
self.train_points_y)).reshape(
(len(self.test_domain),))
# Compute the standard deviation so we can plot it
s2 = np.diag(self.covariance_test_test) - np.sum(Lk**2, axis=0)
stdv = np.sqrt(abs(s2))
except Exception as e:
            print(e)  # LinAlgError: in case the covariance matrix is not positive definite
# Find the near positive definite matrix to decompose
decompose_train_train = self.nearestPD(
self.covariance_train_train + self.covariance_noise * np.eye(
len(self.train_points_x)))
decompose_test_test = self.nearestPD(
self.covariance_test_test + self.covariance_noise * np.eye(
len(self.test_domain)))
            # Cholesky decomposition on the nearest PD matrix
L_train_train = np.linalg.cholesky(decompose_train_train)
L_test_test = np.linalg.cholesky(decompose_test_test)
Lk = np.linalg.solve(L_train_train, self.covariance_test_train.T)
mus = np.dot(Lk.T, np.linalg.solve(L_train_train,
self.train_points_y)).reshape((len(self.test_domain)),)
# Compute the standard deviation so we can plot it
s2 = np.diag(self.covariance_test_test) - np.sum(Lk**2, axis=0)
stdv = np.sqrt(abs(s2))
# ##### FULL INVERSION ####
# mus = covariance_test_train @ np.linalg.pinv(covariance_train_train) @ train_y_numbers
# s2 = covariance_test_test - covariance_test_train @ np.linalg.pinv(covariance_train_train ) \
# @ covariance_test_train.T
def sample_from_posterior(n_priors=3):
"""Draw samples from the prior distribution of the GP.
len(test_x) is the number of samplese to draw.
Resource: http://katbailey.github.io/post/gaussian-processes-for-dummies/.
N-Posteriors / N-Priors tells the number of functions to samples from the dsitribution"""
try: # try inside sample from posterior function
L = np.linalg.cholesky(self.covariance_test_test +
self.covariance_noise * np.eye(
len(self.test_domain))- np.dot(Lk.T, Lk))
except Exception as e:
print(e)
# Find the neareset Positive Definite Matrix
near_decompose = self.nearestPD(self.covariance_test_test +
self.covariance_noise * np.eye(
len(self.test_domain)) - np.dot(Lk.T, Lk))
L = np.linalg.cholesky(near_decompose.astype(float) )
# within posterior
# sample from the posterior
f_post = mus.reshape(-1,1) + np.dot(L, np.random.normal(
size=(len(self.test_domain), self.n_posteriors)))
# Sample X sets of standard normals for our test points,
# multiply them by the square root of the covariance matrix
f_prior_uninformed = np.dot(L_test_test,
np.random.normal(size=(len(self.test_domain), n_priors)))
# For the posterior, the columns are the vector for that function
return (f_prior_uninformed, f_post)
        if return_cov == True:
            # The original returned undefined names here; return the posterior
            # mean, its diagonal variance, and the test-test covariance matrix.
            return mus.ravel(), s2.ravel(), self.covariance_test_test
if return_sample == True:
f_prior, f_post = sample_from_posterior()
return mus.ravel(), s2.ravel(), f_prior, f_post
else:
return mus.ravel(), s2.ravel()
def __sample_from_function__(self, verbose=None):
"""Sample N times from the unknown function and for each time find the
point that will have the highest expected improvement (find the maxima of the function).
Verbose signifies if the function should print out the points where it is sampling
Returns a generator of x_sampled_points, y_sampled_points, vars_, best_x, best_y, \
list_of_expected_improvements, testing_domain, mus
for improvements. Mus and Vars are the mean and var for each sampled point
in the gaussian process.
Starts off the search for expected improvement with a coarse search and then hones in on
        the domain with the highest expected improvement.
        Note - the y-function can EITHER be the actual y-function (for evaluation
purposes, or an objective function
(i.e. - RMSE))"""
verbose = self.verbose
# for plotting the points sampled
x_sampled_points = []
y_sampled_points = []
best_x = self.train_points_x[np.argmax(self.train_points_y ),:]
best_y =self.train_points_y [np.argmax(self.train_points_y ),:]
for i in range(self.samples):
if i == 0:
if self.train_points_x .shape[1]==1: ## one dimensional case
testing_domain = np.array([self.test_domain]).reshape(-1,1)
else:
testing_domain = self.test_domain
# find the next x-point to sample
mus, vars_, prior, post = self.__test_gaussian_process__(
return_sample = True)
sigmas_post = np.var(post,axis=1)
mus_post = np.mean(post,axis=1)
# get the expected values from the posterior distribution
list_of_expected_improvements = self.expected_improvement(
mus_post, sigmas_post ,best_y)
max_improv_x_idx = np.argmax(np.array(
list_of_expected_improvements))
#print(max_improv_x_idx,'max_improv_x_idx')
max_improv_x = testing_domain[max_improv_x_idx]
# don't resample the same point
c = 1
while max_improv_x in x_sampled_points:
if c == 1:
if self.train_points_x .shape[1]==1:
sorted_points_idx = np.argsort(list(np.array(
list_of_expected_improvements)))
else:
sorted_points_idx = np.argsort(list(np.array(
list_of_expected_improvements)),axis=0)
c+=1
max_improv_x_idx = int(sorted_points_idx[c])
max_improv_x = testing_domain[max_improv_x_idx]
                    # only continue until we've gone through half of the list
if c > round(len(list_of_expected_improvements)/2):
max_improv_x_idx = int(
np.argmax(list_of_expected_improvements))
max_improv_x = testing_domain[max_improv_x_idx]
break
if self.train_points_x.shape[1]==1:
max_improv_y = self.train_y_func(max_improv_x)
else: # Two D
try: # see if we are passing in the actual function
max_improv_y = self.train_y_func(
max_improv_x[0], max_improv_x[1])
except: # we are passing the objective function in
max_improv_y = self.train_y_func(
max_improv_x[0], dimensions = 'two',
hyperparameter_value_two = max_improv_x[1])
if max_improv_y > best_y: ## use to find out where to search next
best_y = max_improv_y
best_x = max_improv_x
if verbose:
print(f"Bayesian Optimization just sampled point = {best_x}")
print(f"Best x (Bayesian Optimization) = {best_x},\
Best y = {best_y}")
# append the point to sample
x_sampled_points.append(max_improv_x)
y_sampled_points.append(max_improv_y)
                    # append the newly sampled point to the training data
self.train_points_x = np.vstack((self.train_points_x,
max_improv_x))
self.train_points_y = np.vstack((self.train_points_y,
max_improv_y))
yield x_sampled_points, y_sampled_points, vars_, best_x, best_y, \
list_of_expected_improvements, testing_domain, mus
else:
# append the point to sample
x_sampled_points.append(max_improv_x)
y_sampled_points.append(max_improv_y)
                    # append the newly sampled point to the training data
self.train_points_x = np.vstack((self.train_points_x, max_improv_x))
self.train_points_y = np.vstack((self.train_points_y, max_improv_y))
yield x_sampled_points, y_sampled_points, vars_, best_x, best_y, \
list_of_expected_improvements, testing_domain, mus
else:
if self.train_points_x.shape[1]==1:
testing_domain = np.array([testing_domain]).reshape(-1,1)
else:
testing_domain = self.test_domain
mus, vars_, prior, post = self.__test_gaussian_process__(
return_sample = True)
                sigmas_post = np.var(post,axis=1)
mus_post = np.mean(post,axis=1)
# get the expected values from the posterior distribution
list_of_expected_improvements = self.expected_improvement(
mus_post, sigmas_post ,best_y)
max_improv_x_idx = np.argmax(list_of_expected_improvements)
max_improv_x = testing_domain[max_improv_x_idx]
# don't resample the same point
c = 1
while max_improv_x in x_sampled_points:
if c == 1:
if self.train_points_x .shape[1]==1:
sorted_points_idx = np.argsort(list(np.array(
list_of_expected_improvements)))
else:
sorted_points_idx = np.argsort(list(np.array(
list_of_expected_improvements)),axis=0)
c+=1
max_improv_x_idx = int(sorted_points_idx[c])
max_improv_x = testing_domain[max_improv_x_idx]
                    # only continue until we've gone through half of the list
if c > round(len(list_of_expected_improvements)/2):
max_improv_x_idx = int(
np.argmax(list_of_expected_improvements))
max_improv_x = testing_domain[max_improv_x_idx]
break
if self.train_points_x .shape[1]==1:
max_improv_y = self.train_y_func(max_improv_x)
else: # Two D
try: # see if we are passing in the actual function
max_improv_y = self.train_y_func(
max_improv_x[0], max_improv_x[1])
except: # we are passing the objective function in
max_improv_y = self.train_y_func(
max_improv_x[0], dimensions = 'two',
hyperparameter_value_two = max_improv_x[1])
if max_improv_y > best_y: ## use to find out where to search next
best_y = max_improv_y
best_x = max_improv_x
if verbose:
print(f"Bayesian Optimization just sampled point = {max_improv_x}")
print(f"Best x (Bayesian Optimization) = {best_x}, Best y = {best_y}")
# append the point to sample
x_sampled_points.append(max_improv_x)
y_sampled_points.append(max_improv_y)
                    # append the newly sampled point to the training data
self.train_points_x = np.vstack((self.train_points_x, max_improv_x))
self.train_points_y = np.vstack((self.train_points_y, max_improv_y))
yield x_sampled_points, y_sampled_points, vars_, best_x, best_y, \
list_of_expected_improvements, testing_domain, mus
else:
# append the point to sample
x_sampled_points.append(max_improv_x)
y_sampled_points.append(max_improv_y)
                    # append the newly sampled point to the training data
self.train_points_x = np.vstack((self.train_points_x, max_improv_x))
self.train_points_y = np.vstack((self.train_points_y, max_improv_y))
yield x_sampled_points, y_sampled_points, vars_, best_x, best_y, \
list_of_expected_improvements, testing_domain, mus
def hyperparam_choice_function(self, hyperparameter_value,
dimensions = 'one', hyperparameter_value_two = None):
"""Returns the negative MSE of the input hyperparameter for the given
hyperparameter.
Used with GradientBoostingRegressor estimator currently
If dimensions = one, then search n_estimators. if dimension equal
two then search over n_estimators and max_depth"""
#definethe model
model = self.model
# define the training points
train_points_x = self.model_train_points_x
train_points_y = self.model_train_points_y
if self.dimensions == 'one':
try:
m = model(n_estimators= int(hyperparameter_value))
except:
m = model(n_estimators= hyperparameter_value)
m.fit(train_points_x, train_points_y)
pred = m.predict(self.test_points_x )
n_mse = self.root_mean_squared_error(self.test_points_y , pred)
return n_mse
elif self.dimensions =='two':
try:
m = model(n_estimators = int(hyperparameter_value),
max_depth = int(hyperparameter_value_two))
except:
m = model(n_estimators = hyperparameter_value,
max_depth = hyperparameter_value_two)
m.fit(train_points_x, train_points_y)
pred = m.predict(self.test_points_x)
n_mse = self.root_mean_squared_error(self.test_points_y , pred)
return n_mse
else:
return ' We do not support this number of dimensions yet'
    def root_mean_squared_error(self, actual, predicted, negative = True):
        """RMSE of actual and predicted values.
        negative=True returns the RMSE negated to allow for
        maximization instead of minimization.
        Note: the original squared only `predicted`; it is the residual
        (actual - predicted) that should be squared."""
        rmse = np.sqrt(
            np.sum((actual.reshape(-1, 1) - predicted.reshape(-1, 1)) ** 2)
            / len(actual))
        if negative == True:
            return -rmse
        return rmse
def expected_improvement(self, mean_x, sigma_squared_x,
y_val_for_best_hyperparameters, normal_dist=None,
point_est = False):
"""Finds the expected improvement of a point give the current best point.
If point_est = False, then computes the expected value on a vector
from the posterior distribution.
"""
with np.errstate(divide='ignore'): # in case sigma equals zero
# Expected val for one point
            if point_est == True:
                sigma_x = np.sqrt(sigma_squared_x)  # standard deviation from the variance
                if round(sigma_x, 8) == 0:  # check before dividing by a zero deviation
                    return 0
                Z = (mean_x - y_val_for_best_hyperparameters) / sigma_x
                return (mean_x -
                        y_val_for_best_hyperparameters) * normal_dist.cdf(Z) + \
                    sigma_x * normal_dist.pdf(Z)
            else:
                # Expected improvement for each point sampled from the posterior.
                # The original wrapped this in a loop over range(len(mean_x)) that
                # returned on its first pass, and scaled z by np.std(sigma_squared_x);
                # the standard EI uses each point's own standard deviation instead.
                list_of_improvements = []
                for m, s in zip(mean_x, np.sqrt(sigma_squared_x)):
                    z = (m - y_val_for_best_hyperparameters) / s
                    list_of_improvements.append(
                        (m - y_val_for_best_hyperparameters) * norm().cdf(z)
                        + s * norm().pdf(z))
                return list_of_improvements
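    # Editor's worked example (comment only, not original code): with best
    # value y* = 0, a candidate with posterior mean m = 1 and standard
    # deviation s = 1 has z = (m - y*) / s = 1, so
    #     EI = (m - y*) * Phi(z) + s * phi(z)
    #        = 1 * 0.8413... + 1 * 0.2420... ~= 1.083
    # with Phi and phi the standard normal cdf and pdf (scipy.stats.norm).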
def nearestPD(self, A):
"""
#https://stackoverflow.com/questions/43238173/python-convert-matrix-to-positive-semi-definite/43244194#43244194
Find the nearest positive-definite matrix to input
A Python/Numpy port of <NAME>'s `nearestSPD` MATLAB code [1], which
credits [2].
[1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
[2] <NAME>, "Computing a nearest symmetric positive semidefinite
matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
"""
def isPD(B):
"""Returns true when input is positive-definite, via Cholesky"""
try:
_ = la.cholesky(B)
return True
except la.LinAlgError:
return False
B = (A + A.T) / 2
_, s, V = la.svd(B)
H = np.dot(V.T, np.dot(np.diag(s), V))
A2 = (B + H) / 2
A3 = (A2 + A2.T) / 2
if isPD(A3):
return A3
spacing = np.spacing(la.norm(A))
# The above is different from [1]. It appears that MATLAB's `chol` Cholesky
# decomposition will accept matrixes with exactly 0-eigenvalue, whereas
# Numpy's will not. So where [1] uses `eps(mineig)` (where `eps` is Matlab
# for `np.spacing`), we use the above definition. CAVEAT: our `spacing`
# will be much larger than [1]'s `eps(mineig)`, since `mineig` is usually on
# the order of 1e-16, and `eps(1e-16)` is on the order of 1e-34, whereas
# `spacing` will, for Gaussian random matrixes of small dimension, be on
        # the order of 1e-16. In practice, both ways converge, as the unit test
# below suggests.
I = np.eye(A.shape[0])
k = 1
        while not isPD(A3):
mineig = np.min(np.real(la.eigvals(A3)))
A3 += I * (-mineig * k**2 + spacing)
k += 1
return A3
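    # Editor's usage sketch (comment only, assuming numpy): nearestPD is the
    # fallback used above whenever np.linalg.cholesky rejects a matrix.
    #     >>> bad = np.array([[1., 2.], [2., 1.]])   # eigenvalues 3 and -1
    #     >>> good = self.nearestPD(bad)
    #     >>> np.linalg.cholesky(good)               # now succeeds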
def __squared_kernel__(self, a, b, param=2.0, train=False,
train_noise = 5e-3, vertical_scale=1.5):
"""Calculated the squared exponential kernel.
Adds a noise term for the covariance of the training data
Adjusting the param changes the difference where points will have a positive covariance
Returns a covaraince Matrix.
Vertical scale controls the vertical scale of the function"""
if self.squared_length != None:
vertical_scale = self.squared_length
if train == False:
# ensure a and b are numpy arrays
a = np.array(a)
b = np.array(b)
sqdist = np.sum(a**2,1).reshape(-1,1) + np.sum(b**2,1) - 2*np.dot(a, b.T)
return vertical_scale*np.exp(-.5 * (1/param) * sqdist)
else:
# ensure a and b are numpy arrays
a = np.array(a)
b = np.array(b)
noisy_observations = train_noise*np.eye(len(a))
sqdist = np.sum(a**2,1).reshape(-1,1) + np.sum(b**2,1) - 2*np.dot(a, b.T)
return vertical_scale*np.exp(-.5 * (1/param) * sqdist) + noisy_observations
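    # Editor's note (comment only): __squared_kernel__ implements the standard
    # RBF kernel
    #     k(a, b) = vertical_scale * exp(-||a - b||^2 / (2 * param))
    # via the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, which is what
    # the sqdist lines compute. Identical points give sqdist = 0, hence
    # k = vertical_scale (plus train_noise on the diagonal when train=True).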
def __matern_kernel__(self, a,b,C_smoothness=3/2,train=False, train_noise = 5e-2):
"""The class of Matern kernels is a generalization of the RBF and the
absolute exponential kernel parameterized by an additional parameter
nu. The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for nu=0.5
to the absolute exponential kernel. Important intermediate values are
nu=1.5 (once differentiable functions) and nu=2.5 (twice differentiable
functions).
c_smoother = inf = RBF
The train keyword is used to add noisy observations to the matrix"""
if C_smoothness not in [1/2,3/2]:
return "You choose an incorrect hyparameter, please choose either 1/2 or 3/2"
matrix_norm = np.array([np.linalg.norm(a[i] - b,axis=(1)) for i in range(len(a))])
if C_smoothness == 1/2:
if train == True:
                return max(np.var(a), np.var(b)) * np.exp(-matrix_norm) + np.eye(len(matrix_norm)) * train_noise  # original was missing the return
else:
return max(np.var(a),np.var(b)) * | np.exp(-matrix_norm) | numpy.exp |
import os
# supress tensorflow logging other than errors
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.datasets import mnist
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from attacks.fgsm import fgsm
def random_orthogonal(i):
"""Return a random vector orthogonal to i."""
v = | np.random.random(i.shape) | numpy.random.random |
'''
Unit test suite for the :py:mod:`bella.word_vectors` module.
'''
from pathlib import Path
import os
from unittest import TestCase
import tempfile
import pytest
import numpy as np
from bella.helper import read_config
import bella.tokenisers as tokenisers
from bella.word_vectors import WordVectors
from bella.word_vectors import GensimVectors
from bella.word_vectors import PreTrained
from bella.word_vectors import GloveTwitterVectors
from bella.word_vectors import GloveCommonCrawl
from bella.word_vectors import VoVectors
from bella.word_vectors import SSWE
CONFIG_FP = Path(__file__).parent.joinpath('..', 'config.yaml')
class TestWordVectors(TestCase):
'''
Contains the following functions:
1. test_wordvector_methods
2. test_gensim_word2vec
3. test_pre_trained
'''
def test_wordvector_methods(self):
'''
Tests the :py:class:`bella.word_vectors.WordVectors`
'''
hello_vec = np.asarray([0.5, 0.3, 0.4], dtype=np.float32)
another_vec = np.asarray([0.3333, 0.2222, 0.1111])
test_vectors = {'hello' : hello_vec,
'another' : another_vec}
word_vector = WordVectors(test_vectors)
vec_size = word_vector.vector_size
self.assertEqual(vec_size, 3, msg='Vector size should be 3 not {}'\
.format(vec_size))
# Testing the methods
hello_lookup = word_vector.lookup_vector('hello')
self.assertEqual(True, np.array_equal(hello_lookup, hello_vec), msg='{} '\
'should equal {}'.format(hello_lookup, hello_vec))
zero_vec = np.zeros(3)
nothing_vec = word_vector.lookup_vector('nothing')
self.assertEqual(True, np.array_equal(zero_vec, nothing_vec), msg='{} '\
'should be a zero vector'.format(nothing_vec))
index2word = word_vector.index2word
word2index = word_vector.word2index
index2vector = word_vector.index2vector
embedding_matrix = word_vector.embedding_matrix
another_index = word2index['another']
self.assertEqual('another', index2word[another_index], msg='index2word '\
'and word2index do not match on word `another`')
index_correct = np.array_equal(index2vector[another_index], another_vec)
self.assertEqual(True, index_correct, msg='index2vector does not return '\
'the correct vector for `another`')
# Check that it returns 0 index for unknown words
self.assertEqual(0, word2index['nothing'], msg='All unknown words should '\
'be mapped to the zero index')
# Check that unknown words are mapped to the <unk> token
self.assertEqual('<unk>', index2word[0], msg='All zero index should be '\
'mapped to the <unk> token')
# Check that the unkown words map to the unknown vector
self.assertEqual(True, np.array_equal(np.zeros(3), index2vector[0]),
msg='Zero index should map to the unknown vector')
# Test the embedding matrix
hello_index = word2index['hello']
is_hello_vector = np.array_equal(hello_vec, embedding_matrix[hello_index])
self.assertEqual(True, is_hello_vector, msg='The embedding matrix lookup'\
' is wrong for the `hello` word')
unknown_index = word2index['nothing']
is_nothing_vector = np.array_equal(zero_vec, embedding_matrix[unknown_index])
self.assertEqual(True, is_nothing_vector, msg='The embedding matrix lookup'\
' is wrong for the unknwon word')
def test_unit_norm(self):
'''
Testing the unit_length of WordVectors
'''
hello_vec = np.asarray([0.5, 0.3, 0.4], dtype=np.float32)
another_vec = np.asarray([0.3333, 0.2222, 0.1111], dtype=np.float32)
test_vectors = {'hello' : hello_vec,
'another' : another_vec}
word_vector = WordVectors(test_vectors, unit_length=True)
# Tests the normal case
unit_hello_vec = np.asarray([0.70710677, 0.4242641,
0.56568545], dtype=np.float32)
unit_is_equal = np.array_equal(unit_hello_vec,
word_vector.lookup_vector('hello'))
self.assertEqual(True, unit_is_equal, msg='Unit vector is not working')
# Test the l2 norm of a unit vector is 1
test_unit_mag = np.linalg.norm(word_vector.lookup_vector('hello'))
self.assertEqual(1.0, test_unit_mag, msg='l2 norm of a unit vector '\
'should be 1 not {}'.format(test_unit_mag))
# Test that it does not affect zero vectors these should still be zero
unknown_vector = word_vector.lookup_vector('nothing')
self.assertEqual(True, np.array_equal(np.zeros(3), unknown_vector),
msg='unknown vector should be a zero vector and not {}'\
.format(unknown_vector))
hello_index = word_vector.word2index['hello']
hello_embedding = word_vector.embedding_matrix[hello_index]
self.assertEqual(True, np.array_equal(unit_hello_vec, hello_embedding),
msg='The embedding matrix is not applying the unit '\
'normalization {} should be {}'\
.format(hello_embedding, unit_hello_vec))
@pytest.mark.skip(reason="Takes a long time to test only add on large tests")
def test_padded_vector(self):
hello_vec = np.asarray([0.5, 0.3, 0.4], dtype=np.float32)
another_vec = np.asarray([0.3333, 0.2222, 0.1111])
test_vectors = {'hello' : hello_vec,
'another' : another_vec}
pad_vec = np.asarray([-1, -1, -1], dtype=np.float32)
word_vector = WordVectors(test_vectors, padding_value=-1)
self.assertEqual('<pad>', word_vector.index2word[0])
self.assertEqual('<unk>', word_vector.index2word[3])
anno_unk_vec = np.array_equal(np.zeros(3),
word_vector.lookup_vector('anno'))
self.assertEqual(3, word_vector.word2index['anno'])
self.assertEqual(True, anno_unk_vec)
embedding_matrix = word_vector.embedding_matrix
pad_emb_vec = np.array_equal(pad_vec, embedding_matrix[0])
unk_emb_vec = np.array_equal(np.zeros(3), embedding_matrix[3])
hello_emb_vec = np.array_equal(hello_vec, embedding_matrix[1])
self.assertEqual(True, pad_emb_vec)
self.assertEqual(True, unk_emb_vec)
self.assertEqual(True, hello_emb_vec)
self.assertEqual('<pad>', word_vector.index2word[0])
self.assertEqual('<unk>', word_vector.index2word[3])
self.assertEqual(3, word_vector.unknown_index)
pad_vec = np.asarray([-1]*100, dtype=np.float32)
vo_zhang = VoVectors(skip_conf=True, padding_value=-1)
vo_zhang_unk_index = vo_zhang.unknown_index
embedding_matrix = vo_zhang.embedding_matrix
pad_emb_vec = np.array_equal(pad_vec, embedding_matrix[0])
unk_emb_vec = np.array_equal(vo_zhang.unknown_vector, embedding_matrix[vo_zhang_unk_index])
unk_not_equal_pad = np.array_equal(embedding_matrix[0], vo_zhang.unknown_vector)
self.assertEqual(True, pad_emb_vec, msg='{} {}'.format(pad_vec, embedding_matrix[0]))
self.assertEqual(True, unk_emb_vec)
self.assertEqual(True, hello_emb_vec)
self.assertEqual(True, vo_zhang_unk_index != 0)
self.assertEqual(True, vo_zhang_unk_index != 0)
self.assertEqual(False, unk_not_equal_pad)
self.assertEqual('<pad>', vo_zhang.index2word[0])
self.assertEqual('<unk>', vo_zhang.index2word[vo_zhang_unk_index])
# Ensure that padding does not affect word vectors that do not state
# it is required
word_vector = WordVectors(test_vectors)
self.assertEqual('<unk>', word_vector.index2word[0])
self.assertEqual('<unk>', word_vector.index2word[word_vector.unknown_index])
self.assertEqual('<unk>', word_vector.padding_word)
@pytest.mark.skip(reason="Takes a long time to test only add on large tests")
def test_gensim_word2vec(self):
'''
Tests the :py:class:`bella.word_vectors.GensimVectors`
'''
# Test loading word vectors from a file
vo_zhang = VoVectors(skip_conf=True)
self.assertEqual(vo_zhang.vector_size, 100, msg='Vector size should be equal'\
' to 100 not {}'.format(vo_zhang.vector_size))
# Check zero vectors work for OOV words
zero_vector = np.zeros(100)
oov_word = 'thisssssdoesssssnotexists'
oov_vector = vo_zhang.lookup_vector(oov_word)
self.assertEqual(True, np.array_equal(oov_vector, zero_vector),
                         msg='This word {} should not exist and should have a zero '\
'vector and not {}'.format(oov_word, oov_vector))
# Check it does get word vectors
the_vector = vo_zhang.lookup_vector('the')
self.assertEqual(False, np.array_equal(the_vector, zero_vector),
msg='The word `the` should have a non-zero vector.')
with self.assertRaises(ValueError, msg='Should raise a value for any param'\
'that is not a String and this is a list'):
vo_zhang.lookup_vector(['the'])
# Check if the word, index and vector lookups match
index_word = vo_zhang.index2word
word_index = vo_zhang.word2index
the_index = word_index['the']
self.assertEqual('the', index_word[the_index], msg='index2word and '\
'word2index do not match for the word `the`')
index_vector = vo_zhang.index2vector
the_vectors_match = np.array_equal(index_vector[the_index],
vo_zhang.lookup_vector('the'))
self.assertEqual(True, the_vectors_match, msg='index2vector does not match'\
' lookup_vector func for the word `the`')
# Test the constructor
test_file_path = 'this'
with self.assertRaises(Exception, msg='The file path should have no saved '\
'word vector file {} and there is no training data'\
.format(test_file_path)):
GensimVectors(test_file_path, 'fake data', model='word2vec')
with self.assertRaises(Exception, msg='Should not accept neither no saved '\
'word vector model nor no training data'):
GensimVectors(None, None, model='word2vec')
with self.assertRaises(Exception, msg='Should only accept the following models'\
' {}'.format(['word2vec', 'fasttext'])):
GensimVectors(None, [['hello', 'how', 'are']], model='nothing',
min_count=1)
# Test creating vectors from data
data_path = os.path.abspath(read_config('sherlock_holmes_test',
CONFIG_FP))
with open(data_path, 'r') as data:
data = map(tokenisers.whitespace, data)
with tempfile.NamedTemporaryFile() as temp_file:
data_vector = GensimVectors(temp_file.name, data, model='word2vec',
size=200, name='sherlock')
d_vec_size = data_vector.vector_size
self.assertEqual(d_vec_size, 200, msg='Vector size should be 200 not'\
' {}'.format(d_vec_size))
sherlock_vec = data_vector.lookup_vector('sherlock')
self.assertEqual(False, np.array_equal(zero_vector, sherlock_vec),
msg='Sherlock should be a non-zero vector')
# Test that it saved the trained model
saved_vector = GensimVectors(temp_file.name, None, model='word2vec')
s_vec_size = saved_vector.vector_size
self.assertEqual(s_vec_size, 200, msg='Vector size should be 200 not'\
' {}'.format(s_vec_size))
equal_sherlocks = np.array_equal(sherlock_vec,
saved_vector.lookup_vector('sherlock'))
self.assertEqual(True, equal_sherlocks, msg='The saved model and '\
'the trained model should have the same vectors')
# Ensure the name attributes works
self.assertEqual('sherlock', data_vector.name, msg='The name '\
'of the instance should be sherlock and not {}'\
.format(data_vector.name))
@pytest.mark.skip(reason="Takes a long time to test only add on large tests")
def test_pre_trained(self):
'''
Tests the :py:class:`bella.word_vectors.PreTrained`
'''
# Test constructor
with self.assertRaises(TypeError, msg='Should not accept a list when '\
                                   'file path is expected to be a String'):
PreTrained(['a fake file path'])
with self.assertRaises(ValueError, msg='Should not accept strings that '\
'are not file paths'):
PreTrained('file.txt')
# Test if model loads correctly
sswe_model = SSWE(skip_conf=True)
sswe_vec_size = sswe_model.vector_size
self.assertEqual(sswe_vec_size, 50, msg='Vector size should be 50 not '\
'{}'.format(sswe_vec_size))
unknown_word = '$$$ZERO_TOKEN$$$'
unknown_vector = sswe_model.lookup_vector(unknown_word)
zero_vector = | np.zeros(sswe_vec_size) | numpy.zeros |
#%%
import random
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.keras as keras
import seaborn as sns
import numpy as np
import pickle
from sklearn.model_selection import StratifiedKFold
from math import log2, ceil
import sys
sys.path.append("../../src/")
from lifelong_dnn import LifeLongDNN
from joblib import Parallel, delayed
#%%
def unpickle(file):
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
def get_colors(colors, inds):
c = [colors[i] for i in inds]
return c
def generate_2d_rotation(theta=0, acorn=None):
if acorn is not None:
np.random.seed(acorn)
R = np.array([
[np.cos(theta), np.sin(theta)],
[-np.sin(theta), np.cos(theta)]
])
return R
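# Editor's sketch (not part of the original experiment code): rotating the
# unit x-axis by 90 degrees with the matrix above; with this layout the
# rotation is applied as row_vector @ R.
def _demo_rotation():
    R = generate_2d_rotation(np.pi / 2)
    return np.array([1.0, 0.0]) @ R  # ~ [0., 1.] up to floating point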
def generate_spirals(N, D=2, K=5, noise = 0.5, acorn = None, density=0.3):
    # N: number of points per class
    # D: number of features
    # K: number of classes
X = []
Y = []
if acorn is not None:
np.random.seed(acorn)
if K == 2:
turns = 2
elif K==3:
turns = 2.5
elif K==5:
turns = 3.5
elif K==7:
turns = 4.5
else:
print ("sorry, can't currently surpport %s classes " %K)
return
mvt = np.random.multinomial(N, 1/K * np.ones(K))
if K == 2:
r = np.random.uniform(0,1,size=int(N/K))
r = np.sort(r)
t = np.linspace(0, np.pi* 4 * turns/K, int(N/K)) + noise * np.random.normal(0, density, int(N/K))
dx = r * np.cos(t)
dy = r* np.sin(t)
X.append(np.vstack([dx, dy]).T )
X.append(np.vstack([-dx, -dy]).T)
Y += [0] * int(N/K)
Y += [1] * int(N/K)
else:
for j in range(1, K+1):
r = np.linspace(0.01, 1, int(mvt[j-1]))
t = np.linspace((j-1) * np.pi *4 *turns/K, j* np.pi * 4* turns/K, int(mvt[j-1])) + noise * np.random.normal(0, density, int(mvt[j-1]))
dx = r * np.cos(t)
dy = r* np.sin(t)
dd = np.vstack([dx, dy]).T
X.append(dd)
#label
Y += [j-1] * int(mvt[j-1])
return np.vstack(X), np.array(Y).astype(int)
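# Editor's usage sketch (not part of the original experiment code): draw a
# small 3-class spiral sample and sanity-check the returned shapes/labels.
def _demo_generate_spirals():
    X, Y = generate_spirals(300, D=2, K=3, noise=2.5, acorn=0)
    assert X.shape[1] == 2 and len(X) == len(Y)
    assert set(Y) <= {0, 1, 2}
    return X, Y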
#%%
def experiment(n_spiral3, n_spiral5, n_test, reps, n_trees, max_depth, acorn=None):
#print(1)
    if n_spiral3 == 0 and n_spiral5 == 0:
raise ValueError('Wake up and provide samples to train!!!')
if acorn != None:
np.random.seed(acorn)
errors = np.zeros((reps,4),dtype=float)
for i in range(reps):
l2f = LifeLongDNN()
uf = LifeLongDNN()
#source data
spiral3, label_spiral3 = generate_spirals(n_spiral3, 2, 3, noise = 2.5)
test_spiral3, test_label_spiral3 = generate_spirals(n_test, 2, 3, noise = 2.5)
#target data
spiral5, label_spiral5 = generate_spirals(n_spiral5, 2, 5, noise = 2.5)
test_spiral5, test_label_spiral5 = generate_spirals(n_test, 2, 5, noise = 2.5)
if n_spiral3 == 0:
l2f.new_forest(spiral5, label_spiral5, n_estimators=n_trees,max_depth=max_depth)
errors[i,0] = 0.5
errors[i,1] = 0.5
uf_task2=l2f.predict(test_spiral5, representation=0, decider=0)
l2f_task2=l2f.predict(test_spiral5, representation='all', decider=0)
errors[i,2] = 1 - np.sum(uf_task2 == test_label_spiral5)/n_test
errors[i,3] = 1 - np.sum(l2f_task2 == test_label_spiral5)/n_test
elif n_spiral5 == 0:
l2f.new_forest(spiral3, label_spiral3, n_estimators=n_trees,max_depth=max_depth)
uf_task1=l2f.predict(test_spiral3, representation=0, decider=0)
l2f_task1=l2f.predict(test_spiral3, representation='all', decider=0)
errors[i,0] = 1 - np.sum(uf_task1 == test_label_spiral3)/n_test
errors[i,1] = 1 - np.sum(l2f_task1 == test_label_spiral3)/n_test
errors[i,2] = 0.5
errors[i,3] = 0.5
else:
l2f.new_forest(spiral3, label_spiral3, n_estimators=n_trees,max_depth=max_depth)
l2f.new_forest(spiral5, label_spiral5, n_estimators=n_trees,max_depth=max_depth)
uf.new_forest(spiral3, label_spiral3, n_estimators=2*n_trees,max_depth=max_depth)
uf.new_forest(spiral5, label_spiral5, n_estimators=2*n_trees,max_depth=max_depth)
uf_task1=uf.predict(test_spiral3, representation=0, decider=0)
l2f_task1=l2f.predict(test_spiral3, representation='all', decider=0)
uf_task2=uf.predict(test_spiral5, representation=1, decider=1)
l2f_task2=l2f.predict(test_spiral5, representation='all', decider=1)
errors[i,0] = 1 - np.sum(uf_task1 == test_label_spiral3)/n_test
errors[i,1] = 1 - np.sum(l2f_task1 == test_label_spiral3)/n_test
errors[i,2] = 1 - np.sum(uf_task2 == test_label_spiral5)/n_test
errors[i,3] = 1 - np.sum(l2f_task2 == test_label_spiral5)/n_test
return np.mean(errors,axis=0)
#%%
mc_rep = 1000
n_test = 1000
n_trees = 10
n_spiral3 = (100*np.arange(0.5, 7.25, step=0.25)).astype(int)
n_spiral5 = (100*np.arange(0.5, 7.50, step=0.25)).astype(int)
mean_error = np.zeros((4, len(n_spiral3)+len(n_spiral5)))
std_error = np.zeros((4, len(n_spiral3)+len(n_spiral5)))
mean_te = np.zeros((2, len(n_spiral3)+len(n_spiral5)))
std_te = np.zeros((2, len(n_spiral3)+len(n_spiral5)))
for i,n1 in enumerate(n_spiral3):
print('starting to compute %s spiral 3\n'%n1)
error = np.array(
Parallel(n_jobs=40,verbose=1)(
delayed(experiment)(n1,0,n_test,1,n_trees=n_trees,max_depth=ceil(log2(750))) for _ in range(mc_rep)
)
)
mean_error[:,i] = np.mean(error,axis=0)
std_error[:,i] = np.std(error,ddof=1,axis=0)
mean_te[0,i] = np.mean(error[:,0]/error[:,1])
mean_te[1,i] = np.mean(error[:,2]/error[:,3])
std_te[0,i] = np.std(error[:,0]/error[:,1],ddof=1)
std_te[1,i] = np.std(error[:,2]/error[:,3],ddof=1)
if n1==n_spiral3[-1]:
for j,n2 in enumerate(n_spiral5):
print('starting to compute %s spiral 5\n'%n2)
error = np.array(
Parallel(n_jobs=40,verbose=1)(
delayed(experiment)(n1,n2,n_test,1,n_trees=n_trees,max_depth=ceil(log2(750))) for _ in range(mc_rep)
)
)
mean_error[:,i+j+1] = np.mean(error,axis=0)
std_error[:,i+j+1] = np.std(error,ddof=1,axis=0)
mean_te[0,i+j+1] = np.mean(error[:,0]/error[:,1])
mean_te[1,i+j+1] = np.mean(error[:,2]/error[:,3])
std_te[0,i+j+1] = | np.std(error[:,0]/error[:,1],ddof=1) | numpy.std |
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/..")
import unittest
import numpy as np
from scipy.signal import convolve2d
from MyConvolution import convolve
class TestMyConvolution(unittest.TestCase):
def test_shape(self):
im = np.ones((5,5))
k = np.ones((3,3))
conv = convolve(im, k)
in_shape = im.shape
out_shape = conv.shape
np.testing.assert_equal(out_shape, in_shape)
def test_result(self):
im = np.ones((5,5))
k = np.ones((3,3))
conv = convolve(im, k)
exp = np.array([
[4., 6., 6., 6., 4.],
[6., 9., 9., 9., 6.],
[6., 9., 9., 9., 6.],
[6., 9., 9., 9., 6.],
[4., 6., 6., 6., 4.]
])
np.testing.assert_array_equal(conv, exp)
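    # Editor's note (comment only): the expected matrix above encodes a
    # 'same'-size convolution with zero padding -- corners see a 2x2 patch of
    # ones (sum 4), non-corner borders a 2x3 patch (sum 6), and interior
    # pixels the full 3x3 kernel (sum 9). The scipy tests below exercise the
    # same behaviour, since convolve2d's default boundary is zero fill.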
def test_scipy_shape(self):
im = np.ones((5,5))
k = np.ones((3,3))
conv = convolve2d(im, k, mode='same')
in_shape = im.shape
out_shape = conv.shape
np.testing.assert_equal(out_shape, in_shape)
def test_scipy_result(self):
im = np.ones((5,5))
k = | np.ones((3,3)) | numpy.ones |
# -*- coding: utf-8 -*-
"""
@author: bstarly
"""
import scipy
import matplotlib.pyplot as plt
from scipy.fft import fft
import numpy as np
import pandas as pd
signal_length = 20 #[ seconds ]
def calc_euclidean(x, y):
return np.sqrt( | np.sum((x - y) ** 2) | numpy.sum |
#!/usr/bin/env python
import utils
import rogp
import numpy as np
import scipy as sp
import pyomo.environ as p
from rogp.util.numpy import _to_np_obj_array, _pyomo_to_np
class Sep():
def __init__(self, X):
m = p.ConcreteModel()
m.cons = p.ConstraintList()
m.r = p.Var(X, within=p.NonNegativeReals, bounds=(0, 1))
self.m = m
def check_feasibility(s, bb=False):
k = 0
feas = True
if bb:
check_block = check_deg_block_bb
else:
check_block = check_deg_block
for i, x in enumerate(s.Xvar):
if not isinstance(x, (float, int)):
if not check_block(s, k, i):
feas = False
break
k = i
if feas:
return check_block(s, k, len(s.X) - 1)
def check_deg_block(s, k, i):
fc = s.drillstring.pdm.failure
fc.rogp.set_tanh(False)
# Initialize parameters
alpha = 1 - (1 - s.alpha)/(len(s.Xm) + 1)
F = sp.stats.norm.ppf(alpha)
X = s.X[k:i]
Xvar = s.Xvar
delta = {s.X[j]: Xvar[j+1] - Xvar[j] for j in range(k, i)}
dp = [[s.m.rop[x].deltap()] for x in X]
dp = _to_np_obj_array(dp)
# TODO: make eps = 0.001 a parameter
dt = [[delta[x]/(s.m.rop[x].V + 0.001)] for x in X]
dt = [[x[0]()] for x in dt]
dt = _to_np_obj_array(dt)
sep = Sep(X)
r = _pyomo_to_np(sep.m.r, ind=X)
# Calculate matrices
Sig = fc.rogp.predict_cov_latent(dp).astype('float')
inv = np.linalg.inv(Sig)
hz = fc.rogp.warp(r)
mu = fc.rogp.predict_mu_latent(dp)
diff = hz - mu
obj = np.matmul(dt.T, r)[0, 0]
sep.m.Obj = p.Objective(expr=obj, sense=p.maximize)
c = np.matmul(np.matmul(diff.T, inv), diff)[0, 0]
sep.m.cons.add(c <= F)
utils.solve(sep, solver='Baron')
if obj() - 1.0 > 10e-5:
return False
return True
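# Editor's note (interpretation, not part of the original module): the
# separation problem built in check_deg_block maximizes the cumulative
# degradation dt' r over failure rates r constrained to the latent confidence
# set (h(r) - mu)' Sigma^{-1} (h(r) - mu) <= F, with h the GP warping; the
# block is declared degradation-feasible only if that worst case stays at or
# below the threshold of 1 (up to a small tolerance).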
def get_deg_block(s, k, i):
fc = s.drillstring.pdm.failure
fc.rogp.set_tanh(False)
# Initialize parameters
alpha = 1 - (1 - s.alpha)/(len(s.Xm) + 1)
F = sp.stats.norm.ppf(alpha)
X = s.X[k:i]
Xvar = s.Xvar
delta = {s.X[j]: Xvar[j+1] - Xvar[j] for j in range(k, i)}
dp = [[s.m.rop[x].deltap()] for x in X]
dp = _to_np_obj_array(dp)
# TODO: make eps = 0.001 a parameter
dt = [[delta[x]/(s.m.rop[x].V + 0.001)] for x in X]
dt = [[x[0]()] for x in dt]
dt = _to_np_obj_array(dt)
# Calculate matrices
cov = fc.rogp.predict_cov_latent(dp).astype('float')*F
mu = fc.rogp.predict_mu_latent(dp).astype('float')
c = dt.astype('float')
return mu, cov, c.flatten()
def check_deg_block_bb(s, k, i):
print(k, i)
mu, cov, c = get_deg_block(s, k, i)
warping = s.drillstring.pdm.failure.rogp
bb = rogp.util.sep.BoxTree(mu, cov, warping, c)
lb, ub, node, n_iter, tt = bb.solve(max_iter=1000000, eps=0.001)
if ub - 1 <= 0.001:
return True
else:
return False
def get_extrema(s, k, i):
fc = s.drillstring.pdm.failure
mu, cov, c = get_deg_block(s, k, i)
inv = np.linalg.inv(cov)
rad = np.sqrt( | np.diag(cov) | numpy.diag |
"""Custom expansions of :mod:`sklearn` functionalities.
Note
----
This module provides custom expansions of some :mod:`sklearn` classes and
functions which are necessary to fit the purposes for the desired
functionalities of the :ref:`MLR module <api.esmvaltool.diag_scripts.mlr>`. As
long-term goal we would like to include these functionalities to the
:mod:`sklearn` package since we believe these additions might be helpful for
everyone. This module serves as interim solution. To ensure that all features
are properly working this module is also covered by tests, which will also be
expanded in the future.
"""
# pylint: disable=arguments-differ
# pylint: disable=attribute-defined-outside-init
# pylint: disable=protected-access
# pylint: disable=super-init-not-called
# pylint: disable=too-many-arguments
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-locals
import itertools
import logging
import numbers
import os
import time
import warnings
from contextlib import suppress
from copy import deepcopy
from inspect import getfullargspec
from traceback import format_exception_only
import numpy as np
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.compose import ColumnTransformer, TransformedTargetRegressor
from sklearn.exceptions import FitFailedWarning, NotFittedError
from sklearn.feature_selection import RFE
from sklearn.feature_selection._base import SelectorMixin
from sklearn.linear_model import LinearRegression
from sklearn.metrics import check_scoring
from sklearn.metrics._scorer import (
_check_multimetric_scoring,
_MultimetricScorer,
)
from sklearn.model_selection import check_cv
from sklearn.model_selection._validation import _aggregate_score_dicts, _score
from sklearn.pipeline import Pipeline
from sklearn.utils import (
_message_with_time,
check_array,
check_X_y,
indexable,
safe_sqr,
)
from sklearn.utils.metaestimators import _safe_split, if_delegate_has_method
from sklearn.utils.validation import _check_fit_params, check_is_fitted
from esmvaltool.diag_scripts import mlr
logger = logging.getLogger(os.path.basename(__file__))
def _fit_and_score_weighted(estimator, x_data, y_data, scorer, train, test,
verbose, parameters, fit_params,
error_score=np.nan, sample_weights=None):
"""Expand :func:`sklearn.model_selection._validation._fit_and_score`."""
if verbose > 1:
if parameters is None:
msg = ''
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = _check_fit_params(x_data, fit_params, train)
if parameters is not None:
# clone after setting parameters in case any parameters
# are estimators (like pipeline steps)
# because pipeline doesn't clone steps in fit
cloned_parameters = {}
for (key, val) in parameters.items():
cloned_parameters[key] = clone(val, safe=False)
estimator = estimator.set_params(**cloned_parameters)
start_time = time.time()
x_train, y_train = _safe_split(estimator, x_data, y_data, train)
x_test, y_test = _safe_split(estimator, x_data, y_data, test, train)
if sample_weights is not None:
sample_weights_test = sample_weights[test]
else:
sample_weights_test = None
try:
if y_train is None:
estimator.fit(x_train, **fit_params)
else:
estimator.fit(x_train, y_train, **fit_params)
except Exception as exc:
# Note fit time as time until error
fit_time = time.time() - start_time
score_time = 0.0
if error_score == 'raise':
raise
if isinstance(error_score, numbers.Number):
if isinstance(scorer, dict):
test_scores = {name: error_score for name in scorer}
else:
test_scores = error_score
warnings.warn("Estimator fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%s" %
(error_score, format_exception_only(type(exc),
exc)[0]),
FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)")
else:
fit_time = time.time() - start_time
test_scores = _score_weighted(estimator, x_test, y_test, scorer,
sample_weights=sample_weights_test)
score_time = time.time() - start_time - fit_time
if verbose > 2:
if isinstance(test_scores, dict):
for scorer_name in sorted(test_scores):
msg += ", %s=" % scorer_name
msg += "%.3f" % test_scores[scorer_name]
else:
msg += ", score="
msg += "%.3f" % test_scores
if verbose > 1:
total_time = score_time + fit_time
print(_message_with_time('CV', msg, total_time))
return [test_scores]
def _get_fit_parameters(fit_kwargs, steps, cls):
"""Retrieve fit parameters from ``fit_kwargs``."""
params = {name: {} for (name, step) in steps if step is not None}
step_names = list(params.keys())
for (param_name, param_val) in fit_kwargs.items():
param_split = param_name.split('__', 1)
if len(param_split) != 2:
raise ValueError(
f"Fit parameters for {cls} have to be given in the form "
f"'s__p', where 's' is the name of the step and 'p' the name "
f"of the parameter, got '{param_name}'")
try:
params[param_split[0]][param_split[1]] = param_val
except KeyError:
raise ValueError(
f"Expected one of {step_names} for step of fit parameter, got "
f"'{param_split[0]}' for parameter '{param_name}'")
return params
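# Usage sketch (illustrative; the step and parameter names below are made up,
# not part of the original module): fit kwargs of the form '<step>__<param>'
# are routed to the dictionary entry of the matching pipeline step.
def _demo_get_fit_parameters():
    steps = [('scaler', object()), ('regressor', object())]
    fit_kwargs = {'regressor__sample_weight': [1.0, 2.0]}
    params = _get_fit_parameters(fit_kwargs, steps, 'demo')
    assert params == {'scaler': {}, 'regressor': {'sample_weight': [1.0, 2.0]}}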
def _map_features(features, support):
"""Map old features indices to new ones using boolean mask."""
feature_mapping = {}
new_idx = 0
for (old_idx, supported) in enumerate(support):
if supported:
val = new_idx
new_idx += 1
else:
val = None
feature_mapping[old_idx] = val
new_features = []
for feature in features:
new_feature = feature_mapping[feature]
if new_feature is not None:
new_features.append(new_feature)
return new_features
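# Usage sketch (illustrative values): surviving feature indices are re-numbered
# through the boolean support mask; unsupported features are dropped.
def _demo_map_features():
    support = [True, False, True, True]
    assert _map_features([0, 2, 3], support) == [0, 1, 2]
    assert _map_features([1], support) == []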
def _rfe_single_fit(rfe, estimator, x_data, y_data, train, test, scorer,
**fit_kwargs):
"""Return the score for a fit across one fold."""
(x_train, y_train) = _safe_split(estimator, x_data, y_data, train)
(x_test, y_test) = _safe_split(estimator, x_data, y_data, test, train)
(fit_kwargs_train, _) = _split_fit_kwargs(fit_kwargs, train, test)
def step_score(estimator, features):
"""Score for a single step in the recursive feature elimination."""
return _score(estimator, x_test[:, features], y_test, scorer)
return rfe._fit(x_train, y_train, step_score=step_score,
**fit_kwargs_train).scores_
def _score_weighted(estimator, x_test, y_test, scorer, sample_weights=None):
"""Expand :func:`sklearn.model_selection._validation._score`."""
if isinstance(scorer, dict):
# will cache method calls if needed. scorer() returns a dict
scorer = _MultimetricScorer(**scorer)
if y_test is None:
scores = scorer(estimator, x_test, sample_weight=sample_weights)
else:
scores = scorer(estimator, x_test, y_test,
sample_weight=sample_weights)
error_msg = ("scoring must return a number, got %s (%s) "
"instead. (scorer=%s)")
if isinstance(scores, dict):
for name, score in scores.items():
if hasattr(score, 'item'):
with suppress(ValueError):
# e.g. unwrap memmapped scalars
score = score.item()
if not isinstance(score, numbers.Number):
raise ValueError(error_msg % (score, type(score), name))
scores[name] = score
else: # scalar
if hasattr(scores, 'item'):
with suppress(ValueError):
# e.g. unwrap memmapped scalars
scores = scores.item()
if not isinstance(scores, numbers.Number):
raise ValueError(error_msg % (scores, type(scores), scorer))
return scores
def _split_fit_kwargs(fit_kwargs, train_idx, test_idx):
"""Get split fit kwargs for single CV step."""
fit_kwargs_train = {}
fit_kwargs_test = {}
for (key, val) in fit_kwargs.items():
if 'sample_weight' in key and 'sample_weight_eval_set' not in key:
fit_kwargs_train[key] = deepcopy(val)[train_idx]
fit_kwargs_test[key] = deepcopy(val)[test_idx]
else:
fit_kwargs_train[key] = deepcopy(val)
fit_kwargs_test[key] = deepcopy(val)
return (fit_kwargs_train, fit_kwargs_test)
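# Usage sketch (illustrative kwargs): only keys containing 'sample_weight'
# (and not 'sample_weight_eval_set') are index-split between the train and
# test folds; all other kwargs are copied verbatim to both.
def _demo_split_fit_kwargs():
    weights = np.arange(6)
    (train_kwargs, test_kwargs) = _split_fit_kwargs(
        {'a__sample_weight': weights, 'a__verbose': True}, [0, 1, 2, 3], [4, 5])
    assert list(train_kwargs['a__sample_weight']) == [0, 1, 2, 3]
    assert list(test_kwargs['a__sample_weight']) == [4, 5]
    assert train_kwargs['a__verbose'] is True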
def _update_transformers_param(estimator, support):
"""Update ``transformers`` argument of ``ColumnTransformer`` steps."""
all_params = estimator.get_params()
params = []
for key in all_params:
if key.endswith('transformers'):
params.append(key)
if isinstance(estimator, (Pipeline, AdvancedPipeline)):
step = estimator.named_steps[key.split('__')[0]]
if not isinstance(step, ColumnTransformer):
raise TypeError(
f"Found 'transformers' parameter ('{key}'), but the "
f"corresponding pipeline step is not a "
f"ColumnTransformer (got '{type(step)}')")
else:
raise TypeError(
f"Found 'transformers' parameter ('{key}'), but the "
f"corresponding estimator is not a Pipeline or "
f"AdvancedPipeline")
new_params = {}
for param in params:
new_transformers = []
for transformer in all_params[param]:
new_columns = _map_features(transformer[2], support)
new_transformers.append(
(transformer[0], transformer[1], new_columns))
new_params[param] = new_transformers
estimator.set_params(**new_params)
def cross_val_score_weighted(estimator, x_data, y_data=None, groups=None,
scoring=None, cv=None, n_jobs=None, verbose=0,
fit_params=None, pre_dispatch='2*n_jobs',
error_score=np.nan, sample_weights=None):
"""Expand :func:`sklearn.model_selection.cross_val_score`."""
scorer = check_scoring(estimator, scoring=scoring)
scorer_name = 'score'
scoring = {scorer_name: scorer}
x_data, y_data, groups = indexable(x_data, y_data, groups)
cv = check_cv(cv, y_data, classifier=is_classifier(estimator))
scorers, _ = _check_multimetric_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(
delayed(_fit_and_score_weighted)(
clone(estimator), x_data, y_data, scorers, train, test, verbose,
None, fit_params, error_score=error_score,
sample_weights=sample_weights)
for train, test in cv.split(x_data, y_data, groups))
test_scores = list(zip(*scores))[0]
test_scores = _aggregate_score_dicts(test_scores)
return np.array(test_scores[scorer_name])
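# Usage sketch (illustrative data; the weights here are assumptions made up
# for the demo): scores one estimator with per-sample weights applied at
# scoring time.
def _demo_cross_val_score_weighted():
    rng = np.random.RandomState(42)
    x_demo = rng.randn(50, 3)
    y_demo = x_demo.dot(np.array([1.0, -2.0, 0.5])) + 0.1 * rng.randn(50)
    w_demo = rng.uniform(0.5, 1.5, size=50)
    return cross_val_score_weighted(LinearRegression(), x_demo, y_demo,
                                    scoring='r2', cv=5,
                                    sample_weights=w_demo)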
def get_rfecv_transformer(rfecv_estimator):
"""Get transformer step of RFECV estimator."""
try:
check_is_fitted(rfecv_estimator)
except NotFittedError:
raise NotFittedError(
"RFECV instance used to initialize FeatureSelectionTransformer "
"must be fitted")
transformer = FeatureSelectionTransformer(
grid_scores=rfecv_estimator.grid_scores_,
n_features=rfecv_estimator.n_features_,
ranking=rfecv_estimator.ranking_,
support=rfecv_estimator.support_,
)
return transformer
def perform_efecv(estimator, x_data, y_data, **kwargs):
"""Perform exhaustive feature selection."""
x_data, y_data = check_X_y(
x_data, y_data, ensure_min_features=2, force_all_finite='allow-nan')
n_all_features = x_data.shape[1]
# Iterate over all possible feature combinations
supports = list(itertools.product([False, True], repeat=n_all_features))
supports.remove(tuple([False] * n_all_features))
logger.info(
"Testing all %i possible feature combinations for exhaustive feature "
"selection", len(supports))
grid_scores = []
for support in supports:
support = np.array(support)
features = np.arange(n_all_features)[support]
# Evaluate estimator on new subset of features
new_estimator = clone(estimator)
_update_transformers_param(new_estimator, support)
scores = cross_val_score_weighted(new_estimator, x_data[:, features],
y_data, **kwargs)
grid_scores.append(np.mean(scores))
logger.debug("Fitted estimator with %i features, CV score was %.5f",
support.sum(), np.mean(scores))
# Final parameters
grid_scores = np.array(grid_scores)
best_idx = np.argmax(grid_scores)
support = np.array(supports[best_idx])
    features = np.arange(n_all_features)[support]
from builtins import object
import astropy.io.fits as fits
import astropy.units as u
import numpy as np
import os
import warnings
from threeML.io.fits_file import FITSExtension, FITSFile
from threeML.utils.OGIP.response import EBOUNDS, SPECRESP_MATRIX
class PHAWrite(object):
def __init__(self, *ogiplike):
"""
This class handles writing of PHA files from OGIPLike style plugins. It takes an arbitrary number of plugins as
input. While OGIPLike provides a write_pha method, it is only for writing the given instance to disk. The class
in general can be used to save an entire series of OGIPLikes to PHAs which can be used for time-resolved style
        plugins. An example implementation is given in FermiGBMTTELike.
:param ogiplike: OGIPLike plugin(s) to be written to disk
"""
self._ogiplike = ogiplike
self._n_spectra = len(ogiplike)
# The following lists corresponds to the different columns in the PHA/CSPEC
# formats, and they will be filled up by addSpectrum()
self._tstart = {'pha': [], 'bak': []}
self._tstop = {'pha': [], 'bak': []}
self._channel = {'pha': [], 'bak': []}
self._rate = {'pha': [], 'bak': []}
self._stat_err = {'pha': [], 'bak': []}
self._sys_err = {'pha': [], 'bak': []}
self._backscal = {'pha': [], 'bak': []}
self._quality = {'pha': [], 'bak': []}
self._grouping = {'pha': [], 'bak': []}
self._exposure = {'pha': [], 'bak': []}
self._backfile = {'pha': [], 'bak': []}
self._respfile = {'pha': [], 'bak': []}
self._ancrfile = {'pha': [], 'bak': []}
self._mission = {'pha': [], 'bak': []}
self._instrument = {'pha': [], 'bak': []}
# If the PHAs have existing background files
# then it is assumed that we will not need to write them
        # out. The most likely case is that the background file does not
        # exist, i.e., these are simulations from an EventList object.
        # A single missing background file causes the BAK file to be written.
self._write_bak_file = False
# Assuming all entries will have one answer
self._is_poisson = {'pha': True, 'bak': True}
self._pseudo_time = 0.
self._spec_iterator = 1
def write(self, outfile_name, overwrite=True, force_rsp_write=False):
"""
Write a PHA Type II and BAK file for the given OGIP plugin. Automatically determines
if BAK files should be generated.
:param outfile_name: string (excluding .pha) of the PHA to write
:param overwrite: (optional) bool to overwrite existing file
:param force_rsp_write: force the writing of an RSP
:return:
"""
# Remove the .pha extension if any
if os.path.splitext(outfile_name)[-1].lower() == '.pha':
outfile_name = os.path.splitext(outfile_name)[0]
self._outfile_basename = outfile_name
self._outfile_name = {'pha': '%s.pha' % outfile_name, 'bak': '%s_bak.pha' % outfile_name}
self._out_rsp = []
for ogip in self._ogiplike:
self._append_ogip(ogip, force_rsp_write)
self._write_phaII(overwrite)
def _append_ogip(self, ogip, force_rsp_write):
"""
Add an ogip instance's data into the data list
:param ogip: and OGIPLike instance
:param force_rsp_write: force the writing of an rsp
:return: None
"""
# grab the ogip pha info
pha_info = ogip.get_pha_files()
first_channel = pha_info['rsp'].first_channel
for key in ['pha', 'bak']:
if key not in pha_info: continue
if key == 'pha' and 'bak' in pha_info:
if pha_info[key].background_file is not None:
self._backfile[key].append(pha_info[key].background_file)
else:
self._backfile[key].append('%s_bak.pha{%d}' % (self._outfile_basename, self._spec_iterator))
# We want to write the bak file
self._write_bak_file = True
else:
self._backfile[key] = None
if pha_info[key].ancillary_file is not None:
self._ancrfile[key].append(pha_info[key].ancillary_file)
else:
# There is no ancillary file, so we need to flag it.
self._ancrfile[key].append('NONE')
if pha_info['rsp'].rsp_filename is not None and not force_rsp_write:
self._respfile[key].append(pha_info['rsp'].rsp_filename)
else:
# This will be reached in the case that a response was generated from a plugin
# e.g. if we want to use weighted DRMs from GBM.
rsp_file_name = "%s.rsp{%d}"%(self._outfile_basename,self._spec_iterator)
self._respfile[key].append(rsp_file_name)
if key == 'pha':
self._out_rsp.append(pha_info['rsp'])
self._rate[key].append(pha_info[key].rates.tolist())
self._backscal[key].append(pha_info[key].scale_factor)
if not pha_info[key].is_poisson:
self._is_poisson[key] = pha_info[key].is_poisson
self._stat_err[key].append(pha_info[key].rate_errors.tolist())
else:
self._stat_err[key] = None
# If there is systematic error, we add it
# otherwise create an array of zeros as XSPEC
# simply adds systematic in quadrature to statistical
# error.
if pha_info[key].sys_errors.tolist() is not None: # It returns an array which does not work!
self._sys_err[key].append(pha_info[key].sys_errors.tolist())
else:
self._sys_err[key].append(np.zeros_like(pha_info[key].rates, dtype=np.float32).tolist())
self._exposure[key].append(pha_info[key].exposure)
self._quality[key].append(ogip.quality.to_ogip().tolist())
self._grouping[key].append(ogip.grouping.tolist())
self._channel[key].append(np.arange(pha_info[key].n_channels, dtype=np.int32) + first_channel)
self._instrument[key] = pha_info[key].instrument
self._mission[key] = pha_info[key].mission
if ogip.tstart is not None:
self._tstart[key].append(ogip.tstart)
if ogip.tstop is not None:
self._tstop[key].append(ogip.tstop)
else:
                raise RuntimeError('OGIP TSTART is a number but TSTOP is None. This is a bug.')
# We will assume that the exposure is the true DT
# and assign starts and stops accordingly. This means
# we are most likely are dealing with a simulation.
else:
self._tstart[key].append(self._pseudo_time)
self._pseudo_time += pha_info[key].exposure
self._tstop[key].append(self._pseudo_time)
self._spec_iterator += 1
def _write_phaII(self, overwrite):
# Fix this later... if needed.
trigger_time = None
if self._backfile['pha'] is not None:
# Assuming background and pha files have the same
# number of channels
assert len(self._rate['pha'][0]) == len(
self._rate['bak'][0]), "PHA and BAK files do not have the same number of channels. Something is wrong."
assert self._instrument['pha'] == self._instrument[
'bak'], "Instrument for PHA and BAK (%s,%s) are not the same. Something is wrong with the files. " % (
self._instrument['pha'], self._instrument['bak'])
assert self._mission['pha'] == self._mission[
'bak'], "Mission for PHA and BAK (%s,%s) are not the same. Something is wrong with the files. " % (
self._mission['pha'], self._mission['bak'])
if self._write_bak_file:
keys = ['pha', 'bak']
else:
keys = ['pha']
for key in keys:
if trigger_time is not None:
tstart = self._tstart[key] - trigger_time
else:
tstart = self._tstart[key]
# build a PHAII instance
fits_file = PHAII(self._instrument[key],
self._mission[key],
tstart,
np.array(self._tstop[key]) - np.array(self._tstart[key]),
self._channel[key],
self._rate[key],
self._quality[key],
self._grouping[key],
self._exposure[key],
self._backscal[key],
self._respfile[key],
self._ancrfile[key],
back_file=self._backfile[key],
sys_err=self._sys_err[key],
stat_err=self._stat_err[key],
is_poisson=self._is_poisson[key])
# write the file
fits_file.writeto(self._outfile_name[key], overwrite=overwrite)
if self._out_rsp:
# add the various responses needed
extensions = [EBOUNDS(self._out_rsp[0].ebounds)]
extensions.extend([SPECRESP_MATRIX(this_rsp.monte_carlo_energies, this_rsp.ebounds, this_rsp.matrix) for this_rsp in self._out_rsp])
for i, ext in enumerate(extensions[1:]):
# Set telescope and instrument name
ext.hdu.header.set("TELESCOP", self._mission['pha'])
ext.hdu.header.set("INSTRUME", self._instrument['pha'])
ext.hdu.header.set("EXTVER", i+1)
rsp2 = FITSFile(fits_extensions=extensions)
rsp2.writeto("%s.rsp" % self._outfile_basename, overwrite=True)
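# Usage sketch (assumes an existing, fitted OGIPLike plugin named
# `ogip_plugin`; not runnable without threeML data files):
#
#     writer = PHAWrite(ogip_plugin)
#     writer.write('my_spectrum', overwrite=True)
#     # -> my_spectrum.pha (plus my_spectrum_bak.pha and my_spectrum.rsp
#     #    when backgrounds/responses have to be written out)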
def _atleast_2d_with_dtype(value,dtype=None):
if dtype is not None:
value = np.array(value,dtype=dtype)
arr = np.atleast_2d(value)
return arr
def _atleast_1d_with_dtype(value,dtype=None):
if dtype is not None:
value = np.array(value,dtype=dtype)
if dtype == str:
# convert None to NONE
# which is needed for None Type args
# to string arrays
idx = np.core.defchararray.lower(value) == 'none'
value[idx] = 'NONE'
arr = np.atleast_1d(value)
return arr
class SPECTRUM(FITSExtension):
_HEADER_KEYWORDS = (('EXTNAME', 'SPECTRUM', 'Extension name'),
('CONTENT', 'OGIP PHA data', 'File content'),
('HDUCLASS', 'OGIP ', 'format conforms to OGIP standard'),
('HDUVERS', '1.1.0 ', 'Version of format (OGIP memo CAL/GEN/92-002a)'),
('HDUDOC', 'OGIP memos CAL/GEN/92-002 & 92-002a', 'Documents describing the forma'),
('HDUVERS1', '1.0.0 ', 'Obsolete - included for backwards compatibility'),
('HDUVERS2', '1.1.0 ', 'Obsolete - included for backwards compatibility'),
('HDUCLAS1', 'SPECTRUM', 'Extension contains spectral data '),
('HDUCLAS2', 'TOTAL ', ''),
('HDUCLAS3', 'RATE ', ''),
('HDUCLAS4', 'TYPE:II ', ''),
('FILTER', '', 'Filter used'),
('CHANTYPE', 'PHA', 'Channel type'),
('POISSERR', False, 'Are the rates Poisson distributed'),
('DETCHANS', None, 'Number of channels'),
('CORRSCAL',1.0,''),
('AREASCAL',1.0,'')
)
def __init__(self, tstart, telapse, channel, rate, quality, grouping, exposure, backscale, respfile,
ancrfile, back_file=None, sys_err=None, stat_err=None, is_poisson=False):
"""
Represents the SPECTRUM extension of a PHAII file.
:param tstart: array of interval start times
:param telapse: array of times elapsed since start
        :param channel: array of channel numbers
:param rate: array of rates
:param quality: array of OGIP quality values
:param grouping: array of OGIP grouping values
:param exposure: array of exposures
:param backscale: array of backscale values
:param respfile: array of associated response file names
:param ancrfile: array of associate ancillary file names
:param back_file: array of associated background file names
:param sys_err: array of optional systematic errors
        :param stat_err: array of optional statistical errors (required if not Poisson!)
"""
n_spectra = len(tstart)
data_list = [('TSTART', tstart),
('TELAPSE', telapse),
('SPEC_NUM',np.arange(1, n_spectra + 1, dtype=np.int16)),
('CHANNEL', channel),
('RATE',rate),
('QUALITY',quality),
('BACKSCAL', backscale),
('GROUPING',grouping),
('EXPOSURE',exposure),
('RESPFILE',respfile),
('ANCRFILE',ancrfile)]
if back_file is not None:
data_list.append(('BACKFILE', back_file))
if stat_err is not None:
            assert not is_poisson, "Trying to enter STAT_ERR error but POISSERR is set to True"
data_list.append(('STAT_ERR', stat_err))
if sys_err is not None:
data_list.append(('SYS_ERR', sys_err))
super(SPECTRUM, self).__init__(tuple(data_list), self._HEADER_KEYWORDS)
self.hdu.header.set("POISSERR", is_poisson)
class PHAII(FITSFile):
def __init__(self, instrument_name, telescope_name, tstart, telapse, channel, rate, quality, grouping, exposure, backscale, respfile,
ancrfile, back_file=None, sys_err=None, stat_err=None,is_poisson=False):
"""
A generic PHAII fits file
:param instrument_name: name of the instrument
:param telescope_name: name of the telescope
:param tstart: array of interval start times
:param telapse: array of times elapsed since start
        :param channel: array of channel numbers
:param rate: array of rates
:param quality: array of OGIP quality values
:param grouping: array of OGIP grouping values
:param exposure: array of exposures
:param backscale: array of backscale values
:param respfile: array of associated response file names
:param ancrfile: array of associate ancillary file names
:param back_file: array of associated background file names
:param sys_err: array of optional systematic errors
        :param stat_err: array of optional statistical errors (required if not Poisson!)
"""
# collect the data so that we can have a general
# extension builder
self._tstart = _atleast_1d_with_dtype(tstart , np.float32) * u.s
self._telapse = _atleast_1d_with_dtype(telapse, np.float32) * u.s
self._channel = _atleast_2d_with_dtype(channel, np.int16)
self._rate = _atleast_2d_with_dtype(rate, np.float32) * 1./u.s
self._exposure = _atleast_1d_with_dtype(exposure, np.float32) * u.s
self._quality = _atleast_2d_with_dtype(quality, np.int16)
self._grouping = _atleast_2d_with_dtype(grouping, np.int16)
self._backscale = _atleast_1d_with_dtype(backscale, np.float32)
self._respfile = _atleast_1d_with_dtype(respfile,str)
self._ancrfile = _atleast_1d_with_dtype(ancrfile,str)
if sys_err is not None:
self._sys_err = _atleast_2d_with_dtype(sys_err, np.float32)
else:
self._sys_err = sys_err
if stat_err is not None:
self._stat_err = _atleast_2d_with_dtype(stat_err,np.float32)
else:
self._stat_err = stat_err
if back_file is not None:
self._back_file = _atleast_1d_with_dtype(back_file,str)
else:
            self._back_file = np.array(['NONE'] * self._tstart.shape[0])
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.stats import multivariate_normal
from matplotlib.colors import rgb2hex
class BayesianRegression:
X = -1
phi_X = -1
y = -1
prior_mu = -1
prior_V = -1
n_features = -1
inv_prior_V = -1
inv_prior_V_dot_prior_mu = -1
var_error = -1
function = -1
cmap = cm.get_cmap('Set2')
def __init__(self, var_error, prior_mu, prior_V, function=lambda x: np.array([[1, xx] for xx in x])):
self.var_error = var_error
self.prior_mu = prior_mu
self.prior_V = prior_V
self.n_features = len(prior_mu)
self.inv_prior_V = np.linalg.pinv(prior_V)
self.inv_prior_V_dot_prior_mu = np.dot(self.inv_prior_V, self.prior_mu)
self.function = function
def add_observations(self, x, y):
x, y = np.array(x), np.array(y)
if isinstance(self.X, int):
self.X = x
self.phi_X = np.array(self.function(x))
self.y = np.array(y)
else:
self.X = np.hstack((self.X, x))
self.phi_X = np.vstack((self.phi_X, self.function(x)))
            self.y = np.hstack((self.y, y))
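# Usage sketch (illustrative; the data values are made up): a linear design
# phi(x) = [1, x] with a broad Gaussian prior over intercept and slope.
def _demo_bayesian_regression():
    model = BayesianRegression(var_error=0.25,
                               prior_mu=np.zeros(2),
                               prior_V=10.0 * np.eye(2))
    model.add_observations([0.0, 1.0, 2.0], [0.1, 0.9, 2.1])
    return model.phi_X, model.y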
import copy
import numpy as np
import random
from localization.particle import Particle
from localization.landmark import Landmark
from localization.maps import MAP_X, MAP_Y
class ParticleFilter:
def __init__(
self,
num_particles=40,
resampling_rate=1.0,
x=0.4,
y=1.35,
o=0,
std_v=1.6,
std_y=0.3,
std_m=0.2,
save=False,
):
self.num_particles = num_particles
self.resampling_rate = resampling_rate
self.particles = []
for i in range(self.num_particles):
self.init(i, x, y, o)
self.std_v = std_v
self.std_y = std_y
self.std_m = std_m
self.cumulated_weights = []
self.i = 0
self.save = save
self.best_index = -1
self.t = 0
def run(self, key, sensor_data, sensor_poses, VELOCITY, TURN_RATE):
if len(sensor_data):
timestamp = sensor_data[0]
if self.t == 0:
self.t = timestamp
return
measurement = [x / 100 for x in sensor_data[1]]
dt = (timestamp - self.t) / 1000
if key == 0:
vel = VELOCITY
yaw_rate = 0
elif key == 1:
vel = 0
yaw_rate = TURN_RATE
elif key == 2:
vel = -VELOCITY
yaw_rate = 0
elif key == 3:
vel = 0
yaw_rate = -TURN_RATE
else:
vel = 0
yaw_rate = 0
self.predict(dt, vel, yaw_rate)
best_index = self.update(measurement, sensor_poses)
self.resample()
self.t = timestamp
return best_index
return None
def get_standard_deviation(self, vel, yaw_rate):
std_v = self.std_v * abs(vel) + 0.02
std_y = self.std_y * abs(yaw_rate) + 0.02
std_p = 0.02
return [std_v, std_y, std_p]
def show(self, index):
self.particles[index].show()
def init(self, index, x, y, o):
self.particles.append(Particle(index, x, y, o))
def predict(self, dt, vel, yaw_rate):
[std_v, std_y, std_p] = self.get_standard_deviation(vel, yaw_rate)
for particle in self.particles:
n_vel = np.random.normal(vel, std_v)
n_yaw_rate = np.random.normal(yaw_rate, std_y)
particle.o += n_yaw_rate * dt
if np.abs(n_yaw_rate) > 0.001:
dx = (
n_vel
/ n_yaw_rate
* (
np.sin(particle.o + n_yaw_rate * dt)
- np.sin(particle.o)
)
)
dy = (
n_vel
/ n_yaw_rate
* (
np.cos(particle.o)
- np.cos(particle.o + n_yaw_rate * dt)
)
)
else:
dx = n_vel * dt * np.cos(particle.o)
dy = n_vel * dt * np.sin(particle.o)
n_x = np.random.normal(0, std_p)
n_y = np.random.normal(0, std_p)
particle.x += dx + n_x
particle.y += dy + n_y
# self.draw("Predict")
def update(self, measurement, sensor_poses):
self.sum_w = 0.0
max_w = 0.0
best_index = -1
for i, p in enumerate(self.particles):
# print("i", i)
w = 1.0
p.landmarks = []
# print("P", p.x, p.y, p.o)
for j, m in enumerate(measurement):
if m > 5.0:
print("Skip", m)
continue
x = p.x + sensor_poses[j][0]
y = p.y + sensor_poses[j][1]
o_new = p.o + sensor_poses[j][2]
# print("L", x, y, o_new)
x_new = x + np.cos(o_new) * m
                y_new = y + np.sin(o_new) * m
'''
TODO:
Median trimmer
'''
import numpy as np
def mad(arr,axis=None):
mid = np.median(arr,axis=axis)
return np.median(abs(arr-mid),axis=axis)
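# Worked example (illustrative): the MAD is robust to the single outlier.
def _demo_mad():
    x = np.array([1.0, 2.0, 3.0, 4.0, 100.0])
    assert mad(x) == 1.0  # median(|x - 3|) = median([2, 1, 0, 1, 97]) = 1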
def bin_median(x,y,nbin):
binsize = (x.max()-x.min()) / (2*nbin)
bin_centers = np.linspace(x.min()+binsize,x.max()-binsize,nbin)
binned = np.empty(nbin)
error = np.empty(nbin)
for c in range(bin_centers.size):
mask = (x >= bin_centers[c]-binsize) & (x <= bin_centers[c]+binsize)
        binned[c] = np.median(y[mask])
        # Reconstructed tail (an assumption): use the per-bin MAD as a robust
        # error estimate and return centers, medians and errors together.
        error[c] = mad(y[mask])
    return bin_centers, binned, error
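# Usage sketch (illustrative; relies on the reconstructed return signature
# above):
def _demo_bin_median():
    x = np.linspace(0.0, 10.0, 200)
    y = np.sin(x)
    centers, med, err = bin_median(x, y, nbin=10)
    return centers, med, err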
###############################
### Imports
###############################
# Standard I/O and Data Handling
import pandas as pd
import numpy as np
import os, glob, sys
import datetime
import copy
import pickle
import statsmodels.api as sm
# Data Loading
from scripts.libraries.helpers import load_pickle, dump_pickle, load_tapping_data
# Warning Supression
import warnings
warnings.simplefilter("ignore")
###############################
### Helpers
###############################
def quantile_variation_coefficient(x):
"""
Compute the Quartile variation coeffient of x
Args:
x (array): Array of numeric values
Returns:
qvc (float): Quartile variation coefficient
"""
q1, q3 = np.nanpercentile(x, [25, 75])
qvc = (q3 - q1) / (q3 + q1)
return qvc
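# Worked example (illustrative): for x = 1..100, q1 = 25.75 and q3 = 75.25
# under linear interpolation, so the QVC is (75.25 - 25.75) / (75.25 + 25.75),
# roughly 0.49.
def _demo_qvc():
    return quantile_variation_coefficient(np.arange(1, 101))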
###############################
### Load Data
###############################
# Main Data Directory
data_dir = "./data/"
# Tapping Data Directory
tapping_data_dir = data_dir + "tapping/"
# Load Tap Data
stage_4_processed_file = data_dir + "stage_4_processed.pickle"
stage_4_processed = load_pickle(stage_4_processed_file)
# Load Survey Data
survey_data_filename = data_dir + "survey.csv"
survey_data = pd.read_csv(survey_data_filename).drop("Unnamed: 15",axis=1)
survey_data = survey_data.rename(columns = dict((col, col.lower()) for col in survey_data.columns))
# Tapping Filenames
tapping_filenames = glob.glob(tapping_data_dir + "*/*")
tapping_filenames = [tapping_filenames[i] for i in np.argsort([int(t.split("/")[-1].replace(".mat","")) for t in tapping_filenames])]
###############################
### Processing
###############################
# Sample Rate
sample_rate = 2000 # samples / second
# Processing Store
processed_results = []
# Process each subject
for sub, subject_file in enumerate(tapping_filenames):
# Parse subject identifiers
subject_id = int(subject_file.split("/")[-1].replace(".mat",""))
collection_date = pd.to_datetime(subject_file.split("/")[-2])
print("Processing Subject %s" % subject_id)
# Load Processed Taps and Raw Data
subject_taps = stage_4_processed[subject_id]
subject_data = load_tapping_data(subject_file)
# Move past subjects without data
if subject_taps is None:
continue
# Split out data components
preferred_period = float(subject_data["preferred_period_online"])
frequency_sequence = subject_data["frequency_sequence"]
metronome_signal = subject_data["trial_metronome"]
# Cycle through each trial
speed_seen = []
for trial, frequency, metronome in zip(range(1,7), frequency_sequence, metronome_signal):
# Isolate Trial Taps
if subject_taps[trial] is None:
continue
else:
trial_tap_data = subject_taps[trial]
# Occurence
occurence = 1
if frequency in speed_seen:
occurence = 2
else:
speed_seen.append(frequency)
# Identify Metronome Beats and Expected ITI
metronome = metronome/metronome.max()
metronome_beats = np.nonzero(np.diff(abs(metronome)) == 1)[0] + 1
        expected_iti = np.diff(metronome_beats)
# Copyright 2021 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Metrics."""
import math
import note_seq
import numpy as np
import scipy
from sklearn import metrics
def frechet_distance(real, fake, eps=1e-6):
"""Frechet distance.
Lower score is better.
"""
mu1, sigma1 = np.mean(real, axis=0), np.cov(real, rowvar=False)
mu2, sigma2 = np.mean(fake, axis=0), np.cov(fake, rowvar=False)
diff = mu1 - mu2
covmean, _ = scipy.linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = ('fid calculation produces singular product; '
'adding %s to diagonal of cov estimates') % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = scipy.linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
  if np.iscomplexobj(covmean):
    # Discard negligible imaginary components introduced by sqrtm.
    covmean = covmean.real
  # Reconstructed tail (an assumption): the standard Frechet distance formula.
  return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(covmean)
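# Usage sketch (illustrative feature clouds): identical inputs give a distance
# near zero, a shifted cloud gives a clearly larger one.
def _demo_frechet_distance():
  rng = np.random.RandomState(0)
  real_feats = rng.randn(256, 8)
  fake_feats = rng.randn(256, 8) + 0.5
  return frechet_distance(real_feats, real_feats), frechet_distance(
      real_feats, fake_feats)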
# -*- coding: utf-8 -*-
"""Proximity Forest time series classifier
a decision tree forest which uses distance measures to partition data.
<NAME> and <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME> and <NAME>
Proximity Forest: an effective and scalable distance-based classifier for
time series,
Data Mining and Knowledge Discovery, 33(3): 607-635, 2019
"""
# linkedin.com/goastler; github.com/goastler
__author__ = ["<NAME>"]
__all__ = ["ProximityForest", "_CachedTransformer", "ProximityStump", "ProximityTree"]
import numpy as np
import pandas as pd
from joblib import Parallel
from joblib import delayed
from scipy import stats
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import normalize
from sklearn.utils import check_random_state
from sktime.classification.base import BaseClassifier
from sktime.distances.elastic_cython import dtw_distance
from sktime.distances.elastic_cython import erp_distance
from sktime.distances.elastic_cython import lcss_distance
from sktime.distances.elastic_cython import msm_distance
from sktime.distances.elastic_cython import twe_distance
from sktime.distances.elastic_cython import wdtw_distance
from sktime.transformers.base import _PanelToPanelTransformer
from sktime.transformers.panel.summarize import DerivativeSlopeTransformer
from sktime.utils import comparison
from sktime.utils import dataset_properties
from sktime.utils.data_container import from_nested_to_2d_array
from sktime.utils.validation.panel import check_X
from sktime.utils.validation.panel import check_X_y
# todo unit tests / sort out current unit tests
# todo logging package rather than print to screen
# todo get params avoid func pointer - use name
# todo set params use func name or func pointer
# todo constructor accept str name func / pointer
# todo duck-type functions
class _CachedTransformer(_PanelToPanelTransformer):
"""Transformer container that transforms data and adds the transformed
version to a cache.
    If the transformation is called again on already seen data, the data is
    fetched from the cache rather than performing the expensive transformation.
Parameters
----------
transformer : the transformer to transform uncached data
Attributes
----------
cache : location to store transforms seen before for fast look up
"""
_required_parameters = ["transformer"]
def __init__(self, transformer):
self.cache = {}
self.transformer = transformer
super(_CachedTransformer, self).__init__()
def clear(self):
"""
clear the cache
"""
self.cache = {}
def transform(self, X, y=None):
"""
Fit transformer, creating a cache for transformation.
Parameters
----------
X : pandas DataFrame of shape [n_instances, n_features]
Input data
y : pandas Series, shape (n_instances), optional
Targets for supervised learning.
Returns
-------
cached_instances.
"""
# for each instance, get transformed instance from cache or
# transform and add to cache
cached_instances = {}
uncached_indices = []
for index in X.index.values:
try:
cached_instances[index] = self.cache[index]
except Exception:
uncached_indices.append(index)
if len(uncached_indices) > 0:
uncached_instances = X.loc[uncached_indices, :]
transformed_uncached_instances = self.transformer.fit_transform(
uncached_instances
)
transformed_uncached_instances.index = uncached_instances.index
transformed_uncached_instances = transformed_uncached_instances.to_dict(
"index"
)
self.cache.update(transformed_uncached_instances)
cached_instances.update(transformed_uncached_instances)
cached_instances = pd.DataFrame.from_dict(cached_instances, orient="index")
return cached_instances
def __str__(self):
return self.transformer.__str__()
def _derivative_distance(distance_measure, transformer):
"""
take derivative before conducting distance measure
:param distance_measure: the distance measure to use
:param transformer: the transformer to use
:return: a distance measure function with built in transformation
"""
def distance(instance_a, instance_b, **params):
df = pd.DataFrame([instance_a, instance_b])
df = transformer.transform(X=df)
instance_a = df.iloc[0, :]
instance_b = df.iloc[1, :]
return distance_measure(instance_a, instance_b, **params)
return distance
def distance_predefined_params(distance_measure, **params):
"""
conduct distance measurement with a predefined set of parameters
:param distance_measure: the distance measure to use
:param params: the parameters to use in the distance measure
:return: a distance measure with no parameters
"""
def distance(instance_a, instance_b):
return distance_measure(instance_a, instance_b, **params)
return distance
def cython_wrapper(distance_measure):
"""
wrap a distance measure in cython conversion (to 1 column per dimension
format)
:param distance_measure: distance measure to wrap
:return: a distance measure which automatically formats data for cython
distance measures
"""
def distance(instance_a, instance_b, **params):
# find distance
instance_a = from_nested_to_2d_array(
instance_a, return_numpy=True
) # todo use specific
# dimension rather than whole
# thing?
instance_b = from_nested_to_2d_array(
instance_b, return_numpy=True
) # todo use specific
# dimension rather than whole thing?
instance_a = np.transpose(instance_a)
instance_b = np.transpose(instance_b)
return distance_measure(instance_a, instance_b, **params)
return distance
def pure(y):
"""
test whether a set of class labels are pure (i.e. all the same)
----
Parameters
----
y : 1d array like
array of class labels
----
Returns
----
result : boolean
whether the set of class labels is pure
"""
# get unique class labels
unique_class_labels = np.unique(np.array(y))
# if more than 1 unique then not pure
return len(unique_class_labels) <= 1
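# Usage sketch (illustrative):
def _demo_pure():
    assert pure([1, 1, 1]) is True
    assert pure([0, 1]) is False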
def gini_gain(y, y_subs):
"""
get gini score of a split, i.e. the gain from parent to children
----
Parameters
----
y : 1d array like
array of class labels at parent
y_subs : list of 1d array like
list of array of class labels, one array per child
----
Returns
----
score : float
gini score of the split from parent class labels to children. Note a
higher score means better gain,
i.e. a better split
"""
y = np.array(y)
# find number of instances overall
parent_n_instances = y.shape[0]
# if parent has no instances then is pure
if parent_n_instances == 0:
for child in y_subs:
if len(child) > 0:
raise ValueError("children populated but parent empty")
return 0.5
# find gini for parent node
score = gini(y)
# sum the children's gini scores
for index in range(len(y_subs)):
child_class_labels = y_subs[index]
# ignore empty children
if len(child_class_labels) > 0:
# find gini score for this child
child_score = gini(child_class_labels)
# weight score by proportion of instances at child compared to
# parent
child_size = len(child_class_labels)
child_score *= child_size / parent_n_instances
# add to cumulative sum
score -= child_score
return score
def gini(y):
"""
get gini score at a specific node
----
Parameters
----
y : 1d numpy array
array of class labels
----
Returns
----
score : float
gini score for the set of class labels (i.e. how pure they are). A
larger score means more impurity. Zero means
pure.
"""
y = np.array(y)
# get number instances at node
n_instances = y.shape[0]
if n_instances > 0:
# count each class
unique_class_labels, class_counts = np.unique(y, return_counts=True)
# subtract class entropy from current score for each class
class_counts = np.divide(class_counts, n_instances)
class_counts = np.power(class_counts, 2)
        total = np.sum(class_counts)
        return 1 - total
else:
# y is empty, therefore considered pure
        raise ValueError("y is empty")
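# Worked example (illustrative): a perfectly balanced parent has gini 0.5, and
# splitting it into two pure children recovers the full 0.5 of gain.
def _demo_gini_gain():
    parent = np.array([0, 0, 1, 1])
    assert gini(parent) == 0.5
    assert gini_gain(parent, [np.array([0, 0]), np.array([1, 1])]) == 0.5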
def get_one_exemplar_per_class_proximity(proximity):
"""
unpack proximity object into X, y and random_state for picking exemplars.
----
Parameters
----
proximity : Proximity object
Proximity like object containing the X, y and random_state variables
required for picking exemplars.
----
Returns
----
result : function
function choosing one exemplar per class
"""
return get_one_exemplar_per_class(proximity.X, proximity.y, proximity.random_state)
def get_one_exemplar_per_class(X, y, random_state):
"""
Pick one exemplar instance per class in the dataset.
----
Parameters
----
X : array-like or sparse matrix of shape = [n_instances, n_columns]
The training input samples. If a Pandas data frame is passed,
the column _dim_to_use is extracted
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The class labels.
random_state : numpy RandomState
a random state for sampling random numbers
----
Returns
----
chosen_instances : list
list of the chosen exemplar instances.
chosen_class_labels : array
list of corresponding class labels for each of the chosen exemplar
instances.
"""
# find unique class labels
unique_class_labels = np.unique(y)
n_unique_class_labels = len(unique_class_labels)
chosen_instances = [None] * n_unique_class_labels
    # for each class randomly choose an instance
for class_label_index in range(n_unique_class_labels):
class_label = unique_class_labels[class_label_index]
# filter class labels for desired class and get indices
indices = np.argwhere(y == class_label)
# flatten numpy output
indices = np.ravel(indices)
# random choice
index = random_state.choice(indices)
# record exemplar instance and class label
instance = X.iloc[index, :]
chosen_instances[class_label_index] = instance
# convert lists to numpy arrays
return chosen_instances, unique_class_labels
def dtw_distance_measure_getter(X):
"""
generate the dtw distance measure
:param X: dataset to derive parameter ranges from
:return: distance measure and parameter range dictionary
"""
return {
"distance_measure": [cython_wrapper(dtw_distance)],
"w": stats.uniform(0, 0.25),
}
def msm_distance_measure_getter(X):
"""
generate the msm distance measure
:param X: dataset to derive parameter ranges from
:return: distance measure and parameter range dictionary
"""
n_dimensions = 1 # todo use other dimensions
return {
"distance_measure": [cython_wrapper(msm_distance)],
"dim_to_use": stats.randint(low=0, high=n_dimensions),
"c": [
0.01,
0.01375,
0.0175,
0.02125,
0.025,
0.02875,
0.0325,
0.03625,
0.04,
0.04375,
0.0475,
0.05125,
0.055,
0.05875,
0.0625,
0.06625,
0.07,
0.07375,
0.0775,
0.08125,
0.085,
0.08875,
0.0925,
0.09625,
0.1,
0.136,
0.172,
0.208,
0.244,
0.28,
0.316,
0.352,
0.388,
0.424,
0.46,
0.496,
0.532,
0.568,
0.604,
0.64,
0.676,
0.712,
0.748,
0.784,
0.82,
0.856,
0.892,
0.928,
0.964,
1,
1.36,
1.72,
2.08,
2.44,
2.8,
3.16,
3.52,
3.88,
4.24,
4.6,
4.96,
5.32,
5.68,
6.04,
6.4,
6.76,
7.12,
7.48,
7.84,
8.2,
8.56,
8.92,
9.28,
9.64,
10,
13.6,
17.2,
20.8,
24.4,
28,
31.6,
35.2,
38.8,
42.4,
46,
49.6,
53.2,
56.8,
60.4,
64,
67.6,
71.2,
74.8,
78.4,
82,
85.6,
89.2,
92.8,
96.4,
100,
],
}
def erp_distance_measure_getter(X):
"""
generate the erp distance measure
:param X: dataset to derive parameter ranges from
:return: distance measure and parameter range dictionary
"""
stdp = dataset_properties.stdp(X)
instance_length = dataset_properties.max_instance_length(
X
) # todo should this use the max instance
# length for unequal length dataset instances?
max_raw_warping_window = np.floor((instance_length + 1) / 4)
n_dimensions = 1 # todo use other dimensions
return {
"distance_measure": [cython_wrapper(erp_distance)],
"dim_to_use": stats.randint(low=0, high=n_dimensions),
"g": stats.uniform(0.2 * stdp, 0.8 * stdp - 0.2 * stdp),
"band_size": stats.randint(low=0, high=max_raw_warping_window + 1)
# scipy stats randint is exclusive on the max value, hence + 1
}
def lcss_distance_measure_getter(X):
"""
generate the lcss distance measure
:param X: dataset to derive parameter ranges from
:return: distance measure and parameter range dictionary
"""
stdp = dataset_properties.stdp(X)
instance_length = dataset_properties.max_instance_length(
X
) # todo should this use the max instance
# length for unequal length dataset instances?
max_raw_warping_window = np.floor((instance_length + 1) / 4)
n_dimensions = 1 # todo use other dimensions
return {
"distance_measure": [cython_wrapper(lcss_distance)],
"dim_to_use": stats.randint(low=0, high=n_dimensions),
"epsilon": stats.uniform(0.2 * stdp, stdp - 0.2 * stdp),
# scipy stats randint is exclusive on the max value, hence + 1
"delta": stats.randint(low=0, high=max_raw_warping_window + 1),
}
def twe_distance_measure_getter(X):
"""
generate the twe distance measure
:param X: dataset to derive parameter ranges from
:return: distance measure and parameter range dictionary
"""
return {
"distance_measure": [cython_wrapper(twe_distance)],
"penalty": [
0,
0.011111111,
0.022222222,
0.033333333,
0.044444444,
0.055555556,
0.066666667,
0.077777778,
0.088888889,
0.1,
],
"stiffness": [0.00001, 0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1],
}
def wdtw_distance_measure_getter(X):
"""
generate the wdtw distance measure
:param X: dataset to derive parameter ranges from
:return: distance measure and parameter range dictionary
"""
return {
"distance_measure": [cython_wrapper(wdtw_distance)],
"g": stats.uniform(0, 1),
}
def euclidean_distance_measure_getter(X):
"""
generate the ed distance measure
:param X: dataset to derive parameter ranges from
:return: distance measure and parameter range dictionary
"""
return {"distance_measure": [cython_wrapper(dtw_distance)], "w": [0]}
def setup_wddtw_distance_measure_getter(transformer):
"""
generate the wddtw distance measure by baking the derivative transformer
into the wdtw distance measure
:param transformer: the transformer to use
:return: a getter to produce the distance measure
"""
def getter(X):
return {
"distance_measure": [
_derivative_distance(cython_wrapper(wdtw_distance), transformer)
],
"g": stats.uniform(0, 1),
}
return getter
def setup_ddtw_distance_measure_getter(transformer):
"""
generate the ddtw distance measure by baking the derivative transformer
into the dtw distance measure
:param transformer: the transformer to use
:return: a getter to produce the distance measure
"""
def getter(X):
return {
"distance_measure": [
_derivative_distance(cython_wrapper(dtw_distance), transformer)
],
"w": stats.uniform(0, 0.25),
}
return getter
def setup_all_distance_measure_getter(proximity):
"""
setup all distance measure getter functions from a proximity object
:param proximity: a PT / PF / PS
:return: a list of distance measure getters
"""
transformer = _CachedTransformer(DerivativeSlopeTransformer())
distance_measure_getters = [
euclidean_distance_measure_getter,
dtw_distance_measure_getter,
setup_ddtw_distance_measure_getter(transformer),
wdtw_distance_measure_getter,
setup_wddtw_distance_measure_getter(transformer),
msm_distance_measure_getter,
lcss_distance_measure_getter,
erp_distance_measure_getter,
twe_distance_measure_getter,
]
def pick_rand_distance_measure(proximity):
"""
generate a distance measure from a range of parameters
:param proximity: proximity object containing distance measures,
ranges and dataset
:return: a distance measure with no parameters
"""
random_state = proximity.random_state
X = proximity.X
distance_measure_getter = random_state.choice(distance_measure_getters)
distance_measure_perm = distance_measure_getter(X)
param_perm = pick_rand_param_perm_from_dict(distance_measure_perm, random_state)
distance_measure = param_perm["distance_measure"]
del param_perm["distance_measure"]
return distance_predefined_params(distance_measure, **param_perm)
return pick_rand_distance_measure
def pick_rand_param_perm_from_dict(param_pool, random_state):
"""
pick a parameter permutation given a list of dictionaries contain
potential values OR a list of values OR a
distribution of values (a distribution must have the .rvs() function to
sample values)
----------
param_pool : list of dicts OR list OR distribution
parameters in the same format as GridSearchCV from scikit-learn.
example:
param_grid = [
{'C': [1, 10, 100, 1000], 'kernel': ['linear']},
{'C': [1, 10, 100, 1000], 'gamma': [{'C': [1, 10, 100, 1000],
'kernel': ['linear']}],
'kernel': ['rbf']},
]
Returns
-------
param_perm : dict
distance measure and corresponding parameters in dictionary format
"""
# construct empty permutation
param_perm = {}
# for each parameter
for param_name, param_values in param_pool.items():
# if it is a list
if isinstance(param_values, list):
# randomly pick a value
param_value = param_values[random_state.randint(len(param_values))]
# if the value is another dict then get a random parameter
# permutation from that dict (recursive over
# 2 funcs)
# if isinstance(param_value, dict): # no longer require
# recursive param perms
# param_value = _pick_param_permutation(param_value,
# random_state)
# else if parameter is a distribution
elif hasattr(param_values, "rvs"):
# sample from the distribution
param_value = param_values.rvs(random_state=random_state)
else:
# otherwise we don't know how to obtain a value from the parameter
raise Exception("unknown type of parameter pool")
# add parameter name and value to permutation
param_perm[param_name] = param_value
return param_perm
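# Usage sketch (illustrative pool): list entries are sampled uniformly, while
# scipy distributions are sampled through their .rvs() method.
def _demo_pick_rand_param_perm():
    pool = {'w': [0.0, 0.1, 0.25], 'g': stats.uniform(0, 1)}
    return pick_rand_param_perm_from_dict(pool, check_random_state(0))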
def pick_rand_param_perm_from_list(params, random_state):
"""
get a random parameter permutation providing a distance measure and
corresponding parameters
----------
params : list of dicts
parameters in the same format as GridSearchCV from scikit-learn.
example:
param_grid = [
{'C': [1, 10, 100, 1000], 'kernel': ['linear']},
{'C': [1, 10, 100, 1000], 'gamma': [{'C': [1, 10, 100, 1000],
'kernel': ['linear']}], 'kernel': ['rbf']},
]
Returns
-------
permutation : dict
distance measure and corresponding parameters in dictionary format
"""
#
param_pool = random_state.choice(params)
permutation = pick_rand_param_perm_from_dict(param_pool, random_state)
return permutation
def best_of_n_stumps(n):
"""
Generate the function to pick the best of n stump evaluations.
----
Parameters
----
n : int
the number of stumps to evaluate before picking the best. Must be 1
or more.
----
Returns
----
find_best_stump : func
function to find the best of n stumps.
"""
if n < 1:
raise ValueError("n cannot be less than 1")
def find_best_stump(proximity):
"""
Pick the best of n stump evaluations.
----
Parameters
----
proximity : Proximity like object
the proximity object to split data from.
----
Returns
----
stump : ProximityStump
the best stump / split of data of the n attempts.
"""
stumps = []
# for n stumps
for _ in range(n):
# duplicate tree configuration
stump = ProximityStump(
random_state=proximity.random_state,
get_exemplars=proximity.get_exemplars,
distance_measure=proximity.distance_measure,
setup_distance_measure=proximity.setup_distance_measure,
get_distance_measure=proximity.get_distance_measure,
get_gain=proximity.get_gain,
verbosity=proximity.verbosity,
n_jobs=proximity.n_jobs,
)
# grow the stump
stump.fit(proximity.X, proximity.y)
stump.grow()
stumps.append(stump)
# pick the best stump based upon gain
stump = comparison.max(
stumps, proximity.random_state, lambda stump: stump.entropy
)
return stump
return find_best_stump
class ProximityStump(BaseClassifier):
"""
Proximity Stump class to model a decision stump which uses a distance
measure to partition data.
Attributes:
label_encoder: label encoder to change string labels to numeric indices
y_exemplar: class label list of the exemplar instances
X_exemplar: dataframe of the exemplar instances
X_branches: dataframes for each branch, one per exemplar
y_branches: class label list for each branch, one per exemplar
classes_: unique list of classes
entropy: the gain associated with the split of data
random_state: the random state
get_exemplars: function to extract exemplars from a dataframe and
class value list
setup_distance_measure: function to setup the distance measure
getters from dataframe and class value list
get_distance_measure: distance measure getters
distance_measure: distance measures
get_gain: function to score the quality of a split
verbosity: logging verbosity
n_jobs: number of jobs to run in parallel *across threads"
"""
__author__ = "<NAME> (linkedin.com/goastler; github.com/goastler)"
def __init__(
self,
random_state=None,
get_exemplars=get_one_exemplar_per_class_proximity,
setup_distance_measure=setup_all_distance_measure_getter,
get_distance_measure=None,
distance_measure=None,
get_gain=gini_gain,
verbosity=0,
n_jobs=1,
):
"""
construct a proximity stump
:param random_state: the random state
:param get_exemplars: function to extract exemplars from a dataframe
and class value list
:param setup_distance_measure: function to setup the distance
measure getters from dataframe and class value list
:param get_distance_measure: distance measure getters
:param distance_measure: distance measures
:param get_gain: function to score the quality of a split
:param verbosity: logging verbosity
:param n_jobs: number of jobs to run in parallel *across threads"
"""
self.setup_distance_measure = setup_distance_measure
self.random_state = random_state
self.get_distance_measure = get_distance_measure
self.distance_measure = distance_measure
self.pick_exemplars = get_exemplars
self.get_gain = get_gain
self.verbosity = verbosity
self.n_jobs = n_jobs
# set in fit
self.label_encoder = None
self.y_exemplar = None
self.X_exemplar = None
self.X_branches = None
self.y_branches = None
self.X = None
self.y = None
self.classes_ = None
self.entropy = None
super(ProximityStump, self).__init__()
@staticmethod
def _distance_to_exemplars_inst(exemplars, instance, distance_measure):
"""
find distance between a given instance and the exemplar instances
:param exemplars: the exemplars to use
:param instance: the instance to compare to each exemplar
:param distance_measure: the distance measure to provide similarity
values
:return: list of distances to each exemplar
"""
n_exemplars = len(exemplars)
distances = np.empty(n_exemplars)
min_distance = np.math.inf
for exemplar_index in range(n_exemplars):
exemplar = exemplars[exemplar_index]
if exemplar.name == instance.name:
distance = 0
else:
distance = distance_measure(instance, exemplar) # , min_distance)
if distance < min_distance:
min_distance = distance
distances[exemplar_index] = distance
return distances
def distance_to_exemplars(self, X):
"""
find distance to exemplars
:param X: the dataset containing a list of instances
:return: 2d numpy array of distances from each instance to each
exemplar (instance by exemplar)
"""
check_X(X)
if self.n_jobs > 1 or self.n_jobs < 0:
parallel = Parallel(self.n_jobs)
distances = parallel(
delayed(self._distance_to_exemplars_inst)(
self.X_exemplar, X.iloc[index, :], self.distance_measure
)
for index in range(X.shape[0])
)
else:
distances = [
self._distance_to_exemplars_inst(
self.X_exemplar, X.iloc[index, :], self.distance_measure
)
for index in range(X.shape[0])
]
distances = np.vstack(np.array(distances))
return distances
def fit(self, X, y):
"""
Build the classifier on the training set (X, y)
----------
X : array-like or sparse matrix of shape = [n_instances, n_columns]
The training input samples. If a Pandas data frame is passed,
column 0 is extracted.
y : array-like, shape = [n_instances]
The class labels.
Returns
-------
self : object
"""
X, y = check_X_y(X, y, enforce_univariate=True, coerce_to_pandas=True)
self.X = dataset_properties.positive_dataframe_indices(X)
self.random_state = check_random_state(self.random_state)
# setup label encoding
if self.label_encoder is None:
self.label_encoder = LabelEncoder()
y = self.label_encoder.fit_transform(y)
self.y = y
self.classes_ = self.label_encoder.classes_
if self.distance_measure is None:
if self.get_distance_measure is None:
self.get_distance_measure = self.setup_distance_measure(self)
self.distance_measure = self.get_distance_measure(self)
self.X_exemplar, self.y_exemplar = self.pick_exemplars(self)
self._is_fitted = True
return self
def find_closest_exemplar_indices(self, X):
"""
find the closest exemplar index for each instance in a dataframe
:param X: the dataframe containing instances
:return: 1d numpy array of indices, one for each instance,
reflecting the index of the closest exemplar
"""
        check_X(X)  # todo make checks optional and propagate from forest downwards
n_instances = X.shape[0]
distances = self.distance_to_exemplars(X)
indices = np.empty(X.shape[0], dtype=int)
for index in range(n_instances):
exemplar_distances = distances[index]
closest_exemplar_index = comparison.arg_min(
exemplar_distances, self.random_state
)
indices[index] = closest_exemplar_index
return indices
def grow(self):
"""
grow the stump, creating branches for each exemplar
:return: self
"""
n_exemplars = len(self.y_exemplar)
indices = self.find_closest_exemplar_indices(self.X)
self.X_branches = [None] * n_exemplars
self.y_branches = [None] * n_exemplars
for index in range(n_exemplars):
instance_indices = np.argwhere(indices == index)
instance_indices = np.ravel(instance_indices)
self.X_branches[index] = self.X.iloc[instance_indices, :]
y = np.take(self.y, instance_indices)
self.y_branches[index] = y
self.entropy = self.get_gain(self.y, self.y_branches)
return self
def predict_proba(self, X):
"""
Find probability estimates for each class for all cases in X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_instances, n_columns]
The training input samples.
If a Pandas data frame is passed (sktime format)
If a Pandas data frame is passed, a check is performed that it
only has one column.
If not, an exception is thrown, since this classifier does not
yet have
multivariate capability.
Returns
-------
output : array of shape = [n_instances, n_classes] of probabilities
"""
X = check_X(X, enforce_univariate=True, coerce_to_pandas=True)
X = dataset_properties.negative_dataframe_indices(X)
distances = self.distance_to_exemplars(X)
ones = np.ones(distances.shape)
distances = np.add(distances, ones)
distributions = np.divide(ones, distances)
normalize(distributions, copy=False, norm="l1")
return distributions
class ProximityTree(BaseClassifier):
"""
Proximity Tree class to model a decision tree which uses distance
measures to partition data.
@article{lucas19proximity,
title={Proximity Forest: an effective and scalable distance-based
classifier for time series},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
journal={Data Mining and Knowledge Discovery},
volume={33},
number={3},
pages={607--635},
year={2019}
}
https://arxiv.org/abs/1808.10594
Attributes:
label_encoder: label encoder to change string labels to numeric indices
classes_: unique list of classes
random_state: the random state
get_exemplars: function to extract exemplars from a dataframe and
class value list
setup_distance_measure: function to setup the distance measure
getters from dataframe and class value list
get_distance_measure: distance measure getters
distance_measure: distance measures
get_gain: function to score the quality of a split
verbosity: logging verbosity
n_jobs: number of jobs to run in parallel *across threads"
find_stump: function to find the best split of data
max_depth: max tree depth
depth: current depth of tree, as each node is a tree itself,
therefore can have a depth of >=0
X: train data
y: train data labels
stump: the stump used to split data at this node
branches: the partitions of data driven by the stump
"""
def __init__(
self,
# note: any changes of these params must be reflected in
# the fit method for building trees / clones
random_state=None,
get_exemplars=get_one_exemplar_per_class_proximity,
distance_measure=None,
get_distance_measure=None,
setup_distance_measure=setup_all_distance_measure_getter,
get_gain=gini_gain,
max_depth=np.math.inf,
is_leaf=pure,
verbosity=0,
n_jobs=1,
n_stump_evaluations=5,
find_stump=None,
):
"""
build a Proximity Tree object
:param random_state: the random state
:param get_exemplars: get the exemplars from a given dataframe and
list of class labels
:param distance_measure: distance measure to use
:param get_distance_measure: method to get the distance measure if
no already set
:param setup_distance_measure: method to setup the distance measures
based upon the dataset given
:param get_gain: method to find the gain of a data split
:param max_depth: maximum depth of the tree
:param is_leaf: function to decide when to mark a node as a leaf node
:param verbosity: number reflecting the verbosity of logging
:param n_jobs: number of parallel threads to use while building
:param find_stump: method to find the best split of data / stump at
a node
:param n_stump_evaluations: number of stump evaluations to do if
find_stump method is None
"""
self.verbosity = verbosity
self.n_stump_evaluations = n_stump_evaluations
self.find_stump = find_stump
self.max_depth = max_depth
self.get_distance_measure = distance_measure
self.random_state = random_state
self.is_leaf = is_leaf
self.get_distance_measure = get_distance_measure
self.setup_distance_measure = setup_distance_measure
self.get_exemplars = get_exemplars
self.get_gain = get_gain
self.n_jobs = n_jobs
self.depth = 0
# below set in fit method
self.label_encoder = None
self.distance_measure = None
self.stump = None
self.branches = None
self.X = None
self.y = None
self.classes_ = None
super(ProximityTree, self).__init__()
def fit(self, X, y):
"""
Build the classifier on the training set (X, y)
----------
X : array-like or sparse matrix of shape = [n_instances, n_columns]
The training input samples. If a Pandas data frame is passed,
column 0 is extracted.
y : array-like, shape = [n_instances]
The class labels.
Returns
-------
self : object
"""
X, y = check_X_y(X, y, enforce_univariate=True, coerce_to_pandas=True)
self.X = dataset_properties.positive_dataframe_indices(X)
self.random_state = check_random_state(self.random_state)
if self.find_stump is None:
self.find_stump = best_of_n_stumps(self.n_stump_evaluations)
# setup label encoding
if self.label_encoder is None:
self.label_encoder = LabelEncoder()
y = self.label_encoder.fit_transform(y)
self.y = y
self.classes_ = self.label_encoder.classes_
if self.distance_measure is None:
if self.get_distance_measure is None:
self.get_distance_measure = self.setup_distance_measure(self)
self.distance_measure = self.get_distance_measure(self)
self.stump = self.find_stump(self)
n_branches = len(self.stump.y_exemplar)
self.branches = [None] * n_branches
if self.depth < self.max_depth:
for index in range(n_branches):
sub_y = self.stump.y_branches[index]
if not self.is_leaf(sub_y):
sub_tree = ProximityTree(
random_state=self.random_state,
get_exemplars=self.get_exemplars,
distance_measure=self.distance_measure,
setup_distance_measure=self.setup_distance_measure,
get_distance_measure=self.get_distance_measure,
get_gain=self.get_gain,
is_leaf=self.is_leaf,
verbosity=self.verbosity,
max_depth=self.max_depth,
n_jobs=self.n_jobs,
)
sub_tree.label_encoder = self.label_encoder
sub_tree.depth = self.depth + 1
self.branches[index] = sub_tree
sub_X = self.stump.X_branches[index]
sub_tree.fit(sub_X, sub_y)
self._is_fitted = True
return self
def predict_proba(self, X):
"""
Find probability estimates for each class for all cases in X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_instances, n_columns]
The training input samples.
If a Pandas data frame is passed (sktime format),
a check is performed that it only has one column;
if not, an exception is thrown, since this classifier
does not yet have multivariate capability.
Returns
-------
output : array of shape = [n_instances, n_classes] of probabilities
"""
X = check_X(X, enforce_univariate=True, coerce_to_pandas=True)
X = dataset_properties.negative_dataframe_indices(X)
closest_exemplar_indices = self.stump.find_closest_exemplar_indices(X)
n_classes = len(self.label_encoder.classes_)
distribution = np.zeros((X.shape[0], n_classes))
for index in range(len(self.branches)):
indices = | np.argwhere(closest_exemplar_indices == index) | numpy.argwhere |
# -*- coding: utf-8 -*-
# author: <NAME> (16 Jan 2019)
# <NAME>, <NAME>, <NAME>, "Scalable Learning with a
# Structural Recurrent Neural Network for Short-Term Traffic Prediction", \
# IEEE Sensors Journal, Aug 2019
# main script
import argparse
import os
import time
import torch
from torch.autograd import Variable
import torch.nn as nn
import numpy as np
from dataLoader import DataLoader
from st_graph import ST_GRAPH
from model import SRNN
data_dir = 'dataset/Santander/'
save_dir = 'save/'
log_dir = 'log/'
def main():
parser = argparse.ArgumentParser()
# Data size
parser.add_argument('--numNodes_set', type=int, default=-1,
help='Number of nodes to be used')
parser.add_argument('--numData_set', type=int, default=-1,
help='Number of time steps to be used')
parser.add_argument('--numData_train_set', type=int, default=-1,
help='Number of train data')
# RNN size
parser.add_argument('--node_rnn_size', type=int, default=64,
help='Size of Node RNN hidden state')
parser.add_argument('--edge_rnn_size', type=int, default=64,
help='Size of Edge RNN hidden state')
# Embedding size
parser.add_argument('--node_embedding_size', type=int, default=32,
help='Embedding size of node features')
parser.add_argument('--edge_embedding_size', type=int, default=32,
help='Embedding size of edge features')
# Multi-layer RNN layer size
parser.add_argument('--num_layer', type=int, default=3,
help='Number of layers of RNN')
# Sequence length
parser.add_argument('--seq_length', type=int, default=10,
help='Sequence length')
# Batch size
parser.add_argument('--batch_size', type=int, default=32,
help='Batch size')
# Number of epochs
parser.add_argument('--num_epochs', type=int, default=1,
help='number of epochs')
# Gradient value at which it should be clipped
parser.add_argument('--grad_clip', type=float, default=1.,
help='clip gradients at this value')
# Lambda regularization parameter (L2)
parser.add_argument('--lambda_param', type=float, default=0.00001,
help='L2 regularization parameter')
# Learning rate parameter
parser.add_argument('--learning_rate', type=float, default=0.0005,
help='learning rate')
# Decay rate for the learning rate parameter
parser.add_argument('--decay_rate', type=float, default=0.99,
help='decay rate for the optimizer')
# Dropout rate
parser.add_argument('--dropout', type=float, default=0.5,
help='Dropout probability')
# Print every x batch
parser.add_argument('--printEvery', type=int, default=1,
help='Train/Eval result print period')
# Input and output size
parser.add_argument('--node_input_size', type=int, default=1,
help='Dimension of the node features')
parser.add_argument('--edge_input_size', type=int, default=2,
help='Dimension of the edge features')
parser.add_argument('--node_output_size', type=int, default=1,
help='Dimension of the node output')
args = parser.parse_args()
Run_SRNN_NormalCase(args, no_dataset = 1)
#Run_SRNN_Scalability(args)
#Run_SRNN_Different_Dataset(args,2,1)
#Run_SRNN_test_parameters(args)
# run with various combinations of hyperparameters to find the best one
def Run_SRNN_test_parameters(args):
args.num_epochs = 1
args.numData_set = 2000
args.printEvery = 50
grad_clip_list = [1.0, 5.0]
learning_rate_list = [0.0001, 0.0005]
lambda_list = [0.00001, 0.00005]
node_rnn_size_list = [64, 128]
edge_rnn_size_list = [64, 128]
log_dir_test = log_dir+'parameter_test/'
if not os.path.exists(log_dir_test):
os.makedirs(log_dir_test)
f1 = open(log_dir_test+"parameter_test3_4.txt", "w")
f2 = open(log_dir_test+"parameter_test5_5.txt", "w")
out_str_merge1 = ''
out_str_merge2 = ''
for lr in learning_rate_list:
args.learning_rate = lr
for ll in lambda_list:
args.lambda_param = ll
for nr in node_rnn_size_list:
args.node_rnn_size = nr
for er in edge_rnn_size_list:
args.edge_rnn_size = er
for gc in grad_clip_list:
args.grad_clip = gc
out_str = '== learning rate: {}, lambda: {}, node rnn size: {}, edge rnn size: {}, grad_clip: {}:\n'.format(lr,
ll, nr, er, gc)
out_str += str(Run_SRNN_Different_Dataset(args,3,4)) + '\n'
print(out_str)
out_str_merge1 += out_str
out_str = '== learning rate: {}, lambda: {}, node rnn size: {}, edge rnn size: {}, grad_clip: {}:\n'.format(lr,
ll, nr, er, gc)
out_str += str(Run_SRNN_Different_Dataset(args,5,5)) + '\n'
print(out_str)
out_str_merge2 += out_str
print(out_str_merge1)
print('')
print(out_str_merge2)
f1.write(out_str_merge1)
f2.write(out_str_merge2)
f1.close()
f2.close()
# run for testing the scalability
def Run_SRNN_Scalability(args):
no_dataset_list = [1, 2, 3, 4]
args.num_epochs = 10
args.numData_set = -1
args.printEvery = 50
for no_dataset_train in no_dataset_list:
for no_dataset_eval in no_dataset_list:
Run_SRNN_Different_Dataset(args, no_dataset_train, no_dataset_eval)
# train with no_dataset_train and evaluate with no_dataset_eval
def Run_SRNN_Different_Dataset(args, no_dataset_train, no_dataset_eval):
print('')
print('')
print('[[ Train on Dataset {} and Evaluation on Dataset {} ]]'.format(no_dataset_train, no_dataset_eval))
# Initialize net
net = SRNN(args)
# Construct the DataLoader object that loads data
dataloader = DataLoader(args)
# Construct the ST-graph object that reads graph
stgraph = ST_GRAPH(args)
optimizer = torch.optim.Adagrad(net.parameters())
print('- Number of trainable parameters:', sum(p.numel() for p in net.parameters() if p.requires_grad))
best_eval_loss = 10000
best_epoch = 0
eval_loss_res = np.zeros((args.num_epochs+1, 2))
for e in range(args.num_epochs):
epoch = e + 1
start_train = time.time()
#### Training ####
print('')
print('-- Training, epoch {}/{}, Dataset {} on {}'.format(epoch, args.num_epochs, no_dataset_train, no_dataset_eval))
loss_epoch = 0
if (epoch > 1):
net.initialize()
data_path, graph_path = Data_path(no_dataset_train)
dataloader.load_data(data_path)
stgraph.readGraph(dataloader.num_sensor, graph_path)
net.setStgraph(stgraph)
# For each batch
for b in range(dataloader.num_batches_train):
batch = b + 1
start = time.time()
# Get batch data
x = dataloader.next_batch_train()
# Loss for this batch
loss_batch = 0
# For each sequence in the batch
for sequence in range(dataloader.batch_size):
# put node and edge features
stgraph.putSequenceData(x[sequence])
# get data to feed
data_nodes, data_temporalEdges, data_spatialEdges = stgraph.getSequenceData()
# put a sequence to net
loss_output, data_nodes, outputs = forward(net, optimizer, args, stgraph,
data_nodes, data_temporalEdges, data_spatialEdges)
loss_output.backward()
loss_batch += loss_RMSE(data_nodes[-1], outputs[-1], dataloader.scaler)
# Clip gradients
torch.nn.utils.clip_grad_norm_(net.parameters(), args.grad_clip)
# Update parameters
optimizer.step()
end = time.time()
loss_batch = loss_batch / dataloader.batch_size
loss_epoch += loss_batch
if ((e * dataloader.num_batches_train + batch) % args.printEvery == 1):
print(
'Train: {}/{}, train_loss = {:.3f}, time/batch = {:.3f}'.format(e * dataloader.num_batches_train + batch,
args.num_epochs * dataloader.num_batches_train,
loss_batch,
end - start))
end_train = time.time()
# Compute loss for the entire epoch
loss_epoch /= dataloader.num_batches_train
print('(epoch {}), train_loss = {:.3f}, time/train = {:.3f}'.format(epoch, loss_epoch,
end_train - start_train))
# Save the model after each epoch
save_path = Save_path(no_dataset_train, epoch)
print('Saving model to '+save_path)
torch.save({
'epoch': epoch,
'state_dict': net.state_dict(),
'optimizer_state_dict': optimizer.state_dict()
}, save_path)
print('')
#### Evaluation ####
print('-- Evaluation, epoch {}/{}, Dataset {} on {}'.format(epoch, args.num_epochs, no_dataset_train, no_dataset_eval))
data_path, graph_path = Data_path(no_dataset_eval)
log_path = Log_path(no_dataset_train, no_dataset_eval, 'SRNN')
dataloader.load_data(data_path)
stgraph.readGraph(dataloader.num_sensor, graph_path)
net.setStgraph(stgraph)
loss_epoch = 0
for b in range(dataloader.num_batches_eval):
batch = b + 1
start = time.time()
# Get batch data
x = dataloader.next_batch_eval()
# Loss for this batch
loss_batch = 0
for sequence in range(dataloader.batch_size):
# put node and edge features
stgraph.putSequenceData(x[sequence])
# get data to feed
data_nodes, data_temporalEdges, data_spatialEdges = stgraph.getSequenceData()
# put a sequence to net
_, data_nodes, outputs = forward(net, optimizer, args, stgraph,
data_nodes, data_temporalEdges, data_spatialEdges)
loss_batch += loss_RMSE(data_nodes[-1], outputs[-1], dataloader.scaler)
end = time.time()
loss_batch = loss_batch / dataloader.batch_size
loss_epoch += loss_batch
if ((e * dataloader.num_batches_eval + batch) % args.printEvery == 1):
print(
'Eval: {}/{}, eval_loss = {:.3f}, time/batch = {:.3f}'.format(e * dataloader.num_batches_eval + batch,
args.num_epochs * dataloader.num_batches_eval,
loss_batch,
end - start))
loss_epoch /= dataloader.num_batches_eval
eval_loss_res[e] = (epoch, loss_epoch)
# Update best validation loss until now
if loss_epoch < best_eval_loss:
best_eval_loss = loss_epoch
best_epoch = epoch
print('(epoch {}), eval_loss = {:.3f}'.format(epoch, loss_epoch))
print('--> Best epoch: {}, Best evaluation loss {:.3f}'.format(best_epoch, best_eval_loss))
# Record the best epoch and best validation loss overall
eval_loss_res[-1] = (best_epoch, best_eval_loss)
np.savetxt(log_path, eval_loss_res, fmt='%d, %.3f')
print('- Eval result has been saved in', log_path)
return eval_loss_res[-1,1]
# train with no_dataset and evaluate with the same dataset
def Run_SRNN_NormalCase(args, no_dataset):
data_path, graph_path = Data_path(no_dataset)
log_path = Log_path(no_dataset)
# Construct the DataLoader object that loads data
dataloader = DataLoader(args)
dataloader.load_data(data_path)
# Construct the ST-graph object that reads graph
stgraph = ST_GRAPH(args)
stgraph.readGraph(dataloader.num_sensor, graph_path)
# Initialize net
net = SRNN(args)
net.setStgraph(stgraph)
print('- Number of trainable parameters:', sum(p.numel() for p in net.parameters() if p.requires_grad))
# optimizer = torch.optim.Adam(net.parameters(), lr=args.learning_rate)
# optimizer = torch.optim.RMSprop(net.parameters(), lr=args.learning_rate, momentum=0.0001, centered=True)
optimizer = torch.optim.Adagrad(net.parameters())
best_eval_loss = 10000
best_epoch = 0
print('')
print('---- Train and Evaluation ----')
eval_loss_res = | np.zeros((args.num_epochs+1, 2)) | numpy.zeros |
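# Note (added, hedged): Data_path, Save_path and Log_path are helper functions
# used above but defined later in the original script, outside this excerpt.
# Judging from the call sites, they map a dataset number (and, for Log_path,
# a train/eval pair) to file locations under data_dir, save_dir and log_dir;
# the exact path layout is not shown here.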
#!/usr/bin/env python
# coding: utf-8
# In[2]:
get_ipython().system('pip install dask')
from dask import dataframe
import pandas as pd
import yfinance as yf
import os
import logging
import numpy as np
from scipy.cluster.hierarchy import dendrogram, linkage, leaves_list
import seaborn as sns
import matplotlib.pyplot as plt
import bahc
class MeanVariancePortfolio():
def __init__(self, cfg):
self.cfg = cfg
self.data = self.load_data()
def load_data(self):
return self.preprocess(pd.concat([pd.read_parquet(os.path.join(self.cfg.data_dir, f))['Close'] for f in os.listdir(self.cfg.data_dir)]))
def preprocess(self, x, percent0 = 0.5, percent1 = 0.2):
tmp = x.dropna(axis=0, thresh=int(percent0*x.shape[1])).dropna(axis=1, thresh=int(percent1*x.shape[0])).fillna(method="ffill")
dropped = set(x.columns) - set(tmp.columns)
logging.info("Preprocessing dropped the following stocks" + "-".join(list(dropped)))
return tmp
def clean_portfolio(self):
self.data.fillna(method = 'ffill', inplace = True)
columns_missing = self.data.columns[self.data.isna().sum() > 10].values
self.data.drop(columns_missing, inplace= True, axis=1)
self.data.fillna(method = 'bfill', inplace = True)
return self
def min_var_portfolio(mu, cov, target_return):
inv_cov = np.linalg.inv(cov)
ones = np.ones(len(mu))[:, np.newaxis]
a = ones.T @ inv_cov @ ones
b = mu.T @ inv_cov @ ones
c = mu.T.to_numpy() @ inv_cov @ mu
a = a[0][0]
b = b.loc['mu', 0]
c = c.loc[0, 'mu']
num1 = (a * inv_cov @ mu - b * inv_cov @ ones) * target_return
num2 = (c * inv_cov @ ones- b * inv_cov @ mu)
den = a*c - b**2
w = (num1 + num2) / den
var = w.T.to_numpy() @ cov.to_numpy() @ w.to_numpy()
return w, var**0.5
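# Reference note (added): the closed form above is the Lagrangian solution of
#   min_w  w' C w   subject to   w' mu = target_return  and  w' 1 = 1,
# which gives
#   w = [ (a C^-1 mu - b C^-1 1) * target_return + (c C^-1 1 - b C^-1 mu) ] / (a c - b^2)
# with a = 1' C^-1 1, b = mu' C^-1 1 and c = mu' C^-1 mu, matching the
# a, b, c and num1/num2/den computed in min_var_portfolio.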
def __call__(self, training_period = 10, num_assets = 50, rf = 0.05, bahc_bool = False, plot_bool = True):
def get_log_returns_matrix(portfolio_data):
log_returns_matrix = np.log(portfolio_data/portfolio_data.shift(1))
log_returns_matrix.fillna(0, inplace=True)
log_returns_matrix = log_returns_matrix[(log_returns_matrix.T != 0).any()]
return log_returns_matrix
def get_stocks_reordered(log_returns_matrix):
cov_daily = log_returns_matrix.cov()
stocks = list(cov_daily.columns)
link = linkage(cov_daily, 'average')
reordered_cov_daily = cov_daily.copy()
stocks_reordered = [stocks[i] for i in leaves_list(link)]
reordered_cov_daily = reordered_cov_daily[stocks_reordered]
reordered_cov_daily = reordered_cov_daily.reindex(stocks_reordered)
return stocks_reordered, reordered_cov_daily
def get_bahc_cov_matrix(log_returns_matrix, stocks_reordered):
cov_bahc = pd.DataFrame(bahc.filterCovariance( | np.array(log_returns_matrix) | numpy.array |
import numpy as np
from sim.Passenger import Passenger
from sim.Bus import Bus
from sim.Route import Route
import matplotlib.pyplot as plt
from model.Group_MemoryC import Memory
import pandas as pd
import time
import math
class Engine():
def __init__(self, bus_list,busstop_list,route_list,simulation_step,dispatch_times, demand=0,agents=None,share_scale=0, is_allow_overtake=0,hold_once_arr=1,control_type=1,seed=1,all=0,weight=0):
self.all=all
self.busstop_list = busstop_list
self.simulation_step = simulation_step
self.pax_list = {}  # passengers currently on the road
self.arr_pax_list = {}  # passengers who have finished their trips
self.dispatch_buslist = {}
self.agents = {}
self.route_list = route_list
self.dispatch_buslist = {}
self.is_allow_overtake = is_allow_overtake
self.hold_once_arr = hold_once_arr
self.control_type = control_type
self.agents = agents
self.bus_list = bus_list
self.bunching_times = 0
self.arrstops = 0
self.reward_signal = {}
self.reward_signalp1={}
self.reward_signalp2={}
self.qloss = {}
self.weight = weight/10.
self.demand = demand
self.records = []
self.share_scale =share_scale
self.step = 0
self.dispatch_times = dispatch_times
self.cvlog=[]
members = list(self.bus_list.keys())
self.GM = Memory(members)
self.rs = {}
for b_id,b in self.bus_list.items():
self.reward_signal[b_id]=[]
self.reward_signalp1[b_id] = []
self.reward_signalp2[b_id] = []
self.arrivals = {}
# stop hash
self.stop_hash = {}
k = 0
for bus_stop_id, bus_stop in self.busstop_list.items():
self.stop_hash[bus_stop_id]=k
k+=1
self.bus_hash={}
k = 0
for bus_id, bus in self.bus_list.items():
self.bus_hash[bus_id]=k
k+=1
self.action_record = []
self.reward_record = []
self.state_record = []
def cal_statistic(self,name,train=1):
print('total pax:%d'%(len(self.pax_list)))
wait_cost = []
travel_cost = []
headways_var = {}
headways_mean = {}
boards = []
arrs = []
origins = []
dests = []
still_wait = 0
stop_wise_wait = {}
stop_wise_hold = {}
delay = []
for pax_id, pax in self.pax_list.items():
w = min(pax.onboard_time - pax.arr_time, self.simulation_step-pax.arr_time)
wait_cost.append(w)
if pax.origin in stop_wise_wait:
stop_wise_wait[pax.origin].append(w)
else:
stop_wise_wait[pax.origin]=[w]
if pax.onboard_time<99999999:
boards.append(pax.onboard_time )
if pax.alight_time<999999:
travel_cost.append(pax.alight_time-pax.onboard_time )
delay.append(pax.alight_time-pax.arr_time-pax.onroad_cost)
else:
still_wait+=1
hold_cost = []
for bus_id, bus in self.bus_list.items():
tt = [ ]
for k,v in bus.stay.items():
if v>0:
tt.append(bus.hold_cost[k])
hold_cost.append(bus.hold_cost[k])
if k in stop_wise_hold:
stop_wise_hold[k].append(bus.hold_cost[k])
else:
stop_wise_hold[k] = [bus.hold_cost[k]]
stop_wise_wait_order = []
stop_wise_hold_order = []
arr_times = []
buslog = pd.DataFrame()
for bus_stop_id in bus.pass_stop:
buslog[bus_stop_id]=self.busstop_list[bus_stop_id].arr_log[bus.route_id]
arr_times.append([bus_stop_id]+self.busstop_list[bus_stop_id].arr_log[bus.route_id])
try:
stop_wise_wait_order.append(np.mean(stop_wise_wait[ bus_stop_id ]))
except:
stop_wise_wait_order.append(0)
try:
stop_wise_hold_order.append(np.mean(stop_wise_hold[bus_stop_id]))
except:
stop_wise_hold_order.append(0)
for k,v in self.busstop_list[bus_stop_id].arr_log.items():
h = np.array(v )[1:] - np.array(v)[:-1]
try:
headways_var[bus_stop_id].append(np.var(h))
headways_mean[bus_stop_id].append(np.mean(h))
except:
headways_var[bus_stop_id]=[np.var(h)]
headways_mean[bus_stop_id]=[np.mean(h)]
log = {}
log['wait_cost'] = wait_cost
log['travel_cost'] = travel_cost
log['hold_cost'] = hold_cost
log['headways_var'] = headways_var
log['headways_mean'] = headways_mean
log['stw'] = stop_wise_wait_order
log['sth'] = stop_wise_hold_order
log['bunching'] = self.bunching_times
log['delay'] = delay
print('bunching times:%g headway mean:%g headway var:%g EV:%g'%(self.bunching_times, np.mean(list(headways_mean.values())),np.mean(list(headways_var.values())), (np.mean(list(headways_var.values()))/(np.mean(list(headways_mean.values()))**2)) ))
AWT = []
AHD = []
AOD = []
for k in bus.pass_stop:
AHD.append(np.mean(stop_wise_hold[k]))
try:
if math.isnan(np.var(self.busstop_list[k].arr_bus_load) / np.mean(self.busstop_list[k].arr_bus_load)):
AOD.append(0)
else:
AOD.append(np.var(self.busstop_list[k].arr_bus_load) / np.mean(self.busstop_list[k].arr_bus_load))
except:
AOD.append(0.)
try:
AWT.append(np.mean(stop_wise_wait[k]))
except:
AWT.append(0.)
log['sto'] = AOD
log['AOD'] = np.mean(AOD)
if train==0 :
print('AWT:%g'%(np.mean(wait_cost)))
print('AHD:%g' % (np.mean(AHD)))
print('AOD:%g' % (np.mean(AOD)))
print('headways_var:%g' % (np.sqrt(np.mean(list(headways_var.values())))))
log['arr_times'] = arr_times
return log
def close(self):
return
# update passengers when bus arriving at stops
def serve(self,bus,stop):
board_cost = 0
alight_cost = 0
board_pax = []
alight_pax = []
if bus!=None:
alight_pax = bus.pax_alight_fix(stop, self.pax_list)
for p in alight_pax:
self.pax_list[p].alight_time = self.simulation_step
bus.onboard_list.remove(p)
self.arr_pax_list[p] = self.pax_list[p]
alight_cost = len(alight_pax) * bus.alight_period
# boarding procedure
for d in stop.dest.keys():
new_arr = stop.pax_gen_od(bus, sim_step=self.simulation_step,dest_id=d)
if len(new_arr)==0:
continue
num = len(self.pax_list) + 1
for t in new_arr:
self.pax_list[num] = Passenger(id=num, origin=stop.id, arr_time=t)
self.pax_list[num].took_bus = bus.id
self.pax_list[num].route = bus.route_id
self.pax_list[num].dest= d
self.busstop_list[stop.id].waiting_list.append(num)
num += 1
pax_leave_stop = []
waitinglist = sorted(self.busstop_list[stop.id].waiting_list)[:]
for num in waitinglist:
# TODO: consider multi-line effects (i.e. a passenger who cannot board this bus may board a later bus serving the same destination)
if bus != None and self.pax_list[
num].route == bus.route_id:
self.pax_list[num].miss += 1
if bus != None and bus.capacity - len(bus.onboard_list) > 0 and self.pax_list[
num].route == bus.route_id:
self.pax_list[num].onboard_time = self.simulation_step
bus.onboard_list.append(num)
board_cost += bus.board_period
pax_leave_stop.append(num)
for num in pax_leave_stop:
self.busstop_list[stop.id].waiting_list.remove(num)
return alight_cost,board_cost
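# Note (added): serve() reports alighting and boarding times separately; the
# caller uses max(board_cost, alight_cost) as the dwell time, i.e. boarding
# and alighting are assumed to proceed in parallel through separate doors.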
def sim(self):
# update bus state
## dispatch bus
for bus_id, bus in self.bus_list.items():
if bus.is_dispatch==0 and bus.dispatch_time<=self.simulation_step:
bus.is_dispatch=1
if bus.is_virtual!=1:
bus.current_speed = bus.speed * np.random.randint(60, 120) / 100.
else:
bus.current_speed = bus.speed*0.8
self.dispatch_buslist[bus_id]=bus
if bus.is_dispatch==1 and len(self.dispatch_buslist[bus_id].left_stop)<=0:
bus.is_dispatch = -1
self.dispatch_buslist.pop(bus_id,None)
for bus_id,bus in self.dispatch_buslist.items():
if bus.backward_bus!=None and self.bus_list[bus.backward_bus].is_dispatch==-1:
bus.backward_bus=None
if bus.forward_bus!=None and self.bus_list[bus.forward_bus].is_dispatch==-1:
bus.forward_bus=None
## bus dynamic
for bus_id, bus in self.dispatch_buslist.items():
bus.serve_remain = max(bus.serve_remain - 1,0)
bus.hold_remain = max(bus.hold_remain - 1, 0)
if bus.is_virtual==1 and bus.arr==0 and abs(bus.loc[-1]-bus.stop_dist[bus.left_stop[0]])<bus.speed :
curr_stop = self.busstop_list[bus.left_stop[0]]
bus.hold_remain = 0
bus.serve_remain = 0
bus.pass_stop.append(curr_stop.id)
bus.left_stop = bus.left_stop[1:]
bus.arr = 1
### on-arrival
if bus.is_virtual==0 and bus.arr==0 and abs(bus.loc[-1]-bus.stop_dist[bus.left_stop[0]])<bus.speed :
#### determine boarding and alight cost
if bus.left_stop[0] not in self.busstop_list:
self.busstop_list[bus.left_stop[0]] = self.busstop_list[bus.left_stop[0].split('_')[0]]
curr_stop = self.busstop_list[bus.left_stop[0]]
self.busstop_list[bus.left_stop[0]].arr_bus_load.append(len(bus.onboard_list))
if bus.route_id in self.busstop_list[curr_stop.id].arr_log:
self.busstop_list[curr_stop.id].arr_log[bus.route_id].append(self.simulation_step)#([bus.id, self.simulation_step])
else:
self.busstop_list[curr_stop.id].arr_log[bus.route_id] =[self.simulation_step]# [[bus.id, self.simulation_step]]
alight_cost, board_cost = self.serve(bus, curr_stop)
bus.arr=1
bus.serve_remain = max(board_cost,alight_cost)+1.
bus.stay[curr_stop.id] = 1
bus.cost[curr_stop.id] = bus.serve_remain
bus.pass_stop.append(curr_stop.id)
bus.left_stop = bus.left_stop[1:]
## if determine holding once arriving
if self.hold_once_arr==1 and len(bus.pass_stop)>1 and self.dispatch_times[bus.route_id].index(bus.dispatch_time)>0 :#and len(self.dispatch_buslist)>2 and len(bus.pass_stop)>2 and len(bus.left_stop)>1 and bus.forward_bus!=None:
if self.simulation_step in self.arrivals:
self.arrivals[self.simulation_step].append([curr_stop.id, bus_id, len(bus.onboard_list)])
else:
self.arrivals[self.simulation_step] = [[curr_stop.id, bus_id, len(bus.onboard_list)]]
bus.hold_remain = self.control(bus, curr_stop,type=self.control_type)
if bus.hold_remain > 0:
bus.stay[curr_stop.id] = 1
if bus.hold_remain<10:
bus.hold_remain = 0
bus.hold_cost[curr_stop.id] = bus.hold_remain
bus.is_hold = 1
if bus.hold_remain>0 or bus.serve_remain>0:
bus.stop()
else:
if self.is_allow_overtake == 1:
bus.dep()
else:
if bus.forward_bus in self.dispatch_buslist and bus.speed+bus.loc[-1]>=self.dispatch_buslist[bus.forward_bus].loc[-1]:
bus.stop()
bus.current_speed = bus.speed * | np.random.randint(60, 120) | numpy.random.randint |
from timeit import default_timer as timer
from filterpy.kalman import KalmanFilter
import numpy as np
def one_iter():
num_max = 1 # the maximum number of objects assumed to be detected in each frame
num_obj = 80 # the total number of classes of objects that can be recognized by the recognition alg
iC = 0 # relative index of c, the confidence score of the object recognized, taking value in [0, 1]
iX = 1 # relative index of x, the x-position of the center of the bounding box
iY = 2 # relative index of y, the y-position of the center of the bounding box
iW = 3 # relative index of w, the width of the bounding box
iH = 4 # relative index of h, the height of the bounding box
dim = num_max * num_obj * (iH + 1) # the dimension of the state vector
f = KalmanFilter(dim_x=dim, dim_z=dim, dim_u=2)
initial_state = np.zeros((dim, 1)) # TODO: change this
f.x = initial_state
f.F = np.eye(dim) # state transition matrix
f.H = np.eye(dim) # the measurement function
B = np.zeros((dim, 2))
for i in range(num_obj * num_max):
start_index = i * (iH + 1)
x_index = start_index + iX
y_index = start_index + iY
B[x_index][0] = 1
B[y_index][1] = 1
f.B = B # control transition matrix
f.predict(u=np.array([[2], [3]]))
obs = np.zeros(400)
obs[275] = 0.88
obs[276] = 200
obs[277] = 300
obs[278] = 63
obs[279] = 27
f.update(z=obs)
def two_iter():
num_max = 2 # the maximum number of objects assumed to be detected in each frame
num_obj = 80 # the total number of classes of objects that can be recognized by the recognition alg
iC = 0 # relative index of c, the confidence score of the object recognized, taking value in [0, 1]
iX = 1 # relative index of x, the x-position of the center of the bounding box
iY = 2 # relative index of y, the y-position of the center of the bounding box
iW = 3 # relative index of w, the width of the bounding box
iH = 4 # relative index of h, the height of the bounding box
dim = num_max * num_obj * (iH + 1) # the dimension of the state vector
f = KalmanFilter(dim_x=dim, dim_z=dim, dim_u=2)
initial_state = np.zeros((dim, 1)) # TODO: change this
f.x = initial_state
f.F = np.eye(dim) # state transition matrix
f.H = np.eye(dim) # the measurement function
B = | np.zeros((dim, 2)) | numpy.zeros |
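# Hedged addition (not in the original excerpt): `timer` is imported at the
# top but never used in the lines shown. A minimal timing harness consistent
# with those imports might look like this:
if __name__ == "__main__":
    start = timer()
    one_iter()
    print("one_iter: {:.3f} s".format(timer() - start))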
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
def sigmoide(X):
return 1/(1+np.exp(-X))
def fun(a3, etiq):
return np.argmax(a3) + 1 == etiq
data = loadmat("ex3data1.mat")
X = data['X']
Y = data['y']
Y = Y.astype(int)
m = np.shape(X)[0]
X = np.hstack([np.ones([m,1]), X])
weights = loadmat("ex3weights.mat")
theta1, theta2 = weights["Theta1"], weights["Theta2"]
a1 = X
z2 = np.dot(theta1, np.transpose(a1))
a2 = sigmoide(z2)
a2 = np.vstack((np.ones(np.shape(a2)[1]), a2))
z3 = | np.dot(theta2, a2) | numpy.dot |
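# Hedged sketch (the excerpt stops here): the usual continuation of this
# Coursera ex3 forward pass applies the sigmoid to z3 and reports accuracy:
#
#   a3 = sigmoide(z3)
#   hits = sum(fun(a3[:, i], Y[i, 0]) for i in range(m))
#   print('accuracy: {:.2%}'.format(hits / m))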
#------------------------------------------Single Rectangle detection-------------------------------------------#
## Adapted from https://github.com/jrieke/shape-detection by <NAME>
# Import libraries:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import time
import os
# Import tensorflow to use GPUs on keras:
import tensorflow as tf
# Set keras with GPUs
import keras
config = tf.ConfigProto( device_count = {'GPU': 1 , 'CPU': 12} )
sess = tf.Session(config=config)
keras.backend.set_session(sess)
# Import keras tools:
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import SGD
# Create images with random rectangles and bounding boxes:
num_imgs = 50000
img_size = 8
min_object_size = 1
max_object_size = 4
num_objects = 1
bboxes = np.zeros((num_imgs, num_objects, 4))
imgs = np.zeros((num_imgs, img_size, img_size)) # set background to 0
# Generating random images and bounding boxes:
for i_img in range(num_imgs):
for i_object in range(num_objects):
w, h = np.random.randint(min_object_size, max_object_size, size=2) # bbox width (w) and height (h)
x = np.random.randint(0, img_size - w) # bbox x lower left corner coordinate
y = np.random.randint(0, img_size - h) # bbox y lower left corner coordinate
imgs[i_img, x:x+w, y:y+h] = 1. # set rectangle to 1
bboxes[i_img, i_object] = [x, y, w, h] # store coordinates
# Lets plot one example of generated image:
i = 0
plt.imshow(imgs[i].T, cmap='Greys', interpolation='none', origin='lower', extent=[0, img_size, 0, img_size])
for bbox in bboxes[i]:
plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r', fc='none'))
## Obs:
# - The transpose was done so that both plt functions display the image correctly
# - extent is the size of the image
# - ec is the color of the border of the bounding box
# - fc is to avoid any coloured background of the bounding box
# Display plot:
# plt.show()
# Reshape (stack rows horizontally) and normalize the image data to mean 0 and std 1:
X = (imgs.reshape(num_imgs, -1) - np.mean(imgs)) / np.std(imgs)
X.shape, np.mean(X), np.std(X)
# Normalize x, y, w, h by img_size, so that all values are between 0 and 1:
# Important: Do not shift to negative values (e.g. by setting to mean 0)
#----------- because the IOU calculation needs positive w and h
y = bboxes.reshape(num_imgs, -1) / img_size
y.shape, np.mean(y), np.std(y)
# Split training and test:
i = int(0.8 * num_imgs)
train_X = X[:i]
test_X = X[i:]
train_y = y[:i]
test_y = y[i:]
test_imgs = imgs[i:]
test_bboxes = bboxes[i:]
# Build the model:
model = Sequential([Dense(200, input_dim=X.shape[-1]),
Activation('relu'),
Dropout(0.2),
Dense(y.shape[-1])])
model.compile('adadelta', 'mse')
# Fit the model:
tic = time.time()
model.fit(train_X, train_y, nb_epoch=30, validation_data=(test_X, test_y), verbose=2)
toc = time.time() - tic
print(toc)
# Predict bounding boxes on the test images:
pred_y = model.predict(test_X)
pred_bboxes = pred_y * img_size
pred_bboxes = pred_bboxes.reshape(len(pred_bboxes), num_objects, -1)
pred_bboxes.shape
# Function to define the intersection over the union of the bounding boxes pair:
def IOU(bbox1, bbox2):
'''Calculate overlap between two bounding boxes [x, y, w, h]
as the area of intersection over the area of unity'''
x1, y1, w1, h1 = bbox1[0], bbox1[1], bbox1[2], bbox1[3]
x2, y2, w2, h2 = bbox2[0], bbox2[1], bbox2[2], bbox2[3]
w_I = min(x1 + w1, x2 + w2) - max(x1, x2)
h_I = min(y1 + h1, y2 + h2) - max(y1, y2)
if w_I <= 0 or h_I <= 0: # no overlap
return 0
else:
I = w_I * h_I
U = w1 * h1 + w2 * h2 - I
return I / U
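# Worked example (added): for bbox1 = [0, 0, 2, 2] and bbox2 = [1, 1, 2, 2]
#   w_I = min(0+2, 1+2) - max(0, 1) = 1 and h_I = 1, so I = 1
#   U = 2*2 + 2*2 - 1 = 7, so IOU = 1/7 ~ 0.143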
# Show a few images and predicted bounding boxes from the test dataset.
os.chdir('/workdir/jp2476/repo/diversity-proj/files')
plt.figure(figsize=(12, 3))
for i_subplot in range(1, 5):
plt.subplot(1, 4, i_subplot)
i = np.random.randint(len(test_imgs))
plt.imshow(test_imgs[i].T, cmap='Greys',
interpolation='none',
origin='lower',
extent=[0, img_size, 0, img_size])
for pred_bbox, train_bbox in zip(pred_bboxes[i], test_bboxes[i]):
plt.gca().add_patch(matplotlib.patches.Rectangle((pred_bbox[0],
pred_bbox[1]),
pred_bbox[2],
pred_bbox[3],
ec='r', fc='none'))
plt.annotate('IOU: {:.2f}'.format(IOU(pred_bbox, train_bbox)),
(pred_bbox[0], pred_bbox[1]+pred_bbox[3]+0.2),
color='r')
# plt.savefig("simple_detection.pdf", dpi=150)
# plt.savefig("simple_detection.png", dpi=150)
plt.show()
plt.clf()
# Calculate the mean IOU (overlap) between the predicted and expected bounding boxes on the test dataset:
summed_IOU = 0.
for pred_bbox, test_bbox in zip(pred_bboxes.reshape(-1, 4), test_bboxes.reshape(-1, 4)):
summed_IOU += IOU(pred_bbox, test_bbox)
mean_IOU = summed_IOU / len(pred_bboxes)
mean_IOU
#-------------------------------------------Two Rectangle detection---------------------------------------------#
## Adapted from https://github.com/jrieke/shape-detection by <NAME>
# Import libraries:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import time
import os
# Import tensorflow to use GPUs on keras:
import tensorflow as tf
# Set keras with GPUs
import keras
config = tf.ConfigProto( device_count = {'GPU': 1 , 'CPU': 12} )
sess = tf.Session(config=config)
keras.backend.set_session(sess)
# Import keras tools:
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import SGD
# Create images with random rectangles and bounding boxes:
num_imgs = 50000
# Image parameters for simulation:
img_size = 8
min_rect_size = 1
max_rect_size = 4
num_objects = 2
# Initialize objects:
bboxes = np.zeros((num_imgs, num_objects, 4))
imgs = np.zeros((num_imgs, img_size, img_size))
# Generate images and bounding boxes:
for i_img in range(num_imgs):
for i_object in range(num_objects):
w, h = np.random.randint(min_rect_size, max_rect_size, size=2)
x = np.random.randint(0, img_size - w)
y = np.random.randint(0, img_size - h)
imgs[i_img, x:x+w, y:y+h] = 1.
bboxes[i_img, i_object] = [x, y, w, h]
# Get shapes:
imgs.shape, bboxes.shape
# Plot one example of generated images:
i = 0
plt.imshow(imgs[i].T, cmap='Greys', interpolation='none', origin='lower', extent=[0, img_size, 0, img_size])
for bbox in bboxes[i]:
plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r', fc='none'))
# plt.show()
# Reshape and normalize the data to mean 0 and std 1:
X = (imgs.reshape(num_imgs, -1) - np.mean(imgs)) / np.std(imgs)
X.shape, np.mean(X), np.std(X)
# Normalize x, y, w, h by img_size, so that all values are between 0 and 1:
# Important: Do not shift to negative values (e.g. by setting to mean 0),
#---------- because the IOU calculation needs positive w and h
y = bboxes.reshape(num_imgs, -1) / img_size
y.shape, np.mean(y), np.std(y)
# Function to define the intersection over the union of the bounding boxes pair:
def IOU(bbox1, bbox2):
'''Calculate overlap between two bounding boxes [x, y, w, h]
as the area of intersection over the area of unity'''
x1, y1, w1, h1 = bbox1[0], bbox1[1], bbox1[2], bbox1[3]
x2, y2, w2, h2 = bbox2[0], bbox2[1], bbox2[2], bbox2[3]
w_I = min(x1 + w1, x2 + w2) - max(x1, x2)
h_I = min(y1 + h1, y2 + h2) - max(y1, y2)
if w_I <= 0 or h_I <= 0: # no overlap
return 0
else:
I = w_I * h_I
U = w1 * h1 + w2 * h2 - I
return I / U
# Split training and test.
i = int(0.8 * num_imgs)
train_X = X[:i]
test_X = X[i:]
train_y = y[:i]
test_y = y[i:]
test_imgs = imgs[i:]
test_bboxes = bboxes[i:]
# Build the model.
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import SGD
model = Sequential([
Dense(256, input_dim=X.shape[-1]),
Activation('relu'),
Dropout(0.4),
Dense(y.shape[-1])
])
model.compile('adadelta', 'mse')
# Flip bboxes during training:
# Note: The validation loss is always quite big here because we don't flip the bounding boxes for
#------ the validation data
# Define the distance between the two bounding boxes:
def distance(bbox1, bbox2):
return np.sqrt(np.sum( | np.square(bbox1[:2] - bbox2[:2]) | numpy.square |
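# Hedged sketch (added; the distance helper above is truncated by the excerpt):
# in the original tutorial this distance is used during training to flip the
# target boxes so each predicted box is matched to its nearest ground truth.
# A minimal version of that matching, assuming two boxes per image laid out
# as [x1, y1, w1, h1, x2, y2, w2, h2], might look like:
def flipped_target(pred_row, target_row):
    # total centre distance for the straight pairing vs. the swapped pairing
    d_straight = distance(pred_row[:4], target_row[:4]) + distance(pred_row[4:], target_row[4:])
    d_swapped = distance(pred_row[:4], target_row[4:]) + distance(pred_row[4:], target_row[:4])
    # keep whichever assignment of targets to predictions is closer overall
    if d_swapped < d_straight:
        return np.concatenate([target_row[4:], target_row[:4]])
    return target_row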
"""Softmax.
For a given scores as n-dimensional array of where each column represents a sample
the softmax should return the probabilities of same n-dimensional array with same shape.
The probabilities for each sample (column) must sum to 1
Softmax Function S(yi) = exp(yi) / Sigsum(exp(yi))
"""
scores = [3.0, 1.0, 0.2]
import numpy as np
import math
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
# The longest way, some bug in code even though the approach is same to calculate softmax
"""
probabilities_array = list()
x_contains_array_el = False
x_denom = 0
for ele in x:
if isinstance(ele, np.ndarray):
x_contains_array_el = True
tmp_probabilities_array = list()
tmp_el_denom = 0
for el in ele:
tmp_el_denom += math.exp(el)
for el in ele:
tmp_prb = math.exp(el) / tmp_el_denom
tmp_probabilities_array.append(tmp_prb)
probabilities_array.append(tmp_probabilities_array)
# Assuming either it is just a single value list or its a pure numpy array with defined shape.
else:
#x_denom += math.exp(ele)
x_denom += ele
if not x_contains_array_el:
for ele in x:
probability = math.exp(ele) / x_denom
probabilities_array.append(probability)
return np.array(probabilities_array)
"""
# The shortest and most efficient way, as per the tutorial.
return | np.exp(x) | numpy.exp |
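# Added note (hedged): the one-liner above can overflow for large scores. A
# numerically stable variant subtracts the column-wise maximum first; the
# result is unchanged because softmax(x) == softmax(x - c) for any constant c.
def softmax_stable(x):
    z = np.exp(x - np.max(x, axis=0))  # shift so the largest exponent is 0
    return z / np.sum(z, axis=0)       # each sample (column) sums to 1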
import numpy as np
import math
import functools as fu
import cv2
import random as rand
def transform_points(m, points):
""" It transforms the given point/points using the given transformation matrix.
:param points: numpy array, list
The point/points to be transformed given the transformation matrix.
:param m: An 3x3 matrix
The transformation matrix which will be used for the transformation.
:return: The transformed point/points.
"""
ph = make_homogeneous(points).T
ph = m @ ph
return make_euclidean(ph.T)
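# Worked example (added): a 2-D point (3, 4) becomes (3, 4, 1) in homogeneous
# coordinates; after multiplying by a 3x3 transform, dividing by the last
# component recovers the Euclidean point. For a pure translation by (tx, ty):
#
#   [[1, 0, tx],   [[3],    [[3 + tx],
#    [0, 1, ty], @  [4],  =  [4 + ty],
#    [0, 0, 1 ]]    [1]]     [  1   ]]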
def transform_image(image, m):
""" It transforms the given image using the given transformation matrix.
:param image: An image
The image to be transformed given the transformation matrix.
:param m: A 3x3 matrix
The transformation matrix which will be used for the transformation.
:return: The transformed image.
"""
row, col, _ = image.shape
return cv2.warpPerspective(image, m, (col, row))
def make_homogeneous(points):
""" It converts the given point/points in an euclidean coordinates into a homogeneous coordinate
:param points: numpy array, list
The point/points to be converted into a homogeneous coordinate.
:return: The converted point/points in the homogeneous coordinates.
"""
if isinstance(points, list):
points = np.asarray([points], dtype=np.float64)
return np.hstack((points, | np.ones((points.shape[0], 1), dtype=points.dtype) | numpy.ones |
# Copyright 2019 <NAME>. All rights reserved.
# Copyright 2019 DATA Lab at Texas A&M University. All rights reserved.
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' Neural Fictitious Self-Play (NFSP) agent implemented in TensorFlow.
See the paper https://arxiv.org/abs/1603.01121 for more details.
'''
import collections
import enum
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from rlcard.agents.dqn_agent_pytorch import DQNAgent
from rlcard.agents.nfsp_agent import ReservoirBuffer
from rlcard.utils.utils import remove_illegal
Transition = collections.namedtuple('Transition', 'info_state action_probs')
MODE = enum.Enum('mode', 'best_response average_policy')
class NFSPAgent(object):
''' An approximate clone of rlcard.agents.nfsp_agent that uses
pytorch instead of tensorflow. Note that this implementation
differs from Henrich and Silver (2016) in that the supervised
training minimizes cross-entropy with respect to the stored
action probabilities rather than the realized actions.
'''
def __init__(self,
scope,
action_num=4,
state_shape=None,
hidden_layers_sizes=None,
reservoir_buffer_capacity=int(1e6),
anticipatory_param=0.1,
batch_size=256,
train_every=1,
rl_learning_rate=0.1,
sl_learning_rate=0.005,
min_buffer_size_to_learn=1000,
q_replay_memory_size=30000,
q_replay_memory_init_size=1000,
q_update_target_estimator_every=1000,
q_discount_factor=0.99,
q_epsilon_start=0.06,
q_epsilon_end=0,
q_epsilon_decay_steps=int(1e6),
q_batch_size=256,
q_train_every=1,
q_mlp_layers=None,
evaluate_with='average_policy',
device=None):
''' Initialize the NFSP agent.
Args:
scope (string): The name scope of NFSPAgent.
action_num (int): The number of actions.
state_shape (list): The shape of the state space.
hidden_layers_sizes (list): The hidden layers sizes for the layers of
the average policy.
reservoir_buffer_capacity (int): The size of the buffer for average policy.
anticipatory_param (float): The hyper-parameter that balances rl/avarage policy.
batch_size (int): The batch_size for training average policy.
train_every (int): Train the SL policy every X steps.
rl_learning_rate (float): The learning rate of the RL agent.
sl_learning_rate (float): the learning rate of the average policy.
min_buffer_size_to_learn (int): The minimum buffer size to learn for average policy.
q_replay_memory_size (int): The memory size of inner DQN agent.
q_replay_memory_init_size (int): The initial memory size of inner DQN agent.
q_update_target_estimator_every (int): The frequency of updating target network for
inner DQN agent.
q_discount_factor (float): The discount factor of inner DQN agent.
q_epsilon_start (float): The starting epsilon of inner DQN agent.
q_epsilon_end (float): the end epsilon of inner DQN agent.
q_epsilon_decay_steps (int): The decay steps of inner DQN agent.
q_batch_size (int): The batch size of inner DQN agent.
q_train_every (int): Train the model every X steps.
q_mlp_layers (list): The layer sizes of inner DQN agent.
device (torch.device): Whether to use the cpu or gpu
'''
self.use_raw = False
self._scope = scope
self._action_num = action_num
self._state_shape = state_shape
self._layer_sizes = hidden_layers_sizes + [action_num]
self._batch_size = batch_size
self._train_every = train_every
self._sl_learning_rate = sl_learning_rate
self._anticipatory_param = anticipatory_param
self._min_buffer_size_to_learn = min_buffer_size_to_learn
self._reservoir_buffer = ReservoirBuffer(reservoir_buffer_capacity)
self._prev_timestep = None
self._prev_action = None
self.evaluate_with = evaluate_with
if device is None:
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
else:
self.device = device
# Total timesteps
self.total_t = 0
# Step counter to keep track of learning.
self._step_counter = 0
# Build the action-value network
self._rl_agent = DQNAgent(scope+'_dqn', q_replay_memory_size, q_replay_memory_init_size, \
q_update_target_estimator_every, q_discount_factor, q_epsilon_start, q_epsilon_end, \
q_epsilon_decay_steps, q_batch_size, action_num, state_shape, q_train_every, q_mlp_layers, \
rl_learning_rate, device)
# Build the average policy supervised model
self._build_model()
self.sample_episode_policy()
def _build_model(self):
''' Build the average policy network
'''
# configure the average policy network
policy_network = AveragePolicyNetwork(self._action_num, self._state_shape, self._layer_sizes)
policy_network = policy_network.to(self.device)
self.policy_network = policy_network
self.policy_network.eval()
# xavier init
for p in self.policy_network.parameters():
if len(p.data.shape) > 1:
nn.init.xavier_uniform_(p.data)
# configure optimizer
self.policy_network_optimizer = torch.optim.Adam(self.policy_network.parameters(), lr=self._sl_learning_rate)
def feed(self, ts):
''' Feed data to inner RL agent
Args:
ts (list): A list of 5 elements that represent the transition.
'''
self._rl_agent.feed(ts)
self.total_t += 1
if self.total_t>0 and len(self._reservoir_buffer) >= self._min_buffer_size_to_learn and self.total_t%self._train_every == 0:
sl_loss = self.train_sl()
print('\rINFO - Agent {}, step {}, sl-loss: {}'.format(self._scope, self.total_t, sl_loss), end='')
def step(self, state):
''' Returns the action to be taken.
Args:
state (dict): The current state
Returns:
action (int): An action id
'''
obs = state['obs']
legal_actions = state['legal_actions']
if self._mode == MODE.best_response:
probs = self._rl_agent.predict(obs)
self._add_transition(obs, probs)
elif self._mode == MODE.average_policy:
probs = self._act(obs)
probs = remove_illegal(probs, legal_actions)
action = np.random.choice(len(probs), p=probs)
return action
def eval_step(self, state):
''' Use the average policy for evaluation purpose
Args:
state (dict): The current state.
Returns:
action (int): An action id.
'''
if self.evaluate_with == 'best_response':
action, probs = self._rl_agent.eval_step(state)
elif self.evaluate_with == 'average_policy':
obs = state['obs']
legal_actions = state['legal_actions']
probs = self._act(obs)
probs = remove_illegal(probs, legal_actions)
action = np.random.choice(len(probs), p=probs)
else:
raise ValueError("'evaluate_with' should be either 'average_policy' or 'best_response'.")
return action, probs
def sample_episode_policy(self):
''' Sample average/best_response policy
'''
if np.random.rand() < self._anticipatory_param:
self._mode = MODE.best_response
else:
self._mode = MODE.average_policy
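# Added note: this implements the anticipatory dynamics of NFSP. With
# probability eta (self._anticipatory_param, default 0.1) the agent plays its
# RL best response and stores those decisions in the reservoir buffer;
# otherwise it plays the supervised average policy. Mixing the two is what
# drives the average policy towards an approximate Nash equilibrium.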
def _act(self, info_state):
''' Predict action probability givin the observation and legal actions
Not connected to computation graph
Args:
info_state (numpy.array): An obervation.
Returns:
action_probs (numpy.array): The predicted action probability.
'''
info_state = np.expand_dims(info_state, axis=0)
info_state = torch.from_numpy(info_state).float().to(self.device)
with torch.no_grad():
log_action_probs = self.policy_network(info_state).cpu().numpy()
action_probs = np.exp(log_action_probs)[0]
return action_probs
def _add_transition(self, state, probs):
''' Adds the new transition to the reservoir buffer.
Transitions are in the form (state, probs).
Args:
state (numpy.array): The state.
probs (numpy.array): The probabilities of each action.
'''
transition = Transition(
info_state=state,
action_probs=probs)
self._reservoir_buffer.add(transition)
def train_sl(self):
''' Compute the loss on sampled transitions and perform a avg-network update.
If there are not enough elements in the buffer, no loss is computed and
`None` is returned instead.
Returns:
loss (float): The average loss obtained on this batch of transitions or `None`.
'''
if (len(self._reservoir_buffer) < self._batch_size or
len(self._reservoir_buffer) < self._min_buffer_size_to_learn):
return None
transitions = self._reservoir_buffer.sample(self._batch_size)
info_states = [t.info_state for t in transitions]
action_probs = [t.action_probs for t in transitions]
self.policy_network_optimizer.zero_grad()
self.policy_network.train()
# (batch, state_size)
info_states = torch.from_numpy( | np.array(info_states) | numpy.array |
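# Hedged sketch (the excerpt is truncated here): the remainder of train_sl in
# the original agent minimises the cross-entropy between the network's
# log-probabilities and the stored action probabilities, roughly:
#
#   action_probs = torch.from_numpy(np.array(action_probs)).float().to(self.device)
#   log_forecast = self.policy_network(info_states)
#   ce_loss = -(action_probs * log_forecast).sum(dim=-1).mean()
#   ce_loss.backward()
#   self.policy_network_optimizer.step()
#   self.policy_network.eval()
#   return ce_loss.item()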
#!/usr/bin/env python3
"""
Copyright (c) 2018-2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import argparse
import csv
import platform
import numpy as np
import colorspacious
parser = argparse.ArgumentParser(
description="Sort color sets by HCL (hue, chroma, luminance) [CAM02-UCS based].",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"input", metavar="INPUT", help="Color sets to be sorted (space separated)"
)
args = parser.parse_args()
with open(args.input) as csv_file:
with open(args.input.split(".")[0] + "_hcl_sorted.txt", "w") as outfile:
# Copy header rows
outfile.write(csv_file.readline())
outfile.write(csv_file.readline())
outfile.write(csv_file.readline())
# Record environment
outfile.write("# Python " + platform.sys.version.replace("\n", "") + "\n")
outfile.write(
f"# NumPy {np.__version__}, Colorspacious {colorspacious.__version__}\n"
)
csv_reader = csv.reader(csv_file, delimiter=" ")
for row in csv_reader:
row = [i.strip() for i in row]
rgb = [(int(i[:2], 16), int(i[2:4], 16), int(i[4:], 16)) for i in row]
jab = [colorspacious.cspace_convert(i, "sRGB255", "CAM02-UCS") for i in rgb]
hcl = np.array(
[
[np.arctan2(i[2], i[1]), np.sqrt(i[1] ** 2 + i[2] ** 2), i[0]]
for i in jab
]
)
new_row = " ".join( | np.array(row) | numpy.array |
# -*- coding: utf-8 -*-
"""
author: <NAME>, University of Bristol, <EMAIL>
"""
import numpy as np
from derivative import derivative
def nominator(F_x, F_y, F_z, F_xx, F_xy, F_yy, F_yz, F_zz, F_xz):
m = np.array([[F_xx, F_xy, F_xz, F_x],
[F_xy, F_yy, F_yz, F_y],
[F_xz, F_yz, F_zz, F_z],
[F_x, F_y, F_z, 0]])
d = np.linalg.det(m)
return d
def denominator(F_x,F_y, F_z):
g = np.array([F_x,F_y,F_z])
mag_g = | np.linalg.norm(g) | numpy.linalg.norm |
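# Added note (hedged): nominator/denominator appear to implement the Gaussian
# curvature of an implicit surface F(x, y, z) = 0,
#   K = -det([[H(F), grad(F)], [grad(F)^T, 0]]) / |grad(F)|^4,
# where the 4x4 bordered Hessian is exactly the matrix m built in nominator()
# and mag_g is the gradient magnitude entering the denominator.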
import cv2
import numpy as np
img = cv2.imread("imori.jpg").astype(np.float32)
b = img[:,:,0].copy()
g = img[:,:,1].copy()
r = img[:,:,2].copy()
H,W,C = img.shape
gray=0.2126*r+0.7152*g+0.0722*b
gray=gray.astype(np.uint8)
maxX=0
for pt in range (1,255):
c0 = gray[np.where(gray<pt)]
m0 = | np.mean(c0) | numpy.mean |
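# Hedged sketch (the excerpt is truncated here): Otsu's method would continue
# by forming the other class and keeping the threshold that maximises the
# between-class variance sb = w0 * w1 * (m0 - m1)^2, roughly:
#
#   c1 = gray[np.where(gray >= pt)]
#   m1 = np.mean(c1) if len(c1) > 0 else 0.
#   w0, w1 = len(c0) / (H * W), len(c1) / (H * W)
#   sb = w0 * w1 * ((m0 - m1) ** 2)
#   if sb > max_sb:  # track the best threshold (maxX above)
#       max_sb, maxX = sb, pt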
import numpy as np
import os
import sys
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from utils.kalman import SwitchingKalmanState, SwitchingKalmanFilter, KalmanFilter, KalmanState
from utils.kalman.models import NDCWPA, NDBrownian, RandAcc
import matplotlib.pyplot as plt
# Generate toyexample data
K_pos = 7.0
n_pts = 1000
pos_min, pos_max = 0.1, 20.0
dx = (pos_max-pos_min)/n_pts
pos_ary = np.linspace(pos_min, pos_max, n_pts)
f_ary = 1000*(pos_ary-K_pos)
f_ary[f_ary<0] = 0
f_noise = f_ary + 5*np.random.randn(*f_ary.shape)
# evaluate single Kalman filter
model_name = 'RandAcc'
if model_name == 'NDCWPA':
state = KalmanState(mean=np.zeros(3), covariance=1.0 * np.eye(3), ord=3)
model = NDCWPA(dt=dx, q=2e-2, r=10.0, n_dim=1)
elif model_name == 'RandAcc':
state = KalmanState(mean=np.zeros(2), covariance=1.0 * np.eye(2), ord=2)
model = RandAcc(dt=dx, q=2e-2, r=10.0)
kalman = KalmanFilter(model=model)
filtered_states_kf = [state] * n_pts
for i in range(n_pts):
observation = f_noise[i]
state = kalman.filter(state, observation)
filtered_states_kf[i] = state
smoothed_states_kf = [state] * n_pts
for i in range(1, n_pts):
j = n_pts - 1 - i
state = kalman.smoother(filtered_states_kf[j], state)
smoothed_states_kf[j] = state
filtered_kf = np.asarray([state.x()[0] for state in filtered_states_kf])
filtered_df_kf = np.asarray([state.x()[1] for state in filtered_states_kf])
smoothed_kf = np.asarray([state.x()[0] for state in smoothed_states_kf])
print("smoothed kf shape:", smoothed_kf.shape)
np.save("f_ary.npy", f_ary)
np.save("pos_ary.npy", pos_ary)
np.save(model_name+"_f.npy", filtered_kf)
np.save(model_name+"_K.npy", filtered_df_kf)
sys.exit(0)
# evaluate switching Kalman filter
models = [
NDCWPA(dt=1.0, q=2e-2, r=10.0, n_dim=2),
NDBrownian(dt=1.0, q=2e-2, r=10.0, n_dim=2)
]
Z = np.log(np.asarray([
[0.99, 0.01],
[0.01, 0.99]
]))
masks = [
np.array([
np.diag([1, 0, 1, 0, 1, 0]),
np.diag([0, 1, 0, 1, 0, 1])
]),
np.array([
np.diag([1, 0]),
| np.diag([0, 1]) | numpy.diag |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 11:35:57 2015
@author: <NAME>, <NAME>, <NAME>
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import numpy as np
from numpy import exp, abs, sqrt, sum, real, imag, arctan2, append
from scipy.optimize import minimize
def SHOfunc(parms, w_vec):
"""
Generates the SHO response over the given frequency band
Parameters
-----------
parms : list or tuple
SHO parae=(A,w0,Q,phi)
w_vec : 1D numpy array
Vector of frequency values
"""
return parms[0] * exp(1j * parms[3]) * parms[1] ** 2 / \
(w_vec ** 2 - 1j * w_vec * parms[1] / parms[2] - parms[1] ** 2)
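# Added note: the return above evaluates the complex simple-harmonic-
# oscillator response
#   S(w) = A * exp(i * phi) * w0**2 / (w**2 - 1j * w * w0 / Q - w0**2)
# with parms = (A, w0, Q, phi): amplitude, resonance frequency, quality
# factor and phase.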
def SHOfit(parms, w_vec, resp_vec):
"""
Cost function minimization of SHO fitting
Parameters
-----------
parms : list or tuple
SHO parameters=(A,w0,Q,phi)
w_vec : 1D numpy array
Vector of frequency values
resp_vec : 1D complex numpy array or list
Cantilever response vector as a function of frequency
"""
# Cost function to minimize.
cost = lambda p: np.sum((np.abs(SHOfunc(p, w_vec)) - np.abs(resp_vec)) ** 2)
popt = minimize(cost, parms, method='TNC', options={'disp':False})
return popt.x
def SHOestimateGuess(resp_vec, w_vec, num_points=5):
"""
Generates good initial guesses for fitting
Parameters
------------
w_vec : 1D numpy array or list
Vector of BE frequencies
resp_vec : 1D complex numpy array or list
BE response vector as a function of frequency
num_points : (Optional) unsigned int
Number of data points to use when generating the initial guess
Returns
---------
retval : tuple
SHO fit parameters arranged as amplitude, frequency, quality factor, phase
"""
ii = np.argsort(abs(resp_vec))[::-1]
a_mat = | np.array([]) | numpy.array |
# From http://www.hs.uni-hamburg.de/DE/Ins/Per/Czesla/PyA/PyA/pyaslDoc/aslDoc/unredDoc.html
import numpy as np
import scipy.interpolate as interpolate
def unred(wave, flux, ebv, R_V=3.1, LMC2=False, AVGLMC=False):
"""
Deredden a flux vector using the Fitzpatrick (1999) parameterization
Parameters
----------
wave : array
Wavelength in Angstrom
flux : array
Calibrated flux vector, same number of elements as wave.
ebv : float
Color excess E(B-V). If a negative ebv is supplied,
then fluxes will be reddened rather than dereddened.
R_V : float, optional
Ratio of total to selective extinction, R(V) = A(V)/E(B-V).
The default is 3.1.
AVGLMC : boolean
If True, then the default fit parameters c1,c2,c3,c4,gamma,x0
are set to the average values determined for reddening in the
general Large Magellanic Cloud (LMC) field by
Misselt et al. (1999, ApJ, 515, 128). The default is
False.
LMC2 : boolean
If True, the fit parameters are set to the values determined
for the LMC2 field (including 30 Dor) by Misselt et al.
Note that neither `AVGLMC` nor `LMC2` will alter the default value
of R_V, which is poorly known for the LMC.
Returns
-------
new_flux : array
Dereddened flux vector, same units and number of elements
as input flux.
Notes
-----
.. note:: This function was ported from the IDL Astronomy User's Library.
:IDL - Documentation:
PURPOSE:
Deredden a flux vector using the Fitzpatrick (1999) parameterization
EXPLANATION:
The R-dependent Galactic extinction curve is that of Fitzpatrick & Massa
(Fitzpatrick, 1999, PASP, 111, 63; astro-ph/9809387 ).
Parameterization is valid from the IR to the far-UV (3.5 microns to 0.1
microns). UV extinction curve is extrapolated down to 912 Angstroms.
CALLING SEQUENCE:
FM_UNRED, wave, flux, ebv, [ funred, R_V = , /LMC2, /AVGLMC, ExtCurve=
gamma =, x0=, c1=, c2=, c3=, c4= ]
INPUT:
WAVE - wavelength vector (Angstroms)
FLUX - calibrated flux vector, same number of elements as WAVE
If only 3 parameters are supplied, then this vector will
updated on output to contain the dereddened flux.
EBV - color excess E(B-V), scalar. If a negative EBV is supplied,
then fluxes will be reddened rather than dereddened.
OUTPUT:
FUNRED - unreddened flux vector, same units and number of elements
as FLUX
OPTIONAL INPUT KEYWORDS
R_V - scalar specifying the ratio of total to selective extinction
R(V) = A(V) / E(B - V). If not specified, then R = 3.1
Extreme values of R(V) range from 2.3 to 5.3
/AVGLMC - if set, then the default fit parameters c1,c2,c3,c4,gamma,x0
are set to the average values determined for reddening in the
general Large Magellanic Cloud (LMC) field by Misselt et al.
(1999, ApJ, 515, 128)
/LMC2 - if set, then the fit parameters are set to the values determined
for the LMC2 field (including 30 Dor) by Misselt et al.
Note that neither /AVGLMC or /LMC2 will alter the default value
of R_V which is poorly known for the LMC.
The following five input keyword parameters allow the user to customize
the adopted extinction curve. For example, see Clayton et al. (2003,
ApJ, 588, 871) for examples of these parameters in different interstellar
environments.
x0 - Centroid of 2200 A bump in microns (default = 4.596)
gamma - Width of 2200 A bump in microns (default =0.99)
c3 - Strength of the 2200 A bump (default = 3.23)
c4 - FUV curvature (default = 0.41)
c2 - Slope of the linear UV extinction component
(default = -0.824 + 4.717/R)
c1 - Intercept of the linear UV extinction component
               (default = 2.030 - 3.007*c2)
"""
x = 10000./ wave # Convert to inverse microns
curve = x*0.
# Set some standard values:
x0 = 4.596
gamma = 0.99
c3 = 3.23
c4 = 0.41
c2 = -0.824 + 4.717/R_V
c1 = 2.030 - 3.007*c2
if LMC2:
x0 = 4.626
gamma = 1.05
c4 = 0.42
c3 = 1.92
c2 = 1.31
c1 = -2.16
elif AVGLMC:
x0 = 4.596
gamma = 0.91
c4 = 0.64
c3 = 2.73
c2 = 1.11
c1 = -1.28
# Compute UV portion of A(lambda)/E(B-V) curve using FM fitting function and
# R-dependent coefficients
xcutuv = np.array([10000.0/2700.0])
xspluv = 10000.0/np.array([2700.0,2600.0])
iuv = np.where(x >= xcutuv)[0]
N_UV = len(iuv)
iopir = np.where(x < xcutuv)[0]
Nopir = len(iopir)
if (N_UV > 0): xuv = np.concatenate((xspluv,x[iuv]))
else: xuv = xspluv
yuv = c1 + c2*xuv
yuv = yuv + c3*xuv**2/((xuv**2-x0**2)**2 +(xuv*gamma)**2)
yuv = yuv + c4*(0.5392*(np.maximum(xuv,5.9)-5.9)**2+0.05644*(np.maximum(xuv,5.9)-5.9)**3)
yuv = yuv + R_V
yspluv = yuv[0:2] # save spline points
if (N_UV > 0): curve[iuv] = yuv[2::] # remove spline points
# Compute optical portion of A(lambda)/E(B-V) curve
# using cubic spline anchored in UV, optical, and IR
xsplopir = np.concatenate(([0],10000.0/np.array([26500.0,12200.0,6000.0,5470.0,4670.0,4110.0])))
ysplir = np.array([0.0,0.26469,0.82925])*R_V/3.1
ysplop = np.array((np.polyval([-4.22809e-01, 1.00270, 2.13572e-04][::-1],R_V ),
np.polyval([-5.13540e-02, 1.00216, -7.35778e-05][::-1],R_V ),
np.polyval([ 7.00127e-01, 1.00184, -3.32598e-05][::-1],R_V ),
np.polyval([ 1.19456, 1.01707, -5.46959e-03, 7.97809e-04, -4.45636e-05][::-1],R_V ) ))
ysplopir = np.concatenate((ysplir,ysplop))
if (Nopir > 0):
tck = interpolate.splrep(np.concatenate((xsplopir,xspluv)),np.concatenate((ysplopir,yspluv)),s=0)
curve[iopir] = interpolate.splev(x[iopir], tck)
#Now apply extinction correction to input flux vector
curve *= ebv
return flux * 10.**(0.4*curve)
def A_lams(wave, A_V, R_V=3.1):
ebv = A_V/R_V
f = unred(wave, np.ones_like(wave), ebv, R_V=R_V)
A_lam = 2.5 * np.log10(f)
return A_lam
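# Usage sketch (illustrative values only; wavelengths in Angstrom):
#
#   wl = np.linspace(3000.0, 9000.0, 5)
#   fl_corr = unred(wl, np.ones_like(wl), ebv=0.1)  # deredden for E(B-V) = 0.1
#   print(A_lams(wl, A_V=0.31))                     # extinction A(lambda) in mag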
def main():
import matplotlib.pyplot as plt
nlam = 200
wl = np.linspace(4000, 10000, num=nlam)
    fl = np.ones((nlam,))
import numpy as np
from os.path import join
import tensorflow as tf
from random import sample
from tensorflow.keras import backend as K
print(tf.__version__)
class Conv2dRF(tf.keras.layers.Layer):
def __init__(self,
op_name,
fflp, # fixed filters load path
kernel_size,
nrsfkpc, # number of randomly selected filters per channel
c_in,
c_out,
kernel_initializer=tf.keras.initializers.glorot_uniform(seed=None),
kernel_regularizer=tf.keras.regularizers.l2(1.e-4),
strides=(1, 1, 1, 1),
padding='SAME',
data_format='channels_last',
bias_initializer=tf.zeros_initializer()):
super(Conv2dRF, self).__init__(name=op_name)
self.op_name = op_name
self.fflp = fflp
self.fixed_filters = np.load(self.fflp)
self.kernel_size = kernel_size
assert kernel_size[0] == np.shape(self.fixed_filters)[0] and kernel_size[1] == np.shape(self.fixed_filters)[1]
self.nrsfkpc = nrsfkpc # number of randomly selected fixed kernels per channel
self.c_in = c_in
self.c_out = c_out
self.kernel_initializer = kernel_initializer
self.kernel_regularizer = kernel_regularizer
self.strides = strides
self.padding = padding
if data_format == 'channels_last':
self.data_format = 'NHWC'
else:
self.data_format = 'NCHW'
self.bias_initializer = bias_initializer
def build_fixed_kernels(self, fixed_filters: np.ndarray, nrsfkpc: int, c_in: int, c_out: int) -> np.ndarray:
"""
:param fixed_filters: np.array of fixed kernels;
must be of shape [filters_height, filters_width, num_filters]
:param nrsfkpc: number of randomly selected fixed kernels per channel
:param c_in: number of input channels in the tf.nn.conv2d
:param c_out: number of output channels in the tf.nn.conv2d
:return:
"""
assert fixed_filters.ndim == 3 # dimension of fixed filters
h = fixed_filters.shape[0] # height of fixed filters
w = fixed_filters.shape[1] # width of fixed filters
nff = fixed_filters.shape[2] # number of fixed filters
channels = np.zeros((c_out, c_in, h, w, nrsfkpc))
for k in range(c_out):
for j in range(c_in):
channels[k, j] = fixed_filters[:, :, sample(range(0, nff), nrsfkpc)]
channels = np.float32(np.transpose(channels, (2, 3, 4, 1, 0)))
channels = tf.convert_to_tensor(channels)
return channels
def build(self, input_shape):
self.fixed_kernels = self.build_fixed_kernels(
fixed_filters=self.fixed_filters,
nrsfkpc=self.nrsfkpc,
c_in=self.c_in,
c_out=self.c_out)
self.coeff_matrix = self.add_variable(
name='w',
shape=[self.nrsfkpc, self.c_in, self.c_out],
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
trainable=True,
)
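        # The effective kernel is a learned linear combination of the nrsfkpc
        # fixed kernels selected for each (input, output) channel pair:
        # kernel[i, j, l, m] = sum_k fixed_kernels[i, j, k, l, m] * coeff_matrix[k, l, m]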
self.kernel = tf.einsum('ijklm,klm->ijlm', self.fixed_kernels, self.coeff_matrix)
if self.data_format == 'NHWC':
bias_shape = [self.c_out]
else:
bias_shape = [self.c_out, 1, 1]
self.bias = self.add_variable(
name='b',
shape=bias_shape,
initializer=self.bias_initializer,
trainable=True,)
def call(self, input):
return tf.nn.conv2d(
input=input,
filter=self.kernel,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
name='conv') + self.bias
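# Usage sketch ('filters.npy' is a hypothetical file holding an array of shape
# (3, 3, num_filters); only the mixing coefficients and biases are trainable):
#
#   layer = Conv2dRF(op_name='conv1', fflp='filters.npy', kernel_size=(3, 3),
#                    nrsfkpc=2, c_in=1, c_out=32)
#   y = layer(tf.zeros([8, 28, 28, 1]))  # NHWC input -> (8, 28, 28, 32)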
def main(_):
# hyper-parameters
# C_IN = 1 # number of input channels
# C_OUT = 32 # number of output channels
# NRSFKPC = 1 # number of randomly selected filters per channel
N1 = 64
N2 = 32
data_format = 'channels_first'
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
# 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
print(train_images.shape)
print(test_images.shape)
if data_format == 'channels_last':
        train_images = np.expand_dims(train_images, axis=-1)
"""
Usage Instructions:
10-shot sinusoid:
python main.py --datasource=sinusoid --logdir=logs/sine/ --metatrain_iterations=70000 --norm=None --update_batch_size=10
10-shot sinusoid baselines:
python main.py --datasource=sinusoid --logdir=logs/sine/ --pretrain_iterations=70000 --metatrain_iterations=0 --norm=None --update_batch_size=10 --baseline=oracle
python main.py --datasource=sinusoid --logdir=logs/sine/ --pretrain_iterations=70000 --metatrain_iterations=0 --norm=None --update_batch_size=10
5-way, 1-shot omniglot:
python main.py --datasource=omniglot --metatrain_iterations=60000 --meta_batch_size=32 --update_batch_size=1 --update_lr=0.4 --num_updates=1 --logdir=logs/omniglot5way/
20-way, 1-shot omniglot:
python main.py --datasource=omniglot --metatrain_iterations=60000 --meta_batch_size=16 --update_batch_size=1 --num_classes=20 --update_lr=0.1 --num_updates=5 --logdir=logs/omniglot20way/
5-way 1-shot mini imagenet:
python main.py --datasource=miniimagenet --metatrain_iterations=60000 --meta_batch_size=4 --update_batch_size=1 --update_lr=0.01 --num_updates=5 --num_classes=5 --logdir=logs/miniimagenet1shot/ --num_filters=32 --max_pool=True
5-way 5-shot mini imagenet:
python main.py --datasource=miniimagenet --metatrain_iterations=60000 --meta_batch_size=4 --update_batch_size=5 --update_lr=0.01 --num_updates=5 --num_classes=5 --logdir=logs/miniimagenet5shot/ --num_filters=32 --max_pool=True
To run evaluation, use the '--train=False' flag and the '--test_set=True' flag to use the test set.
    For omniglot and miniimagenet training, acquire the dataset online, put it in the corresponding data directory, and see the python script instructions in that directory to preprocess the data.
Note that better sinusoid results can be achieved by using a larger network.
"""
import csv
import numpy as np
import pickle
import random
import tensorflow as tf
import matplotlib.pyplot as plt
from data_generator import DataGenerator
from maml import MAML
from tensorflow.python.platform import flags
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
FLAGS = flags.FLAGS
## Dataset/method options
flags.DEFINE_string('datasource', 'sinusoid', 'sinusoid or omniglot or miniimagenet')
flags.DEFINE_integer('num_classes', 5, 'number of classes used in classification (e.g. 5-way classification).')
# oracle means task id is input (only suitable for sinusoid)
# flags.DEFINE_string('baseline', "oracle", 'oracle, or None')
flags.DEFINE_string('baseline', None, 'oracle, or None')
## Training options
flags.DEFINE_integer('pretrain_iterations', 0, 'number of pre-training iterations.')
flags.DEFINE_integer('metatrain_iterations', 15000, 'number of metatraining iterations.') # 15k for omniglot, 50k for sinusoid
flags.DEFINE_integer('meta_batch_size', 25, 'number of tasks sampled per meta-update')
flags.DEFINE_float('meta_lr', 0.001, 'the base learning rate of the generator')
flags.DEFINE_integer('update_batch_size', 5, 'number of examples used for inner gradient update (K for K-shot learning).')
flags.DEFINE_float('update_lr', 1e-3, 'step size alpha for inner gradient update.') # 0.1 for omniglot
# flags.DEFINE_float('update_lr', 1e-2, 'step size alpha for inner gradient update.') # 0.1 for omniglot
flags.DEFINE_integer('num_updates', 1, 'number of inner gradient updates during training.')
## Model options
flags.DEFINE_string('norm', 'batch_norm', 'batch_norm, layer_norm, or None')
flags.DEFINE_integer('num_filters', 64, 'number of filters for conv nets -- 32 for miniimagenet, 64 for omiglot.')
flags.DEFINE_bool('conv', True, 'whether or not to use a convolutional network, only applicable in some cases')
flags.DEFINE_bool('max_pool', False, 'Whether or not to use max pooling rather than strided convolutions')
flags.DEFINE_bool('stop_grad', False, 'if True, do not use second derivatives in meta-optimization (for speed)')
flags.DEFINE_float('keep_prob', 0.5, 'if not None, used as keep_prob for all layers')
flags.DEFINE_bool('drop_connect', True, 'if True, use dropconnect, otherwise, use dropout')
# flags.DEFINE_float('keep_prob', None, 'if not None, used as keep_prob for all layers')
## Logging, saving, and testing options
flags.DEFINE_bool('log', True, 'if false, do not log summaries, for debugging code.')
flags.DEFINE_string('logdir', '/tmp/data', 'directory for summaries and checkpoints.')
flags.DEFINE_bool('resume', False, 'resume training if there is a model available')
flags.DEFINE_bool('train', True, 'True to train, False to test.')
flags.DEFINE_integer('test_iter', -1, 'iteration to load model (-1 for latest model)')
flags.DEFINE_bool('test_set', False, 'Set to true to test on the the test set, False for the validation set.')
flags.DEFINE_integer('train_update_batch_size', -1, 'number of examples used for gradient update during training (use if you want to test with a different number).')
flags.DEFINE_float('train_update_lr', -1, 'value of inner gradient step step during training. (use if you want to test with a different value)') # 0.1 for omniglot
def train(model, saver, sess, exp_string, data_generator, resume_itr=0):
SUMMARY_INTERVAL = 100
SAVE_INTERVAL = 1000
if FLAGS.datasource == 'sinusoid':
PRINT_INTERVAL = 1000
TEST_PRINT_INTERVAL = PRINT_INTERVAL*5
else:
PRINT_INTERVAL = 100
TEST_PRINT_INTERVAL = PRINT_INTERVAL*5
if FLAGS.log:
train_writer = tf.summary.FileWriter(FLAGS.logdir + '/' + exp_string, sess.graph)
print('Done initializing, starting training.')
prelosses, postlosses = [], []
num_classes = data_generator.num_classes # for classification, 1 otherwise
multitask_weights, reg_weights = [], []
for itr in range(resume_itr, FLAGS.pretrain_iterations + FLAGS.metatrain_iterations):
feed_dict = {}
if 'generate' in dir(data_generator):
batch_x, batch_y, amp, phase = data_generator.generate()
if FLAGS.baseline == 'oracle':
batch_x = np.concatenate([batch_x, np.zeros([batch_x.shape[0], batch_x.shape[1], 2])], 2)
for i in range(FLAGS.meta_batch_size):
batch_x[i, :, 1] = amp[i]
batch_x[i, :, 2] = phase[i]
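            # Split each sampled task into a support set ('a': the first
            # num_classes*update_batch_size points, used for the inner-gradient
            # update) and a query set ('b': the rest, used for the meta-update).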
inputa = batch_x[:, :num_classes*FLAGS.update_batch_size, :]
labela = batch_y[:, :num_classes*FLAGS.update_batch_size, :]
inputb = batch_x[:, num_classes*FLAGS.update_batch_size:, :] # b used for testing
labelb = batch_y[:, num_classes*FLAGS.update_batch_size:, :]
feed_dict = {model.inputa: inputa, model.inputb: inputb, model.labela: labela, model.labelb: labelb}
if itr < FLAGS.pretrain_iterations:
input_tensors = [model.pretrain_op]
else:
input_tensors = [model.metatrain_op]
if (itr % SUMMARY_INTERVAL == 0 or itr % PRINT_INTERVAL == 0):
input_tensors.extend([model.summ_op, model.total_loss1, model.total_losses2[FLAGS.num_updates-1]])
if model.classification:
input_tensors.extend([model.total_accuracy1, model.total_accuracies2[FLAGS.num_updates-1]])
result = sess.run(input_tensors, feed_dict)
if itr % SUMMARY_INTERVAL == 0:
prelosses.append(result[-2])
if FLAGS.log:
train_writer.add_summary(result[1], itr)
postlosses.append(result[-1])
if (itr!=0) and itr % PRINT_INTERVAL == 0:
if itr < FLAGS.pretrain_iterations:
print_str = 'Pretrain Iteration ' + str(itr)
else:
print_str = 'Iteration ' + str(itr - FLAGS.pretrain_iterations)
print_str += ': ' + str(np.mean(prelosses)) + ', ' + str(np.mean(postlosses))
print(print_str)
prelosses, postlosses = [], []
if (itr!=0) and itr % SAVE_INTERVAL == 0:
saver.save(sess, FLAGS.logdir + '/' + exp_string + '/model' + str(itr))
# sinusoid is infinite data, so no need to test on meta-validation set.
if (itr!=0) and itr % TEST_PRINT_INTERVAL == 0 and FLAGS.datasource !='sinusoid':
if 'generate' not in dir(data_generator):
feed_dict = {}
if model.classification:
input_tensors = [model.metaval_total_accuracy1, model.metaval_total_accuracies2[FLAGS.num_updates-1], model.summ_op]
else:
input_tensors = [model.metaval_total_loss1, model.metaval_total_losses2[FLAGS.num_updates-1], model.summ_op]
else:
batch_x, batch_y, amp, phase = data_generator.generate(train=False)
inputa = batch_x[:, :num_classes*FLAGS.update_batch_size, :]
inputb = batch_x[:, num_classes*FLAGS.update_batch_size:, :]
labela = batch_y[:, :num_classes*FLAGS.update_batch_size, :]
labelb = batch_y[:, num_classes*FLAGS.update_batch_size:, :]
feed_dict = {model.inputa: inputa, model.inputb: inputb, model.labela: labela, model.labelb: labelb, model.meta_lr: 0.0}
if model.classification:
input_tensors = [model.total_accuracy1, model.total_accuracies2[FLAGS.num_updates-1]]
else:
input_tensors = [model.total_loss1, model.total_losses2[FLAGS.num_updates-1]]
result = sess.run(input_tensors, feed_dict)
print('Validation results: ' + str(result[0]) + ', ' + str(result[1]))
saver.save(sess, FLAGS.logdir + '/' + exp_string + '/model' + str(itr))
# calculated for omniglot
NUM_TEST_POINTS = 600
def generate_test():
batch_size = 2
num_points = 101
# amp = np.array([3, 5])
# phase = np.array([0, 2.3])
amp = np.array([5, 3])
phase = np.array([2.3, 0])
outputs = np.zeros([batch_size, num_points, 1])
init_inputs = np.zeros([batch_size, num_points, 1])
for func in range(batch_size):
init_inputs[func, :, 0] = np.linspace(-5, 5, num_points)
outputs[func] = amp[func] * np.sin(init_inputs[func] - phase[func])
if FLAGS.baseline == 'oracle': # NOTE - this flag is specific to sinusoid
init_inputs = np.concatenate([init_inputs, np.zeros([init_inputs.shape[0], init_inputs.shape[1], 2])], 2)
for i in range(batch_size):
init_inputs[i, :, 1] = amp[i]
init_inputs[i, :, 2] = phase[i]
return init_inputs, outputs, amp, phase
def test_line_limit_Baye(model, sess, exp_string, mc_simulation=20, points_train=10, random_seed=1999):
inputs_all, outputs_all, amp_test, phase_test = generate_test()
np.random.seed(random_seed)
index = np.random.choice(inputs_all.shape[1], [inputs_all.shape[0], points_train], replace=False)
inputs_a = np.zeros([inputs_all.shape[0], points_train, inputs_all.shape[2]])
outputs_a = np.zeros([outputs_all.shape[0], points_train, outputs_all.shape[2]])
for line in range(len(index)):
inputs_a[line] = inputs_all[line, index[line], :]
outputs_a[line] = outputs_all[line, index[line], :]
feed_dict_line = {model.inputa: inputs_a, model.inputb: inputs_all, model.labela: outputs_a, model.labelb: outputs_all, model.meta_lr: 0.0}
mc_prediction = []
for mc_iter in range(mc_simulation):
predictions_all = sess.run(model.outputbs, feed_dict_line)
mc_prediction.append(np.array(predictions_all))
print("total mc simulation: ", mc_simulation)
print("shape of predictions_all is: ", predictions_all[0].shape)
prob_mean = np.nanmean(mc_prediction, axis=0)
prob_variance = np.var(mc_prediction, axis=0)
for line in range(len(inputs_all)):
plt.figure()
plt.plot(inputs_all[line, ..., 0].squeeze(), outputs_all[line, ..., 0].squeeze(), "r-", label="ground_truth")
# for update_step in range(len(predictions_all)):
for update_step in [0, len(predictions_all)-1]:
X = inputs_all[line, ..., 0].squeeze()
mu = prob_mean[update_step][line, ...].squeeze()
uncertainty = np.sqrt(prob_variance[update_step][line, ...].squeeze())
plt.plot(X, mu, "--", label="update_step_{:d}".format(update_step))
plt.fill_between(X, mu + uncertainty, mu - uncertainty, alpha=0.1)
plt.legend()
out_figure = FLAGS.logdir + '/' + exp_string + '/' + 'test_ubs' + str(
FLAGS.update_batch_size) + '_stepsize' + str(FLAGS.update_lr) + 'line_{0:d}_numtrain_{1:d}_seed_{2:d}.png'.format(line, points_train, random_seed)
plt.plot(inputs_a[line, :, 0], outputs_a[line, :, 0], "b*", label="training points")
plt.savefig(out_figure, bbox_inches="tight", dpi=300)
plt.close()
def test_line_limit(model, sess, exp_string, num_train=10, random_seed=1999):
inputs_all, outputs_all, amp_test, phase_test = generate_test()
np.random.seed(random_seed)
index = np.random.choice(inputs_all.shape[1], [inputs_all.shape[0], num_train], replace=False)
inputs_a = np.zeros([inputs_all.shape[0], num_train, inputs_all.shape[2]])
outputs_a = np.zeros([outputs_all.shape[0], num_train, outputs_all.shape[2]])
for line in range(len(index)):
inputs_a[line] = inputs_all[line, index[line], :]
outputs_a[line] = outputs_all[line, index[line], :]
feed_dict_line = {model.inputa: inputs_a, model.inputb: inputs_all, model.labela: outputs_a, model.labelb: outputs_all, model.meta_lr: 0.0}
predictions_all = sess.run([model.outputas, model.outputbs], feed_dict_line)
print("shape of predictions_all is: ", predictions_all[0].shape)
for line in range(len(inputs_all)):
plt.figure()
plt.plot(inputs_all[line, ..., 0].squeeze(), outputs_all[line, ..., 0].squeeze(), "r-", label="ground_truth")
for update_step in range(len(predictions_all[1])):
plt.plot(inputs_all[line, ..., 0].squeeze(), predictions_all[1][update_step][line, ...].squeeze(), "--", label="update_step_{:d}".format(update_step))
plt.legend()
out_figure = FLAGS.logdir + '/' + exp_string + '/' + 'test_ubs' + str(
FLAGS.update_batch_size) + '_stepsize' + str(FLAGS.update_lr) + 'line_{0:d}_numtrain_{1:d}_seed_{2:d}.png'.format(line, num_train, random_seed)
plt.plot(inputs_a[line, :, 0], outputs_a[line, :, 0], "b*", label="training points")
plt.savefig(out_figure, bbox_inches="tight", dpi=300)
plt.close()
def test_line(model, sess, exp_string):
inputs_all, outputs_all, amp_test, phase_test = generate_test()
feed_dict_line = {model.inputa: inputs_all, model.inputb: inputs_all, model.labela: outputs_all, model.labelb: outputs_all, model.meta_lr: 0.0}
predictions_all = sess.run([model.outputas, model.outputbs], feed_dict_line)
print("shape of predictions_all is: ", predictions_all[0].shape)
for line in range(len(inputs_all)):
plt.figure()
plt.plot(inputs_all[line, ..., 0].squeeze(), outputs_all[line, ..., 0].squeeze(), "r-", label="ground_truth")
for update_step in range(len(predictions_all[1])):
plt.plot(inputs_all[line, ..., 0].squeeze(), predictions_all[1][update_step][line, ...].squeeze(), "--", label="update_step_{:d}".format(update_step))
plt.legend()
out_figure = FLAGS.logdir + '/' + exp_string + '/' + 'test_ubs' + str(
FLAGS.update_batch_size) + '_stepsize' + str(FLAGS.update_lr) + 'line_{0:d}.png'.format(line)
plt.savefig(out_figure, bbox_inches="tight", dpi=300)
plt.close()
# for line in range(len(inputs_all)):
# plt.figure()
# plt.plot(inputs_all[line, ..., 0].squeeze(), outputs_all[line, ..., 0].squeeze(), "r-", label="ground_truth")
#
# plt.plot(inputs_all[line, ..., 0].squeeze(), predictions_all[0][line, ...].squeeze(), "--",
# label="initial")
# plt.legend()
#
# out_figure = FLAGS.logdir + '/' + exp_string + '/' + 'test_ubs' + str(
# FLAGS.update_batch_size) + '_stepsize' + str(FLAGS.update_lr) + 'init_line_{0:d}.png'.format(line)
#
# plt.savefig(out_figure, bbox_inches="tight", dpi=300)
# plt.close()
def test(model, saver, sess, exp_string, data_generator, test_num_updates=None):
num_classes = data_generator.num_classes # for classification, 1 otherwise
np.random.seed(1)
random.seed(1)
metaval_accuracies = []
for _ in range(NUM_TEST_POINTS):
if 'generate' not in dir(data_generator):
            feed_dict = {model.meta_lr: 0.0}
else:
batch_x, batch_y, amp, phase = data_generator.generate(train=False)
if FLAGS.baseline == 'oracle': # NOTE - this flag is specific to sinusoid
batch_x = np.concatenate([batch_x, np.zeros([batch_x.shape[0], batch_x.shape[1], 2])], 2)
batch_x[0, :, 1] = amp[0]
batch_x[0, :, 2] = phase[0]
inputa = batch_x[:, :num_classes*FLAGS.update_batch_size, :]
inputb = batch_x[:,num_classes*FLAGS.update_batch_size:, :]
labela = batch_y[:, :num_classes*FLAGS.update_batch_size, :]
labelb = batch_y[:,num_classes*FLAGS.update_batch_size:, :]
feed_dict = {model.inputa: inputa, model.inputb: inputb, model.labela: labela, model.labelb: labelb, model.meta_lr: 0.0}
if model.classification:
result = sess.run([model.metaval_total_accuracy1] + model.metaval_total_accuracies2, feed_dict)
else: # this is for sinusoid
result = sess.run([model.total_loss1] + model.total_losses2, feed_dict)
metaval_accuracies.append(result)
    metaval_accuracies = np.array(metaval_accuracies)
import numpy as np
import utiltools.robotmath as rm
import trimesh.transformations as tf
from panda3d.core import *
class Rigidbody(object):
def __init__(self, name = 'generalrbdname', mass = 1.0, pos = np.array([0,0,0]), com = np.array([0,0,0]),
rotmat = np.identity(3), inertiatensor = np.identity(3)):
# note anglew must be in radian!
# initialize a rigid body
self.__name = name
self.__mass = mass
# inertiatensor and center of mass are described in local coordinate system
self.__com = com
self.__inertiatensor = inertiatensor
# the following values are in world coordinate system
self.__pos = pos
self.__rotmat = rotmat
self.__linearv = np.array([0,0,0])
self.__dlinearv = np.array([0,0,0])
self.__angularw = np.array([0,0,0])
self.__dangularw = np.array([0,0,0])
@property
def mass(self):
return self.__mass
@property
def com(self):
return self.__com
@property
def inertiatensor(self):
return self.__inertiatensor
@property
def pos(self):
return self.__pos
@pos.setter
def pos(self, value):
self.__pos = value
@property
def rotmat(self):
return self.__rotmat
@rotmat.setter
def rotmat(self, value):
self.__rotmat = value
@property
def linearv(self):
return self.__linearv
@linearv.setter
def linearv(self, value):
self.__linearv = value
@property
def angularw(self):
return self.__angularw
@angularw.setter
def angularw(self, value):
self.__angularw = value
@property
def dlinearv(self):
return self.__dlinearv
@dlinearv.setter
def dlinearv(self, value):
self.__dlinearv = value
@property
def dangularw(self):
return self.__dangularw
@dangularw.setter
def dangularw(self, value):
self.__dangularw = value
def genForce(rbd, dtime):
gravity = 9800
Df = 1.0
Kf = 100.0
globalcom = rbd.rotmat.dot(rbd.com)+rbd.pos
force = np.array([0,0,-rbd.mass*gravity])
torque = np.cross(globalcom, force)
if rbd.pos[2] < 0.0:
v = rbd.linearv + np.cross(rbd.angularw, rbd.pos)
force_re = np.array([-Df*v[0], -Df*v[1], -Kf*rbd.pos[2]-Df*v[2]])
force = force + force_re
torque = torque + np.cross(rbd.pos, force_re)
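    # Note: the gravity/contact force and torque computed above are discarded
    # by the two assignments below, so this function currently returns zero
    # force and torque (possibly a debugging leftover).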
force = np.array([0.0,0.0,0.0])
torque = np.array([0.0,0.0,0.0])
return force, torque
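# Usage sketch (illustrative values): build a body and query its force/torque.
#
#   rbd = Rigidbody(name='box', mass=2.0, pos=np.array([0.0, 0.0, 100.0]))
#   f, tau = genForce(rbd, dtime=0.001)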
def updateRbdPR(rbd, dtime):
eps = 1e-6
    angularwvalue = np.linalg.norm(rbd.angularw)
# Copyright (c) OpenMMLab. All rights reserved.
from .coco import CocoDataset
from copy import deepcopy
import contextlib
import io
import itertools
import logging
import warnings
from collections import OrderedDict
import mmcv
import numpy as np
from mmcv.utils import print_log
from terminaltables import AsciiTable
from .api_wrappers import COCOeval, COCO
from .builder import DATASETS
@DATASETS.register_module()
class CocoOpenDataset(CocoDataset):
SEEN_CLASSES = ('truck', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench',
'bear', 'zebra', 'backpack', 'umbrella', 'tie', 'suitcase', 'frisbee', 'skis', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard', 'cup', 'knife',
'spoon', 'apple', 'sandwich', 'broccoli', 'hot dog', 'pizza', 'donut', 'bed', 'toilet',
'laptop', 'mouse', 'keyboard', 'cell phone', 'microwave', 'toaster',
'sink', 'book', 'vase', 'toothbrush')
def __init__(self,
seen_classes=None,
**kwargs):
if seen_classes == 'ALL':
self.SEEN_CLASSES = self.CLASSES
elif isinstance(seen_classes, list) \
or isinstance(seen_classes, tuple):
self.SEEN_CLASSES = seen_classes
super(CocoOpenDataset, self).__init__(**kwargs)
self.PALETTE = [(0, 0, 0) if idx+1 in self.seen_cat_ids else (255, 0, 0)
for idx, p in enumerate(self.PALETTE)]
def _cat_id2cat_name(self):
self.cat_id2cat_name = {}
self.cat_name2cat_id = {}
self.seen_cat_ids = []
self.unseen_cat_ids = []
cnt = 0
for cat_id, cat in self.coco.cats.items():
self.cat_id2cat_name[cat_id] = cat['name']
self.cat_name2cat_id[cat['name']] = cat_id
if cat['name'] in self.SEEN_CLASSES:
assert cat['name'] == self.SEEN_CLASSES[cnt]
cnt += 1
self.seen_cat_ids.append(cat_id)
else:
self.unseen_cat_ids.append(cat_id)
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
ann_info (list[dict]): Annotation info of an image.
with_mask (bool): Whether to parse mask annotations.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,\
labels, masks, seg_map. "masks" are raw annotations and not \
decoded into binary masks.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
gt_bboxes_unseen = []
gt_labels_unseen = []
gt_masks_ann_unseen = []
things = []
gt_ids = []
gt_ids_unseen = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.seen_cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
category = self.coco.cats[ann['category_id']]['name']
if category in self.SEEN_CLASSES:
gt_bboxes.append(bbox)
gt_labels.append(self.seen_cat2label[ann['category_id']])
gt_masks_ann.append(ann.get('segmentation', None))
gt_ids.append(ann['id'])
things.append(category)
else:
gt_bboxes_unseen.append(bbox)
gt_labels_unseen.append(self.unseen_cat2label[ann['category_id']])
gt_masks_ann_unseen.append(ann.get('segmentation', None))
gt_ids_unseen.append(ann['id'])
if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
#data.py
#load and save data for heliocats
#https://github.com/cmoestl/heliocats
import numpy as np
import pandas as pd
import scipy
import copy
import matplotlib.dates as mdates
import datetime
import urllib
import json
import os
import pdb
from sunpy.time import parse_time
import scipy.io
import scipy.signal
import pickle
import time
import sys
import cdflib
import matplotlib.pyplot as plt
import heliosat
from numba import njit
from astropy.time import Time
import heliopy.data.cassini as cassinidata
import heliopy.data.helios as heliosdata
import heliopy.data.spice as spicedata
import heliopy.spice as spice
import astropy
import requests
import math
import h5py
from config import data_path
#data_path='/nas/helio/data/insitu_python/'
heliosat_data_path='/nas/helio/data/heliosat/data/'
data_path_sun='/nas/helio/data/SDO_realtime/'
'''
MIT LICENSE
Copyright 2020, <NAME>, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
####################################### get new data ####################################
def remove_wind_spikes_gaps(data):
#nan intervals
nt1=parse_time('2020-04-20 17:06').datetime
nt2=parse_time('2020-04-20 17:14').datetime
gapind1=np.where(np.logical_and(data.time >= nt1,data.time <= nt2 ))[0]
nt1=parse_time('2020-04-21 01:20').datetime
nt2=parse_time('2020-04-21 01:22').datetime
gapind2=np.where(np.logical_and(data.time >= nt1,data.time <= nt2 ))[0]
nt1=parse_time('2020-11-09T16:04Z').datetime
nt2=parse_time('2020-11-09T17:08Z').datetime
gapind3=np.where(np.logical_and(data.time >= nt1,data.time <= nt2 ))[0]
nt1=parse_time('2020-08-31T16:58Z').datetime
nt2=parse_time('2020-08-31T18:32Z').datetime
gapind4=np.where(np.logical_and(data.time >= nt1,data.time <= nt2 ))[0]
nt1=parse_time('2021-02-01T12:32Z').datetime
nt2=parse_time('2021-02-01T14:04Z').datetime
gapind5=np.where(np.logical_and(data.time >= nt1,data.time <= nt2 ))[0]
data.bt[np.hstack([gapind1,gapind2,gapind3,gapind4,gapind5])]=np.nan
data.bx[np.hstack([gapind1,gapind2,gapind3,gapind4,gapind5])]=np.nan
data.by[np.hstack([gapind1,gapind2,gapind3,gapind4,gapind5])]=np.nan
data.bz[np.hstack([gapind1,gapind2,gapind3,gapind4,gapind5])]=np.nan
return data
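# Usage sketch: 'data' is a numpy recarray with .time (datetime) and .bx/.by/.bz/.bt
# fields, e.g. win = remove_wind_spikes_gaps(win) after loading a Wind data pickle.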
def save_stereoa_science_data_merge_rtn(data_path,file):
print('STEREO-A science data merging')
filesta="stereoa_2007_2019_rtn.p"
[sta0,hsta0]=pickle.load(open(data_path+filesta, "rb" ) )
filesta="stereoa_2020_april_rtn.p"
[sta1,hsta1]=pickle.load(open(data_path+filesta, "rb" ) )
filesta="stereoa_2020_may_july_rtn.p"
[sta2,hsta2]=pickle.load(open(data_path+filesta, "rb" ) )
#beacon data
#filesta='stereoa_2019_now_sceq_beacon.p'
#[sta3,hsta3]=pickle.load(open(data_path+filesta2, "rb" ) )
#sta2=sta2[np.where(sta2.time >= parse_time('2020-Aug-01 00:00').datetime)[0]]
#make array
sta=np.zeros(np.size(sta0.time)+np.size(sta1.time)+np.size(sta2.time),dtype=[('time',object),('bx', float),('by', float),\
('bz', float),('bt', float),('vt', float),('np', float),('tp', float),\
('x', float),('y', float),('z', float),\
('r', float),('lat', float),('lon', float)])
#convert to recarray
sta = sta.view(np.recarray)
sta.time=np.hstack((sta0.time,sta1.time,sta2.time))
sta.bx=np.hstack((sta0.bx,sta1.bx,sta2.bx))
sta.by=np.hstack((sta0.by,sta1.by,sta2.by))
sta.bz=np.hstack((sta0.bz,sta1.bz,sta2.bz))
sta.bt=np.hstack((sta0.bt,sta1.bt,sta2.bt))
sta.vt=np.hstack((sta0.vt,sta1.vt,sta2.vt))
sta.np=np.hstack((sta0.np,sta1.np,sta2.np))
sta.tp=np.hstack((sta0.tp,sta1.tp,sta2.tp))
sta.x=np.hstack((sta0.x,sta1.x,sta2.x))
sta.y=np.hstack((sta0.y,sta1.y,sta2.y))
sta.z=np.hstack((sta0.z,sta1.z,sta2.z))
sta.r=np.hstack((sta0.r,sta1.r,sta2.r))
sta.lon=np.hstack((sta0.lon,sta1.lon,sta2.lon))
sta.lat=np.hstack((sta0.lat,sta1.lat,sta2.lat))
pickle.dump(sta, open(data_path+file, "wb"))
print('STEREO-A merging done')
return 0
def save_stereoa_science_data_merge_sceq(data_path,file):
print('STEREO-A science data merging')
filesta="stereoa_2007_2019_sceq.p"
[sta0,hsta0]=pickle.load(open(data_path+filesta, "rb" ) )
filesta="stereoa_2020_april_sceq.p"
[sta1,hsta1]=pickle.load(open(data_path+filesta, "rb" ) )
filesta="stereoa_2020_may_july_sceq.p"
[sta2,hsta2]=pickle.load(open(data_path+filesta, "rb" ) )
#beacon data
#filesta='stereoa_2019_now_sceq_beacon.p'
#[sta3,hsta3]=pickle.load(open(data_path+filesta2, "rb" ) )
#sta2=sta2[np.where(sta2.time >= parse_time('2020-Aug-01 00:00').datetime)[0]]
#make array
sta=np.zeros(np.size(sta0.time)+np.size(sta1.time)+np.size(sta2.time),dtype=[('time',object),('bx', float),('by', float),\
('bz', float),('bt', float),('vt', float),('np', float),('tp', float),\
('x', float),('y', float),('z', float),\
('r', float),('lat', float),('lon', float)])
#convert to recarray
sta = sta.view(np.recarray)
sta.time=np.hstack((sta0.time,sta1.time,sta2.time))
sta.bx=np.hstack((sta0.bx,sta1.bx,sta2.bx))
sta.by=np.hstack((sta0.by,sta1.by,sta2.by))
sta.bz=np.hstack((sta0.bz,sta1.bz,sta2.bz))
sta.bt=np.hstack((sta0.bt,sta1.bt,sta2.bt))
sta.vt=np.hstack((sta0.vt,sta1.vt,sta2.vt))
sta.np=np.hstack((sta0.np,sta1.np,sta2.np))
sta.tp=np.hstack((sta0.tp,sta1.tp,sta2.tp))
sta.x=np.hstack((sta0.x,sta1.x,sta2.x))
sta.y=np.hstack((sta0.y,sta1.y,sta2.y))
sta.z=np.hstack((sta0.z,sta1.z,sta2.z))
sta.r=np.hstack((sta0.r,sta1.r,sta2.r))
sta.lon=np.hstack((sta0.lon,sta1.lon,sta2.lon))
sta.lat=np.hstack((sta0.lat,sta1.lat,sta2.lat))
pickle.dump(sta, open(data_path+file, "wb"))
print('STEREO-A merging done')
def save_stereoa_science_data(path,file,t_start, t_end,sceq):
#impact https://stereo-ssc.nascom.nasa.gov/data/ins_data/impact/level2/ahead/
#download with heliosat
#-------------------
#print('start STA')
#sta_sat = heliosat.STA()
#create an array with 1 minute resolution between t start and end
#time = [ t_start + datetime.timedelta(minutes=1*n) for n in range(int ((t_end - t_start).days*60*24))]
#time_mat=mdates.date2num(time)
#tm, mag = sta_sat.get_data_raw(t_start, t_end, "sta_impact_l1")
#print('download complete')
#---------------------------
#2020 PLASTIC download manually
#https://stereo-ssc.nascom.nasa.gov/data/ins_data/plastic/level2/Protons/Derived_from_1D_Maxwellian/ASCII/1min/A/2020/
sta_impact_path='/nas/helio/data/heliosat/data/sta_impact_l1/'
sta_plastic_path='/nas/helio/data/heliosat/data/sta_plastic_l2_ascii/'
t_start1=copy.deepcopy(t_start)
time_1=[]
#make 1 min datetimes
while t_start1 < t_end:
time_1.append(t_start1)
t_start1 += datetime.timedelta(minutes=1)
#make array for 1 min data
sta=np.zeros(len(time_1),dtype=[('time',object),('bx', float),('by', float),\
('bz', float),('bt', float),('vt', float),('np', float),('tp', float),\
('x', float),('y', float),('z', float),\
('r', float),('lat', float),('lon', float)])
#convert to recarray
sta = sta.view(np.recarray)
sta.time=time_1
#make data file names
t_start1=copy.deepcopy(t_start)
days_sta = []
days_str = []
i=0
while t_start < t_end:
days_sta.append(t_start)
days_str.append(str(days_sta[i])[0:4]+str(days_sta[i])[5:7]+str(days_sta[i])[8:10])
i=i+1
t_start +=datetime.timedelta(days=1)
#go through all files
bt=np.zeros(int(1e9))
bx=np.zeros(int(1e9))
by=np.zeros(int(1e9))
bz=np.zeros(int(1e9))
t2=[]
i=0
for days_date in days_str:
cdf_file = 'STA_L1_MAG_RTN_{}_V06.cdf'.format(days_date)
if os.path.exists(sta_impact_path+cdf_file):
print(cdf_file)
f1 = cdflib.CDF(sta_impact_path+cdf_file)
t1=parse_time(f1.varget('Epoch'),format='cdf_epoch').datetime
t2.extend(t1)
bfield=f1.varget('BFIELD')
bt[i:i+len(bfield[:,3])]=bfield[:,3]
bx[i:i+len(bfield[:,0])]=bfield[:,0]
by[i:i+len(bfield[:,1])]=bfield[:,1]
bz[i:i+len(bfield[:,2])]=bfield[:,2]
i=i+len(bfield[:,3])
#cut array
bt=bt[0:i]
bx=bx[0:i]
by=by[0:i]
bz=bz[0:i]
tm2=mdates.date2num(t2)
time_mat=mdates.date2num(time_1)
#linear interpolation to time_mat times
sta.bx = np.interp(time_mat, tm2, bx )
sta.by = np.interp(time_mat, tm2, by )
sta.bz = np.interp(time_mat, tm2, bz )
#sta.bt = np.sqrt(sta.bx**2+sta.by**2+sta.bz**2)
    #round each original time down to the full minute; the original data is at 30 s cadence
tround=copy.deepcopy(t2)
format_str = '%Y-%m-%d %H:%M'
for k in np.arange(np.size(t2)):
tround[k] = datetime.datetime.strptime(datetime.datetime.strftime(t2[k], format_str), format_str)
tm2_round=parse_time(tround).plot_date
#which values are not in original data compared to full time range
isin=np.isin(time_mat,tm2_round)
setnan=np.where(isin==False)
    #set to nan what is not in the original data
sta.bx[setnan]=np.nan
sta.by[setnan]=np.nan
sta.bz[setnan]=np.nan
sta.bt = np.sqrt(sta.bx**2+sta.by**2+sta.bz**2)
########### get PLASTIC new prel data
#PLASTIC
#2019 monthly if needed
#https://stereo-ssc.nascom.nasa.gov/data/ins_data/plastic/level2/Protons/Derived_from_1D_Maxwellian/ASCII/1min/A/2019/
#2020 manually all
#https://stereo-ssc.nascom.nasa.gov/data/ins_data/plastic/level2/Protons/Derived_from_1D_Maxwellian/ASCII/1min/A/2020/
#STA_L2_PLA_1DMax_1min_202004_092_PRELIM_v01.txt
#STA_L2_PLA_1DMax_1min_202005_122_PRELIM_v01.txt
#STA_L2_PLA_1DMax_1min_202006_153_PRELIM_v01.txt
#STA_L2_PLA_1DMax_1min_202007_183_PRELIM_v01.txt
########
pvt=np.zeros(int(1e8))
pnp=np.zeros(int(1e8))
ptp=np.zeros(int(1e8))
pt2=[]
pfiles=['STA_L2_PLA_1DMax_1min_202004_092_PRELIM_v01.txt',
'STA_L2_PLA_1DMax_1min_202005_122_PRELIM_v01.txt',
'STA_L2_PLA_1DMax_1min_202006_153_PRELIM_v01.txt',
'STA_L2_PLA_1DMax_1min_202007_183_PRELIM_v01.txt']
j=0
for name in pfiles:
p1=np.genfromtxt(sta_plastic_path+name,skip_header=2)
print(name)
vt1=p1[:,8]
np1=p1[:,9]
tp1=p1[:,10]
#YEAR DOY hour min sec
year1=p1[:,0]
doy1=p1[:,1]
hour1=p1[:,2]
min1=p1[:,3]
sec1=p1[:,4]
p1t=[]
#make datetime array from year and doy
for i in np.arange(len(doy1)):
            p1t.append(parse_time(str(int(year1[i]))+'-01-01 00:00').datetime+datetime.timedelta(days=doy1[i]-1)+\
                       datetime.timedelta(hours=hour1[i]) + datetime.timedelta(minutes=min1[i]) )
pvt[j:j+len(vt1)]=vt1
pnp[j:j+len(np1)]=np1
ptp[j:j+len(tp1)]=tp1
pt2.extend(p1t)
j=j+len(vt1)
#cut array
pvt=pvt[0:j]
pnp=pnp[0:j]
ptp=ptp[0:j]
pt2=pt2[0:j]
pt2m=mdates.date2num(pt2)
#linear interpolation to time_mat times
sta.vt = np.interp(time_mat, pt2m, pvt )
sta.np = np.interp(time_mat, pt2m, pnp )
sta.tp = np.interp(time_mat, pt2m, ptp )
#which values are not in original data compared to full time range
isin=np.isin(time_mat,pt2m)
setnan=np.where(isin==False)
    #set to nan what is not in the original data
sta.vt[setnan]=np.nan
sta.np[setnan]=np.nan
sta.tp[setnan]=np.nan
#add position
print('position start')
frame='HEEQ'
kernels = spicedata.get_kernel('stereo_a')
kernels += spicedata.get_kernel('stereo_a_pred')
spice.furnish(kernels)
statra=spice.Trajectory('-234') #STEREO-A SPICE NAIF code
statra.generate_positions(sta.time,'Sun',frame)
statra.change_units(astropy.units.AU)
[r, lat, lon]=cart2sphere(statra.x,statra.y,statra.z)
sta.x=statra.x
sta.y=statra.y
sta.z=statra.z
sta.r=r
sta.lat=np.degrees(lat)
sta.lon=np.degrees(lon)
print('position end ')
coord='RTN'
#convert magnetic field to SCEQ
if sceq==True:
print('convert RTN to SCEQ ')
coord='SCEQ'
sta=convert_RTN_to_SCEQ(sta,'STEREO-A')
header='STEREO-A magnetic field (IMPACT instrument, science data) and plasma data (PLASTIC, preliminary science data), ' + \
'obtained from https://stereo-ssc.nascom.nasa.gov/data/ins_data/impact/level2/ahead/ and '+ \
'https://stereo-ssc.nascom.nasa.gov/data/ins_data/plastic/level2/Protons/Derived_from_1D_Maxwellian/ASCII/1min/A/2020/ '+ \
'Timerange: '+sta.time[0].strftime("%Y-%b-%d %H:%M")+' to '+sta.time[-1].strftime("%Y-%b-%d %H:%M")+\
', with an average time resolution of '+str(np.mean(np.diff(sta.time)).seconds)+' seconds. '+\
'The data are available in a numpy recarray, fields can be accessed by sta.time, sta.bx, sta.vt etc. '+\
'Missing data has been set to "np.nan". Total number of data points: '+str(sta.size)+'. '+\
'Units are btxyz [nT, '+coord+', vt [km/s], np[cm^-3], tp [K], heliospheric position x/y/z/r/lon/lat [AU, degree, HEEQ]. '+\
'Made with https://github.com/cmoestl/heliocats '+\
'and https://github.com/heliopython/heliopy. '+\
'By <NAME> (twitter @chrisoutofspace), <NAME>, <NAME> and <NAME>. File creation date: '+\
datetime.datetime.utcnow().strftime("%Y-%b-%d %H:%M")+' UTC'
print('save pickle file')
pickle.dump([sta,header], open(path+file, "wb"))
print('done sta')
print()
return 0
def save_wsa_hux(filein):
#load wsa hux
windraw = np.loadtxt('data/wsa_hux_mars_aug2014_jan2018.txt', dtype=[('time','<U30'),('time2','<U30'),('time_mat', float),('vt', float)] )
windraw = windraw.view(np.recarray)
wind=np.zeros(len(windraw),dtype=[('time',object),('vt', float)])
wind=wind.view(np.recarray)
for i in np.arange(len(windraw)):
wind_time_str=windraw.time[i][8:12]+'-'+windraw.time[i][4:7]+'-'+windraw.time[i][1:3]+' '+windraw.time2[i][0:8]
wind.time[i]=(parse_time(wind_time_str).datetime)
wind.vt=windraw.vt
fileout='wsa_hux_mars_aug2014_jan2018.p'
pickle.dump(wind, open(data_path+fileout, "wb"))
return 0
def load_mars_wsa_hux():
file='wsa_hux_mars_aug2014_jan2018.p'
rad=pickle.load(open(data_path+file, "rb"))
return rad
def load_maven_sir_huang():
#Huang et al. 2019 APJ convert PDF to excel with https://pdftoxls.com
mavensir='sircat/sources/Huang_2019_SIR_MAVEN_table_1.xlsx'
print('load MAVEN Huang SIR catalog from ', mavensir)
ms=pd.read_excel(mavensir)
ms=ms.drop(index=[0,1,2])
ms_num=np.array(ms['No.'])
ms_start=np.array(ms['Start'])
ms_end=np.array(ms['End'])
ms_si=np.array(ms['SI'])
ms=np.zeros(len(ms_num),dtype=[('start',object),('end',object),('si',object)])
ms=ms.view(np.recarray)
#make correct years for start time
ms_num[np.where(ms_num< 7)[0]]=2014
ms_num[np.where(ms_num< 27)[0]]=2015
ms_num[np.where(ms_num< 64)[0]]=2016
ms_num[np.where(ms_num< 83)[0]]=2017
ms_num[np.where(ms_num< 127)[0]]=2018
#make correct years for end and si time
ms_num2=copy.deepcopy(ms_num)
ms_num2[3]=2015
ms_num2[62]=2017
#transform date of start time
for t in np.arange(0,len(ms_start)):
#check for nans in between time strings
if pd.isna(ms_start[t])==False:
####################### start time
#year
year=str(ms_num[t])
#month
datetimestr=ms_start[t]
datestr=datetimestr[0:2]
monthfloat=float(datestr)
month=str(int(np.floor(monthfloat)))
#day
if int(month) < 10: day=datetimestr[2:4]
if int(month) > 9: day=datetimestr[3:5]
#time
timestr=datetimestr[-5:]
#construct year month day
datetimestrfin=str(ms_num[t])+'-'+month+'-'+day
#remove white spaces at the end and add time
finaldatetime=datetimestrfin.strip()+' '+timestr
#print(ms_start[t])
#print(finaldatetime)
ms.start[t]=parse_time(finaldatetime).datetime
################### end time
#year
year=str(ms_num2[t])
#month
datetimestr=ms_end[t]
datestr=datetimestr[0:2]
monthfloat=float(datestr)
month=str(int(np.floor(monthfloat)))
#day
if int(month) < 10: day=datetimestr[2:4]
if int(month) > 9: day=datetimestr[3:5]
#time
timestr=datetimestr[-5:]
#construct year month day
datetimestrfin=str(ms_num2[t])+'-'+month+'-'+day
#remove white spaces at the end and add time
finaldatetime=datetimestrfin.strip()+' '+timestr
#print(ms_end[t])
#print(finaldatetime)
ms.end[t]=parse_time(finaldatetime).datetime
############# stream interface time
#year
year=str(ms_num2[t])
#month
datetimestr=ms_si[t]
datestr=datetimestr[0:2]
monthfloat=float(datestr)
            month=str(int(np.floor(monthfloat)))
from typing import List
import numpy as np
from oolearning.model_processors.DecoratorBase import DecoratorBase
from oolearning.converters.TwoClassRocOptimizerConverter import TwoClassRocOptimizerConverter
from oolearning.converters.TwoClassPrecisionRecallOptimizerConverter import TwoClassPrecisionRecallOptimizerConverter # noqa
class TwoClassThresholdDecorator(DecoratorBase):
"""
In object-oriented programming, the `decorator` is pattern is described as a way to "attach additional
responsibilities to an object dynamically. Decorators provide a flexible alternative to subclassing for
extending functionality." (https://sourcemaking.com/design_patterns/decorator)
This Decorator is passed into a resampler and, for each time the model is trained in the Resampler, the
    Decorator is run via `.decorate()`, calculating the ideal thresholds that minimize the
    distance to the upper left corner for the ROC curve, and minimize the distance to the upper
    right corner for the Precision/Recall curve (i.e. balancing the inherent trade-offs in both curves).
"""
def __init__(self, parallelization_cores: int = -1):
"""
:param parallelization_cores: the number of cores to use for parallelization. -1 is all, 0 or 1 is
"off".
"""
super().__init__()
self._roc_ideal_thresholds = list()
self._precision_recall_ideal_thresholds = list()
self._parallelization_cores = parallelization_cores
def __str__(self):
if len(self._roc_ideal_thresholds) > 0:
string = "ROC Ideal Threshold\n-------------------\n" + \
str(round(self.roc_threshold_mean, 4)) + " (mean); " + \
str(round(self.roc_threshold_st_dev, 4)) + " (standard dev.); " + \
                     str(round(self.roc_threshold_cv, 2)) + " (coefficient of variation)\n\n"
string += "Precision/Recall Ideal Threshold\n-------------------\n" + \
str(round(self.precision_recall_threshold_mean, 4)) + " (mean); " + \
str(round(self.precision_recall_threshold_st_dev, 4)) + " (standard dev.); " + \
                      str(round(self.precision_recall_threshold_cv, 2)) + " (coefficient of variation)\n"
return string
else:
return ""
# noinspection PyProtectedMember
def decorate(self, **kwargs):
# Specific to 2-class classification; need to use the right objects, or this will explode.
# in future, if there will be additional shit like this, could consider passing in a list
# of objects with common interface that is ran at the end of each resample fold
# and iterating through objects, which hold the specific information (like resampled
# thresholds)
scores = kwargs['scores']
holdout_actual_values = kwargs['holdout_actual_values']
holdout_predicted_values = kwargs['holdout_predicted_values']
positive_class = None
        # we need to get the name of the positive class;
# the Score object should either have `_positive_class` directly (e.g. AUC) or in the converter
first_score = scores[0]
if hasattr(first_score, '_positive_class'):
positive_class = first_score.positive_class
elif hasattr(first_score, '_converter'):
if hasattr(first_score._converter, 'positive_class'):
positive_class = first_score._converter.positive_class
if positive_class is None:
raise ValueError("Cannot find positive class in Score or Score's Converter")
converter = TwoClassRocOptimizerConverter(actual_classes=holdout_actual_values,
positive_class=positive_class,
parallelization_cores=self._parallelization_cores)
converter.convert(values=holdout_predicted_values)
self._roc_ideal_thresholds.append(converter.ideal_threshold) # TODO: rename roc_ideal_thresholds to something like resampled_roc_ideal_threshold ... shitty name.
converter = TwoClassPrecisionRecallOptimizerConverter(actual_classes=holdout_actual_values,
positive_class=positive_class,
parallelization_cores=self._parallelization_cores) # noqa
converter.convert(values=holdout_predicted_values)
self._precision_recall_ideal_thresholds.append(converter.ideal_threshold)
@property
def roc_ideal_thresholds(self) -> List[float]:
"""
:return: for each time the model is trained in the Resampler, the threshold associated with the
point on the ROC curve that minimizes the distance to the upper left corner of the curve (thereby
balancing the trade-off between the true positive rate (sensitivity) and the true negative rate
(specificity)) is added to the list of "ideal thresholds", which is returned.
"""
return self._roc_ideal_thresholds
@property
def precision_recall_ideal_thresholds(self) -> List[float]:
"""
:return: each time the model is trained in the Resampler, the threshold associated with the
point on the precision/recall curve that minimizes the distance to the upper right corner of the
curve (thereby balancing the trade-off between the true positive rate (recall) and the positive
predictive value (precision)) is added to the list of "ideal thresholds", which is returned.
"""
return self._precision_recall_ideal_thresholds
@property
def roc_threshold_mean(self) -> float:
"""
:return: the mean of the "ideal" thresholds
"""
        return float(np.mean(self._roc_ideal_thresholds))
#! /usr/bin/env python
# -*- coding: utf-8 -*-
'''
---------------------------------
Figure Plot
Author: <NAME>
Date: 07/01/2019
---------------------------------
'''
import os
import glob
import pandas as pd
import numpy as np # for easy generation of arrays
import matplotlib.pyplot as plt
import matplotlib as mpl
# parameter setting
script_path = os.path.split(os.path.realpath(__file__))[0] # os.getcwd() returns the path in your terminal
(base_path, script_dir) = os.path.split(script_path)
search_path = os.path.join(script_path, '..','..','..','data', 'fig_plot')
filename = os.path.join('*', 'state.csv')
global_path_file = os.path.join(script_path,'..','..','..','data','fig_plot','global_path.txt')
# print('search_path', search_path)
# print('global_path_file', global_path_file)
save_enable = False
image_path = script_path
fig_dpi = 100; # inches = pixels / dpi
figsize_inch = list(np.array([800, 600]) / fig_dpi)
# dynamic rc settings
mpl.rcParams['font.size'] = 18
# mpl.rcParams['font.family'] = 'sans'
mpl.rcParams['font.style'] = 'normal'
mpl.rc('axes', titlesize=18, labelsize=18)
mpl.rc('lines', linewidth=2.5, markersize=5)
mpl.rcParams['xtick.labelsize'] = 16
mpl.rcParams['ytick.labelsize'] = 16
# customize color
green_lv1 = list(np.array([229, 245, 249]) / 255.0)
green_lv2 = list(np.array([153, 216, 201]) / 255.0)
green_lv3 = list(np.array([44, 162, 95]) / 255.0)
gray = list(np.array([99, 99, 99]) / 255.0)
# import data
## read global path from txt file
global_path = pd.read_csv(global_path_file, delimiter=' ', header=None)
global_path.columns = ["point_id", "position_x", "position_y"]
# print(global_path.head())
## read vehicle states from csv files
dataset = []
file_list = glob.glob(os.path.join(search_path, filename))
# print('file_list has:', file_list)
print(os.path.join(search_path, filename))
for file in file_list:
raw_data = pd.read_csv(file)
dataset.append(raw_data)
print(file, '-> dataset[%d]' %(len(dataset)))
# path_comparison
fig = plt.figure(figsize=figsize_inch, dpi=fig_dpi, frameon=False)
ax = fig.add_subplot(111)
line_global, = ax.plot(global_path['position_x'], global_path['position_y'],
linestyle='-', color=gray, linewidth=1.5, label='global path')
lines = []
lines.append(line_global)
i = 0
for data in dataset:
i += 1
line_tmp, = ax.plot(data['position_x'], data['position_y'],
linestyle='--', linewidth=1.5, label='vehicle path # %d'%(i))
lines.append(line_tmp)
plt.axis('equal')
ax.set_adjustable('box')
# range and ticks setting
x_min = np.min(global_path["position_x"])
x_max = np.max(global_path["position_x"])
# astar implementation with some inspiration from https://medium.com/@nicholas.w.swift/easy-a-star-pathfinding-7e6689c7f7b2
import os
import gflags
import sys
import numpy as np
from numpy import linalg as LA
import math
from operator import attrgetter
import cv2
import load_params_demo
import utils_demo
argv = gflags.FLAGS(sys.argv)
#colors
blue = [255, 0, 0]
red = [0, 0, 255]
green = [0, 255, 0]
white = [255, 255, 255]
black = [0, 0, 0]
class Measure_params:
def __init__(self, range, n_seg, centroid, slice_tol, ray_seg):
self.range = range
self.n_seg = n_seg
self.centroid = centroid
self.slice_tol = slice_tol
self.ray_seg = ray_seg
class Measurement:
def __init__(self):
self.geometry = self.Geometry()
self.coords = self.Coords()
self.slice_tol = None
self.ray = self.Ray()
class Geometry:
def __init__(self):
self.range = None
self.n_seg = None
self.centroid = None
self.slice_tol = None
self.ray_seg = None
class Coords:
def __init__(self):
self.point_coords = None
self.slice_coords = None
self.slice_keep_coords = None
self.slice_obs_coords = None
self.see_obs_list = None
self.pts_obs_list = None
class Ray:
def __init__(self):
self.edge_x = None
self.edge_y = None
self.ray_x = None
self.ray_y = None
def init(self, params, time_step, seq, img_coords, output_dir, resize=None):
# img = cv2.imread(output_dir + '{}_s{}_output.png'.format(time_step, seq))
img = cv2.imread(os.path.join(output_dir, '{}_s{}_output.png'.format(time_step, seq)))
obs_map = utils_demo.process_for_astar(img)
if resize is not None:
obs_map = cv2.resize(obs_map, resize.dim, interpolation=cv2.INTER_AREA)
obs_map = refine_map(obs_map)
self.geometry.range = params.range
self.geometry.n_seg = params.n_seg
self.geometry.centroid = params.centroid
self.geometry.ray_seg = params.ray_seg
self.geometry.slice_tol = params.slice_tol
#get all points from truth map that are within the circle
img_coords = np.asarray(img_coords)
X = img_coords[:, 1] #column
Y = img_coords[:, 0] #row
cx = self.geometry.centroid[1]
cy = self.geometry.centroid[0]
check = np.square((cx - X)) + np.square((cy - Y))
check = np.where(check < self.geometry.range ** 2)[0]
self.coords.point_coords = img_coords[check] #this is a numpy array
X = self.coords.point_coords[:, 1]
Y = self.coords.point_coords[:, 0]
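        # Bin every in-range point into one of n_seg angular sectors around the
        # centroid: atan2 in (-pi, pi] is shifted to (0, 2*pi] and scaled to a
        # sector index in [0, n_seg); slice_tol nudges points off sector borders.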
sliceno = np.int32((math.pi + np.arctan2(Y - cy, X - cx)) * (self.geometry.n_seg / (2 * math.pi)) - \
self.geometry.slice_tol)
slice_coords = []
for un in np.arange(self.geometry.n_seg):
slice_coords.append(self.coords.point_coords[np.where(sliceno == un)[0]])
self.coords.slice_coords = slice_coords
#unit vectors for rays
thetas = np.linspace(0, 2*math.pi, self.geometry.n_seg, endpoint=False)
thetas = thetas + (thetas[1] - thetas[0]) / 2.
thetas = np.flip(thetas) + math.pi
ux = np.cos(thetas)
uy = np.sin(thetas)
if len(slice_coords) != thetas.shape[0]:
print("centroid", self.geometry.centroid)
assert len(slice_coords) == thetas.shape[0]
# exit(1)
self.ray.edge_x = np.int32(ux * self.geometry.range + cx)
self.ray.edge_y = np.int32(uy * self.geometry.range + cy)
#calculate rays
ray_disc = np.linspace(0, self.geometry.range, self.geometry.ray_seg, endpoint=True)
ray_disc = ray_disc.reshape((1, ray_disc.shape[0]))
ux = ux.reshape((ux.shape[0], 1))
uy = uy.reshape((uy.shape[0], 1))
self.ray.ray_x = np.multiply(ux, ray_disc).astype(np.int32) + cx
self.ray.ray_y = np.multiply(uy, ray_disc).astype(np.int32) + cy
slice_keep_coords = []
slice_obs_coords = []
see_obs_list = [] #"viewed" pixels which are obstacles
pts_obs_list = [] #list of points which are detected to hit an obstacle first
slice_coords = slice_coords[::-1]
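        # For each angular sector, march along its central ray to the first
        # obstacle pixel; keep only the sector points closer to the centroid
        # than that hit (the visible region), and record points within ~sqrt(2)
        # px of the hit distance that land on obstacle pixels as obstacle returns.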
for row_x, row_y, slice in zip(self.ray.ray_x, self.ray.ray_y, slice_coords):
if len(slice) == 0:
continue
x_keep = np.where((0 < row_x) & (row_x < obs_map.shape[1]))[0] # which indexes to keep from that row for x
row_x = row_x[x_keep]
row_y = row_y[x_keep]
y_keep = np.where((0 < row_y) & (row_y < obs_map.shape[0]))[0]
row_x = row_x[y_keep]
row_y = row_y[y_keep]
obs_pixels = np.where((obs_map[row_y, row_x, :] == (0, 0, 0)).all(axis=1))[0]
if obs_pixels.shape[0] == 0:
slice_keep_coords.append(slice)
continue
else:
obs_dist = LA.norm((np.array([cy - row_y[obs_pixels[0]], cx - row_x[obs_pixels[0]]]))) # distance from centroid to first obstacle pixel
pts_obs_list.append([row_y[obs_pixels[0]], row_x[obs_pixels[0]]])
slice_pts_dist = LA.norm((slice - np.array([cy, cx])), axis=1) #distance for each point in the slice to the centroid
slice_keep = np.where(slice_pts_dist < obs_dist)[0]
# pixels labeled as obstacle, use sqrt(2) for diagonal distance
            slice_obs = np.where(np.logical_and(obs_dist - math.sqrt(2) < slice_pts_dist,
                                                slice_pts_dist < obs_dist + math.sqrt(2)))[0]
slice_obs_check = slice[slice_obs]
obs_coords = np.where((obs_map[slice_obs_check[:, 0], slice_obs_check[:, 1], :] == (0, 0, 0)).all(axis=1))[0] #where slice obs coordinates are actually an obstacle
if len(obs_coords) != 0:
slice_obs_coords.append(slice_obs_check[obs_coords])
slice_check = slice[slice_keep]
obs_coords = np.where((obs_map[slice_check[:, 0], slice_check[:, 1], :] == (0, 0, 0)).all(axis=1))[0] #where slice keep coordinates are actually an obstacle
if len(obs_coords) != 0:
see_obs_list.append(slice_check[obs_coords])
slice_keep_coords.append(slice[slice_keep]) #[[row, column]......]
self.coords.see_obs_list = see_obs_list
self.coords.pts_obs_list = pts_obs_list
self.coords.slice_keep_coords = slice_keep_coords
self.coords.slice_obs_coords = slice_obs_coords
class node_astar:
def __init__(self):
self.parent = None
self.type = None
self.position = None
self.g = 0
self.h = 0
self.f = 0
def __eq__(self, other):
return self.position == other.position
def astar_map(img):
white = [255, 255, 255]
black = [0, 0, 0]
rows = img.shape[0]
cols = img.shape[1]
    my_map = np.zeros((rows, cols))
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import mpl_toolkits.mplot3d as plt3d
import matplotlib.lines as mlines
import numpy as np
from scipy import stats
import nestle
from numpy import linalg
from random import random
class EllipsoidTool:
"""Some stuff for playing with ellipsoids"""
def __init__(self): pass
def getMinVolEllipse(self, P=None, tolerance=0.01):
""" Find the minimum volume ellipsoid which holds all the points
Based on work by <NAME>
http://www.mathworks.com/matlabcentral/fileexchange/9542
and also by looking at:
http://cctbx.sourceforge.net/current/python/scitbx.math.minimum_covering_ellipsoid.html
Which is based on the first reference anyway!
Here, P is a numpy array of N dimensional points like this:
P = [[x,y,z,...], <-- one point per line
[x,y,z,...],
[x,y,z,...]]
Returns:
(center, radii, rotation)
"""
(N, d) = np.shape(P)
d = float(d)
# Q will be our working array
Q = np.vstack([np.copy(P.T), np.ones(N)])
QT = Q.T
# initializations
err = 1.0 + tolerance
u = (1.0 / N) * np.ones(N)
# Khachiyan Algorithm
while err > tolerance:
V = np.dot(Q, np.dot(np.diag(u), QT))
M = np.diag(np.dot(QT , np.dot(linalg.inv(V), Q))) # M the diagonal vector of an NxN matrix
j = np.argmax(M)
maximum = M[j]
step_size = (maximum - d - 1.0) / ((d + 1.0) * (maximum - 1.0))
new_u = (1.0 - step_size) * u
new_u[j] += step_size
err = np.linalg.norm(new_u - u)
u = new_u
# center of the ellipse
center = np.dot(P.T, u)
# the A matrix for the ellipse
A = linalg.inv(
np.dot(P.T, np.dot(np.diag(u), P)) -
np.array([[a * b for b in center] for a in center])
) / d
# Get the values we'd like to return
U, s, rotation = linalg.svd(A)
radii = 1.0/np.sqrt(s)
return (center, radii, rotation)
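    # Example usage (hypothetical data): fit a minimum-volume ellipsoid to
    # random 3-D points and report its volume.
    #
    #   P = np.random.rand(100, 3)
    #   ET = EllipsoidTool()
    #   center, radii, rotation = ET.getMinVolEllipse(P, tolerance=0.01)
    #   print(ET.getEllipsoidVolume(radii))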
def getEllipsoidVolume(self, radii):
"""Calculate the volume of the blob"""
return 4./3.*np.pi*radii[0]*radii[1]*radii[2]
def plotEllipsoid(self, center, radii, rotation, ax=None, plotAxes=False, cageColor='b', cageAlpha=0.2):
"""Plot an ellipsoid"""
        make_ax = ax is None
if make_ax:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
u = np.linspace(0.0, 2.0 * np.pi, 100)
v = np.linspace(0.0, np.pi, 100)
# cartesian coordinates that correspond to the spherical angles:
x = radii[0] * np.outer(np.cos(u), np.sin(v))
y = radii[1] * np.outer(np.sin(u), np.sin(v))
z = radii[2] * np.outer(np.ones_like(u), np.cos(v))
# rotate accordingly
for i in range(len(x)):
for j in range(len(x)):
[x[i,j],y[i,j],z[i,j]] = np.dot([x[i,j],y[i,j],z[i,j]], rotation) + center
if plotAxes:
# make some purdy axes
axes = np.array([[radii[0],0.0,0.0],
[0.0,radii[1],0.0],
[0.0,0.0,radii[2]]])
# rotate accordingly
for i in range(len(axes)):
axes[i] = np.dot(axes[i], rotation)
# plot axes
for p in axes:
X3 = np.linspace(-p[0], p[0], 100) + center[0]
Y3 = np.linspace(-p[1], p[1], 100) + center[1]
Z3 = np.linspace(-p[2], p[2], 100) + center[2]
ax.plot(X3, Y3, Z3, color=cageColor)
# plot ellipsoid
ax.plot_wireframe(x, y, z, rstride=6, cstride=6, color=cageColor, alpha=cageAlpha)
if make_ax:
plt.show()
plt.close(fig)
del fig
def plot_ellipsoid_3d(ell, ax):
"""Plot the 3-d Ellipsoid ell on the Axes3D ax."""
# points on unit sphere
u = np.linspace(0.0, 2.0 * np.pi, 100)
v = np.linspace(0.0, np.pi, 100)
z = np.outer(np.cos(u), np.sin(v))
y = np.outer(np.sin(u), np.sin(v))
x = np.outer(np.ones_like(u), np.cos(v))
# transform points to ellipsoid
for i in range(len(x)):
for j in range(len(x)):
x[i,j], y[i,j], z[i,j] = ell.ctr + np.dot(ell.axes,
[x[i,j],y[i,j],z[i,j]])
ax.plot_wireframe(x, y, z, rstride=4, cstride=4, color='#2980b9', alpha=0.2)
def plot_hand_points(hand_points):
x_coords = hand_points[::3]
y_coords = hand_points[1::3]
z_coords = hand_points[2::3]
mean_x_coords = np.mean(x_coords)
mean_y_coords = np.mean(y_coords)
mean_z_coords = np.mean(z_coords)
fig = plt.figure()
fig.set_size_inches(10,10)
ax = fig.add_subplot(111, projection='3d', aspect='equal')
hand_plot = ax.scatter(x_coords, y_coords, z_coords, depthshade=False)
def plot_finger(inds_array):
for i in range(len(inds_array)-1):
xs = (x_coords[inds_array[i]], x_coords[inds_array[i+1]])
ys = (y_coords[inds_array[i]], y_coords[inds_array[i+1]])
zs = (z_coords[inds_array[i]], z_coords[inds_array[i+1]])
line_seg = plt3d.art3d.Line3D(xs, ys, zs)
ax.add_line(line_seg)
# Draw thumb
thumb_inds = [0, 1, 6, 7, 8]
plot_finger(thumb_inds)
# Draw index
index_inds = [0, 2, 9, 10, 11]
plot_finger(index_inds)
# Draw middle
middle_inds = [0, 3, 12, 13, 14]
plot_finger(middle_inds)
# Draw ring
ring_inds = [0, 4, 15, 16, 17]
plot_finger(ring_inds)
# Draw pinky
pinky_inds = [0, 5, 18, 19, 20]
plot_finger(pinky_inds)
# Working out axes
axis_size = 120.0
ax.set_xlim(mean_x_coords-axis_size/2.0, mean_x_coords+axis_size/2.0)
ax.set_ylim(mean_y_coords-axis_size/2.0, mean_y_coords+axis_size/2.0)
ax.set_zlim(mean_z_coords-axis_size/2.0, mean_z_coords+axis_size/2.0)
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
blue_line = mlines.Line2D([], [], color='blue', label='Ground Truth')
plt.legend(handles=[blue_line])
plt.show()
def plot_two_hands(hand_points1, hand_points2, save_fig = False, save_name = None, first_hand_label='Prediction', second_hand_label='Ground Truth', pred_uncertainty=None):
x_coords1 = hand_points1[::3]
y_coords1 = hand_points1[1::3]
z_coords1 = hand_points1[2::3]
mean_x_coords = np.mean(x_coords1)
mean_y_coords = np.mean(y_coords1)
mean_z_coords = np.mean(z_coords1)
x_coords2 = hand_points2[::3]
y_coords2 = hand_points2[1::3]
z_coords2 = hand_points2[2::3]
fig = plt.figure()
fig.set_size_inches(10,10)
ax = fig.add_subplot(111, projection='3d', aspect='equal')
hand_plot1 = ax.scatter(x_coords1, y_coords1, z_coords1, depthshade=False)
hand_plot2 = ax.scatter(x_coords2, y_coords2, z_coords2, depthshade=False, c='r')
if pred_uncertainty is not None:
#the given matrix is of size 3*num_points x 3*num_points
#each submatrix of size 3x3 around the diagonal is the covariance of the point in 3D space
for (idx, (x_mean, y_mean, z_mean)) in enumerate(zip(x_coords1, y_coords1, z_coords1)):
ET = EllipsoidTool()
uncertainty_mat = pred_uncertainty[0, 0, idx:idx+3,idx:idx+3]
            mu = np.array([x_mean, y_mean, z_mean])
# Authors: <NAME>
#
# License: BSD (3-clause)
"""Read datasets containing ECG data and sleep stage annotations."""
import csv
import datetime
from dataclasses import dataclass
from enum import IntEnum
from pathlib import Path
from typing import Iterator, NamedTuple, Optional, Union
from xml.etree import ElementTree
import numpy as np
from ..config import get_config
from ..heartbeats import detect_heartbeats
from .nsrr import _download_nsrr_file, _get_nsrr_url, _list_nsrr, download_nsrr
from .physionet import _list_physionet, download_physionet
class SleepStage(IntEnum):
"""
Mapping of AASM sleep stages to integers.
To facilitate hypnogram plotting, values increase with wakefulness.
"""
WAKE = 5
REM = 4
N1 = 3
N2 = 2
N3 = 1
UNDEFINED = -1
class Gender(IntEnum):
"""Mapping of gender to integers."""
FEMALE = 0
MALE = 1
@dataclass
class SubjectData:
"""
Store data about a single subject.
Attributes
----------
gender : int, optional
The subject's gender, stored as an integer as defined by `Gender`,
by default `None`.
age : int, optional
The subject's age in years, by default `None`.
weight : float, optional
The subject's weight in kg, by default `None`.
"""
gender: Optional[int] = None
age: Optional[int] = None
weight: Optional[float] = None
@dataclass
class SleepRecord:
"""
Store a single sleep record.
Attributes
----------
sleep_stages : np.ndarray, optional
Sleep stages according to AASM guidelines, stored as integers as
defined by :class:`SleepStage`, by default `None`.
sleep_stage_duration : int, optional
Duration of each sleep stage in seconds, by default `None`.
id : str, optional
The record's ID, by default `None`.
recording_start_time : datetime.time, optional
Time at which the recording was started, by default `None`.
heartbeat_times : np.ndarray, optional
Times of heartbeats relative to recording start in seconds, by
default `None`.
subject_data : SubjectData, optional
Dataclass containing subject data, such as gender or age, by
default `None`.
"""
sleep_stages: Optional[np.ndarray] = None
sleep_stage_duration: Optional[int] = None
id: Optional[str] = None
recording_start_time: Optional[datetime.time] = None
heartbeat_times: Optional[np.ndarray] = None
subject_data: Optional[SubjectData] = None
class _ParseNsrrXmlResult(NamedTuple):
sleep_stages: np.ndarray
sleep_stage_duration: int
recording_start_time: datetime.time
def _parse_nsrr_xml(xml_filepath: Path) -> _ParseNsrrXmlResult:
"""
Parse NSRR XML sleep stage annotation file.
Parameters
----------
xml_filepath : pathlib.Path
Path of the annotation file to read.
Returns
-------
sleep_stages : np.ndarray
Sleep stages according to AASM guidelines, stored as integers as
defined by :class:`SleepStage`.
sleep_stage_duration : int
Duration of each sleep stage in seconds.
recording_start_time : datetime.time
Time at which the recording was started.
"""
STAGE_MAPPING = {
'Wake|0': SleepStage.WAKE,
'Stage 1 sleep|1': SleepStage.N1,
'Stage 2 sleep|2': SleepStage.N2,
'Stage 3 sleep|3': SleepStage.N3,
'Stage 4 sleep|4': SleepStage.N3,
'REM sleep|5': SleepStage.REM,
'Unscored|9': SleepStage.UNDEFINED,
}
root = ElementTree.parse(xml_filepath).getroot()
epoch_length = root.findtext('EpochLength')
if epoch_length is None:
raise RuntimeError(f'EpochLength not found in {xml_filepath}.')
epoch_length = int(epoch_length)
start_time = None
annot_stages = []
for event in root.find('ScoredEvents'):
if event.find('EventConcept').text == 'Recording Start Time':
start_time = event.find('ClockTime').text.split()[1]
start_time = datetime.datetime.strptime(start_time, '%H.%M.%S').time()
if event.find('EventType').text == 'Stages|Stages':
epoch_duration = int(float(event.findtext('Duration')))
stage = STAGE_MAPPING[event.findtext('EventConcept')]
annot_stages.extend([stage] * int(epoch_duration / epoch_length))
if start_time is None:
raise RuntimeError(f'"Recording Start Time" not found in {xml_filepath}.')
return _ParseNsrrXmlResult(
np.array(annot_stages, dtype=np.int8),
epoch_length,
start_time,
)
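# Example usage (hypothetical path): parse one annotation file and inspect the
# scored stages.
#
#   res = _parse_nsrr_xml(Path('mesa-sleep-0001-nsrr.xml'))
#   print(res.sleep_stage_duration, res.sleep_stages[:10])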
def read_mesa(
records_pattern: str = '*',
heartbeats_source: str = 'annotation',
offline: bool = False,
keep_edfs: bool = False,
data_dir: Optional[Union[str, Path]] = None,
) -> Iterator[SleepRecord]:
"""
Lazily read records from MESA (https://sleepdata.org/datasets/mesa).
Each MESA record consists of an `.edf` file containing raw
polysomnography data and an `.xml` file containing annotated events.
Since the entire MESA dataset requires about 385 GB of disk space,
`.edf` files can be deleted after heartbeat times have been extracted.
Heartbeat times are cached in an `.npy` file in
`<data_dir>/mesa/preprocessed/heartbeats`.
Parameters
----------
records_pattern : str, optional
Glob-like pattern to select record IDs, by default `'*'`.
heartbeats_source : {'annotation', 'cached', 'ecg'}, optional
If `'annotation'` (default), get heartbeat times from
`polysomnography/annotations-rpoints/<record_id>-rpoints.csv` (not
available for all records). If `'ecg'`, use
`sleepecg.detect_heartbeats` on the ECG contained in
`polysomnography/edfs/<record_id>.edf` and cache the result to
`preprocessed/heartbeats/<record_id>.npy`. If `'cached'`, get the
cached heartbeats.
offline : bool, optional
If `True`, search for local files only instead of using the NSRR
API, by default `False`.
keep_edfs : bool, optional
If `False`, remove `.edf` after heartbeat detection, by default
`False`.
data_dir : str | pathlib.Path, optional
Directory where all datasets are stored. If `None` (default), the
value will be taken from the configuration.
Yields
------
SleepRecord
Each element in the generator is a :class:`SleepRecord`.
"""
from mne.io import read_raw_edf
DB_SLUG = 'mesa'
ANNOTATION_DIRNAME = 'polysomnography/annotations-events-nsrr'
EDF_DIRNAME = 'polysomnography/edfs'
HEARTBEATS_DIRNAME = 'preprocessed/heartbeats'
RPOINTS_DIRNAME = 'polysomnography/annotations-rpoints'
GENDER_MAPPING = {0: Gender.FEMALE, 1: Gender.MALE}
heartbeats_source_options = {'annotation', 'cached', 'ecg'}
if heartbeats_source not in heartbeats_source_options:
raise ValueError(
f'Invalid value for parameter `heartbeats_source`: {heartbeats_source}, '
f'possible options: {heartbeats_source_options}',
)
if data_dir is None:
data_dir = get_config('data_dir')
db_dir = Path(data_dir).expanduser() / DB_SLUG
annotations_dir = db_dir / ANNOTATION_DIRNAME
edf_dir = db_dir / EDF_DIRNAME
heartbeats_dir = db_dir / HEARTBEATS_DIRNAME
for directory in (annotations_dir, edf_dir, heartbeats_dir):
directory.mkdir(parents=True, exist_ok=True)
if not offline:
download_url = _get_nsrr_url(DB_SLUG)
subject_data_filename, subject_data_checksum = _list_nsrr(
'mesa',
'datasets',
'mesa-sleep-dataset-*.csv',
shallow=True,
)[0]
subject_data_filepath = db_dir / subject_data_filename
_download_nsrr_file(
download_url + subject_data_filename,
target_filepath=subject_data_filepath,
checksum=subject_data_checksum,
)
checksums = {}
xml_files = _list_nsrr(
DB_SLUG,
ANNOTATION_DIRNAME,
f'mesa-sleep-{records_pattern}-nsrr.xml',
shallow=True,
)
checksums.update(xml_files)
requested_records = [Path(file).stem[:-5] for file, _ in xml_files]
edf_files = _list_nsrr(
DB_SLUG,
EDF_DIRNAME,
f'mesa-sleep-{records_pattern}.edf',
shallow=True,
)
checksums.update(edf_files)
rpoints_files = _list_nsrr(
DB_SLUG,
RPOINTS_DIRNAME,
f'mesa-sleep-{records_pattern}-rpoint.csv',
shallow=True,
)
checksums.update(rpoints_files)
else:
subject_data_filepath = next((db_dir / 'datasets').glob('mesa-sleep-dataset-*.csv'))
xml_files = sorted(annotations_dir.glob(f'mesa-sleep-{records_pattern}-nsrr.xml'))
requested_records = [file.stem[:-5] for file in xml_files]
subject_data_array = np.loadtxt(
subject_data_filepath,
delimiter=',',
skiprows=1,
usecols=[0, 3, 5], # [mesaid, gender, age]
dtype=int,
)
subject_data = {}
for mesaid, gender, age in subject_data_array:
subject_data[f'mesa-sleep-{mesaid:04}'] = SubjectData(
gender=GENDER_MAPPING[gender],
age=age,
)
for record_id in requested_records:
heartbeats_file = heartbeats_dir / f'{record_id}.npy'
if heartbeats_source == 'annotation':
rpoints_filename = f'{RPOINTS_DIRNAME}/{record_id}-rpoint.csv'
rpoints_filepath = db_dir / rpoints_filename
if not rpoints_filepath.is_file():
if not offline and rpoints_filename in checksums:
_download_nsrr_file(
download_url + rpoints_filename,
rpoints_filepath,
checksums[rpoints_filename],
)
else:
print(f'Skipping {record_id} due to missing heartbeat annotations.')
continue
heartbeat_times = np.loadtxt(
rpoints_filepath,
delimiter=',',
skiprows=1,
usecols=18, # column 18 ('seconds') contains the annotated heartbeat times
)
# for some reason some (39) records have unsorted annotations
heartbeat_times.sort()
elif heartbeats_source == 'cached':
if not heartbeats_file.is_file():
print(f'Skipping {record_id} due to missing cached heartbeats.')
continue
heartbeat_times = np.load(heartbeats_file)
elif heartbeats_source == 'ecg':
edf_filename = EDF_DIRNAME + f'/{record_id}.edf'
edf_filepath = db_dir / edf_filename
edf_was_available = edf_filepath.is_file()
if not offline:
_download_nsrr_file(
download_url + edf_filename,
edf_filepath,
checksums[edf_filename],
)
rec = read_raw_edf(edf_filepath, verbose=False)
ecg = rec.get_data('EKG').ravel()
fs = rec.info['sfreq']
heartbeat_indices = detect_heartbeats(ecg, fs)
heartbeat_times = heartbeat_indices / fs
np.save(heartbeats_file, heartbeat_times)
if not edf_was_available and not keep_edfs:
edf_filepath.unlink()
xml_filename = ANNOTATION_DIRNAME + f'/{record_id}-nsrr.xml'
xml_filepath = db_dir / xml_filename
if not offline:
_download_nsrr_file(
download_url + xml_filename,
xml_filepath,
checksums[xml_filename],
)
parsed_xml = _parse_nsrr_xml(xml_filepath)
yield SleepRecord(
sleep_stages=parsed_xml.sleep_stages,
sleep_stage_duration=parsed_xml.sleep_stage_duration,
id=record_id,
recording_start_time=parsed_xml.recording_start_time,
heartbeat_times=heartbeat_times,
subject_data=subject_data[record_id],
)
def read_slpdb(
records_pattern: str = '*',
offline: bool = False,
data_dir: Optional[Union[str, Path]] = None,
) -> Iterator[SleepRecord]:
"""
Lazily read records from SLPDB (https://physionet.org/content/slpdb).
Required files are downloaded from PhysioNet to `<data_dir>/slpdb`.
Parameters
----------
records_pattern : str, optional
Glob-like pattern to select record IDs, by default `'*'`.
offline : bool, optional
If `True`, search for local files only instead of downloading from
PhysioNet, by default `False`.
data_dir : str | pathlib.Path, optional
Directory where all datasets are stored. If `None` (default), the
value will be taken from the configuration.
Yields
------
SleepRecord
Each element in the generator is a :class:`SleepRecord`.
"""
# https://physionet.org/content/slpdb/1.0.0/
import wfdb
DB_SLUG = 'slpdb'
STAGE_MAPPING = {
'W': SleepStage.WAKE,
'R': SleepStage.REM,
'1': SleepStage.N1,
'2': SleepStage.N2,
'3': SleepStage.N3,
'4': SleepStage.N3,
}
if data_dir is None:
data_dir = get_config('data_dir')
data_dir = Path(data_dir).expanduser()
db_dir = data_dir / DB_SLUG
requested_records = _list_physionet(
data_dir=data_dir,
db_slug=DB_SLUG,
pattern=records_pattern,
)
if not offline:
download_physionet(
data_dir=data_dir,
db_slug=DB_SLUG,
requested_records=requested_records,
extensions=['.hea', '.dat', '.st'],
)
for record_id in requested_records:
record_file = str(db_dir / record_id)
record = wfdb.rdrecord(record_file)
start_time = record.base_time
ecg = np.asarray(record.p_signal[:, record.sig_name.index('ECG')])
fs = record.fs
heartbeat_indices = detect_heartbeats(ecg, fs)
heartbeat_times = heartbeat_indices / fs
annot_st = wfdb.rdann(record_file, 'st')
# Some 30 second windows don't have a sleep stage annotation, so
# the annotation array is initialized with `SleepStage.UNDEFINED`
# for every 30 second window.
for sample_time, annotation in zip(annot_st.sample[::-1], annot_st.aux_note[::-1]):
if annotation[0] in STAGE_MAPPING:
number_of_sleep_stages = sample_time // (30 * fs) + 1
break
        sleep_stages = np.full(number_of_sleep_stages, SleepStage.UNDEFINED)
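        # Sketch (assumption): the source is truncated here; the annotations
        # can be mapped into 30-second windows and the record yielded as below.
        for sample_time, annotation in zip(annot_st.sample, annot_st.aux_note):
            if annotation[0] in STAGE_MAPPING:
                sleep_stages[int(sample_time // (30 * fs))] = STAGE_MAPPING[annotation[0]]
        yield SleepRecord(
            sleep_stages=sleep_stages,
            sleep_stage_duration=30,
            id=record_id,
            recording_start_time=start_time,
            heartbeat_times=heartbeat_times,
        )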
# -- coding: utf-8 --
from __future__ import division
from __future__ import print_function
from spatial_temporal_model.hyparameter import parameter
from spatial_temporal_model.encoder import cnn_lstm
from model.decoder import Dcoderlstm
from model.utils import construct_feed_dict
from model.encoder import Encoderlstm
import pandas as pd
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import model.normalization as normalization
import spatial_temporal_model.process as data_load
import os
import argparse
tf.reset_default_graph()
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
logs_path="board"
def embedding(inputs,
vocab_size,
num_units,
zero_pad=False,
scale=True,
scope="embedding",
reuse=None):
'''Embeds a given tensor.
Args:
inputs: A `Tensor` with type `int32` or `int64` containing the ids
to be looked up in `lookup table`.
vocab_size: An int. Vocabulary size.
num_units: An int. Number of embedding hidden units.
      zero_pad: A boolean. If True, all the values of the first row (id 0)
should be constant zeros.
scale: A boolean. If True. the outputs is multiplied by sqrt num_units.
scope: Optional scope for `variable_scope`.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
A `Tensor` with one more rank than inputs's. The last dimensionality
should be `num_units`.
For example,
```
import tensorflow as tf
inputs = tf.to_int32(tf.reshape(tf.range(2*3), (2, 3)))
outputs = embedding(inputs, 6, 2, zero_pad=True)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
        print(sess.run(outputs))
>>
[[[ 0. 0. ]
[ 0.09754146 0.67385566]
[ 0.37864095 -0.35689294]]
[[-1.01329422 -1.09939694]
[ 0.7521342 0.38203377]
[-0.04973143 -0.06210355]]]
```
```
import tensorflow as tf
inputs = tf.to_int32(tf.reshape(tf.range(2*3), (2, 3)))
outputs = embedding(inputs, 6, 2, zero_pad=False)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
        print(sess.run(outputs))
>>
[[[-0.19172323 -0.39159766]
[-0.43212751 -0.66207761]
[ 1.03452027 -0.26704335]]
[[-0.11634696 -0.35983452]
[ 0.50208133 0.53509563]
[ 1.22204471 -0.96587461]]]
```
'''
with tf.variable_scope(scope, reuse=reuse):
lookup_table = tf.get_variable('lookup_table',
dtype=tf.float32,
shape=[vocab_size, num_units],
initializer=tf.truncated_normal_initializer(mean=0, stddev=1, seed=0))
if zero_pad:
lookup_table = tf.concat((tf.zeros(shape=[1, num_units]),
lookup_table[1:, :]), 0)
outputs = tf.nn.embedding_lookup(lookup_table, inputs)
if scale:
outputs = outputs * (num_units ** 0.5)
return outputs
class Model(object):
def __init__(self,para):
self.para=para
self.pollutant_id={'AQI':0, 'PM2.5':1,'PM10':3, 'SO2':5, 'NO2':7, 'O3':9, 'CO':13}
# define placeholders
self.placeholders = {
# None : batch _size * time _size
'month': tf.placeholder(tf.int32, shape=(None, self.para.input_length+self.para.output_length), name='input_month'),
'day': tf.placeholder(tf.int32, shape=(None, self.para.input_length+self.para.output_length), name='input_day'),
'hour': tf.placeholder(tf.int32, shape=(None, self.para.input_length+self.para.output_length), name='input_hour'),
'features1': tf.placeholder(tf.float32, shape=[None, self.para.input_length, self.para.site_num, self.para.features1],name='input_1'),
'features2': tf.placeholder(tf.float32, shape=[None, self.para.input_length, self.para.features2], name='input_2'),
'labels': tf.placeholder(tf.float32, shape=[None, self.para.output_length]),
'dropout': tf.placeholder_with_default(0., shape=()),
'is_training': tf.placeholder(tf.bool, shape=(),name='input_is_training'),
}
self.model()
def model(self):
'''
:param batch_size: 64
:param encoder_layer:
:param decoder_layer:
:param encoder_nodes:
:param prediction_size:
:param is_training: True
:return:
'''
with tf.variable_scope('month'):
self.m_emb = embedding(self.placeholders['month'], vocab_size=13, num_units=self.para.hidden_size,
scale=False, scope="month_embed")
print('d_emd shape is : ', self.m_emb.shape)
with tf.variable_scope('day'):
self.d_emb = embedding(self.placeholders['day'], vocab_size=32, num_units=self.para.hidden_size,
scale=False, scope="day_embed")
print('d_emd shape is : ', self.d_emb.shape)
with tf.variable_scope('hour'):
self.h_emb = embedding(self.placeholders['hour'], vocab_size=24, num_units=self.para.hidden_size,
scale=False, scope="hour_embed")
print('h_emd shape is : ', self.h_emb.shape)
# create model
# this step use to encoding the input series data
'''
rlstm, return --- for example ,output shape is :(32, 3, 128)
axis=0: bath size
axis=1: input data time size
axis=2: output feature size
'''
# shape is [batch, input length, embedding size]
emb=tf.add_n([self.m_emb,self.d_emb,self.h_emb])
        # CNN spatio-temporal feature extraction
l = cnn_lstm(batch_size=self.para.batch_size,
layer_num=self.para.hidden_layer,
nodes=self.para.hidden_size,
highth=self.para.h,
width=self.para.w,
placeholders=self.placeholders)
# [batch, time ,hidden size]
(h_states1, c_states1) = l.encoding(self.placeholders['features1'], emb[:,:self.para.input_length,:])
print('h_states1 shape is : ', h_states1.shape)
        # LSTM temporal feature extraction
encoder_init =Encoderlstm(self.para.batch_size,
self.para.hidden_layer,
self.para.hidden_size,
placeholders=self.placeholders)
## [batch, time , hidden size]
(h_states2, c_states2) = encoder_init.encoding(self.placeholders['features2'], emb[:,:self.para.input_length,:])
print('h_states2 shape is : ', h_states2.shape)
h_states=tf.layers.dense(tf.concat([h_states1,h_states2],axis=-1),units=self.para.hidden_size, activation=tf.nn.relu, name='layers')
# this step to predict the pollutant concentration
'''
decoder, return --- for example ,output shape is :(32, 162, 1)
axis=0: bath size
axis=1: numbers of the nodes
axis=2: label size
'''
decoder_init = Dcoderlstm(self.para.batch_size,
self.para.output_length,
self.para.hidden_layer,
self.para.hidden_size,
placeholders=self.placeholders)
self.pres = decoder_init.decoding(h_states, emb[:,self.para.input_length: ,:])
print('pres shape is : ', self.pres.shape)
self.cross_entropy = tf.reduce_mean(
tf.sqrt(tf.reduce_mean(tf.square(self.pres + 1e-10 - self.placeholders['labels']), axis=0)))
print(self.cross_entropy)
print('cross shape is : ',self.cross_entropy.shape)
tf.summary.scalar('cross_entropy',self.cross_entropy)
# backprocess and update the parameters
self.train_op = tf.train.AdamOptimizer(self.para.learning_rate).minimize(self.cross_entropy)
print('#...............................in the training step.....................................#')
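    # Example (hypothetical arrays and session): one training step feeding the
    # placeholders directly.
    #
    #   feed = {model.placeholders['features1']: x1,  # (batch, in_len, sites, f1)
    #           model.placeholders['features2']: x2,  # (batch, in_len, f2)
    #           model.placeholders['month']: m, model.placeholders['day']: d,
    #           model.placeholders['hour']: h, model.placeholders['labels']: y,
    #           model.placeholders['is_training']: True}
    #   _, loss = sess.run([model.train_op, model.cross_entropy], feed_dict=feed)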
def test(self):
'''
:param batch_size: usually use 1
:param encoder_layer:
:param decoder_layer:
:param encoder_nodes:
:param prediction_size:
:param is_training: False
:return:
'''
model_file = tf.train.latest_checkpoint('weights/')
self.saver.restore(self.sess, model_file)
def accuracy(self,label,predict):
'''
:param label: represents the observed value
:param predict: represents the predicted value
:param epoch:
:param steps:
:return:
'''
error = label - predict
average_error = np.mean(np.fabs(error.astype(float)))
print("mae is : %.6f" % (average_error))
rmse_error = np.sqrt(np.mean(np.square(label - predict)))
print("rmse is : %.6f" % (rmse_error))
        cor = np.mean(np.multiply((label - np.mean(label)),
                                  (predict - np.mean(predict)))) / (np.std(predict) * np.std(label))  # Pearson correlation coefficient
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 22 05:44:13 2019
@author: alan_
"""
### The functions developed in part 1 will be applied to the records.
import numpy as np
from matplotlib import pyplot as plt
from scipy import signal as sig
from scipy import fftpack as fft
from scipy.signal import chebwin
def PSD(matrix):
N1=np.size(matrix,1)
N2=np.size(matrix,0)
#rxx=sig.convolve2d(matrix,matrix,mode='same')
rxx=matrix
rxx=rxx*(chebwin(N1,at=100).reshape(1,N1)*np.ones((N2,1)))
sxx=np.fft.fft(rxx,axis=1)
mag_sxx=(np.abs(sxx[:,0:N1//2])*ts)**2
mag_sxx=10*np.log10(np.mean(mag_sxx,0))
F=np.linspace(0,fs//2,len(mag_sxx))
return F,mag_sxx
def Periodograma(p,v):
N=len(p)
if N % v!=0:
Nzeros=v-(N % v)
        x=np.append(p,np.zeros(Nzeros)) # pad with zeros to improve the estimate and make the reshape possible
else:
x=p
Nv=len(x)//v
matrix=x.reshape(v,Nv)
F,sxx=PSD(matrix)
plt.plot(F[0:len(F)//4],sxx[0:len(F)//4],label=lavel[j])
legend = plt.legend(loc='upper right', shadow=True, fontsize='small')
legend.get_frame().set_facecolor('pink')
plt.xlabel('Hz')
plt.ylabel('dBs')
plt.title(senal[i])
plt.grid()
def RF(den,num,fm):
w, h = sig.freqz(den,num)
h[h==0] = 1E-5
H = 20*np.log10( np.abs(h) )
W = np.angle (h)
W = np.unwrap (W)
W = np.degrees(W)
    w = np.linspace(0, fs//2, H.shape[0])
import numpy as np
import numpy.linalg as la
def ladmm(x0, A, b, lam=1, r=1, niter=10, tol=1e-3):
(m, n) = A.shape
z = x0
x = x0
u = np.zeros((n, 1))
Q = la.inv(A.T.dot(A) + r * np.eye(n))
sthreshv = np.vectorize(sthresh)
k = 0
while k < niter:
x = Q.dot(A.T.dot(b) + r * (z - u))
z = sthreshv(x + u, lam / r)
u = u + x - z
k += 1
return (x, z, u)
def admm(x0, A, b, G, lam=1, r=1, niter=10, tol=1e-3):
(m, n) = A.shape
zs = [x0[gi] for gi in G]
x = x0
us = [np.zeros(len(gi)) for gi in G]
Q = la.inv(A.T.dot(A) + r * np.eye(n))
k = 0
while k < niter:
x = update_x(zs, us, G, r, A, b, Q)
zs = update_zs(x, us, lam, r, G)
us = update_us(us, x, zs, G)
k += 1
return (x, zs, us)
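# Example usage (hypothetical overlapping groups): solve a group-lasso problem
# where the coefficients are partitioned into index groups.
#
#   G = [np.array([0, 1, 2]), np.array([2, 3, 4])]
#   x0 = np.zeros((5, 1))
#   x, zs, us = admm(x0, A, b, G, lam=0.5, r=1.0, niter=50)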
def update_x(zs, us, G, r, A, b, Q):
n = A.shape[1]
N = len(G)
votes = np.zeros((n, 1))
zsum = np.zeros((n, 1))
usum = np.zeros((n, 1))
for i in range(N):
votes[G[i]] += 1
zsum[G[i]] += zs[i]
usum[G[i]] += us[i]
zbar = np.divide(zsum, votes)
ubar = np.divide(usum, votes)
x = Q.dot(A.T.dot(b) + r * (zbar - ubar))
return (x)
def update_zs(x, us, lam, r, G):
N = len(us)
zs = [Sthresh(x[G[i]] + us[i], lam / r) for i in range(N)]
return (zs)
def update_us(us, x, zs, G):
N = len(G)
usnew = [us[i] + x[G[i]] - zs[i] for i in range(N)]
return (usnew)
def sthresh(a, thresh):
'''
Apply soft threshold to the scalar `a`
'''
norm = abs(a)
if norm == 0:
scal = 0
else:
scal = (1 - thresh / norm)
if scal < 0:
scal = 0
return (scal * a)
def Sthresh(vec, thresh):
'''
Perform vector form of soft thresholding of vector `vec`
'''
norm = la.norm(vec, 2)
if norm == 0:
scal = 0
else:
scal = (1 - thresh / norm)
if scal < 0:
scal = 0
return (scal * vec)
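# Worked example: sthresh shrinks a scalar toward zero by `thresh`, so
# sthresh(3.0, 1.0) == 2.0 and sthresh(-0.5, 1.0) == 0.0; Sthresh applies the
# same shrinkage to a vector's Euclidean norm.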
def sim(m, n, G, grate=0.5, comp=True, sig2=1):
A = np.random.randn(m, n)
N = len(G)
if comp: # complement of union
live = np.random.binomial(1, grate, N)
mask = np.ones(n) # mask starts on, turn stuff off
for i in range(N):
mask[G[i]] *= live[i]
xstar = np.random.randn(n) * mask
else:
live = np.random.binomial(1, grate, N)
mask = np.zeros(n) # mask starts off, turn stuff on
for i in range(N):
mask[G[i]] = 1 * live[i]
xstar = np.random.randn(n) * mask
b = A.dot(xstar) + sig2 * np.random.randn(m)
return (xstar, A, b)
def sim2(m, n, G, xstar):
A = np.random.randn(m,n)
N = len(G)
b = A.dot(xstar) + np.random.randn(m,1)
return(A,b,G,xstar)
def test2(A, b, G, x0, xstar, r,lam,eps,niter):
xhat = ladmm(x0, A, b, lam, r, niter)[0]
    xhat[np.abs(xhat) < eps] = 0  # zero out entries below the tolerance eps
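    # Sketch (assumption): the source is truncated here; a natural conclusion
    # is to report the recovery error against the ground truth.
    return la.norm(xhat - xstar)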
import numpy as np
from scipy.interpolate import interp2d,interp1d
from .params import default_params
import camb
from camb import model
import scipy.interpolate as si
import scipy.constants as constants
"""
This module will (eventually) abstract away the choice of boltzmann codes.
However, it does it stupidly by simply providing a common
stunted interface. It makes no guarantee that the same set
of parameters passed to the two engines will produce the same
results. It could be a test-bed for converging towards that.
"""
class Cosmology(object):
def __init__(self,params=None,halofit=None,engine='camb'):
assert engine in ['camb','class']
if engine=='class': raise NotImplementedError
self.p = dict(params) if params is not None else {}
for param in default_params.keys():
if param not in self.p.keys(): self.p[param] = default_params[param]
# Cosmology
self._init_cosmology(self.p,halofit)
def sigma_crit(self,zlens,zsource):
Gval = 4.517e-48 # Newton G in Mpc,seconds,Msun units
cval = 9.716e-15 # speed of light in Mpc,second units
Dd = self.angular_diameter_distance(zlens)
Ds = self.angular_diameter_distance(zsource)
Dds = np.asarray([self.results.angular_diameter_distance2(zl,zsource) for zl in zlens])
return cval**2 * Ds / 4 / np.pi / Gval / Dd / Dds
def P_mm_linear(self,zs,ks):
pass
def P_mm_nonlinear(self,ks,zs,halofit_version='mead'):
pass
def comoving_radial_distance(self,z):
return self.results.comoving_radial_distance(z)
def angular_diameter_distance(self,z):
return self.results.angular_diameter_distance(z)
def hubble_parameter(self,z):
# H(z) in km/s/Mpc
return self.results.hubble_parameter(z)
def h_of_z(self,z):
# H(z) in 1/Mpc
return self.results.h_of_z(z)
def _init_cosmology(self,params,halofit):
try:
theta = params['theta100']/100.
H0 = None
print("WARNING: Using theta100 parameterization. H0 ignored.")
except:
H0 = params['H0']
theta = None
try:
omm = params['omm']
h = params['H0']/100.
params['omch2'] = omm*h**2-params['ombh2']
print("WARNING: omm specified. Ignoring omch2.")
except:
pass
self.pars = camb.set_params(ns=params['ns'],As=params['As'],H0=H0,
cosmomc_theta=theta,ombh2=params['ombh2'],
omch2=params['omch2'], mnu=params['mnu'],
tau=params['tau'],nnu=params['nnu'],
num_massive_neutrinos=
params['num_massive_neutrinos'],
w=params['w0'],wa=params['wa'],
dark_energy_model='ppf',
halofit_version=self.p['default_halofit'] if halofit is None else halofit,
AccuracyBoost=2)
self.results = camb.get_background(self.pars)
self.params = params
self.h = self.params['H0']/100.
omh2 = self.params['omch2']+self.params['ombh2'] # FIXME: neutrinos
self.om0 = omh2 / (self.params['H0']/100.)**2.
try: self.as8 = self.params['as8']
except: self.as8 = 1
def _get_matter_power(self,zs,ks,nonlinear=False):
PK = camb.get_matter_power_interpolator(self.pars, nonlinear=nonlinear,
hubble_units=False,
k_hunit=False, kmax=ks.max(),
zmax=zs.max()+1.)
return (self.as8**2.) * PK.P(zs, ks, grid=True)
def rho_matter_z(self,z):
return self.rho_critical_z(0.) * self.om0 \
* (1+np.atleast_1d(z))**3. # in msolar / megaparsec3
def omz(self,z):
return self.rho_matter_z(z)/self.rho_critical_z(z)
def rho_critical_z(self,z):
Hz = self.hubble_parameter(z) * 3.241e-20 # SI # FIXME: constants need checking
G = 6.67259e-11 # SI
rho = 3.*(Hz**2.)/8./np.pi/G # SI
return rho * 1.477543e37 # in msolar / megaparsec3
def D_growth(self, a):
# From <NAME>?
_amin = 0.001 # minimum scale factor
_amax = 1.0 # maximum scale factor
_na = 512 # number of points in interpolation arrays
atab = np.linspace(_amin,
_amax,
_na)
ks = np.logspace(np.log10(1e-5),np.log10(1.),num=100)
zs = a2z(atab)
deltakz = self.results.get_redshift_evolution(ks, zs, ['delta_cdm']) #index: k,z,0
D_camb = deltakz[0,:,0]/deltakz[0,0,0]
_da_interp = interp1d(atab, D_camb, kind='linear')
_da_interp_type = "camb"
return _da_interp(a)/_da_interp(1.0)
def P_lin(self,ks,zs,knorm = 1e-4,kmax = 0.1):
"""
This function will provide the linear matter power spectrum used in calculation
of sigma2. It is written as
P_lin(k,z) = norm(z) * T(k)**2
where T(k) is the <NAME>, 1998 transfer function.
Care has to be taken about interpreting this beyond LCDM.
For example, the transfer function can be inaccurate for nuCDM and wCDM cosmologies.
If this function is only used to model sigma2 -> N(M,z) -> halo model power spectra at small
scales, and cosmological dependence is obtained through an accurate CAMB based P(k),
one should be fine.
"""
tk = self.Tk(ks,'eisenhu_osc')
assert knorm<kmax
PK = camb.get_matter_power_interpolator(self.pars, nonlinear=False,
hubble_units=False, k_hunit=False, kmax=kmax,
zmax=zs.max()+1.)
pnorm = PK.P(zs, knorm,grid=True)
tnorm = self.Tk(knorm,'eisenhu_osc') * knorm**(self.params['ns'])
plin = (pnorm/tnorm) * tk**2. * ks**(self.params['ns'])
return (self.as8**2.) *plin
def P_lin_slow(self,ks,zs,kmax = 0.1):
PK = camb.get_matter_power_interpolator(self.pars, nonlinear=False,
hubble_units=False, k_hunit=False, kmax=kmax,
zmax=zs.max()+1.)
plin = PK.P(zs, ks,grid=True)
return (self.as8**2.) * plin
def Tk(self,ks,type ='eisenhu_osc'):
"""
Pulled from cosmicpy https://github.com/cosmicpy/cosmicpy/blob/master/LICENSE.rst
"""
k = ks/self.h
self.tcmb = 2.726
T_2_7_sqr = (self.tcmb/2.7)**2
h2 = self.h**2
w_m = self.params['omch2'] + self.params['ombh2']
w_b = self.params['ombh2']
self._k_eq = 7.46e-2*w_m/T_2_7_sqr / self.h # Eq. (3) [h/Mpc]
self._z_eq = 2.50e4*w_m/(T_2_7_sqr)**2 # Eq. (2)
# z drag from Eq. (4)
b1 = 0.313*pow(w_m, -0.419)*(1.0+0.607*pow(w_m, 0.674))
b2 = 0.238*pow(w_m, 0.223)
self._z_d = 1291.0*pow(w_m, 0.251)/(1.0+0.659*pow(w_m, 0.828)) * \
(1.0 + b1*pow(w_b, b2))
# Ratio of the baryon to photon momentum density at z_d Eq. (5)
self._R_d = 31.5 * w_b / (T_2_7_sqr)**2 * (1.e3/self._z_d)
# Ratio of the baryon to photon momentum density at z_eq Eq. (5)
self._R_eq = 31.5 * w_b / (T_2_7_sqr)**2 * (1.e3/self._z_eq)
# Sound horizon at drag epoch in h^-1 Mpc Eq. (6)
self.sh_d = 2.0/(3.0*self._k_eq) * np.sqrt(6.0/self._R_eq) * \
np.log((np.sqrt(1.0 + self._R_d) + np.sqrt(self._R_eq + self._R_d)) /
(1.0 + np.sqrt(self._R_eq)))
# Eq. (7) but in [hMpc^{-1}]
self._k_silk = 1.6 * pow(w_b, 0.52) * pow(w_m, 0.73) * \
(1.0 + pow(10.4*w_m, -0.95)) / self.h
Omega_m = self.om0
fb = self.params['ombh2'] / (self.params['omch2']+self.params['ombh2']) # self.Omega_b / self.Omega_m
fc = self.params['omch2'] / (self.params['omch2']+self.params['ombh2']) # self.params['ombh2'] #(self.Omega_m - self.Omega_b) / self.Omega_m
alpha_gamma = 1.-0.328*np.log(431.*w_m)*w_b/w_m + \
0.38*np.log(22.3*w_m)*(fb)**2
gamma_eff = Omega_m*self.h * \
(alpha_gamma + (1.-alpha_gamma)/(1.+(0.43*k*self.sh_d)**4))
res = np.zeros_like(k)
if(type == 'eisenhu'):
q = k * pow(self.tcmb/2.7, 2)/gamma_eff
# EH98 (29) #
L = np.log(2.*np.exp(1.0) + 1.8*q)
C = 14.2 + 731.0/(1.0 + 62.5*q)
res = L/(L + C*q*q)
elif(type == 'eisenhu_osc'):
# Cold dark matter transfer function
# EH98 (11, 12)
a1 = pow(46.9*w_m, 0.670) * (1.0 + pow(32.1*w_m, -0.532))
a2 = pow(12.0*w_m, 0.424) * (1.0 + pow(45.0*w_m, -0.582))
alpha_c = pow(a1, -fb) * pow(a2, -fb**3)
b1 = 0.944 / (1.0 + pow(458.0*w_m, -0.708))
b2 = pow(0.395*w_m, -0.0266)
beta_c = 1.0 + b1*(pow(fc, b2) - 1.0)
beta_c = 1.0 / beta_c
# EH98 (19). [k] = h/Mpc
def T_tilde(k1, alpha, beta):
# EH98 (10); [q] = 1 BUT [k] = h/Mpc
q = k1 / (13.41 * self._k_eq)
L = np.log(np.exp(1.0) + 1.8 * beta * q)
C = 14.2 / alpha + 386.0 / (1.0 + 69.9 * pow(q, 1.08))
T0 = L/(L + C*q*q)
return T0
# EH98 (17, 18)
f = 1.0 / (1.0 + (k * self.sh_d / 5.4)**4)
Tc = f * T_tilde(k, 1.0, beta_c) + \
(1.0 - f) * T_tilde(k, alpha_c, beta_c)
# Baryon transfer function
# EH98 (19, 14, 21)
y = (1.0 + self._z_eq) / (1.0 + self._z_d)
x = np.sqrt(1.0 + y)
G_EH98 = y * (-6.0 * x +
(2.0 + 3.0*y) * np.log((x + 1.0) / (x - 1.0)))
alpha_b = 2.07 * self._k_eq * self.sh_d * \
pow(1.0 + self._R_d, -0.75) * G_EH98
beta_node = 8.41 * pow(w_m, 0.435)
tilde_s = self.sh_d / pow(1.0 + (beta_node /
(k * self.sh_d))**3, 1.0/3.0)
beta_b = 0.5 + fb + (3.0 - 2.0 * fb) * np.sqrt((17.2 * w_m)**2 + 1.0)
# [tilde_s] = Mpc/h
Tb = (T_tilde(k, 1.0, 1.0) / (1.0 + (k * self.sh_d / 5.2)**2) +
alpha_b / (1.0 + (beta_b/(k * self.sh_d))**3) *
np.exp(-pow(k / self._k_silk, 1.4))) * np.sinc(k*tilde_s/np.pi)
# Total transfer function
res = fb * Tb + fc * Tc
return res
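    # Example usage (hypothetical grid): evaluate the EH98 transfer function on
    # log-spaced wavenumbers.
    #
    #   cosmo = Cosmology()
    #   tk = cosmo.Tk(np.logspace(-3, 1, 200), type='eisenhu_osc')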
def lensing_window(self,ezs,zs,dndz=None):
"""
Generates a lensing convergence window
W(z).
zs: If (nz,) with nz>2 and dndz is not None, then these are the points
at which dndz is defined. If nz=2 and no dndz is provided, it is (zmin,zmax)
for a top-hat window. If a single number, and no dndz is provided,
it is a delta function source at zs.
"""
zs = np.array(zs).reshape(-1)
H0 = self.h_of_z(0.)
H = self.h_of_z(ezs)
chis = self.comoving_radial_distance(ezs)
chistar = self.comoving_radial_distance(zs)
if zs.size==1:
assert dndz is None
integrand = (chistar - chis)/chistar
integral = integrand
integral[ezs>zs] = 0
else:
nznorm = np.trapz(dndz,zs)
dndz = dndz/nznorm
# integrand has shape (num_z,num_zs) to be integrated over zs
integrand = (chistar[None,:] - chis[:,None])/chistar[None,:] * dndz[None,:]
for i in range(integrand.shape[0]): integrand[i][zs<ezs[i]] = 0 # FIXME: vectorize this
integral = np.trapz(integrand,zs,axis=-1)
return 1.5*self.om0*H0**2.*(1.+ezs)*chis/H * integral
def C_kg(self,ells,zs,ks,Pgm,gzs,gdndz=None,lzs=None,ldndz=None,lwindow=None):
gzs = np.array(gzs).reshape(-1)
if lwindow is None: Wz1s = self.lensing_window(gzs,lzs,ldndz)
else: Wz1s = lwindow
chis = self.comoving_radial_distance(gzs)
hzs = self.h_of_z(gzs) # 1/Mpc
if gzs.size>1:
nznorm = np.trapz(gdndz,gzs)
Wz2s = gdndz/nznorm
else:
Wz2s = 1.
return limber_integral(ells,zs,ks,Pgm,gzs,Wz1s,Wz2s,hzs,chis)
def C_gg(self,ells,zs,ks,Pgg,gzs,gdndz=None,zmin=None,zmax=None):
gzs = np.asarray(gzs)
chis = self.comoving_radial_distance(gzs)
hzs = self.h_of_z(gzs) # 1/Mpc
if gzs.size>1:
nznorm = np.trapz(gdndz,gzs)
Wz1s = gdndz/nznorm
Wz2s = gdndz/nznorm
else:
dchi = self.comoving_radial_distance(zmax) - self.comoving_radial_distance(zmin)
Wz1s = 1.
Wz2s = 1./dchi/hzs
return limber_integral(ells,zs,ks,Pgg,gzs,Wz1s,Wz2s,hzs,chis)
def C_kk(self,ells,zs,ks,Pmm,lzs1=None,ldndz1=None,lzs2=None,ldndz2=None,lwindow1=None,lwindow2=None):
if lwindow1 is None: lwindow1 = self.lensing_window(zs,lzs1,ldndz1)
if lwindow2 is None: lwindow2 = self.lensing_window(zs,lzs2,ldndz2)
chis = self.comoving_radial_distance(zs)
hzs = self.h_of_z(zs) # 1/Mpc
return limber_integral(ells,zs,ks,Pmm,zs,lwindow1,lwindow2,hzs,chis)
def C_gy(self,ells,zs,ks,Pgp,gzs,gdndz=None,zmin=None,zmax=None):
gzs = np.asarray(gzs)
chis = self.comoving_radial_distance(gzs)
hzs = self.h_of_z(gzs) # 1/Mpc
if gzs.size>1:
nznorm = np.trapz(gdndz,gzs)
            Wz1s = gdndz/nznorm
Wz2s = gdndz/nznorm
else:
dchi = self.comoving_radial_distance(zmax) - self.comoving_radial_distance(zmin)
Wz1s = 1.
Wz2s = 1./dchi/hzs
        return limber_integral(ells,zs,ks,Pgp,gzs,1,Wz2s,hzs,chis)
def C_ky(self,ells,zs,ks,Pym,lzs1=None,ldndz1=None,lzs2=None,ldndz2=None,lwindow1=None):
if lwindow1 is None: lwindow1 = self.lensing_window(zs,lzs1,ldndz1)
chis = self.comoving_radial_distance(zs)
hzs = self.h_of_z(zs) # 1/Mpc
return limber_integral(ells,zs,ks,Pym,zs,lwindow1,1,hzs,chis)
def C_yy(self,ells,zs,ks,Ppp,dndz=None,zmin=None,zmax=None):
chis = self.comoving_radial_distance(zs)
hzs = self.h_of_z(zs) # 1/Mpc
# Convert to y units
#
return limber_integral(ells,zs,ks,Ppp,zs,1,1,hzs,chis)
def total_matter_power_spectrum(self,Pnn,Pne,Pee):
omtoth2 = self.p['omch2'] + self.p['ombh2']
fc = self.p['omch2']/omtoth2
fb = self.p['ombh2']/omtoth2
return fc**2.*Pnn + 2.*fc*fb*Pne + fb*fb*Pee
def total_matter_galaxy_power_spectrum(self,Pgn,Pge):
omtoth2 = self.p['omch2'] + self.p['ombh2']
fc = self.p['omch2']/omtoth2
fb = self.p['ombh2']/omtoth2
return fc*Pgn + fb*Pge
def a2z(a): return (1.0/a)-1.0
def limber_integral(ells,zs,ks,Pzks,gzs,Wz1s,Wz2s,hzs,chis):
"""
Get C(ell) = \int dz (H(z)/c) W1(z) W2(z) Pzks(z,k=ell/chi) / chis**2.
ells: (nells,) multipoles looped over
zs: redshifts (npzs,) corresponding to Pzks
ks: comoving wavenumbers (nks,) corresponding to Pzks
Pzks: (npzs,nks) power specrum
gzs: (nzs,) corersponding to Wz1s, W2zs, Hzs and chis
Wz1s: weight function (nzs,)
Wz2s: weight function (nzs,)
hzs: Hubble parameter (nzs,) in *1/Mpc* (e.g. camb.results.h_of_z(z))
chis: comoving distances (nzs,)
We interpolate P(z,k)
"""
    hzs = np.array(hzs)
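    # Sketch (assumption): the original body is truncated here; the integral
    # can be completed by interpolating P(z, k) onto k = (ell + 1/2)/chi and
    # doing a trapezoidal sum over the redshift window.
    f = interp2d(ks, zs, Pzks, bounds_error=True)
    Cells = np.zeros(len(ells))
    for i, ell in enumerate(ells):
        kevals = (ell + 0.5) / chis
        pevals = np.array([f(k, z)[0] for k, z in zip(kevals, gzs)])
        integrand = pevals * Wz1s * Wz2s * hzs / chis**2.
        Cells[i] = np.trapz(integrand, gzs)
    return Cells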
r"""
Solve Klein-Gordon equation on [-2pi, 2pi]**3 with periodic bcs
u_tt = div(grad(u)) - u + u*|u|**2 (1)
Discretize in time by defining f = u_t and use mixed formulation
f_t = div(grad(u)) - u + u*|u|**2 (1)
u_t = f (2)
with both u(x, y, z, t=0) and f(x, y, z, t=0) given.
Using the Fourier basis for all three spatial directions.
"""
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sympy import symbols, exp
from shenfun import *
from mpi4py_fft import generate_xdmf
from spectralDNS.utilities import Timer
rank = comm.Get_rank()
timer = Timer()
# Use sympy to set up initial condition
x, y, z = symbols("x,y,z", real=True)
ue = 0.1*exp(-(x**2 + y**2 + z**2))
# Size of discretization
N = (32, 32, 32)
# Defocusing or focusing
gamma = 1
threads = 1
K0 = FunctionSpace(N[0], 'F', dtype='D', domain=(-2*np.pi, 2*np.pi))
K1 = FunctionSpace(N[1], 'F', dtype='D', domain=(-2*np.pi, 2*np.pi))
K2 = FunctionSpace(N[2], 'F', dtype='d', domain=(-2*np.pi, 2*np.pi))
T = TensorProductSpace(comm, (K0, K1, K2), axes=(0, 1, 2),
**{'planner_effort': 'FFTW_MEASURE',
'threads': threads,
'collapse_fourier': True})
TT = CompositeSpace([T, T])
TV = VectorSpace(T)
Tp = T.get_dealiased((1.5, 1.5, 1.5))
fu = Array(TT, buffer=(0, ue))
f, u = fu
up = Array(Tp)
K = np.array(T.local_wavenumbers(True, True, True))
dfu = Function(TT)
df, du = dfu
fu_hat = Function(TT)
fu_hat = fu.forward()
f_hat, u_hat = fu_hat
gradu = Array(TV)
uh = TrialFunction(T)
vh = TestFunction(T)
L = inner(grad(vh), -grad(uh)) + [inner(vh, -gamma*uh)]
L = la.SolverDiagonal(L).mat.scale
# Coupled equations with no linear terms in their own variables,
# so place everything in NonlinearRHS
count = 0
def NonlinearRHS(self, fu, fu_hat, dfu_hat, **par):
global count, up
count += 1
dfu_hat.fill(0)
f_hat, u_hat = fu_hat
df_hat, du_hat = dfu_hat
up = Tp.backward(u_hat, up)
df_hat = Tp.forward(gamma*up**3, df_hat)
df_hat += L*u_hat
du_hat[:] = f_hat
return dfu_hat
X = T.local_mesh(True)
if rank == 0:
plt.figure()
image = plt.contourf(X[1][..., 0], X[0][..., 0], u[..., N[2]//2], 100)
plt.draw()
plt.pause(1e-4)
#def energy_fourier(comm, a):
# result = 2*np.sum(abs(a[..., 1:-1])**2) + np.sum(abs(a[..., 0])**2) + np.sum(abs(a[..., -1])**2)
# result = comm.allreduce(result)
# return result
def update(self, fu, fu_hat, t, tstep, **params):
global gradu
timer()
transformed = False
if rank == 0 and tstep % params['plot_tstep'] == 0 and params['plot_tstep'] > 0:
fu = fu_hat.backward(fu)
f, u = fu[:]
image.ax.clear()
image.ax.contourf(X[1][..., 0], X[0][..., 0], u[..., N[2]//2], 100)
plt.pause(1e-6)
transformed = True
if tstep % params['write_slice_tstep'][0] == 0:
if transformed is False:
fu = fu_hat.backward(fu)
transformed = True
params['file'].write(tstep, params['write_slice_tstep'][1], as_scalar=True)
if tstep % params['write_tstep'][0] == 0:
if transformed is False:
fu = fu_hat.backward(fu)
transformed = True
params['file'].write(tstep, params['write_tstep'][1], as_scalar=True)
if tstep % params['Compute_energy'] == 0:
if transformed is False:
fu = fu_hat.backward(fu)
f, u = fu
f_hat, u_hat = fu_hat
ekin = 0.5*energy_fourier(f_hat, T)
es = 0.5*energy_fourier(1j*(K*u_hat), T)
        eg = gamma*np.sum(0.5*u**2 - 0.25*u**4)
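        # Sketch (assumption): the source is truncated here; the usual
        # continuation reduces the local potential-energy sum over MPI ranks
        # and reports the diagnostics from rank 0.
        eg = comm.allreduce(eg)
        if rank == 0:
            print('Time = %2.2f, ekin = %2.6e, es = %2.6e, eg = %2.6e' % (t, ekin, es, eg))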
""" Test out the 'resid_scaler' metadata, which allows a user to scale an unknown
or residual on the way in."""
from __future__ import print_function
import unittest
from six import iteritems
from six.moves import cStringIO
import numpy as np
from openmdao.api import Problem, Group, Component, IndepVarComp, ExecComp, ScipyGMRES, Newton
from openmdao.test.util import assert_rel_error
from openmdao.test.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives
class SimpleImplicitComp(Component):
""" A Simple Implicit Component with an additional output equation.
f(x,z) = xz + z - 4
y = x + 2z
Sol: when x = 0.5, z = 2.666
Coupled derivs:
y = x + 8/(x+1)
dy_dx = 1 - 8/(x+1)**2 = -2.5555555555555554
z = 4/(x+1)
dz_dx = -4/(x+1)**2 = -1.7777777777777777
"""
def __init__(self, resid_scaler=1.0):
super(SimpleImplicitComp, self).__init__()
# Params
self.add_param('x', 0.5)
# Unknowns
self.add_output('y', 0.0)
# States
self.add_state('z', 0.0, resid_scaler=resid_scaler)
self.maxiter = 10
self.atol = 1.0e-12
def solve_nonlinear(self, params, unknowns, resids):
""" Simple iterative solve. (Babylonian method)."""
x = params['x']
z = unknowns['z']
znew = z
iter = 0
eps = 1.0e99
while iter < self.maxiter and abs(eps) > self.atol:
z = znew
znew = 4.0 - x*z
eps = x*znew + znew - 4.0
unknowns['z'] = znew
unknowns['y'] = x + 2.0*znew
resids['z'] = eps
#print(unknowns['y'], unknowns['z'])
def apply_nonlinear(self, params, unknowns, resids):
""" Don't solve; just calculate the residual."""
x = params['x']
z = unknowns['z']
resids['z'] = x*z + z - 4.0
# Output equations need to evaluate a residual just like an explicit comp.
resids['y'] = x + 2.0*z - unknowns['y']
#print(x, unknowns['y'], z, resids['z'], resids['y'])
def linearize(self, params, unknowns, resids):
"""Analytical derivatives."""
J = {}
# Output equation
J[('y', 'x')] = np.array([1.0])
        J[('y', 'z')] = np.array([2.0])
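        # Sketch (assumption): the source is truncated here; the state-equation
        # partials follow directly from resids['z'] = x*z + z - 4.
        J[('z', 'x')] = np.array([unknowns['z']])
        J[('z', 'z')] = np.array([params['x'] + 1.0])
        return J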
""" Survival regression with Cox's proportional hazard model. """
import argparse
import logging
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from lifelines import CoxPHFitter
MAX_DUR = 180
def get_pmf_from_survival(survival_f):
pmf = survival_f.copy()
for i in range(survival_f.shape[0] - 1):
pmf[i, :] -= pmf[i + 1, :]
sums = np.sum(pmf, axis=0)
#plt.plot(np.arange(MAX_DUR), pmf)
#plt.show()
assert (sums > 0.95).all(), survival_f[0]
return pmf
def loglikelihood(hazards, cum_hazards):
""" Refer to https://stats.stackexchange.com/questions/417303/
what-is-the-likelihood-for-this-process"""
assert hazards.shape == cum_hazards.shape
lls = np.log(hazards) - cum_hazards
return lls
def get_hazard_from_cum_hazard(cum_hazard):
"""
Refer to the Discrete survival models section in lifelines.
"""
hazard = 1 - np.exp(cum_hazard[:-1,:] - cum_hazard[1:,:])
return hazard
def get_covariates_dict_from_list(covs_list):
m = np.array([c.flatten() for c in covs_list])
d = {}
for i, dim in enumerate(m.T):
d['c{}'.format(i)] = dim
return d
def get_state_sequence_and_residual_time_from_vit(vit):
states = []
residual_times = []
for hs, dur in vit:
states.extend([hs] * dur)
residual_times.extend(np.arange(dur)[::-1])
assert len(states) == len(residual_times) and len(states) == vit[:,1].sum()
return states, residual_times
def get_pd(horizon, lobs, lvit, ntest_obs=None):
""" Get a pandas data frame from input data (lobs, lvit). horizon denotes
the number of observations that will be feed into the predictive model.
"""
assert horizon > 0
survival_times = []
covariates = []
for obs, vit in zip(lobs, lvit):
dim, nobs = obs.shape
_, residual_times = get_state_sequence_and_residual_time_from_vit(vit)
assert len(residual_times) == nobs
if ntest_obs is not None:
nobs = ntest_obs
for t in range(nobs - horizon + 1):
covariates.append(obs[:, t: t + horizon])
survival_time = residual_times[t + horizon - 1]
assert survival_time >= 0
survival_times.append(survival_time)
complete = [1] * len(survival_times) # All segments are complete. No censoring.
data_dict = {'survival_time': survival_times, 'complete': complete}
covariates_dict = get_covariates_dict_from_list(covariates)
data_dict.update(covariates_dict)
return pd.DataFrame(data_dict)
def fit_cph(data):
cph = CoxPHFitter()
cph.fit(data, duration_col='survival_time', event_col='complete')
# cph.print_summary()
return cph
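# Example usage (hypothetical data): build sliding-window covariates from one
# record and inspect the fitted hazard ratios.
#
#   df = get_pd(horizon=5, lobs=[obs], lvit=[vit])
#   cph = fit_cph(df)
#   cph.print_summary()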
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
np.random.seed(10)
parser = argparse.ArgumentParser(description=__doc__)
# Training arguments.
parser.add_argument('horizon', type=int)
parser.add_argument('training_obs')
parser.add_argument('training_vit')
parser.add_argument('nfiles', type=int)
# Testing arguments. Note that a single file is assumed.
parser.add_argument('testing_obs')
parser.add_argument('testing_vit')
parser.add_argument('--ntest_obs', type=int)
args = parser.parse_args()
logging.info('horizon: ' + str(args.horizon))
lobs = []
lvit = []
for i in range(args.nfiles):
obs = np.loadtxt(args.training_obs + '.' + str(i), ndmin=2)
vit = np.loadtxt(args.training_vit + '.' + str(i)).astype('int')
lobs.append(obs)
lvit.append(vit)
# Testing pandas data frame.
test_obs = np.loadtxt(args.testing_obs, ndmin=2)
test_vit = np.loadtxt(args.testing_vit).astype('int')
test_pd = get_pd(args.horizon, [test_obs], [test_vit], args.ntest_obs)
lls = []
for leave_one_out in range(args.nfiles):
lobs_copy = list(lobs)
lvit_copy = list(lvit)
lobs_copy.pop(leave_one_out)
lvit_copy.pop(leave_one_out)
# Training pandas data frame.
train_pd = get_pd(args.horizon, lobs_copy, lvit_copy)
# Learning Cox's proportional hazards model.
cph = fit_cph(train_pd)
        times_to_predict = np.arange(MAX_DUR)
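        # Sketch (assumption): the source is truncated here; one natural
        # continuation scores the held-out residual times under the duration
        # pmf implied by the predicted survival curves.
        survival_f = cph.predict_survival_function(test_pd, times=times_to_predict).values
        pmf = get_pmf_from_survival(survival_f)
        obs = test_pd['survival_time'].values.astype(int)
        lls.append(np.mean(np.log(pmf[obs, np.arange(len(obs))] + 1e-12)))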
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
# MIT License
#
# Copyright (c) 2022 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
import warnings
from functools import lru_cache
from numbers import Number
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
import torch
from numpy.core.multiarray import normalize_axis_index
from PIL import Image
# ========================================================================= #
# Type Hints #
# ========================================================================= #
# from torch.testing._internal.common_utils import numpy_to_torch_dtype_dict
_NP_TO_TORCH_DTYPE = {
np.dtype('bool'): torch.bool,
np.dtype('uint8'): torch.uint8,
np.dtype('int8'): torch.int8,
np.dtype('int16'): torch.int16,
np.dtype('int32'): torch.int32,
np.dtype('int64'): torch.int64,
np.dtype('float16'): torch.float16,
np.dtype('float32'): torch.float32,
np.dtype('float64'): torch.float64,
np.dtype('complex64'): torch.complex64,
np.dtype('complex128'): torch.complex128
}
MinMaxHint = Union[Number, Tuple[Number, ...], np.ndarray]
@lru_cache()
def _dtype_min_max(dtype: torch.dtype) -> Tuple[Union[float, int], Union[float, int]]:
"""Get the min and max values for a dtype"""
dinfo = torch.finfo(dtype) if dtype.is_floating_point else torch.iinfo(dtype)
return dinfo.min, dinfo.max
@lru_cache()
def _check_image_dtype(dtype: torch.dtype):
"""Check that a dtype can hold image values"""
# check that the datatype is within the right range -- this is not actually necessary if the below is correct!
dmin, dmax = _dtype_min_max(dtype)
imin, imax = (0, 1) if dtype.is_floating_point else (0, 255)
assert (dmin <= imin) and (imax <= dmax), f'The dtype: {repr(dtype)} with range [{dmin}, {dmax}] cannot store image values in the range [{imin}, {imax}]'
# check the datatype is allowed
if dtype not in _ALLOWED_DTYPES:
raise TypeError(f'The dtype: {repr(dtype)} is not allowed, must be one of: {list(_ALLOWED_DTYPES)}')
# return the min and max values
return imin, imax
# ========================================================================= #
# Image Helper Functions #
# ========================================================================= #
def torch_image_has_valid_range(tensor: torch.Tensor, check_mode: Optional[str] = None) -> bool:
"""
Check that the range of values in the image is correct!
"""
if check_mode not in {'error', 'warn', 'bool', None}:
raise KeyError(f'invalid check_mode: {repr(check_mode)}')
# get the range for the dtype
imin, imax = _check_image_dtype(tensor.dtype)
# get the values
m = tensor.amin().cpu().numpy()
M = tensor.amax().cpu().numpy()
if (m < imin) or (imax < M):
if check_mode == 'error':
raise ValueError(f'images value range: [{m}, {M}] is outside of the required range: [{imin}, {imax}], for dtype: {repr(tensor.dtype)}')
elif check_mode == 'warn':
warnings.warn(f'images value range: [{m}, {M}] is outside of the required range: [{imin}, {imax}], for dtype: {repr(tensor.dtype)}')
return False
return True
@torch.no_grad()
def torch_image_clamp(tensor: torch.Tensor, clamp_mode: str = 'warn') -> torch.Tensor:
"""
Clamp the image based on the dtype
Valid `clamp_mode`s are {'warn', 'error', 'clamp'}
"""
# check range of values
if clamp_mode in ('warn', 'error'):
torch_image_has_valid_range(tensor, check_mode=clamp_mode)
elif clamp_mode != 'clamp':
raise KeyError(f'invalid clamp mode: {repr(clamp_mode)}')
# get the range for the dtype
imin, imax = _check_image_dtype(tensor.dtype)
# clamp!
return torch.clamp(tensor, imin, imax)
@torch.no_grad()
def torch_image_to_dtype(tensor: torch.Tensor, out_dtype: torch.dtype):
"""
Convert an image to the specified dtype
- Scaling is automatically performed based on the input and output dtype
Floats should be in the range [0, 1], integers should be in the range [0, 255]
- if precision will be lost (), then the values are clamped!
"""
_check_image_dtype(tensor.dtype)
_check_image_dtype(out_dtype)
# check scale
torch_image_has_valid_range(tensor, check_mode='error')
# convert
if tensor.dtype.is_floating_point and (not out_dtype.is_floating_point):
# [float -> int] -- cast after scaling
return torch.clamp(tensor * 255, 0, 255).to(out_dtype)
elif (not tensor.dtype.is_floating_point) and out_dtype.is_floating_point:
# [int -> float] -- cast before scaling
return torch.clamp(tensor.to(out_dtype) / 255, 0, 1)
else:
# [int -> int] | [float -> float]
return tensor.to(out_dtype)
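# Example usage (hypothetical tensor): convert a float image in [0, 1] to uint8
# with automatic rescaling to [0, 255].
#
#   img = torch.rand(3, 64, 64)
#   img_u8 = torch_image_to_dtype(img, torch.uint8)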
@torch.no_grad()
def torch_image_normalize_channels(
tensor: torch.Tensor,
in_min: MinMaxHint,
in_max: MinMaxHint,
channel_dim: int = -1,
out_dtype: Optional[torch.dtype] = None
):
if out_dtype is None:
out_dtype = tensor.dtype
# check dtypes
_check_image_dtype(out_dtype)
assert out_dtype.is_floating_point, f'out_dtype must be a floating point, got: {repr(out_dtype)}'
# get norm values padded to the dimension of the channel
in_min, in_max = _torch_channel_broadcast_scale_values(in_min, in_max, in_dtype=tensor.dtype, dim=channel_dim, ndim=tensor.ndim)
# convert
tensor = tensor.to(out_dtype)
in_min = torch.as_tensor(in_min, dtype=tensor.dtype, device=tensor.device)
in_max = torch.as_tensor(in_max, dtype=tensor.dtype, device=tensor.device)
# warn if the values are the same
if torch.any(in_min == in_max):
m = in_min.cpu().detach().numpy()
        M = in_max.cpu().detach().numpy()
warnings.warn(f'minimum: {m} and maximum: {M} values are the same, scaling values to zero.')
# handle equal values
divisor = in_max - in_min
divisor[divisor == 0] = 1
# normalize
return (tensor - in_min) / divisor
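# Example (illustrative sketch, not part of the original module): rescaling
# each of three channels from its own [min, max] range into [0, 1].
#
# >>> x = torch.rand(4, 8, 8, 3) * 10.0 - 5.0       # HWC batch in [-5, 5]
# >>> y = torch_image_normalize_channels(x, in_min=[-5, -5, -5], in_max=[5, 5, 5])
# >>> float(y.min()) >= 0.0 and float(y.max()) <= 1.0
# True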
# ========================================================================= #
# Argument Helper #
# ========================================================================= #
# float16 doesn't always work; rather convert to float32 first
_ALLOWED_DTYPES = {
torch.float32, torch.float64,
torch.uint8,
torch.int, torch.int16, torch.int32, torch.int64,
torch.long,
}
@lru_cache()
def _torch_to_images_normalise_args(in_tensor_shape: Tuple[int, ...], in_tensor_dtype: torch.dtype, in_dims: str, out_dims: str, in_dtype: Optional[torch.dtype], out_dtype: Optional[torch.dtype]):
# check types
if not isinstance(in_dims, str): raise TypeError(f'in_dims must be of type: {str}, but got: {type(in_dims)}')
if not isinstance(out_dims, str): raise TypeError(f'out_dims must be of type: {str}, but got: {type(out_dims)}')
# normalise dim names
in_dims = in_dims.upper()
out_dims = out_dims.upper()
# check dim values
if sorted(in_dims) != sorted('CHW'): raise KeyError(f'in_dims contains the symbols: {repr(in_dims)}, must contain only permutations of: {repr("CHW")}')
if sorted(out_dims) != sorted('CHW'): raise KeyError(f'out_dims contains the symbols: {repr(out_dims)}, must contain only permutations of: {repr("CHW")}')
# get dimension indices
in_c_dim = in_dims.index('C') - len(in_dims)
out_c_dim = out_dims.index('C') - len(out_dims)
transpose_indices = tuple(in_dims.index(c) - len(in_dims) for c in out_dims)
# check image tensor
if len(in_tensor_shape) < 3:
raise ValueError(f'images must have 3 or more dimensions corresponding to: (..., {", ".join(in_dims)}), but got shape: {in_tensor_shape}')
if in_tensor_shape[in_c_dim] not in (1, 3):
raise ValueError(f'images do not have the correct number of channels for dim "C", required: 1 or 3. Input format is (..., {", ".join(in_dims)}), but got shape: {in_tensor_shape}')
# get default values
if in_dtype is None: in_dtype = in_tensor_dtype
if out_dtype is None: out_dtype = in_dtype
# check dtypes allowed
if in_dtype not in _ALLOWED_DTYPES: raise TypeError(f'in_dtype is not allowed, got: {repr(in_dtype)} must be one of: {list(_ALLOWED_DTYPES)}')
if out_dtype not in _ALLOWED_DTYPES: raise TypeError(f'out_dtype is not allowed, got: {repr(out_dtype)} must be one of: {list(_ALLOWED_DTYPES)}')
# done!
return transpose_indices, in_dtype, out_dtype, out_c_dim
def _torch_channel_broadcast_scale_values(
in_min: MinMaxHint,
in_max: MinMaxHint,
in_dtype: torch.dtype,
dim: int,
ndim: int,
) -> Tuple[List[Number], List[Number]]:
return __torch_channel_broadcast_scale_values(
in_min=tuple(np.array(in_min).reshape(-1).tolist()), # TODO: this is slow?
in_max=tuple(np.array(in_max).reshape(-1).tolist()), # TODO: this is slow?
in_dtype=in_dtype,
dim=dim,
ndim=ndim,
)
@lru_cache()
@torch.no_grad()
def __torch_channel_broadcast_scale_values(
in_min: MinMaxHint,
in_max: MinMaxHint,
in_dtype: torch.dtype,
dim: int,
ndim: int,
) -> Tuple[List[Number], List[Number]]:
# get the default values
in_min: np.ndarray = np.array((0.0 if in_dtype.is_floating_point else 0.0) if (in_min is None) else in_min)
in_max: np.ndarray = np.array((1.0 if in_dtype.is_floating_point else 255.0) if (in_max is None) else in_max)
# add missing axes
if in_min.ndim == 0: in_min = in_min[None]
if in_max.ndim == 0: in_max = in_max[None]
# checks
assert in_min.ndim == 1
assert in_max.ndim == 1
assert np.all(in_min <= in_max), f'min values are not <= the max values: {in_min} !<= {in_max}'
# normalize dim
dim = normalize_axis_index(dim, ndim=ndim)
# pad dim
r_pad = ndim - (dim + 1)
if r_pad > 0:
in_min = in_min[(...,) + ((None,)*r_pad)]
in_max = in_max[(...,) + ((None,)*r_pad)]
# done!
return in_min.tolist(), in_max.tolist()
# ========================================================================= #
# Image Conversion #
# ========================================================================= #
@torch.no_grad()
def torch_to_images(
tensor: torch.Tensor,
in_dims: str = 'CHW', # we always treat numpy by default as HWC, and torch.Tensor as CHW
out_dims: str = 'HWC',
in_dtype: Optional[torch.dtype] = None,
out_dtype: Optional[torch.dtype] = torch.uint8,
clamp_mode: str = 'warn', # clamp, warn, error
always_rgb: bool = False,
in_min: Optional[MinMaxHint] = None,
in_max: Optional[MinMaxHint] = None,
to_numpy: bool = False,
) -> Union[torch.Tensor, np.ndarray]:
"""
Convert a batch of image-like tensors to images.
A batch in this case consists of an arbitrary number of dimensions of a tensor,
with the last 3 dimensions making up the actual images.
Process:
1. check input dtype
2. move axis
3. normalize
4. clamp values
5. auto scale and convert
6. convert to rgb
7. check output dtype
example:
Convert a tensor of non-normalised images (..., C, H, W) to a
tensor of normalised and clipped images (..., H, W, C).
- integer dtypes are expected to be in the range [0, 255]
- float dtypes are expected to be in the range [0, 1]
# TODO: add support for uneven in/out dims, eg. in_dims="HW", out_dims="HWC"
"""
# 0.a. check tensor
if not isinstance(tensor, torch.Tensor):
raise TypeError(f'images must be of type: {torch.Tensor}, got: {type(tensor)}')
# 0.b. get arguments
transpose_indices, in_dtype, out_dtype, out_c_dim = _torch_to_images_normalise_args(
in_tensor_shape=tuple(tensor.shape),
in_tensor_dtype=tensor.dtype,
in_dims=in_dims,
out_dims=out_dims,
in_dtype=in_dtype,
out_dtype=out_dtype,
)
# 1. check input dtype
if in_dtype != tensor.dtype:
raise TypeError(f'images dtype: {repr(tensor.dtype)} does not match in_dtype: {repr(in_dtype)}')
# 2. move axes
tensor = tensor.permute(*(i-tensor.ndim for i in range(tensor.ndim-3)), *transpose_indices)
# 3. normalise
if (in_min is not None) or (in_max is not None):
norm_dtype = (out_dtype if out_dtype.is_floating_point else torch.float32)
tensor = torch_image_normalize_channels(tensor, in_min=in_min, in_max=in_max, channel_dim=out_c_dim, out_dtype=norm_dtype)
# 4. clamp
tensor = torch_image_clamp(tensor, clamp_mode=clamp_mode)
# 5. auto scale and convert
tensor = torch_image_to_dtype(tensor, out_dtype=out_dtype)
# 6. convert to rgb
if always_rgb:
if tensor.shape[out_c_dim] == 1:
tensor = torch.repeat_interleave(tensor, 3, dim=out_c_dim) # torch.repeat is like np.tile, torch.repeat_interleave is like np.repeat
# 7. check output dtype
if out_dtype != tensor.dtype:
        raise RuntimeError(f'[THIS IS A BUG!]: After conversion, images tensor dtype: {repr(tensor.dtype)} does not match out_dtype: {repr(out_dtype)}')
if torch.any(torch.isnan(tensor)):
raise RuntimeError('[THIS IS A BUG!]: After conversion, images contain NaN values!')
# convert to numpy
if to_numpy:
return tensor.detach().cpu().numpy()
return tensor
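# Example (illustrative sketch, not part of the original module): a float CHW
# batch becomes a uint8 HWC numpy batch in one call.
#
# >>> x = torch.rand(5, 3, 32, 32)                  # (B, C, H, W) float32
# >>> imgs = torch_to_images(x, to_numpy=True)      # (B, H, W, C) uint8
# >>> imgs.shape, imgs.dtype
# ((5, 32, 32, 3), dtype('uint8'))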
def numpy_to_images(
ndarray: np.ndarray,
in_dims: str = 'HWC', # we always treat numpy by default as HWC, and torch.Tensor as CHW
out_dims: str = 'HWC',
in_dtype: Optional[Union[str, np.dtype]] = None,
out_dtype: Optional[Union[str, np.dtype]] = np.dtype('uint8'),
clamp_mode: str = 'warn', # clamp, warn, error
always_rgb: bool = False,
in_min: Optional[MinMaxHint] = None,
in_max: Optional[MinMaxHint] = None,
) -> np.ndarray:
"""
Convert a batch of image-like arrays to images.
A batch in this case consists of an arbitrary number of dimensions of an array,
with the last 3 dimensions making up the actual images.
- See the docs for: torch_to_images(...)
"""
# convert numpy dtypes to torch
if in_dtype is not None: in_dtype = _NP_TO_TORCH_DTYPE[np.dtype(in_dtype)]
if out_dtype is not None: out_dtype = _NP_TO_TORCH_DTYPE[np.dtype(out_dtype)]
# convert back
array = torch_to_images(
tensor=torch.from_numpy(ndarray),
in_dims=in_dims,
out_dims=out_dims,
in_dtype=in_dtype,
out_dtype=out_dtype,
clamp_mode=clamp_mode,
always_rgb=always_rgb,
in_min=in_min,
in_max=in_max,
to_numpy=True,
)
# done!
return array
def numpy_to_pil_images(
ndarray: np.ndarray,
in_dims: str = 'HWC', # we always treat numpy by default as HWC, and torch.Tensor as CHW
clamp_mode: str = 'warn',
always_rgb: bool = False,
in_min: Optional[MinMaxHint] = None,
in_max: Optional[MinMaxHint] = None,
) -> np.ndarray:
"""
Convert a numpy array containing images (..., C, H, W) to an array of PIL images (...,)
"""
imgs = numpy_to_images(
ndarray=ndarray,
in_dims=in_dims,
out_dims='HWC',
in_dtype=None,
out_dtype='uint8',
clamp_mode=clamp_mode,
always_rgb=always_rgb,
in_min=in_min,
in_max=in_max,
)
    # np.ndindex over the batch dims handles all the cases (even ndim == 3)... bravo numpy, bravo!
    images = [Image.fromarray(imgs[idx]) for idx in np.ndindex(imgs.shape[:-3])]
    images = np.array(images, dtype=object)
    # NOTE: the original is truncated here -- reshaping back to the batch
    # dimensions (...,) promised by the docstring is an assumed continuation.
    return images.reshape(imgs.shape[:-3])
"""
Example distributions.
"""
from __future__ import absolute_import, division, print_function
import collections
from typing import Callable, Optional
from scipy.stats import multivariate_normal, ortho_group
import tensorflow_probability as tfp
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from config import TF_FLOATS
tfd = tfp.distributions
TF_FLOAT = TF_FLOATS[tf.keras.backend.floatx()]
# NP_FLOAT = [tf.keras.backend.floatx()]
# pylint:disable=invalid-name
# pylint:disable=unused-argument
def w1(z):
"""Transformation."""
return tf.math.sin(2. * np.pi * z[0] / 4.)
def w2(z):
"""Transformation."""
return 3. * tf.exp(-0.5 * (((z[0] - 1.) / 0.6)) ** 2)
def w3(z):
"""Transformation."""
return 3. * (1 + tf.exp(-(z[0] - 1.) / 0.3)) ** (-1)
def plot_samples2D(
samples: np.ndarray,
title: str = None,
fig: Optional[plt.Figure] = None,
ax: Optional[plt.Axes] = None,
**kwargs,
):
"""Plot collection of 2D samples.
NOTE: **kwargs are passed to `ax.plot(...)`.
"""
if fig is None and ax is None:
fig, ax = plt.subplots()
_ = ax.plot(samples[:, 0], samples[:, 1], **kwargs)
if title is not None:
_ = ax.set_title(title, fontsize='x-large')
return fig, ax
def meshgrid(x, y=None):
"""Create a mesgrid of dtype 'float32'."""
if y is None:
y = x
[gx, gy] = np.meshgrid(x, y, indexing='ij')
gx, gy = np.float32(gx), np.float32(gy)
grid = np.concatenate([gx.ravel()[None, :], gy.ravel()[None, :]], axis=0)
return grid.T.reshape(x.size, y.size, 2)
# pylint:disable=too-many-arguments
def contour_potential(
potential_fn: Callable,
ax: Optional[plt.Axes] = None,
title: Optional[str] = None,
xlim: Optional[float] = 5.,
ylim: Optional[float] = 5.,
cmap: Optional[str] = 'inferno',
dtype: Optional[str] = 'float32'
):
"""Plot contours of `potential_fn`."""
if isinstance(xlim, (tuple, list)):
x0, x1 = xlim
else:
x0 = -xlim
x1 = xlim
if isinstance(ylim, (tuple, list)):
y0, y1 = ylim
else:
y0 = -ylim
y1 = ylim
grid = np.mgrid[x0:x1:100j, y0:y1:100j]
# grid_2d = meshgrid(np.arange(x0, x1, 0.05), np.arange(y0, y1, 0.05))
grid_2d = grid.reshape(2, -1).T
cmap = plt.get_cmap(cmap)
if ax is None:
_, ax = plt.subplots()
    try:
        pdf1e = np.exp(-potential_fn(grid_2d))
    except Exception:
        pdf1e = np.exp(-potential_fn(tf.cast(grid_2d, dtype)))
z = pdf1e.reshape(100, 100)
_ = ax.contourf(grid[0], grid[1], z, cmap=cmap, levels=8)
if title is not None:
ax.set_title(title, fontsize='x-large')
plt.tight_layout()
return ax
def two_moons_potential(z):
"""two-moons like potential."""
z = tf.transpose(z)
term1 = 0.5 * ((tf.linalg.norm(z, axis=0) - 2.) / 0.4) ** 2
logterm1 = tf.exp(-0.5 * ((z[0] - 2.) / 0.6) ** 2)
logterm2 = tf.exp(-0.5 * ((z[0] + 2.) / 0.6) ** 2)
output = term1 - tf.math.log(logterm1 + logterm2)
return output
def sin_potential(z):
"""Sin-like potential."""
z = tf.transpose(z)
x = z[0]
y = z[1]
# x, y = z
return 0.5 * ((y - w1(z)) / 0.4) ** 2 + 0.1 * tf.math.abs(x)
def sin_potential1(z):
"""Modified sin potential."""
z = tf.transpose(z)
logterm1 = tf.math.exp(-0.5 * ((z[1] - w1(z)) / 0.35) ** 2)
logterm2 = tf.math.exp(-0.5 * ((z[1] - w1(z) + w2(z)) / 0.35) ** 2)
term3 = 0.1 * tf.math.abs(z[0])
output = -1. * tf.math.log(logterm1 + logterm2) + term3
return output
def sin_potential2(z):
"""Modified sin potential."""
z = tf.transpose(z)
logterm1 = tf.math.exp(-0.5 * ((z[1] - w1(z)) / 0.4) ** 2)
logterm2 = tf.math.exp(-0.5 * ((z[1] - w1(z) + w3(z)) / 0.35) ** 2)
term3 = 0.1 * tf.math.abs(z[0])
output = -1. * tf.math.log(logterm1 + logterm2) + term3
return output
def quadratic_gaussian(x, mu, S):
"""Simple quadratic Gaussian (normal) distribution."""
x = tf.cast(x, dtype=TF_FLOAT)
return tf.linalg.diag_part(0.5 * ((x - mu) @ S) @ tf.transpose((x - mu)))
def random_tilted_gaussian(dim, log_min=-2., log_max=2.):
"""Implements a randomly tilted Gaussian (Normal) distribution."""
mu = np.zeros((dim,))
R = ortho_group.rvs(dim)
sigma = np.diag(
np.exp(np.log(10.) * np.random.uniform(log_min, log_max, size=(dim,)))
)
S = R.T.dot(sigma).dot(R)
return Gaussian(mu, S)
def gen_ring(r=1., var=1., nb_mixtures=2):
"""Generate a ring of Gaussian distributions."""
base_points = []
for t in range(nb_mixtures):
c = np.cos(2 * np.pi * t / nb_mixtures)
s = np.sin(2 * np.pi * t / nb_mixtures)
base_points.append(np.array([r * c, r * s]))
# v = np.array(base_points)
sigmas = [var * np.eye(2) for t in range(nb_mixtures)]
pis = [1. / nb_mixtures] * nb_mixtures
pis[0] += 1. - sum(pis)
return GMM(base_points, sigmas, pis)
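# Example (illustrative sketch, not part of the original module): four
# Gaussians of variance 0.02 spaced evenly on a circle of radius 2.
#
# >>> ring = gen_ring(r=2., var=0.02, nb_mixtures=4)
# >>> samples = ring.get_samples(500)    # (500, 2) array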
def distribution_arr(x_dim, num_distributions):
"""Create array describing likelihood of drawing from distributions."""
    if num_distributions > x_dim:
        pis = [1. / num_distributions] * num_distributions
        pis[0] += 1 - sum(pis)
        return np.array(pis)  # uniform over the distributions; nothing left to pad
if x_dim == num_distributions:
big_pi = round(1.0 / num_distributions, x_dim)
pis = num_distributions * [big_pi]
else:
big_pi = (1.0 / num_distributions) - x_dim * 1e-16
pis = num_distributions * [big_pi]
small_pi = (1. - sum(pis)) / (x_dim - num_distributions)
pis.extend((x_dim - num_distributions) * [small_pi])
# return np.array(pis)#, dtype=NP_FLOAT)
return np.array(pis)
def ring_of_gaussians(num_modes, sigma, r=1.):
"""Create ring of Gaussians for GaussianMixtureModel.
Args:
num_modes (int): Number of modes for distribution.
sigma (float): Standard deviation of each mode.
x_dim (int): Spatial dimensionality in which the distribution exists.
radius (float): Radius from the origin along which the modes are
located.
Returns:
distribution (GMM object): Gaussian mixture distribution.
mus (np.ndarray): Array of the means of the distribution.
covs (np.array): Covariance matrices.
distances (np.ndarray): Array of the differences between different
modes.
"""
distribution = gen_ring(r=r, var=sigma, nb_mixtures=num_modes)
mus = np.array(distribution.mus)
covs = np.array(distribution.sigmas)
diffs = mus[1:] - mus[:-1, :]
distances = [np.sqrt(np.dot(d, d.T)) for d in diffs]
return distribution, mus, covs, distances
def lattice_of_gaussians(num_modes, sigma, x_dim=2, size=None):
"""Create lattice of Gaussians for GaussianMixtureModel.
Args:
num_modes (int): Number of modes for distribution.
sigma (float): Standard deviation of each mode.
x_dim (int): Spatial dimensionality in which the distribution exists.
size (int): Spatial extent of lattice.
Returns:
distribution (GMM object): Gaussian mixture distribution.
covs (np.array): Covariance matrices.
mus (np.ndarray): Array of the means of the distribution.
pis (np.ndarray): Array of relative probabilities for each mode. Must
sum to 1.
"""
if size is None:
size = int(np.sqrt(num_modes))
mus = np.array([(i, j) for i in range(size) for j in range(size)])
covs = np.array([sigma * np.eye(x_dim) for _ in range(num_modes)])
pis = [1. / num_modes] * num_modes
pis[0] += 1. - sum(pis)
distribution = GMM(mus, covs, pis)
return distribution, mus, covs, pis
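# Example (illustrative sketch, not part of the original module): a 2x2
# lattice of unit-variance Gaussians at the integer grid points (0, 0)..(1, 1).
#
# >>> dist, mus, covs, pis = lattice_of_gaussians(num_modes=4, sigma=1.0)
# >>> mus.shape, len(pis)
# ((4, 2), 4)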
class Gaussian:
"""Implements a standard Gaussian distribution."""
def __init__(self, mu, sigma):
self.mu = mu
self.sigma = sigma
self.i_sigma = np.linalg.inv(np.copy(sigma))
def get_energy_function(self):
"""Return the potential energy function of the Gaussian."""
def fn(x, *args, **kwargs):
S = tf.constant(tf.cast(self.i_sigma, TF_FLOAT))
mu = tf.constant(tf.cast(self.mu, TF_FLOAT))
return quadratic_gaussian(x, mu, S)
return fn
def get_samples(self, n):
"""Get `n` samples from the distribution."""
C = np.linalg.cholesky(self.sigma)
x = np.random.randn(n, self.sigma.shape[0])
return x.dot(C.T)
def log_density(self, x):
"""Return the log_density of the distribution."""
return multivariate_normal(mean=self.mu, cov=self.sigma).logpdf(x)
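# Example (illustrative sketch, not part of the original module): a standard
# 2D Gaussian, sampled and scored.
#
# >>> g = Gaussian(np.zeros(2), np.eye(2))
# >>> xs = g.get_samples(1000)    # (1000, 2) samples
# >>> lp = g.log_density(xs)      # (1000,) log-densities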
class TiltedGaussian(Gaussian):
"""Implements a tilted Gaussian."""
def __init__(self, dim, log_min, log_max):
self.R = ortho_group.rvs(dim)
rand_unif = np.random.uniform(log_min, log_max, size=(dim,))
self.diag = np.diag(np.exp(np.log(10.) * rand_unif))
S = self.R.T.dot(self.diag).dot(self.R)
self.dim = dim
Gaussian.__init__(self, np.zeros((dim,)), S)
def get_samples(self, n):
"""Get `n` samples from the distribution."""
        x = np.random.randn(n, self.dim)
x = x.dot(np.sqrt(self.diag))
x = x.dot(self.R)
return x
class RoughWell:
"""Implements a rough well distribution."""
def __init__(self, dim, eps, easy=False):
self.dim = dim
self.eps = eps
self.easy = easy
def get_energy_function(self):
"""Return the potential energy function of the distribution."""
def fn(x, *args, **kwargs):
n = tf.reduce_sum(tf.square(x), 1)
eps2 = self.eps * self.eps
if not self.easy:
out = (0.5 * n
+ self.eps * tf.reduce_sum(tf.math.cos(x/eps2), 1))
else:
out = (0.5 * n
+ self.eps * tf.reduce_sum(tf.cos(x / self.eps), 1))
return out
return fn
def get_samples(self, n):
"""Get `n` samples from the distribution."""
# we can approximate by a gaussian for eps small enough
return np.random.randn(n, self.dim)
class GaussianFunnel:
"""Gaussian funnel distribution."""
def __init__(self, dim=2, clip=6., sigma=2.):
self.dim = dim
self.sigma = sigma
self.clip = 4 * self.sigma
def get_energy_function(self):
"""Returns the (potential) energy function of the distribution."""
def fn(x):
v = x[:, 0]
log_p_v = tf.square(v / self.sigma)
s = tf.exp(v)
sum_sq = tf.reduce_sum(tf.square(x[:, 1:]), axis=1)
n = tf.cast(tf.shape(x)[1] - 1, TF_FLOAT)
E = 0.5 * (log_p_v + sum_sq / s + n * tf.math.log(2.0 * np.pi * s))
s_min = tf.exp(-self.clip)
s_max = tf.exp(self.clip)
E_safe1 = 0.5 * (
log_p_v + sum_sq / s_max + n * tf.math.log(2. * np.pi * s_max)
)
E_safe2 = 0.5 * (
log_p_v + sum_sq / s_min + n * tf.math.log(2.0 * np.pi * s_min)
)
# E_safe = tf.minimum(E_safe1, E_safe2)
E_ = tf.where(tf.greater(v, self.clip), E_safe1, E)
E_ = tf.where(tf.greater(-self.clip, v), E_safe2, E_)
return E_
return fn
def get_samples(self, n):
"""Get `n` samples from the distribution."""
samples = np.zeros((n, self.dim))
for t in range(n):
v = self.sigma * np.random.randn()
s = np.exp(v / 2)
samples[t, 0] = v
samples[t, 1:] = s * np.random.randn(self.dim-1)
return samples
def log_density(self, x):
"""Return the log density of the distribution."""
v = x[:, 0]
log_p_v = np.square(v / self.sigma)
s = np.exp(v)
sum_sq = np.square(x[:, 1:]).sum(axis=1)
        n = x.shape[1] - 1
        return 0.5 * (
            log_p_v + sum_sq / s + n * np.log(2 * np.pi * s)
        )
class GMM:
"""Implements a Gaussian Mixutre Model distribution."""
def __init__(self, mus, sigmas, pis):
assert len(mus) == len(sigmas)
        assert np.isclose(sum(pis), 1.0)
self.mus = mus
self.sigmas = sigmas
self.pis = pis
self.nb_mixtures = len(pis)
self.k = mus[0].shape[0]
self.i_sigmas = []
self.constants = []
for i, sigma in enumerate(sigmas):
self.i_sigmas.append(tf.cast(np.linalg.inv(sigma), TF_FLOAT))
det = np.sqrt((2 * np.pi) ** self.k * np.linalg.det(sigma))
det = tf.cast(det, TF_FLOAT)
self.constants.append(tf.cast(pis[i] / det, dtype=TF_FLOAT))
def get_energy_function(self):
"""Get the energy function of the distribution."""
def fn(x):
V = tf.concat([
tf.expand_dims(-quadratic_gaussian(x, self.mus[i],
self.i_sigmas[i])
+ tf.math.log(self.constants[i]), axis=1)
for i in range(self.nb_mixtures)
], axis=1)
return -1.0 * tf.math.reduce_logsumexp(V, axis=1)
return fn
def get_samples(self, n):
"""Get `n` samples from the distribution."""
categorical = np.random.choice(self.nb_mixtures, size=(n,), p=self.pis)
counter_samples = collections.Counter(categorical)
samples = []
for k, v in counter_samples.items():
samples.append(
np.random.multivariate_normal(
self.mus[k], self.sigmas[k], size=(v,)
)
)
samples = np.concatenate(samples, axis=0)
np.random.shuffle(samples)
return samples
def log_density(self, x):
"""Returns the log density of the distribution."""
exp_arr = [
self.pis[i] * multivariate_normal(
mean=self.mus[i], cov=self.sigmas[i]
).pdf(x) for i in range(self.nb_mixtures)
]
return np.log(sum(exp_arr))
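# Example (illustrative sketch, not part of the original module): a two-mode
# GMM and its energy evaluated at the origin.
#
# >>> mus = [np.array([-1., 0.], dtype=np.float32), np.array([1., 0.], dtype=np.float32)]
# >>> sigmas = [np.eye(2), np.eye(2)]
# >>> gmm = GMM(mus, sigmas, [0.5, 0.5])
# >>> energy = gmm.get_energy_function()
# >>> energy(tf.constant([[0., 0.]], dtype=TF_FLOAT))    # one energy per row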
class GaussianMixtureModel:
"""Gaussian mixture model, using tensorflow-probability."""
def __init__(self, mus, sigmas, pis):
self.mus = tf.convert_to_tensor(mus, dtype=tf.float32)
self.sigmas = tf.convert_to_tensor(sigmas, dtype=tf.float32)
self.pis = tf.convert_to_tensor(pis, dtype=tf.float32)
self.dist = tfd.Mixture(
cat=tfd.Categorical(probs=self.pis),
components=[
tfd.MultivariateNormalDiag(loc=m, scale_diag=s)
for m, s in zip(self.mus, self.sigmas)
]
)
def get_energy_function(self):
"""Get the energy function (log probability) of the distribution."""
def f(x):
return -1 * self.dist.log_prob(x)
return f
def plot_contours(self, num_pts=500):
"""Plot contours of the target distribution."""
        grid = meshgrid(np.linspace(np.min(self.mus) - 1,
                                    np.max(self.mus) + 1, num_pts))
        # NOTE: the original is truncated here -- evaluating the mixture
        # density on the grid and drawing contours is an assumed continuation.
        probs = self.dist.prob(grid).numpy()
        plt.contourf(grid[..., 0], grid[..., 1], probs, levels=8)
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# notebook_metadata_filter: all
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Slicing NDDatasets
#
# This tutorial shows how to handle NDDatasets using Python slicing. As a prerequisite, the user is
# expected to have read the [Import Tutorials](../importexport/import.html).
# %%
import numpy as np
import spectrochempy as scp
# %% [markdown]
# ## What is the slicing ?
#
# The slicing of a list or an array means taking elements from a given index (or set of indexes) to another index (or set of indexes). Slicing is specified using the colon operator `:` with a `from` and `to` index before and after the first colon, and a `step` after the second colon. Hence a slice of the object `X` is written:
#
# `X[from:to:step]`
#
# and will extend from the `from` index, end one item before the `to` index, and advance by `step` between indices. When not given, the defaults are respectively 0 (start at the first index), the length of the dimension (stop at the last index), and 1.
#
# Let's first illustrate the concept on a 1D example:
# %%
X = np.arange(10) # generates a 1D array of 10 elements from 0 to 9
print(X)
print(X[2:5]) # selects all elements from 2 to 4
print(X[::2]) # selects one out of two elements
print(X[:-3]) # a negative index will be counted from the end of the array
print(X[::-2]) # a negative step slices backward, starting from the end of the array
# %% [markdown]
# The same applies to multidimensional arrays by indicating slices separated by commas:
# %%
X = np.random.rand(10, 10)
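# the rest of the original cell is truncated; the lines below are an
# illustrative sketch of multidimensional slicing
print(X[2:5, :3])   # rows 2 to 4, first three columns
print(X[::2, ::2])  # every second row and every second column
print(X[:, -1])     # the last column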
import paddle
import numpy as np
import paddle.nn.functional as F
from ppgan.models.generators.generator_styleganv2ada import conv2d_resample, conv2d_resample_grad
class Model(paddle.nn.Layer):
def __init__(self, w_shape):
super().__init__()
self.weight = self.create_parameter(w_shape, default_initializer=paddle.nn.initializer.Normal())
def forward(self, x, f, up, down, padding, groups, flip_weight, flip_filter):
y, x_1 = conv2d_resample(x, self.weight, filter=f, up=up, down=down, padding=padding, groups=groups, flip_weight=flip_weight, flip_filter=flip_filter)
return y, x_1
lr = 0.0001
dic2 = np.load('06_grad.npz')
for batch_idx in range(8):
print('======================== batch_%.3d ========================'%batch_idx)
# x_shape = [1, 512, 4, 4]
# w_shape = [512, 512, 3, 3]
# f_shape = [4, 4]
# up = 1
# down = 1
# padding = 1
# groups = 1
# flip_weight = True
# flip_filter = False
# x_shape = [1, 512, 4, 4]
# w_shape = [3, 512, 1, 1]
# f_shape = None
# up = 1
# down = 1
# padding = 0
# groups = 1
# flip_weight = True
# flip_filter = False
# x_shape = [1, 512, 4, 4]
# w_shape = [512, 512, 3, 3]
# f_shape = [4, 4]
# up = 2
# down = 1
# padding = 1
# groups = 1
# flip_weight = False
# flip_filter = False
# x_shape = [1, 512, 8, 8]
# w_shape = [512, 512, 3, 3]
# f_shape = [4, 4]
# up = 1
# down = 1
# padding = 1
# groups = 1
# flip_weight = True
# flip_filter = False
# x_shape = [1, 512, 8, 8]
# w_shape = [3, 512, 1, 1]
# f_shape = None
# up = 1
# down = 1
# padding = 0
# groups = 1
# flip_weight = True
# flip_filter = False
# x_shape = [1, 512, 8, 8]
# w_shape = [512, 512, 3, 3]
# f_shape = [4, 4]
# up = 2
# down = 1
# padding = 1
# groups = 1
# flip_weight = False
# flip_filter = False
# x_shape = [1, 512, 16, 16]
# w_shape = [512, 512, 3, 3]
# f_shape = [4, 4]
# up = 1
# down = 1
# padding = 1
# groups = 1
# flip_weight = True
# flip_filter = False
# x_shape = [1, 512, 16, 16]
# w_shape = [3, 512, 1, 1]
# f_shape = None
# up = 1
# down = 1
# padding = 0
# groups = 1
# flip_weight = True
# flip_filter = False
# x_shape = [1, 512, 16, 16]
# w_shape = [512, 512, 3, 3]
# f_shape = [4, 4]
# up = 2
# down = 1
# padding = 1
# groups = 1
# flip_weight = False
# flip_filter = False
# x_shape = [1, 512, 32, 32]
# w_shape = [512, 512, 3, 3]
# f_shape = [4, 4]
# up = 1
# down = 1
# padding = 1
# groups = 1
# flip_weight = True
# flip_filter = False
# x_shape = [1, 512, 32, 32]
# w_shape = [3, 512, 1, 1]
# f_shape = None
# up = 1
# down = 1
# padding = 0
# groups = 1
# flip_weight = True
# flip_filter = False
# x_shape = [1, 512, 32, 32]
# w_shape = [512, 512, 3, 3]
# f_shape = [4, 4]
# up = 2
# down = 1
# padding = 1
# groups = 1
# flip_weight = False
# flip_filter = False
# x_shape = [1, 512, 64, 64]
# w_shape = [512, 512, 3, 3]
# f_shape = [4, 4]
# up = 1
# down = 1
# padding = 1
# groups = 1
# flip_weight = True
# flip_filter = False
# x_shape = [1, 512, 64, 64]
# w_shape = [3, 512, 1, 1]
# f_shape = None
# up = 1
# down = 1
# padding = 0
# groups = 1
# flip_weight = True
# flip_filter = False
# x_shape = [1, 512, 64, 64]
# w_shape = [256, 512, 3, 3]
# f_shape = [4, 4]
# up = 2
# down = 1
# padding = 1
# groups = 1
# flip_weight = False
# flip_filter = False
# x_shape = [1, 256, 128, 128]
# w_shape = [256, 256, 3, 3]
# f_shape = [4, 4]
# up = 1
# down = 1
# padding = 1
# groups = 1
# flip_weight = True
# flip_filter = False
# x_shape = [1, 256, 128, 128]
# w_shape = [3, 256, 1, 1]
# f_shape = None
# up = 1
# down = 1
# padding = 0
# groups = 1
# flip_weight = True
# flip_filter = False
# x_shape = [1, 256, 128, 128]
# w_shape = [128, 256, 3, 3]
# f_shape = [4, 4]
# up = 2
# down = 1
# padding = 1
# groups = 1
# flip_weight = False
# flip_filter = False
x_shape = [1, 128, 256, 256]
w_shape = [128, 128, 3, 3]
f_shape = [4, 4]
up = 1
down = 1
padding = 1
groups = 1
flip_weight = True
flip_filter = False
# x_shape = [1, 128, 256, 256]
# w_shape = [3, 128, 1, 1]
# f_shape = None
# up = 1
# down = 1
# padding = 0
# groups = 1
# flip_weight = True
# flip_filter = False
# x_shape = [1, 128, 256, 256]
# w_shape = [64, 128, 3, 3]
# f_shape = [4, 4]
# up = 2
# down = 1
# padding = 1
# groups = 1
# flip_weight = False
# flip_filter = False
# x_shape = [1, 64, 512, 512]
# w_shape = [64, 64, 3, 3]
# f_shape = [4, 4]
# up = 1
# down = 1
# padding = 1
# groups = 1
# flip_weight = True
# flip_filter = False
# x_shape = [1, 64, 512, 512]
# w_shape = [3, 64, 1, 1]
# f_shape = None
# up = 1
# down = 1
# padding = 0
# groups = 1
# flip_weight = True
# flip_filter = False
dy_dx_pytorch = dic2['batch_%.3d.dy_dx'%batch_idx]
dy_dw_pytorch = dic2['batch_%.3d.dy_dw'%batch_idx]
y_pytorch = dic2['batch_%.3d.y'%batch_idx]
x = dic2['batch_%.3d.x'%batch_idx]
x = paddle.to_tensor(x)
x.stop_gradient = False
if 'batch_%.3d.f'%batch_idx in dic2.keys():
f = paddle.to_tensor(dic2['batch_%.3d.f'%batch_idx])
else:
f = None
if batch_idx == 0:
model = Model(w_shape)
model.train()
optimizer = paddle.optimizer.Momentum(parameters=model.parameters(), learning_rate=lr, momentum=0.9)
model.set_state_dict(paddle.load("model.pdparams"))
y, x_1 = model(x, f=f, up=up, down=down, padding=padding, groups=groups, flip_weight=flip_weight, flip_filter=flip_filter)
# dy_dx = paddle.grad(outputs=[y.sum()], inputs=[x], create_graph=True)[0]
# dy_dw = paddle.grad(outputs=[y.sum()], inputs=[model.weight], create_graph=True)[0]
dysum_dy = paddle.ones(y.shape, dtype=paddle.float32)
dy_dx, dy_dw = conv2d_resample_grad(dysum_dy, x_1, x, model.weight, filter=f, up=up, down=down, padding=padding, groups=groups, flip_weight=flip_weight, flip_filter=flip_filter)
y_paddle = y.numpy()
ddd = np.sum((y_pytorch - y_paddle) ** 2)
print('ddd=%.6f' % ddd)
dy_dx_paddle = dy_dx.numpy()
ddd = np.sum((dy_dx_pytorch - dy_dx_paddle) ** 2)
print('ddd=%.6f' % ddd)
dy_dw_paddle = dy_dw.numpy()
    ddd = np.sum((dy_dw_pytorch - dy_dw_paddle) ** 2)
    print('ddd=%.6f' % ddd)
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
import sys, os
from unittest.mock import patch
sys.path.append(os.path.abspath("..")) # current folder is ~/tests
from idaes.core.surrogate.pysmo.polynomial_regression import (
PolynomialRegression,
FeatureScaling,
)
import numpy as np
import pandas as pd
import pytest
class TestFeatureScaling:
test_data_1d = [[x] for x in range(10)]
test_data_2d = [[x, (x + 1) ** 2] for x in range(10)]
test_data_3d = [[x, x + 10, (x + 1) ** 2 + x + 10] for x in range(10)]
test_data_3d_constant = [[x, 10, (x + 1) ** 2 + 10] for x in range(10)]
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array, pd.DataFrame])
def test_data_scaling_01(self, array_type):
input_array = array_type(self.test_data_1d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
expected_output_3 = np.array([[9]])
expected_output_2 = np.array([[0]])
expected_output_1 = (input_array - expected_output_2) / (
expected_output_3 - expected_output_2
)
np.testing.assert_array_equal(output_3, expected_output_3)
np.testing.assert_array_equal(output_2, expected_output_2)
np.testing.assert_array_equal(
output_1, np.array(expected_output_1).reshape(10, 1)
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array, pd.DataFrame])
def test_data_scaling_02(self, array_type):
input_array = array_type(self.test_data_2d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
expected_output_3 = np.array([[9, 100]])
expected_output_2 = np.array([[0, 1]])
expected_output_1 = (input_array - expected_output_2) / (
expected_output_3 - expected_output_2
)
np.testing.assert_array_equal(output_3, expected_output_3)
np.testing.assert_array_equal(output_2, expected_output_2)
np.testing.assert_array_equal(output_1, expected_output_1)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array, pd.DataFrame])
def test_data_scaling_03(self, array_type):
input_array = array_type(self.test_data_3d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
expected_output_3 = np.array([[9, 19, 119]])
expected_output_2 = np.array([[0, 10, 11]])
expected_output_1 = (input_array - expected_output_2) / (
expected_output_3 - expected_output_2
)
np.testing.assert_array_equal(output_3, expected_output_3)
np.testing.assert_array_equal(output_2, expected_output_2)
np.testing.assert_array_equal(output_1, expected_output_1)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array, pd.DataFrame])
def test_data_scaling_04(self, array_type):
input_array = array_type(self.test_data_3d_constant)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
expected_output_3 = np.array([[9, 10, 110]])
expected_output_2 = np.array([[0, 10, 11]])
scale = expected_output_3 - expected_output_2
scale[scale == 0.0] = 1.0
expected_output_1 = (input_array - expected_output_2) / scale
np.testing.assert_array_equal(output_3, expected_output_3)
np.testing.assert_array_equal(output_2, expected_output_2)
np.testing.assert_array_equal(output_1, expected_output_1)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [list])
def test_data_scaling_05(self, array_type):
input_array = array_type(self.test_data_2d)
with pytest.raises(TypeError):
FeatureScaling.data_scaling(input_array)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_01(self, array_type):
input_array = array_type(self.test_data_1d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
output_1 = output_1.reshape(
output_1.shape[0],
)
un_output_1 = FeatureScaling.data_unscaling(output_1, output_2, output_3)
np.testing.assert_array_equal(un_output_1, input_array.reshape(10, 1))
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_02(self, array_type):
input_array = array_type(self.test_data_2d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
un_output_1 = FeatureScaling.data_unscaling(output_1, output_2, output_3)
np.testing.assert_array_equal(un_output_1, input_array)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_03(self, array_type):
input_array = array_type(self.test_data_3d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
un_output_1 = FeatureScaling.data_unscaling(output_1, output_2, output_3)
np.testing.assert_array_equal(un_output_1, input_array)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_04(self, array_type):
input_array = array_type(self.test_data_3d_constant)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
un_output_1 = FeatureScaling.data_unscaling(output_1, output_2, output_3)
np.testing.assert_array_equal(un_output_1, input_array)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_05(self, array_type):
input_array = array_type(self.test_data_2d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
min_array = np.array([[1]])
max_array = np.array([[5]])
with pytest.raises(IndexError):
FeatureScaling.data_unscaling(output_1, min_array, max_array)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_data_unscaling_06(self, array_type):
input_array = array_type(self.test_data_2d)
output_1, output_2, output_3 = FeatureScaling.data_scaling(input_array)
min_array = np.array([[1, 2, 3]])
max_array = np.array([[5, 6, 7]])
with pytest.raises(IndexError):
FeatureScaling.data_unscaling(output_1, min_array, max_array)
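# Illustrative usage sketch (not part of the original test suite): scaling a
# 2-D array to [0, 1] and recovering it with data_unscaling.
#
# >>> data = np.array([[0., 1.], [5., 11.]])
# >>> scaled, dmin, dmax = FeatureScaling.data_scaling(data)
# >>> recovered = FeatureScaling.data_unscaling(scaled, dmin, dmax)
# >>> np.allclose(recovered, data)
# True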
class TestPolynomialRegression:
y = np.array(
[
[i, j, ((i + 1) ** 2) + ((j + 1) ** 2)]
for i in np.linspace(0, 10, 21)
for j in np.linspace(0, 10, 21)
]
)
full_data = {"x1": y[:, 0], "x2": y[:, 1], "y": y[:, 2]}
training_data = [
[i, j, ((i + 1) ** 2) + ((j + 1) ** 2)]
for i in np.linspace(0, 10, 5)
for j in np.linspace(0, 10, 5)
]
test_data = [[i, (i + 1) ** 2] for i in range(10)]
test_data_large = [[i, (i + 1) ** 2] for i in range(200)]
test_data_1d = [[(i + 1) ** 2] for i in range(10)]
test_data_3d = [[i, (i + 1) ** 2, (i + 2) ** 2] for i in range(10)]
sample_points = [[i, (i + 1) ** 2] for i in range(8)]
sample_points_large = [[i, (i + 1) ** 2] for i in range(100)]
sample_points_1d = [[(i + 1) ** 2] for i in range(8)]
sample_points_3d = [[i, (i + 1) ** 2, (i + 2) ** 2] for i in range(8)]
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__01(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
assert PolyClass.max_polynomial_order == 5
assert (
PolyClass.number_of_crossvalidations == 3
) # Default number of cross-validations
assert PolyClass.no_adaptive_samples == 4 # Default number of adaptive samples
assert PolyClass.fraction_training == 0.75 # Default training split
assert (
PolyClass.max_fraction_training_samples == 0.5
) # Default fraction for the maximum number of training samples
assert PolyClass.max_iter == 10 # Default maximum number of iterations
assert PolyClass.solution_method == "pyomo" # Default solution_method
assert PolyClass.multinomials == 1 # Default multinomials
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
@pytest.mark.unit
def test__init__02(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
number_of_crossvalidations=5,
no_adaptive_samples=6,
training_split=0.5,
max_fraction_training_samples=0.4,
max_iter=20,
solution_method="MLe",
multinomials=0,
)
assert PolyClass.max_polynomial_order == 3
assert (
PolyClass.number_of_crossvalidations == 5
) # Default number of cross-validations
assert PolyClass.no_adaptive_samples == 6 # Default number of adaptive samples
assert PolyClass.fraction_training == 0.5 # Default training split
assert (
PolyClass.max_fraction_training_samples == 0.4
) # Default fraction for the maximum number of training samples
assert PolyClass.max_iter == 20 # Default maximum number of iterations
assert (
PolyClass.solution_method == "mle"
        )  # solution_method is case-insensitive
assert PolyClass.multinomials == 0 # Default multinomials
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [list])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__03(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(ValueError):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [list])
def test__init__04(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(ValueError):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__05(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points_large)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__06(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data_3d)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__07(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points_3d)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__08(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data_1d)
regression_data_input = array_type2(self.sample_points_1d)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__09(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.warns(Warning):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
number_of_crossvalidations=11,
)
assert (
PolyClass.number_of_crossvalidations == 11
) # Default number of cross-validations
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__10(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=1.2
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__11(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data_large)
regression_data_input = array_type2(self.sample_points_large)
with pytest.warns(Warning):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=11
)
assert PolyClass.max_polynomial_order == 10
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__12(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
training_split=1,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__13(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
training_split=-1,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__14(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
max_fraction_training_samples=1.2,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__15(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
max_fraction_training_samples=-1.2,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__16(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
regression_data_input,
regression_data_input,
maximum_polynomial_order=5,
max_iter=100,
)
assert PolyClass.max_iter == 0
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__17(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
no_adaptive_samples=0,
max_iter=100,
)
assert PolyClass.max_iter == 0
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__18(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
number_of_crossvalidations=1.2,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__19(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
no_adaptive_samples=4.2,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__20(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
max_iter=4.2,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__21(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=15
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__22(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
solution_method=1,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__23(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
solution_method="idaes",
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__24(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
multinomials=3,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__25(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=-2
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__26(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
number_of_crossvalidations=-3,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__27(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
no_adaptive_samples=-3,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__28(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
max_iter=-3,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__29(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
overwrite=1,
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__30(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname="solution.pkl",
)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__31(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
with pytest.raises(Exception):
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname=1,
)
@pytest.mark.unit
@pytest.fixture(scope="module")
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__32(self, array_type1, array_type2):
file_name = "sol_check.pickle"
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass1 = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname=file_name,
overwrite=True,
)
PolyClass1.get_feature_vector()
results = PolyClass1.polynomial_regression_fitting()
PolyClass2 = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname=file_name,
overwrite=True,
)
assert PolyClass1.filename == PolyClass2.filename
@pytest.mark.unit
@pytest.fixture(scope="module")
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__33(self, array_type1, array_type2):
file_name1 = "sol_check1.pickle"
file_name2 = "sol_check2.pickle"
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass1 = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname=file_name1,
overwrite=True,
)
PolyClass1.get_feature_vector()
results = PolyClass1.polynomial_regression_fitting()
PolyClass2 = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
fname=file_name2,
overwrite=True,
)
assert PolyClass1.filename == file_name1
assert PolyClass2.filename == file_name2
@pytest.mark.unit
@pytest.fixture(scope="module")
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test__init__34(self, array_type1, array_type2):
file_name = "sol_check.pickle"
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass1 = PolynomialRegression(
original_data_input,
regression_data_input,
fname=file_name,
maximum_polynomial_order=3,
overwrite=True,
)
PolyClass1.get_feature_vector()
results = PolyClass1.polynomial_regression_fitting()
PolygClass2 = PolynomialRegression(
original_data_input,
regression_data_input,
fname=file_name,
maximum_polynomial_order=3,
overwrite=True,
)
assert PolyClass1.filename == PolygClass2.filename
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test_training_test_data_creation_01(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
training_split=0.01,
)
with pytest.raises(Exception):
training_data, cross_val_data = PolyClass.training_test_data_creation()
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [np.array, pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array, pd.DataFrame])
def test_training_test_data_creation_02(self, array_type1, array_type2):
original_data_input = array_type1(self.test_data)
regression_data_input = array_type2(self.sample_points)
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
training_split=0.99,
)
with pytest.raises(Exception):
training_data, cross_val_data = PolyClass.training_test_data_creation()
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_training_test_data_creation_03(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
training_data, cross_val_data = PolyClass.training_test_data_creation()
expected_training_size = int(
np.around(PolyClass.number_of_samples * PolyClass.fraction_training)
)
expected_test_size = PolyClass.regression_data.shape[0] - expected_training_size
assert len(training_data) == PolyClass.number_of_crossvalidations
assert len(cross_val_data) == PolyClass.number_of_crossvalidations
for i in range(1, PolyClass.number_of_crossvalidations + 1):
assert (
training_data["training_set_" + str(i)].shape[0]
== expected_training_size
)
assert cross_val_data["test_set_" + str(i)].shape[0] == expected_test_size
concat_01 = np.concatenate(
(
training_data["training_set_" + str(i)],
cross_val_data["test_set_" + str(i)],
),
axis=0,
)
sample_data_sorted = regression_data_input[
np.lexsort(
(
regression_data_input[:, 2],
regression_data_input[:, 1],
regression_data_input[:, 0],
)
)
]
concat_01_sorted = concat_01[
np.lexsort((concat_01[:, 2], concat_01[:, 1], concat_01[:, 0]))
]
np.testing.assert_equal(sample_data_sorted, concat_01_sorted)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_training_test_data_creation_04(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
PolyClass = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=3,
number_of_crossvalidations=5,
no_adaptive_samples=6,
training_split=0.5,
max_fraction_training_samples=0.4,
max_iter=20,
solution_method="MLe",
multinomials=0,
)
training_data, cross_val_data = PolyClass.training_test_data_creation()
expected_training_size = int(
np.around(PolyClass.number_of_samples * PolyClass.fraction_training)
)
expected_test_size = PolyClass.regression_data.shape[0] - expected_training_size
assert len(training_data) == PolyClass.number_of_crossvalidations
assert len(cross_val_data) == PolyClass.number_of_crossvalidations
for i in range(1, PolyClass.number_of_crossvalidations + 1):
assert (
training_data["training_set_" + str(i)].shape[0]
== expected_training_size
)
assert cross_val_data["test_set_" + str(i)].shape[0] == expected_test_size
concat_01 = np.concatenate(
(
training_data["training_set_" + str(i)],
cross_val_data["test_set_" + str(i)],
),
axis=0,
)
sample_data_sorted = regression_data_input[
np.lexsort(
(
regression_data_input[:, 2],
regression_data_input[:, 1],
regression_data_input[:, 0],
)
)
]
concat_01_sorted = concat_01[
np.lexsort((concat_01[:, 2], concat_01[:, 1], concat_01[:, 0]))
]
np.testing.assert_equal(sample_data_sorted, concat_01_sorted)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_training_test_data_creation_05(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
PolyClass = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
additional_data_input = np.array(
[
[
i**2,
((i + 1) * 2) + ((j + 1) * 2),
j**4,
((i + 1) * 2) + ((j + 1) ** 2),
]
for i in range(5)
for j in range(5)
]
)
training_data, cross_val_data = PolyClass.training_test_data_creation(
additional_features=additional_data_input
)
expected_training_size = int(
np.around(PolyClass.number_of_samples * PolyClass.fraction_training)
)
expected_test_size = PolyClass.regression_data.shape[0] - expected_training_size
assert len(training_data) == PolyClass.number_of_crossvalidations * 2
assert len(cross_val_data) == PolyClass.number_of_crossvalidations * 2
for i in range(1, PolyClass.number_of_crossvalidations + 1):
assert (
training_data["training_set_" + str(i)].shape[0]
== expected_training_size
)
assert (
training_data["training_extras_" + str(i)].shape[0]
== expected_training_size
)
assert cross_val_data["test_set_" + str(i)].shape[0] == expected_test_size
assert (
cross_val_data["test_extras_" + str(i)].shape[0] == expected_test_size
)
concat_01 = np.concatenate(
(
training_data["training_set_" + str(i)],
cross_val_data["test_set_" + str(i)],
),
axis=0,
)
sample_data_sorted = regression_data_input[
np.lexsort(
(
regression_data_input[:, 2],
regression_data_input[:, 1],
regression_data_input[:, 0],
)
)
]
concat_01_sorted = concat_01[
np.lexsort((concat_01[:, 2], concat_01[:, 1], concat_01[:, 0]))
]
np.testing.assert_equal(sample_data_sorted, concat_01_sorted)
concat_02 = np.concatenate(
(
training_data["training_extras_" + str(i)],
cross_val_data["test_extras_" + str(i)],
),
axis=0,
)
additional_data_sorted = additional_data_input[
np.lexsort(
(
additional_data_input[:, 3],
additional_data_input[:, 2],
additional_data_input[:, 1],
additional_data_input[:, 0],
)
)
]
concat_02_sorted = concat_02[
np.lexsort(
(concat_02[:, 3], concat_02[:, 2], concat_02[:, 1], concat_02[:, 0])
)
]
np.testing.assert_equal(additional_data_sorted, concat_02_sorted)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_polygeneration_01(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
x_input_train_data = regression_data_input[:, :-1]
data_feed = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=1
)
poly_degree = 1
output_1 = data_feed.polygeneration(
poly_degree, data_feed.multinomials, x_input_train_data
)
expected_output_nr = x_input_train_data.shape[0]
expected_output_nc = 4 # New number of features should be = 2 * max_polynomial_order + 2 for two input features
expected_output = np.zeros((expected_output_nr, expected_output_nc))
expected_output[:, 0] = 1
expected_output[:, 1] = x_input_train_data[:, 0]
expected_output[:, 2] = x_input_train_data[:, 1]
expected_output[:, 3] = x_input_train_data[:, 0] * x_input_train_data[:, 1]
np.testing.assert_equal(output_1, expected_output)
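        # Column-count bookkeeping used across the polygeneration tests: for
        # two inputs and polynomial degree d, the output holds 1 bias column,
        # 2*d pure-power columns and 1 bilinear column when multinomials=1,
        # i.e. 2*d + 2 columns; with multinomials=0 the bilinear column is
        # dropped, giving 2*d + 1 (see test_polygeneration_04 below).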
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_polygeneration_02(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
x_input_train_data = regression_data_input[:, :-1]
data_feed = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=2
)
poly_degree = 2
output_1 = data_feed.polygeneration(
poly_degree, data_feed.multinomials, x_input_train_data
)
expected_output_nr = x_input_train_data.shape[0]
expected_output_nc = 6 # New number of features should be = 2 * max_polynomial_order + 2 for two input features
expected_output = np.zeros((expected_output_nr, expected_output_nc))
expected_output[:, 0] = 1
expected_output[:, 1] = x_input_train_data[:, 0]
expected_output[:, 2] = x_input_train_data[:, 1]
expected_output[:, 3] = x_input_train_data[:, 0] ** 2
expected_output[:, 4] = x_input_train_data[:, 1] ** 2
expected_output[:, 5] = x_input_train_data[:, 0] * x_input_train_data[:, 1]
np.testing.assert_equal(output_1, expected_output)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_polygeneration_03(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
x_input_train_data = regression_data_input[:, :-1]
data_feed = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=10
)
poly_degree = 10
output_1 = data_feed.polygeneration(
poly_degree, data_feed.multinomials, x_input_train_data
)
expected_output_nr = x_input_train_data.shape[0]
expected_output_nc = 22 # New number of features should be = 2 * max_polynomial_order + 2 for two input features
expected_output = np.zeros((expected_output_nr, expected_output_nc))
expected_output[:, 0] = 1
expected_output[:, 1] = x_input_train_data[:, 0]
expected_output[:, 2] = x_input_train_data[:, 1]
expected_output[:, 3] = x_input_train_data[:, 0] ** 2
expected_output[:, 4] = x_input_train_data[:, 1] ** 2
expected_output[:, 5] = x_input_train_data[:, 0] ** 3
expected_output[:, 6] = x_input_train_data[:, 1] ** 3
expected_output[:, 7] = x_input_train_data[:, 0] ** 4
expected_output[:, 8] = x_input_train_data[:, 1] ** 4
expected_output[:, 9] = x_input_train_data[:, 0] ** 5
expected_output[:, 10] = x_input_train_data[:, 1] ** 5
expected_output[:, 11] = x_input_train_data[:, 0] ** 6
expected_output[:, 12] = x_input_train_data[:, 1] ** 6
expected_output[:, 13] = x_input_train_data[:, 0] ** 7
expected_output[:, 14] = x_input_train_data[:, 1] ** 7
expected_output[:, 15] = x_input_train_data[:, 0] ** 8
expected_output[:, 16] = x_input_train_data[:, 1] ** 8
expected_output[:, 17] = x_input_train_data[:, 0] ** 9
expected_output[:, 18] = x_input_train_data[:, 1] ** 9
expected_output[:, 19] = x_input_train_data[:, 0] ** 10
expected_output[:, 20] = x_input_train_data[:, 1] ** 10
expected_output[:, 21] = x_input_train_data[:, 0] * x_input_train_data[:, 1]
np.testing.assert_equal(output_1, expected_output)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_polygeneration_04(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
x_input_train_data = regression_data_input[:, :-1]
data_feed = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=10,
multinomials=0,
)
poly_degree = 10
output_1 = data_feed.polygeneration(
poly_degree, data_feed.multinomials, x_input_train_data
)
expected_output_nr = x_input_train_data.shape[0]
        expected_output_nc = 21  # Number of features = 2 * max_polynomial_order + 1 when multinomials=0 (no bilinear term)
expected_output = np.zeros((expected_output_nr, expected_output_nc))
expected_output[:, 0] = 1
expected_output[:, 1] = x_input_train_data[:, 0]
expected_output[:, 2] = x_input_train_data[:, 1]
expected_output[:, 3] = x_input_train_data[:, 0] ** 2
expected_output[:, 4] = x_input_train_data[:, 1] ** 2
expected_output[:, 5] = x_input_train_data[:, 0] ** 3
expected_output[:, 6] = x_input_train_data[:, 1] ** 3
expected_output[:, 7] = x_input_train_data[:, 0] ** 4
expected_output[:, 8] = x_input_train_data[:, 1] ** 4
expected_output[:, 9] = x_input_train_data[:, 0] ** 5
expected_output[:, 10] = x_input_train_data[:, 1] ** 5
expected_output[:, 11] = x_input_train_data[:, 0] ** 6
expected_output[:, 12] = x_input_train_data[:, 1] ** 6
expected_output[:, 13] = x_input_train_data[:, 0] ** 7
expected_output[:, 14] = x_input_train_data[:, 1] ** 7
expected_output[:, 15] = x_input_train_data[:, 0] ** 8
expected_output[:, 16] = x_input_train_data[:, 1] ** 8
expected_output[:, 17] = x_input_train_data[:, 0] ** 9
expected_output[:, 18] = x_input_train_data[:, 1] ** 9
expected_output[:, 19] = x_input_train_data[:, 0] ** 10
expected_output[:, 20] = x_input_train_data[:, 1] ** 10
np.testing.assert_equal(output_1, expected_output)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_polygeneration_05(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
x_input_train_data = regression_data_input[:, :-1]
data_feed = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=1
)
poly_degree = 1
additional_term = np.sqrt(x_input_train_data)
output_1 = data_feed.polygeneration(
poly_degree, data_feed.multinomials, x_input_train_data, additional_term
)
expected_output_nr = x_input_train_data.shape[0]
expected_output_nc = (
6 # New number of features should be = 2 * max_polynomial_order + 4
)
expected_output = np.zeros((expected_output_nr, expected_output_nc))
expected_output[:, 0] = 1
expected_output[:, 1] = x_input_train_data[:, 0]
expected_output[:, 2] = x_input_train_data[:, 1]
expected_output[:, 3] = x_input_train_data[:, 0] * x_input_train_data[:, 1]
expected_output[:, 4] = additional_term[:, 0]
expected_output[:, 5] = additional_term[:, 1]
np.testing.assert_equal(output_1, expected_output)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_cost_function_01(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.zeros((x_data_nc, 1))
expected_value = 6613.875 # Calculated externally as sum(y^2) / 2m
output_1 = PolynomialRegression.cost_function(
theta, x_vector, y, reg_parameter=0
)
assert output_1 == expected_value
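        # Sanity-check sketch (not part of the original suite): with theta = 0
        # every prediction is zero, so the unregularized cost collapses to the
        # data term and 6613.875 can be reproduced directly from y:
        #   expected = np.sum(y ** 2) / (2 * y.shape[0])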
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_cost_function_02(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.array(
[[4.5], [3], [3], [1], [1], [0]]
) # coefficients in (x1 + 1.5)^2 + (x2 + 1.5) ^ 2
expected_value = 90.625 # Calculated externally as sum(dy^2) / 2m
output_1 = PolynomialRegression.cost_function(
theta, x_vector, y, reg_parameter=0
)
assert output_1 == expected_value
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_cost_function_03(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.array(
[[2], [2], [2], [1], [1], [0]]
) # Actual coefficients in (x1 + 1)^2 + (x2 + 1) ^ 2
expected_value = 0 # Value should return zero for exact solution
output_1 = PolynomialRegression.cost_function(
theta, x_vector, y, reg_parameter=0
)
assert output_1 == expected_value
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_gradient_function_01(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.zeros((x_data_nc,))
expected_value = np.array(
[[-97], [-635], [-635], [-5246.875], [-5246.875], [-3925]]
) # Calculated externally: see Excel sheet
expected_value = expected_value.reshape(
expected_value.shape[0],
)
output_1 = PolynomialRegression.gradient_function(
theta, x_vector, y, reg_parameter=0
)
np.testing.assert_equal(output_1, expected_value)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_gradient_function_02(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.array(
[[4.5], [3], [3], [1], [1], [0]]
) # coefficients in (x1 + 1.5)^2 + (x2 + 1.5) ^ 2
theta = theta.reshape(
theta.shape[0],
)
expected_value = np.array(
[[12.5], [75], [75], [593.75], [593.75], [437.5]]
) # Calculated externally: see Excel sheet
expected_value = expected_value.reshape(
expected_value.shape[0],
)
output_1 = PolynomialRegression.gradient_function(
theta, x_vector, y, reg_parameter=0
)
np.testing.assert_equal(output_1, expected_value)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_gradient_function_03(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.array(
[[2], [2], [2], [1], [1], [0]]
) # Actual coefficients in (x1 + 1)^2 + (x2 + 1) ^ 2
theta = theta.reshape(
theta.shape[0],
)
expected_value = np.array(
[[0], [0], [0], [0], [0], [0]]
) # Calculated externally: see Excel sheet
expected_value = expected_value.reshape(
expected_value.shape[0],
)
output_1 = PolynomialRegression.gradient_function(
theta, x_vector, y, reg_parameter=0
)
np.testing.assert_equal(output_1, expected_value)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array, pd.DataFrame])
def test_bfgs_parameter_optimization_01(self, array_type):
original_data_input = array_type(self.test_data)
# Create x vector for ax2 + bx + c: x data supplied in x_vector
input_array = np.array(
[
[0, 1],
[1, 4],
[2, 9],
[3, 16],
[4, 25],
[5, 36],
[6, 49],
[7, 64],
[8, 81],
[9, 100],
]
)
x = input_array[:, 0]
y = input_array[:, 1]
x_vector = np.zeros((x.shape[0], 3))
x_vector[:, 0] = (
x[
:,
]
** 2
)
x_vector[:, 1] = x[
:,
]
x_vector[:, 2] = 1
expected_value = np.array([[1.0], [2.0], [1.0]]).reshape(
3,
)
data_feed = PolynomialRegression(
original_data_input,
input_array,
maximum_polynomial_order=5,
solution_method="bfgs",
)
output_1 = data_feed.bfgs_parameter_optimization(x_vector, y)
assert data_feed.solution_method == "bfgs"
np.testing.assert_array_equal(expected_value, np.round(output_1, 4))
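        # Worked check of the expected coefficients: the data satisfy
        # y = (x + 1)^2 = 1*x^2 + 2*x + 1, so with x_vector columns
        # [x^2, x, 1] the optimizer should recover [1.0, 2.0, 1.0].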
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_bfgs_parameter_optimization_02(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
# Create x vector for ax2 + bx + c: x data supplied in x_vector
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_vector = np.zeros((x.shape[0], 6))
x_vector[:, 0] = x[:, 0] ** 2
x_vector[:, 1] = x[:, 1] ** 2
x_vector[:, 2] = x[:, 0]
x_vector[:, 3] = x[:, 1]
x_vector[:, 4] = x[:, 1] * x[:, 0]
x_vector[:, 5] = 1
expected_value = np.array([[1.0], [1.0], [2.0], [2.0], [0.0], [2.0]]).reshape(
6,
)
data_feed = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=4,
solution_method="bfgs",
)
output_1 = data_feed.bfgs_parameter_optimization(x_vector, y)
assert data_feed.solution_method == "bfgs"
np.testing.assert_array_equal(expected_value, np.round(output_1, 4))
@pytest.mark.unit
def test_mle_estimate_01(self):
# Create x vector for ax2 + bx + c: x data supplied in x_vector
input_array = np.array(
[
[0, 1],
[1, 4],
[2, 9],
[3, 16],
[4, 25],
[5, 36],
[6, 49],
[7, 64],
[8, 81],
[9, 100],
]
)
x = input_array[:, 0]
y = input_array[:, 1]
x_vector = np.zeros((x.shape[0], 3))
x_vector[:, 0] = (
x[
:,
]
** 2
)
x_vector[:, 1] = x[
:,
]
x_vector[:, 2] = 1
expected_value = np.array([[1.0], [2.0], [1.0]]).reshape(
3,
)
output_1 = PolynomialRegression.MLE_estimate(x_vector, y)
np.testing.assert_array_equal(expected_value, np.round(output_1, 4))
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_mle_estimate_02(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_vector = np.zeros((x.shape[0], 6))
x_vector[:, 0] = x[:, 0] ** 2
x_vector[:, 1] = x[:, 1] ** 2
x_vector[:, 2] = x[:, 0]
x_vector[:, 3] = x[:, 1]
x_vector[:, 4] = x[:, 1] * x[:, 0]
x_vector[:, 5] = 1
expected_value = np.array([[1.0], [1.0], [2.0], [2.0], [0.0], [2.0]]).reshape(
6,
)
output_1 = PolynomialRegression.MLE_estimate(x_vector, y)
np.testing.assert_array_equal(expected_value, np.round(output_1, 4))
@pytest.mark.unit
def test_pyomo_optimization_01(self):
x_vector = np.array([[i**2, i, 1] for i in range(10)])
y = np.array([[i**2] for i in range(1, 11)])
expected_value = np.array([[1.0], [2.0], [1.0]])
output_1 = PolynomialRegression.pyomo_optimization(x_vector, y)
np.testing.assert_array_equal(expected_value, np.round(output_1, 4))
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_pyomo_optimization_02(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1]
x_vector = np.zeros((x.shape[0], 6))
x_vector[:, 0] = x[:, 0] ** 2
x_vector[:, 1] = x[:, 1] ** 2
x_vector[:, 2] = x[:, 0]
x_vector[:, 3] = x[:, 1]
x_vector[:, 4] = x[:, 1] * x[:, 0]
x_vector[:, 5] = 1
expected_value = np.array([[1.0], [1.0], [2.0], [2.0], [0.0], [2.0]])
output_1 = PolynomialRegression.pyomo_optimization(x_vector, y)
np.testing.assert_array_equal(expected_value, np.round(output_1, 4))
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_cross_validation_error_calculation_01(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1].reshape(regression_data_input.shape[0], 1)
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.zeros((x_data_nc, 1))
expected_value = 2 * 6613.875 # Calculated externally as sum(y^2) / m
output_1 = PolynomialRegression.cross_validation_error_calculation(
theta, x_vector, y
)
assert output_1 == expected_value
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
def test_cross_validation_error_calculation_02(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1].reshape(regression_data_input.shape[0], 1)
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.array(
[[4.5], [3], [3], [1], [1], [0]]
) # coefficients in (x1 + 1.5)^2 + (x2 + 1.5) ^ 2
        expected_value = 2 * 90.625  # Calculated externally as sum(dy^2) / m
output_1 = PolynomialRegression.cross_validation_error_calculation(
theta, x_vector, y
)
assert output_1 == expected_value
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array])
    def test_cross_validation_error_calculation_03(self, array_type):
regression_data_input = array_type(self.training_data)
x = regression_data_input[:, :-1]
y = regression_data_input[:, -1].reshape(regression_data_input.shape[0], 1)
x_data_nr = x.shape[0]
x_data_nc = 6
x_vector = np.zeros((x_data_nr, x_data_nc))
x_vector[:, 0] = 1
x_vector[:, 1] = x[:, 0]
x_vector[:, 2] = x[:, 1]
x_vector[:, 3] = x[:, 0] ** 2
x_vector[:, 4] = x[:, 1] ** 2
x_vector[:, 5] = x[:, 0] * x[:, 1]
theta = np.array(
[[2], [2], [2], [1], [1], [0]]
) # Actual coefficients in (x1 + 1)^2 + (x2 + 1) ^ 2
expected_value = 2 * 0 # Value should return zero for exact solution
output_1 = PolynomialRegression.cross_validation_error_calculation(
theta, x_vector, y
)
assert output_1 == expected_value
def mock_optimization(self, x, y):
return 10 * np.ones((x.shape[1], 1))
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
@patch.object(PolynomialRegression, "MLE_estimate", mock_optimization)
def test_polyregression_01(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
data_feed = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
solution_method="mle",
)
poly_order = 2
training_data = regression_data_input[0:20, :]
test_data = regression_data_input[20:, :]
expected_output = 10 * np.ones((6, 1))
output_1, _, _ = data_feed.polyregression(poly_order, training_data, test_data)
np.testing.assert_array_equal(expected_output, output_1)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
@patch.object(
PolynomialRegression, "bfgs_parameter_optimization", mock_optimization
)
def test_polyregression_02(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
data_feed = PolynomialRegression(
original_data_input,
regression_data_input,
maximum_polynomial_order=5,
solution_method="bfgs",
)
poly_order = 2
training_data = regression_data_input[0:20, :]
test_data = regression_data_input[20:, :]
expected_output = 10 * np.ones((6, 1))
output_1, _, _ = data_feed.polyregression(poly_order, training_data, test_data)
np.testing.assert_array_equal(expected_output, output_1)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
@patch.object(PolynomialRegression, "pyomo_optimization", mock_optimization)
def test_polyregression_03(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
data_feed = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
poly_order = 2
training_data = regression_data_input[0:20, :]
test_data = regression_data_input[20:, :]
expected_output = 10 * np.ones((6, 1))
output_1, _, _ = data_feed.polyregression(poly_order, training_data, test_data)
np.testing.assert_array_equal(expected_output, output_1)
@pytest.mark.unit
@pytest.mark.parametrize("array_type1", [pd.DataFrame])
@pytest.mark.parametrize("array_type2", [np.array])
def test_polyregression_04(self, array_type1, array_type2):
original_data_input = array_type1(self.full_data)
regression_data_input = array_type2(self.training_data)
data_feed = PolynomialRegression(
original_data_input, regression_data_input, maximum_polynomial_order=5
)
poly_order = 10
training_data = regression_data_input[0:20, :]
test_data = regression_data_input[20:, :]
        expected_output = np.inf
output_1, output_2, output_3 = data_feed.polyregression(
poly_order, training_data, test_data
)
np.testing.assert_array_equal(expected_output, output_1)
np.testing.assert_array_equal(expected_output, output_2)
np.testing.assert_array_equal(expected_output, output_3)
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array, pd.DataFrame])
def test_surrogate_performance_01(self, array_type):
original_data_input = array_type(self.test_data)
input_array = np.array(
[
[0, 1],
[1, 4],
[2, 9],
[3, 16],
[4, 25],
[5, 36],
[6, 49],
[7, 64],
[8, 81],
[9, 100],
]
)
order_best = 2
phi_best = np.array([[0.0], [0.0], [0.0]])
expected_value_1 = 38.5
expected_value_2 = 2533.3
expected_value_3 = -1.410256
expected_value_4 = 0
data_feed = PolynomialRegression(
original_data_input, input_array, maximum_polynomial_order=5
)
_, output_1, output_2, output_3, output_4 = data_feed.surrogate_performance(
phi_best, order_best
)
assert output_1 == expected_value_1
assert output_2 == expected_value_2
assert np.round(output_3, 4) == np.round(expected_value_3, 4)
assert np.round(output_4, 4) == np.round(expected_value_4, 4)
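        # Worked arithmetic for the zero-coefficient surrogate above: with
        # phi_best = 0 every prediction is 0, so
        #   mean absolute error = mean(|y|) = 385 / 10 = 38.5
        #   mean squared error  = sum(y^2) / 10 = 25333 / 10 = 2533.3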
@pytest.mark.unit
@pytest.mark.parametrize("array_type", [np.array, pd.DataFrame])
def test_surrogate_performance_02(self, array_type):
original_data_input = array_type(self.test_data)
input_array = np.array(
[
[0, 1],
[1, 4],
[2, 9],
[3, 16],
[4, 25],
[5, 36],
[6, 49],
[7, 64],
[8, 81],
[9, 100],
]
)
order_best = 2
phi_best = np.array([[1.0], [2.0], [1.0]])
expected_value_1 = 0
expected_value_2 = 0
expected_value_3 = 1
expected_value_4 = 1
data_feed = PolynomialRegression(
original_data_input, input_array, maximum_polynomial_order=5
)
_, output_1, output_2, output_3, output_4 = data_feed.surrogate_performance(
phi_best, order_best
)
assert output_1 == expected_value_1
assert output_2 == expected_value_2
assert np.round(output_3, 4) == np.round(expected_value_3, 4)
assert np.round(output_4, 4) == | np.round(expected_value_4, 4) | numpy.round |
import numpy as np
import pytest
from matplotlib.lines import Path
from astropy.visualization.wcsaxes.grid_paths import get_lon_lat_path
@pytest.mark.parametrize('step_in_degrees', [10, 1, 0.01])
def test_round_trip_visibility(step_in_degrees):
zero = np.zeros(100)
# The pixel values are irrelevant for this test
pixel = np.stack([zero, zero]).T
# Create a grid line of constant latitude with a point every step
line = np.stack([np.arange(100), zero]).T * step_in_degrees
# Create a modified grid line where the point spacing is larger by 5%
# Starting with point 20, the discrepancy between `line` and `line_round` is greater than `step`
line_round = line * 1.05
# Perform the round-trip check
path = get_lon_lat_path(line, pixel, line_round)
# The grid line should be visible for only the initial part line (19 points)
codes_check = | np.full(100, Path.MOVETO) | numpy.full |
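# Worked check of the visibility comment above: the i-th point of line_round
# differs from line by 0.05 * i * step_in_degrees, so the discrepancy first
# reaches a full step at i = 20; only points 0..19 stay within the round-trip
# tolerance and remain visible.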
import os
import sys
from tqdm import tqdm
from tensorboardX import SummaryWriter
import shutil
import argparse
import logging
import time
import random
import numpy as np
import torch
import torch.optim as optim
from torchvision import transforms
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from torchvision.utils import make_grid
from networks.vnet_multi_head import VNetMultiHead
from dataloaders.la_heart import LAHeart, RandomCrop, CenterCrop, RandomRotFlip, ToTensor, TwoStreamBatchSampler
from scipy.ndimage import distance_transform_edt as distance
from skimage import segmentation as skimage_seg
"""
Train a multi-head vnet to output
1) predicted segmentation
2) regress the signed distance function map
e.g.
Deep Distance Transform for Tubular Structure Segmentation in CT Scans
https://arxiv.org/abs/1912.03383
Shape-Aware Complementary-Task Learning for Multi-Organ Segmentation
https://arxiv.org/abs/1908.05099
"""
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str, default='../data/2018LA_Seg_Training Set/', help='Name of Experiment')
parser.add_argument('--exp', type=str, default='vnet_dp_la_MH_SDFL1PlusL2', help='model_name;dp:add dropout; MH:multi-head')
parser.add_argument('--max_iterations', type=int, default=10000, help='maximum number of iterations to train')
parser.add_argument('--batch_size', type=int, default=4, help='batch_size per gpu')
parser.add_argument('--base_lr', type=float, default=0.01, help='base learning rate')
parser.add_argument('--deterministic', type=int, default=1, help='whether use deterministic training')
parser.add_argument('--seed', type=int, default=2019, help='random seed')
parser.add_argument('--gpu', type=str, default='0', help='GPU to use')
args = parser.parse_args()
train_data_path = args.root_path
snapshot_path = "../model_la/" + args.exp + "/"
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
batch_size = args.batch_size * len(args.gpu.split(','))
max_iterations = args.max_iterations
base_lr = args.base_lr
if args.deterministic:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
patch_size = (112, 112, 80)
num_classes = 2
def dice_loss(score, target):
target = target.float()
smooth = 1e-5
intersect = torch.sum(score * target)
y_sum = torch.sum(target * target)
z_sum = torch.sum(score * score)
loss = (2 * intersect + smooth) / (z_sum + y_sum + smooth)
loss = 1 - loss
return loss
def compute_sdf(img_gt, out_shape):
"""
compute the signed distance map of binary mask
input: segmentation, shape = (batch_size,c, x, y, z)
output: the Signed Distance Map (SDM)
sdf(x) = 0; x in segmentation boundary
-inf|x-y|; x in segmentation
+inf|x-y|; x out of segmentation
normalize sdf to [-1,1]
"""
img_gt = img_gt.astype(np.uint8)
normalized_sdf = np.zeros(out_shape)
for b in range(out_shape[0]): # batch size
for c in range(out_shape[1]):
            posmask = img_gt[b].astype(bool)
if posmask.any():
negmask = ~posmask
posdis = distance(posmask)
negdis = distance(negmask)
boundary = skimage_seg.find_boundaries(posmask, mode='inner').astype(np.uint8)
sdf = (negdis-np.min(negdis))/(np.max(negdis)-np.min(negdis)) - (posdis-np.min(posdis))/(np.max(posdis)-np.min(posdis))
sdf[boundary==1] = 0
normalized_sdf[b][c] = sdf
assert np.min(sdf) == -1.0, print( | np.min(posdis) | numpy.min |
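# Worked 1-D example of the SDF recipe above (illustrative; uses the same
# `distance` transform imported in this script):
#   posmask = np.array([0, 0, 1, 1, 1, 0, 0], dtype=bool)
#   posdis, negdis = distance(posmask), distance(~posmask)
#   sdf = (negdis - negdis.min()) / (negdis.max() - negdis.min()) \
#       - (posdis - posdis.min()) / (posdis.max() - posdis.min())
# After zeroing the boundary pixels, sdf is -1 at the deepest interior pixel,
# +1 at the farthest exterior pixel, and 0 on the segmentation boundary.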
"""
Title: RM model
Authors: <NAME> & <NAME>
Date: 23 Dec 2018
Description: Most of this code has been written by <NAME> to
create Rossiter-McLaughlin-affected line profiles. Some small
modifications were made by <NAME> to utilize it for
some more specific purposes.
Requirements:
Requires C-code to be compiled using
g++ -Wall -fPIC -O3 -march=native -fopenmp -c utils.cpp
g++ -shared -o libutils.so utils.o
"""
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
import time
from astropy.io import fits
from numba import jit
import ctypes as c
from numpy.ctypeslib import ndpointer
import os
from matplotlib import gridspec
"""
This part was written by <NAME>.
"""
libUTILS = c.cdll.LoadLibrary('./libutils.so')
prototype = c.CFUNCTYPE(c.c_void_p,
c.c_int,
c.c_int,
c.c_double,
c.c_double,
c.c_double,
ndpointer(c.c_double, flags="C_CONTIGUOUS")
)
make_planet_c = prototype(('make_planet', libUTILS))
prototype = c.CFUNCTYPE(c.c_void_p,
c.c_int,
c.c_int,
c.c_double,
c.c_double,
ndpointer(c.c_double, flags="C_CONTIGUOUS")
)
dist_circle = prototype(('dist_circle', libUTILS))
prototype = c.CFUNCTYPE(c.c_void_p,
c.c_int,
c.c_int,
c.c_double,
c.c_double,
c.c_double,
c.c_double,
c.c_double,
ndpointer(c.c_double, flags="C_CONTIGUOUS")
)
make_star_c = prototype(('make_star', libUTILS))
@jit
def make_lineprofile(npix,rstar,xc,vgrid,A,veq,linewidth):
"""
returns the line profile for the different points on the star
as a 2d array with one axis being velocity and other axis position
on the star
    npix - number of pixels along one axis of the star (assumes solid body rotation)
rstar - the radius of the star in pixels
xc - the midpoint of the star in pixels
vgrid - the velocity grid for the spectrum you wish to make (1d array in km/s)
A - the line depth of the intrinsic profile - the bottom is at (1 - A) is the max line depth (single value)
veq - the equatorial velocity (the v sin i for star of inclination i) in km/s (single value)
linewidth - the sigma of your Gaussian line profile in km/s (single value)
"""
vc=(np.arange(npix)-xc)/rstar*veq
vs=vgrid[np.newaxis,:]-vc[:,np.newaxis]
profile=1.-A*np.exp( -(vs*vs)/2./linewidth**2)
return profile
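# Minimal usage sketch for make_lineprofile (values are illustrative):
#   vgrid = np.linspace(-200.0, 200.0, 401)               # km/s
#   prof = make_lineprofile(101, 50.0, 50.0, vgrid, 0.8, 130.0, 20.0 / 2.35)
# prof has shape (101, 401): one Gaussian absorption profile per vertical
# slice of the star, Doppler-shifted by that slice's rotation velocity.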
@jit
def make_star(npix0,osf,xc,yc,rstar,u1,u2,map0):
"""
makes a circular disk with limb darkening
returns a 2D map of the limb darkened star
npix0 - number of pixels along one side of the output square array
osf - the oversampling factor (integer multiplier)
xc - x coordinate of the star
yc - y coordinate of the star
rstar - radius of the star in pixels
u1 and u2 - linear and quadratic limb darkening coefficients
"""
npix=int(np.floor(npix0*osf))
make_star_c(npix,npix,xc*osf,yc*osf,rstar*osf,u1,u2,map0)
star=map0.copy().reshape((npix0,osf,npix0,osf))
star=star.mean(axis=3).mean(axis=1)
return star
@jit
def make_planet(npix0,osf,xc,yc,rplanet,map0):
"""
returns a 2D mask with the planet at 0.
npix0 - number of pixels along one side of the output square array
osf - the oversampling factor (integer multiplier)
xc - x coordinate of the planet
yc - y coordinate of the planet
rplanet - radius of the planet in pixels
"""
npix=int(np.floor(npix0*osf))
make_planet_c(npix,npix,xc*osf,yc*osf,rplanet*osf,map0)
planet=map0.copy().reshape((npix0,osf,npix0,osf))
planet=planet.mean(axis=3).mean(axis=1)
return planet
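# Illustrative sketch combining the two builders above (assumes libutils.so
# has been compiled as described in the header; numbers mirror simtransit):
#   map0 = np.zeros((1025 * 10, 1025 * 10))
#   star = make_star(1025, 10, 511.5, 511.5, 510.0, 0.2752, 0.3790, map0)
#   mask = make_planet(1025, 10, 400.0, 511.5, 28.5, map0)
#   flux_in_transit = (star * mask).sum()  # planet blocks part of the disk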
"""
This part was initially written by <NAME>, with some small modifications
by <NAME> to utilize the code for some specific purposes.
"""
def simimprof(radvel, A):
##initialise the model
Rs_Rjup=17.9 #radius of the star in Rjup (rounded)
Rp_Rjup=1.0 #radius of planet in Rjup
Rs=510 # radius of the star in pixels
RpRs=Rp_Rjup/Rs_Rjup # radius of planet in stellar radii
Rp=RpRs*Rs # radius of the planet in stellar radii
npix_star=1025 # number of pixels in the stellar map
OSF=10 # oversampling factor for star to avoid jagged edges
map0=np.zeros((npix_star*OSF,npix_star*OSF)) # Map for calculating star, needed for C code
u1=0.2752 # linear limbdarkening coefficient
u2=0.3790 # quadratic limbdarkening coefficient
xc=511.5 # x-coordinate of disk centre
yc=511.5 # y-coordinate of disk centre
veq=130. # V sin i (km/s)
l_fwhm=20. # Intrinsic line FWHM (km/s)
    lw=l_fwhm/2.35 # Intrinsic line width in sigma (km/s)
vgrid=np.copy(radvel) # velocity grid
profile=make_lineprofile(npix_star,Rs,xc,vgrid,A,veq,lw) # make line profile for each vertical slice of the star
star=make_star(npix_star,OSF,xc,yc,Rs,u1,u2,map0) # make a limb-darkened stellar disk
sflux=star.sum(axis=1) # sum up the stellar disk across the x-axis
improf=np.sum(sflux[:,np.newaxis]*profile,axis=0) # calculate the spectrum for an unocculted star
improf=improf/improf[0]
return improf
def simtransit(Rp_Rjup, radvel, A, b, theta, x0, outputfolder):
##initialise the model
Rs_Rjup=17.9 #radius of the star in Rjup (rounded)
Rs=510 # radius of the star in pixels
RpRs=Rp_Rjup/Rs_Rjup # radius of planet in stellar radii
Rp=RpRs*Rs # radius of the planet in stellar radii
npix_star=1025 # number of pixels in the stellar map
OSF=10 # oversampling factor for star to avoid jagged edges
map0=np.zeros((npix_star*OSF,npix_star*OSF)) # Map for calculating star, needed for C code
u1=0.2752 # linear limbdarkening coefficient
u2=0.3790 # quadratic limbdarkening coefficient
xc=511.5 # x-coordinate of disk centre
yc=511.5 # y-coordinate of disk centre
veq=130. # V sin i (km/s)
l_fwhm=20. # Intrinsic line FWHM (km/s)
    lw=l_fwhm/2.35 # Intrinsic line width in sigma (km/s)
vgrid=np.copy(radvel) # velocity grid
profile=make_lineprofile(npix_star,Rs,xc,vgrid,A,veq,lw) # make line profile for each vertical slice of the star
star=make_star(npix_star,OSF,xc,yc,Rs,u1,u2,map0) # make a limb-darkened stellar disk
sflux=star.sum(axis=1) # sum up the stellar disk across the x-axis
improf=np.sum(sflux[:,np.newaxis]*profile,axis=0) # calculate the spectrum for an unocculted star
normalisation=improf[0]
improf=improf/normalisation
#set up the orbit including a spin-orbit misalignment
x0 = xc + x0 * Rs
y0 = yc + np.zeros(len(x0)) + b * Rs
x1=( (x0-xc)*np.cos(theta) - (y0-yc)*np.sin(theta)) + xc
y1=( (x0-xc)*np.sin(theta) + (y0-yc)*np.cos(theta)) + yc
#define some basic arrays
line_profile1=np.zeros( (x1.shape[0],vgrid.shape[0]) )
img1=np.zeros( (x1.shape[0],npix_star,npix_star) )
#Loop over the positions of the planet
for i in range(x1.shape[0]):
tmap1=star*make_planet(npix_star,OSF,y1[i],x1[i],Rp,map0)
sflux=tmap1.sum(axis=0)
line_profile1[i,:]= | np.dot(sflux,profile) | numpy.dot |
"""
-----------------------------------------------------------------------
Harmoni: a Novel Method for Eliminating Spurious Neuronal Interactions due to the Harmonic Components in Neuronal Data
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
https://doi.org/10.1101/2021.10.06.463319
-----------------------------------------------------------------------
script for:
** Lemon Data analysis **
-----------------------------------------------------------------------
(c) <NAME> (<EMAIL>) @ Neurolgy Dept, MPI CBS, 2021
https://github.com/minajamshidi
(c) please cite the above paper in case of using this code for your research
License: MIT License
-----------------------------------------------------------------------
last modified: 20211004 by Mina
-----------------------------------------------------------------------
-----------------------------------------------------------------------
"""
import os.path as op
import os
import itertools
from operator import itemgetter
import multiprocessing
from functools import partial
import time
from matplotlib import pyplot as plt
import matplotlib
import pandas as pd
import numpy as np
from numpy import pi
import scipy.stats as stats
from scipy.signal import butter
from tools_general import *
from tools_source_space import *
from tools_connectivity import *
from tools_connectivity_plot import *
# directories and settings -----------------------------------------------------
# fill in these directories with your own data directories
subjects_dir = '/data/pt_02076/mne_data/MNE-fsaverage-data/' # dir for the head model
subject = 'fsaverage'
_oct = '6'
src_dir = op.join(subjects_dir, subject, 'bem', subject + '-oct' + _oct + '-src.fif')
fwd_dir = op.join(subjects_dir, subject, 'bem', subject + '-oct' + _oct + '-fwd.fif')
inv_method = 'eLORETA'
condition = 'EC'
# dir_adjmat = op.join('/data/pt_02076/LEMON/lemon_processed_data/networks_bandpass/eloreta/Schaefer100/', condition)
dir_adjmat = '/data/pt_02076/LEMON/lemon_processed_data/networks_coh_indiv_alphapeak_broadsvd_noperm/'
dir_raw_set = '/data/pt_nro109/Share/EEG_MPILMBB_LEMON/EEG_Preprocessed_BIDS_ID/EEG_Preprocessed/'
"""
NOTE ABOUT DATA
You have to download the data of eyes-closed rsEEG of subject sub-010017 from
https://ftp.gwdg.de/pub/misc/MPI-Leipzig_Mind-Brain-Body-LEMON/EEG_MPILMBB_LEMON/EEG_Raw_BIDS_ID/sub-010017/RSEEG/
and put it in the data_dir you specify here.
"""
# -----------------------------------------------------
# read the parcellation
# -----------------------------------------------------
parcellation = dict(name='Schaefer2018_100Parcels_7Networks_order', abb='Schaefer100')
labels = mne.read_labels_from_annot(subject, subjects_dir=subjects_dir, parc=parcellation['name'])
labels = labels[:-2]
# labels = labels[:-1]
labels_sorted, idx_sorted = rearrange_labels(labels) # rearrange labels
labels_sorted2, idx_sorted2 = rearrange_labels_network(labels) # rearrange labels
labels_network_sorted, idx_lbl_sort = rearrange_labels_network(labels_sorted)
n_parc = len(labels)
n_parc_range_prod = list(itertools.product(np.arange(n_parc), np.arange(n_parc)))
# read forward solution ---------------------------------------------------
fwd = mne.read_forward_solution(fwd_dir)
fwd_fixed = mne.convert_forward_solution(fwd, surf_ori=True, force_fixed=True, use_cps=True)
leadfield = fwd_fixed['sol']['data']
n_vox = leadfield.shape[1]
src = fwd_fixed['src']
sfreq = 250
vertices = [src[0]['vertno'], src[1]['vertno']]
iir_params = dict(order=2, ftype='butter')
b10, a10 = butter(N=2, Wn=np.array([8, 12]) / sfreq * 2, btype='bandpass')
b20, a20 = butter(N=2, Wn=np.array([16, 24]) / sfreq * 2, btype='bandpass')
# -----------------------------------------------------
# ID settings
# -----------------------------------------------------
# ids1 = select_subjects('young', 'male', 'right', meta_file_path)
list_ids = listdir_restricted(dir_adjmat, 'sub-')
ids = [list_ids1[:10] for list_ids1 in list_ids]
ids = np.unique(np.sort(ids))
n_subj = len(ids)
# ----------------------------------------------------------------------------------------------------------------------
# Harmoni and rsEEG data - panel A
# 1:2 coh all subjects source-space
# This part is commented because it takes a lot of time - just uncomment it if you wanna run it
# ----------------------------------------------------------------------------------------------------------------------
# plv_src = np.zeros((n_vox, n_subj))
# for i_subj, subj in enumerate(ids):
# print(i_subj, '**************')
# raw_name = op.join(dir_raw_set, subj + '_EC.set')
# raw = read_eeglab_standard_chanloc(raw_name)
# data_raw = raw.get_data()
# inv_sol, inv_op, inv = extract_inv_sol(data_raw.shape, fwd, raw.info)
# fwd_ch = fwd_fixed.ch_names
# raw_ch = raw.info['ch_names']
# ind = [fwd_ch.index(ch) for ch in raw_ch]
# leadfield_raw = leadfield[ind, :]
# sfreq = raw.info['sfreq']
#
# # alpha sources --------
# raw_alpha = raw.copy()
# raw_alpha.load_data()
# raw_alpha.filter(l_freq=8, h_freq=12, method='iir', iir_params=iir_params)
# raw_alpha.set_eeg_reference(projection=True)
# stc_alpha_raw = mne.minimum_norm.apply_inverse_raw(raw_alpha, inverse_operator=inv,
# lambda2=0.05, method=inv_method, pick_ori='normal')
# # beta sources --------
# raw_beta = raw.copy()
# raw_beta.load_data()
# raw_beta.filter(l_freq=16, h_freq=24, method='iir', iir_params=iir_params)
# raw_beta.set_eeg_reference(projection=True)
# stc_beta_raw = mne.minimum_norm.apply_inverse_raw(raw_beta, inverse_operator=inv,
# lambda2=0.1, method=inv_method, pick_ori='normal')
#
# for i_parc, label1 in enumerate(labels):
# print(i_parc)
# parc_idx, _ = label_idx_whole_brain(src, label1)
# data1 = stc_alpha_raw.data[parc_idx, :]
# data2 = stc_beta_raw.data[parc_idx]
# plv_src[parc_idx, i_subj] = compute_phase_connectivity(data1, data2, 1, 2, measure='coh', axis=1, type1='abs')
#
# save_json_from_numpy('/NOBACKUP/Results/lemon_processed_data/parcels/plv_vertices_all-subj', plv_src)
#
# stc_new = mne.SourceEstimate(np.mean(plv_src, axis=-1, keepdims=True), vertices, tmin=0, tstep=0.01, subject='fsaverage')
# stc_new.plot(subject='fsaverage', subjects_dir=subjects_dir, time_viewer=True, hemi='split', background='white',
# surface='pial')
# ----------------------------------------------------------------------------------------------------------------------
# read the graphs
# ----------------------------------------------------------------------------------------------------------------------
# containers for the graphs and asymmetry index -----------------------------------
# all graphs, not thresholded
conn1_all = np.zeros((n_parc, n_parc, n_subj))
conn2_all = np.zeros((n_parc, n_parc, n_subj))
conn2_corr_all = np.zeros((n_parc, n_parc, n_subj))
conn12_all = np.zeros((n_parc, n_parc, n_subj))
conn12_corr_all = np.zeros((n_parc, n_parc, n_subj))
conn12_symm_idx = np.zeros((n_subj, 2)) # asymmetry index container
ind_triu = np.triu_indices(n_parc, k=1)
ind_diag = np.diag_indices(n_parc)
"""
************************* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
CAUTION: graph adjacency matrices are rearranged here --> the parcels are rearranged as the in labels_sorted
they are rearranged in the posterior-anterior direction. In most cases, nearby parcels are also adjacent physically
************************* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
"""
for i_subj, subj in enumerate(ids):
print(i_subj)
pickle_name = op.join(dir_adjmat, subj + '-alpha-alpha')
conn1, pval1, pval1_ = load_pickle(pickle_name)
pickle_name = op.join(dir_adjmat, subj + '-beta-beta')
conn2, pval2, _ = load_pickle(pickle_name)
pickle_name = op.join(dir_adjmat, subj + '-beta-beta-corr-grad')
conn2_corr, pval2_corr, _ = load_pickle(pickle_name)
pickle_name = op.join(dir_adjmat, subj + '-alpha-beta')
conn12, pval12, _ = load_pickle(pickle_name)
pickle_name = op.join(dir_adjmat, subj + '-alpha-beta-corr-grad')
conn12_corr, pval12_corr, _ = load_pickle(pickle_name)
# save the original graphs
conn1_all[:, :, i_subj] = conn1[idx_sorted, :][:, idx_sorted]
conn2_all[:, :, i_subj] = conn2[idx_sorted, :][:, idx_sorted]
conn2_corr_all[:, :, i_subj] = conn2_corr[idx_sorted, :][:, idx_sorted]
conn12_all[:, :, i_subj] = conn12[idx_sorted, :][:, idx_sorted]
conn12_corr_all[:, :, i_subj] = conn12_corr[idx_sorted, :][:, idx_sorted]
# asymmetry index from original graphs
conn12_symm_idx[i_subj, 0] = np.linalg.norm((conn12 - conn12.T)) / (2) / np.linalg.norm(conn12)
conn12_symm_idx[i_subj, 1] = np.linalg.norm((conn12_corr - conn12_corr.T)) / (2) / np.linalg.norm(conn12_corr)
conn12_all = zscore_matrix_fischer(conn12_all)
conn12_corr_all = zscore_matrix_fischer(conn12_corr_all)
# ----------------------------------------------------------------------------------------------------------------------
# Harmoni and rsEEG data - panels B & C & D & E
# means
# # ----------------------------------------------------------------------------------------------------------------------
net_mean_before = np.mean(conn12_all, axis=-1)
net_mean_after = np.mean(conn12_corr_all, axis=-1)
# zscore all -------------------------
conn12_all_z = np.zeros_like(conn12_all)
conn12_corr_all_z = | np.zeros_like(conn12_corr_all) | numpy.zeros_like |
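# Note on zscore_matrix_fischer above: it is project code from the imported
# tools modules; a common construction for coherence-type values in [0, 1) is
# the variance-stabilizing Fisher transform followed by standardization, e.g.
#   z = np.arctanh(conn12_all)  # then z-score across the subject axis
# Treat this as an assumption about the helper, not its actual definition.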
from arg_parser import UserArgs
from collections import Counter
from dataset_handler.dataset import CUB_Xian, SUN_Xian, AWA1_Xian
from dataset_handler.transfer_task_split import ZSLsplit, GZSLsplit, ImbalancedDataSplit, DragonSplit, GFSLSplit
from attribute_expert.model import AttributeExpert
from keras.utils import to_categorical
import numpy as np
class DataLoader(object):
def __init__(self, should_test_split):
# init data factory and split factory
self.data_loaders_factory = {
'CUB': CUB_Xian,
'SUN': SUN_Xian,
'AWA1': AWA1_Xian
}
self.task_factory = {
'ZSL': ZSLsplit(val_fold_id=1),
'GZSL': GZSLsplit(seen_val_seed=1002),
'IMB': ImbalancedDataSplit(classes_shuffle_seed=0, seen_val_seed=0),
'GFSL': GFSLSplit(val_seed=0, test_seed=0, fs_nsamples=UserArgs.train_max_fs_samples),
'DRAGON': DragonSplit(val_seed=0, test_seed=0,
train_dist_function=UserArgs.train_dist,
fs_nsamples=UserArgs.train_max_fs_samples)
}
self.dataset = self.data_loaders_factory[UserArgs.dataset_name](UserArgs.data_dir)
# split dataset to train, val and test
self.data = self.task_factory[UserArgs.transfer_task]._split(self.dataset, should_test_split)
self.data, \
self.X_train, self.Y_train, self.Attributes_train, self.train_classes, \
self.X_val, self.Y_val, self.Attributes_val, self.val_classes, \
self.X_test, self.Y_test, self.Attributes_test, self.test_classes, \
self.input_dim, self.categories_dim, self.attributes_dim, \
self.class_descriptions_crossval, \
self.attributes_groups_ranges_ids = AttributeExpert.prepare_data_for_model(self.data)
# one hot encoding for Y's
self.Y_train_oh = to_categorical(self.Y_train, num_classes=self.categories_dim)
self.Y_val_oh = to_categorical(self.Y_val, num_classes=self.categories_dim)
self.Y_test_oh = to_categorical(self.Y_test, num_classes=self.categories_dim)
# prepare evaluation parameters
self.train_data = (self.X_train, self.Y_train, self.Attributes_train, self.train_classes)
self.val_data = (self.X_val, self.Y_val, self.Attributes_val, self.val_classes)
self.test_data = (self.X_test, self.Y_test, self.Attributes_test, self.test_classes)
train_distribution = self.task_factory[UserArgs.transfer_task].train_distribution
# save num_training_samples_per_class
class_samples_map = Counter(self.Y_train)
self.num_training_samples_per_class = [class_samples_map[key] for key in
sorted(class_samples_map.keys(), reverse=False)]
# save many_shot and few_shot classes
self.ms_classes = self.task_factory[UserArgs.transfer_task].ms_classes
self.fs_classes = self.task_factory[UserArgs.transfer_task].fs_classes
# seperate validation to many shot, few shot indexes
val_ms_indexes, val_fs_indexes = self.get_ms_and_fs_indexes(self.Y_val)
X_val_many = self.X_val[val_ms_indexes]
Y_val_many = self.Y_val[val_ms_indexes]
X_val_few = self.X_val[val_fs_indexes]
Y_val_few = self.Y_val[val_fs_indexes]
self.eval_params = (self.X_val, self.Y_val, self.val_classes,
train_distribution,self.ms_classes, self.fs_classes, X_val_many,
Y_val_many, X_val_few, Y_val_few)
test_ms_indexes, test_fs_indexes = self.get_ms_and_fs_indexes(self.Y_test)
X_test_many = self.X_test[test_ms_indexes]
Y_test_many = self.Y_test[test_ms_indexes]
X_test_few = self.X_test[test_fs_indexes]
Y_test_few = self.Y_test[test_fs_indexes]
self.test_eval_params = (self.X_test, self.Y_test, self.test_classes,
train_distribution, self.ms_classes, self.fs_classes, X_test_many,
Y_test_many, X_test_few, Y_test_few)
print(f"""Dataset: {UserArgs.dataset_name}
Train Shape: {self.X_train.shape}
Val Shape: {self.X_val.shape}
Test Shape: {self.X_test.shape}""")
# Evaluate many and few shot accuracies
def get_ms_and_fs_indexes(self, Y):
# get all indexes of many_shot classes
ms_indexes = np.array([], dtype=int)
for ms_class in self.ms_classes:
cur_class_indexes = np.where(Y == ms_class)[0]
ms_indexes = np.append(ms_indexes, cur_class_indexes)
# get all indexes of few_shot classes
fs_indexes = | np.array([], dtype=int) | numpy.array |
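        # Usage sketch (illustrative; mirrors what __init__ does for the
        # validation set):
        #   loader = DataLoader(should_test_split=True)
        #   ms_idx, fs_idx = loader.get_ms_and_fs_indexes(loader.Y_test)
        #   X_many, X_few = loader.X_test[ms_idx], loader.X_test[fs_idx]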