omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_scalarmath.py

import contextlib
import sys
import warnings
import itertools
import operator
import platform
from numpy.compat import _pep440
import pytest
from hypothesis import given, settings
from hypothesis.strategies import sampled_from
from hypothesis.extra import numpy as hynp
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_almost_equal,
assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data,
assert_warns,
)
types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
np.int_, np.uint, np.longlong, np.ulonglong,
np.single, np.double, np.longdouble, np.csingle,
np.cdouble, np.clongdouble]
floating_types = np.floating.__subclasses__()
complex_floating_types = np.complexfloating.__subclasses__()
objecty_things = [object(), None]
reasonable_operators_for_scalars = [
operator.lt, operator.le, operator.eq, operator.ne, operator.ge,
operator.gt, operator.add, operator.floordiv, operator.mod,
operator.mul, operator.pow, operator.sub, operator.truediv,
]
# This compares scalarmath against ufuncs.
class TestTypes:
def test_types(self):
for atype in types:
a = atype(1)
assert_(a == 1, "error with %r: got %r" % (atype, a))
def test_type_add(self):
# list of types
for k, atype in enumerate(types):
a_scalar = atype(3)
a_array = np.array([3], dtype=atype)
for l, btype in enumerate(types):
b_scalar = btype(1)
b_array = np.array([1], dtype=btype)
c_scalar = a_scalar + b_scalar
c_array = a_array + b_array
# It was comparing the type numbers, but the new ufunc
# function-finding mechanism finds the lowest function
# to which both inputs can be cast - which produces 'l'
# when you do 'q' + 'b'. The old function finding mechanism
# skipped ahead based on the first argument, but that
# does not produce properly symmetric results...
assert_equal(c_scalar.dtype, c_array.dtype,
"error with types (%d/'%c' + %d/'%c')" %
(k, np.dtype(atype).char, l, np.dtype(btype).char))
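    def test_type_add_symmetry_sketch(self):
        # Added sketch (not part of the original suite): the symmetric
        # promotion described above is also visible through the public
        # np.result_type API, so operand order must not matter.
        assert_equal(np.result_type(np.longlong, np.byte),
                     np.result_type(np.byte, np.longlong))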
def test_type_create(self):
for k, atype in enumerate(types):
a = np.array([1, 2, 3], atype)
b = atype([1, 2, 3])
assert_equal(a, b)
def test_leak(self):
# test leak of scalar objects
# a leak would show up in valgrind as still-reachable of ~2.6MB
for i in range(200000):
np.add(1, 1)
@pytest.mark.slow
@settings(max_examples=10000, deadline=2000)
@given(sampled_from(reasonable_operators_for_scalars),
hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()),
hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()))
def test_array_scalar_ufunc_equivalence(op, arr1, arr2):
"""
This is a thorough test attempting to cover important promotion paths
and ensuring that arrays and scalars stay as aligned as possible.
However, if it creates troubles, it should maybe just be removed.
"""
scalar1 = arr1[()]
scalar2 = arr2[()]
assert isinstance(scalar1, np.generic)
assert isinstance(scalar2, np.generic)
if arr1.dtype.kind == "c" or arr2.dtype.kind == "c":
comp_ops = {operator.ge, operator.gt, operator.le, operator.lt}
if op in comp_ops and (np.isnan(scalar1) or np.isnan(scalar2)):
pytest.xfail("complex comp ufuncs use sort-order, scalars do not.")
# ignore fpe's since they may just mismatch for integers anyway.
with warnings.catch_warnings(), np.errstate(all="ignore"):
# Comparisons DeprecationWarnings replacing errors (2022-03):
warnings.simplefilter("error", DeprecationWarning)
try:
res = op(arr1, arr2)
except Exception as e:
with pytest.raises(type(e)):
op(scalar1, scalar2)
else:
scalar_res = op(scalar1, scalar2)
assert_array_equal(scalar_res, res)
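def test_zero_d_extraction_sketch():
    # Added sketch (not part of the original suite): indexing a 0-d array
    # with `[()]`, as done above, yields a true NumPy scalar rather than
    # a 0-d array.
    scalar = np.array(1.0)[()]
    assert isinstance(scalar, np.float64)
    assert not isinstance(scalar, np.ndarray)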
class TestBaseMath:
def test_blocked(self):
        # test alignment offsets for simd instructions
# alignments for vz + 2 * (vs - 1) + 1
for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]:
for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
type='binary',
max_size=sz):
exp1 = np.ones_like(inp1)
inp1[...] = np.ones_like(inp1)
inp2[...] = np.zeros_like(inp2)
assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg)
assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)
np.add(inp1, inp2, out=out)
assert_almost_equal(out, exp1, err_msg=msg)
inp2[...] += np.arange(inp2.size, dtype=dt) + 1
assert_almost_equal(np.square(inp2),
np.multiply(inp2, inp2), err_msg=msg)
# skip true divide for ints
if dt != np.int32:
assert_almost_equal(np.reciprocal(inp2),
np.divide(1, inp2), err_msg=msg)
inp1[...] = np.ones_like(inp1)
np.add(inp1, 2, out=out)
assert_almost_equal(out, exp1 + 2, err_msg=msg)
inp2[...] = np.ones_like(inp2)
np.add(2, inp2, out=out)
assert_almost_equal(out, exp1 + 2, err_msg=msg)
def test_lower_align(self):
# check data that is not aligned to element size
        # i.e. doubles are aligned to 4 bytes on i386
d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
assert_almost_equal(d + d, d * 2)
np.add(d, d, out=o)
np.add(np.ones_like(d), d, out=o)
np.add(d, np.ones_like(d), out=o)
np.add(np.ones_like(d), d)
np.add(d, np.ones_like(d))
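    def test_lower_align_sketch(self):
        # Added sketch (not part of the original suite): offsetting the
        # int8 buffer by 4 bytes before viewing as float64, as done
        # above, yields a 22-element array whose storage is typically
        # not aligned to the 8-byte element size.
        d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
        assert d.dtype == np.float64
        assert d.nbytes == 22 * 8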
class TestPower:
def test_small_types(self):
for t in [np.int8, np.int16, np.float16]:
a = t(3)
b = a ** 4
assert_(b == 81, "error with %r: got %r" % (t, b))
def test_large_types(self):
for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]:
a = t(51)
b = a ** 4
msg = "error with %r: got %r" % (t, b)
if np.issubdtype(t, np.integer):
assert_(b == 6765201, msg)
else:
assert_almost_equal(b, 6765201, err_msg=msg)
def test_integers_to_negative_integer_power(self):
# Note that the combination of uint64 with a signed integer
# has common type np.float64. The other combinations should all
# raise a ValueError for integer ** negative integer.
exp = [np.array(-1, dt)[()] for dt in 'bhilq']
# 1 ** -1 possible special case
base = [np.array(1, dt)[()] for dt in 'bhilqBHILQ']
for i1, i2 in itertools.product(base, exp):
if i1.dtype != np.uint64:
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
assert_(res.dtype.type is np.float64)
assert_almost_equal(res, 1.)
# -1 ** -1 possible special case
base = [np.array(-1, dt)[()] for dt in 'bhilq']
for i1, i2 in itertools.product(base, exp):
if i1.dtype != np.uint64:
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
assert_(res.dtype.type is np.float64)
assert_almost_equal(res, -1.)
# 2 ** -1 perhaps generic
base = [np.array(2, dt)[()] for dt in 'bhilqBHILQ']
for i1, i2 in itertools.product(base, exp):
if i1.dtype != np.uint64:
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
assert_(res.dtype.type is np.float64)
assert_almost_equal(res, .5)
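    def test_uint64_signed_promotion_sketch(self):
        # Added sketch (not part of the original suite): uint64 combined
        # with any signed integer promotes to float64, which is why the
        # uint64 cases above return a float result instead of raising.
        assert np.result_type(np.uint64, np.int8) == np.float64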
def test_mixed_types(self):
typelist = [np.int8, np.int16, np.float16,
np.float32, np.float64, np.int8,
np.int16, np.int32, np.int64]
for t1 in typelist:
for t2 in typelist:
a = t1(3)
b = t2(2)
result = a**b
msg = ("error with %r and %r:"
"got %r, expected %r") % (t1, t2, result, 9)
if np.issubdtype(np.dtype(result), np.integer):
assert_(result == 9, msg)
else:
assert_almost_equal(result, 9, err_msg=msg)
def test_modular_power(self):
# modular power is not implemented, so ensure it errors
a = 5
b = 4
c = 10
expected = pow(a, b, c) # noqa: F841
for t in (np.int32, np.float32, np.complex64):
# note that 3-operand power only dispatches on the first argument
assert_raises(TypeError, operator.pow, t(a), b, c)
assert_raises(TypeError, operator.pow, np.array(t(a)), b, c)
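    def test_modular_power_python_sketch(self):
        # Added sketch (not part of the original suite): plain Python
        # ints do support three-operand pow, which is exactly what NumPy
        # scalars decline to implement above.
        assert pow(5, 4, 10) == 5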
def floordiv_and_mod(x, y):
return (x // y, x % y)
def _signs(dt):
if dt in np.typecodes['UnsignedInteger']:
return (+1,)
else:
return (+1, -1)
class TestModulus:
def test_modulus_basic(self):
dt = np.typecodes['AllInteger'] + np.typecodes['Float']
for op in [floordiv_and_mod, divmod]:
for dt1, dt2 in itertools.product(dt, dt):
for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
a = np.array(sg1*71, dtype=dt1)[()]
b = np.array(sg2*19, dtype=dt2)[()]
div, rem = op(a, b)
assert_equal(div*b + rem, a, err_msg=msg)
if sg2 == -1:
assert_(b < rem <= 0, msg)
else:
assert_(b > rem >= 0, msg)
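    def test_modulus_invariant_sketch(self):
        # Added sketch (not part of the original suite) of the floored
        # division invariant checked above: the remainder takes the sign
        # of the divisor and div*b + rem == a.
        assert divmod(-71, 19) == (-4, 5)
        assert divmod(71, -19) == (-4, -5)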
def test_float_modulus_exact(self):
# test that float results are exact for small integers. This also
# holds for the same integers scaled by powers of two.
nlst = list(range(-127, 0))
plst = list(range(1, 128))
dividend = nlst + [0] + plst
divisor = nlst + plst
arg = list(itertools.product(dividend, divisor))
tgt = list(divmod(*t) for t in arg)
a, b = np.array(arg, dtype=int).T
        # convert exact integer results from Python to float so that
        # signed zero can be used; it is checked.
tgtdiv, tgtrem = np.array(tgt, dtype=float).T
tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)
for op in [floordiv_and_mod, divmod]:
for dt in np.typecodes['Float']:
msg = 'op: %s, dtype: %s' % (op.__name__, dt)
fa = a.astype(dt)
fb = b.astype(dt)
# use list comprehension so a_ and b_ are scalars
div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)])
assert_equal(div, tgtdiv, err_msg=msg)
assert_equal(rem, tgtrem, err_msg=msg)
def test_float_modulus_roundoff(self):
# gh-6127
dt = np.typecodes['Float']
for op in [floordiv_and_mod, divmod]:
for dt1, dt2 in itertools.product(dt, dt):
for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
a = np.array(sg1*78*6e-8, dtype=dt1)[()]
b = np.array(sg2*6e-8, dtype=dt2)[()]
div, rem = op(a, b)
# Equal assertion should hold when fmod is used
assert_equal(div*b + rem, a, err_msg=msg)
if sg2 == -1:
assert_(b < rem <= 0, msg)
else:
assert_(b > rem >= 0, msg)
def test_float_modulus_corner_cases(self):
# Check remainder magnitude.
for dt in np.typecodes['Float']:
b = np.array(1.0, dtype=dt)
a = np.nextafter(np.array(0.0, dtype=dt), -b)
rem = operator.mod(a, b)
assert_(rem <= b, 'dt: %s' % dt)
rem = operator.mod(-a, -b)
assert_(rem >= -b, 'dt: %s' % dt)
# Check nans, inf
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in remainder")
sup.filter(RuntimeWarning, "divide by zero encountered in remainder")
sup.filter(RuntimeWarning, "divide by zero encountered in floor_divide")
sup.filter(RuntimeWarning, "divide by zero encountered in divmod")
sup.filter(RuntimeWarning, "invalid value encountered in divmod")
for dt in np.typecodes['Float']:
fone = np.array(1.0, dtype=dt)
fzer = np.array(0.0, dtype=dt)
finf = np.array(np.inf, dtype=dt)
fnan = np.array(np.nan, dtype=dt)
rem = operator.mod(fone, fzer)
assert_(np.isnan(rem), 'dt: %s' % dt)
# MSVC 2008 returns NaN here, so disable the check.
#rem = operator.mod(fone, finf)
#assert_(rem == fone, 'dt: %s' % dt)
rem = operator.mod(fone, fnan)
assert_(np.isnan(rem), 'dt: %s' % dt)
rem = operator.mod(finf, fone)
assert_(np.isnan(rem), 'dt: %s' % dt)
for op in [floordiv_and_mod, divmod]:
div, mod = op(fone, fzer)
                    assert_(np.isinf(div))
                    assert_(np.isnan(mod))
def test_inplace_floordiv_handling(self):
# issue gh-12927
# this only applies to in-place floordiv //=, because the output type
# promotes to float which does not fit
a = np.array([1, 2], np.int64)
b = np.array([1, 2], np.uint64)
with pytest.raises(TypeError,
match=r"Cannot cast ufunc 'floor_divide' output from"):
a //= b
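    def test_inplace_floordiv_promotion_sketch(self):
        # Added sketch (not part of the original suite): int64 // uint64
        # promotes to float64, which is why the in-place version above
        # cannot cast its result back into the int64 operand.
        a = np.array([1, 2], np.int64)
        b = np.array([1, 2], np.uint64)
        assert (a // b).dtype == np.float64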
class TestComplexDivision:
def test_zero_division(self):
with np.errstate(all="ignore"):
for t in [np.complex64, np.complex128]:
a = t(0.0)
b = t(1.0)
assert_(np.isinf(b/a))
b = t(complex(np.inf, np.inf))
assert_(np.isinf(b/a))
b = t(complex(np.inf, np.nan))
assert_(np.isinf(b/a))
b = t(complex(np.nan, np.inf))
assert_(np.isinf(b/a))
b = t(complex(np.nan, np.nan))
assert_(np.isnan(b/a))
b = t(0.)
assert_(np.isnan(b/a))
def test_signed_zeros(self):
with np.errstate(all="ignore"):
for t in [np.complex64, np.complex128]:
# tupled (numerator, denominator, expected)
# for testing as expected == numerator/denominator
data = (
(( 0.0,-1.0), ( 0.0, 1.0), (-1.0,-0.0)),
(( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
(( 0.0,-1.0), (-0.0,-1.0), ( 1.0, 0.0)),
(( 0.0,-1.0), (-0.0, 1.0), (-1.0, 0.0)),
(( 0.0, 1.0), ( 0.0,-1.0), (-1.0, 0.0)),
(( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
((-0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
((-0.0, 1.0), ( 0.0,-1.0), (-1.0,-0.0))
)
for cases in data:
n = cases[0]
d = cases[1]
ex = cases[2]
result = t(complex(n[0], n[1])) / t(complex(d[0], d[1]))
# check real and imag parts separately to avoid comparison
# in array context, which does not account for signed zeros
assert_equal(result.real, ex[0])
assert_equal(result.imag, ex[1])
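    def test_signed_zero_equality_sketch(self):
        # Added sketch (not part of the original suite): plain equality
        # does not distinguish signed zeros, which is why the real and
        # imaginary parts are compared separately above.
        assert np.float64(-0.0) == np.float64(0.0)
        assert np.signbit(np.float64(-0.0))
        assert not np.signbit(np.float64(0.0))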
def test_branches(self):
with np.errstate(all="ignore"):
for t in [np.complex64, np.complex128]:
# tupled (numerator, denominator, expected)
# for testing as expected == numerator/denominator
data = list()
# trigger branch: real(fabs(denom)) > imag(fabs(denom))
# followed by else condition as neither are == 0
data.append((( 2.0, 1.0), ( 2.0, 1.0), (1.0, 0.0)))
# trigger branch: real(fabs(denom)) > imag(fabs(denom))
# followed by if condition as both are == 0
# is performed in test_zero_division(), so this is skipped
# trigger else if branch: real(fabs(denom)) < imag(fabs(denom))
data.append((( 1.0, 2.0), ( 1.0, 2.0), (1.0, 0.0)))
for cases in data:
n = cases[0]
d = cases[1]
ex = cases[2]
result = t(complex(n[0], n[1])) / t(complex(d[0], d[1]))
# check real and imag parts separately to avoid comparison
# in array context, which does not account for signed zeros
assert_equal(result.real, ex[0])
assert_equal(result.imag, ex[1])
class TestConversion:
def test_int_from_long(self):
l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18]
li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18]
for T in [None, np.float64, np.int64]:
a = np.array(l, dtype=T)
assert_equal([int(_m) for _m in a], li)
a = np.array(l[:3], dtype=np.uint64)
assert_equal([int(_m) for _m in a], li[:3])
def test_iinfo_long_values(self):
for code in 'bBhH':
res = np.array(np.iinfo(code).max + 1, dtype=code)
tgt = np.iinfo(code).min
assert_(res == tgt)
for code in np.typecodes['AllInteger']:
res = np.array(np.iinfo(code).max, dtype=code)
tgt = np.iinfo(code).max
assert_(res == tgt)
for code in np.typecodes['AllInteger']:
res = np.dtype(code).type(np.iinfo(code).max)
tgt = np.iinfo(code).max
assert_(res == tgt)
def test_int_raise_behaviour(self):
def overflow_error_func(dtype):
dtype(np.iinfo(dtype).max + 1)
for code in [np.int_, np.uint, np.longlong, np.ulonglong]:
assert_raises(OverflowError, overflow_error_func, code)
def test_int_from_infinite_longdouble(self):
# gh-627
x = np.longdouble(np.inf)
assert_raises(OverflowError, int, x)
with suppress_warnings() as sup:
sup.record(np.ComplexWarning)
x = np.clongdouble(np.inf)
assert_raises(OverflowError, int, x)
assert_equal(len(sup.log), 1)
@pytest.mark.skipif(not IS_PYPY, reason="Test is PyPy only (gh-9972)")
def test_int_from_infinite_longdouble___int__(self):
x = np.longdouble(np.inf)
assert_raises(OverflowError, x.__int__)
with suppress_warnings() as sup:
sup.record(np.ComplexWarning)
x = np.clongdouble(np.inf)
assert_raises(OverflowError, x.__int__)
assert_equal(len(sup.log), 1)
@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
reason="long double is same as double")
@pytest.mark.skipif(platform.machine().startswith("ppc"),
reason="IBM double double")
def test_int_from_huge_longdouble(self):
# Produce a longdouble that would overflow a double,
# use exponent that avoids bug in Darwin pow function.
exp = np.finfo(np.double).maxexp - 1
huge_ld = 2 * 1234 * np.longdouble(2) ** exp
huge_i = 2 * 1234 * 2 ** exp
assert_(huge_ld != np.inf)
assert_equal(int(huge_ld), huge_i)
def test_int_from_longdouble(self):
x = np.longdouble(1.5)
assert_equal(int(x), 1)
x = np.longdouble(-10.5)
assert_equal(int(x), -10)
def test_numpy_scalar_relational_operators(self):
# All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1)[()] > np.array(0, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1)[()] < np.array(0, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
#Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))
#unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1)[()] != np.array(-1, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
#Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1)[()] == np.array(-1, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
def test_scalar_comparison_to_none(self):
        # Scalars should just return False and not give a warning.
        # The comparisons are flagged by pep8; ignore that.
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', FutureWarning)
assert_(not np.float32(1) == None)
assert_(not np.str_('test') == None)
# This is dubious (see below):
assert_(not np.datetime64('NaT') == None)
assert_(np.float32(1) != None)
assert_(np.str_('test') != None)
# This is dubious (see below):
assert_(np.datetime64('NaT') != None)
assert_(len(w) == 0)
# For documentation purposes, this is why the datetime is dubious.
# At the time of deprecation this was no behaviour change, but
# it has to be considered when the deprecations are done.
assert_(np.equal(np.datetime64('NaT'), None))
#class TestRepr:
# def test_repr(self):
# for t in types:
# val = t(1197346475.0137341)
# val_repr = repr(val)
# val2 = eval(val_repr)
# assert_equal( val, val2 )
class TestRepr:
def _test_type_repr(self, t):
finfo = np.finfo(t)
last_fraction_bit_idx = finfo.nexp + finfo.nmant
last_exponent_bit_idx = finfo.nexp
storage_bytes = np.dtype(t).itemsize*8
# could add some more types to the list below
for which in ['small denorm', 'small norm']:
# Values from https://en.wikipedia.org/wiki/IEEE_754
constr = np.array([0x00]*storage_bytes, dtype=np.uint8)
if which == 'small denorm':
byte = last_fraction_bit_idx // 8
bytebit = 7-(last_fraction_bit_idx % 8)
constr[byte] = 1 << bytebit
elif which == 'small norm':
byte = last_exponent_bit_idx // 8
bytebit = 7-(last_exponent_bit_idx % 8)
constr[byte] = 1 << bytebit
else:
raise ValueError('hmm')
val = constr.view(t)[0]
val_repr = repr(val)
val2 = t(eval(val_repr))
if not (val2 == 0 and val < 1e-100):
assert_equal(val, val2)
def test_float_repr(self):
# long double test cannot work, because eval goes through a python
# float
for t in [np.float32, np.float64]:
self._test_type_repr(t)
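    def test_repr_roundtrip_sketch(self):
        # Added sketch (not part of the original suite) of the round-trip
        # property exercised above: eval(repr(x)) reproduces the value.
        val = np.float64(0.1)
        assert np.float64(eval(repr(val))) == val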
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
class TestSizeOf:
def test_equal_nbytes(self):
for type in types:
x = type(0)
assert_(sys.getsizeof(x) > x.nbytes)
def test_error(self):
d = np.float32()
assert_raises(TypeError, d.__sizeof__, "a")
class TestMultiply:
def test_seq_repeat(self):
        # Test that basic sequences get repeated when multiplied with
        # numpy integers, and that errors are raised when multiplied
        # with others. Some of this behaviour may be controversial and
        # could be open for change.
accepted_types = set(np.typecodes["AllInteger"])
deprecated_types = {'?'}
forbidden_types = (
set(np.typecodes["All"]) - accepted_types - deprecated_types)
forbidden_types -= {'V'} # can't default-construct void scalars
for seq_type in (list, tuple):
seq = seq_type([1, 2, 3])
for numpy_type in accepted_types:
i = np.dtype(numpy_type).type(2)
assert_equal(seq * i, seq * int(i))
assert_equal(i * seq, int(i) * seq)
for numpy_type in deprecated_types:
i = np.dtype(numpy_type).type()
assert_equal(
assert_warns(DeprecationWarning, operator.mul, seq, i),
seq * int(i))
assert_equal(
assert_warns(DeprecationWarning, operator.mul, i, seq),
int(i) * seq)
for numpy_type in forbidden_types:
i = np.dtype(numpy_type).type()
assert_raises(TypeError, operator.mul, seq, i)
assert_raises(TypeError, operator.mul, i, seq)
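    def test_seq_repeat_sketch(self):
        # Added sketch (not part of the original suite): a NumPy integer
        # repeats a Python sequence exactly like the equivalent int.
        assert [1, 2] * np.int64(3) == [1, 2, 1, 2, 1, 2]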
def test_no_seq_repeat_basic_array_like(self):
# Test that an array-like which does not know how to be multiplied
# does not attempt sequence repeat (raise TypeError).
# See also gh-7428.
class ArrayLike:
def __init__(self, arr):
self.arr = arr
def __array__(self):
return self.arr
# Test for simple ArrayLike above and memoryviews (original report)
for arr_like in (ArrayLike(np.ones(3)), memoryview(np.ones(3))):
assert_array_equal(arr_like * np.float32(3.), np.full(3, 3.))
assert_array_equal(np.float32(3.) * arr_like, np.full(3, 3.))
assert_array_equal(arr_like * np.int_(3), np.full(3, 3))
assert_array_equal(np.int_(3) * arr_like, np.full(3, 3))
class TestNegative:
def test_exceptions(self):
a = np.ones((), dtype=np.bool_)[()]
assert_raises(TypeError, operator.neg, a)
def test_result(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
for dt in types:
a = np.ones((), dtype=dt)[()]
assert_equal(operator.neg(a) + a, 0)
class TestSubtract:
def test_exceptions(self):
a = np.ones((), dtype=np.bool_)[()]
assert_raises(TypeError, operator.sub, a, a)
def test_result(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
for dt in types:
a = np.ones((), dtype=dt)[()]
assert_equal(operator.sub(a, a), 0)
class TestAbs:
def _test_abs_func(self, absfunc, test_dtype):
x = test_dtype(-1.5)
assert_equal(absfunc(x), 1.5)
x = test_dtype(0.0)
res = absfunc(x)
# assert_equal() checks zero signedness
assert_equal(res, 0.0)
x = test_dtype(-0.0)
res = absfunc(x)
assert_equal(res, 0.0)
x = test_dtype(np.finfo(test_dtype).max)
assert_equal(absfunc(x), x.real)
with suppress_warnings() as sup:
sup.filter(UserWarning)
x = test_dtype(np.finfo(test_dtype).tiny)
assert_equal(absfunc(x), x.real)
x = test_dtype(np.finfo(test_dtype).min)
assert_equal(absfunc(x), -x.real)
@pytest.mark.parametrize("dtype", floating_types + complex_floating_types)
def test_builtin_abs(self, dtype):
if (
sys.platform == "cygwin" and dtype == np.clongdouble and
(
_pep440.parse(platform.release().split("-")[0])
< _pep440.Version("3.3.0")
)
):
pytest.xfail(
reason="absl is computed in double precision on cygwin < 3.3"
)
self._test_abs_func(abs, dtype)
@pytest.mark.parametrize("dtype", floating_types + complex_floating_types)
def test_numpy_abs(self, dtype):
if (
sys.platform == "cygwin" and dtype == np.clongdouble and
(
_pep440.parse(platform.release().split("-")[0])
< _pep440.Version("3.3.0")
)
):
pytest.xfail(
reason="absl is computed in double precision on cygwin < 3.3"
)
self._test_abs_func(np.abs, dtype)
class TestBitShifts:
@pytest.mark.parametrize('type_code', np.typecodes['AllInteger'])
@pytest.mark.parametrize('op',
[operator.rshift, operator.lshift], ids=['>>', '<<'])
def test_shift_all_bits(self, type_code, op):
""" Shifts where the shift amount is the width of the type or wider """
# gh-2449
dt = np.dtype(type_code)
nbits = dt.itemsize * 8
for val in [5, -5]:
for shift in [nbits, nbits + 4]:
val_scl = dt.type(val)
shift_scl = dt.type(shift)
res_scl = op(val_scl, shift_scl)
if val_scl < 0 and op is operator.rshift:
# sign bit is preserved
assert_equal(res_scl, -1)
else:
assert_equal(res_scl, 0)
# Result on scalars should be the same as on arrays
val_arr = np.array([val]*32, dtype=dt)
shift_arr = np.array([shift]*32, dtype=dt)
res_arr = op(val_arr, shift_arr)
assert_equal(res_arr, res_scl)
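    def test_shift_all_bits_sketch(self):
        # Added sketch (not part of the original suite): unlike C, where
        # shifting by the full bit width is undefined behaviour, NumPy
        # defines the result, as checked above.
        assert operator.rshift(np.int32(-5), np.int32(32)) == -1
        assert operator.lshift(np.int32(5), np.int32(32)) == 0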
class TestHash:
@pytest.mark.parametrize("type_code", np.typecodes['AllInteger'])
def test_integer_hashes(self, type_code):
scalar = np.dtype(type_code).type
for i in range(128):
assert hash(i) == hash(scalar(i))
@pytest.mark.parametrize("type_code", np.typecodes['AllFloat'])
def test_float_and_complex_hashes(self, type_code):
scalar = np.dtype(type_code).type
for val in [np.pi, np.inf, 3, 6.]:
numpy_val = scalar(val)
# Cast back to Python, in case the NumPy scalar has less precision
if numpy_val.dtype.kind == 'c':
val = complex(numpy_val)
else:
val = float(numpy_val)
assert val == numpy_val
assert hash(val) == hash(numpy_val)
if hash(float(np.nan)) != hash(float(np.nan)):
            # If Python distinguishes different NaNs we do so too (gh-18833)
assert hash(scalar(np.nan)) != hash(scalar(np.nan))
@pytest.mark.parametrize("type_code", np.typecodes['Complex'])
def test_complex_hashes(self, type_code):
# Test some complex valued hashes specifically:
scalar = np.dtype(type_code).type
for val in [np.pi+1j, np.inf-3j, 3j, 6.+1j]:
numpy_val = scalar(val)
assert hash(complex(numpy_val)) == hash(numpy_val)
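    def test_hash_equality_invariant_sketch(self):
        # Added sketch (not part of the original suite) of the Python
        # invariant relied on above: objects that compare equal must
        # hash equal, even across types.
        assert np.float64(3.0) == 3
        assert hash(np.float64(3.0)) == hash(3)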
@contextlib.contextmanager
def recursionlimit(n):
o = sys.getrecursionlimit()
try:
sys.setrecursionlimit(n)
yield
finally:
sys.setrecursionlimit(o)
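# Added usage sketch (not part of the original suite) for the helper above:
#
#     with recursionlimit(200):
#         ...  # run code that must not recurse deeply
#
# The previous limit is restored even if the body raises.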
@given(sampled_from(objecty_things),
sampled_from(reasonable_operators_for_scalars),
sampled_from(types))
def test_operator_object_left(o, op, type_):
try:
with recursionlimit(200):
op(o, type_(1))
except TypeError:
pass
@given(sampled_from(objecty_things),
sampled_from(reasonable_operators_for_scalars),
sampled_from(types))
def test_operator_object_right(o, op, type_):
try:
with recursionlimit(200):
op(type_(1), o)
except TypeError:
pass
@given(sampled_from(reasonable_operators_for_scalars),
sampled_from(types),
sampled_from(types))
def test_operator_scalars(op, type1, type2):
try:
op(type1(1), type2(1))
except TypeError:
pass
@pytest.mark.parametrize("op", reasonable_operators_for_scalars)
def test_longdouble_inf_loop(op):
try:
op(np.longdouble(3), None)
except TypeError:
pass
try:
op(None, np.longdouble(3))
except TypeError:
pass
@pytest.mark.parametrize("op", reasonable_operators_for_scalars)
def test_clongdouble_inf_loop(op):
if op in {operator.mod} and False:
pytest.xfail("The modulo operator is known to be broken")
try:
op(np.clongdouble(3), None)
except TypeError:
pass
try:
        op(None, np.clongdouble(3))
except TypeError:
pass
@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
@pytest.mark.parametrize("operation", [
lambda min, max: max + max,
lambda min, max: min - max,
lambda min, max: max * max], ids=["+", "-", "*"])
def test_scalar_integer_operation_overflow(dtype, operation):
st = np.dtype(dtype).type
min = st(np.iinfo(dtype).min)
max = st(np.iinfo(dtype).max)
with pytest.warns(RuntimeWarning, match="overflow encountered"):
operation(min, max)
@pytest.mark.parametrize("dtype", np.typecodes["Integer"])
@pytest.mark.parametrize("operation", [
lambda min, neg_1: abs(min),
lambda min, neg_1: min * neg_1,
lambda min, neg_1: min // neg_1], ids=["abs", "*", "//"])
def test_scalar_signed_integer_overflow(dtype, operation):
# The minimum signed integer can "overflow" for some additional operations
st = np.dtype(dtype).type
min = st(np.iinfo(dtype).min)
neg_1 = st(-1)
with pytest.warns(RuntimeWarning, match="overflow encountered"):
operation(min, neg_1)
@pytest.mark.parametrize("dtype", np.typecodes["UnsignedInteger"])
@pytest.mark.xfail # TODO: the check is quite simply missing!
def test_scalar_unsigned_integer_overflow(dtype):
val = np.dtype(dtype).type(8)
with pytest.warns(RuntimeWarning, match="overflow encountered"):
-val
@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
@pytest.mark.parametrize("operation", [
lambda val, zero: val // zero,
lambda val, zero: val % zero, ], ids=["//", "%"])
def test_scalar_integer_operation_divbyzero(dtype, operation):
st = np.dtype(dtype).type
val = st(100)
zero = st(0)
with pytest.warns(RuntimeWarning, match="divide by zero"):
operation(val, zero)
ops_with_names = [
("__lt__", "__gt__", operator.lt, True),
("__le__", "__ge__", operator.le, True),
("__eq__", "__eq__", operator.eq, True),
# Note __op__ and __rop__ may be identical here:
("__ne__", "__ne__", operator.ne, True),
("__gt__", "__lt__", operator.gt, True),
("__ge__", "__le__", operator.ge, True),
("__floordiv__", "__rfloordiv__", operator.floordiv, False),
("__truediv__", "__rtruediv__", operator.truediv, False),
("__add__", "__radd__", operator.add, False),
("__mod__", "__rmod__", operator.mod, False),
("__mul__", "__rmul__", operator.mul, False),
("__pow__", "__rpow__", operator.pow, False),
("__sub__", "__rsub__", operator.sub, False),
]
@pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names)
@pytest.mark.parametrize("sctype", [np.float32, np.float64, np.longdouble])
def test_subclass_deferral(sctype, __op__, __rop__, op, cmp):
"""
This test covers scalar subclass deferral. Note that this is exceedingly
complicated, especially since it tends to fall back to the array paths and
these additionally add the "array priority" mechanism.
    The behaviour was modified subtly in 1.22 (to make it closer to how Python
    scalars work). Given its complexity, and the fact that subclassing NumPy
    scalars is probably a bad idea to begin with, there is probably room for
    adjustments here.
"""
class myf_simple1(sctype):
pass
class myf_simple2(sctype):
pass
def op_func(self, other):
return __op__
def rop_func(self, other):
return __rop__
myf_op = type("myf_op", (sctype,), {__op__: op_func, __rop__: rop_func})
# inheritance has to override, or this is correctly lost:
res = op(myf_simple1(1), myf_simple2(2))
assert type(res) == sctype or type(res) == np.bool_
assert op(myf_simple1(1), myf_simple2(2)) == op(1, 2) # inherited
    # Two independent subclasses do not really define an order. This could
    # be attempted, but we do not, since Python's `int` does not either:
assert op(myf_op(1), myf_simple1(2)) == __op__
assert op(myf_simple1(1), myf_op(2)) == op(1, 2) # inherited
def test_longdouble_complex():
# Simple test to check longdouble and complex combinations, since these
# need to go through promotion, which longdouble needs to be careful about.
x = np.longdouble(1)
assert x + 1j == 1+1j
assert 1j + x == 1+1j
@pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names)
@pytest.mark.parametrize("subtype", [float, int, complex, np.float16])
def test_pyscalar_subclasses(subtype, __op__, __rop__, op, cmp):
def op_func(self, other):
return __op__
def rop_func(self, other):
return __rop__
# Check that deferring is indicated using `__array_ufunc__`:
myt = type("myt", (subtype,),
{__op__: op_func, __rop__: rop_func, "__array_ufunc__": None})
# Just like normally, we should never presume we can modify the float.
assert op(myt(1), np.float64(2)) == __op__
assert op(np.float64(1), myt(2)) == __rop__
    if op in {operator.mod, operator.floordiv} and subtype == complex:
        return  # modulo is not supported for complex; do not test.
if __rop__ == __op__:
return
# When no deferring is indicated, subclasses are handled normally.
myt = type("myt", (subtype,), {__rop__: rop_func})
# Check for float32, as a float subclass float64 may behave differently
res = op(myt(1), np.float16(2))
expected = op(subtype(1), np.float16(2))
assert res == expected
assert type(res) == type(expected)
res = op(np.float32(2), myt(1))
expected = op(np.float32(2), subtype(1))
assert res == expected
assert type(res) == type(expected)
# Same check for longdouble:
res = op(myt(1), np.longdouble(2))
expected = op(subtype(1), np.longdouble(2))
assert res == expected
assert type(res) == type(expected)
    res = op(np.longdouble(2), myt(1))
expected = op(np.longdouble(2), subtype(1))
assert res == expected
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_scalarinherit.py

""" Test printing of scalar types.
"""
import pytest
import numpy as np
from numpy.testing import assert_, assert_raises
class A:
pass
class B(A, np.float64):
pass
class C(B):
pass
class D(C, B):
pass
class B0(np.float64, A):
pass
class C0(B0):
pass
class HasNew:
def __new__(cls, *args, **kwargs):
return cls, args, kwargs
class B1(np.float64, HasNew):
pass
class TestInherit:
def test_init(self):
x = B(1.0)
assert_(str(x) == '1.0')
y = C(2.0)
assert_(str(y) == '2.0')
z = D(3.0)
assert_(str(z) == '3.0')
def test_init2(self):
x = B0(1.0)
assert_(str(x) == '1.0')
y = C0(2.0)
assert_(str(y) == '2.0')
def test_gh_15395(self):
# HasNew is the second base, so `np.float64` should have priority
x = B1(1.0)
assert_(str(x) == '1.0')
# previously caused RecursionError!?
with pytest.raises(TypeError):
B1(1.0, 2.0)
class TestCharacter:
def test_char_radd(self):
        # GH issue 9620: reached gentype_add and raised TypeError
np_s = np.string_('abc')
np_u = np.unicode_('abc')
s = b'def'
u = u'def'
assert_(np_s.__radd__(np_s) is NotImplemented)
assert_(np_s.__radd__(np_u) is NotImplemented)
assert_(np_s.__radd__(s) is NotImplemented)
assert_(np_s.__radd__(u) is NotImplemented)
assert_(np_u.__radd__(np_s) is NotImplemented)
assert_(np_u.__radd__(np_u) is NotImplemented)
assert_(np_u.__radd__(s) is NotImplemented)
assert_(np_u.__radd__(u) is NotImplemented)
assert_(s + np_s == b'defabc')
assert_(u + np_u == u'defabc')
class MyStr(str, np.generic):
# would segfault
pass
with assert_raises(TypeError):
# Previously worked, but gave completely wrong result
ret = s + MyStr('abc')
class MyBytes(bytes, np.generic):
# would segfault
pass
ret = s + MyBytes(b'abc')
        assert type(ret) is type(s)
assert ret == b"defabc"
def test_char_repeat(self):
np_s = np.string_('abc')
np_u = np.unicode_('abc')
res_s = b'abc' * 5
res_u = u'abc' * 5
assert_(np_s * 5 == res_s)
assert_(np_u * 5 == res_u)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_scalar_methods.py

"""
Test the scalar constructors, which also do type-coercion
"""
import sys
import fractions
import platform
import types
from typing import Any, Type
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_raises
class TestAsIntegerRatio:
# derived in part from the cpython test "test_floatasratio"
@pytest.mark.parametrize("ftype", [
np.half, np.single, np.double, np.longdouble])
@pytest.mark.parametrize("f, ratio", [
(0.875, (7, 8)),
(-0.875, (-7, 8)),
(0.0, (0, 1)),
(11.5, (23, 2)),
])
def test_small(self, ftype, f, ratio):
assert_equal(ftype(f).as_integer_ratio(), ratio)
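    def test_as_integer_ratio_sketch(self):
        # Added sketch (not part of the original suite):
        # as_integer_ratio mirrors float.as_integer_ratio and returns an
        # exact (numerator, denominator) pair.
        assert np.double(0.25).as_integer_ratio() == (1, 4)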
@pytest.mark.parametrize("ftype", [
np.half, np.single, np.double, np.longdouble])
def test_simple_fractions(self, ftype):
R = fractions.Fraction
assert_equal(R(0, 1),
R(*ftype(0.0).as_integer_ratio()))
assert_equal(R(5, 2),
R(*ftype(2.5).as_integer_ratio()))
assert_equal(R(1, 2),
R(*ftype(0.5).as_integer_ratio()))
assert_equal(R(-2100, 1),
R(*ftype(-2100.0).as_integer_ratio()))
@pytest.mark.parametrize("ftype", [
np.half, np.single, np.double, np.longdouble])
def test_errors(self, ftype):
assert_raises(OverflowError, ftype('inf').as_integer_ratio)
assert_raises(OverflowError, ftype('-inf').as_integer_ratio)
assert_raises(ValueError, ftype('nan').as_integer_ratio)
def test_against_known_values(self):
R = fractions.Fraction
assert_equal(R(1075, 512),
R(*np.half(2.1).as_integer_ratio()))
assert_equal(R(-1075, 512),
R(*np.half(-2.1).as_integer_ratio()))
assert_equal(R(4404019, 2097152),
R(*np.single(2.1).as_integer_ratio()))
assert_equal(R(-4404019, 2097152),
R(*np.single(-2.1).as_integer_ratio()))
assert_equal(R(4728779608739021, 2251799813685248),
R(*np.double(2.1).as_integer_ratio()))
assert_equal(R(-4728779608739021, 2251799813685248),
R(*np.double(-2.1).as_integer_ratio()))
# longdouble is platform dependent
@pytest.mark.parametrize("ftype, frac_vals, exp_vals", [
# dtype test cases generated using hypothesis
# first five generated cases per dtype
(np.half, [0.0, 0.01154830649280303, 0.31082276347447274,
0.527350517124794, 0.8308562335072596],
[0, 1, 0, -8, 12]),
(np.single, [0.0, 0.09248576989263226, 0.8160498218131407,
0.17389442853722373, 0.7956044195067877],
[0, 12, 10, 17, -26]),
(np.double, [0.0, 0.031066908499895136, 0.5214135908877832,
0.45780736035689296, 0.5906586745934036],
[0, -801, 51, 194, -653]),
pytest.param(
np.longdouble,
[0.0, 0.20492557202724854, 0.4277180662199366, 0.9888085019891495,
0.9620175814461964],
[0, -7400, 14266, -7822, -8721],
marks=[
pytest.mark.skipif(
np.finfo(np.double) == np.finfo(np.longdouble),
reason="long double is same as double"),
pytest.mark.skipif(
platform.machine().startswith("ppc"),
reason="IBM double double"),
]
)
])
def test_roundtrip(self, ftype, frac_vals, exp_vals):
for frac, exp in zip(frac_vals, exp_vals):
f = np.ldexp(ftype(frac), exp)
assert f.dtype == ftype
n, d = f.as_integer_ratio()
try:
# workaround for gh-9968
nf = np.longdouble(str(n))
df = np.longdouble(str(d))
except (OverflowError, RuntimeWarning):
# the values may not fit in any float type
pytest.skip("longdouble too small on this platform")
assert_equal(nf / df, f, "{}/{}".format(n, d))
class TestIsInteger:
@pytest.mark.parametrize("str_value", ["inf", "nan"])
@pytest.mark.parametrize("code", np.typecodes["Float"])
def test_special(self, code: str, str_value: str) -> None:
cls = np.dtype(code).type
value = cls(str_value)
assert not value.is_integer()
@pytest.mark.parametrize(
"code", np.typecodes["Float"] + np.typecodes["AllInteger"]
)
def test_true(self, code: str) -> None:
float_array = np.arange(-5, 5).astype(code)
for value in float_array:
assert value.is_integer()
@pytest.mark.parametrize("code", np.typecodes["Float"])
def test_false(self, code: str) -> None:
float_array = np.arange(-5, 5).astype(code)
float_array *= 1.1
for value in float_array:
if value == 0:
continue
assert not value.is_integer()
@pytest.mark.skipif(sys.version_info < (3, 9), reason="Requires python 3.9")
class TestClassGetItem:
@pytest.mark.parametrize("cls", [
np.number,
np.integer,
np.inexact,
np.unsignedinteger,
np.signedinteger,
np.floating,
])
def test_abc(self, cls: Type[np.number]) -> None:
alias = cls[Any]
assert isinstance(alias, types.GenericAlias)
assert alias.__origin__ is cls
def test_abc_complexfloating(self) -> None:
alias = np.complexfloating[Any, Any]
assert isinstance(alias, types.GenericAlias)
assert alias.__origin__ is np.complexfloating
@pytest.mark.parametrize("arg_len", range(4))
def test_abc_complexfloating_subscript_tuple(self, arg_len: int) -> None:
arg_tup = (Any,) * arg_len
if arg_len in (1, 2):
assert np.complexfloating[arg_tup]
else:
match = f"Too {'few' if arg_len == 0 else 'many'} arguments"
with pytest.raises(TypeError, match=match):
np.complexfloating[arg_tup]
@pytest.mark.parametrize("cls", [np.generic, np.flexible, np.character])
def test_abc_non_numeric(self, cls: Type[np.generic]) -> None:
with pytest.raises(TypeError):
cls[Any]
@pytest.mark.parametrize("code", np.typecodes["All"])
def test_concrete(self, code: str) -> None:
cls = np.dtype(code).type
with pytest.raises(TypeError):
cls[Any]
@pytest.mark.parametrize("arg_len", range(4))
def test_subscript_tuple(self, arg_len: int) -> None:
arg_tup = (Any,) * arg_len
if arg_len == 1:
assert np.number[arg_tup]
else:
with pytest.raises(TypeError):
np.number[arg_tup]
def test_subscript_scalar(self) -> None:
assert np.number[Any]
@pytest.mark.skipif(sys.version_info >= (3, 9), reason="Requires python 3.8")
@pytest.mark.parametrize("cls", [np.number, np.complexfloating, np.int64])
def test_class_getitem_38(cls: Type[np.number]) -> None:
match = "Type subscription requires python >= 3.9"
with pytest.raises(TypeError, match=match):
cls[Any]
class TestBitCount:
# derived in part from the cpython test "test_bit_count"
@pytest.mark.parametrize("itype", np.sctypes['int']+np.sctypes['uint'])
def test_small(self, itype):
for a in range(max(np.iinfo(itype).min, 0), 128):
msg = f"Smoke test for {itype}({a}).bit_count()"
assert itype(a).bit_count() == bin(a).count("1"), msg
def test_bit_count(self):
for exp in [10, 17, 63]:
a = 2**exp
assert np.uint64(a).bit_count() == 1
assert np.uint64(a - 1).bit_count() == exp
assert np.uint64(a ^ 63).bit_count() == 7
assert np.uint64((a - 1) ^ 510).bit_count() == exp - 8
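    def test_bit_count_sketch(self):
        # Added sketch (not part of the original suite): bit_count() is
        # a population count, matching Python's int.bit_count().
        assert np.uint64(0b101011).bit_count() == 4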
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_argparse.py

"""
Tests for the private NumPy argument parsing functionality.
They mainly exist to ensure good test coverage without having to try the
weirder cases on actual numpy functions; instead they are tested in one place.
The test function is defined in C to be equivalent to (errors may not always
match exactly, and could be adjusted):
def func(arg1, /, arg2, *, arg3):
i = integer(arg1) # reproducing the 'i' parsing in Python.
return None
"""
import pytest
import numpy as np
from numpy.core._multiarray_tests import argparse_example_function as func
def test_invalid_integers():
with pytest.raises(TypeError,
match="integer argument expected, got float"):
func(1.)
with pytest.raises(OverflowError):
func(2**100)
def test_missing_arguments():
with pytest.raises(TypeError,
match="missing required positional argument 0"):
func()
with pytest.raises(TypeError,
match="missing required positional argument 0"):
func(arg2=1, arg3=4)
with pytest.raises(TypeError,
match=r"missing required argument \'arg2\' \(pos 1\)"):
func(1, arg3=5)
def test_too_many_positional():
# the second argument is positional but can be passed as keyword.
with pytest.raises(TypeError,
match="takes from 2 to 3 positional arguments but 4 were given"):
func(1, 2, 3, 4)
def test_multiple_values():
with pytest.raises(TypeError,
match=r"given by name \('arg2'\) and position \(position 1\)"):
func(1, 2, arg2=3)
def test_string_fallbacks():
# We can (currently?) use numpy strings to test the "slow" fallbacks
# that should normally not be taken due to string interning.
arg2 = np.unicode_("arg2")
missing_arg = np.unicode_("missing_arg")
func(1, **{arg2: 3})
with pytest.raises(TypeError,
match="got an unexpected keyword argument 'missing_arg'"):
func(2, **{missing_arg: 3})
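def test_keyword_fallback_sketch():
    # Added sketch (not part of the original suite): np.unicode_ keys
    # compare equal to plain str keys but are distinct objects, which is
    # what forces the slow keyword-lookup path exercised above.
    assert np.unicode_("arg2") == "arg2"
    assert isinstance(np.unicode_("arg2"), str)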
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_numeric.py

import sys
import warnings
import itertools
import platform
import pytest
import math
from decimal import Decimal
import numpy as np
from numpy.core import umath
from numpy.random import rand, randint, randn
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_warns, assert_array_max_ulp, HAS_REFCOUNT
)
from numpy.core._rational_tests import rational
from hypothesis import given, strategies as st
from hypothesis.extra import numpy as hynp
class TestResize:
def test_copies(self):
A = np.array([[1, 2], [3, 4]])
Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
assert_equal(np.resize(A, (2, 4)), Ar1)
Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
assert_equal(np.resize(A, (4, 2)), Ar2)
Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]])
assert_equal(np.resize(A, (4, 3)), Ar3)
def test_repeats(self):
A = np.array([1, 2, 3])
Ar1 = np.array([[1, 2, 3, 1], [2, 3, 1, 2]])
assert_equal(np.resize(A, (2, 4)), Ar1)
Ar2 = np.array([[1, 2], [3, 1], [2, 3], [1, 2]])
assert_equal(np.resize(A, (4, 2)), Ar2)
Ar3 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]])
assert_equal(np.resize(A, (4, 3)), Ar3)
def test_zeroresize(self):
A = np.array([[1, 2], [3, 4]])
Ar = np.resize(A, (0,))
assert_array_equal(Ar, np.array([]))
assert_equal(A.dtype, Ar.dtype)
Ar = np.resize(A, (0, 2))
assert_equal(Ar.shape, (0, 2))
Ar = np.resize(A, (2, 0))
assert_equal(Ar.shape, (2, 0))
def test_reshape_from_zero(self):
# See also gh-6740
A = np.zeros(0, dtype=[('a', np.float32)])
Ar = np.resize(A, (2, 1))
assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype))
assert_equal(A.dtype, Ar.dtype)
def test_negative_resize(self):
A = np.arange(0, 10, dtype=np.float32)
new_shape = (-10, -1)
with pytest.raises(ValueError, match=r"negative"):
np.resize(A, new_shape=new_shape)
def test_subclass(self):
class MyArray(np.ndarray):
__array_priority__ = 1.
my_arr = np.array([1]).view(MyArray)
assert type(np.resize(my_arr, 5)) is MyArray
assert type(np.resize(my_arr, 0)) is MyArray
my_arr = np.array([]).view(MyArray)
assert type(np.resize(my_arr, 5)) is MyArray
class TestNonarrayArgs:
# check that non-array arguments to functions wrap them in arrays
def test_choose(self):
choices = [[0, 1, 2],
[3, 4, 5],
[5, 6, 7]]
tgt = [5, 1, 5]
a = [2, 0, 1]
out = np.choose(a, choices)
assert_equal(out, tgt)
def test_clip(self):
arr = [-1, 5, 2, 3, 10, -4, -9]
out = np.clip(arr, 2, 7)
tgt = [2, 5, 2, 3, 7, 2, 2]
assert_equal(out, tgt)
def test_compress(self):
arr = [[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]]
tgt = [[5, 6, 7, 8, 9]]
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
def test_count_nonzero(self):
arr = [[0, 1, 7, 0, 0],
[3, 0, 0, 2, 19]]
tgt = np.array([2, 3])
out = np.count_nonzero(arr, axis=1)
assert_equal(out, tgt)
def test_cumproduct(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.all(np.cumproduct(A) == np.array([1, 2, 6, 24, 120, 720])))
def test_diagonal(self):
a = [[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11]]
out = np.diagonal(a)
tgt = [0, 5, 10]
assert_equal(out, tgt)
def test_mean(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.mean(A) == 3.5)
assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5])))
assert_(np.all(np.mean(A, 1) == np.array([2., 5.])))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.mean([])))
assert_(w[0].category is RuntimeWarning)
def test_ptp(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
assert_equal(np.ptp(a, axis=0), 15.0)
def test_prod(self):
arr = [[1, 2, 3, 4],
[5, 6, 7, 9],
[10, 3, 4, 5]]
tgt = [24, 1890, 600]
assert_equal(np.prod(arr, axis=-1), tgt)
def test_ravel(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
assert_equal(np.ravel(a), tgt)
def test_repeat(self):
a = [1, 2, 3]
tgt = [1, 1, 2, 2, 3, 3]
out = np.repeat(a, 2)
assert_equal(out, tgt)
def test_reshape(self):
arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(np.reshape(arr, (2, 6)), tgt)
def test_round(self):
arr = [1.56, 72.54, 6.35, 3.25]
tgt = [1.6, 72.5, 6.4, 3.2]
assert_equal(np.around(arr, decimals=1), tgt)
s = np.float64(1.)
assert_(isinstance(s.round(), np.float64))
assert_equal(s.round(), 1.)
@pytest.mark.parametrize('dtype', [
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64,
np.float16, np.float32, np.float64,
])
def test_dunder_round(self, dtype):
s = dtype(1)
assert_(isinstance(round(s), int))
assert_(isinstance(round(s, None), int))
assert_(isinstance(round(s, ndigits=None), int))
assert_equal(round(s), 1)
assert_equal(round(s, None), 1)
assert_equal(round(s, ndigits=None), 1)
@pytest.mark.parametrize('val, ndigits', [
pytest.param(2**31 - 1, -1,
marks=pytest.mark.xfail(reason="Out of range of int32")
),
(2**31 - 1, 1-math.ceil(math.log10(2**31 - 1))),
(2**31 - 1, -math.ceil(math.log10(2**31 - 1)))
])
def test_dunder_round_edgecases(self, val, ndigits):
assert_equal(round(val, ndigits), round(np.int32(val), ndigits))
def test_dunder_round_accuracy(self):
f = np.float64(5.1 * 10**73)
assert_(isinstance(round(f, -73), np.float64))
assert_array_max_ulp(round(f, -73), 5.0 * 10**73)
assert_(isinstance(round(f, ndigits=-73), np.float64))
assert_array_max_ulp(round(f, ndigits=-73), 5.0 * 10**73)
i = np.int64(501)
assert_(isinstance(round(i, -2), np.int64))
assert_array_max_ulp(round(i, -2), 500)
assert_(isinstance(round(i, ndigits=-2), np.int64))
assert_array_max_ulp(round(i, ndigits=-2), 500)
@pytest.mark.xfail(raises=AssertionError, reason="gh-15896")
def test_round_py_consistency(self):
f = 5.1 * 10**73
assert_equal(round(np.float64(f), -73), round(f, -73))
def test_searchsorted(self):
arr = [-8, -5, -1, 3, 6, 10]
out = np.searchsorted(arr, 0)
assert_equal(out, 3)
def test_size(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.size(A) == 6)
assert_(np.size(A, 0) == 2)
assert_(np.size(A, 1) == 3)
def test_squeeze(self):
A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]]
assert_equal(np.squeeze(A).shape, (3, 3))
assert_equal(np.squeeze(np.zeros((1, 3, 1))).shape, (3,))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=0).shape, (3, 1))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=-1).shape, (1, 3))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=2).shape, (1, 3))
assert_equal(np.squeeze([np.zeros((3, 1))]).shape, (3,))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=0).shape, (3, 1))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=2).shape, (1, 3))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=-1).shape, (1, 3))
def test_std(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_almost_equal(np.std(A), 1.707825127659933)
assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5]))
assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658]))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.std([])))
assert_(w[0].category is RuntimeWarning)
def test_swapaxes(self):
tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]]
a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
out = np.swapaxes(a, 0, 2)
assert_equal(out, tgt)
def test_sum(self):
m = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
tgt = [[6], [15], [24]]
out = np.sum(m, axis=1, keepdims=True)
assert_equal(tgt, out)
def test_take(self):
tgt = [2, 3, 5]
indices = [1, 2, 4]
a = [1, 2, 3, 4, 5]
out = np.take(a, indices)
assert_equal(out, tgt)
def test_trace(self):
c = [[1, 2], [3, 4], [5, 6]]
assert_equal(np.trace(c), 5)
def test_transpose(self):
arr = [[1, 2], [3, 4], [5, 6]]
tgt = [[1, 3, 5], [2, 4, 6]]
assert_equal(np.transpose(arr, (1, 0)), tgt)
def test_var(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_almost_equal(np.var(A), 2.9166666666666665)
assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25]))
assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667]))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.var([])))
assert_(w[0].category is RuntimeWarning)
B = np.array([None, 0])
B[0] = 1j
assert_almost_equal(np.var(B), 0.25)
class TestIsscalar:
def test_isscalar(self):
assert_(np.isscalar(3.1))
assert_(np.isscalar(np.int16(12345)))
assert_(np.isscalar(False))
assert_(np.isscalar('numpy'))
assert_(not np.isscalar([3.1]))
assert_(not np.isscalar(None))
# PEP 3141
from fractions import Fraction
assert_(np.isscalar(Fraction(5, 17)))
from numbers import Number
assert_(np.isscalar(Number()))
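    def test_isscalar_zero_d_sketch(self):
        # Added sketch (not part of the original suite): a 0-d array is
        # not a scalar by np.isscalar's definition, but the scalar
        # extracted from it is.
        assert not np.isscalar(np.array(3.1))
        assert np.isscalar(np.array(3.1)[()])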
class TestBoolScalar:
def test_logical(self):
f = np.False_
t = np.True_
s = "xyz"
assert_((t and s) is s)
assert_((f and s) is f)
def test_bitwise_or(self):
f = np.False_
t = np.True_
assert_((t | t) is t)
assert_((f | t) is t)
assert_((t | f) is t)
assert_((f | f) is f)
def test_bitwise_and(self):
f = np.False_
t = np.True_
assert_((t & t) is t)
assert_((f & t) is f)
assert_((t & f) is f)
assert_((f & f) is f)
def test_bitwise_xor(self):
f = np.False_
t = np.True_
assert_((t ^ t) is f)
assert_((f ^ t) is t)
assert_((t ^ f) is t)
assert_((f ^ f) is f)
class TestBoolArray:
def setup_method(self):
# offset for simd tests
self.t = np.array([True] * 41, dtype=bool)[1::]
self.f = np.array([False] * 41, dtype=bool)[1::]
self.o = np.array([False] * 42, dtype=bool)[2::]
self.nm = self.f.copy()
self.im = self.t.copy()
self.nm[3] = True
self.nm[-2] = True
self.im[3] = False
self.im[-2] = False
def test_all_any(self):
assert_(self.t.all())
assert_(self.t.any())
assert_(not self.f.all())
assert_(not self.f.any())
assert_(self.nm.any())
assert_(self.im.any())
assert_(not self.nm.all())
assert_(not self.im.all())
# check bad element in all positions
for i in range(256 - 7):
d = np.array([False] * 256, dtype=bool)[7::]
d[i] = True
assert_(np.any(d))
e = np.array([True] * 256, dtype=bool)[7::]
e[i] = False
assert_(not np.all(e))
assert_array_equal(e, ~d)
# big array test for blocked libc loops
for i in list(range(9, 6000, 507)) + [7764, 90021, -10]:
d = np.array([False] * 100043, dtype=bool)
d[i] = True
assert_(np.any(d), msg="%r" % i)
e = np.array([True] * 100043, dtype=bool)
e[i] = False
assert_(not np.all(e), msg="%r" % i)
def test_logical_not_abs(self):
assert_array_equal(~self.t, self.f)
assert_array_equal(np.abs(~self.t), self.f)
assert_array_equal(np.abs(~self.f), self.t)
assert_array_equal(np.abs(self.f), self.f)
assert_array_equal(~np.abs(self.f), self.t)
assert_array_equal(~np.abs(self.t), self.f)
assert_array_equal(np.abs(~self.nm), self.im)
np.logical_not(self.t, out=self.o)
assert_array_equal(self.o, self.f)
np.abs(self.t, out=self.o)
assert_array_equal(self.o, self.t)
def test_logical_and_or_xor(self):
assert_array_equal(self.t | self.t, self.t)
assert_array_equal(self.f | self.f, self.f)
assert_array_equal(self.t | self.f, self.t)
assert_array_equal(self.f | self.t, self.t)
np.logical_or(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.t)
assert_array_equal(self.t & self.t, self.t)
assert_array_equal(self.f & self.f, self.f)
assert_array_equal(self.t & self.f, self.f)
assert_array_equal(self.f & self.t, self.f)
np.logical_and(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.t)
assert_array_equal(self.t ^ self.t, self.f)
assert_array_equal(self.f ^ self.f, self.f)
assert_array_equal(self.t ^ self.f, self.t)
assert_array_equal(self.f ^ self.t, self.t)
np.logical_xor(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.f)
assert_array_equal(self.nm & self.t, self.nm)
assert_array_equal(self.im & self.f, False)
assert_array_equal(self.nm & True, self.nm)
assert_array_equal(self.im & False, self.f)
assert_array_equal(self.nm | self.t, self.t)
assert_array_equal(self.im | self.f, self.im)
assert_array_equal(self.nm | True, self.t)
assert_array_equal(self.im | False, self.im)
assert_array_equal(self.nm ^ self.t, self.im)
assert_array_equal(self.im ^ self.f, self.im)
assert_array_equal(self.nm ^ True, self.im)
assert_array_equal(self.im ^ False, self.im)
class TestBoolCmp:
def setup_method(self):
self.f = np.ones(256, dtype=np.float32)
self.ef = np.ones(self.f.size, dtype=bool)
self.d = np.ones(128, dtype=np.float64)
self.ed = np.ones(self.d.size, dtype=bool)
        # generate values for all permutations of 256-bit simd vectors
s = 0
for i in range(32):
self.f[s:s+8] = [i & 2**x for x in range(8)]
self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)]
s += 8
s = 0
for i in range(16):
self.d[s:s+4] = [i & 2**x for x in range(4)]
self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)]
s += 4
self.nf = self.f.copy()
self.nd = self.d.copy()
self.nf[self.ef] = np.nan
self.nd[self.ed] = np.nan
self.inff = self.f.copy()
self.infd = self.d.copy()
self.inff[::3][self.ef[::3]] = np.inf
self.infd[::3][self.ed[::3]] = np.inf
self.inff[1::3][self.ef[1::3]] = -np.inf
self.infd[1::3][self.ed[1::3]] = -np.inf
self.inff[2::3][self.ef[2::3]] = np.nan
self.infd[2::3][self.ed[2::3]] = np.nan
self.efnonan = self.ef.copy()
self.efnonan[2::3] = False
self.ednonan = self.ed.copy()
self.ednonan[2::3] = False
self.signf = self.f.copy()
self.signd = self.d.copy()
self.signf[self.ef] *= -1.
self.signd[self.ed] *= -1.
self.signf[1::6][self.ef[1::6]] = -np.inf
self.signd[1::6][self.ed[1::6]] = -np.inf
self.signf[3::6][self.ef[3::6]] = -np.nan
self.signd[3::6][self.ed[3::6]] = -np.nan
self.signf[4::6][self.ef[4::6]] = -0.
self.signd[4::6][self.ed[4::6]] = -0.
def test_float(self):
# offset for alignment test
for i in range(4):
assert_array_equal(self.f[i:] > 0, self.ef[i:])
assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:])
assert_array_equal(self.f[i:] == 0, ~self.ef[i:])
assert_array_equal(-self.f[i:] < 0, self.ef[i:])
assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:])
r = self.f[i:] != 0
assert_array_equal(r, self.ef[i:])
r2 = self.f[i:] != np.zeros_like(self.f[i:])
r3 = 0 != self.f[i:]
assert_array_equal(r, r2)
assert_array_equal(r, r3)
# check bool == 0x1
assert_array_equal(r.view(np.int8), r.astype(np.int8))
assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
# isnan on amd64 takes the same code path
assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])
assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:])
assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:])
assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:])
assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:])
def test_double(self):
# offset for alignment test
for i in range(2):
assert_array_equal(self.d[i:] > 0, self.ed[i:])
assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:])
assert_array_equal(self.d[i:] == 0, ~self.ed[i:])
assert_array_equal(-self.d[i:] < 0, self.ed[i:])
assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:])
r = self.d[i:] != 0
assert_array_equal(r, self.ed[i:])
r2 = self.d[i:] != np.zeros_like(self.d[i:])
r3 = 0 != self.d[i:]
assert_array_equal(r, r2)
assert_array_equal(r, r3)
# check bool == 0x1
assert_array_equal(r.view(np.int8), r.astype(np.int8))
assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
# isnan on amd64 takes the same code path
assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:])
assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:])
assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:])
assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:])
assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])
class TestSeterr:
def test_default(self):
err = np.geterr()
assert_equal(err,
dict(divide='warn',
invalid='warn',
over='warn',
under='ignore')
)
def test_set(self):
with np.errstate():
err = np.seterr()
old = np.seterr(divide='print')
assert_(err == old)
new = np.seterr()
assert_(new['divide'] == 'print')
np.seterr(over='raise')
assert_(np.geterr()['over'] == 'raise')
assert_(new['divide'] == 'print')
np.seterr(**old)
assert_(np.geterr() == old)
@pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
def test_divide_err(self):
with np.errstate(divide='raise'):
with assert_raises(FloatingPointError):
np.array([1.]) / np.array([0.])
np.seterr(divide='ignore')
np.array([1.]) / np.array([0.])
def test_errobj(self):
olderrobj = np.geterrobj()
self.called = 0
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with np.errstate(divide='warn'):
np.seterrobj([20000, 1, None])
np.array([1.]) / np.array([0.])
assert_equal(len(w), 1)
def log_err(*args):
self.called += 1
extobj_err = args
assert_(len(extobj_err) == 2)
assert_("divide" in extobj_err[0])
with np.errstate(divide='ignore'):
np.seterrobj([20000, 3, log_err])
np.array([1.]) / np.array([0.])
assert_equal(self.called, 1)
np.seterrobj(olderrobj)
with np.errstate(divide='ignore'):
np.divide(1., 0., extobj=[20000, 3, log_err])
assert_equal(self.called, 2)
finally:
np.seterrobj(olderrobj)
del self.called
def test_errobj_noerrmask(self):
# errmask = 0 has a special code path for the default
olderrobj = np.geterrobj()
try:
# set errobj to something non default
np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT,
umath.ERR_DEFAULT + 1, None])
# call a ufunc
np.isnan(np.array([6]))
            # same with the default, repeated many times to flush any
            # pre-existing state in the code
for i in range(10000):
np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT,
None])
np.isnan(np.array([6]))
finally:
np.seterrobj(olderrobj)
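# Illustrative sketch (not part of the test suite; the helper name is
# hypothetical): np.errstate saves the error state on entry and restores it
# on exit, which is what the tests above rely on for isolation.
def _errstate_roundtrip_sketch():
    before = np.geterr()
    with np.errstate(divide='raise', under='warn'):
        # the requested modes are active only inside the block
        assert np.geterr()['divide'] == 'raise'
    # on exit the previous state is restored unchanged
    assert np.geterr() == before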
class TestFloatExceptions:
def assert_raises_fpe(self, fpeerr, flop, x, y):
ftype = type(x)
try:
flop(x, y)
assert_(False,
"Type %s did not raise fpe error '%s'." % (ftype, fpeerr))
except FloatingPointError as exc:
assert_(str(exc).find(fpeerr) >= 0,
"Type %s raised wrong fpe error '%s'." % (ftype, exc))
def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2):
# Check that fpe exception is raised.
#
# Given a floating operation `flop` and two scalar values, check that
# the operation raises the floating point exception specified by
# `fpeerr`. Tests all variants with 0-d array scalars as well.
self.assert_raises_fpe(fpeerr, flop, sc1, sc2)
self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2)
self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])
# Test for all real and complex float types
@pytest.mark.parametrize("typecode", np.typecodes["AllFloat"])
def test_floating_exceptions(self, typecode):
# Test basic arithmetic function errors
with np.errstate(all='raise'):
ftype = np.obj2sctype(typecode)
if np.dtype(ftype).kind == 'f':
# Get some extreme values for the type
fi = np.finfo(ftype)
ft_tiny = fi._machar.tiny
ft_max = fi.max
ft_eps = fi.eps
underflow = 'underflow'
divbyzero = 'divide by zero'
else:
# 'c', complex, corresponding real dtype
rtype = type(ftype(0).real)
fi = np.finfo(rtype)
ft_tiny = ftype(fi._machar.tiny)
ft_max = ftype(fi.max)
ft_eps = ftype(fi.eps)
# The complex types raise different exceptions
underflow = ''
divbyzero = ''
overflow = 'overflow'
invalid = 'invalid'
            # The value of tiny for double-double is NaN, so we need to
            # skip the underflow assert in that case
if not np.isnan(ft_tiny):
self.assert_raises_fpe(underflow,
lambda a, b: a/b, ft_tiny, ft_max)
self.assert_raises_fpe(underflow,
lambda a, b: a*b, ft_tiny, ft_tiny)
self.assert_raises_fpe(overflow,
lambda a, b: a*b, ft_max, ftype(2))
self.assert_raises_fpe(overflow,
lambda a, b: a/b, ft_max, ftype(0.5))
self.assert_raises_fpe(overflow,
lambda a, b: a+b, ft_max, ft_max*ft_eps)
self.assert_raises_fpe(overflow,
lambda a, b: a-b, -ft_max, ft_max*ft_eps)
self.assert_raises_fpe(overflow,
np.power, ftype(2), ftype(2**fi.nexp))
self.assert_raises_fpe(divbyzero,
lambda a, b: a/b, ftype(1), ftype(0))
self.assert_raises_fpe(
invalid, lambda a, b: a/b, ftype(np.inf), ftype(np.inf)
)
self.assert_raises_fpe(invalid,
lambda a, b: a/b, ftype(0), ftype(0))
self.assert_raises_fpe(
invalid, lambda a, b: a-b, ftype(np.inf), ftype(np.inf)
)
self.assert_raises_fpe(
invalid, lambda a, b: a+b, ftype(np.inf), ftype(-np.inf)
)
self.assert_raises_fpe(invalid,
lambda a, b: a*b, ftype(0), ftype(np.inf))
def test_warnings(self):
# test warning code path
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with np.errstate(all="warn"):
np.divide(1, 0.)
assert_equal(len(w), 1)
assert_("divide by zero" in str(w[0].message))
np.array(1e300) * np.array(1e300)
assert_equal(len(w), 2)
assert_("overflow" in str(w[-1].message))
np.array(np.inf) - np.array(np.inf)
assert_equal(len(w), 3)
assert_("invalid value" in str(w[-1].message))
np.array(1e-300) * np.array(1e-300)
assert_equal(len(w), 4)
assert_("underflow" in str(w[-1].message))
class TestTypes:
def check_promotion_cases(self, promote_func):
# tests that the scalars get coerced correctly.
b = np.bool_(0)
i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0)
u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0)
f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0)
c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0)
# coercion within the same kind
assert_equal(promote_func(i8, i16), np.dtype(np.int16))
assert_equal(promote_func(i32, i8), np.dtype(np.int32))
assert_equal(promote_func(i16, i64), np.dtype(np.int64))
assert_equal(promote_func(u8, u32), np.dtype(np.uint32))
assert_equal(promote_func(f32, f64), np.dtype(np.float64))
assert_equal(promote_func(fld, f32), np.dtype(np.longdouble))
assert_equal(promote_func(f64, fld), np.dtype(np.longdouble))
assert_equal(promote_func(c128, c64), np.dtype(np.complex128))
assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble))
assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble))
# coercion between kinds
assert_equal(promote_func(b, i32), np.dtype(np.int32))
assert_equal(promote_func(b, u8), np.dtype(np.uint8))
assert_equal(promote_func(i8, u8), np.dtype(np.int16))
assert_equal(promote_func(u8, i32), np.dtype(np.int32))
assert_equal(promote_func(i64, u32), np.dtype(np.int64))
assert_equal(promote_func(u64, i32), np.dtype(np.float64))
assert_equal(promote_func(i32, f32), np.dtype(np.float64))
assert_equal(promote_func(i64, f32), np.dtype(np.float64))
assert_equal(promote_func(f32, i16), np.dtype(np.float32))
assert_equal(promote_func(f32, u32), np.dtype(np.float64))
assert_equal(promote_func(f32, c64), np.dtype(np.complex64))
assert_equal(promote_func(c128, f32), np.dtype(np.complex128))
assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble))
# coercion between scalars and 1-D arrays
assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8))
assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8))
assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32))
assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32))
assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int8))
assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.int32))
assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.uint32))
assert_equal(promote_func(np.int32(-1), np.array([u64])),
np.dtype(np.float64))
assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float32))
assert_equal(promote_func(fld, np.array([f32])), np.dtype(np.float32))
assert_equal(promote_func(np.array([f64]), fld), np.dtype(np.float64))
assert_equal(promote_func(fld, np.array([c64])),
np.dtype(np.complex64))
assert_equal(promote_func(c64, np.array([f64])),
np.dtype(np.complex128))
assert_equal(promote_func(np.complex64(3j), np.array([f64])),
np.dtype(np.complex128))
# coercion between scalars and 1-D arrays, where
# the scalar has greater kind than the array
assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64))
assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64))
assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64))
assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64))
assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64))
# uint and int are treated as the same "kind" for
# the purposes of array-scalar promotion.
assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.uint16))
# float and complex are treated as the same "kind" for
# the purposes of array-scalar promotion, so that you can do
# (0j + float32array) to get a complex64 array instead of
# a complex128 array.
assert_equal(promote_func(np.array([f32]), c128),
np.dtype(np.complex64))
def test_coercion(self):
def res_type(a, b):
return np.add(a, b).dtype
self.check_promotion_cases(res_type)
# Use-case: float/complex scalar * bool/int8 array
# shouldn't narrow the float/complex type
for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]:
b = 1.234 * a
assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
b = np.longdouble(1.234) * a
assert_equal(b.dtype, np.dtype(np.longdouble),
"array type %s" % a.dtype)
b = np.float64(1.234) * a
assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
b = np.float32(1.234) * a
assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype)
b = np.float16(1.234) * a
assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype)
b = 1.234j * a
assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
b = np.clongdouble(1.234j) * a
assert_equal(b.dtype, np.dtype(np.clongdouble),
"array type %s" % a.dtype)
b = np.complex128(1.234j) * a
assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
b = np.complex64(1.234j) * a
assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype)
# The following use-case is problematic, and to resolve its
# tricky side-effects requires more changes.
#
# Use-case: (1-t)*a, where 't' is a boolean array and 'a' is
# a float32, shouldn't promote to float64
#
# a = np.array([1.0, 1.5], dtype=np.float32)
# t = np.array([True, False])
# b = t*a
# assert_equal(b, [1.0, 0.0])
# assert_equal(b.dtype, np.dtype('f4'))
# b = (1-t)*a
# assert_equal(b, [0.0, 1.5])
# assert_equal(b.dtype, np.dtype('f4'))
#
# Probably ~t (bitwise negation) is more proper to use here,
# but this is arguably less intuitive to understand at a glance, and
# would fail if 't' is actually an integer array instead of boolean:
#
# b = (~t)*a
# assert_equal(b, [0.0, 1.5])
# assert_equal(b.dtype, np.dtype('f4'))
def test_result_type(self):
self.check_promotion_cases(np.result_type)
assert_(np.result_type(None) == np.dtype(None))
def test_promote_types_endian(self):
# promote_types should always return native-endian types
assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8'))
assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8'))
assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21'))
assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U21'))
assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U21'))
assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U21'))
assert_equal(np.promote_types('<S5', '<U8'), np.dtype('U8'))
assert_equal(np.promote_types('>S5', '>U8'), np.dtype('U8'))
assert_equal(np.promote_types('<U8', '<S5'), np.dtype('U8'))
assert_equal(np.promote_types('>U8', '>S5'), np.dtype('U8'))
assert_equal(np.promote_types('<U5', '<U8'), np.dtype('U8'))
assert_equal(np.promote_types('>U8', '>U5'), np.dtype('U8'))
assert_equal(np.promote_types('<M8', '<M8'), np.dtype('M8'))
assert_equal(np.promote_types('>M8', '>M8'), np.dtype('M8'))
assert_equal(np.promote_types('<m8', '<m8'), np.dtype('m8'))
assert_equal(np.promote_types('>m8', '>m8'), np.dtype('m8'))
def test_can_cast_and_promote_usertypes(self):
        # The rational type defines safe casting for signed integers and
        # booleans. Rational itself *does* cast safely to double.
# (rational does not actually cast to all signed integers, e.g.
# int64 can be both long and longlong and it registers only the first)
valid_types = ["int8", "int16", "int32", "int64", "bool"]
invalid_types = "BHILQP" + "FDG" + "mM" + "f" + "V"
rational_dt = np.dtype(rational)
for numpy_dtype in valid_types:
numpy_dtype = np.dtype(numpy_dtype)
assert np.can_cast(numpy_dtype, rational_dt)
assert np.promote_types(numpy_dtype, rational_dt) is rational_dt
for numpy_dtype in invalid_types:
numpy_dtype = np.dtype(numpy_dtype)
assert not np.can_cast(numpy_dtype, rational_dt)
with pytest.raises(TypeError):
np.promote_types(numpy_dtype, rational_dt)
double_dt = np.dtype("double")
assert np.can_cast(rational_dt, double_dt)
assert np.promote_types(double_dt, rational_dt) is double_dt
@pytest.mark.parametrize("swap", ["", "swap"])
@pytest.mark.parametrize("string_dtype", ["U", "S"])
def test_promote_types_strings(self, swap, string_dtype):
if swap == "swap":
promote_types = lambda a, b: np.promote_types(b, a)
else:
promote_types = np.promote_types
S = string_dtype
# Promote numeric with unsized string:
assert_equal(promote_types('bool', S), np.dtype(S+'5'))
assert_equal(promote_types('b', S), np.dtype(S+'4'))
assert_equal(promote_types('u1', S), np.dtype(S+'3'))
assert_equal(promote_types('u2', S), np.dtype(S+'5'))
assert_equal(promote_types('u4', S), np.dtype(S+'10'))
assert_equal(promote_types('u8', S), np.dtype(S+'20'))
assert_equal(promote_types('i1', S), np.dtype(S+'4'))
assert_equal(promote_types('i2', S), np.dtype(S+'6'))
assert_equal(promote_types('i4', S), np.dtype(S+'11'))
assert_equal(promote_types('i8', S), np.dtype(S+'21'))
# Promote numeric with sized string:
assert_equal(promote_types('bool', S+'1'), np.dtype(S+'5'))
assert_equal(promote_types('bool', S+'30'), np.dtype(S+'30'))
assert_equal(promote_types('b', S+'1'), np.dtype(S+'4'))
assert_equal(promote_types('b', S+'30'), np.dtype(S+'30'))
assert_equal(promote_types('u1', S+'1'), np.dtype(S+'3'))
assert_equal(promote_types('u1', S+'30'), np.dtype(S+'30'))
assert_equal(promote_types('u2', S+'1'), np.dtype(S+'5'))
assert_equal(promote_types('u2', S+'30'), np.dtype(S+'30'))
assert_equal(promote_types('u4', S+'1'), np.dtype(S+'10'))
assert_equal(promote_types('u4', S+'30'), np.dtype(S+'30'))
assert_equal(promote_types('u8', S+'1'), np.dtype(S+'20'))
assert_equal(promote_types('u8', S+'30'), np.dtype(S+'30'))
# Promote with object:
assert_equal(promote_types('O', S+'30'), np.dtype('O'))
@pytest.mark.parametrize(["dtype1", "dtype2"],
[[np.dtype("V6"), np.dtype("V10")], # mismatch shape
# Mismatching names:
[np.dtype([("name1", "i8")]), np.dtype([("name2", "i8")])],
])
def test_invalid_void_promotion(self, dtype1, dtype2):
with pytest.raises(TypeError):
np.promote_types(dtype1, dtype2)
@pytest.mark.parametrize(["dtype1", "dtype2"],
[[np.dtype("V10"), np.dtype("V10")],
[np.dtype([("name1", "i8")]),
np.dtype([("name1", np.dtype("i8").newbyteorder())])],
[np.dtype("i8,i8"), np.dtype("i8,>i8")],
[np.dtype("i8,i8"), np.dtype("i4,i4")],
])
def test_valid_void_promotion(self, dtype1, dtype2):
assert np.promote_types(dtype1, dtype2) == dtype1
@pytest.mark.parametrize("dtype",
list(np.typecodes["All"]) +
["i,i", "10i", "S3", "S100", "U3", "U100", rational])
def test_promote_identical_types_metadata(self, dtype):
        # The same type passed in twice to promote_types always
        # preserves metadata
metadata = {1: 1}
dtype = np.dtype(dtype, metadata=metadata)
res = np.promote_types(dtype, dtype)
assert res.metadata == dtype.metadata
# byte-swapping preserves and makes the dtype native:
dtype = dtype.newbyteorder()
if dtype.isnative:
# The type does not have byte swapping
return
res = np.promote_types(dtype, dtype)
# Metadata is (currently) generally lost on byte-swapping (except for
        # unicode).
if dtype.char != "U":
assert res.metadata is None
else:
assert res.metadata == metadata
assert res.isnative
@pytest.mark.slow
@pytest.mark.filterwarnings('ignore:Promotion of numbers:FutureWarning')
@pytest.mark.parametrize(["dtype1", "dtype2"],
itertools.product(
list(np.typecodes["All"]) +
["i,i", "S3", "S100", "U3", "U100", rational],
repeat=2))
def test_promote_types_metadata(self, dtype1, dtype2):
"""Metadata handling in promotion does not appear formalized
right now in NumPy. This test should thus be considered to
document behaviour, rather than test the correct definition of it.
This test is very ugly, it was useful for rewriting part of the
promotion, but probably should eventually be replaced/deleted
(i.e. when metadata handling in promotion is better defined).
"""
metadata1 = {1: 1}
metadata2 = {2: 2}
dtype1 = np.dtype(dtype1, metadata=metadata1)
dtype2 = np.dtype(dtype2, metadata=metadata2)
try:
res = np.promote_types(dtype1, dtype2)
except TypeError:
# Promotion failed, this test only checks metadata
return
if res.char not in "USV" or res.names is not None or res.shape != ():
# All except string dtypes (and unstructured void) lose metadata
# on promotion (unless both dtypes are identical).
# At some point structured ones did not, but were restrictive.
assert res.metadata is None
elif res == dtype1:
# If one result is the result, it is usually returned unchanged:
assert res is dtype1
elif res == dtype2:
# dtype1 may have been cast to the same type/kind as dtype2.
# If the resulting dtype is identical we currently pick the cast
# version of dtype1, which lost the metadata:
            if np.promote_types(dtype1, dtype2.kind) == dtype2:
                assert res.metadata is None
            else:
                assert res.metadata == metadata2
else:
assert res.metadata is None
# Try again for byteswapped version
dtype1 = dtype1.newbyteorder()
assert dtype1.metadata == metadata1
res_bs = np.promote_types(dtype1, dtype2)
assert res_bs == res
assert res_bs.metadata == res.metadata
def test_can_cast(self):
assert_(np.can_cast(np.int32, np.int64))
assert_(np.can_cast(np.float64, complex))
assert_(not np.can_cast(complex, float))
assert_(np.can_cast('i8', 'f8'))
assert_(not np.can_cast('i8', 'f4'))
assert_(np.can_cast('i4', 'S11'))
assert_(np.can_cast('i8', 'i8', 'no'))
assert_(not np.can_cast('<i8', '>i8', 'no'))
assert_(np.can_cast('<i8', '>i8', 'equiv'))
assert_(not np.can_cast('<i4', '>i8', 'equiv'))
assert_(np.can_cast('<i4', '>i8', 'safe'))
assert_(not np.can_cast('<i8', '>i4', 'safe'))
assert_(np.can_cast('<i8', '>i4', 'same_kind'))
assert_(not np.can_cast('<i8', '>u4', 'same_kind'))
assert_(np.can_cast('<i8', '>u4', 'unsafe'))
assert_(np.can_cast('bool', 'S5'))
assert_(not np.can_cast('bool', 'S4'))
assert_(np.can_cast('b', 'S4'))
assert_(not np.can_cast('b', 'S3'))
assert_(np.can_cast('u1', 'S3'))
assert_(not np.can_cast('u1', 'S2'))
assert_(np.can_cast('u2', 'S5'))
assert_(not np.can_cast('u2', 'S4'))
assert_(np.can_cast('u4', 'S10'))
assert_(not np.can_cast('u4', 'S9'))
assert_(np.can_cast('u8', 'S20'))
assert_(not np.can_cast('u8', 'S19'))
assert_(np.can_cast('i1', 'S4'))
assert_(not np.can_cast('i1', 'S3'))
assert_(np.can_cast('i2', 'S6'))
assert_(not np.can_cast('i2', 'S5'))
assert_(np.can_cast('i4', 'S11'))
assert_(not np.can_cast('i4', 'S10'))
assert_(np.can_cast('i8', 'S21'))
assert_(not np.can_cast('i8', 'S20'))
assert_(np.can_cast('bool', 'S5'))
assert_(not np.can_cast('bool', 'S4'))
assert_(np.can_cast('b', 'U4'))
assert_(not np.can_cast('b', 'U3'))
assert_(np.can_cast('u1', 'U3'))
assert_(not np.can_cast('u1', 'U2'))
assert_(np.can_cast('u2', 'U5'))
assert_(not np.can_cast('u2', 'U4'))
assert_(np.can_cast('u4', 'U10'))
assert_(not np.can_cast('u4', 'U9'))
assert_(np.can_cast('u8', 'U20'))
assert_(not np.can_cast('u8', 'U19'))
assert_(np.can_cast('i1', 'U4'))
assert_(not np.can_cast('i1', 'U3'))
assert_(np.can_cast('i2', 'U6'))
assert_(not np.can_cast('i2', 'U5'))
assert_(np.can_cast('i4', 'U11'))
assert_(not np.can_cast('i4', 'U10'))
assert_(np.can_cast('i8', 'U21'))
assert_(not np.can_cast('i8', 'U20'))
assert_raises(TypeError, np.can_cast, 'i4', None)
assert_raises(TypeError, np.can_cast, None, 'i4')
# Also test keyword arguments
assert_(np.can_cast(from_=np.int32, to=np.int64))
def test_can_cast_simple_to_structured(self):
# Non-structured can only be cast to structured in 'unsafe' mode.
assert_(not np.can_cast('i4', 'i4,i4'))
assert_(not np.can_cast('i4', 'i4,i2'))
assert_(np.can_cast('i4', 'i4,i4', casting='unsafe'))
assert_(np.can_cast('i4', 'i4,i2', casting='unsafe'))
        # This is rejected even when there is just a single field.
assert_(not np.can_cast('i2', [('f1', 'i4')]))
assert_(not np.can_cast('i2', [('f1', 'i4')], casting='same_kind'))
assert_(np.can_cast('i2', [('f1', 'i4')], casting='unsafe'))
# It should be the same for recursive structured or subarrays.
assert_(not np.can_cast('i2', [('f1', 'i4,i4')]))
assert_(np.can_cast('i2', [('f1', 'i4,i4')], casting='unsafe'))
assert_(not np.can_cast('i2', [('f1', '(2,3)i4')]))
assert_(np.can_cast('i2', [('f1', '(2,3)i4')], casting='unsafe'))
def test_can_cast_structured_to_simple(self):
# Need unsafe casting for structured to simple.
assert_(not np.can_cast([('f1', 'i4')], 'i4'))
assert_(np.can_cast([('f1', 'i4')], 'i4', casting='unsafe'))
assert_(np.can_cast([('f1', 'i4')], 'i2', casting='unsafe'))
# Since it is unclear what is being cast, multiple fields to
# single should not work even for unsafe casting.
assert_(not np.can_cast('i4,i4', 'i4', casting='unsafe'))
# But a single field inside a single field is OK.
assert_(not np.can_cast([('f1', [('x', 'i4')])], 'i4'))
assert_(np.can_cast([('f1', [('x', 'i4')])], 'i4', casting='unsafe'))
# And a subarray is fine too - it will just take the first element
# (arguably not very consistently; might also take the first field).
assert_(not np.can_cast([('f0', '(3,)i4')], 'i4'))
assert_(np.can_cast([('f0', '(3,)i4')], 'i4', casting='unsafe'))
# But a structured subarray with multiple fields should fail.
assert_(not np.can_cast([('f0', ('i4,i4'), (2,))], 'i4',
casting='unsafe'))
def test_can_cast_values(self):
# gh-5917
for dt in np.sctypes['int'] + np.sctypes['uint']:
ii = np.iinfo(dt)
assert_(np.can_cast(ii.min, dt))
assert_(np.can_cast(ii.max, dt))
assert_(not np.can_cast(ii.min - 1, dt))
assert_(not np.can_cast(ii.max + 1, dt))
for dt in np.sctypes['float']:
fi = np.finfo(dt)
assert_(np.can_cast(fi.min, dt))
assert_(np.can_cast(fi.max, dt))
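# Sketch of the legacy (NumPy 1.x) promotion rules the cases above encode
# (the helper name is hypothetical): a scalar only widens an array when its
# kind outranks the array's kind; within a kind, the array dtype wins.
def _promotion_sketch():
    arr = np.ones(3, dtype=np.float32)
    assert (arr + np.float64(1.0)).dtype == np.float32  # same kind: array wins
    assert (arr + 1j).dtype == np.complex64  # higher kind: minimal widening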
# Custom exception class to test exception propagation in fromiter
class NIterError(Exception):
pass
class TestFromiter:
def makegen(self):
return (x**2 for x in range(24))
def test_types(self):
ai32 = np.fromiter(self.makegen(), np.int32)
ai64 = np.fromiter(self.makegen(), np.int64)
af = np.fromiter(self.makegen(), float)
assert_(ai32.dtype == np.dtype(np.int32))
assert_(ai64.dtype == np.dtype(np.int64))
assert_(af.dtype == np.dtype(float))
def test_lengths(self):
expected = np.array(list(self.makegen()))
a = np.fromiter(self.makegen(), int)
a20 = np.fromiter(self.makegen(), int, 20)
assert_(len(a) == len(expected))
assert_(len(a20) == 20)
assert_raises(ValueError, np.fromiter,
self.makegen(), int, len(expected) + 10)
def test_values(self):
expected = np.array(list(self.makegen()))
a = np.fromiter(self.makegen(), int)
a20 = np.fromiter(self.makegen(), int, 20)
assert_(np.alltrue(a == expected, axis=0))
assert_(np.alltrue(a20 == expected[:20], axis=0))
def load_data(self, n, eindex):
# Utility method for the issue 2592 tests.
# Raise an exception at the desired index in the iterator.
for e in range(n):
if e == eindex:
raise NIterError('error at index %s' % eindex)
yield e
@pytest.mark.parametrize("dtype", [int, object])
@pytest.mark.parametrize(["count", "error_index"], [(10, 5), (10, 9)])
def test_2592(self, count, error_index, dtype):
        # Test that iteration exceptions are correctly raised. The data
        # generator has `count` elements but errors at `error_index`.
iterable = self.load_data(count, error_index)
with pytest.raises(NIterError):
np.fromiter(iterable, dtype=dtype, count=count)
@pytest.mark.parametrize("dtype", ["S", "S0", "V0", "U0"])
def test_empty_not_structured(self, dtype):
# Note, "S0" could be allowed at some point, so long "S" (without
# any length) is rejected.
with pytest.raises(ValueError, match="Must specify length"):
np.fromiter([], dtype=dtype)
@pytest.mark.parametrize(["dtype", "data"],
[("d", [1, 2, 3, 4, 5, 6, 7, 8, 9]),
("O", [1, 2, 3, 4, 5, 6, 7, 8, 9]),
("i,O", [(1, 2), (5, 4), (2, 3), (9, 8), (6, 7)]),
             # subarray dtypes (important because their dimensions end up
             # in the result array's dimensions):
("2i", [(1, 2), (5, 4), (2, 3), (9, 8), (6, 7)]),
(np.dtype(("O", (2, 3))),
[((1, 2, 3), (3, 4, 5)), ((3, 2, 1), (5, 4, 3))])])
@pytest.mark.parametrize("length_hint", [0, 1])
def test_growth_and_complicated_dtypes(self, dtype, data, length_hint):
dtype = np.dtype(dtype)
data = data * 100 # make sure we realloc a bit
class MyIter:
# Class/example from gh-15789
def __length_hint__(self):
                # only required to be an estimate; this is legal
return length_hint # 0 or 1
def __iter__(self):
return iter(data)
res = np.fromiter(MyIter(), dtype=dtype)
expected = np.array(data, dtype=dtype)
assert_array_equal(res, expected)
def test_empty_result(self):
class MyIter:
def __length_hint__(self):
return 10
def __iter__(self):
return iter([]) # actual iterator is empty.
res = np.fromiter(MyIter(), dtype="d")
assert res.shape == (0,)
assert res.dtype == "d"
def test_too_few_items(self):
msg = "iterator too short: Expected 10 but iterator had only 3 items."
with pytest.raises(ValueError, match=msg):
np.fromiter([1, 2, 3], count=10, dtype=int)
def test_failed_itemsetting(self):
with pytest.raises(TypeError):
np.fromiter([1, None, 3], dtype=int)
# The following manages to hit somewhat trickier code paths:
iterable = ((2, 3, 4) for i in range(5))
with pytest.raises(ValueError):
np.fromiter(iterable, dtype=np.dtype((int, 2)))
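# Minimal usage sketch for the behaviour tested above (the helper name is
# hypothetical): np.fromiter consumes any iterable; `count` preallocates
# exactly, while __length_hint__ only seeds the initial allocation and is
# allowed to be wrong.
def _fromiter_sketch():
    squares = np.fromiter((x**2 for x in range(5)), dtype=np.int64, count=5)
    assert squares.tolist() == [0, 1, 4, 9, 16]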
class TestNonzero:
def test_nonzero_trivial(self):
assert_equal(np.count_nonzero(np.array([])), 0)
assert_equal(np.count_nonzero(np.array([], dtype='?')), 0)
assert_equal(np.nonzero(np.array([])), ([],))
assert_equal(np.count_nonzero(np.array([0])), 0)
assert_equal(np.count_nonzero(np.array([0], dtype='?')), 0)
assert_equal(np.nonzero(np.array([0])), ([],))
assert_equal(np.count_nonzero(np.array([1])), 1)
assert_equal(np.count_nonzero(np.array([1], dtype='?')), 1)
assert_equal(np.nonzero(np.array([1])), ([0],))
def test_nonzero_zerod(self):
assert_equal(np.count_nonzero(np.array(0)), 0)
assert_equal(np.count_nonzero(np.array(0, dtype='?')), 0)
with assert_warns(DeprecationWarning):
assert_equal(np.nonzero(np.array(0)), ([],))
assert_equal(np.count_nonzero(np.array(1)), 1)
assert_equal(np.count_nonzero(np.array(1, dtype='?')), 1)
with assert_warns(DeprecationWarning):
assert_equal(np.nonzero(np.array(1)), ([0],))
def test_nonzero_onedim(self):
x = np.array([1, 0, 2, -1, 0, 0, 8])
assert_equal(np.count_nonzero(x), 4)
assert_equal(np.count_nonzero(x), 4)
assert_equal(np.nonzero(x), ([0, 2, 3, 6],))
# x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)],
# dtype=[('a', 'i4'), ('b', 'i2')])
x = np.array([(1, 2, -5, -3), (0, 0, 2, 7), (1, 1, 0, 1), (-1, 3, 1, 0), (0, 7, 0, 4)],
dtype=[('a', 'i4'), ('b', 'i2'), ('c', 'i1'), ('d', 'i8')])
assert_equal(np.count_nonzero(x['a']), 3)
assert_equal(np.count_nonzero(x['b']), 4)
assert_equal(np.count_nonzero(x['c']), 3)
assert_equal(np.count_nonzero(x['d']), 4)
assert_equal(np.nonzero(x['a']), ([0, 2, 3],))
assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],))
def test_nonzero_twodim(self):
x = np.array([[0, 1, 0], [2, 0, 3]])
assert_equal(np.count_nonzero(x.astype('i1')), 3)
assert_equal(np.count_nonzero(x.astype('i2')), 3)
assert_equal(np.count_nonzero(x.astype('i4')), 3)
assert_equal(np.count_nonzero(x.astype('i8')), 3)
assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2]))
x = np.eye(3)
assert_equal(np.count_nonzero(x.astype('i1')), 3)
assert_equal(np.count_nonzero(x.astype('i2')), 3)
assert_equal(np.count_nonzero(x.astype('i4')), 3)
assert_equal(np.count_nonzero(x.astype('i8')), 3)
assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2]))
x = np.array([[(0, 1), (0, 0), (1, 11)],
[(1, 1), (1, 0), (0, 0)],
[(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')])
assert_equal(np.count_nonzero(x['a']), 4)
assert_equal(np.count_nonzero(x['b']), 5)
assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1]))
assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2]))
assert_(not x['a'].T.flags.aligned)
assert_equal(np.count_nonzero(x['a'].T), 4)
assert_equal(np.count_nonzero(x['b'].T), 5)
assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0]))
assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2]))
def test_sparse(self):
# test special sparse condition boolean code path
for i in range(20):
c = np.zeros(200, dtype=bool)
c[i::20] = True
assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20))
c = np.zeros(400, dtype=bool)
c[10 + i:20 + i] = True
c[20 + i*2] = True
assert_equal(np.nonzero(c)[0],
np.concatenate((np.arange(10 + i, 20 + i), [20 + i*2])))
def test_return_type(self):
class C(np.ndarray):
pass
for view in (C, np.ndarray):
for nd in range(1, 4):
shape = tuple(range(2, 2+nd))
x = np.arange(np.prod(shape)).reshape(shape).view(view)
for nzx in (np.nonzero(x), x.nonzero()):
for nzx_i in nzx:
assert_(type(nzx_i) is np.ndarray)
assert_(nzx_i.flags.writeable)
def test_count_nonzero_axis(self):
# Basic check of functionality
m = np.array([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]])
expected = np.array([1, 1, 1, 1, 1])
assert_equal(np.count_nonzero(m, axis=0), expected)
expected = np.array([2, 3])
assert_equal(np.count_nonzero(m, axis=1), expected)
assert_raises(ValueError, np.count_nonzero, m, axis=(1, 1))
assert_raises(TypeError, np.count_nonzero, m, axis='foo')
assert_raises(np.AxisError, np.count_nonzero, m, axis=3)
assert_raises(TypeError, np.count_nonzero,
m, axis=np.array([[1], [2]]))
def test_count_nonzero_axis_all_dtypes(self):
# More thorough test that the axis argument is respected
# for all dtypes and responds correctly when presented with
# either integer or tuple arguments for axis
msg = "Mismatch for dtype: %s"
def assert_equal_w_dt(a, b, err_msg):
assert_equal(a.dtype, b.dtype, err_msg=err_msg)
assert_equal(a, b, err_msg=err_msg)
for dt in np.typecodes['All']:
err_msg = msg % (np.dtype(dt).name,)
if dt != 'V':
if dt != 'M':
m = np.zeros((3, 3), dtype=dt)
n = np.ones(1, dtype=dt)
m[0, 0] = n[0]
m[1, 0] = n[0]
else: # np.zeros doesn't work for np.datetime64
m = np.array(['1970-01-01'] * 9)
m = m.reshape((3, 3))
m[0, 0] = '1970-01-12'
m[1, 0] = '1970-01-12'
m = m.astype(dt)
expected = np.array([2, 0, 0], dtype=np.intp)
assert_equal_w_dt(np.count_nonzero(m, axis=0),
expected, err_msg=err_msg)
expected = np.array([1, 1, 0], dtype=np.intp)
assert_equal_w_dt(np.count_nonzero(m, axis=1),
expected, err_msg=err_msg)
expected = np.array(2)
assert_equal(np.count_nonzero(m, axis=(0, 1)),
expected, err_msg=err_msg)
assert_equal(np.count_nonzero(m, axis=None),
expected, err_msg=err_msg)
assert_equal(np.count_nonzero(m),
expected, err_msg=err_msg)
if dt == 'V':
# There are no 'nonzero' objects for np.void, so the testing
# setup is slightly different for this dtype
m = np.array([np.void(1)] * 6).reshape((2, 3))
expected = np.array([0, 0, 0], dtype=np.intp)
assert_equal_w_dt(np.count_nonzero(m, axis=0),
expected, err_msg=err_msg)
expected = np.array([0, 0], dtype=np.intp)
assert_equal_w_dt(np.count_nonzero(m, axis=1),
expected, err_msg=err_msg)
expected = np.array(0)
assert_equal(np.count_nonzero(m, axis=(0, 1)),
expected, err_msg=err_msg)
assert_equal(np.count_nonzero(m, axis=None),
expected, err_msg=err_msg)
assert_equal(np.count_nonzero(m),
expected, err_msg=err_msg)
def test_count_nonzero_axis_consistent(self):
# Check that the axis behaviour for valid axes in
# non-special cases is consistent (and therefore
# correct) by checking it against an integer array
        # that is then cast to the generic object dtype
from itertools import combinations, permutations
axis = (0, 1, 2, 3)
size = (5, 5, 5, 5)
msg = "Mismatch for axis: %s"
rng = np.random.RandomState(1234)
m = rng.randint(-100, 100, size=size)
n = m.astype(object)
for length in range(len(axis)):
for combo in combinations(axis, length):
for perm in permutations(combo):
assert_equal(
np.count_nonzero(m, axis=perm),
np.count_nonzero(n, axis=perm),
err_msg=msg % (perm,))
def test_countnonzero_axis_empty(self):
a = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(np.count_nonzero(a, axis=()), a.astype(bool))
def test_countnonzero_keepdims(self):
a = np.array([[0, 0, 1, 0],
[0, 3, 5, 0],
[7, 9, 2, 0]])
assert_equal(np.count_nonzero(a, axis=0, keepdims=True),
[[1, 2, 3, 0]])
assert_equal(np.count_nonzero(a, axis=1, keepdims=True),
[[1], [2], [3]])
assert_equal(np.count_nonzero(a, keepdims=True),
[[6]])
def test_array_method(self):
# Tests that the array method
# call to nonzero works
m = np.array([[1, 0, 0], [4, 0, 6]])
tgt = [[0, 1, 1], [0, 0, 2]]
assert_equal(m.nonzero(), tgt)
def test_nonzero_invalid_object(self):
# gh-9295
a = np.array([np.array([1, 2]), 3], dtype=object)
assert_raises(ValueError, np.nonzero, a)
class BoolErrors:
def __bool__(self):
raise ValueError("Not allowed")
assert_raises(ValueError, np.nonzero, np.array([BoolErrors()]))
def test_nonzero_sideeffect_safety(self):
# gh-13631
class FalseThenTrue:
_val = False
def __bool__(self):
try:
return self._val
finally:
self._val = True
class TrueThenFalse:
_val = True
def __bool__(self):
try:
return self._val
finally:
self._val = False
# result grows on the second pass
a = np.array([True, FalseThenTrue()])
assert_raises(RuntimeError, np.nonzero, a)
a = np.array([[True], [FalseThenTrue()]])
assert_raises(RuntimeError, np.nonzero, a)
# result shrinks on the second pass
a = np.array([False, TrueThenFalse()])
assert_raises(RuntimeError, np.nonzero, a)
a = np.array([[False], [TrueThenFalse()]])
assert_raises(RuntimeError, np.nonzero, a)
def test_nonzero_sideffects_structured_void(self):
        # Checks that structured void does not mutate the alignment flag
        # of the original array.
arr = np.zeros(5, dtype="i1,i8,i8") # `ones` may short-circuit
assert arr.flags.aligned # structs are considered "aligned"
assert not arr["f2"].flags.aligned
# make sure that nonzero/count_nonzero do not flip the flag:
np.nonzero(arr)
assert arr.flags.aligned
np.count_nonzero(arr)
assert arr.flags.aligned
def test_nonzero_exception_safe(self):
# gh-13930
class ThrowsAfter:
def __init__(self, iters):
self.iters_left = iters
def __bool__(self):
if self.iters_left == 0:
raise ValueError("called `iters` times")
self.iters_left -= 1
return True
"""
Test that a ValueError is raised instead of a SystemError
If the __bool__ function is called after the error state is set,
Python (cpython) will raise a SystemError.
"""
# assert that an exception in first pass is handled correctly
a = np.array([ThrowsAfter(5)]*10)
assert_raises(ValueError, np.nonzero, a)
# raise exception in second pass for 1-dimensional loop
a = np.array([ThrowsAfter(15)]*10)
assert_raises(ValueError, np.nonzero, a)
# raise exception in second pass for n-dimensional loop
a = np.array([[ThrowsAfter(15)]]*10)
assert_raises(ValueError, np.nonzero, a)
def test_structured_threadsafety(self):
# Nonzero (and some other functions) should be threadsafe for
# structured datatypes, see gh-15387. This test can behave randomly.
from concurrent.futures import ThreadPoolExecutor
# Create a deeply nested dtype to make a failure more likely:
dt = np.dtype([("", "f8")])
dt = np.dtype([("", dt)])
dt = np.dtype([("", dt)] * 2)
# The array should be large enough to likely run into threading issues
arr = np.random.uniform(size=(5000, 4)).view(dt)[:, 0]
def func(arr):
arr.nonzero()
tpe = ThreadPoolExecutor(max_workers=8)
futures = [tpe.submit(func, arr) for _ in range(10)]
for f in futures:
f.result()
assert arr.dtype is dt
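# Sketch of why the side-effect tests above fail loudly (the helper name is
# hypothetical): np.nonzero makes two passes over the data, one to count and
# size the result and one to collect indices, so truthiness must be stable.
def _nonzero_two_pass_sketch():
    mask = np.array([True, False, True, True])
    idx, = np.nonzero(mask)
    assert idx.tolist() == [0, 2, 3]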
class TestIndex:
def test_boolean(self):
a = rand(3, 5, 8)
V = rand(5, 8)
g1 = randint(0, 5, size=15)
g2 = randint(0, 8, size=15)
V[g1, g2] = -V[g1, g2]
assert_((np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all())
def test_boolean_edgecase(self):
a = np.array([], dtype='int32')
b = np.array([], dtype='bool')
c = a[b]
assert_equal(c, [])
assert_equal(c.dtype, np.dtype('int32'))
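# Sketch of the equivalence test_boolean checks above (the helper name is
# hypothetical): applying a boolean mask to the trailing axes selects the
# same elements from every leading slice.
def _boolean_index_sketch():
    a = np.arange(12).reshape(3, 4)
    mask = a[0] % 2 == 0
    assert (a[:, mask] == np.array([row[mask] for row in a])).all()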
class TestBinaryRepr:
def test_zero(self):
assert_equal(np.binary_repr(0), '0')
def test_positive(self):
assert_equal(np.binary_repr(10), '1010')
assert_equal(np.binary_repr(12522),
'11000011101010')
assert_equal(np.binary_repr(10736848),
'101000111101010011010000')
def test_negative(self):
assert_equal(np.binary_repr(-1), '-1')
assert_equal(np.binary_repr(-10), '-1010')
assert_equal(np.binary_repr(-12522),
'-11000011101010')
assert_equal(np.binary_repr(-10736848),
'-101000111101010011010000')
def test_sufficient_width(self):
assert_equal(np.binary_repr(0, width=5), '00000')
assert_equal(np.binary_repr(10, width=7), '0001010')
assert_equal(np.binary_repr(-5, width=7), '1111011')
def test_neg_width_boundaries(self):
# see gh-8670
# Ensure that the example in the issue does not
# break before proceeding to a more thorough test.
assert_equal(np.binary_repr(-128, width=8), '10000000')
for width in range(1, 11):
num = -2**(width - 1)
exp = '1' + (width - 1) * '0'
assert_equal(np.binary_repr(num, width=width), exp)
def test_large_neg_int64(self):
# See gh-14289.
assert_equal(np.binary_repr(np.int64(-2**62), width=64),
'11' + '0'*62)
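# Sketch (the helper name is hypothetical): with a fixed width, negative
# inputs are rendered in two's complement, so -2**(N-1) is exactly '1'
# followed by N-1 zeros, as test_neg_width_boundaries asserts.
def _binary_repr_sketch():
    assert np.binary_repr(-5, width=8) == '11111011'
    assert np.binary_repr(-2**7, width=8) == '10000000'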
class TestBaseRepr:
def test_base3(self):
assert_equal(np.base_repr(3**5, 3), '100000')
def test_positive(self):
assert_equal(np.base_repr(12, 10), '12')
assert_equal(np.base_repr(12, 10, 4), '000012')
assert_equal(np.base_repr(12, 4), '30')
assert_equal(np.base_repr(3731624803700888, 36), '10QR0ROFCEW')
def test_negative(self):
assert_equal(np.base_repr(-12, 10), '-12')
assert_equal(np.base_repr(-12, 10, 4), '-000012')
assert_equal(np.base_repr(-12, 4), '-30')
def test_base_range(self):
with assert_raises(ValueError):
np.base_repr(1, 1)
with assert_raises(ValueError):
np.base_repr(1, 37)
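# Sketch (the helper name is hypothetical): np.base_repr's third argument
# is a count of zeros padded on the left, applied after the sign.
def _base_repr_sketch():
    assert np.base_repr(12, 4) == '30'  # 3*4 + 0
    assert np.base_repr(-12, 10, 4) == '-000012'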
class TestArrayComparisons:
def test_array_equal(self):
res = np.array_equal(np.array([1, 2]), np.array([1, 2]))
assert_(res)
assert_(type(res) is bool)
res = np.array_equal(np.array([1, 2]), np.array([1, 2, 3]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equal(np.array([1, 2]), np.array([3, 4]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equal(np.array([1, 2]), np.array([1, 3]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equal(np.array(['a'], dtype='S1'), np.array(['a'], dtype='S1'))
assert_(res)
assert_(type(res) is bool)
res = np.array_equal(np.array([('a', 1)], dtype='S1,u4'),
np.array([('a', 1)], dtype='S1,u4'))
assert_(res)
assert_(type(res) is bool)
def test_array_equal_equal_nan(self):
# Test array_equal with equal_nan kwarg
a1 = np.array([1, 2, np.nan])
a2 = np.array([1, np.nan, 2])
a3 = np.array([1, 2, np.inf])
# equal_nan=False by default
assert_(not np.array_equal(a1, a1))
assert_(np.array_equal(a1, a1, equal_nan=True))
assert_(not np.array_equal(a1, a2, equal_nan=True))
# nan's not conflated with inf's
assert_(not np.array_equal(a1, a3, equal_nan=True))
# 0-D arrays
a = np.array(np.nan)
assert_(not np.array_equal(a, a))
assert_(np.array_equal(a, a, equal_nan=True))
# Non-float dtype - equal_nan should have no effect
a = np.array([1, 2, 3], dtype=int)
assert_(np.array_equal(a, a))
assert_(np.array_equal(a, a, equal_nan=True))
# Multi-dimensional array
a = np.array([[0, 1], [np.nan, 1]])
assert_(not np.array_equal(a, a))
assert_(np.array_equal(a, a, equal_nan=True))
# Complex values
a, b = [np.array([1 + 1j])]*2
a.real, b.imag = np.nan, np.nan
assert_(not np.array_equal(a, b, equal_nan=False))
assert_(np.array_equal(a, b, equal_nan=True))
def test_none_compares_elementwise(self):
a = np.array([None, 1, None], dtype=object)
assert_equal(a == None, [True, False, True])
assert_equal(a != None, [False, True, False])
a = np.ones(3)
assert_equal(a == None, [False, False, False])
assert_equal(a != None, [True, True, True])
def test_array_equiv(self):
res = np.array_equiv(np.array([1, 2]), np.array([1, 2]))
assert_(res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([1, 2, 3]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([3, 4]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([1, 3]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 1]), np.array([1]))
assert_(res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 1]), np.array([[1], [1]]))
assert_(res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([2]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
assert_(not res)
assert_(type(res) is bool)
@pytest.mark.parametrize("dtype", ["V0", "V3", "V10"])
def test_compare_unstructured_voids(self, dtype):
zeros = np.zeros(3, dtype=dtype)
assert_array_equal(zeros, zeros)
assert not (zeros != zeros).any()
if dtype == "V0":
# Can't test != of actually different data
return
nonzeros = np.array([b"1", b"2", b"3"], dtype=dtype)
assert not (zeros == nonzeros).any()
assert (zeros != nonzeros).all()
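# Sketch of the distinction exercised above (the helper name is
# hypothetical): array_equal demands identical shapes, while array_equiv
# only requires the inputs to be broadcast-consistent and elementwise equal.
def _equal_vs_equiv_sketch():
    a, b = np.array([1, 1]), np.array([1])
    assert not np.array_equal(a, b)
    assert np.array_equiv(a, b)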
def assert_array_strict_equal(x, y):
assert_array_equal(x, y)
    # Check flags; 32-bit arches typically don't provide 16-byte alignment
if ((x.dtype.alignment <= 8 or
np.intp().dtype.itemsize != 4) and
sys.platform != 'win32'):
assert_(x.flags == y.flags)
else:
assert_(x.flags.owndata == y.flags.owndata)
assert_(x.flags.writeable == y.flags.writeable)
assert_(x.flags.c_contiguous == y.flags.c_contiguous)
assert_(x.flags.f_contiguous == y.flags.f_contiguous)
assert_(x.flags.writebackifcopy == y.flags.writebackifcopy)
# check endianness
assert_(x.dtype.isnative == y.dtype.isnative)
class TestClip:
def setup_method(self):
self.nr = 5
self.nc = 3
def fastclip(self, a, m, M, out=None, casting=None):
if out is None:
if casting is None:
return a.clip(m, M)
else:
return a.clip(m, M, casting=casting)
else:
if casting is None:
return a.clip(m, M, out)
else:
return a.clip(m, M, out, casting=casting)
def clip(self, a, m, M, out=None):
        # use a slow reference implementation of clip
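        # selector is 0 where m <= a <= M (keep a), 1 where a < m (take m),
        # and 2 where a > M (take M); choose() then picks per element.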
selector = np.less(a, m) + 2*np.greater(a, M)
return selector.choose((a, m, M), out=out)
# Handy functions
def _generate_data(self, n, m):
return randn(n, m)
def _generate_data_complex(self, n, m):
return randn(n, m) + 1.j * rand(n, m)
def _generate_flt_data(self, n, m):
return (randn(n, m)).astype(np.float32)
def _neg_byteorder(self, a):
a = np.asarray(a)
if sys.byteorder == 'little':
a = a.astype(a.dtype.newbyteorder('>'))
else:
a = a.astype(a.dtype.newbyteorder('<'))
return a
def _generate_non_native_data(self, n, m):
data = randn(n, m)
data = self._neg_byteorder(data)
assert_(not data.dtype.isnative)
return data
def _generate_int_data(self, n, m):
return (10 * rand(n, m)).astype(np.int64)
def _generate_int32_data(self, n, m):
return (10 * rand(n, m)).astype(np.int32)
# Now the real test cases
@pytest.mark.parametrize("dtype", '?bhilqpBHILQPefdgFDGO')
def test_ones_pathological(self, dtype):
# for preservation of behavior described in
# gh-12519; amin > amax behavior may still change
# in the future
arr = np.ones(10, dtype=dtype)
expected = np.zeros(10, dtype=dtype)
actual = np.clip(arr, 1, 0)
if dtype == 'O':
assert actual.tolist() == expected.tolist()
else:
assert_equal(actual, expected)
def test_simple_double(self):
# Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = 0.1
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_simple_int(self):
# Test native int input with scalar min/max.
a = self._generate_int_data(self.nr, self.nc)
a = a.astype(int)
m = -2
M = 4
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_array_double(self):
# Test native double input with array min/max.
a = self._generate_data(self.nr, self.nc)
m = np.zeros(a.shape)
M = m + 0.5
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_simple_nonnative(self):
# Test non native double input with scalar min/max.
a = self._generate_non_native_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
# Test native double input with non native double scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = self._neg_byteorder(0.6)
assert_(not M.dtype.isnative)
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
def test_simple_complex(self):
# Test native complex input with native double scalar min/max.
a = 3 * self._generate_data_complex(self.nr, self.nc)
m = -0.5
M = 1.
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
# Test native input with complex double scalar min/max.
a = 3 * self._generate_data(self.nr, self.nc)
m = -0.5 + 1.j
M = 1. + 2.j
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_clip_complex(self):
# Address Issue gh-5354 for clipping complex arrays
        # Test native complex input without explicit min/max,
        # i.e., either min=None or max=None
a = np.ones(10, dtype=complex)
m = a.min()
M = a.max()
am = self.fastclip(a, m, None)
aM = self.fastclip(a, None, M)
assert_array_strict_equal(am, a)
assert_array_strict_equal(aM, a)
def test_clip_non_contig(self):
# Test clip for non contiguous native input and native scalar min/max.
a = self._generate_data(self.nr * 2, self.nc * 3)
a = a[::2, ::3]
assert_(not a.flags['F_CONTIGUOUS'])
assert_(not a.flags['C_CONTIGUOUS'])
ac = self.fastclip(a, -1.6, 1.7)
act = self.clip(a, -1.6, 1.7)
assert_array_strict_equal(ac, act)
def test_simple_out(self):
# Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = np.zeros(a.shape)
act = np.zeros(a.shape)
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
@pytest.mark.parametrize("casting", [None, "unsafe"])
def test_simple_int32_inout(self, casting):
# Test native int32 input with double min/max and int32 out.
a = self._generate_int32_data(self.nr, self.nc)
m = np.float64(0)
M = np.float64(2)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
if casting is None:
with assert_warns(DeprecationWarning):
# NumPy 1.17.0, 2018-02-24 - casting is unsafe
self.fastclip(a, m, M, ac, casting=casting)
else:
# explicitly passing "unsafe" will silence warning
self.fastclip(a, m, M, ac, casting=casting)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int64_out(self):
# Test native int32 input with int32 scalar min/max and int64 out.
a = self._generate_int32_data(self.nr, self.nc)
m = np.int32(-1)
M = np.int32(1)
ac = np.zeros(a.shape, dtype=np.int64)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int64_inout(self):
# Test native int32 input with double array min/max and int32 out.
a = self._generate_int32_data(self.nr, self.nc)
m = np.zeros(a.shape, np.float64)
M = np.float64(1)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
with assert_warns(DeprecationWarning):
# NumPy 1.17.0, 2018-02-24 - casting is unsafe
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int32_out(self):
# Test native double input with scalar min/max and int out.
a = self._generate_data(self.nr, self.nc)
m = -1.0
M = 2.0
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
with assert_warns(DeprecationWarning):
# NumPy 1.17.0, 2018-02-24 - casting is unsafe
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_inplace_01(self):
# Test native double input with array min/max in-place.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = np.zeros(a.shape)
M = 1.0
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_simple_inplace_02(self):
# Test native double input with scalar min/max in-place.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(ac, m, M, ac)
assert_array_strict_equal(a, ac)
def test_noncontig_inplace(self):
# Test non contiguous double input with double scalar min/max in-place.
a = self._generate_data(self.nr * 2, self.nc * 3)
a = a[::2, ::3]
assert_(not a.flags['F_CONTIGUOUS'])
assert_(not a.flags['C_CONTIGUOUS'])
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(ac, m, M, ac)
assert_array_equal(a, ac)
def test_type_cast_01(self):
# Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_02(self):
# Test native int32 input with int32 scalar min/max.
a = self._generate_int_data(self.nr, self.nc)
a = a.astype(np.int32)
m = -2
M = 4
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_03(self):
# Test native int32 input with float64 scalar min/max.
a = self._generate_int32_data(self.nr, self.nc)
m = -2
M = 4
ac = self.fastclip(a, np.float64(m), np.float64(M))
act = self.clip(a, np.float64(m), np.float64(M))
assert_array_strict_equal(ac, act)
def test_type_cast_04(self):
# Test native int32 input with float32 scalar min/max.
a = self._generate_int32_data(self.nr, self.nc)
m = np.float32(-2)
M = np.float32(4)
act = self.fastclip(a, m, M)
ac = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_05(self):
# Test native int32 with double arrays min/max.
a = self._generate_int_data(self.nr, self.nc)
m = -0.5
M = 1.
ac = self.fastclip(a, m * np.zeros(a.shape), M)
act = self.clip(a, m * np.zeros(a.shape), M)
assert_array_strict_equal(ac, act)
def test_type_cast_06(self):
# Test native with NON native scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = 0.5
m_s = self._neg_byteorder(m)
M = 1.
act = self.clip(a, m_s, M)
ac = self.fastclip(a, m_s, M)
assert_array_strict_equal(ac, act)
def test_type_cast_07(self):
# Test NON native with native array min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5 * np.ones(a.shape)
M = 1.
a_s = self._neg_byteorder(a)
assert_(not a_s.dtype.isnative)
act = a_s.clip(m, M)
ac = self.fastclip(a_s, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_08(self):
# Test NON native with native scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 1.
a_s = self._neg_byteorder(a)
assert_(not a_s.dtype.isnative)
ac = self.fastclip(a_s, m, M)
act = a_s.clip(m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_09(self):
# Test native with NON native array min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5 * np.ones(a.shape)
M = 1.
m_s = self._neg_byteorder(m)
assert_(not m_s.dtype.isnative)
ac = self.fastclip(a, m_s, M)
act = self.clip(a, m_s, M)
assert_array_strict_equal(ac, act)
def test_type_cast_10(self):
        # Test native int32 input with float min/max and a float out argument.
a = self._generate_int_data(self.nr, self.nc)
b = np.zeros(a.shape, dtype=np.float32)
m = np.float32(-0.5)
M = np.float32(1)
act = self.clip(a, m, M, out=b)
ac = self.fastclip(a, m, M, out=b)
assert_array_strict_equal(ac, act)
def test_type_cast_11(self):
# Test non native with native scalar, min/max, out non native
a = self._generate_non_native_data(self.nr, self.nc)
b = a.copy()
b = b.astype(b.dtype.newbyteorder('>'))
bt = b.copy()
m = -0.5
M = 1.
self.fastclip(a, m, M, out=b)
self.clip(a, m, M, out=bt)
assert_array_strict_equal(b, bt)
def test_type_cast_12(self):
# Test native int32 input and min/max and float out
a = self._generate_int_data(self.nr, self.nc)
b = np.zeros(a.shape, dtype=np.float32)
m = np.int32(0)
M = np.int32(1)
act = self.clip(a, m, M, out=b)
ac = self.fastclip(a, m, M, out=b)
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple(self):
# Test native double input with scalar min/max
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = np.zeros(a.shape)
act = np.zeros(a.shape)
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple2(self):
# Test native int32 input with double min/max and int32 out
a = self._generate_int32_data(self.nr, self.nc)
m = np.float64(0)
M = np.float64(2)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
with assert_warns(DeprecationWarning):
# NumPy 1.17.0, 2018-02-24 - casting is unsafe
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple_int32(self):
# Test native int32 input with int32 scalar min/max and int64 out
a = self._generate_int32_data(self.nr, self.nc)
m = np.int32(-1)
M = np.int32(1)
ac = np.zeros(a.shape, dtype=np.int64)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_array_int32(self):
# Test native int32 input with double array min/max and int32 out
a = self._generate_int32_data(self.nr, self.nc)
m = np.zeros(a.shape, np.float64)
M = np.float64(1)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
with assert_warns(DeprecationWarning):
# NumPy 1.17.0, 2018-02-24 - casting is unsafe
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_array_outint32(self):
# Test native double input with scalar min/max and int out
a = self._generate_data(self.nr, self.nc)
m = -1.0
M = 2.0
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
with assert_warns(DeprecationWarning):
# NumPy 1.17.0, 2018-02-24 - casting is unsafe
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_transposed(self):
# Test that the out argument works when transposed
a = np.arange(16).reshape(4, 4)
out = np.empty_like(a).T
a.clip(4, 10, out=out)
expected = self.clip(a, 4, 10)
assert_array_equal(out, expected)
def test_clip_with_out_memory_overlap(self):
# Test that the out argument works when it has memory overlap
a = np.arange(16).reshape(4, 4)
ac = a.copy()
a[:-1].clip(4, 10, out=a[1:])
expected = self.clip(ac[:-1], 4, 10)
assert_array_equal(a[1:], expected)
def test_clip_inplace_array(self):
# Test native double input with array min/max
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = np.zeros(a.shape)
M = 1.0
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_clip_inplace_simple(self):
# Test native double input with scalar min/max
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_clip_func_takes_out(self):
        # Ensure that the clip() function accepts an out= argument.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
a2 = np.clip(a, m, M, out=a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a2, ac)
assert_(a2 is a)
def test_clip_nan(self):
d = np.arange(7.)
with assert_warns(DeprecationWarning):
assert_equal(d.clip(min=np.nan), d)
with assert_warns(DeprecationWarning):
assert_equal(d.clip(max=np.nan), d)
with assert_warns(DeprecationWarning):
assert_equal(d.clip(min=np.nan, max=np.nan), d)
with assert_warns(DeprecationWarning):
assert_equal(d.clip(min=-2, max=np.nan), d)
with assert_warns(DeprecationWarning):
assert_equal(d.clip(min=np.nan, max=10), d)
def test_object_clip(self):
a = np.arange(10, dtype=object)
actual = np.clip(a, 1, 5)
expected = np.array([1, 1, 2, 3, 4, 5, 5, 5, 5, 5])
assert actual.tolist() == expected.tolist()
def test_clip_all_none(self):
a = np.arange(10, dtype=object)
with assert_raises_regex(ValueError, 'max or min'):
np.clip(a, None, None)
def test_clip_invalid_casting(self):
a = np.arange(10, dtype=object)
with assert_raises_regex(ValueError,
'casting must be one of'):
self.fastclip(a, 1, 8, casting="garbage")
@pytest.mark.parametrize("amin, amax", [
# two scalars
(1, 0),
# mix scalar and array
(1, np.zeros(10)),
# two arrays
(np.ones(10), np.zeros(10)),
])
def test_clip_value_min_max_flip(self, amin, amax):
a = np.arange(10, dtype=np.int64)
# requirement from ufunc_docstrings.py
expected = np.minimum(np.maximum(a, amin), amax)
actual = np.clip(a, amin, amax)
assert_equal(actual, expected)
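    # Note: because maximum() is applied before minimum(), flipped bounds
    # collapse everything to amax, e.g.
    # np.clip(np.arange(5), 3, 1) -> array([1, 1, 1, 1, 1]).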
@pytest.mark.parametrize("arr, amin, amax, exp", [
# for a bug in npy_ObjectClip, based on a
# case produced by hypothesis
(np.zeros(10, dtype=np.int64),
0,
-2**64+1,
np.full(10, -2**64+1, dtype=object)),
# for bugs in NPY_TIMEDELTA_MAX, based on a case
# produced by hypothesis
(np.zeros(10, dtype='m8') - 1,
0,
0,
np.zeros(10, dtype='m8')),
])
def test_clip_problem_cases(self, arr, amin, amax, exp):
actual = np.clip(arr, amin, amax)
assert_equal(actual, exp)
@pytest.mark.xfail(reason="no scalar nan propagation yet",
raises=AssertionError,
strict=True)
@pytest.mark.parametrize("arr, amin, amax", [
# problematic scalar nan case from hypothesis
(np.zeros(10, dtype=np.int64),
np.array(np.nan),
np.zeros(10, dtype=np.int32)),
])
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_clip_scalar_nan_propagation(self, arr, amin, amax):
# enforcement of scalar nan propagation for comparisons
# called through clip()
expected = np.minimum(np.maximum(arr, amin), amax)
actual = np.clip(arr, amin, amax)
assert_equal(actual, expected)
@pytest.mark.xfail(reason="propagation doesn't match spec")
@pytest.mark.parametrize("arr, amin, amax", [
(np.array([1] * 10, dtype='m8'),
np.timedelta64('NaT'),
np.zeros(10, dtype=np.int32)),
])
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_NaT_propagation(self, arr, amin, amax):
# NOTE: the expected function spec doesn't
# propagate NaT, but clip() now does
expected = np.minimum(np.maximum(arr, amin), amax)
actual = np.clip(arr, amin, amax)
assert_equal(actual, expected)
@given(
data=st.data(),
arr=hynp.arrays(
dtype=hynp.integer_dtypes() | hynp.floating_dtypes(),
shape=hynp.array_shapes()
)
)
def test_clip_property(self, data, arr):
"""A property-based test using Hypothesis.
This aims for maximum generality: it could in principle generate *any*
valid inputs to np.clip, and in practice generates much more varied
inputs than human testers come up with.
Because many of the inputs have tricky dependencies - compatible dtypes
        and mutually-broadcastable shapes - we use the `st.data()` strategy to draw
values *inside* the test function, from strategies we construct based
on previous values. An alternative would be to define a custom strategy
        with `@st.composite`, but until we have duplicated code, keeping it
        inline is fine.
        That accounts for most of the function; the actual test is just a few
        lines to calculate and compare actual vs expected results!
"""
numeric_dtypes = hynp.integer_dtypes() | hynp.floating_dtypes()
# Generate shapes for the bounds which can be broadcast with each other
# and with the base shape. Below, we might decide to use scalar bounds,
# but it's clearer to generate these shapes unconditionally in advance.
in_shapes, result_shape = data.draw(
hynp.mutually_broadcastable_shapes(
num_shapes=2, base_shape=arr.shape
)
)
        # Scalar `nan` bounds are deprecated because they behave differently
        # from array `nan` bounds, so we exclude them from generation here.
s = numeric_dtypes.flatmap(
lambda x: hynp.from_dtype(x, allow_nan=False))
amin = data.draw(s | hynp.arrays(dtype=numeric_dtypes,
shape=in_shapes[0], elements={"allow_nan": False}))
amax = data.draw(s | hynp.arrays(dtype=numeric_dtypes,
shape=in_shapes[1], elements={"allow_nan": False}))
# Then calculate our result and expected result and check that they're
# equal! See gh-12519 and gh-19457 for discussion deciding on this
# property and the result_type argument.
result = np.clip(arr, amin, amax)
t = np.result_type(arr, amin, amax)
expected = np.minimum(amax, np.maximum(arr, amin, dtype=t), dtype=t)
assert result.dtype == t
assert_array_equal(result, expected)
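# The property above pins down clip's semantics: for any broadcastable
# amin/amax, np.clip(arr, amin, amax) matches the two-step
# minimum(amax, maximum(arr, amin)) evaluated in the common result type,
# e.g. np.clip(np.arange(4), 1, 2.5) -> array([1. , 1. , 2. , 2.5]).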
class TestAllclose:
rtol = 1e-5
atol = 1e-8
def setup_method(self):
self.olderr = np.seterr(invalid='ignore')
def teardown_method(self):
np.seterr(**self.olderr)
def tst_allclose(self, x, y):
assert_(np.allclose(x, y), "%s and %s not close" % (x, y))
def tst_not_allclose(self, x, y):
assert_(not np.allclose(x, y), "%s and %s shouldn't be close" % (x, y))
def test_ip_allclose(self):
# Parametric test factory.
arr = np.array([100, 1000])
aran = np.arange(125).reshape((5, 5, 5))
atol = self.atol
rtol = self.rtol
data = [([1, 0], [1, 0]),
([atol], [0]),
([1], [1+rtol+atol]),
(arr, arr + arr*rtol),
(arr, arr + arr*rtol + atol*2),
(aran, aran + aran*rtol),
(np.inf, np.inf),
(np.inf, [np.inf])]
for (x, y) in data:
self.tst_allclose(x, y)
def test_ip_not_allclose(self):
# Parametric test factory.
aran = np.arange(125).reshape((5, 5, 5))
atol = self.atol
rtol = self.rtol
data = [([np.inf, 0], [1, np.inf]),
([np.inf, 0], [1, 0]),
([np.inf, np.inf], [1, np.inf]),
([np.inf, np.inf], [1, 0]),
([-np.inf, 0], [np.inf, 0]),
([np.nan, 0], [np.nan, 0]),
([atol*2], [0]),
([1], [1+rtol+atol*2]),
(aran, aran + aran*atol + atol*2),
(np.array([np.inf, 1]), np.array([0, np.inf]))]
for (x, y) in data:
self.tst_not_allclose(x, y)
def test_no_parameter_modification(self):
x = np.array([np.inf, 1])
y = np.array([0, np.inf])
np.allclose(x, y)
assert_array_equal(x, np.array([np.inf, 1]))
assert_array_equal(y, np.array([0, np.inf]))
def test_min_int(self):
# Could make problems because of abs(min_int) == min_int
min_int = np.iinfo(np.int_).min
a = np.array([min_int], dtype=np.int_)
assert_(np.allclose(a, a))
def test_equalnan(self):
x = np.array([1.0, np.nan])
assert_(np.allclose(x, x, equal_nan=True))
def test_return_class_is_ndarray(self):
# Issue gh-6475
# Check that allclose does not preserve subtypes
class Foo(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
a = Foo([1])
assert_(type(np.allclose(a, a)) is bool)
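# Both allclose and isclose implement the asymmetric tolerance test
# |a - b| <= atol + rtol * |b| (the tolerance scales with the *second*
# argument), so e.g. np.isclose(1e10, 1.00001e10) is True, and
# np.isclose(1e-8, 1e-9) is True purely because of atol.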
class TestIsclose:
rtol = 1e-5
atol = 1e-8
def _setup(self):
atol = self.atol
rtol = self.rtol
arr = np.array([100, 1000])
aran = np.arange(125).reshape((5, 5, 5))
self.all_close_tests = [
([1, 0], [1, 0]),
([atol], [0]),
([1], [1 + rtol + atol]),
(arr, arr + arr*rtol),
(arr, arr + arr*rtol + atol),
(aran, aran + aran*rtol),
(np.inf, np.inf),
(np.inf, [np.inf]),
([np.inf, -np.inf], [np.inf, -np.inf]),
]
self.none_close_tests = [
([np.inf, 0], [1, np.inf]),
([np.inf, -np.inf], [1, 0]),
([np.inf, np.inf], [1, -np.inf]),
([np.inf, np.inf], [1, 0]),
([np.nan, 0], [np.nan, -np.inf]),
([atol*2], [0]),
([1], [1 + rtol + atol*2]),
(aran, aran + rtol*1.1*aran + atol*1.1),
(np.array([np.inf, 1]), np.array([0, np.inf])),
]
self.some_close_tests = [
([np.inf, 0], [np.inf, atol*2]),
([atol, 1, 1e6*(1 + 2*rtol) + atol], [0, np.nan, 1e6]),
(np.arange(3), [0, 1, 2.1]),
(np.nan, [np.nan, np.nan, np.nan]),
([0], [atol, np.inf, -np.inf, np.nan]),
(0, [atol, np.inf, -np.inf, np.nan]),
]
self.some_close_results = [
[True, False],
[True, False, False],
[True, True, False],
[False, False, False],
[True, False, False, False],
[True, False, False, False],
]
def test_ip_isclose(self):
self._setup()
tests = self.some_close_tests
results = self.some_close_results
for (x, y), result in zip(tests, results):
assert_array_equal(np.isclose(x, y), result)
def tst_all_isclose(self, x, y):
assert_(np.all(np.isclose(x, y)), "%s and %s not close" % (x, y))
def tst_none_isclose(self, x, y):
msg = "%s and %s shouldn't be close"
assert_(not np.any(np.isclose(x, y)), msg % (x, y))
def tst_isclose_allclose(self, x, y):
msg = "isclose.all() and allclose aren't same for %s and %s"
msg2 = "isclose and allclose aren't same for %s and %s"
if np.isscalar(x) and np.isscalar(y):
assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y))
else:
assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y))
def test_ip_all_isclose(self):
self._setup()
for (x, y) in self.all_close_tests:
self.tst_all_isclose(x, y)
def test_ip_none_isclose(self):
self._setup()
for (x, y) in self.none_close_tests:
self.tst_none_isclose(x, y)
def test_ip_isclose_allclose(self):
self._setup()
tests = (self.all_close_tests + self.none_close_tests +
self.some_close_tests)
for (x, y) in tests:
self.tst_isclose_allclose(x, y)
def test_equal_nan(self):
assert_array_equal(np.isclose(np.nan, np.nan, equal_nan=True), [True])
arr = np.array([1.0, np.nan])
assert_array_equal(np.isclose(arr, arr, equal_nan=True), [True, True])
def test_masked_arrays(self):
# Make sure to test the output type when arguments are interchanged.
x = np.ma.masked_where([True, True, False], np.arange(3))
assert_(type(x) is type(np.isclose(2, x)))
assert_(type(x) is type(np.isclose(x, 2)))
x = np.ma.masked_where([True, True, False], [np.nan, np.inf, np.nan])
assert_(type(x) is type(np.isclose(np.inf, x)))
assert_(type(x) is type(np.isclose(x, np.inf)))
x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan])
y = np.isclose(np.nan, x, equal_nan=True)
assert_(type(x) is type(y))
# Ensure that the mask isn't modified...
assert_array_equal([True, True, False], y.mask)
y = np.isclose(x, np.nan, equal_nan=True)
assert_(type(x) is type(y))
# Ensure that the mask isn't modified...
assert_array_equal([True, True, False], y.mask)
x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan])
y = np.isclose(x, x, equal_nan=True)
assert_(type(x) is type(y))
# Ensure that the mask isn't modified...
assert_array_equal([True, True, False], y.mask)
def test_scalar_return(self):
assert_(np.isscalar(np.isclose(1, 1)))
def test_no_parameter_modification(self):
x = np.array([np.inf, 1])
y = np.array([0, np.inf])
np.isclose(x, y)
assert_array_equal(x, np.array([np.inf, 1]))
assert_array_equal(y, np.array([0, np.inf]))
def test_non_finite_scalar(self):
# GH7014, when two scalars are compared the output should also be a
# scalar
assert_(np.isclose(np.inf, -np.inf) is np.False_)
assert_(np.isclose(0, np.inf) is np.False_)
assert_(type(np.isclose(0, np.inf)) is np.bool_)
def test_timedelta(self):
# Allclose currently works for timedelta64 as long as `atol` is
# an integer or also a timedelta64
a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]")
assert np.isclose(a, a, atol=0, equal_nan=True).all()
assert np.isclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True).all()
assert np.allclose(a, a, atol=0, equal_nan=True)
assert np.allclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True)
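# Quick reference for the std/var tests below: np.var(A) is
# mean(abs(A - A.mean())**2), and ddof rescales the denominator to
# (N - ddof), so for A = [1, -1, 1, -1]: var = 1 and var(ddof=1) = 4/3.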
class TestStdVar:
def setup_method(self):
self.A = np.array([1, -1, 1, -1])
self.real_var = 1
def test_basic(self):
assert_almost_equal(np.var(self.A), self.real_var)
assert_almost_equal(np.std(self.A)**2, self.real_var)
def test_scalars(self):
assert_equal(np.var(1), 0)
assert_equal(np.std(1), 0)
def test_ddof1(self):
assert_almost_equal(np.var(self.A, ddof=1),
self.real_var * len(self.A) / (len(self.A) - 1))
assert_almost_equal(np.std(self.A, ddof=1)**2,
self.real_var*len(self.A) / (len(self.A) - 1))
def test_ddof2(self):
assert_almost_equal(np.var(self.A, ddof=2),
self.real_var * len(self.A) / (len(self.A) - 2))
assert_almost_equal(np.std(self.A, ddof=2)**2,
self.real_var * len(self.A) / (len(self.A) - 2))
def test_out_scalar(self):
d = np.arange(10)
out = np.array(0.)
r = np.std(d, out=out)
assert_(r is out)
assert_array_equal(r, out)
r = np.var(d, out=out)
assert_(r is out)
assert_array_equal(r, out)
r = np.mean(d, out=out)
assert_(r is out)
assert_array_equal(r, out)
class TestStdVarComplex:
def test_basic(self):
A = np.array([1, 1.j, -1, -1.j])
real_var = 1
assert_almost_equal(np.var(A), real_var)
assert_almost_equal(np.std(A)**2, real_var)
def test_scalars(self):
assert_equal(np.var(1j), 0)
assert_equal(np.std(1j), 0)
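# For complex input the deviations are measured with abs()**2, which is why
# np.var([1, 1j, -1, -1j]) is exactly 1.0 (each |x - 0|**2 is 1) and the
# result is real-valued.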
class TestCreationFuncs:
# Test ones, zeros, empty and full.
def setup_method(self):
dtypes = {np.dtype(tp) for tp in itertools.chain(*np.sctypes.values())}
# void, bytes, str
variable_sized = {tp for tp in dtypes if tp.str.endswith('0')}
self.dtypes = sorted(dtypes - variable_sized |
{np.dtype(tp.str.replace("0", str(i)))
for tp in variable_sized for i in range(1, 10)},
key=lambda dtype: dtype.str)
self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'}
self.ndims = 10
def check_function(self, func, fill_value=None):
par = ((0, 1, 2),
range(self.ndims),
self.orders,
self.dtypes)
fill_kwarg = {}
if fill_value is not None:
fill_kwarg = {'fill_value': fill_value}
for size, ndims, order, dtype in itertools.product(*par):
shape = ndims * [size]
# do not fill void type
if fill_kwarg and dtype.str.startswith('|V'):
continue
arr = func(shape, order=order, dtype=dtype,
**fill_kwarg)
assert_equal(arr.dtype, dtype)
assert_(getattr(arr.flags, self.orders[order]))
if fill_value is not None:
if dtype.str.startswith('|S'):
val = str(fill_value)
else:
val = fill_value
assert_equal(arr, dtype.type(val))
def test_zeros(self):
self.check_function(np.zeros)
def test_ones(self):
self.check_function(np.ones)
def test_empty(self):
self.check_function(np.empty)
def test_full(self):
self.check_function(np.full, 0)
self.check_function(np.full, 1)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_for_reference_leak(self):
# Make sure we have an object for reference
dim = 1
beg = sys.getrefcount(dim)
np.zeros([dim]*10)
assert_(sys.getrefcount(dim) == beg)
np.ones([dim]*10)
assert_(sys.getrefcount(dim) == beg)
np.empty([dim]*10)
assert_(sys.getrefcount(dim) == beg)
np.full([dim]*10, 0)
assert_(sys.getrefcount(dim) == beg)
class TestLikeFuncs:
'''Test ones_like, zeros_like, empty_like and full_like'''
def setup_method(self):
self.data = [
# Array scalars
(np.array(3.), None),
(np.array(3), 'f8'),
# 1D arrays
(np.arange(6, dtype='f4'), None),
(np.arange(6), 'c16'),
# 2D C-layout arrays
(np.arange(6).reshape(2, 3), None),
(np.arange(6).reshape(3, 2), 'i1'),
# 2D F-layout arrays
(np.arange(6).reshape((2, 3), order='F'), None),
(np.arange(6).reshape((3, 2), order='F'), 'i1'),
# 3D C-layout arrays
(np.arange(24).reshape(2, 3, 4), None),
(np.arange(24).reshape(4, 3, 2), 'f4'),
# 3D F-layout arrays
(np.arange(24).reshape((2, 3, 4), order='F'), None),
(np.arange(24).reshape((4, 3, 2), order='F'), 'f4'),
# 3D non-C/F-layout arrays
(np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None),
(np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'),
]
self.shapes = [(), (5,), (5,6,), (5,6,7,)]
def compare_array_value(self, dz, value, fill_value):
if value is not None:
if fill_value:
try:
z = dz.dtype.type(value)
except OverflowError:
pass
else:
assert_(np.all(dz == z))
else:
assert_(np.all(dz == value))
def check_like_function(self, like_function, value, fill_value=False):
if fill_value:
fill_kwarg = {'fill_value': value}
else:
fill_kwarg = {}
for d, dtype in self.data:
# default (K) order, dtype
dz = like_function(d, dtype=dtype, **fill_kwarg)
assert_equal(dz.shape, d.shape)
assert_equal(np.array(dz.strides)*d.dtype.itemsize,
np.array(d.strides)*dz.dtype.itemsize)
assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous)
assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
self.compare_array_value(dz, value, fill_value)
# C order, default dtype
dz = like_function(d, order='C', dtype=dtype, **fill_kwarg)
assert_equal(dz.shape, d.shape)
assert_(dz.flags.c_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
self.compare_array_value(dz, value, fill_value)
# F order, default dtype
dz = like_function(d, order='F', dtype=dtype, **fill_kwarg)
assert_equal(dz.shape, d.shape)
assert_(dz.flags.f_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
self.compare_array_value(dz, value, fill_value)
# A order
dz = like_function(d, order='A', dtype=dtype, **fill_kwarg)
assert_equal(dz.shape, d.shape)
if d.flags.f_contiguous:
assert_(dz.flags.f_contiguous)
else:
assert_(dz.flags.c_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
self.compare_array_value(dz, value, fill_value)
# Test the 'shape' parameter
for s in self.shapes:
for o in 'CFA':
sz = like_function(d, dtype=dtype, shape=s, order=o,
**fill_kwarg)
assert_equal(sz.shape, s)
if dtype is None:
assert_equal(sz.dtype, d.dtype)
else:
assert_equal(sz.dtype, np.dtype(dtype))
if o == 'C' or (o == 'A' and d.flags.c_contiguous):
assert_(sz.flags.c_contiguous)
elif o == 'F' or (o == 'A' and d.flags.f_contiguous):
assert_(sz.flags.f_contiguous)
self.compare_array_value(sz, value, fill_value)
if (d.ndim != len(s)):
assert_equal(np.argsort(like_function(d, dtype=dtype,
shape=s, order='K',
**fill_kwarg).strides),
np.argsort(np.empty(s, dtype=dtype,
order='C').strides))
else:
assert_equal(np.argsort(like_function(d, dtype=dtype,
shape=s, order='K',
**fill_kwarg).strides),
np.argsort(d.strides))
# Test the 'subok' parameter
class MyNDArray(np.ndarray):
pass
a = np.array([[1, 2], [3, 4]]).view(MyNDArray)
b = like_function(a, **fill_kwarg)
assert_(type(b) is MyNDArray)
b = like_function(a, subok=False, **fill_kwarg)
assert_(type(b) is not MyNDArray)
def test_ones_like(self):
self.check_like_function(np.ones_like, 1)
def test_zeros_like(self):
self.check_like_function(np.zeros_like, 0)
def test_empty_like(self):
self.check_like_function(np.empty_like, None)
def test_filled_like(self):
self.check_like_function(np.full_like, 0, True)
self.check_like_function(np.full_like, 1, True)
self.check_like_function(np.full_like, 1000, True)
self.check_like_function(np.full_like, 123.456, True)
self.check_like_function(np.full_like, np.inf, True)
@pytest.mark.parametrize('likefunc', [np.empty_like, np.full_like,
np.zeros_like, np.ones_like])
@pytest.mark.parametrize('dtype', [str, bytes])
def test_dtype_str_bytes(self, likefunc, dtype):
# Regression test for gh-19860
a = np.arange(16).reshape(2, 8)
b = a[:, ::2] # Ensure b is not contiguous.
kwargs = {'fill_value': ''} if likefunc == np.full_like else {}
result = likefunc(b, dtype=dtype, **kwargs)
if dtype == str:
assert result.strides == (16, 4)
else:
# dtype is bytes
assert result.strides == (4, 1)
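# np.correlate computes the 1-d cross-correlation
#     c[k] = sum_n a[n + k] * conj(v[n])
# (the conjugate is a no-op for real input), so in 'valid' mode
# np.correlate([1, 2, 3], [0, 1, 0.5]) -> array([3.5]).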
class TestCorrelate:
def _setup(self, dt):
self.x = np.array([1, 2, 3, 4, 5], dtype=dt)
self.xs = np.arange(1, 20)[::3]
self.y = np.array([-1, -2, -3], dtype=dt)
self.z1 = np.array([-3., -8., -14., -20., -26., -14., -5.], dtype=dt)
self.z1_4 = np.array([-2., -5., -8., -11., -14., -5.], dtype=dt)
self.z1r = np.array([-15., -22., -22., -16., -10., -4., -1.], dtype=dt)
self.z2 = np.array([-5., -14., -26., -20., -14., -8., -3.], dtype=dt)
self.z2r = np.array([-1., -4., -10., -16., -22., -22., -15.], dtype=dt)
self.zs = np.array([-3., -14., -30., -48., -66., -84.,
-102., -54., -19.], dtype=dt)
def test_float(self):
self._setup(float)
z = np.correlate(self.x, self.y, 'full')
assert_array_almost_equal(z, self.z1)
z = np.correlate(self.x, self.y[:-1], 'full')
assert_array_almost_equal(z, self.z1_4)
z = np.correlate(self.y, self.x, 'full')
assert_array_almost_equal(z, self.z2)
z = np.correlate(self.x[::-1], self.y, 'full')
assert_array_almost_equal(z, self.z1r)
z = np.correlate(self.y, self.x[::-1], 'full')
assert_array_almost_equal(z, self.z2r)
z = np.correlate(self.xs, self.y, 'full')
assert_array_almost_equal(z, self.zs)
def test_object(self):
self._setup(Decimal)
z = np.correlate(self.x, self.y, 'full')
assert_array_almost_equal(z, self.z1)
z = np.correlate(self.y, self.x, 'full')
assert_array_almost_equal(z, self.z2)
def test_no_overwrite(self):
d = np.ones(100)
k = np.ones(3)
np.correlate(d, k)
assert_array_equal(d, np.ones(100))
assert_array_equal(k, np.ones(3))
def test_complex(self):
x = np.array([1, 2, 3, 4+1j], dtype=complex)
y = np.array([-1, -2j, 3+1j], dtype=complex)
r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=complex)
r_z = r_z[::-1].conjugate()
z = np.correlate(y, x, mode='full')
assert_array_almost_equal(z, r_z)
def test_zero_size(self):
with pytest.raises(ValueError):
np.correlate(np.array([]), np.ones(1000), mode='full')
with pytest.raises(ValueError):
np.correlate(np.ones(1000), np.array([]), mode='full')
def test_mode(self):
d = np.ones(100)
k = np.ones(3)
default_mode = np.correlate(d, k, mode='valid')
with assert_warns(DeprecationWarning):
valid_mode = np.correlate(d, k, mode='v')
assert_array_equal(valid_mode, default_mode)
# integer mode
with assert_raises(ValueError):
np.correlate(d, k, mode=-1)
assert_array_equal(np.correlate(d, k, mode=0), valid_mode)
# illegal arguments
with assert_raises(TypeError):
np.correlate(d, k, mode=None)
class TestConvolve:
def test_object(self):
d = [1.] * 100
k = [1.] * 3
assert_array_almost_equal(np.convolve(d, k)[2:-2], np.full(98, 3))
def test_no_overwrite(self):
d = np.ones(100)
k = np.ones(3)
np.convolve(d, k)
assert_array_equal(d, np.ones(100))
assert_array_equal(k, np.ones(3))
def test_mode(self):
d = np.ones(100)
k = np.ones(3)
default_mode = np.convolve(d, k, mode='full')
with assert_warns(DeprecationWarning):
full_mode = np.convolve(d, k, mode='f')
assert_array_equal(full_mode, default_mode)
# integer mode
with assert_raises(ValueError):
np.convolve(d, k, mode=-1)
assert_array_equal(np.convolve(d, k, mode=2), full_mode)
# illegal arguments
with assert_raises(TypeError):
np.convolve(d, k, mode=None)
class TestArgwhere:
@pytest.mark.parametrize('nd', [0, 1, 2])
def test_nd(self, nd):
# get an nd array with multiple elements in every dimension
x = np.empty((2,)*nd, bool)
# none
x[...] = False
assert_equal(np.argwhere(x).shape, (0, nd))
# only one
x[...] = False
x.flat[0] = True
assert_equal(np.argwhere(x).shape, (1, nd))
# all but one
x[...] = True
x.flat[0] = False
assert_equal(np.argwhere(x).shape, (x.size - 1, nd))
# all
x[...] = True
assert_equal(np.argwhere(x).shape, (x.size, nd))
def test_2D(self):
x = np.arange(6).reshape((2, 3))
assert_array_equal(np.argwhere(x > 1),
[[0, 2],
[1, 0],
[1, 1],
[1, 2]])
def test_list(self):
assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]])
class TestStringFunction:
def test_set_string_function(self):
a = np.array([1])
np.set_string_function(lambda x: "FOO", repr=True)
assert_equal(repr(a), "FOO")
np.set_string_function(None, repr=True)
assert_equal(repr(a), "array([1])")
np.set_string_function(lambda x: "FOO", repr=False)
assert_equal(str(a), "FOO")
np.set_string_function(None, repr=False)
assert_equal(str(a), "[1]")
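# np.roll shifts elements with wrap-around; without an axis the array is
# flattened first, shifted, then restored to its original shape, e.g.
# np.roll(np.arange(5), 2) -> array([3, 4, 0, 1, 2]).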
class TestRoll:
def test_roll1d(self):
x = np.arange(10)
xr = np.roll(x, 2)
assert_equal(xr, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]))
def test_roll2d(self):
x2 = np.reshape(np.arange(10), (2, 5))
x2r = np.roll(x2, 1)
assert_equal(x2r, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]]))
x2r = np.roll(x2, 1, axis=0)
assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
x2r = np.roll(x2, 1, axis=1)
assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
# Roll multiple axes at once.
x2r = np.roll(x2, 1, axis=(0, 1))
assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]))
x2r = np.roll(x2, (1, 0), axis=(0, 1))
assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
x2r = np.roll(x2, (-1, 0), axis=(0, 1))
assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
x2r = np.roll(x2, (0, 1), axis=(0, 1))
assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
x2r = np.roll(x2, (0, -1), axis=(0, 1))
assert_equal(x2r, np.array([[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]))
x2r = np.roll(x2, (1, 1), axis=(0, 1))
assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]))
x2r = np.roll(x2, (-1, -1), axis=(0, 1))
assert_equal(x2r, np.array([[6, 7, 8, 9, 5], [1, 2, 3, 4, 0]]))
# Roll the same axis multiple times.
x2r = np.roll(x2, 1, axis=(0, 0))
assert_equal(x2r, np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]))
x2r = np.roll(x2, 1, axis=(1, 1))
assert_equal(x2r, np.array([[3, 4, 0, 1, 2], [8, 9, 5, 6, 7]]))
# Roll more than one turn in either direction.
x2r = np.roll(x2, 6, axis=1)
assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
x2r = np.roll(x2, -4, axis=1)
assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
def test_roll_empty(self):
x = np.array([])
assert_equal(np.roll(x, 1), np.array([]))
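# np.rollaxis(a, axis, start) rolls `axis` backwards until it lies just
# before position `start` (default 0); for a.shape == (1, 2, 3, 4) that
# means np.rollaxis(a, 3).shape == (4, 1, 2, 3), i.e. tgtshape[(3, 0)].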
class TestRollaxis:
# expected shape indexed by (axis, start) for array of
# shape (1, 2, 3, 4)
tgtshape = {(0, 0): (1, 2, 3, 4), (0, 1): (1, 2, 3, 4),
(0, 2): (2, 1, 3, 4), (0, 3): (2, 3, 1, 4),
(0, 4): (2, 3, 4, 1),
(1, 0): (2, 1, 3, 4), (1, 1): (1, 2, 3, 4),
(1, 2): (1, 2, 3, 4), (1, 3): (1, 3, 2, 4),
(1, 4): (1, 3, 4, 2),
(2, 0): (3, 1, 2, 4), (2, 1): (1, 3, 2, 4),
(2, 2): (1, 2, 3, 4), (2, 3): (1, 2, 3, 4),
(2, 4): (1, 2, 4, 3),
(3, 0): (4, 1, 2, 3), (3, 1): (1, 4, 2, 3),
(3, 2): (1, 2, 4, 3), (3, 3): (1, 2, 3, 4),
(3, 4): (1, 2, 3, 4)}
def test_exceptions(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4)
assert_raises(np.AxisError, np.rollaxis, a, -5, 0)
assert_raises(np.AxisError, np.rollaxis, a, 0, -5)
assert_raises(np.AxisError, np.rollaxis, a, 4, 0)
assert_raises(np.AxisError, np.rollaxis, a, 0, 5)
def test_results(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
aind = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
for (i, j) in self.tgtshape:
# positive axis, positive start
res = np.rollaxis(a, axis=i, start=j)
i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
assert_(np.all(res[i0, i1, i2, i3] == a))
assert_(res.shape == self.tgtshape[(i, j)], str((i,j)))
assert_(not res.flags['OWNDATA'])
# negative axis, positive start
ip = i + 1
res = np.rollaxis(a, axis=-ip, start=j)
i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
assert_(np.all(res[i0, i1, i2, i3] == a))
assert_(res.shape == self.tgtshape[(4 - ip, j)])
assert_(not res.flags['OWNDATA'])
# positive axis, negative start
jp = j + 1 if j < 4 else j
res = np.rollaxis(a, axis=i, start=-jp)
i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
assert_(np.all(res[i0, i1, i2, i3] == a))
assert_(res.shape == self.tgtshape[(i, 4 - jp)])
assert_(not res.flags['OWNDATA'])
# negative axis, negative start
ip = i + 1
jp = j + 1 if j < 4 else j
res = np.rollaxis(a, axis=-ip, start=-jp)
i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
assert_(np.all(res[i0, i1, i2, i3] == a))
assert_(res.shape == self.tgtshape[(4 - ip, 4 - jp)])
assert_(not res.flags['OWNDATA'])
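# np.moveaxis is the simpler successor to rollaxis: the source axes land at
# the given destinations and all other axes keep their relative order, e.g.
# np.moveaxis(np.zeros((3, 4, 5)), 0, -1).shape == (4, 5, 3).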
class TestMoveaxis:
def test_move_to_end(self):
x = np.random.randn(5, 6, 7)
for source, expected in [(0, (6, 7, 5)),
(1, (5, 7, 6)),
(2, (5, 6, 7)),
(-1, (5, 6, 7))]:
actual = np.moveaxis(x, source, -1).shape
            assert_equal(actual, expected)
def test_move_new_position(self):
x = np.random.randn(1, 2, 3, 4)
for source, destination, expected in [
(0, 1, (2, 1, 3, 4)),
(1, 2, (1, 3, 2, 4)),
(1, -1, (1, 3, 4, 2)),
]:
actual = np.moveaxis(x, source, destination).shape
            assert_equal(actual, expected)
def test_preserve_order(self):
x = np.zeros((1, 2, 3, 4))
for source, destination in [
(0, 0),
(3, -1),
(-1, 3),
([0, -1], [0, -1]),
([2, 0], [2, 0]),
(range(4), range(4)),
]:
actual = np.moveaxis(x, source, destination).shape
            assert_equal(actual, (1, 2, 3, 4))
def test_move_multiples(self):
x = np.zeros((0, 1, 2, 3))
for source, destination, expected in [
([0, 1], [2, 3], (2, 3, 0, 1)),
([2, 3], [0, 1], (2, 3, 0, 1)),
([0, 1, 2], [2, 3, 0], (2, 3, 0, 1)),
([3, 0], [1, 0], (0, 3, 1, 2)),
([0, 3], [0, 1], (0, 3, 1, 2)),
]:
actual = np.moveaxis(x, source, destination).shape
            assert_equal(actual, expected)
def test_errors(self):
x = np.random.randn(1, 2, 3)
assert_raises_regex(np.AxisError, 'source.*out of bounds',
np.moveaxis, x, 3, 0)
assert_raises_regex(np.AxisError, 'source.*out of bounds',
np.moveaxis, x, -4, 0)
assert_raises_regex(np.AxisError, 'destination.*out of bounds',
np.moveaxis, x, 0, 5)
assert_raises_regex(ValueError, 'repeated axis in `source`',
np.moveaxis, x, [0, 0], [0, 1])
assert_raises_regex(ValueError, 'repeated axis in `destination`',
np.moveaxis, x, [0, 1], [1, 1])
assert_raises_regex(ValueError, 'must have the same number',
np.moveaxis, x, 0, [0, 1])
assert_raises_regex(ValueError, 'must have the same number',
np.moveaxis, x, [0, 1], [0])
def test_array_likes(self):
x = np.ma.zeros((1, 2, 3))
result = np.moveaxis(x, 0, 0)
        assert_equal(x.shape, result.shape)
assert_(isinstance(result, np.ma.MaskedArray))
x = [1, 2, 3]
result = np.moveaxis(x, 0, 0)
        assert_equal(x, list(result))
assert_(isinstance(result, np.ndarray))
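# np.cross treats 2-vectors as 3-vectors with z == 0 and returns only the
# z-component of the product, so cross([1, 2], [3, 4]) == 1*4 - 2*3 == -2,
# the scalar `z` asserted in test_2x2 below.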
class TestCross:
def test_2x2(self):
u = [1, 2]
v = [3, 4]
z = -2
cp = np.cross(u, v)
assert_equal(cp, z)
cp = np.cross(v, u)
assert_equal(cp, -z)
def test_2x3(self):
u = [1, 2]
v = [3, 4, 5]
z = np.array([10, -5, -2])
cp = np.cross(u, v)
assert_equal(cp, z)
cp = np.cross(v, u)
assert_equal(cp, -z)
def test_3x3(self):
u = [1, 2, 3]
v = [4, 5, 6]
z = np.array([-3, 6, -3])
cp = np.cross(u, v)
assert_equal(cp, z)
cp = np.cross(v, u)
assert_equal(cp, -z)
def test_broadcasting(self):
# Ticket #2624 (Trac #2032)
u = np.tile([1, 2], (11, 1))
v = np.tile([3, 4], (11, 1))
z = -2
assert_equal(np.cross(u, v), z)
assert_equal(np.cross(v, u), -z)
assert_equal(np.cross(u, u), 0)
u = np.tile([1, 2], (11, 1)).T
v = np.tile([3, 4, 5], (11, 1))
z = np.tile([10, -5, -2], (11, 1))
assert_equal(np.cross(u, v, axisa=0), z)
assert_equal(np.cross(v, u.T), -z)
assert_equal(np.cross(v, v), 0)
u = np.tile([1, 2, 3], (11, 1)).T
v = np.tile([3, 4], (11, 1)).T
z = np.tile([-12, 9, -2], (11, 1))
assert_equal(np.cross(u, v, axisa=0, axisb=0), z)
assert_equal(np.cross(v.T, u.T), -z)
assert_equal(np.cross(u.T, u.T), 0)
u = np.tile([1, 2, 3], (5, 1))
v = np.tile([4, 5, 6], (5, 1)).T
z = np.tile([-3, 6, -3], (5, 1))
assert_equal(np.cross(u, v, axisb=0), z)
assert_equal(np.cross(v.T, u), -z)
assert_equal(np.cross(u, u), 0)
def test_broadcasting_shapes(self):
u = np.ones((2, 1, 3))
v = np.ones((5, 3))
assert_equal(np.cross(u, v).shape, (2, 5, 3))
u = np.ones((10, 3, 5))
v = np.ones((2, 5))
assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3))
assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=2)
assert_raises(np.AxisError, np.cross, u, v, axisa=3, axisb=0)
u = np.ones((10, 3, 5, 7))
v = np.ones((5, 7, 2))
assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7))
assert_raises(np.AxisError, np.cross, u, v, axisa=-5, axisb=2)
assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=-4)
# gh-5885
u = np.ones((3, 4, 2))
for axisc in range(-2, 2):
assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4))
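# np.outer flattens both inputs and forms the product table
# out[i, j] = a[i] * b[j]; with arr1 = ones(5) below, every row of
# np.outer(arr1, arr3) is simply a copy of arr3.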
def test_outer_out_param():
arr1 = np.ones((5,))
arr2 = np.ones((2,))
arr3 = np.linspace(-2, 2, 5)
out1 = np.ndarray(shape=(5,5))
out2 = np.ndarray(shape=(2, 5))
res1 = np.outer(arr1, arr3, out1)
assert_equal(res1, out1)
assert_equal(np.outer(arr2, arr3, out2), out2)
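# np.indices(dims) stacks one index grid per dimension into an array of
# shape (len(dims),) + dims; with sparse=True it instead returns a tuple of
# mutually broadcastable arrays, which is what test_sparse checks below.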
class TestIndices:
def test_simple(self):
[x, y] = np.indices((4, 3))
assert_array_equal(x, np.array([[0, 0, 0],
[1, 1, 1],
[2, 2, 2],
[3, 3, 3]]))
assert_array_equal(y, np.array([[0, 1, 2],
[0, 1, 2],
[0, 1, 2],
[0, 1, 2]]))
def test_single_input(self):
[x] = np.indices((4,))
assert_array_equal(x, np.array([0, 1, 2, 3]))
[x] = np.indices((4,), sparse=True)
assert_array_equal(x, np.array([0, 1, 2, 3]))
def test_scalar_input(self):
assert_array_equal([], np.indices(()))
assert_array_equal([], np.indices((), sparse=True))
assert_array_equal([[]], np.indices((0,)))
assert_array_equal([[]], np.indices((0,), sparse=True))
def test_sparse(self):
[x, y] = np.indices((4,3), sparse=True)
assert_array_equal(x, np.array([[0], [1], [2], [3]]))
assert_array_equal(y, np.array([[0, 1, 2]]))
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
@pytest.mark.parametrize("dims", [(), (0,), (4, 3)])
def test_return_type(self, dtype, dims):
inds = np.indices(dims, dtype=dtype)
assert_(inds.dtype == dtype)
for arr in np.indices(dims, dtype=dtype, sparse=True):
assert_(arr.dtype == dtype)
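# np.require(a, dtype, requirements) returns the input unchanged when it
# already has the requested dtype and flags (except for 'O'/OWNDATA, which
# may force a copy), otherwise a converted copy, e.g.
# np.require(np.arange(3), np.float64, ['C', 'W']) is C-contiguous float64.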
class TestRequire:
flag_names = ['C', 'C_CONTIGUOUS', 'CONTIGUOUS',
'F', 'F_CONTIGUOUS', 'FORTRAN',
'A', 'ALIGNED',
'W', 'WRITEABLE',
'O', 'OWNDATA']
def generate_all_false(self, dtype):
arr = np.zeros((2, 2), [('junk', 'i1'), ('a', dtype)])
arr.setflags(write=False)
a = arr['a']
assert_(not a.flags['C'])
assert_(not a.flags['F'])
assert_(not a.flags['O'])
assert_(not a.flags['W'])
assert_(not a.flags['A'])
return a
def set_and_check_flag(self, flag, dtype, arr):
if dtype is None:
dtype = arr.dtype
b = np.require(arr, dtype, [flag])
assert_(b.flags[flag])
assert_(b.dtype == dtype)
# a further call to np.require ought to return the same array
# unless OWNDATA is specified.
c = np.require(b, None, [flag])
if flag[0] != 'O':
assert_(c is b)
else:
assert_(c.flags[flag])
def test_require_each(self):
id = ['f8', 'i4']
fd = [None, 'f8', 'c16']
for idtype, fdtype, flag in itertools.product(id, fd, self.flag_names):
a = self.generate_all_false(idtype)
self.set_and_check_flag(flag, fdtype, a)
def test_unknown_requirement(self):
a = self.generate_all_false('f8')
assert_raises(KeyError, np.require, a, None, 'Q')
def test_non_array_input(self):
a = np.require([1, 2, 3, 4], 'i4', ['C', 'A', 'O'])
assert_(a.flags['O'])
assert_(a.flags['C'])
assert_(a.flags['A'])
assert_(a.dtype == 'i4')
assert_equal(a, [1, 2, 3, 4])
def test_C_and_F_simul(self):
a = self.generate_all_false('f8')
assert_raises(ValueError, np.require, a, None, ['C', 'F'])
def test_ensure_array(self):
class ArraySubclass(np.ndarray):
pass
a = ArraySubclass((2, 2))
b = np.require(a, None, ['E'])
assert_(type(b) is np.ndarray)
def test_preserve_subtype(self):
class ArraySubclass(np.ndarray):
pass
for flag in self.flag_names:
a = ArraySubclass((2, 2))
self.set_and_check_flag(flag, None, a)
class TestBroadcast:
def test_broadcast_in_args(self):
# gh-5881
arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)),
np.empty((5, 1, 7))]
mits = [np.broadcast(*arrs),
np.broadcast(np.broadcast(*arrs[:0]), np.broadcast(*arrs[0:])),
np.broadcast(np.broadcast(*arrs[:1]), np.broadcast(*arrs[1:])),
np.broadcast(np.broadcast(*arrs[:2]), np.broadcast(*arrs[2:])),
np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])]
for mit in mits:
assert_equal(mit.shape, (5, 6, 7))
assert_equal(mit.ndim, 3)
assert_equal(mit.nd, 3)
assert_equal(mit.numiter, 4)
for a, ia in zip(arrs, mit.iters):
assert_(a is ia.base)
def test_broadcast_single_arg(self):
# gh-6899
arrs = [np.empty((5, 6, 7))]
mit = np.broadcast(*arrs)
assert_equal(mit.shape, (5, 6, 7))
assert_equal(mit.ndim, 3)
assert_equal(mit.nd, 3)
assert_equal(mit.numiter, 1)
assert_(arrs[0] is mit.iters[0].base)
def test_number_of_arguments(self):
arr = np.empty((5,))
for j in range(35):
arrs = [arr] * j
if j > 32:
assert_raises(ValueError, np.broadcast, *arrs)
else:
mit = np.broadcast(*arrs)
assert_equal(mit.numiter, j)
def test_broadcast_error_kwargs(self):
        # gh-13455
arrs = [np.empty((5, 6, 7))]
mit = np.broadcast(*arrs)
mit2 = np.broadcast(*arrs, **{})
assert_equal(mit.shape, mit2.shape)
assert_equal(mit.ndim, mit2.ndim)
assert_equal(mit.nd, mit2.nd)
assert_equal(mit.numiter, mit2.numiter)
assert_(mit.iters[0].base is mit2.iters[0].base)
assert_raises(ValueError, np.broadcast, 1, **{'x': 1})
def test_shape_mismatch_error_message(self):
with pytest.raises(ValueError, match=r"arg 0 with shape \(1, 3\) and "
r"arg 2 with shape \(2,\)"):
np.broadcast([[1, 2, 3]], [[4], [5]], [6, 7])
class TestKeepdims:
class sub_array(np.ndarray):
def sum(self, axis=None, dtype=None, out=None):
return np.ndarray.sum(self, axis, dtype, out, keepdims=True)
def test_raise(self):
sub_class = self.sub_array
x = np.arange(30).view(sub_class)
assert_raises(TypeError, np.sum, x, keepdims=True)
class TestTensordot:
def test_zero_dimension(self):
# Test resolution to issue #5663
a = np.ndarray((3,0))
b = np.ndarray((0,4))
td = np.tensordot(a, b, (1, 0))
assert_array_equal(td, np.dot(a, b))
assert_array_equal(td, np.einsum('ij,jk', a, b))
def test_zero_dimensional(self):
# gh-12130
arr_0d = np.array(1)
ret = np.tensordot(arr_0d, arr_0d, ([], [])) # contracting no axes is well defined
assert_array_equal(ret, arr_0d)
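# With axes=([], []) tensordot contracts nothing, i.e. it degenerates into
# an outer product; for the 0-d operands above that is plain scalar
# multiplication, hence the result equals arr_0d itself.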
| 136,761 | Python | 37.042281 | 95 | 0.522291 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_einsum.py | import itertools
import pytest
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_almost_equal,
assert_raises, suppress_warnings, assert_raises_regex, assert_allclose
)
# Setup for optimize einsum
chars = 'abcdefghij'
sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3])
global_size_dict = dict(zip(chars, sizes))
class TestEinsum:
def test_einsum_errors(self):
for do_opt in [True, False]:
# Need enough arguments
assert_raises(ValueError, np.einsum, optimize=do_opt)
assert_raises(ValueError, np.einsum, "", optimize=do_opt)
# subscripts must be a string
assert_raises(TypeError, np.einsum, 0, 0, optimize=do_opt)
# out parameter must be an array
assert_raises(TypeError, np.einsum, "", 0, out='test',
optimize=do_opt)
# order parameter must be a valid order
assert_raises(ValueError, np.einsum, "", 0, order='W',
optimize=do_opt)
# casting parameter must be a valid casting
assert_raises(ValueError, np.einsum, "", 0, casting='blah',
optimize=do_opt)
# dtype parameter must be a valid dtype
assert_raises(TypeError, np.einsum, "", 0, dtype='bad_data_type',
optimize=do_opt)
# other keyword arguments are rejected
assert_raises(TypeError, np.einsum, "", 0, bad_arg=0,
optimize=do_opt)
# issue 4528 revealed a segfault with this call
assert_raises(TypeError, np.einsum, *(None,)*63, optimize=do_opt)
# number of operands must match count in subscripts string
assert_raises(ValueError, np.einsum, "", 0, 0, optimize=do_opt)
assert_raises(ValueError, np.einsum, ",", 0, [0], [0],
optimize=do_opt)
assert_raises(ValueError, np.einsum, ",", [0], optimize=do_opt)
# can't have more subscripts than dimensions in the operand
assert_raises(ValueError, np.einsum, "i", 0, optimize=do_opt)
assert_raises(ValueError, np.einsum, "ij", [0, 0], optimize=do_opt)
assert_raises(ValueError, np.einsum, "...i", 0, optimize=do_opt)
assert_raises(ValueError, np.einsum, "i...j", [0, 0], optimize=do_opt)
assert_raises(ValueError, np.einsum, "i...", 0, optimize=do_opt)
assert_raises(ValueError, np.einsum, "ij...", [0, 0], optimize=do_opt)
# invalid ellipsis
assert_raises(ValueError, np.einsum, "i..", [0, 0], optimize=do_opt)
assert_raises(ValueError, np.einsum, ".i...", [0, 0], optimize=do_opt)
assert_raises(ValueError, np.einsum, "j->..j", [0, 0], optimize=do_opt)
assert_raises(ValueError, np.einsum, "j->.j...", [0, 0], optimize=do_opt)
# invalid subscript character
assert_raises(ValueError, np.einsum, "i%...", [0, 0], optimize=do_opt)
assert_raises(ValueError, np.einsum, "...j$", [0, 0], optimize=do_opt)
assert_raises(ValueError, np.einsum, "i->&", [0, 0], optimize=do_opt)
# output subscripts must appear in input
assert_raises(ValueError, np.einsum, "i->ij", [0, 0], optimize=do_opt)
# output subscripts may only be specified once
assert_raises(ValueError, np.einsum, "ij->jij", [[0, 0], [0, 0]],
optimize=do_opt)
            # dimensions must match when being collapsed
assert_raises(ValueError, np.einsum, "ii",
np.arange(6).reshape(2, 3), optimize=do_opt)
assert_raises(ValueError, np.einsum, "ii->i",
np.arange(6).reshape(2, 3), optimize=do_opt)
# broadcasting to new dimensions must be enabled explicitly
assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3),
optimize=do_opt)
assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]],
out=np.arange(4).reshape(2, 2), optimize=do_opt)
with assert_raises_regex(ValueError, "'b'"):
# gh-11221 - 'c' erroneously appeared in the error message
a = np.ones((3, 3, 4, 5, 6))
b = np.ones((3, 4, 5))
np.einsum('aabcb,abc', a, b)
# Check order kwarg, asanyarray allows 1d to pass through
assert_raises(ValueError, np.einsum, "i->i", np.arange(6).reshape(-1, 1),
optimize=do_opt, order='d')
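    # A compact reminder of the subscript grammar exercised above: a label
    # repeated within one operand takes a diagonal ('ii' with implicit
    # output sums it, giving the trace), '->' makes the output explicit,
    # and '...' stands for the unnamed broadcast dimensions.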
def test_einsum_views(self):
# pass-through
for do_opt in [True, False]:
a = np.arange(6)
a.shape = (2, 3)
b = np.einsum("...", a, optimize=do_opt)
assert_(b.base is a)
b = np.einsum(a, [Ellipsis], optimize=do_opt)
assert_(b.base is a)
b = np.einsum("ij", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, a)
b = np.einsum(a, [0, 1], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, a)
# output is writeable whenever input is writeable
b = np.einsum("...", a, optimize=do_opt)
assert_(b.flags['WRITEABLE'])
a.flags['WRITEABLE'] = False
b = np.einsum("...", a, optimize=do_opt)
assert_(not b.flags['WRITEABLE'])
# transpose
a = np.arange(6)
a.shape = (2, 3)
b = np.einsum("ji", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, a.T)
b = np.einsum(a, [1, 0], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, a.T)
# diagonal
a = np.arange(9)
a.shape = (3, 3)
b = np.einsum("ii->i", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a[i, i] for i in range(3)])
b = np.einsum(a, [0, 0], [0], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a[i, i] for i in range(3)])
# diagonal with various ways of broadcasting an additional dimension
a = np.arange(27)
a.shape = (3, 3, 3)
b = np.einsum("...ii->...i", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
b = np.einsum("ii...->...i", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)]
for x in a.transpose(2, 0, 1)])
b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)]
for x in a.transpose(2, 0, 1)])
b = np.einsum("...ii->i...", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a[:, i, i] for i in range(3)])
b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a[:, i, i] for i in range(3)])
b = np.einsum("jii->ij", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a[:, i, i] for i in range(3)])
b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a[:, i, i] for i in range(3)])
b = np.einsum("ii...->i...", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
b = np.einsum("i...i->i...", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
b = np.einsum("i...i->...i", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)]
for x in a.transpose(1, 0, 2)])
b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)]
for x in a.transpose(1, 0, 2)])
# triple diagonal
a = np.arange(27)
a.shape = (3, 3, 3)
b = np.einsum("iii->i", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a[i, i, i] for i in range(3)])
b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a[i, i, i] for i in range(3)])
# swap axes
a = np.arange(24)
a.shape = (2, 3, 4)
b = np.einsum("ijk->jik", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, a.swapaxes(0, 1))
b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, a.swapaxes(0, 1))
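    # All of the specs above are pure relabelings (identity, transpose,
    # diagonal, axis swap), so einsum can return a view: `b.base is a`
    # holds and writing through `b` would modify `a`.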
def check_einsum_sums(self, dtype, do_opt=False):
# Check various sums. Does many sizes to exercise unrolled loops.
# sum(a, axis=-1)
for n in range(1, 17):
a = np.arange(n, dtype=dtype)
assert_equal(np.einsum("i->", a, optimize=do_opt),
np.sum(a, axis=-1).astype(dtype))
assert_equal(np.einsum(a, [0], [], optimize=do_opt),
np.sum(a, axis=-1).astype(dtype))
for n in range(1, 17):
a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
assert_equal(np.einsum("...i->...", a, optimize=do_opt),
np.sum(a, axis=-1).astype(dtype))
assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis], optimize=do_opt),
np.sum(a, axis=-1).astype(dtype))
# sum(a, axis=0)
for n in range(1, 17):
a = np.arange(2*n, dtype=dtype).reshape(2, n)
assert_equal(np.einsum("i...->...", a, optimize=do_opt),
np.sum(a, axis=0).astype(dtype))
assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt),
np.sum(a, axis=0).astype(dtype))
for n in range(1, 17):
a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
assert_equal(np.einsum("i...->...", a, optimize=do_opt),
np.sum(a, axis=0).astype(dtype))
assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt),
np.sum(a, axis=0).astype(dtype))
# trace(a)
for n in range(1, 17):
a = np.arange(n*n, dtype=dtype).reshape(n, n)
assert_equal(np.einsum("ii", a, optimize=do_opt),
np.trace(a).astype(dtype))
assert_equal(np.einsum(a, [0, 0], optimize=do_opt),
np.trace(a).astype(dtype))
# gh-15961: should accept numpy int64 type in subscript list
np_array = np.asarray([0, 0])
assert_equal(np.einsum(a, np_array, optimize=do_opt),
np.trace(a).astype(dtype))
assert_equal(np.einsum(a, list(np_array), optimize=do_opt),
np.trace(a).astype(dtype))
# multiply(a, b)
assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case
for n in range(1, 17):
a = np.arange(3 * n, dtype=dtype).reshape(3, n)
b = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n)
assert_equal(np.einsum("..., ...", a, b, optimize=do_opt),
np.multiply(a, b))
assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis], optimize=do_opt),
np.multiply(a, b))
# inner(a,b)
for n in range(1, 17):
a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("...i, ...i", a, b, optimize=do_opt), np.inner(a, b))
assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0], optimize=do_opt),
np.inner(a, b))
for n in range(1, 11):
a = np.arange(n * 3 * 2, dtype=dtype).reshape(n, 3, 2)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("i..., i...", a, b, optimize=do_opt),
np.inner(a.T, b.T).T)
assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis], optimize=do_opt),
np.inner(a.T, b.T).T)
# outer(a,b)
for n in range(1, 17):
a = np.arange(3, dtype=dtype)+1
b = np.arange(n, dtype=dtype)+1
assert_equal(np.einsum("i,j", a, b, optimize=do_opt),
np.outer(a, b))
assert_equal(np.einsum(a, [0], b, [1], optimize=do_opt),
np.outer(a, b))
# Suppress the complex warnings for the 'as f8' tests
with suppress_warnings() as sup:
sup.filter(np.ComplexWarning)
# matvec(a,b) / a.dot(b) where a is matrix, b is vector
for n in range(1, 17):
a = np.arange(4*n, dtype=dtype).reshape(4, n)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("ij, j", a, b, optimize=do_opt),
np.dot(a, b))
assert_equal(np.einsum(a, [0, 1], b, [1], optimize=do_opt),
np.dot(a, b))
c = np.arange(4, dtype=dtype)
np.einsum("ij,j", a, b, out=c,
dtype='f8', casting='unsafe', optimize=do_opt)
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
c[...] = 0
np.einsum(a, [0, 1], b, [1], out=c,
dtype='f8', casting='unsafe', optimize=do_opt)
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
for n in range(1, 17):
a = np.arange(4*n, dtype=dtype).reshape(4, n)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("ji,j", a.T, b.T, optimize=do_opt),
np.dot(b.T, a.T))
assert_equal(np.einsum(a.T, [1, 0], b.T, [1], optimize=do_opt),
np.dot(b.T, a.T))
c = np.arange(4, dtype=dtype)
np.einsum("ji,j", a.T, b.T, out=c,
dtype='f8', casting='unsafe', optimize=do_opt)
assert_equal(c,
np.dot(b.T.astype('f8'),
a.T.astype('f8')).astype(dtype))
c[...] = 0
np.einsum(a.T, [1, 0], b.T, [1], out=c,
dtype='f8', casting='unsafe', optimize=do_opt)
assert_equal(c,
np.dot(b.T.astype('f8'),
a.T.astype('f8')).astype(dtype))
# matmat(a,b) / a.dot(b) where a is matrix, b is matrix
for n in range(1, 17):
if n < 8 or dtype != 'f2':
a = np.arange(4*n, dtype=dtype).reshape(4, n)
b = np.arange(n*6, dtype=dtype).reshape(n, 6)
assert_equal(np.einsum("ij,jk", a, b, optimize=do_opt),
np.dot(a, b))
assert_equal(np.einsum(a, [0, 1], b, [1, 2], optimize=do_opt),
np.dot(a, b))
for n in range(1, 17):
a = np.arange(4*n, dtype=dtype).reshape(4, n)
b = np.arange(n*6, dtype=dtype).reshape(n, 6)
c = np.arange(24, dtype=dtype).reshape(4, 6)
np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe',
optimize=do_opt)
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
c[...] = 0
np.einsum(a, [0, 1], b, [1, 2], out=c,
dtype='f8', casting='unsafe', optimize=do_opt)
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
# matrix triple product (note this is not currently an efficient
# way to multiply 3 matrices)
a = np.arange(12, dtype=dtype).reshape(3, 4)
b = np.arange(20, dtype=dtype).reshape(4, 5)
c = np.arange(30, dtype=dtype).reshape(5, 6)
if dtype != 'f2':
assert_equal(np.einsum("ij,jk,kl", a, b, c, optimize=do_opt),
a.dot(b).dot(c))
assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3],
optimize=do_opt), a.dot(b).dot(c))
d = np.arange(18, dtype=dtype).reshape(3, 6)
np.einsum("ij,jk,kl", a, b, c, out=d,
dtype='f8', casting='unsafe', optimize=do_opt)
tgt = a.astype('f8').dot(b.astype('f8'))
tgt = tgt.dot(c.astype('f8')).astype(dtype)
assert_equal(d, tgt)
d[...] = 0
np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d,
dtype='f8', casting='unsafe', optimize=do_opt)
tgt = a.astype('f8').dot(b.astype('f8'))
tgt = tgt.dot(c.astype('f8')).astype(dtype)
assert_equal(d, tgt)
# tensordot(a, b)
if np.dtype(dtype) != np.dtype('f2'):
a = np.arange(60, dtype=dtype).reshape(3, 4, 5)
b = np.arange(24, dtype=dtype).reshape(4, 3, 2)
assert_equal(np.einsum("ijk, jil -> kl", a, b),
np.tensordot(a, b, axes=([1, 0], [0, 1])))
assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]),
np.tensordot(a, b, axes=([1, 0], [0, 1])))
c = np.arange(10, dtype=dtype).reshape(5, 2)
np.einsum("ijk,jil->kl", a, b, out=c,
dtype='f8', casting='unsafe', optimize=do_opt)
assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
axes=([1, 0], [0, 1])).astype(dtype))
c[...] = 0
np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c,
dtype='f8', casting='unsafe', optimize=do_opt)
assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
axes=([1, 0], [0, 1])).astype(dtype))
# logical_and(logical_and(a!=0, b!=0), c!=0)
a = np.array([1, 3, -2, 0, 12, 13, 0, 1], dtype=dtype)
b = np.array([0, 3.5, 0., -2, 0, 1, 3, 12], dtype=dtype)
c = np.array([True, True, False, True, True, False, True, True])
assert_equal(np.einsum("i,i,i->i", a, b, c,
dtype='?', casting='unsafe', optimize=do_opt),
np.logical_and(np.logical_and(a != 0, b != 0), c != 0))
assert_equal(np.einsum(a, [0], b, [0], c, [0], [0],
dtype='?', casting='unsafe'),
np.logical_and(np.logical_and(a != 0, b != 0), c != 0))
a = np.arange(9, dtype=dtype)
assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a))
assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a))
assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a))
assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a))
# Various stride0, contiguous, and SSE aligned variants
for n in range(1, 25):
a = np.arange(n, dtype=dtype)
if np.dtype(dtype).itemsize > 1:
assert_equal(np.einsum("...,...", a, a, optimize=do_opt),
np.multiply(a, a))
assert_equal(np.einsum("i,i", a, a, optimize=do_opt), np.dot(a, a))
assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2*a)
assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2*a)
assert_equal(np.einsum("i,->", a, 2, optimize=do_opt), 2*np.sum(a))
assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2*np.sum(a))
assert_equal(np.einsum("...,...", a[1:], a[:-1], optimize=do_opt),
np.multiply(a[1:], a[:-1]))
assert_equal(np.einsum("i,i", a[1:], a[:-1], optimize=do_opt),
np.dot(a[1:], a[:-1]))
assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2*a[1:])
assert_equal(np.einsum(",i->i", 2, a[1:], optimize=do_opt), 2*a[1:])
assert_equal(np.einsum("i,->", a[1:], 2, optimize=do_opt),
2*np.sum(a[1:]))
assert_equal(np.einsum(",i->", 2, a[1:], optimize=do_opt),
2*np.sum(a[1:]))
# An object array, summed as the data type
a = np.arange(9, dtype=object)
b = np.einsum("i->", a, dtype=dtype, casting='unsafe')
assert_equal(b, np.sum(a))
assert_equal(b.dtype, np.dtype(dtype))
b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe')
assert_equal(b, np.sum(a))
assert_equal(b.dtype, np.dtype(dtype))
# A case which was failing (ticket #1885)
p = np.arange(2) + 1
q = np.arange(4).reshape(2, 2) + 3
r = np.arange(4).reshape(2, 2) + 7
assert_equal(np.einsum('z,mz,zm->', p, q, r), 253)
# singleton dimensions broadcast (gh-10343)
p = np.ones((10,2))
q = np.ones((1,2))
assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True),
np.einsum('ij,ij->j', p, q, optimize=False))
assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True),
[10.] * 2)
# a blas-compatible contraction broadcasting case which was failing
# for optimize=True (ticket #10930)
x = np.array([2., 3.])
y = np.array([4.])
assert_array_equal(np.einsum("i, i", x, y, optimize=False), 20.)
assert_array_equal(np.einsum("i, i", x, y, optimize=True), 20.)
# all-ones array was bypassing bug (ticket #10930)
p = np.ones((1, 5)) / 2
q = np.ones((5, 5)) / 2
for optimize in (True, False):
assert_array_equal(np.einsum("...ij,...jk->...ik", p, p,
optimize=optimize),
np.einsum("...ij,...jk->...ik", p, q,
optimize=optimize))
assert_array_equal(np.einsum("...ij,...jk->...ik", p, q,
optimize=optimize),
np.full((1, 5), 1.25))
# Cases which were failing (gh-10899)
x = np.eye(2, dtype=dtype)
y = np.ones(2, dtype=dtype)
assert_array_equal(np.einsum("ji,i->", x, y, optimize=optimize),
[2.]) # contig_contig_outstride0_two
assert_array_equal(np.einsum("i,ij->", y, x, optimize=optimize),
[2.]) # stride0_contig_outstride0_two
assert_array_equal(np.einsum("ij,i->", x, y, optimize=optimize),
[2.]) # contig_stride0_outstride0_two
def test_einsum_sums_int8(self):
self.check_einsum_sums('i1')
def test_einsum_sums_uint8(self):
self.check_einsum_sums('u1')
def test_einsum_sums_int16(self):
self.check_einsum_sums('i2')
def test_einsum_sums_uint16(self):
self.check_einsum_sums('u2')
def test_einsum_sums_int32(self):
self.check_einsum_sums('i4')
self.check_einsum_sums('i4', True)
def test_einsum_sums_uint32(self):
self.check_einsum_sums('u4')
self.check_einsum_sums('u4', True)
def test_einsum_sums_int64(self):
self.check_einsum_sums('i8')
def test_einsum_sums_uint64(self):
self.check_einsum_sums('u8')
def test_einsum_sums_float16(self):
self.check_einsum_sums('f2')
def test_einsum_sums_float32(self):
self.check_einsum_sums('f4')
def test_einsum_sums_float64(self):
self.check_einsum_sums('f8')
self.check_einsum_sums('f8', True)
def test_einsum_sums_longdouble(self):
self.check_einsum_sums(np.longdouble)
def test_einsum_sums_cfloat64(self):
self.check_einsum_sums('c8')
self.check_einsum_sums('c8', True)
def test_einsum_sums_cfloat128(self):
self.check_einsum_sums('c16')
def test_einsum_sums_clongdouble(self):
self.check_einsum_sums(np.clongdouble)
def test_einsum_misc(self):
# This call used to crash because of a bug in
# PyArray_AssignZero
a = np.ones((1, 2))
b = np.ones((2, 2, 1))
assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]])
assert_equal(np.einsum('ij...,j...->i...', a, b, optimize=True), [[[2], [2]]])
# Regression test for issue #10369 (test unicode inputs with Python 2)
assert_equal(np.einsum(u'ij...,j...->i...', a, b), [[[2], [2]]])
assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4]), 20)
assert_equal(np.einsum(u'...i,...i', [1, 2, 3], [2, 3, 4]), 20)
assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4],
optimize=u'greedy'), 20)
# The iterator had an issue with buffering this reduction
a = np.ones((5, 12, 4, 2, 3), np.int64)
b = np.ones((5, 12, 11), np.int64)
assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b),
np.einsum('ijklm,ijn->', a, b))
assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b, optimize=True),
np.einsum('ijklm,ijn->', a, b, optimize=True))
# Issue #2027, was a problem in the contiguous 3-argument
# inner loop implementation
a = np.arange(1, 3)
b = np.arange(1, 5).reshape(2, 2)
c = np.arange(1, 9).reshape(4, 2)
assert_equal(np.einsum('x,yx,zx->xzy', a, b, c),
[[[1, 3], [3, 9], [5, 15], [7, 21]],
[[8, 16], [16, 32], [24, 48], [32, 64]]])
assert_equal(np.einsum('x,yx,zx->xzy', a, b, c, optimize=True),
[[[1, 3], [3, 9], [5, 15], [7, 21]],
[[8, 16], [16, 32], [24, 48], [32, 64]]])
# Ensure explicitly setting out=None does not cause an error
# see issue gh-15776 and issue gh-15256
assert_equal(np.einsum('i,j', [1], [2], out=None), [[2]])
def test_subscript_range(self):
        # Issue #7741: make sure that all letters of the Latin alphabet
        # (both uppercase & lowercase) can be used when creating a subscript
        # from arrays
a = np.ones((2, 3))
b = np.ones((3, 4))
np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False)
np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False)
np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False)
assert_raises(ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False))
assert_raises(ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False))
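        # For reference (an assumption about the label mapping): integer
        # labels index the 52 Latin letters, lowercase first, so 0 is 'a',
        # 26 is 'A' and 51 is 'Z'; e.g. np.einsum(a, [0, 20], b, [20, 2],
        # [0, 2]) should match np.einsum('au,uc->ac', a, b).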
def test_einsum_broadcast(self):
        # Issue #2455: change in the handling of ellipsis; remove the
        # 'middle broadcast' error and only use the 'RIGHT' iteration in
        # prepare_op_axes. This adds automatic broadcasting on the left,
        # where it belongs; broadcasting on the right has to be explicit.
        # We need to test the optimized parsing as well.
A = np.arange(2 * 3 * 4).reshape(2, 3, 4)
B = np.arange(3)
ref = np.einsum('ijk,j->ijk', A, B, optimize=False)
for opt in [True, False]:
assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref)
assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref)
assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) # used to raise error
A = np.arange(12).reshape((4, 3))
B = np.arange(6).reshape((3, 2))
ref = np.einsum('ik,kj->ij', A, B, optimize=False)
for opt in [True, False]:
assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref)
assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref)
assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) # used to raise error
assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) # used to raise error
dims = [2, 3, 4, 5]
a = np.arange(np.prod(dims)).reshape(dims)
v = np.arange(dims[2])
ref = np.einsum('ijkl,k->ijl', a, v, optimize=False)
for opt in [True, False]:
assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref)
assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) # used to raise error
assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref)
J, K, M = 160, 160, 120
A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M)
B = np.arange(J * K * M * 3).reshape(J, K, M, 3)
ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False)
for opt in [True, False]:
assert_equal(np.einsum('...lmn,lmno->...o', A, B,
optimize=opt), ref) # used to raise error
def test_einsum_fixedstridebug(self):
# Issue #4485 obscure einsum bug
# This case revealed a bug in nditer where it reported a stride
# as 'fixed' (0) when it was in fact not fixed during processing
# (0 or 4). The reason for the bug was that the check for a fixed
# stride was using the information from the 2D inner loop reuse
# to restrict the iteration dimensions it had to validate to be
# the same, but that 2D inner loop reuse logic is only triggered
# during the buffer copying step, and hence it was invalid to
# rely on those values. The fix is to check all the dimensions
# of the stride in question, which in the test case reveals that
# the stride is not fixed.
#
# NOTE: This test is triggered by the fact that the default buffersize,
        #       used by einsum, is 8192, and 3*2731 = 8193 is larger than that
# and results in a mismatch between the buffering and the
# striding for operand A.
A = np.arange(2 * 3).reshape(2, 3).astype(np.float32)
B = np.arange(2 * 3 * 2731).reshape(2, 3, 2731).astype(np.int16)
es = np.einsum('cl, cpx->lpx', A, B)
tp = np.tensordot(A, B, axes=(0, 0))
assert_equal(es, tp)
# The following is the original test case from the bug report,
# made repeatable by changing random arrays to aranges.
A = np.arange(3 * 3).reshape(3, 3).astype(np.float64)
B = np.arange(3 * 3 * 64 * 64).reshape(3, 3, 64, 64).astype(np.float32)
es = np.einsum('cl, cpxy->lpxy', A, B)
tp = np.tensordot(A, B, axes=(0, 0))
assert_equal(es, tp)
def test_einsum_fixed_collapsingbug(self):
# Issue #5147.
        # The bug only occurred when the output argument of einsum was used.
x = np.random.normal(0, 1, (5, 5, 5, 5))
y1 = np.zeros((5, 5))
np.einsum('aabb->ab', x, out=y1)
idx = np.arange(5)
y2 = x[idx[:, None], idx[:, None], idx, idx]
assert_equal(y1, y2)
def test_einsum_failed_on_p9_and_s390x(self):
# Issues gh-14692 and gh-12689
# Bug with signed vs unsigned char errored on power9 and s390x Linux
tensor = np.random.random_sample((10, 10, 10, 10))
x = np.einsum('ijij->', tensor)
y = tensor.trace(axis1=0, axis2=2).trace()
assert_allclose(x, y)
def test_einsum_all_contig_non_contig_output(self):
        # Issue gh-5907, tests that the all-contiguous special case
        # actually checks the contiguity of the output
x = np.ones((5, 5))
out = np.ones(10)[::2]
correct_base = np.ones(10)
correct_base[::2] = 5
# Always worked (inner iteration is done with 0-stride):
np.einsum('mi,mi,mi->m', x, x, x, out=out)
assert_array_equal(out.base, correct_base)
# Example 1:
out = np.ones(10)[::2]
np.einsum('im,im,im->m', x, x, x, out=out)
assert_array_equal(out.base, correct_base)
        # Example 2, buffering causes x to be contiguous but the
        # special cases do not catch the operation beforehand:
out = np.ones((2, 2, 2))[..., 0]
correct_base = np.ones((2, 2, 2))
correct_base[..., 0] = 2
x = np.ones((2, 2), np.float32)
np.einsum('ij,jk->ik', x, x, out=out)
assert_array_equal(out.base, correct_base)
@pytest.mark.parametrize("dtype",
np.typecodes["AllFloat"] + np.typecodes["AllInteger"])
def test_different_paths(self, dtype):
# Test originally added to cover broken float16 path: gh-20305
# Likely most are covered elsewhere, at least partially.
dtype = np.dtype(dtype)
        # Simple test, designed to exercise most specialized code paths;
        # note the +0.5 for floats, which makes sure we use a float value
        # where the results must be exact.
arr = (np.arange(7) + 0.5).astype(dtype)
scalar = np.array(2, dtype=dtype)
# contig -> scalar:
res = np.einsum('i->', arr)
assert res == arr.sum()
# contig, contig -> contig:
res = np.einsum('i,i->i', arr, arr)
assert_array_equal(res, arr * arr)
# noncontig, noncontig -> contig:
res = np.einsum('i,i->i', arr.repeat(2)[::2], arr.repeat(2)[::2])
assert_array_equal(res, arr * arr)
# contig + contig -> scalar
assert np.einsum('i,i->', arr, arr) == (arr * arr).sum()
# contig + scalar -> contig (with out)
out = np.ones(7, dtype=dtype)
res = np.einsum('i,->i', arr, dtype.type(2), out=out)
assert_array_equal(res, arr * dtype.type(2))
# scalar + contig -> contig (with out)
res = np.einsum(',i->i', scalar, arr)
assert_array_equal(res, arr * dtype.type(2))
# scalar + contig -> scalar
res = np.einsum(',i->', scalar, arr)
        # Compare via einsum so there is no difference due to summation round-off:
assert res == np.einsum('i->', scalar * arr)
# contig + scalar -> scalar
res = np.einsum('i,->', arr, scalar)
        # Compare via einsum so there is no difference due to summation round-off:
assert res == np.einsum('i->', scalar * arr)
# contig + contig + contig -> scalar
arr = np.array([0.5, 0.5, 0.25, 4.5, 3.], dtype=dtype)
res = np.einsum('i,i,i->', arr, arr, arr)
assert_array_equal(res, (arr * arr * arr).sum())
# four arrays:
res = np.einsum('i,i,i,i->', arr, arr, arr, arr)
assert_array_equal(res, (arr * arr * arr * arr).sum())
def test_small_boolean_arrays(self):
# See gh-5946.
# Use array of True embedded in False.
a = np.zeros((16, 1, 1), dtype=np.bool_)[:2]
a[...] = True
out = np.zeros((16, 1, 1), dtype=np.bool_)[:2]
tgt = np.ones((2, 1, 1), dtype=np.bool_)
res = np.einsum('...ij,...jk->...ik', a, a, out=out)
assert_equal(res, tgt)
def test_out_is_res(self):
a = np.arange(9).reshape(3, 3)
res = np.einsum('...ij,...jk->...ik', a, a, out=a)
assert res is a
def optimize_compare(self, subscripts, operands=None):
# Tests all paths of the optimization function against
# conventional einsum
if operands is None:
args = [subscripts]
terms = subscripts.split('->')[0].split(',')
for term in terms:
dims = [global_size_dict[x] for x in term]
args.append(np.random.rand(*dims))
else:
args = [subscripts] + operands
noopt = np.einsum(*args, optimize=False)
opt = np.einsum(*args, optimize='greedy')
assert_almost_equal(opt, noopt)
opt = np.einsum(*args, optimize='optimal')
assert_almost_equal(opt, noopt)
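    # NOTE (assumption): global_size_dict, defined earlier in this file, maps
    # each subscript letter to a fixed dimension size, so the random operands
    # built for a given subscript string are always shape-compatible.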
def test_hadamard_like_products(self):
# Hadamard outer products
self.optimize_compare('a,ab,abc->abc')
self.optimize_compare('a,b,ab->ab')
def test_index_transformations(self):
# Simple index transformation cases
self.optimize_compare('ea,fb,gc,hd,abcd->efgh')
self.optimize_compare('ea,fb,abcd,gc,hd->efgh')
self.optimize_compare('abcd,ea,fb,gc,hd->efgh')
def test_complex(self):
# Long test cases
self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
self.optimize_compare('cd,bdhe,aidb,hgca,gc,hgibcd,hgac')
self.optimize_compare('abhe,hidj,jgba,hiab,gab')
self.optimize_compare('bde,cdh,agdb,hica,ibd,hgicd,hiac')
self.optimize_compare('chd,bde,agbc,hiad,hgc,hgi,hiad')
self.optimize_compare('chd,bde,agbc,hiad,bdi,cgh,agdb')
self.optimize_compare('bdhe,acad,hiab,agac,hibd')
def test_collapse(self):
# Inner products
self.optimize_compare('ab,ab,c->')
self.optimize_compare('ab,ab,c->c')
self.optimize_compare('ab,ab,cd,cd->')
self.optimize_compare('ab,ab,cd,cd->ac')
self.optimize_compare('ab,ab,cd,cd->cd')
self.optimize_compare('ab,ab,cd,cd,ef,ef->')
def test_expand(self):
# Outer products
self.optimize_compare('ab,cd,ef->abcdef')
self.optimize_compare('ab,cd,ef->acdf')
self.optimize_compare('ab,cd,de->abcde')
self.optimize_compare('ab,cd,de->be')
self.optimize_compare('ab,bcd,cd->abcd')
self.optimize_compare('ab,bcd,cd->abd')
def test_edge_cases(self):
# Difficult edge cases for optimization
self.optimize_compare('eb,cb,fb->cef')
self.optimize_compare('dd,fb,be,cdb->cef')
self.optimize_compare('bca,cdb,dbf,afc->')
self.optimize_compare('dcc,fce,ea,dbf->ab')
self.optimize_compare('fdf,cdd,ccd,afe->ae')
self.optimize_compare('abcd,ad')
self.optimize_compare('ed,fcd,ff,bcf->be')
self.optimize_compare('baa,dcf,af,cde->be')
self.optimize_compare('bd,db,eac->ace')
self.optimize_compare('fff,fae,bef,def->abd')
self.optimize_compare('efc,dbc,acf,fd->abe')
self.optimize_compare('ba,ac,da->bcd')
def test_inner_product(self):
# Inner products
self.optimize_compare('ab,ab')
self.optimize_compare('ab,ba')
self.optimize_compare('abc,abc')
self.optimize_compare('abc,bac')
self.optimize_compare('abc,cba')
def test_random_cases(self):
# Randomly built test cases
self.optimize_compare('aab,fa,df,ecc->bde')
self.optimize_compare('ecb,fef,bad,ed->ac')
self.optimize_compare('bcf,bbb,fbf,fc->')
self.optimize_compare('bb,ff,be->e')
self.optimize_compare('bcb,bb,fc,fff->')
self.optimize_compare('fbb,dfd,fc,fc->')
self.optimize_compare('afd,ba,cc,dc->bf')
self.optimize_compare('adb,bc,fa,cfc->d')
self.optimize_compare('bbd,bda,fc,db->acf')
self.optimize_compare('dba,ead,cad->bce')
self.optimize_compare('aef,fbc,dca->bde')
def test_combined_views_mapping(self):
# gh-10792
a = np.arange(9).reshape(1, 1, 3, 1, 3)
b = np.einsum('bbcdc->d', a)
assert_equal(b, [12])
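        # Worked check: a has shape (1, 1, 3, 1, 3), so the repeated labels
        # select the diagonal entries a[0, 0, i, 0, i] = 0, 4, 8; summing
        # them into the length-1 output 'd' gives 12.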
def test_broadcasting_dot_cases(self):
# Ensures broadcasting cases are not mistaken for GEMM
a = np.random.rand(1, 5, 4)
b = np.random.rand(4, 6)
c = np.random.rand(5, 6)
d = np.random.rand(10)
self.optimize_compare('ijk,kl,jl', operands=[a, b, c])
self.optimize_compare('ijk,kl,jl,i->i', operands=[a, b, c, d])
e = np.random.rand(1, 1, 5, 4)
f = np.random.rand(7, 7)
self.optimize_compare('abjk,kl,jl', operands=[e, b, c])
self.optimize_compare('abjk,kl,jl,ab->ab', operands=[e, b, c, f])
# Edge case found in gh-11308
g = np.arange(64).reshape(2, 4, 8)
self.optimize_compare('obk,ijk->ioj', operands=[g, g])
def test_output_order(self):
        # Ensure output order is respected for optimize cases; the
        # contraction below should yield a reshaped tensor view.
# gh-16415
a = np.ones((2, 3, 5), order='F')
b = np.ones((4, 3), order='F')
for opt in [True, False]:
tmp = np.einsum('...ft,mf->...mt', a, b, order='a', optimize=opt)
assert_(tmp.flags.f_contiguous)
tmp = np.einsum('...ft,mf->...mt', a, b, order='f', optimize=opt)
assert_(tmp.flags.f_contiguous)
tmp = np.einsum('...ft,mf->...mt', a, b, order='c', optimize=opt)
assert_(tmp.flags.c_contiguous)
tmp = np.einsum('...ft,mf->...mt', a, b, order='k', optimize=opt)
assert_(tmp.flags.c_contiguous is False)
assert_(tmp.flags.f_contiguous is False)
tmp = np.einsum('...ft,mf->...mt', a, b, optimize=opt)
assert_(tmp.flags.c_contiguous is False)
assert_(tmp.flags.f_contiguous is False)
c = np.ones((4, 3), order='C')
for opt in [True, False]:
tmp = np.einsum('...ft,mf->...mt', a, c, order='a', optimize=opt)
assert_(tmp.flags.c_contiguous)
d = np.ones((2, 3, 5), order='C')
for opt in [True, False]:
tmp = np.einsum('...ft,mf->...mt', d, c, order='a', optimize=opt)
assert_(tmp.flags.c_contiguous)
class TestEinsumPath:
def build_operands(self, string, size_dict=global_size_dict):
        # Builds random operands sized from the subscript string and size_dict
operands = [string]
terms = string.split('->')[0].split(',')
for term in terms:
dims = [size_dict[x] for x in term]
operands.append(np.random.rand(*dims))
return operands
def assert_path_equal(self, comp, benchmark):
        # Checks that the two contraction-path lists of tuples are equivalent
ret = (len(comp) == len(benchmark))
assert_(ret)
for pos in range(len(comp) - 1):
ret &= isinstance(comp[pos + 1], tuple)
ret &= (comp[pos + 1] == benchmark[pos + 1])
assert_(ret)
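    # Note on the path format: np.einsum_path returns (path, string_repr),
    # where path[0] is the literal string 'einsum_path' and every following
    # tuple lists the positions of the operands contracted at that step
    # (operand positions are renumbered after each contraction).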
    def test_memory_constraints(self):
# Ensure memory constraints are satisfied
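        # With the memory limit set to 0, no intermediate array may be
        # formed, so both strategies should collapse to a single contraction
        # over all operands at once.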
outer_test = self.build_operands('a,b,c->abc')
path, path_str = np.einsum_path(*outer_test, optimize=('greedy', 0))
self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
path, path_str = np.einsum_path(*outer_test, optimize=('optimal', 0))
self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
long_test = self.build_operands('acdf,jbje,gihb,hfac')
path, path_str = np.einsum_path(*long_test, optimize=('greedy', 0))
self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
path, path_str = np.einsum_path(*long_test, optimize=('optimal', 0))
self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
def test_long_paths(self):
# Long complex cases
# Long test 1
long_test1 = self.build_operands('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
path, path_str = np.einsum_path(*long_test1, optimize='greedy')
self.assert_path_equal(path, ['einsum_path',
(3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
path, path_str = np.einsum_path(*long_test1, optimize='optimal')
self.assert_path_equal(path, ['einsum_path',
(3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
# Long test 2
long_test2 = self.build_operands('chd,bde,agbc,hiad,bdi,cgh,agdb')
path, path_str = np.einsum_path(*long_test2, optimize='greedy')
self.assert_path_equal(path, ['einsum_path',
(3, 4), (0, 3), (3, 4), (1, 3), (1, 2), (0, 1)])
path, path_str = np.einsum_path(*long_test2, optimize='optimal')
self.assert_path_equal(path, ['einsum_path',
(0, 5), (1, 4), (3, 4), (1, 3), (1, 2), (0, 1)])
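        # Note that 'greedy' and 'optimal' legitimately disagree on long
        # test 2: greedy picks the locally cheapest pairwise contraction at
        # each step, while optimal searches the full contraction tree.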
def test_edge_paths(self):
# Difficult edge cases
# Edge test1
edge_test1 = self.build_operands('eb,cb,fb->cef')
path, path_str = np.einsum_path(*edge_test1, optimize='greedy')
self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
path, path_str = np.einsum_path(*edge_test1, optimize='optimal')
self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
# Edge test2
edge_test2 = self.build_operands('dd,fb,be,cdb->cef')
path, path_str = np.einsum_path(*edge_test2, optimize='greedy')
self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
path, path_str = np.einsum_path(*edge_test2, optimize='optimal')
self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
# Edge test3
edge_test3 = self.build_operands('bca,cdb,dbf,afc->')
path, path_str = np.einsum_path(*edge_test3, optimize='greedy')
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
path, path_str = np.einsum_path(*edge_test3, optimize='optimal')
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
# Edge test4
edge_test4 = self.build_operands('dcc,fce,ea,dbf->ab')
path, path_str = np.einsum_path(*edge_test4, optimize='greedy')
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
# Edge test5
        edge_test5 = self.build_operands('a,ac,ab,ad,cd,bd,bc->',
                                         size_dict={"a": 20, "b": 20, "c": 20, "d": 20})
        path, path_str = np.einsum_path(*edge_test5, optimize='greedy')
self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
        path, path_str = np.einsum_path(*edge_test5, optimize='optimal')
self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
def test_path_type_input(self):
# Test explicit path handling
path_test = self.build_operands('dcc,fce,ea,dbf->ab')
path, path_str = np.einsum_path(*path_test, optimize=False)
self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
path, path_str = np.einsum_path(*path_test, optimize=True)
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
exp_path = ['einsum_path', (0, 2), (0, 2), (0, 1)]
path, path_str = np.einsum_path(*path_test, optimize=exp_path)
self.assert_path_equal(path, exp_path)
# Double check einsum works on the input path
noopt = np.einsum(*path_test, optimize=False)
opt = np.einsum(*path_test, optimize=exp_path)
assert_almost_equal(noopt, opt)
def test_path_type_input_internal_trace(self):
#gh-20962
path_test = self.build_operands('cab,cdd->ab')
exp_path = ['einsum_path', (1,), (0, 1)]
path, path_str = np.einsum_path(*path_test, optimize=exp_path)
self.assert_path_equal(path, exp_path)
# Double check einsum works on the input path
noopt = np.einsum(*path_test, optimize=False)
opt = np.einsum(*path_test, optimize=exp_path)
assert_almost_equal(noopt, opt)
def test_path_type_input_invalid(self):
path_test = self.build_operands('ab,bc,cd,de->ae')
exp_path = ['einsum_path', (2, 3), (0, 1)]
assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path)
assert_raises(
RuntimeError, np.einsum_path, *path_test, optimize=exp_path)
path_test = self.build_operands('a,a,a->a')
exp_path = ['einsum_path', (1,), (0, 1)]
assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path)
assert_raises(
RuntimeError, np.einsum_path, *path_test, optimize=exp_path)
def test_spaces(self):
#gh-10794
arr = np.array([[1]])
for sp in itertools.product(['', ' '], repeat=4):
# no error for any spacing
np.einsum('{}...a{}->{}...a{}'.format(*sp), arr)
def test_overlap():
a = np.arange(9, dtype=int).reshape(3, 3)
b = np.arange(9, dtype=int).reshape(3, 3)
d = np.dot(a, b)
# sanity check
c = np.einsum('ij,jk->ik', a, b)
assert_equal(c, d)
#gh-10080, out overlaps one of the operands
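    # einsum must detect the overlap and work through a temporary here;
    # otherwise values written into b would feed back into the remaining
    # computation.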
c = np.einsum('ij,jk->ik', a, b, out=b)
assert_equal(c, d)
| 50,113 | Python | 43.153304 | 108 | 0.513559 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_deprecations.py | """
Tests related to deprecation warnings. Also a convenient place
to document how deprecations should eventually be turned into errors.
"""
import datetime
import operator
import warnings
import pytest
import tempfile
import re
import sys
import numpy as np
from numpy.testing import (
assert_raises, assert_warns, assert_, assert_array_equal, SkipTest,
KnownFailureException, break_cycles,
)
from numpy.core._multiarray_tests import fromstring_null_term_c_api
try:
import pytz
_has_pytz = True
except ImportError:
_has_pytz = False
class _DeprecationTestCase:
    # Just as a warning: warnings uses re.match, so the start of this message
# must match.
message = ''
warning_cls = DeprecationWarning
def setup_method(self):
self.warn_ctx = warnings.catch_warnings(record=True)
self.log = self.warn_ctx.__enter__()
# Do *not* ignore other DeprecationWarnings. Ignoring warnings
# can give very confusing results because of
# https://bugs.python.org/issue4180 and it is probably simplest to
# try to keep the tests cleanly giving only the right warning type.
# (While checking them set to "error" those are ignored anyway)
# We still have them show up, because otherwise they would be raised
warnings.filterwarnings("always", category=self.warning_cls)
warnings.filterwarnings("always", message=self.message,
category=self.warning_cls)
def teardown_method(self):
self.warn_ctx.__exit__()
def assert_deprecated(self, function, num=1, ignore_others=False,
function_fails=False,
exceptions=np._NoValue,
args=(), kwargs={}):
"""Test if DeprecationWarnings are given and raised.
This first checks if the function when called gives `num`
DeprecationWarnings, after that it tries to raise these
DeprecationWarnings and compares them with `exceptions`.
The exceptions can be different for cases where this code path
is simply not anticipated and the exception is replaced.
Parameters
----------
function : callable
The function to test
num : int
Number of DeprecationWarnings to expect. This should normally be 1.
ignore_others : bool
Whether warnings of the wrong type should be ignored (note that
the message is not checked)
function_fails : bool
If the function would normally fail, setting this will check for
warnings inside a try/except block.
exceptions : Exception or tuple of Exceptions
Exception to expect when turning the warnings into an error.
The default checks for DeprecationWarnings. If exceptions is
empty the function is expected to run successfully.
args : tuple
Arguments for `function`
kwargs : dict
Keyword arguments for `function`
"""
__tracebackhide__ = True # Hide traceback for py.test
# reset the log
self.log[:] = []
if exceptions is np._NoValue:
exceptions = (self.warning_cls,)
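        # First pass: run the function with warnings merely recorded, and
        # count how many warnings of the expected category were emitted.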
try:
function(*args, **kwargs)
except (Exception if function_fails else tuple()):
pass
# just in case, clear the registry
num_found = 0
for warning in self.log:
if warning.category is self.warning_cls:
num_found += 1
elif not ignore_others:
raise AssertionError(
"expected %s but got: %s" %
(self.warning_cls.__name__, warning.category))
if num is not None and num_found != num:
msg = "%i warnings found but %i expected." % (len(self.log), num)
lst = [str(w) for w in self.log]
raise AssertionError("\n".join([msg] + lst))
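        # Second pass: turn the matching warnings into errors and check that
        # the expected exception(s) propagate out of the call.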
with warnings.catch_warnings():
warnings.filterwarnings("error", message=self.message,
category=self.warning_cls)
try:
function(*args, **kwargs)
if exceptions != tuple():
raise AssertionError(
"No error raised during function call")
except exceptions:
if exceptions == tuple():
raise AssertionError(
"Error raised during function call")
def assert_not_deprecated(self, function, args=(), kwargs={}):
"""Test that warnings are not raised.
This is just a shorthand for:
self.assert_deprecated(function, num=0, ignore_others=True,
exceptions=tuple(), args=args, kwargs=kwargs)
"""
self.assert_deprecated(function, num=0, ignore_others=True,
exceptions=tuple(), args=args, kwargs=kwargs)
class _VisibleDeprecationTestCase(_DeprecationTestCase):
warning_cls = np.VisibleDeprecationWarning
class TestComparisonDeprecations(_DeprecationTestCase):
"""This tests the deprecation, for non-element-wise comparison logic.
    This used to mean that when an error occurred during element-wise comparison
    (i.e. broadcasting), NotImplemented was returned; within the comparison
    itself, False was given instead of raising the error.
Also test FutureWarning for the None comparison.
"""
message = "elementwise.* comparison failed; .*"
def test_normal_types(self):
for op in (operator.eq, operator.ne):
# Broadcasting errors:
self.assert_deprecated(op, args=(np.zeros(3), []))
a = np.zeros(3, dtype='i,i')
# (warning is issued a couple of times here)
self.assert_deprecated(op, args=(a, a[:-1]), num=None)
# ragged array comparison returns True/False
a = np.array([1, np.array([1,2,3])], dtype=object)
b = np.array([1, np.array([1,2,3])], dtype=object)
self.assert_deprecated(op, args=(a, b), num=None)
def test_string(self):
# For two string arrays, strings always raised the broadcasting error:
a = np.array(['a', 'b'])
b = np.array(['a', 'b', 'c'])
assert_raises(ValueError, lambda x, y: x == y, a, b)
# The empty list is not cast to string, and this used to pass due
# to dtype mismatch; now (2018-06-21) it correctly leads to a
# FutureWarning.
assert_warns(FutureWarning, lambda: a == [])
def test_void_dtype_equality_failures(self):
class NotArray:
def __array__(self):
raise TypeError
# Needed so Python 3 does not raise DeprecationWarning twice.
def __ne__(self, other):
return NotImplemented
self.assert_deprecated(lambda: np.arange(2) == NotArray())
self.assert_deprecated(lambda: np.arange(2) != NotArray())
def test_array_richcompare_legacy_weirdness(self):
# It doesn't really work to use assert_deprecated here, b/c part of
# the point of assert_deprecated is to check that when warnings are
# set to "error" mode then the error is propagated -- which is good!
# But here we are testing a bunch of code that is deprecated *because*
# it has the habit of swallowing up errors and converting them into
# different warnings. So assert_warns will have to be sufficient.
assert_warns(FutureWarning, lambda: np.arange(2) == "a")
assert_warns(FutureWarning, lambda: np.arange(2) != "a")
# No warning for scalar comparisons
with warnings.catch_warnings():
warnings.filterwarnings("error")
assert_(not (np.array(0) == "a"))
assert_(np.array(0) != "a")
assert_(not (np.int16(0) == "a"))
assert_(np.int16(0) != "a")
for arg1 in [np.asarray(0), np.int16(0)]:
struct = np.zeros(2, dtype="i4,i4")
for arg2 in [struct, "a"]:
for f in [operator.lt, operator.le, operator.gt, operator.ge]:
                    with warnings.catch_warnings(record=True) as l:
warnings.filterwarnings("always")
assert_raises(TypeError, f, arg1, arg2)
assert_(not l)
class TestDatetime64Timezone(_DeprecationTestCase):
"""Parsing of datetime64 with timezones deprecated in 1.11.0, because
datetime64 is now timezone naive rather than UTC only.
It will be quite a while before we can remove this, because, at the very
least, a lot of existing code uses the 'Z' modifier to avoid conversion
from local time to UTC, even if otherwise it handles time in a timezone
naive fashion.
"""
def test_string(self):
self.assert_deprecated(np.datetime64, args=('2000-01-01T00+01',))
self.assert_deprecated(np.datetime64, args=('2000-01-01T00Z',))
@pytest.mark.skipif(not _has_pytz,
reason="The pytz module is not available.")
def test_datetime(self):
tz = pytz.timezone('US/Eastern')
dt = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=tz)
self.assert_deprecated(np.datetime64, args=(dt,))
class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase):
"""Assigning the 'data' attribute of an ndarray is unsafe as pointed
out in gh-7093. Eventually, such assignment should NOT be allowed, but
    in the interests of maintaining backwards compatibility, only a
    DeprecationWarning will be raised instead for the time being to give
    developers time to
refactor relevant code.
"""
def test_data_attr_assignment(self):
a = np.arange(10)
b = np.linspace(0, 1, 10)
self.message = ("Assigning the 'data' attribute is an "
"inherently unsafe operation and will "
"be removed in the future.")
self.assert_deprecated(a.__setattr__, args=('data', b.data))
class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTestCase):
"""
If a 'width' parameter is passed into ``binary_repr`` that is insufficient to
represent the number in base 2 (positive) or 2's complement (negative) form,
the function used to silently ignore the parameter and return a representation
using the minimal number of bits needed for the form in question. Such behavior
is now considered unsafe from a user perspective and will raise an error in the future.
"""
def test_insufficient_width_positive(self):
args = (10,)
kwargs = {'width': 2}
self.message = ("Insufficient bit width provided. This behavior "
"will raise an error in the future.")
self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)
def test_insufficient_width_negative(self):
args = (-5,)
kwargs = {'width': 2}
self.message = ("Insufficient bit width provided. This behavior "
"will raise an error in the future.")
self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)
class TestDTypeAttributeIsDTypeDeprecation(_DeprecationTestCase):
# Deprecated 2021-01-05, NumPy 1.21
message = r".*`.dtype` attribute"
def test_deprecation_dtype_attribute_is_dtype(self):
class dt:
dtype = "f8"
class vdt(np.void):
dtype = "f,f"
self.assert_deprecated(lambda: np.dtype(dt))
self.assert_deprecated(lambda: np.dtype(dt()))
self.assert_deprecated(lambda: np.dtype(vdt))
self.assert_deprecated(lambda: np.dtype(vdt(1)))
class TestTestDeprecated:
def test_assert_deprecated(self):
test_case_instance = _DeprecationTestCase()
test_case_instance.setup_method()
assert_raises(AssertionError,
test_case_instance.assert_deprecated,
lambda: None)
def foo():
warnings.warn("foo", category=DeprecationWarning, stacklevel=2)
test_case_instance.assert_deprecated(foo)
test_case_instance.teardown_method()
class TestNonNumericConjugate(_DeprecationTestCase):
"""
Deprecate no-op behavior of ndarray.conjugate on non-numeric dtypes,
which conflicts with the error behavior of np.conjugate.
"""
def test_conjugate(self):
for a in np.array(5), np.array(5j):
self.assert_not_deprecated(a.conjugate)
for a in (np.array('s'), np.array('2016', 'M'),
np.array((1, 2), [('a', int), ('b', int)])):
self.assert_deprecated(a.conjugate)
class TestNPY_CHAR(_DeprecationTestCase):
# 2017-05-03, 1.13.0
def test_npy_char_deprecation(self):
from numpy.core._multiarray_tests import npy_char_deprecation
self.assert_deprecated(npy_char_deprecation)
assert_(npy_char_deprecation() == 'S1')
class TestPyArray_AS1D(_DeprecationTestCase):
def test_npy_pyarrayas1d_deprecation(self):
from numpy.core._multiarray_tests import npy_pyarrayas1d_deprecation
assert_raises(NotImplementedError, npy_pyarrayas1d_deprecation)
class TestPyArray_AS2D(_DeprecationTestCase):
def test_npy_pyarrayas2d_deprecation(self):
from numpy.core._multiarray_tests import npy_pyarrayas2d_deprecation
assert_raises(NotImplementedError, npy_pyarrayas2d_deprecation)
class TestDatetimeEvent(_DeprecationTestCase):
# 2017-08-11, 1.14.0
def test_3_tuple(self):
for cls in (np.datetime64, np.timedelta64):
# two valid uses - (unit, num) and (unit, num, den, None)
self.assert_not_deprecated(cls, args=(1, ('ms', 2)))
self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None)))
# trying to use the event argument, removed in 1.7.0, is deprecated
# it used to be a uint8
self.assert_deprecated(cls, args=(1, ('ms', 2, 'event')))
self.assert_deprecated(cls, args=(1, ('ms', 2, 63)))
self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event')))
self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63)))
class TestTruthTestingEmptyArrays(_DeprecationTestCase):
# 2017-09-25, 1.14.0
message = '.*truth value of an empty array is ambiguous.*'
def test_1d(self):
self.assert_deprecated(bool, args=(np.array([]),))
def test_2d(self):
self.assert_deprecated(bool, args=(np.zeros((1, 0)),))
self.assert_deprecated(bool, args=(np.zeros((0, 1)),))
self.assert_deprecated(bool, args=(np.zeros((0, 0)),))
class TestBincount(_DeprecationTestCase):
# 2017-06-01, 1.14.0
def test_bincount_minlength(self):
self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None))
class TestGeneratorSum(_DeprecationTestCase):
# 2018-02-25, 1.15.0
def test_generator_sum(self):
self.assert_deprecated(np.sum, args=((i for i in range(5)),))
class TestPositiveOnNonNumerical(_DeprecationTestCase):
# 2018-06-28, 1.16.0
def test_positive_on_non_number(self):
self.assert_deprecated(operator.pos, args=(np.array('foo'),))
class TestFromstring(_DeprecationTestCase):
# 2017-10-19, 1.14
def test_fromstring(self):
self.assert_deprecated(np.fromstring, args=('\x00'*80,))
class TestFromStringAndFileInvalidData(_DeprecationTestCase):
# 2019-06-08, 1.17.0
# Tests should be moved to real tests when deprecation is done.
message = "string or file could not be read to its end"
@pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
def test_deprecate_unparsable_data_file(self, invalid_str):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
with tempfile.TemporaryFile(mode="w") as f:
x.tofile(f, sep=',', format='%.2f')
f.write(invalid_str)
f.seek(0)
self.assert_deprecated(lambda: np.fromfile(f, sep=","))
f.seek(0)
self.assert_deprecated(lambda: np.fromfile(f, sep=",", count=5))
# Should not raise:
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
f.seek(0)
res = np.fromfile(f, sep=",", count=4)
assert_array_equal(res, x)
@pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
def test_deprecate_unparsable_string(self, invalid_str):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
x_str = "1.51,2,3.51,4{}".format(invalid_str)
self.assert_deprecated(lambda: np.fromstring(x_str, sep=","))
self.assert_deprecated(lambda: np.fromstring(x_str, sep=",", count=5))
        # The C-level API can also use zero-terminated strings instead of
        # fixed-size ones, so test that as well:
bytestr = x_str.encode("ascii")
self.assert_deprecated(lambda: fromstring_null_term_c_api(bytestr))
with assert_warns(DeprecationWarning):
            # this is slightly strange, in that fromstring leaves data
            # potentially uninitialized (it would arguably be better to error
            # when all data has been read but count is larger than the actual
            # data).
res = np.fromstring(x_str, sep=",", count=5)
assert_array_equal(res[:-1], x)
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# Should not raise:
res = np.fromstring(x_str, sep=",", count=4)
assert_array_equal(res, x)
class Test_GetSet_NumericOps(_DeprecationTestCase):
# 2018-09-20, 1.16.0
def test_get_numeric_ops(self):
from numpy.core._multiarray_tests import getset_numericops
self.assert_deprecated(getset_numericops, num=2)
        # empty kwargs prevent any state from actually changing, which would
        # break other tests.
self.assert_deprecated(np.set_numeric_ops, kwargs={})
assert_raises(ValueError, np.set_numeric_ops, add='abc')
class TestShape1Fields(_DeprecationTestCase):
warning_cls = FutureWarning
# 2019-05-20, 1.17.0
def test_shape_1_fields(self):
self.assert_deprecated(np.dtype, args=([('a', int, 1)],))
class TestNonZero(_DeprecationTestCase):
# 2019-05-26, 1.17.0
def test_zerod(self):
self.assert_deprecated(lambda: np.nonzero(np.array(0)))
self.assert_deprecated(lambda: np.nonzero(np.array(1)))
def test_deprecate_ragged_arrays():
# 2019-11-29 1.19.0
#
# NEP 34 deprecated automatic object dtype when creating ragged
# arrays. Also see the "ragged" tests in `test_multiarray`
#
# emits a VisibleDeprecationWarning
arg = [1, [2, 3]]
with assert_warns(np.VisibleDeprecationWarning):
np.array(arg)
class TestTooDeepDeprecation(_VisibleDeprecationTestCase):
# NumPy 1.20, 2020-05-08
# This is a bit similar to the above ragged array deprecation case.
message = re.escape("Creating an ndarray from nested sequences exceeding")
def test_deprecation(self):
nested = [1]
for i in range(np.MAXDIMS - 1):
nested = [nested]
self.assert_not_deprecated(np.array, args=(nested,))
self.assert_not_deprecated(np.array,
args=(nested,), kwargs=dict(dtype=object))
self.assert_deprecated(np.array, args=([nested],))
class TestToString(_DeprecationTestCase):
# 2020-03-06 1.19.0
message = re.escape("tostring() is deprecated. Use tobytes() instead.")
def test_tostring(self):
arr = np.array(list(b"test\xFF"), dtype=np.uint8)
self.assert_deprecated(arr.tostring)
def test_tostring_matches_tobytes(self):
arr = np.array(list(b"test\xFF"), dtype=np.uint8)
b = arr.tobytes()
with assert_warns(DeprecationWarning):
s = arr.tostring()
assert s == b
class TestDTypeCoercion(_DeprecationTestCase):
# 2020-02-06 1.19.0
message = "Converting .* to a dtype .*is deprecated"
deprecated_types = [
# The builtin scalar super types:
np.generic, np.flexible, np.number,
np.inexact, np.floating, np.complexfloating,
np.integer, np.unsignedinteger, np.signedinteger,
# character is a deprecated S1 special case:
np.character,
]
def test_dtype_coercion(self):
for scalar_type in self.deprecated_types:
self.assert_deprecated(np.dtype, args=(scalar_type,))
def test_array_construction(self):
for scalar_type in self.deprecated_types:
self.assert_deprecated(np.array, args=([], scalar_type,))
def test_not_deprecated(self):
# All specific types are not deprecated:
for group in np.sctypes.values():
for scalar_type in group:
self.assert_not_deprecated(np.dtype, args=(scalar_type,))
for scalar_type in [type, dict, list, tuple]:
# Typical python types are coerced to object currently:
self.assert_not_deprecated(np.dtype, args=(scalar_type,))
class BuiltInRoundComplexDType(_DeprecationTestCase):
# 2020-03-31 1.19.0
deprecated_types = [np.csingle, np.cdouble, np.clongdouble]
not_deprecated_types = [
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64,
np.float16, np.float32, np.float64,
]
def test_deprecated(self):
for scalar_type in self.deprecated_types:
scalar = scalar_type(0)
self.assert_deprecated(round, args=(scalar,))
self.assert_deprecated(round, args=(scalar, 0))
self.assert_deprecated(round, args=(scalar,), kwargs={'ndigits': 0})
def test_not_deprecated(self):
for scalar_type in self.not_deprecated_types:
scalar = scalar_type(0)
self.assert_not_deprecated(round, args=(scalar,))
self.assert_not_deprecated(round, args=(scalar, 0))
self.assert_not_deprecated(round, args=(scalar,), kwargs={'ndigits': 0})
class TestIncorrectAdvancedIndexWithEmptyResult(_DeprecationTestCase):
# 2020-05-27, NumPy 1.20.0
message = "Out of bound index found. This was previously ignored.*"
@pytest.mark.parametrize("index", [([3, 0],), ([0, 0], [3, 0])])
def test_empty_subspace(self, index):
        # Test for both a single and two/multiple advanced indices. These
        # will raise an IndexError in the future.
arr = np.ones((2, 2, 0))
self.assert_deprecated(arr.__getitem__, args=(index,))
self.assert_deprecated(arr.__setitem__, args=(index, 0.))
# for this array, the subspace is only empty after applying the slice
arr2 = np.ones((2, 2, 1))
index2 = (slice(0, 0),) + index
self.assert_deprecated(arr2.__getitem__, args=(index2,))
self.assert_deprecated(arr2.__setitem__, args=(index2, 0.))
def test_empty_index_broadcast_not_deprecated(self):
arr = np.ones((2, 2, 2))
index = ([[3], [2]], []) # broadcast to an empty result.
self.assert_not_deprecated(arr.__getitem__, args=(index,))
self.assert_not_deprecated(arr.__setitem__,
args=(index, np.empty((2, 0, 2))))
class TestNonExactMatchDeprecation(_DeprecationTestCase):
# 2020-04-22
def test_non_exact_match(self):
arr = np.array([[3, 6, 6], [4, 5, 1]])
# misspelt mode check
self.assert_deprecated(lambda: np.ravel_multi_index(arr, (7, 6), mode='Cilp'))
        # using a completely different word whose first character matches 'right'
self.assert_deprecated(lambda: np.searchsorted(arr[0], 4, side='Random'))
class TestDeprecatedGlobals(_DeprecationTestCase):
# 2020-06-06
def test_type_aliases(self):
# from builtins
self.assert_deprecated(lambda: np.bool(True))
self.assert_deprecated(lambda: np.int(1))
self.assert_deprecated(lambda: np.float(1))
self.assert_deprecated(lambda: np.complex(1))
self.assert_deprecated(lambda: np.object())
self.assert_deprecated(lambda: np.str('abc'))
# from np.compat
self.assert_deprecated(lambda: np.long(1))
self.assert_deprecated(lambda: np.unicode('abc'))
# from np.core.numerictypes
self.assert_deprecated(lambda: np.typeDict)
class TestMatrixInOuter(_DeprecationTestCase):
# 2020-05-13 NumPy 1.20.0
message = (r"add.outer\(\) was passed a numpy matrix as "
r"(first|second) argument.")
def test_deprecated(self):
arr = np.array([1, 2, 3])
m = np.array([1, 2, 3]).view(np.matrix)
self.assert_deprecated(np.add.outer, args=(m, m), num=2)
self.assert_deprecated(np.add.outer, args=(arr, m))
self.assert_deprecated(np.add.outer, args=(m, arr))
self.assert_not_deprecated(np.add.outer, args=(arr, arr))
class TestRaggedArray(_DeprecationTestCase):
# 2020-07-24, NumPy 1.20.0
message = "setting an array element with a sequence"
def test_deprecated(self):
arr = np.ones((1, 1))
        # Deprecated if the array is a leaf node:
self.assert_deprecated(lambda: np.array([arr, 0], dtype=np.float64))
self.assert_deprecated(lambda: np.array([0, arr], dtype=np.float64))
# And when it is an assignment into a lower dimensional subarray:
self.assert_deprecated(lambda: np.array([arr, [0]], dtype=np.float64))
self.assert_deprecated(lambda: np.array([[0], arr], dtype=np.float64))
class FlatteningConcatenateUnsafeCast(_DeprecationTestCase):
# NumPy 1.20, 2020-09-03
message = "concatenate with `axis=None` will use same-kind casting"
def test_deprecated(self):
self.assert_deprecated(np.concatenate,
args=(([0.], [1.]),),
kwargs=dict(axis=None, out=np.empty(2, dtype=np.int64)))
def test_not_deprecated(self):
self.assert_not_deprecated(np.concatenate,
args=(([0.], [1.]),),
kwargs={'axis': None, 'out': np.empty(2, dtype=np.int64),
'casting': "unsafe"})
with assert_raises(TypeError):
# Tests should notice if the deprecation warning is given first...
np.concatenate(([0.], [1.]), out=np.empty(2, dtype=np.int64),
casting="same_kind")
class TestDeprecateSubarrayDTypeDuringArrayCoercion(_DeprecationTestCase):
warning_cls = FutureWarning
message = "(creating|casting) an array (with|to) a subarray dtype"
def test_deprecated_array(self):
# Arrays are more complex, since they "broadcast" on success:
arr = np.array([1, 2])
self.assert_deprecated(lambda: arr.astype("(2)i,"))
with pytest.warns(FutureWarning):
res = arr.astype("(2)i,")
assert_array_equal(res, [[1, 2], [1, 2]])
self.assert_deprecated(lambda: np.array(arr, dtype="(2)i,"))
with pytest.warns(FutureWarning):
res = np.array(arr, dtype="(2)i,")
assert_array_equal(res, [[1, 2], [1, 2]])
with pytest.warns(FutureWarning):
res = np.array([[(1,), (2,)], arr], dtype="(2)i,")
assert_array_equal(res, [[[1, 1], [2, 2]], [[1, 2], [1, 2]]])
def test_deprecated_and_error(self):
# These error paths do not give a warning, but will succeed in the
# future.
arr = np.arange(5 * 2).reshape(5, 2)
def check():
with pytest.raises(ValueError):
arr.astype("(2,2)f")
self.assert_deprecated(check)
def check():
with pytest.raises(ValueError):
np.array(arr, dtype="(2,2)f")
self.assert_deprecated(check)
class TestFutureWarningArrayLikeNotIterable(_DeprecationTestCase):
# Deprecated 2020-12-09, NumPy 1.20
warning_cls = FutureWarning
message = "The input object of type.*but not a sequence"
@pytest.mark.parametrize("protocol",
["__array__", "__array_interface__", "__array_struct__"])
def test_deprecated(self, protocol):
"""Test that these objects give a warning since they are not 0-D,
not coerced at the top level `np.array(obj)`, but nested, and do
*not* define the sequence protocol.
NOTE: Tests for the versions including __len__ and __getitem__ exist
in `test_array_coercion.py` and they can be modified or amended
        when this deprecation has expired.
"""
blueprint = np.arange(10)
MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol)})
self.assert_deprecated(lambda: np.array([MyArr()], dtype=object))
@pytest.mark.parametrize("protocol",
["__array__", "__array_interface__", "__array_struct__"])
def test_0d_not_deprecated(self, protocol):
# 0-D always worked (albeit it would use __float__ or similar for the
# conversion, which may not happen anymore)
blueprint = np.array(1.)
MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol)})
myarr = MyArr()
self.assert_not_deprecated(lambda: np.array([myarr], dtype=object))
res = np.array([myarr], dtype=object)
expected = np.empty(1, dtype=object)
expected[0] = myarr
assert_array_equal(res, expected)
@pytest.mark.parametrize("protocol",
["__array__", "__array_interface__", "__array_struct__"])
def test_unnested_not_deprecated(self, protocol):
blueprint = np.arange(10)
MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol)})
myarr = MyArr()
self.assert_not_deprecated(lambda: np.array(myarr))
res = np.array(myarr)
assert_array_equal(res, blueprint)
@pytest.mark.parametrize("protocol",
["__array__", "__array_interface__", "__array_struct__"])
def test_strange_dtype_handling(self, protocol):
"""The old code would actually use the dtype from the array, but
then end up not using the array (for dimension discovery)
"""
blueprint = np.arange(10).astype("f4")
MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol),
"__float__": lambda _: 0.5})
myarr = MyArr()
# Make sure we warn (and capture the FutureWarning)
with pytest.warns(FutureWarning, match=self.message):
res = np.array([[myarr]])
assert res.shape == (1, 1)
assert res.dtype == "f4"
assert res[0, 0] == 0.5
@pytest.mark.parametrize("protocol",
["__array__", "__array_interface__", "__array_struct__"])
def test_assignment_not_deprecated(self, protocol):
# If the result is dtype=object we do not unpack a nested array or
# array-like, if it is nested at exactly the right depth.
# NOTE: We actually do still call __array__, etc. but ignore the result
# in the end. For `dtype=object` we could optimize that away.
blueprint = np.arange(10).astype("f4")
MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol),
"__float__": lambda _: 0.5})
myarr = MyArr()
res = np.empty(3, dtype=object)
def set():
res[:] = [myarr, myarr, myarr]
self.assert_not_deprecated(set)
assert res[0] is myarr
assert res[1] is myarr
assert res[2] is myarr
class TestDeprecatedUnpickleObjectScalar(_DeprecationTestCase):
# Deprecated 2020-11-24, NumPy 1.20
"""
Technically, it should be impossible to create numpy object scalars,
but there was an unpickle path that would in theory allow it. That
path is invalid and must lead to the warning.
"""
message = "Unpickling a scalar with object dtype is deprecated."
def test_deprecated(self):
ctor = np.core.multiarray.scalar
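        # np.core.multiarray.scalar is the low-level constructor that pickle
        # uses to rebuild scalars; calling it directly with an object dtype
        # exercises the deprecated unpickle path.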
self.assert_deprecated(lambda: ctor(np.dtype("O"), 1))
try:
with warnings.catch_warnings():
warnings.simplefilter("always")
import nose # noqa: F401
except ImportError:
HAVE_NOSE = False
else:
HAVE_NOSE = True
@pytest.mark.skipif(not HAVE_NOSE, reason="Needs nose")
class TestNoseDecoratorsDeprecated(_DeprecationTestCase):
class DidntSkipException(Exception):
pass
def test_slow(self):
def _test_slow():
@np.testing.dec.slow
def slow_func(x, y, z):
pass
assert_(slow_func.slow)
self.assert_deprecated(_test_slow)
def test_setastest(self):
def _test_setastest():
@np.testing.dec.setastest()
def f_default(a):
pass
@np.testing.dec.setastest(True)
def f_istest(a):
pass
@np.testing.dec.setastest(False)
def f_isnottest(a):
pass
assert_(f_default.__test__)
assert_(f_istest.__test__)
assert_(not f_isnottest.__test__)
self.assert_deprecated(_test_setastest, num=3)
def test_skip_functions_hardcoded(self):
def _test_skip_functions_hardcoded():
@np.testing.dec.skipif(True)
def f1(x):
raise self.DidntSkipException
try:
f1('a')
except self.DidntSkipException:
raise Exception('Failed to skip')
except SkipTest().__class__:
pass
@np.testing.dec.skipif(False)
def f2(x):
raise self.DidntSkipException
try:
f2('a')
except self.DidntSkipException:
pass
except SkipTest().__class__:
raise Exception('Skipped when not expected to')
self.assert_deprecated(_test_skip_functions_hardcoded, num=2)
def test_skip_functions_callable(self):
def _test_skip_functions_callable():
def skip_tester():
return skip_flag == 'skip me!'
@np.testing.dec.skipif(skip_tester)
def f1(x):
raise self.DidntSkipException
try:
skip_flag = 'skip me!'
f1('a')
except self.DidntSkipException:
raise Exception('Failed to skip')
except SkipTest().__class__:
pass
@np.testing.dec.skipif(skip_tester)
def f2(x):
raise self.DidntSkipException
try:
skip_flag = 'five is right out!'
f2('a')
except self.DidntSkipException:
pass
except SkipTest().__class__:
raise Exception('Skipped when not expected to')
self.assert_deprecated(_test_skip_functions_callable, num=2)
def test_skip_generators_hardcoded(self):
def _test_skip_generators_hardcoded():
@np.testing.dec.knownfailureif(True, "This test is known to fail")
def g1(x):
yield from range(x)
try:
for j in g1(10):
pass
except KnownFailureException().__class__:
pass
else:
raise Exception('Failed to mark as known failure')
@np.testing.dec.knownfailureif(False, "This test is NOT known to fail")
def g2(x):
yield from range(x)
raise self.DidntSkipException('FAIL')
try:
for j in g2(10):
pass
except KnownFailureException().__class__:
raise Exception('Marked incorrectly as known failure')
except self.DidntSkipException:
pass
self.assert_deprecated(_test_skip_generators_hardcoded, num=2)
def test_skip_generators_callable(self):
def _test_skip_generators_callable():
def skip_tester():
return skip_flag == 'skip me!'
@np.testing.dec.knownfailureif(skip_tester, "This test is known to fail")
def g1(x):
yield from range(x)
try:
skip_flag = 'skip me!'
for j in g1(10):
pass
except KnownFailureException().__class__:
pass
else:
raise Exception('Failed to mark as known failure')
@np.testing.dec.knownfailureif(skip_tester, "This test is NOT known to fail")
def g2(x):
yield from range(x)
raise self.DidntSkipException('FAIL')
try:
skip_flag = 'do not skip'
for j in g2(10):
pass
except KnownFailureException().__class__:
raise Exception('Marked incorrectly as known failure')
except self.DidntSkipException:
pass
self.assert_deprecated(_test_skip_generators_callable, num=2)
def test_deprecated(self):
def _test_deprecated():
@np.testing.dec.deprecated(True)
def non_deprecated_func():
pass
@np.testing.dec.deprecated()
def deprecated_func():
import warnings
warnings.warn("TEST: deprecated func", DeprecationWarning, stacklevel=1)
@np.testing.dec.deprecated()
def deprecated_func2():
import warnings
warnings.warn("AHHHH", stacklevel=1)
raise ValueError
@np.testing.dec.deprecated()
def deprecated_func3():
import warnings
warnings.warn("AHHHH", stacklevel=1)
# marked as deprecated, but does not raise DeprecationWarning
assert_raises(AssertionError, non_deprecated_func)
# should be silent
deprecated_func()
with warnings.catch_warnings(record=True):
warnings.simplefilter("always") # do not propagate unrelated warnings
# fails if deprecated decorator just disables test. See #1453.
assert_raises(ValueError, deprecated_func2)
# warning is not a DeprecationWarning
assert_raises(AssertionError, deprecated_func3)
self.assert_deprecated(_test_deprecated, num=4)
def test_parametrize(self):
def _test_parametrize():
# dec.parametrize assumes that it is being run by nose. Because
# we are running under pytest, we need to explicitly check the
# results.
@np.testing.dec.parametrize('base, power, expected',
[(1, 1, 1),
(2, 1, 2),
(2, 2, 4)])
def check_parametrize(base, power, expected):
assert_(base**power == expected)
count = 0
for test in check_parametrize():
test[0](*test[1:])
count += 1
assert_(count == 3)
self.assert_deprecated(_test_parametrize)
class TestSingleElementSignature(_DeprecationTestCase):
# Deprecated 2021-04-01, NumPy 1.21
message = r"The use of a length 1"
def test_deprecated(self):
self.assert_deprecated(lambda: np.add(1, 2, signature="d"))
self.assert_deprecated(lambda: np.add(1, 2, sig=(np.dtype("l"),)))
class TestComparisonBadDType(_DeprecationTestCase):
# Deprecated 2021-04-01, NumPy 1.21
message = r"using `dtype=` in comparisons is only useful for"
def test_deprecated(self):
self.assert_deprecated(lambda: np.equal(1, 1, dtype=np.int64))
        # Not an error, only deprecated, during the transition period
self.assert_deprecated(lambda: np.equal(1, 1, sig=(None, None, "l")))
def test_not_deprecated(self):
np.equal(True, False, dtype=bool)
np.equal(3, 5, dtype=bool, casting="unsafe")
np.equal([None], [4], dtype=object)
class TestComparisonBadObjectDType(_DeprecationTestCase):
# Deprecated 2021-04-01, NumPy 1.21 (different branch of the above one)
message = r"using `dtype=object` \(or equivalent signature\) will"
warning_cls = FutureWarning
def test_deprecated(self):
self.assert_deprecated(lambda: np.equal(1, 1, dtype=object))
self.assert_deprecated(
lambda: np.equal(1, 1, sig=(None, None, object)))
class TestCtypesGetter(_DeprecationTestCase):
# Deprecated 2021-05-18, Numpy 1.21.0
warning_cls = DeprecationWarning
ctypes = np.array([1]).ctypes
@pytest.mark.parametrize(
"name", ["get_data", "get_shape", "get_strides", "get_as_parameter"]
)
def test_deprecated(self, name: str) -> None:
func = getattr(self.ctypes, name)
self.assert_deprecated(lambda: func())
@pytest.mark.parametrize(
"name", ["data", "shape", "strides", "_as_parameter_"]
)
def test_not_deprecated(self, name: str) -> None:
self.assert_not_deprecated(lambda: getattr(self.ctypes, name))
class TestUFuncForcedDTypeWarning(_DeprecationTestCase):
message = "The `dtype` and `signature` arguments to ufuncs only select the"
def test_not_deprecated(self):
import pickle
        # does not warn (test relies on bad pickling behaviour; simply remove
        # it if the `assert int64 is not int64_2` should start failing).
int64 = np.dtype("int64")
int64_2 = pickle.loads(pickle.dumps(int64))
assert int64 is not int64_2
self.assert_not_deprecated(lambda: np.add(3, 4, dtype=int64_2))
def test_deprecation(self):
int64 = np.dtype("int64")
self.assert_deprecated(lambda: np.add(3, 5, dtype=int64.newbyteorder()))
self.assert_deprecated(lambda: np.add(3, 5, dtype="m8[ns]"))
def test_behaviour(self):
int64 = np.dtype("int64")
arr = np.arange(10, dtype="m8[s]")
with pytest.warns(DeprecationWarning, match=self.message):
np.add(3, 5, dtype=int64.newbyteorder())
with pytest.warns(DeprecationWarning, match=self.message):
np.add(3, 5, dtype="m8[ns]") # previously used the "ns"
with pytest.warns(DeprecationWarning, match=self.message):
np.add(arr, arr, dtype="m8[ns]") # never preserved the "ns"
with pytest.warns(DeprecationWarning, match=self.message):
np.maximum(arr, arr, dtype="m8[ns]") # previously used the "ns"
with pytest.warns(DeprecationWarning, match=self.message):
np.maximum.reduce(arr, dtype="m8[ns]") # never preserved the "ns"
PARTITION_DICT = {
"partition method": np.arange(10).partition,
"argpartition method": np.arange(10).argpartition,
"partition function": lambda kth: np.partition(np.arange(10), kth),
"argpartition function": lambda kth: np.argpartition(np.arange(10), kth),
}
@pytest.mark.parametrize("func", PARTITION_DICT.values(), ids=PARTITION_DICT)
class TestPartitionBoolIndex(_DeprecationTestCase):
# Deprecated 2021-09-29, NumPy 1.22
warning_cls = DeprecationWarning
message = "Passing booleans as partition index is deprecated"
def test_deprecated(self, func):
self.assert_deprecated(lambda: func(True))
self.assert_deprecated(lambda: func([False, True]))
def test_not_deprecated(self, func):
self.assert_not_deprecated(lambda: func(1))
self.assert_not_deprecated(lambda: func([0, 1]))
class TestMachAr(_DeprecationTestCase):
# Deprecated 2021-10-19, NumPy 1.22
warning_cls = DeprecationWarning
def test_deprecated(self):
self.assert_deprecated(lambda: np.MachAr)
def test_deprecated_module(self):
self.assert_deprecated(lambda: getattr(np.core, "machar"))
def test_deprecated_attr(self):
finfo = np.finfo(float)
self.assert_deprecated(lambda: getattr(finfo, "machar"))
class TestQuantileInterpolationDeprecation(_DeprecationTestCase):
# Deprecated 2021-11-08, NumPy 1.22
@pytest.mark.parametrize("func",
[np.percentile, np.quantile, np.nanpercentile, np.nanquantile])
def test_deprecated(self, func):
self.assert_deprecated(
lambda: func([0., 1.], 0., interpolation="linear"))
self.assert_deprecated(
lambda: func([0., 1.], 0., interpolation="nearest"))
@pytest.mark.parametrize("func",
[np.percentile, np.quantile, np.nanpercentile, np.nanquantile])
def test_both_passed(self, func):
with warnings.catch_warnings():
# catch the DeprecationWarning so that it does not raise:
warnings.simplefilter("always", DeprecationWarning)
with pytest.raises(TypeError):
func([0., 1.], 0., interpolation="nearest", method="nearest")
class TestMemEventHook(_DeprecationTestCase):
# Deprecated 2021-11-18, NumPy 1.23
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/_multiarray_tests.c.src
import numpy.core._multiarray_tests as ma_tests
with pytest.warns(DeprecationWarning,
match='PyDataMem_SetEventHook is deprecated'):
ma_tests.test_pydatamem_seteventhook_start()
# force an allocation and free of a numpy array
            # needs to be larger than the limit of the small-memory cache in ctors.c
a = np.zeros(1000)
del a
break_cycles()
with pytest.warns(DeprecationWarning,
match='PyDataMem_SetEventHook is deprecated'):
ma_tests.test_pydatamem_seteventhook_end()
class TestArrayFinalizeNone(_DeprecationTestCase):
message = "Setting __array_finalize__ = None"
def test_use_none_is_deprecated(self):
# Deprecated way that ndarray itself showed nothing needs finalizing.
class NoFinalize(np.ndarray):
__array_finalize__ = None
self.assert_deprecated(lambda: np.array(1).view(NoFinalize))
class TestAxisNotMAXDIMS(_DeprecationTestCase):
# Deprecated 2022-01-08, NumPy 1.23
message = r"Using `axis=32` \(MAXDIMS\) is deprecated"
def test_deprecated(self):
a = np.zeros((1,)*32)
self.assert_deprecated(lambda: np.repeat(a, 1, axis=np.MAXDIMS))
class TestLoadtxtParseIntsViaFloat(_DeprecationTestCase):
# Deprecated 2022-07-03, NumPy 1.23
# This test can be removed without replacement after the deprecation.
# The tests:
# * numpy/lib/tests/test_loadtxt.py::test_integer_signs
# * lib/tests/test_loadtxt.py::test_implicit_cast_float_to_int_fails
# Have a warning filter that needs to be removed.
message = r"loadtxt\(\): Parsing an integer via a float is deprecated.*"
@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
def test_deprecated_warning(self, dtype):
with pytest.warns(DeprecationWarning, match=self.message):
np.loadtxt(["10.5"], dtype=dtype)
@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
def test_deprecated_raised(self, dtype):
# The DeprecationWarning is chained when raised, so test manually:
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
try:
np.loadtxt(["10.5"], dtype=dtype)
except ValueError as e:
assert isinstance(e.__cause__, DeprecationWarning)
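# Sketch of the behavior during the deprecation window above: float syntax is
# still accepted for integer dtypes, with the parsed value cast (truncated)
# to the target dtype.
def _demo_loadtxt_int_via_float():
    import warnings
    import numpy as np
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        out = np.loadtxt(["10.5"], dtype=np.int64)
    assert out == 10  # parsed via float, then cast to the integer dtype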
| 47,590 | Python | 37.348912 | 91 | 0.610044 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_print.py | import sys
import pytest
import numpy as np
from numpy.testing import assert_, assert_equal
from numpy.core.tests._locales import CommaDecimalPointLocale
from io import StringIO
_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'}
@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
def test_float_types(tp):
""" Check formatting.
This is only for the str function, and only for simple types.
    The precision of np.float32 and np.longdouble isn't the same as the
    Python float precision.
"""
for x in [0, 1, -1, 1e20]:
assert_equal(str(tp(x)), str(float(x)),
err_msg='Failed str formatting for type %s' % tp)
if tp(1e16).itemsize > 4:
assert_equal(str(tp(1e16)), str(float('1e16')),
err_msg='Failed str formatting for type %s' % tp)
else:
ref = '1e+16'
assert_equal(str(tp(1e16)), ref,
err_msg='Failed str formatting for type %s' % tp)
@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
def test_nan_inf_float(tp):
""" Check formatting of nan & inf.
This is only for the str function, and only for simple types.
    The precision of np.float32 and np.longdouble isn't the same as the
    Python float precision.
"""
for x in [np.inf, -np.inf, np.nan]:
assert_equal(str(tp(x)), _REF[x],
err_msg='Failed str formatting for type %s' % tp)
@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble])
def test_complex_types(tp):
"""Check formatting of complex types.
This is only for the str function, and only for simple types.
    The precision of np.float32 and np.longdouble isn't the same as the
    Python float precision.
"""
for x in [0, 1, -1, 1e20]:
assert_equal(str(tp(x)), str(complex(x)),
err_msg='Failed str formatting for type %s' % tp)
assert_equal(str(tp(x*1j)), str(complex(x*1j)),
err_msg='Failed str formatting for type %s' % tp)
assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)),
err_msg='Failed str formatting for type %s' % tp)
if tp(1e16).itemsize > 8:
assert_equal(str(tp(1e16)), str(complex(1e16)),
err_msg='Failed str formatting for type %s' % tp)
else:
ref = '(1e+16+0j)'
assert_equal(str(tp(1e16)), ref,
err_msg='Failed str formatting for type %s' % tp)
@pytest.mark.parametrize('dtype', [np.complex64, np.cdouble, np.clongdouble])
def test_complex_inf_nan(dtype):
"""Check inf/nan formatting of complex types."""
TESTS = {
complex(np.inf, 0): "(inf+0j)",
complex(0, np.inf): "infj",
complex(-np.inf, 0): "(-inf+0j)",
complex(0, -np.inf): "-infj",
complex(np.inf, 1): "(inf+1j)",
complex(1, np.inf): "(1+infj)",
complex(-np.inf, 1): "(-inf+1j)",
complex(1, -np.inf): "(1-infj)",
complex(np.nan, 0): "(nan+0j)",
complex(0, np.nan): "nanj",
complex(-np.nan, 0): "(nan+0j)",
complex(0, -np.nan): "nanj",
complex(np.nan, 1): "(nan+1j)",
complex(1, np.nan): "(1+nanj)",
complex(-np.nan, 1): "(nan+1j)",
complex(1, -np.nan): "(1+nanj)",
}
for c, s in TESTS.items():
assert_equal(str(dtype(c)), s)
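# Note: str() of a Python complex prints NaN without a sign, which is why the
# -nan entries above render simply as "nan"; a quick cross-check:
def _demo_complex_nan_sign():
    assert str(complex(-float("nan"), 0)) == "(nan+0j)"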
# print tests
def _test_redirected_print(x, tp, ref=None):
file = StringIO()
file_tp = StringIO()
stdout = sys.stdout
try:
sys.stdout = file_tp
print(tp(x))
sys.stdout = file
if ref:
print(ref)
else:
print(x)
finally:
sys.stdout = stdout
assert_equal(file.getvalue(), file_tp.getvalue(),
err_msg='print failed for type%s' % tp)
@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
def test_float_type_print(tp):
"""Check formatting when using print """
for x in [0, 1, -1, 1e20]:
_test_redirected_print(float(x), tp)
for x in [np.inf, -np.inf, np.nan]:
_test_redirected_print(float(x), tp, _REF[x])
if tp(1e16).itemsize > 4:
_test_redirected_print(float(1e16), tp)
else:
ref = '1e+16'
_test_redirected_print(float(1e16), tp, ref)
@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble])
def test_complex_type_print(tp):
"""Check formatting when using print """
    # Historical note: complex values with inf/nan could not be created
    # directly in Python < 2.6, hence the explicit constructions below.
for x in [0, 1, -1, 1e20]:
_test_redirected_print(complex(x), tp)
if tp(1e16).itemsize > 8:
_test_redirected_print(complex(1e16), tp)
else:
ref = '(1e+16+0j)'
_test_redirected_print(complex(1e16), tp, ref)
_test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)')
_test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)')
_test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)')
def test_scalar_format():
"""Test the str.format method with NumPy scalar types"""
tests = [('{0}', True, np.bool_),
('{0}', False, np.bool_),
('{0:d}', 130, np.uint8),
('{0:d}', 50000, np.uint16),
('{0:d}', 3000000000, np.uint32),
('{0:d}', 15000000000000000000, np.uint64),
('{0:d}', -120, np.int8),
('{0:d}', -30000, np.int16),
('{0:d}', -2000000000, np.int32),
('{0:d}', -7000000000000000000, np.int64),
('{0:g}', 1.5, np.float16),
('{0:g}', 1.5, np.float32),
('{0:g}', 1.5, np.float64),
('{0:g}', 1.5, np.longdouble),
('{0:g}', 1.5+0.5j, np.complex64),
('{0:g}', 1.5+0.5j, np.complex128),
('{0:g}', 1.5+0.5j, np.clongdouble)]
for (fmat, val, valtype) in tests:
try:
assert_equal(fmat.format(val), fmat.format(valtype(val)),
"failed with val %s, type %s" % (val, valtype))
except ValueError as e:
assert_(False,
"format raised exception (fmt='%s', val=%s, type=%s, exc='%s')" %
(fmat, repr(val), repr(valtype), str(e)))
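# Quick cross-check of the property exercised above: NumPy scalars honor the
# same format specs as the matching Python builtins.
def _demo_scalar_format():
    assert "{0:d}".format(np.int16(-30000)) == format(-30000, "d")
    assert "{0:g}".format(np.float32(1.5)) == format(1.5, "g")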
#
# Locale tests: scalar types formatting should be independent of the locale
#
class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
def test_locale_single(self):
assert_equal(str(np.float32(1.2)), str(float(1.2)))
def test_locale_double(self):
assert_equal(str(np.double(1.2)), str(float(1.2)))
def test_locale_longdouble(self):
assert_equal(str(np.longdouble('1.2')), str(float(1.2)))
| 6,737 | Python | 32.522388 | 80 | 0.556628 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_api.py | import sys
import numpy as np
from numpy.core._rational_tests import rational
import pytest
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises, assert_warns,
HAS_REFCOUNT
)
def test_array_array():
tobj = type(object)
ones11 = np.ones((1, 1), np.float64)
tndarray = type(ones11)
# Test is_ndarray
assert_equal(np.array(ones11, dtype=np.float64), ones11)
if HAS_REFCOUNT:
old_refcount = sys.getrefcount(tndarray)
np.array(ones11)
assert_equal(old_refcount, sys.getrefcount(tndarray))
# test None
assert_equal(np.array(None, dtype=np.float64),
np.array(np.nan, dtype=np.float64))
if HAS_REFCOUNT:
old_refcount = sys.getrefcount(tobj)
np.array(None, dtype=np.float64)
assert_equal(old_refcount, sys.getrefcount(tobj))
# test scalar
assert_equal(np.array(1.0, dtype=np.float64),
np.ones((), dtype=np.float64))
if HAS_REFCOUNT:
old_refcount = sys.getrefcount(np.float64)
np.array(np.array(1.0, dtype=np.float64), dtype=np.float64)
assert_equal(old_refcount, sys.getrefcount(np.float64))
# test string
S2 = np.dtype((bytes, 2))
S3 = np.dtype((bytes, 3))
S5 = np.dtype((bytes, 5))
assert_equal(np.array(b"1.0", dtype=np.float64),
np.ones((), dtype=np.float64))
assert_equal(np.array(b"1.0").dtype, S3)
assert_equal(np.array(b"1.0", dtype=bytes).dtype, S3)
assert_equal(np.array(b"1.0", dtype=S2), np.array(b"1."))
assert_equal(np.array(b"1", dtype=S5), np.ones((), dtype=S5))
# test string
U2 = np.dtype((str, 2))
U3 = np.dtype((str, 3))
U5 = np.dtype((str, 5))
assert_equal(np.array("1.0", dtype=np.float64),
np.ones((), dtype=np.float64))
assert_equal(np.array("1.0").dtype, U3)
assert_equal(np.array("1.0", dtype=str).dtype, U3)
assert_equal(np.array("1.0", dtype=U2), np.array(str("1.")))
assert_equal(np.array("1", dtype=U5), np.ones((), dtype=U5))
builtins = getattr(__builtins__, '__dict__', __builtins__)
assert_(hasattr(builtins, 'get'))
# test memoryview
dat = np.array(memoryview(b'1.0'), dtype=np.float64)
assert_equal(dat, [49.0, 46.0, 48.0])
assert_(dat.dtype.type is np.float64)
dat = np.array(memoryview(b'1.0'))
assert_equal(dat, [49, 46, 48])
assert_(dat.dtype.type is np.uint8)
# test array interface
a = np.array(100.0, dtype=np.float64)
o = type("o", (object,),
dict(__array_interface__=a.__array_interface__))
assert_equal(np.array(o, dtype=np.float64), a)
# test array_struct interface
a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
dtype=[('f0', int), ('f1', float), ('f2', str)])
o = type("o", (object,),
dict(__array_struct__=a.__array_struct__))
    ## Note: np.array(o) is not equal to `a` here; it comes back as an
    ## array with dtype ">V18", so we compare the raw bytes instead.
assert_equal(bytes(np.array(o).data), bytes(a.data))
# test array
o = type("o", (object,),
dict(__array__=lambda *x: np.array(100.0, dtype=np.float64)))()
assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64))
# test recursion
nested = 1.5
for i in range(np.MAXDIMS):
nested = [nested]
# no error
np.array(nested)
# Exceeds recursion limit
assert_raises(ValueError, np.array, [nested], dtype=np.float64)
# Try with lists...
assert_equal(np.array([None] * 10, dtype=np.float64),
np.full((10,), np.nan, dtype=np.float64))
assert_equal(np.array([[None]] * 10, dtype=np.float64),
np.full((10, 1), np.nan, dtype=np.float64))
assert_equal(np.array([[None] * 10], dtype=np.float64),
np.full((1, 10), np.nan, dtype=np.float64))
assert_equal(np.array([[None] * 10] * 10, dtype=np.float64),
np.full((10, 10), np.nan, dtype=np.float64))
assert_equal(np.array([1.0] * 10, dtype=np.float64),
np.ones((10,), dtype=np.float64))
assert_equal(np.array([[1.0]] * 10, dtype=np.float64),
np.ones((10, 1), dtype=np.float64))
assert_equal(np.array([[1.0] * 10], dtype=np.float64),
np.ones((1, 10), dtype=np.float64))
assert_equal(np.array([[1.0] * 10] * 10, dtype=np.float64),
np.ones((10, 10), dtype=np.float64))
# Try with tuples
assert_equal(np.array((None,) * 10, dtype=np.float64),
np.full((10,), np.nan, dtype=np.float64))
assert_equal(np.array([(None,)] * 10, dtype=np.float64),
np.full((10, 1), np.nan, dtype=np.float64))
assert_equal(np.array([(None,) * 10], dtype=np.float64),
np.full((1, 10), np.nan, dtype=np.float64))
assert_equal(np.array([(None,) * 10] * 10, dtype=np.float64),
np.full((10, 10), np.nan, dtype=np.float64))
assert_equal(np.array((1.0,) * 10, dtype=np.float64),
np.ones((10,), dtype=np.float64))
assert_equal(np.array([(1.0,)] * 10, dtype=np.float64),
np.ones((10, 1), dtype=np.float64))
assert_equal(np.array([(1.0,) * 10], dtype=np.float64),
np.ones((1, 10), dtype=np.float64))
assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64),
np.ones((10, 10), dtype=np.float64))
@pytest.mark.parametrize("array", [True, False])
def test_array_impossible_casts(array):
# All builtin types can be forcibly cast, at least theoretically,
# but user dtypes cannot necessarily.
rt = rational(1, 2)
if array:
rt = np.array(rt)
with assert_raises(TypeError):
np.array(rt, dtype="M8")
def test_fastCopyAndTranspose():
# 0D array
a = np.array(2)
b = np.fastCopyAndTranspose(a)
assert_equal(b, a.T)
assert_(b.flags.owndata)
# 1D array
a = np.array([3, 2, 7, 0])
b = np.fastCopyAndTranspose(a)
assert_equal(b, a.T)
assert_(b.flags.owndata)
# 2D array
a = np.arange(6).reshape(2, 3)
b = np.fastCopyAndTranspose(a)
assert_equal(b, a.T)
assert_(b.flags.owndata)
def test_array_astype():
a = np.arange(6, dtype='f4').reshape(2, 3)
# Default behavior: allows unsafe casts, keeps memory layout,
# always copies.
b = a.astype('i4')
assert_equal(a, b)
assert_equal(b.dtype, np.dtype('i4'))
assert_equal(a.strides, b.strides)
b = a.T.astype('i4')
assert_equal(a.T, b)
assert_equal(b.dtype, np.dtype('i4'))
assert_equal(a.T.strides, b.strides)
b = a.astype('f4')
assert_equal(a, b)
assert_(not (a is b))
# copy=False parameter can sometimes skip a copy
b = a.astype('f4', copy=False)
assert_(a is b)
# order parameter allows overriding of the memory layout,
# forcing a copy if the layout is wrong
b = a.astype('f4', order='F', copy=False)
assert_equal(a, b)
assert_(not (a is b))
assert_(b.flags.f_contiguous)
b = a.astype('f4', order='C', copy=False)
assert_equal(a, b)
assert_(a is b)
assert_(b.flags.c_contiguous)
# casting parameter allows catching bad casts
b = a.astype('c8', casting='safe')
assert_equal(a, b)
assert_equal(b.dtype, np.dtype('c8'))
assert_raises(TypeError, a.astype, 'i4', casting='safe')
# subok=False passes through a non-subclassed array
b = a.astype('f4', subok=0, copy=False)
assert_(a is b)
class MyNDArray(np.ndarray):
pass
a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray)
# subok=True passes through a subclass
b = a.astype('f4', subok=True, copy=False)
assert_(a is b)
# subok=True is default, and creates a subtype on a cast
b = a.astype('i4', copy=False)
assert_equal(a, b)
assert_equal(type(b), MyNDArray)
# subok=False never returns a subclass
b = a.astype('f4', subok=False, copy=False)
assert_equal(a, b)
assert_(not (a is b))
assert_(type(b) is not MyNDArray)
# Make sure converting from string object to fixed length string
# does not truncate.
a = np.array([b'a'*100], dtype='O')
b = a.astype('S')
assert_equal(a, b)
assert_equal(b.dtype, np.dtype('S100'))
a = np.array([u'a'*100], dtype='O')
b = a.astype('U')
assert_equal(a, b)
assert_equal(b.dtype, np.dtype('U100'))
# Same test as above but for strings shorter than 64 characters
a = np.array([b'a'*10], dtype='O')
b = a.astype('S')
assert_equal(a, b)
assert_equal(b.dtype, np.dtype('S10'))
a = np.array([u'a'*10], dtype='O')
b = a.astype('U')
assert_equal(a, b)
assert_equal(b.dtype, np.dtype('U10'))
a = np.array(123456789012345678901234567890, dtype='O').astype('S')
assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
a = np.array(123456789012345678901234567890, dtype='O').astype('U')
assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30'))
a = np.array([123456789012345678901234567890], dtype='O').astype('S')
assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
a = np.array([123456789012345678901234567890], dtype='O').astype('U')
assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30'))
a = np.array(123456789012345678901234567890, dtype='S')
assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
a = np.array(123456789012345678901234567890, dtype='U')
assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30'))
a = np.array(u'a\u0140', dtype='U')
b = np.ndarray(buffer=a, dtype='uint32', shape=2)
assert_(b.size == 2)
a = np.array([1000], dtype='i4')
assert_raises(TypeError, a.astype, 'S1', casting='safe')
a = np.array(1000, dtype='i4')
assert_raises(TypeError, a.astype, 'U1', casting='safe')
@pytest.mark.parametrize("dt", ["S", "U"])
def test_array_astype_to_string_discovery_empty(dt):
# See also gh-19085
arr = np.array([""], dtype=object)
# Note, the itemsize is the `0 -> 1` logic, which should change.
    # The important part of the test is rather that it does not error.
assert arr.astype(dt).dtype.itemsize == np.dtype(f"{dt}1").itemsize
# check the same thing for `np.can_cast` (since it accepts arrays)
assert np.can_cast(arr, dt, casting="unsafe")
assert not np.can_cast(arr, dt, casting="same_kind")
# as well as for the object as a descriptor:
assert np.can_cast("O", dt, casting="unsafe")
@pytest.mark.parametrize("dt", ["d", "f", "S13", "U32"])
def test_array_astype_to_void(dt):
dt = np.dtype(dt)
arr = np.array([], dtype=dt)
assert arr.astype("V").dtype.itemsize == dt.itemsize
def test_object_array_astype_to_void():
    # This differs from `test_array_astype_to_void`, as object arrays are
    # inspected. The default void is "V8" (8 is the size of a double).
arr = np.array([], dtype="O").astype("V")
assert arr.dtype == "V8"
@pytest.mark.parametrize("t",
np.sctypes['uint'] + np.sctypes['int'] + np.sctypes['float']
)
def test_array_astype_warning(t):
# test ComplexWarning when casting from complex to float or int
a = np.array(10, dtype=np.complex_)
assert_warns(np.ComplexWarning, a.astype, t)
@pytest.mark.parametrize(["dtype", "out_dtype"],
[(np.bytes_, np.bool_),
(np.unicode_, np.bool_),
(np.dtype("S10,S9"), np.dtype("?,?"))])
def test_string_to_boolean_cast(dtype, out_dtype):
"""
Currently, for `astype` strings are cast to booleans effectively by
    calling `bool(int(string))`. This is not consistent (see gh-9875) and
will eventually be deprecated.
"""
arr = np.array(["10", "10\0\0\0", "0\0\0", "0"], dtype=dtype)
expected = np.array([True, True, False, False], dtype=out_dtype)
assert_array_equal(arr.astype(out_dtype), expected)
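# Sketch of the current (inconsistent, to-be-deprecated) rule described in
# the docstring above: the string is parsed as an integer first, then
# truth-tested.
def _demo_string_to_boolean():
    arr = np.array(["10", "0"], dtype="S")
    assert arr.astype(np.bool_).tolist() == [True, False]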
@pytest.mark.parametrize(["dtype", "out_dtype"],
[(np.bytes_, np.bool_),
(np.unicode_, np.bool_),
(np.dtype("S10,S9"), np.dtype("?,?"))])
def test_string_to_boolean_cast_errors(dtype, out_dtype):
"""
    These currently error out, since the cast to integer fails, but should not
error out in the future.
"""
for invalid in ["False", "True", "", "\0", "non-empty"]:
arr = np.array([invalid], dtype=dtype)
with assert_raises(ValueError):
arr.astype(out_dtype)
@pytest.mark.parametrize("str_type", [str, bytes, np.str_, np.unicode_])
@pytest.mark.parametrize("scalar_type",
[np.complex64, np.complex128, np.clongdouble])
def test_string_to_complex_cast(str_type, scalar_type):
value = scalar_type(b"1+3j")
assert scalar_type(value) == 1+3j
assert np.array([value], dtype=object).astype(scalar_type)[()] == 1+3j
assert np.array(value).astype(scalar_type)[()] == 1+3j
arr = np.zeros(1, dtype=scalar_type)
arr[0] = value
assert arr[0] == 1+3j
@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
def test_none_to_nan_cast(dtype):
# Note that at the time of writing this test, the scalar constructors
# reject None
arr = np.zeros(1, dtype=dtype)
arr[0] = None
assert np.isnan(arr)[0]
assert np.isnan(np.array(None, dtype=dtype))[()]
assert np.isnan(np.array([None], dtype=dtype))[0]
assert np.isnan(np.array(None).astype(dtype))[()]
def test_copyto_fromscalar():
a = np.arange(6, dtype='f4').reshape(2, 3)
# Simple copy
np.copyto(a, 1.5)
assert_equal(a, 1.5)
np.copyto(a.T, 2.5)
assert_equal(a, 2.5)
# Where-masked copy
mask = np.array([[0, 1, 0], [0, 0, 1]], dtype='?')
np.copyto(a, 3.5, where=mask)
assert_equal(a, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]])
mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?')
np.copyto(a.T, 4.5, where=mask)
assert_equal(a, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]])
def test_copyto():
a = np.arange(6, dtype='i4').reshape(2, 3)
# Simple copy
np.copyto(a, [[3, 1, 5], [6, 2, 1]])
assert_equal(a, [[3, 1, 5], [6, 2, 1]])
# Overlapping copy should work
np.copyto(a[:, :2], a[::-1, 1::-1])
assert_equal(a, [[2, 6, 5], [1, 3, 1]])
# Defaults to 'same_kind' casting
assert_raises(TypeError, np.copyto, a, 1.5)
# Force a copy with 'unsafe' casting, truncating 1.5 to 1
np.copyto(a, 1.5, casting='unsafe')
assert_equal(a, 1)
# Copying with a mask
np.copyto(a, 3, where=[True, False, True])
assert_equal(a, [[3, 1, 3], [3, 1, 3]])
# Casting rule still applies with a mask
assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True])
# Lists of integer 0's and 1's is ok too
np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]])
assert_equal(a, [[3, 4, 4], [4, 1, 3]])
# Overlapping copy with mask should work
np.copyto(a[:, :2], a[::-1, 1::-1], where=[[0, 1], [1, 1]])
assert_equal(a, [[3, 4, 4], [4, 3, 3]])
# 'dst' must be an array
assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4])
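# Sketch of the default casting rule checked above: 'same_kind' rejects a
# float scalar for an integer destination unless casting='unsafe' is given.
def _demo_copyto_casting():
    dst = np.zeros(3, dtype=np.int64)
    try:
        np.copyto(dst, 1.5)  # default 'same_kind' casting raises TypeError
    except TypeError:
        pass
    np.copyto(dst, 1.5, casting='unsafe')  # truncates 1.5 -> 1
    assert (dst == 1).all()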
def test_copyto_permut():
# test explicit overflow case
pad = 500
l = [True] * pad + [True, True, True, True]
r = np.zeros(len(l)-pad)
d = np.ones(len(l)-pad)
mask = np.array(l)[pad:]
np.copyto(r, d, where=mask[::-1])
    # test all permutations of possible masks; 9 should be sufficient for
    # the current 4-byte unrolled code
power = 9
d = np.ones(power)
for i in range(2**power):
r = np.zeros(power)
l = [(i & x) != 0 for x in range(power)]
mask = np.array(l)
np.copyto(r, d, where=mask)
assert_array_equal(r == 1, l)
assert_equal(r.sum(), sum(l))
r = np.zeros(power)
np.copyto(r, d, where=mask[::-1])
assert_array_equal(r == 1, l[::-1])
assert_equal(r.sum(), sum(l))
r = np.zeros(power)
np.copyto(r[::2], d[::2], where=mask[::2])
assert_array_equal(r[::2] == 1, l[::2])
assert_equal(r[::2].sum(), sum(l[::2]))
r = np.zeros(power)
np.copyto(r[::2], d[::2], where=mask[::-2])
assert_array_equal(r[::2] == 1, l[::-2])
assert_equal(r[::2].sum(), sum(l[::-2]))
for c in [0xFF, 0x7F, 0x02, 0x10]:
r = np.zeros(power)
mask = np.array(l)
imask = np.array(l).view(np.uint8)
imask[mask != 0] = c
np.copyto(r, d, where=mask)
assert_array_equal(r == 1, l)
assert_equal(r.sum(), sum(l))
r = np.zeros(power)
np.copyto(r, d, where=True)
assert_equal(r.sum(), r.size)
r = np.ones(power)
d = np.zeros(power)
np.copyto(r, d, where=False)
assert_equal(r.sum(), r.size)
def test_copy_order():
a = np.arange(24).reshape(2, 1, 3, 4)
b = a.copy(order='F')
c = np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3)
def check_copy_result(x, y, ccontig, fcontig, strides=False):
assert_(not (x is y))
assert_equal(x, y)
assert_equal(res.flags.c_contiguous, ccontig)
assert_equal(res.flags.f_contiguous, fcontig)
# Validate the initial state of a, b, and c
assert_(a.flags.c_contiguous)
assert_(not a.flags.f_contiguous)
assert_(not b.flags.c_contiguous)
assert_(b.flags.f_contiguous)
assert_(not c.flags.c_contiguous)
assert_(not c.flags.f_contiguous)
# Copy with order='C'
res = a.copy(order='C')
check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
res = b.copy(order='C')
check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
res = c.copy(order='C')
check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)
res = np.copy(a, order='C')
check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
res = np.copy(b, order='C')
check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
res = np.copy(c, order='C')
check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)
# Copy with order='F'
res = a.copy(order='F')
check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
res = b.copy(order='F')
check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
res = c.copy(order='F')
check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)
res = np.copy(a, order='F')
check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
res = np.copy(b, order='F')
check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
res = np.copy(c, order='F')
check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)
# Copy with order='K'
res = a.copy(order='K')
check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
res = b.copy(order='K')
check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
res = c.copy(order='K')
check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
res = np.copy(a, order='K')
check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
res = np.copy(b, order='K')
check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
res = np.copy(c, order='K')
check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
def test_contiguous_flags():
a = np.ones((4, 4, 1))[::2,:,:]
a.strides = a.strides[:2] + (-123,)
b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4)
def check_contig(a, ccontig, fcontig):
assert_(a.flags.c_contiguous == ccontig)
assert_(a.flags.f_contiguous == fcontig)
# Check if new arrays are correct:
check_contig(a, False, False)
check_contig(b, False, False)
check_contig(np.empty((2, 2, 0, 2, 2)), True, True)
check_contig(np.array([[[1], [2]]], order='F'), True, True)
check_contig(np.empty((2, 2)), True, False)
check_contig(np.empty((2, 2), order='F'), False, True)
# Check that np.array creates correct contiguous flags:
check_contig(np.array(a, copy=False), False, False)
check_contig(np.array(a, copy=False, order='C'), True, False)
check_contig(np.array(a, ndmin=4, copy=False, order='F'), False, True)
    # Check that slicing updates the flags:
check_contig(a[0], True, True)
check_contig(a[None, ::4, ..., None], True, True)
check_contig(b[0, 0, ...], False, True)
check_contig(b[:, :, 0:0, :, :], True, True)
# Test ravel and squeeze.
check_contig(a.ravel(), True, True)
check_contig(np.ones((1, 3, 1)).squeeze(), True, True)
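# Sketch of one rule check_contig relies on: arrays with a zero-length axis
# are reported as both C- and F-contiguous, regardless of order.
def _demo_zero_size_contiguity():
    e = np.empty((2, 0, 3))
    assert e.flags.c_contiguous and e.flags.f_contiguous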
def test_broadcast_arrays():
# Test user defined dtypes
a = np.array([(1, 2, 3)], dtype='u4,u4,u4')
b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')
result = np.broadcast_arrays(a, b)
assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4'))
assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4'))
@pytest.mark.parametrize(["shape", "fill_value", "expected_output"],
[((2, 2), [5.0, 6.0], np.array([[5.0, 6.0], [5.0, 6.0]])),
((3, 2), [1.0, 2.0], np.array([[1.0, 2.0], [1.0, 2.0], [1.0, 2.0]]))])
def test_full_from_list(shape, fill_value, expected_output):
output = np.full(shape, fill_value)
assert_equal(output, expected_output)
def test_astype_copyflag():
# test the various copyflag options
arr = np.arange(10, dtype=np.intp)
res_true = arr.astype(np.intp, copy=True)
assert not np.may_share_memory(arr, res_true)
res_always = arr.astype(np.intp, copy=np._CopyMode.ALWAYS)
assert not np.may_share_memory(arr, res_always)
res_false = arr.astype(np.intp, copy=False)
# `res_false is arr` currently, but check `may_share_memory`.
assert np.may_share_memory(arr, res_false)
res_if_needed = arr.astype(np.intp, copy=np._CopyMode.IF_NEEDED)
# `res_if_needed is arr` currently, but check `may_share_memory`.
assert np.may_share_memory(arr, res_if_needed)
res_never = arr.astype(np.intp, copy=np._CopyMode.NEVER)
assert np.may_share_memory(arr, res_never)
# Simple tests for when a copy is necessary:
res_false = arr.astype(np.float64, copy=False)
assert_array_equal(res_false, arr)
res_if_needed = arr.astype(np.float64,
copy=np._CopyMode.IF_NEEDED)
assert_array_equal(res_if_needed, arr)
assert_raises(ValueError, arr.astype, np.float64,
copy=np._CopyMode.NEVER)
| 22,474 | Python | 36.026359 | 90 | 0.597312 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_memmap.py | import sys
import os
import mmap
import pytest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryFile
from numpy import (
memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply)
from numpy import arange, allclose, asarray
from numpy.testing import (
assert_, assert_equal, assert_array_equal, suppress_warnings, IS_PYPY,
break_cycles
)
class TestMemmap:
def setup_method(self):
self.tmpfp = NamedTemporaryFile(prefix='mmap')
self.shape = (3, 4)
self.dtype = 'float32'
self.data = arange(12, dtype=self.dtype)
self.data.resize(self.shape)
def teardown_method(self):
self.tmpfp.close()
self.data = None
if IS_PYPY:
break_cycles()
break_cycles()
def test_roundtrip(self):
# Write data to file
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
del fp # Test __del__ machinery, which handles cleanup
# Read data back from file
newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r',
shape=self.shape)
assert_(allclose(self.data, newfp))
assert_array_equal(self.data, newfp)
assert_equal(newfp.flags.writeable, False)
def test_open_with_filename(self, tmp_path):
tmpname = tmp_path / 'mmap'
fp = memmap(tmpname, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
del fp
def test_unnamed_file(self):
with TemporaryFile() as f:
fp = memmap(f, dtype=self.dtype, shape=self.shape)
del fp
def test_attributes(self):
offset = 1
mode = "w+"
fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode,
shape=self.shape, offset=offset)
assert_equal(offset, fp.offset)
assert_equal(mode, fp.mode)
del fp
def test_filename(self, tmp_path):
tmpname = tmp_path / "mmap"
fp = memmap(tmpname, dtype=self.dtype, mode='w+',
shape=self.shape)
abspath = Path(os.path.abspath(tmpname))
fp[:] = self.data[:]
assert_equal(abspath, fp.filename)
b = fp[:1]
assert_equal(abspath, b.filename)
del b
del fp
def test_path(self, tmp_path):
tmpname = tmp_path / "mmap"
fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+',
shape=self.shape)
# os.path.realpath does not resolve symlinks on Windows
# see: https://bugs.python.org/issue9949
# use Path.resolve, just as memmap class does internally
abspath = str(Path(tmpname).resolve())
fp[:] = self.data[:]
assert_equal(abspath, str(fp.filename.resolve()))
b = fp[:1]
assert_equal(abspath, str(b.filename.resolve()))
del b
del fp
def test_filename_fileobj(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+",
shape=self.shape)
assert_equal(fp.filename, self.tmpfp.name)
@pytest.mark.skipif(sys.platform == 'gnu0',
reason="Known to fail on hurd")
def test_flush(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
assert_equal(fp[0], self.data[0])
fp.flush()
def test_del(self):
# Make sure a view does not delete the underlying mmap
fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp_base[0] = 5
fp_view = fp_base[0:1]
assert_equal(fp_view[0], 5)
del fp_view
# Should still be able to access and assign values after
# deleting the view
assert_equal(fp_base[0], 5)
fp_base[0] = 6
assert_equal(fp_base[0], 6)
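    # Sketch (not collected by pytest) of the ownership rule checked above:
    # a view references its parent through `.base`, so the underlying mmap
    # outlives deletion of the view.
    def _demo_view_keeps_base_alive(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        view = fp[:1]
        assert_(view.base is fp)  # the view keeps the parent alive
        del view                  # the parent (and its mmap) remains usable
        fp[0] = 1.0
        del fp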
def test_arithmetic_drops_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
tmp = (fp + 10)
if isinstance(tmp, memmap):
assert_(tmp._mmap is not fp._mmap)
def test_indexing_drops_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
tmp = fp[(1, 2), (2, 3)]
if isinstance(tmp, memmap):
assert_(tmp._mmap is not fp._mmap)
def test_slicing_keeps_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
assert_(fp[:2, :2]._mmap is fp._mmap)
def test_view(self):
fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
new1 = fp.view()
new2 = new1.view()
assert_(new1.base is fp)
assert_(new2.base is fp)
new_array = asarray(fp)
assert_(new_array.base is fp)
def test_ufunc_return_ndarray(self):
fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
fp[:] = self.data
with suppress_warnings() as sup:
sup.filter(FutureWarning, "np.average currently does not preserve")
for unary_op in [sum, average, product]:
result = unary_op(fp)
assert_(isscalar(result))
assert_(result.__class__ is self.data[0, 0].__class__)
assert_(unary_op(fp, axis=0).__class__ is ndarray)
assert_(unary_op(fp, axis=1).__class__ is ndarray)
for binary_op in [add, subtract, multiply]:
assert_(binary_op(fp, self.data).__class__ is ndarray)
assert_(binary_op(self.data, fp).__class__ is ndarray)
assert_(binary_op(fp, fp).__class__ is ndarray)
fp += 1
        assert fp.__class__ is memmap
        add(fp, 1, out=fp)
        assert fp.__class__ is memmap
def test_getitem(self):
fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
fp[:] = self.data
assert_(fp[1:, :-1].__class__ is memmap)
# Fancy indexing returns a copy that is not memmapped
assert_(fp[[0, 1]].__class__ is ndarray)
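    # Sketch (not collected by pytest) of the indexing rule checked above:
    # basic slices stay memory-mapped, while fancy indexing materializes a
    # plain ndarray copy.
    def _demo_indexing_classes(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
        assert_(type(fp[1:]) is memmap)       # slices stay memory-mapped
        assert_(type(fp[[0, 1]]) is ndarray)  # fancy indexing copies
        del fp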
def test_memmap_subclass(self):
class MemmapSubClass(memmap):
pass
fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape)
fp[:] = self.data
        # We keep the previous behavior for subclasses of memmap, i.e. the
        # ufunc and __getitem__ output is never turned into an ndarray
assert_(sum(fp, axis=0).__class__ is MemmapSubClass)
assert_(sum(fp).__class__ is MemmapSubClass)
assert_(fp[1:, :-1].__class__ is MemmapSubClass)
        assert fp[[0, 1]].__class__ is MemmapSubClass
def test_mmap_offset_greater_than_allocation_granularity(self):
size = 5 * mmap.ALLOCATIONGRANULARITY
offset = mmap.ALLOCATIONGRANULARITY + 1
fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset)
assert_(fp.offset == offset)
def test_no_shape(self):
self.tmpfp.write(b'a'*16)
mm = memmap(self.tmpfp, dtype='float64')
assert_equal(mm.shape, (2,))
def test_empty_array(self):
# gh-12653
with pytest.raises(ValueError, match='empty file'):
memmap(self.tmpfp, shape=(0,4), mode='w+')
self.tmpfp.write(b'\0')
# ok now the file is not empty
memmap(self.tmpfp, shape=(0,4), mode='w+')
| 7,483 | Python | 33.648148 | 79 | 0.567419 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_regression.py | import copy
import sys
import gc
import tempfile
import pytest
from os import path
from io import BytesIO
from itertools import chain
import numpy as np
from numpy.testing import (
assert_, assert_equal, IS_PYPY, assert_almost_equal,
assert_array_equal, assert_array_almost_equal, assert_raises,
assert_raises_regex, assert_warns, suppress_warnings,
_assert_valid_refcount, HAS_REFCOUNT, IS_PYSTON
)
from numpy.testing._private.utils import _no_tracing, requires_memory
from numpy.compat import asbytes, asunicode, pickle
class TestRegression:
def test_invalid_round(self):
# Ticket #3
v = 4.7599999999999998
assert_array_equal(np.array([v]), np.array(v))
def test_mem_empty(self):
# Ticket #7
np.empty((1,), dtype=[('x', np.int64)])
def test_pickle_transposed(self):
# Ticket #16
a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
with BytesIO() as f:
pickle.dump(a, f, protocol=proto)
f.seek(0)
b = pickle.load(f)
assert_array_equal(a, b)
def test_dtype_names(self):
# Ticket #35
# Should succeed
np.dtype([(('name', 'label'), np.int32, 3)])
def test_reduce(self):
# Ticket #40
assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5)
def test_zeros_order(self):
# Ticket #43
np.zeros([3], int, 'C')
np.zeros([3], order='C')
np.zeros([3], int, order='C')
def test_asarray_with_order(self):
# Check that nothing is done when order='F' and array C/F-contiguous
a = np.ones(2)
assert_(a is np.asarray(a, order='F'))
def test_ravel_with_order(self):
# Check that ravel works when order='F' and array C/F-contiguous
a = np.ones(2)
assert_(not a.ravel('F').flags.owndata)
def test_sort_bigendian(self):
# Ticket #47
a = np.linspace(0, 10, 11)
c = a.astype(np.dtype('<f8'))
c.sort()
assert_array_almost_equal(c, a)
def test_negative_nd_indexing(self):
# Ticket #49
c = np.arange(125).reshape((5, 5, 5))
origidx = np.array([-1, 0, 1])
idx = np.array(origidx)
c[idx]
assert_array_equal(idx, origidx)
def test_char_dump(self):
# Ticket #50
ca = np.char.array(np.arange(1000, 1010), itemsize=4)
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
with BytesIO() as f:
pickle.dump(ca, f, protocol=proto)
f.seek(0)
ca = np.load(f, allow_pickle=True)
def test_noncontiguous_fill(self):
# Ticket #58.
a = np.zeros((5, 3))
b = a[:, :2,]
def rs():
b.shape = (10,)
assert_raises(AttributeError, rs)
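    # Sketch (not collected by pytest) of the rule behind the AttributeError
    # above: in-place shape assignment must not copy, whereas .reshape() is
    # free to return a copy.
    def _demo_reshape_inplace_vs_copy(self):
        b = np.zeros((5, 3))[:, :2]  # non-contiguous view, 10 elements
        b.reshape(10)                # fine: may return a copy
        assert_raises(AttributeError, setattr, b, 'shape', (10,))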
def test_bool(self):
# Ticket #60
np.bool_(1) # Should succeed
def test_indexing1(self):
# Ticket #64
descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]
buffer = ((([6j, 4j],),),)
h = np.array(buffer, dtype=descr)
h['x']['y']['z']
def test_indexing2(self):
# Ticket #65
descr = [('x', 'i4', (2,))]
buffer = ([3, 2],)
h = np.array(buffer, dtype=descr)
h['x']
def test_round(self):
# Ticket #67
x = np.array([1+2j])
assert_almost_equal(x**(-1), [1/(1+2j)])
def test_scalar_compare(self):
# Trac Ticket #72
# https://github.com/numpy/numpy/issues/565
a = np.array(['test', 'auto'])
assert_array_equal(a == 'auto', np.array([False, True]))
assert_(a[1] == 'auto')
assert_(a[0] != 'auto')
b = np.linspace(0, 10, 11)
# This should return true for now, but will eventually raise an error:
with suppress_warnings() as sup:
sup.filter(FutureWarning)
assert_(b != 'auto')
assert_(b[0] != 'auto')
def test_unicode_swapping(self):
# Ticket #79
ulen = 1
ucs_value = u'\U0010FFFF'
ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)
ua.newbyteorder() # Should succeed.
def test_object_array_fill(self):
# Ticket #86
x = np.zeros(1, 'O')
x.fill([])
def test_mem_dtype_align(self):
# Ticket #93
assert_raises(TypeError, np.dtype,
{'names':['a'], 'formats':['foo']}, align=1)
def test_endian_bool_indexing(self):
# Ticket #105
a = np.arange(10., dtype='>f8')
b = np.arange(10., dtype='<f8')
xa = np.where((a > 2) & (a < 6))
xb = np.where((b > 2) & (b < 6))
ya = ((a > 2) & (a < 6))
yb = ((b > 2) & (b < 6))
assert_array_almost_equal(xa, ya.nonzero())
assert_array_almost_equal(xb, yb.nonzero())
assert_(np.all(a[ya] > 0.5))
assert_(np.all(b[yb] > 0.5))
def test_endian_where(self):
# GitHub issue #369
net = np.zeros(3, dtype='>f4')
net[1] = 0.00458849
net[2] = 0.605202
max_net = net.max()
test = np.where(net <= 0., max_net, net)
correct = np.array([ 0.60520202, 0.00458849, 0.60520202])
assert_array_almost_equal(test, correct)
def test_endian_recarray(self):
# Ticket #2185
dt = np.dtype([
('head', '>u4'),
('data', '>u4', 2),
])
buf = np.recarray(1, dtype=dt)
buf[0]['head'] = 1
buf[0]['data'][:] = [1, 1]
h = buf[0]['head']
d = buf[0]['data'][0]
buf[0]['head'] = h
buf[0]['data'][0] = d
assert_(buf[0]['head'] == 1)
def test_mem_dot(self):
# Ticket #106
x = np.random.randn(0, 1)
y = np.random.randn(10, 1)
# Dummy array to detect bad memory access:
_z = np.ones(10)
_dummy = np.empty((0, 10))
z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)
np.dot(x, np.transpose(y), out=z)
assert_equal(_z, np.ones(10))
# Do the same for the built-in dot:
np.core.multiarray.dot(x, np.transpose(y), out=z)
assert_equal(_z, np.ones(10))
def test_arange_endian(self):
# Ticket #111
ref = np.arange(10)
x = np.arange(10, dtype='<f8')
assert_array_equal(ref, x)
x = np.arange(10, dtype='>f8')
assert_array_equal(ref, x)
def test_arange_inf_step(self):
ref = np.arange(0, 1, 10)
x = np.arange(0, 1, np.inf)
assert_array_equal(ref, x)
ref = np.arange(0, 1, -10)
x = np.arange(0, 1, -np.inf)
assert_array_equal(ref, x)
ref = np.arange(0, -1, -10)
x = np.arange(0, -1, -np.inf)
assert_array_equal(ref, x)
ref = np.arange(0, -1, 10)
x = np.arange(0, -1, np.inf)
assert_array_equal(ref, x)
def test_arange_underflow_stop_and_step(self):
finfo = np.finfo(np.float64)
ref = np.arange(0, finfo.eps, 2 * finfo.eps)
x = np.arange(0, finfo.eps, finfo.max)
assert_array_equal(ref, x)
ref = np.arange(0, finfo.eps, -2 * finfo.eps)
x = np.arange(0, finfo.eps, -finfo.max)
assert_array_equal(ref, x)
ref = np.arange(0, -finfo.eps, -2 * finfo.eps)
x = np.arange(0, -finfo.eps, -finfo.max)
assert_array_equal(ref, x)
ref = np.arange(0, -finfo.eps, 2 * finfo.eps)
x = np.arange(0, -finfo.eps, finfo.max)
assert_array_equal(ref, x)
def test_argmax(self):
# Ticket #119
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
a.argmax(i) # Should succeed
def test_mem_divmod(self):
# Ticket #126
for i in range(10):
divmod(np.array([i])[0], 10)
def test_hstack_invalid_dims(self):
# Ticket #128
x = np.arange(9).reshape((3, 3))
y = np.array([0, 0, 0])
assert_raises(ValueError, np.hstack, (x, y))
def test_squeeze_type(self):
# Ticket #133
a = np.array([3])
b = np.array(3)
assert_(type(a.squeeze()) is np.ndarray)
assert_(type(b.squeeze()) is np.ndarray)
def test_add_identity(self):
# Ticket #143
assert_equal(0, np.add.identity)
def test_numpy_float_python_long_addition(self):
# Check that numpy float and python longs can be added correctly.
a = np.float_(23.) + 2**135
assert_equal(a, 23. + 2**135)
def test_binary_repr_0(self):
# Ticket #151
assert_equal('0', np.binary_repr(0))
def test_rec_iterate(self):
# Ticket #160
descr = np.dtype([('i', int), ('f', float), ('s', '|S3')])
x = np.rec.array([(1, 1.1, '1.0'),
(2, 2.2, '2.0')], dtype=descr)
x[0].tolist()
[i for i in x[0]]
def test_unicode_string_comparison(self):
# Ticket #190
a = np.array('hello', np.unicode_)
b = np.array('world')
a == b
def test_tobytes_FORTRANORDER_discontiguous(self):
# Fix in r2836
# Create non-contiguous Fortran ordered array
x = np.array(np.random.rand(3, 3), order='F')[:, :2]
assert_array_almost_equal(x.ravel(), np.frombuffer(x.tobytes()))
def test_flat_assignment(self):
# Correct behaviour of ticket #194
x = np.empty((3, 1))
x.flat = np.arange(3)
assert_array_almost_equal(x, [[0], [1], [2]])
x.flat = np.arange(3, dtype=float)
assert_array_almost_equal(x, [[0], [1], [2]])
def test_broadcast_flat_assignment(self):
# Ticket #194
x = np.empty((3, 1))
def bfa():
x[:] = np.arange(3)
def bfb():
x[:] = np.arange(3, dtype=float)
assert_raises(ValueError, bfa)
assert_raises(ValueError, bfb)
def test_nonarray_assignment(self):
# See also Issue gh-2870, test for non-array assignment
# and equivalent unsafe casted array assignment
a = np.arange(10)
b = np.ones(10, dtype=bool)
r = np.arange(10)
def assign(a, b, c):
a[b] = c
assert_raises(ValueError, assign, a, b, np.nan)
a[b] = np.array(np.nan) # but not this.
assert_raises(ValueError, assign, a, r, np.nan)
a[r] = np.array(np.nan)
def test_unpickle_dtype_with_object(self):
# Implemented in r2840
dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')])
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
with BytesIO() as f:
pickle.dump(dt, f, protocol=proto)
f.seek(0)
dt_ = pickle.load(f)
assert_equal(dt, dt_)
def test_mem_array_creation_invalid_specification(self):
# Ticket #196
dt = np.dtype([('x', int), ('y', np.object_)])
# Wrong way
assert_raises(ValueError, np.array, [1, 'object'], dt)
# Correct way
np.array([(1, 'object')], dt)
def test_recarray_single_element(self):
# Ticket #202
a = np.array([1, 2, 3], dtype=np.int32)
b = a.copy()
r = np.rec.array(a, shape=1, formats=['3i4'], names=['d'])
assert_array_equal(a, b)
assert_equal(a, r[0][0])
def test_zero_sized_array_indexing(self):
# Ticket #205
tmp = np.array([])
def index_tmp():
tmp[np.array(10)]
assert_raises(IndexError, index_tmp)
def test_chararray_rstrip(self):
# Ticket #222
x = np.chararray((1,), 5)
x[0] = b'a '
x = x.rstrip()
assert_equal(x[0], b'a')
def test_object_array_shape(self):
# Ticket #239
assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,))
assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2))
assert_equal(np.array([(1, 2), (3, 4)], dtype=object).shape, (2, 2))
assert_equal(np.array([], dtype=object).shape, (0,))
assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0))
assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,))
def test_mem_around(self):
# Ticket #243
x = np.zeros((1,))
y = [0]
decimal = 6
np.around(abs(x-y), decimal) <= 10.0**(-decimal)
def test_character_array_strip(self):
# Ticket #246
x = np.char.array(("x", "x ", "x "))
for c in x:
assert_equal(c, "x")
def test_lexsort(self):
# Lexsort memory error
v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
assert_equal(np.lexsort(v), 0)
def test_lexsort_invalid_sequence(self):
# Issue gh-4123
class BuggySequence:
def __len__(self):
return 4
def __getitem__(self, key):
raise KeyError
assert_raises(KeyError, np.lexsort, BuggySequence())
def test_lexsort_zerolen_custom_strides(self):
# Ticket #14228
xs = np.array([], dtype='i8')
assert np.lexsort((xs,)).shape[0] == 0 # Works
xs.strides = (16,)
assert np.lexsort((xs,)).shape[0] == 0 # Was: MemoryError
def test_lexsort_zerolen_custom_strides_2d(self):
xs = np.array([], dtype='i8')
xs.shape = (0, 2)
xs.strides = (16, 16)
assert np.lexsort((xs,), axis=0).shape[0] == 0
xs.shape = (2, 0)
xs.strides = (16, 16)
assert np.lexsort((xs,), axis=0).shape[0] == 2
def test_lexsort_invalid_axis(self):
assert_raises(np.AxisError, np.lexsort, (np.arange(1),), axis=2)
assert_raises(np.AxisError, np.lexsort, (np.array([]),), axis=1)
assert_raises(np.AxisError, np.lexsort, (np.array(1),), axis=10)
def test_lexsort_zerolen_element(self):
dt = np.dtype([]) # a void dtype with no fields
xs = np.empty(4, dt)
assert np.lexsort((xs,)).shape[0] == xs.shape[0]
def test_pickle_py2_bytes_encoding(self):
# Check that arrays and scalars pickled on Py2 are
# unpickleable on Py3 using encoding='bytes'
test_data = [
# (original, py2_pickle)
(np.unicode_('\u6f2c'),
b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
b"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\n"
b"I0\ntp6\nbS',o\\x00\\x00'\np7\ntp8\nRp9\n."),
(np.array([9e123], dtype=np.float64),
b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\n"
b"p1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\n"
b"p7\n(S'f8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'<'\np11\nNNNI-1\nI-1\n"
b"I0\ntp12\nbI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np13\ntp14\nb."),
(np.array([(9e123,)], dtype=[('name', float)]),
b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n"
b"(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n"
b"(S'V8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'name'\np12\ntp13\n"
b"(dp14\ng12\n(g7\n(S'f8'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'<'\np18\nNNNI-1\n"
b"I-1\nI0\ntp19\nbI0\ntp20\nsI8\nI1\nI0\ntp21\n"
b"bI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np22\ntp23\nb."),
]
for original, data in test_data:
result = pickle.loads(data, encoding='bytes')
assert_equal(result, original)
if isinstance(result, np.ndarray) and result.dtype.names is not None:
for name in result.dtype.names:
assert_(isinstance(name, str))
def test_pickle_dtype(self):
# Ticket #251
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
pickle.dumps(float, protocol=proto)
def test_swap_real(self):
# Ticket #265
assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='>c8').real.max(), 3.0)
assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0)
def test_object_array_from_list(self):
# Ticket #270 (gh-868)
assert_(np.array([1, None, 'A']).shape == (3,))
def test_multiple_assign(self):
# Ticket #273
a = np.zeros((3, 1), int)
a[[1, 2]] = 1
def test_empty_array_type(self):
assert_equal(np.array([]).dtype, np.zeros(0).dtype)
def test_void_copyswap(self):
dt = np.dtype([('one', '<i4'), ('two', '<i4')])
x = np.array((1, 2), dtype=dt)
x = x.byteswap()
assert_(x['one'] > 1 and x['two'] > 2)
def test_method_args(self):
# Make sure methods and functions have same default axis
# keyword and arguments
funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'),
('sometrue', 'any'),
('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),
'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
'round', 'min', 'max', 'argsort', 'sort']
funcs2 = ['compress', 'take', 'repeat']
for func in funcs1:
arr = np.random.rand(8, 7)
arr2 = arr.copy()
if isinstance(func, tuple):
func_meth = func[1]
func = func[0]
else:
func_meth = func
res1 = getattr(arr, func_meth)()
res2 = getattr(np, func)(arr2)
if res1 is None:
res1 = arr
if res1.dtype.kind in 'uib':
assert_((res1 == res2).all(), func)
else:
assert_(abs(res1-res2).max() < 1e-8, func)
for func in funcs2:
arr1 = np.random.rand(8, 7)
arr2 = np.random.rand(8, 7)
res1 = None
if func == 'compress':
arr1 = arr1.ravel()
res1 = getattr(arr2, func)(arr1)
else:
arr2 = (15*arr2).astype(int).ravel()
if res1 is None:
res1 = getattr(arr1, func)(arr2)
res2 = getattr(np, func)(arr1, arr2)
assert_(abs(res1-res2).max() < 1e-8, func)
def test_mem_lexsort_strings(self):
# Ticket #298
lst = ['abc', 'cde', 'fgh']
np.lexsort((lst,))
def test_fancy_index(self):
# Ticket #302
x = np.array([1, 2])[np.array([0])]
assert_equal(x.shape, (1,))
def test_recarray_copy(self):
# Ticket #312
dt = [('x', np.int16), ('y', np.float64)]
ra = np.array([(1, 2.3)], dtype=dt)
rb = np.rec.array(ra, dtype=dt)
rb['x'] = 2.
assert_(ra['x'] != rb['x'])
def test_rec_fromarray(self):
# Ticket #322
x1 = np.array([[1, 2], [3, 4], [5, 6]])
x2 = np.array(['a', 'dd', 'xyz'])
x3 = np.array([1.1, 2, 3])
np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8")
def test_object_array_assign(self):
x = np.empty((2, 2), object)
x.flat[2] = (1, 2, 3)
assert_equal(x.flat[2], (1, 2, 3))
def test_ndmin_float64(self):
# Ticket #324
x = np.array([1, 2, 3], dtype=np.float64)
assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2)
assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2)
def test_ndmin_order(self):
# Issue #465 and related checks
assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous)
assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous)
def test_mem_axis_minimization(self):
# Ticket #327
data = np.arange(5)
data = np.add.outer(data, data)
def test_mem_float_imag(self):
# Ticket #330
np.float64(1.0).imag
def test_dtype_tuple(self):
# Ticket #334
assert_(np.dtype('i4') == np.dtype(('i4', ())))
def test_dtype_posttuple(self):
# Ticket #335
np.dtype([('col1', '()i4')])
def test_numeric_carray_compare(self):
# Ticket #341
assert_equal(np.array(['X'], 'c'), b'X')
def test_string_array_size(self):
# Ticket #342
assert_raises(ValueError,
np.array, [['X'], ['X', 'X', 'X']], '|S1')
def test_dtype_repr(self):
# Ticket #344
dt1 = np.dtype(('uint32', 2))
dt2 = np.dtype(('uint32', (2,)))
assert_equal(dt1.__repr__(), dt2.__repr__())
def test_reshape_order(self):
# Make sure reshape order works.
a = np.arange(6).reshape(2, 3, order='F')
assert_equal(a, [[0, 2, 4], [1, 3, 5]])
a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
b = a[:, 1]
assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]])
def test_reshape_zero_strides(self):
# Issue #380, test reshaping of zero strided arrays
a = np.ones(1)
a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,))
assert_(a.reshape(5, 1).strides[0] == 0)
def test_reshape_zero_size(self):
# GitHub Issue #2700, setting shape failed for 0-sized arrays
a = np.ones((0, 2))
a.shape = (-1, 2)
# Cannot test if NPY_RELAXED_STRIDES_DEBUG changes the strides.
# With NPY_RELAXED_STRIDES_DEBUG the test becomes superfluous.
@pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,
reason="Using relaxed stride debug")
def test_reshape_trailing_ones_strides(self):
# GitHub issue gh-2949, bad strides for trailing ones of new shape
a = np.zeros(12, dtype=np.int32)[::2] # not contiguous
strides_c = (16, 8, 8, 8)
strides_f = (8, 24, 48, 48)
assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c)
assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f)
assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4))
def test_repeat_discont(self):
# Ticket #352
a = np.arange(12).reshape(4, 3)[:, 2]
assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11])
def test_array_index(self):
# Make sure optimization is not called in this case.
a = np.array([1, 2, 3])
a2 = np.array([[1, 2, 3]])
assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)])
def test_object_argmax(self):
a = np.array([1, 2, 3], dtype=object)
assert_(a.argmax() == 2)
def test_recarray_fields(self):
# Ticket #372
dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')])
dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')])
for a in [np.array([(1, 2), (3, 4)], "i4,i4"),
np.rec.array([(1, 2), (3, 4)], "i4,i4"),
np.rec.array([(1, 2), (3, 4)]),
np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"),
np.rec.fromarrays([(1, 2), (3, 4)])]:
assert_(a.dtype in [dt0, dt1])
def test_random_shuffle(self):
# Ticket #374
a = np.arange(5).reshape((5, 1))
b = a.copy()
np.random.shuffle(b)
assert_equal(np.sort(b, axis=0), a)
def test_refcount_vdot(self):
# Changeset #3443
_assert_valid_refcount(np.vdot)
def test_startswith(self):
ca = np.char.array(['Hi', 'There'])
assert_equal(ca.startswith('H'), [True, False])
def test_noncommutative_reduce_accumulate(self):
# Ticket #413
tosubtract = np.arange(5)
todivide = np.array([2.0, 0.5, 0.25])
assert_equal(np.subtract.reduce(tosubtract), -10)
assert_equal(np.divide.reduce(todivide), 16.0)
assert_array_equal(np.subtract.accumulate(tosubtract),
np.array([0, -1, -3, -6, -10]))
assert_array_equal(np.divide.accumulate(todivide),
np.array([2., 4., 16.]))
def test_convolve_empty(self):
# Convolve should raise an error for empty input array.
assert_raises(ValueError, np.convolve, [], [1])
assert_raises(ValueError, np.convolve, [1], [])
def test_multidim_byteswap(self):
# Ticket #449
r = np.array([(1, (0, 1, 2))], dtype="i2,3i2")
assert_array_equal(r.byteswap(),
np.array([(256, (0, 256, 512))], r.dtype))
def test_string_NULL(self):
# Changeset 3557
assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
'a\x00\x0b\x0c')
def test_junk_in_string_fields_of_recarray(self):
# Ticket #483
r = np.array([[b'abc']], dtype=[('var1', '|S20')])
assert_(asbytes(r['var1'][0][0]) == b'abc')
def test_take_output(self):
# Ensure that 'take' honours output parameter.
x = np.arange(12).reshape((3, 4))
a = np.take(x, [0, 2], axis=1)
b = np.zeros_like(a)
np.take(x, [0, 2], axis=1, out=b)
assert_array_equal(a, b)
def test_take_object_fail(self):
# Issue gh-3001
d = 123.
a = np.array([d, 1], dtype=object)
if HAS_REFCOUNT:
ref_d = sys.getrefcount(d)
try:
a.take([0, 100])
except IndexError:
pass
if HAS_REFCOUNT:
assert_(ref_d == sys.getrefcount(d))
def test_array_str_64bit(self):
# Ticket #501
s = np.array([1, np.nan], dtype=np.float64)
with np.errstate(all='raise'):
np.array_str(s) # Should succeed
def test_frompyfunc_endian(self):
# Ticket #503
from math import radians
uradians = np.frompyfunc(radians, 1, 1)
big_endian = np.array([83.4, 83.5], dtype='>f8')
little_endian = np.array([83.4, 83.5], dtype='<f8')
assert_almost_equal(uradians(big_endian).astype(float),
uradians(little_endian).astype(float))
def test_mem_string_arr(self):
# Ticket #514
s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
t = []
np.hstack((t, s))
def test_arr_transpose(self):
# Ticket #516
x = np.random.rand(*(2,)*16)
x.transpose(list(range(16))) # Should succeed
def test_string_mergesort(self):
# Ticket #540
x = np.array(['a']*32)
assert_array_equal(x.argsort(kind='m'), np.arange(32))
def test_argmax_byteorder(self):
# Ticket #546
a = np.arange(3, dtype='>f')
assert_(a[a.argmax()] == a.max())
def test_rand_seed(self):
# Ticket #555
for l in np.arange(4):
np.random.seed(l)
def test_mem_deallocation_leak(self):
# Ticket #562
a = np.zeros(5, dtype=float)
b = np.array(a, dtype=float)
del a, b
def test_mem_on_invalid_dtype(self):
"Ticket #583"
assert_raises(ValueError, np.fromiter, [['12', ''], ['13', '']], str)
def test_dot_negative_stride(self):
# Ticket #588
x = np.array([[1, 5, 25, 125., 625]])
y = np.array([[20.], [160.], [640.], [1280.], [1024.]])
z = y[::-1].copy()
y2 = y[::-1]
assert_equal(np.dot(x, z), np.dot(x, y2))
def test_object_casting(self):
# This used to trigger the object-type version of
# the bitwise_or operation, because float64 -> object
# casting succeeds
def rs():
x = np.ones([484, 286])
y = np.zeros([484, 286])
x |= y
assert_raises(TypeError, rs)
def test_unicode_scalar(self):
# Ticket #600
x = np.array(["DROND", "DROND1"], dtype="U6")
el = x[1]
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
new = pickle.loads(pickle.dumps(el, protocol=proto))
assert_equal(new, el)
def test_arange_non_native_dtype(self):
# Ticket #616
for T in ('>f4', '<f4'):
dt = np.dtype(T)
assert_equal(np.arange(0, dtype=dt).dtype, dt)
assert_equal(np.arange(0.5, dtype=dt).dtype, dt)
assert_equal(np.arange(5, dtype=dt).dtype, dt)
def test_bool_flat_indexing_invalid_nr_elements(self):
s = np.ones(10, dtype=float)
x = np.array((15,), dtype=float)
def ia(x, s, v):
x[(s > 0)] = v
assert_raises(IndexError, ia, x, s, np.zeros(9, dtype=float))
assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float))
# Old special case (different code path):
assert_raises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))
def test_mem_scalar_indexing(self):
# Ticket #603
x = np.array([0], dtype=float)
index = np.array(0, dtype=np.int32)
x[index]
def test_binary_repr_0_width(self):
assert_equal(np.binary_repr(0, width=3), '000')
def test_fromstring(self):
assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
[12, 9, 9])
def test_searchsorted_variable_length(self):
x = np.array(['a', 'aa', 'b'])
y = np.array(['d', 'e'])
assert_equal(x.searchsorted(y), [3, 3])
def test_string_argsort_with_zeros(self):
# Check argsort for strings containing zeros.
x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))
assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))
def test_string_sort_with_zeros(self):
# Check sort for strings containing zeros.
x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
y = np.frombuffer(b"\x00\x01\x00\x02", dtype="|S2")
assert_array_equal(np.sort(x, kind="q"), y)
def test_copy_detection_zero_dim(self):
# Ticket #658
np.indices((0, 3, 4)).T.reshape(-1, 3)
def test_flat_byteorder(self):
# Ticket #657
x = np.arange(10)
assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])
assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))
def test_sign_bit(self):
x = np.array([0, -0.0, 0])
assert_equal(str(np.abs(x)), '[0. 0. 0.]')
def test_flat_index_byteswap(self):
for dt in (np.dtype('<i4'), np.dtype('>i4')):
x = np.array([-1, 0, 1], dtype=dt)
assert_equal(x.flat[0].dtype, x[0].dtype)
def test_copy_detection_corner_case(self):
# Ticket #658
np.indices((0, 3, 4)).T.reshape(-1, 3)
# Cannot test if NPY_RELAXED_STRIDES_DEBUG changes the strides.
# With NPY_RELAXED_STRIDES_DEBUG the test becomes superfluous,
# 0-sized reshape itself is tested elsewhere.
@pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,
reason="Using relaxed stride debug")
def test_copy_detection_corner_case2(self):
# Ticket #771: strides are not set correctly when reshaping 0-sized
# arrays
b = np.indices((0, 3, 4)).T.reshape(-1, 3)
assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
def test_object_array_refcounting(self):
# Ticket #633
if not hasattr(sys, 'getrefcount'):
return
# NB. this is probably CPython-specific
cnt = sys.getrefcount
a = object()
b = object()
c = object()
cnt0_a = cnt(a)
cnt0_b = cnt(b)
cnt0_c = cnt(c)
# -- 0d -> 1-d broadcast slice assignment
arr = np.zeros(5, dtype=np.object_)
arr[:] = a
assert_equal(cnt(a), cnt0_a + 5)
arr[:] = b
assert_equal(cnt(a), cnt0_a)
assert_equal(cnt(b), cnt0_b + 5)
arr[:2] = c
assert_equal(cnt(b), cnt0_b + 3)
assert_equal(cnt(c), cnt0_c + 2)
del arr
# -- 1-d -> 2-d broadcast slice assignment
arr = np.zeros((5, 2), dtype=np.object_)
arr0 = np.zeros(2, dtype=np.object_)
arr0[0] = a
assert_(cnt(a) == cnt0_a + 1)
arr0[1] = b
assert_(cnt(b) == cnt0_b + 1)
arr[:, :] = arr0
assert_(cnt(a) == cnt0_a + 6)
assert_(cnt(b) == cnt0_b + 6)
arr[:, 0] = None
assert_(cnt(a) == cnt0_a + 1)
del arr, arr0
# -- 2-d copying + flattening
arr = np.zeros((5, 2), dtype=np.object_)
arr[:, 0] = a
arr[:, 1] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
arr2 = arr[:, 0].copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.flatten()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
del arr, arr2
# -- concatenate, repeat, take, choose
arr1 = np.zeros((5, 1), dtype=np.object_)
arr2 = np.zeros((5, 1), dtype=np.object_)
arr1[...] = a
arr2[...] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
tmp = np.concatenate((arr1, arr2))
assert_(cnt(a) == cnt0_a + 5 + 5)
assert_(cnt(b) == cnt0_b + 5 + 5)
tmp = arr1.repeat(3, axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3*5)
tmp = arr1.take([1, 2, 3], axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3)
x = np.array([[0], [1], [0], [1], [1]], int)
tmp = x.choose(arr1, arr2)
assert_(cnt(a) == cnt0_a + 5 + 2)
assert_(cnt(b) == cnt0_b + 5 + 3)
del tmp # Avoid pyflakes unused variable warning
def test_mem_custom_float_to_array(self):
# Ticket 702
class MyFloat:
def __float__(self):
return 1.0
tmp = np.atleast_1d([MyFloat()])
tmp.astype(float) # Should succeed
def test_object_array_refcount_self_assign(self):
# Ticket #711
class VictimObject:
deleted = False
def __del__(self):
self.deleted = True
d = VictimObject()
arr = np.zeros(5, dtype=np.object_)
arr[:] = d
del d
arr[:] = arr # refcount of 'd' might hit zero here
assert_(not arr[0].deleted)
arr[:] = arr # trying to induce a segfault by doing it again...
assert_(not arr[0].deleted)
def test_mem_fromiter_invalid_dtype_string(self):
x = [1, 2, 3]
assert_raises(ValueError,
np.fromiter, [xi for xi in x], dtype='S')
def test_reduce_big_object_array(self):
# Ticket #713
oldsize = np.setbufsize(10*16)
a = np.array([None]*161, object)
assert_(not np.any(a))
np.setbufsize(oldsize)
def test_mem_0d_array_index(self):
# Ticket #714
np.zeros(10)[np.array(0)]
def test_nonnative_endian_fill(self):
# Non-native endian arrays were incorrectly filled with scalars
# before r5034.
if sys.byteorder == 'little':
dtype = np.dtype('>i4')
else:
dtype = np.dtype('<i4')
x = np.empty([1], dtype=dtype)
x.fill(1)
assert_equal(x, np.array([1], dtype=dtype))
def test_dot_alignment_sse2(self):
# Test for ticket #551, changeset r5140
x = np.zeros((30, 40))
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
y = pickle.loads(pickle.dumps(x, protocol=proto))
            # y is now typically not aligned on an 8-byte boundary
z = np.ones((1, y.shape[0]))
# This shouldn't cause a segmentation fault:
np.dot(z, y)
def test_astype_copy(self):
# Ticket #788, changeset r5155
# The test data file was generated by scipy.io.savemat.
# The dtype is float64, but the isbuiltin attribute is 0.
data_dir = path.join(path.dirname(__file__), 'data')
filename = path.join(data_dir, "astype_copy.pkl")
with open(filename, 'rb') as f:
xp = pickle.load(f, encoding='latin1')
xpd = xp.astype(np.float64)
assert_((xp.__array_interface__['data'][0] !=
xpd.__array_interface__['data'][0]))
def test_compress_small_type(self):
# Ticket #789, changeset 5217.
# compress with out argument segfaulted if cannot cast safely
import numpy as np
a = np.array([[1, 2], [3, 4]])
b = np.zeros((2, 1), dtype=np.single)
        # compress with an out argument that cannot be safely cast
        # should raise instead of returning successfully
        assert_raises(TypeError, a.compress, [True, False], axis=1, out=b)
def test_attributes(self):
# Ticket #791
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, 'info', '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
assert_(dat.info == 'jubba')
dat.resize((4, 2))
assert_(dat.info == 'jubba')
dat.sort()
assert_(dat.info == 'jubba')
dat.fill(2)
assert_(dat.info == 'jubba')
dat.put([2, 3, 4], [6, 3, 4])
assert_(dat.info == 'jubba')
dat.setfield(4, np.int32, 0)
assert_(dat.info == 'jubba')
dat.setflags()
assert_(dat.info == 'jubba')
assert_(dat.all(1).info == 'jubba')
assert_(dat.any(1).info == 'jubba')
assert_(dat.argmax(1).info == 'jubba')
assert_(dat.argmin(1).info == 'jubba')
assert_(dat.argsort(1).info == 'jubba')
assert_(dat.astype(TestArray).info == 'jubba')
assert_(dat.byteswap().info == 'jubba')
assert_(dat.clip(2, 7).info == 'jubba')
assert_(dat.compress([0, 1, 1]).info == 'jubba')
assert_(dat.conj().info == 'jubba')
assert_(dat.conjugate().info == 'jubba')
assert_(dat.copy().info == 'jubba')
dat2 = TestArray([2, 3, 1, 0], 'jubba')
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
assert_(dat2.choose(choices).info == 'jubba')
assert_(dat.cumprod(1).info == 'jubba')
assert_(dat.cumsum(1).info == 'jubba')
assert_(dat.diagonal().info == 'jubba')
assert_(dat.flatten().info == 'jubba')
assert_(dat.getfield(np.int32, 0).info == 'jubba')
assert_(dat.imag.info == 'jubba')
assert_(dat.max(1).info == 'jubba')
assert_(dat.mean(1).info == 'jubba')
assert_(dat.min(1).info == 'jubba')
assert_(dat.newbyteorder().info == 'jubba')
assert_(dat.prod(1).info == 'jubba')
assert_(dat.ptp(1).info == 'jubba')
assert_(dat.ravel().info == 'jubba')
assert_(dat.real.info == 'jubba')
assert_(dat.repeat(2).info == 'jubba')
assert_(dat.reshape((2, 4)).info == 'jubba')
assert_(dat.round().info == 'jubba')
assert_(dat.squeeze().info == 'jubba')
assert_(dat.std(1).info == 'jubba')
assert_(dat.sum(1).info == 'jubba')
assert_(dat.swapaxes(0, 1).info == 'jubba')
assert_(dat.take([2, 3, 5]).info == 'jubba')
assert_(dat.transpose().info == 'jubba')
assert_(dat.T.info == 'jubba')
assert_(dat.var(1).info == 'jubba')
assert_(dat.view(TestArray).info == 'jubba')
# These methods do not preserve subclasses
assert_(type(dat.nonzero()[0]) is np.ndarray)
assert_(type(dat.nonzero()[1]) is np.ndarray)
def test_recarray_tolist(self):
# Ticket #793, changeset r5215
# Comparisons fail for NaN, so we can't use random memory
# for the test.
buf = np.zeros(40, dtype=np.int8)
a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf)
b = a.tolist()
        assert_(a[0].tolist() == b[0])
        assert_(a[1].tolist() == b[1])
def test_nonscalar_item_method(self):
        # Make sure that .item() fails gracefully when it should
a = np.arange(5)
assert_raises(ValueError, a.item)
def test_char_array_creation(self):
a = np.array('123', dtype='c')
b = np.array([b'1', b'2', b'3'])
assert_equal(a, b)
def test_unaligned_unicode_access(self):
# Ticket #825
for i in range(1, 9):
msg = 'unicode offset: %d chars' % i
t = np.dtype([('a', 'S%d' % i), ('b', 'U2')])
x = np.array([(b'a', u'b')], dtype=t)
assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
def test_sign_for_complex_nan(self):
# Ticket 794.
with np.errstate(invalid='ignore'):
C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
have = np.sign(C)
want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
assert_equal(have, want)
def test_for_equal_names(self):
# Ticket #674
dt = np.dtype([('foo', float), ('bar', float)])
a = np.zeros(10, dt)
b = list(a.dtype.names)
b[0] = "notfoo"
a.dtype.names = b
assert_(a.dtype.names[0] == "notfoo")
assert_(a.dtype.names[1] == "bar")
def test_for_object_scalar_creation(self):
# Ticket #816
a = np.object_()
b = np.object_(3)
b2 = np.object_(3.0)
c = np.object_([4, 5])
d = np.object_([None, {}, []])
assert_(a is None)
assert_(type(b) is int)
assert_(type(b2) is float)
assert_(type(c) is np.ndarray)
assert_(c.dtype == object)
assert_(d.dtype == object)
def test_array_resize_method_system_error(self):
# Ticket #840 - order should be an invalid keyword.
x = np.array([[0, 1], [2, 3]])
assert_raises(TypeError, x.resize, (2, 2), order='C')
def test_for_zero_length_in_choose(self):
"Ticket #882"
a = np.array(1)
assert_raises(ValueError, lambda x: x.choose([]), a)
def test_array_ndmin_overflow(self):
"Ticket #947."
assert_raises(ValueError, lambda: np.array([1], ndmin=33))
def test_void_scalar_with_titles(self):
# No ticket
data = [('john', 4), ('mary', 5)]
dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
arr = np.array(data, dtype=dtype1)
assert_(arr[0][0] == 'john')
assert_(arr[0][1] == 4)
def test_void_scalar_constructor(self):
        # Issue #1550
        # Create test string data, construct a void scalar from the data
        # and assert that the void scalar contains the original data.
test_string = np.array("test")
test_string_void_scalar = np.core.multiarray.scalar(
np.dtype(("V", test_string.dtype.itemsize)), test_string.tobytes())
assert_(test_string_void_scalar.view(test_string.dtype) == test_string)
        # Create a record scalar, construct it from the data and assert
        # that the reconstructed scalar is correct.
test_record = np.ones((), "i,i")
test_record_void_scalar = np.core.multiarray.scalar(
test_record.dtype, test_record.tobytes())
assert_(test_record_void_scalar == test_record)
# Test pickle and unpickle of void and record scalars
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
assert_(pickle.loads(
pickle.dumps(test_string, protocol=proto)) == test_string)
assert_(pickle.loads(
pickle.dumps(test_record, protocol=proto)) == test_record)
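        # Note: np.core.multiarray.scalar(dtype, buffer) is also the
        # constructor that scalar unpickling goes through, so the pickle
        # round-trips above exercise the same reconstruction path.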
@_no_tracing
def test_blasdot_uninitialized_memory(self):
# Ticket #950
for m in [0, 1, 2]:
for n in [0, 1, 2]:
for k in range(3):
# Try to ensure that x->data contains non-zero floats
x = np.array([123456789e199], dtype=np.float64)
if IS_PYPY:
x.resize((m, 0), refcheck=False)
else:
x.resize((m, 0))
y = np.array([123456789e199], dtype=np.float64)
if IS_PYPY:
y.resize((0, n), refcheck=False)
else:
y.resize((0, n))
# `dot` should just return zero (m, n) matrix
z = np.dot(x, y)
assert_(np.all(z == 0))
assert_(z.shape == (m, n))
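        # (x is (m, 0) and y is (0, n): the dot product reduces over an
        # empty axis, so any nonzero output would mean BLAS read the
        # stale nonzero floats left in the buffers.)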
def test_zeros(self):
# Regression test for #1061.
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
with assert_raises_regex(ValueError,
'Maximum allowed dimension exceeded'):
np.empty(sz)
def test_huge_arange(self):
# Regression test for #1062.
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
with assert_raises_regex(ValueError,
'Maximum allowed size exceeded'):
            np.arange(sz)
def test_fromiter_bytes(self):
# Ticket #1058
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_array_from_sequence_scalar_array(self):
# Ticket #1078: segfaults when creating an array with a sequence of
# 0d arrays.
a = np.array((np.ones(2), np.array(2)), dtype=object)
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], np.ones(2))
assert_equal(a[1], np.array(2))
a = np.array(((1,), np.array(1)), dtype=object)
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], (1,))
assert_equal(a[1], np.array(1))
def test_array_from_sequence_scalar_array2(self):
# Ticket #1081: weird array with strange input...
t = np.array([np.array([]), np.array(0, object)], dtype=object)
assert_equal(t.shape, (2,))
assert_equal(t.dtype, np.dtype(object))
def test_array_too_big(self):
# Ticket #1080.
assert_raises(ValueError, np.zeros, [975]*7, np.int8)
assert_raises(ValueError, np.zeros, [26244]*5, np.int8)
def test_dtype_keyerrors_(self):
# Ticket #1106.
dt = np.dtype([('f1', np.uint)])
assert_raises(KeyError, dt.__getitem__, "f2")
assert_raises(IndexError, dt.__getitem__, 1)
assert_raises(TypeError, dt.__getitem__, 0.0)
def test_lexsort_buffer_length(self):
# Ticket #1217, don't segfault.
a = np.ones(100, dtype=np.int8)
b = np.ones(100, dtype=np.int32)
i = np.lexsort((a[::-1], b))
assert_equal(i, np.arange(100, dtype=int))
def test_object_array_to_fixed_string(self):
# Ticket #1235.
a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
b = np.array(a, dtype=(np.str_, 8))
assert_equal(a, b)
c = np.array(a, dtype=(np.str_, 5))
assert_equal(c, np.array(['abcde', 'ijklm']))
d = np.array(a, dtype=(np.str_, 12))
assert_equal(a, d)
e = np.empty((2, ), dtype=(np.str_, 8))
e[:] = a[:]
assert_equal(a, e)
def test_unicode_to_string_cast(self):
# Ticket #1240.
a = np.array([[u'abc', u'\u03a3'],
[u'asdf', u'erw']],
dtype='U')
assert_raises(UnicodeEncodeError, np.array, a, 'S4')
def test_unicode_to_string_cast_error(self):
# gh-15790
a = np.array([u'\x80'] * 129, dtype='U3')
assert_raises(UnicodeEncodeError, np.array, a, 'S')
b = a.reshape(3, 43)[:-1, :-1]
assert_raises(UnicodeEncodeError, np.array, b, 'S')
def test_mixed_string_unicode_array_creation(self):
a = np.array(['1234', u'123'])
assert_(a.itemsize == 16)
a = np.array([u'123', '1234'])
assert_(a.itemsize == 16)
a = np.array(['1234', u'123', '12345'])
assert_(a.itemsize == 20)
a = np.array([u'123', '1234', u'12345'])
assert_(a.itemsize == 20)
a = np.array([u'123', '1234', u'1234'])
assert_(a.itemsize == 16)
def test_misaligned_objects_segfault(self):
# Ticket #1198 and #1267
a1 = np.zeros((10,), dtype='O,c')
a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')
a1['f0'] = a2
repr(a1)
np.argmax(a1['f0'])
a1['f0'][1] = "FOO"
a1['f0'] = "FOO"
np.array(a1['f0'], dtype='S')
np.nonzero(a1['f0'])
a1.sort()
copy.deepcopy(a1)
def test_misaligned_scalars_segfault(self):
# Ticket #1267
s1 = np.array(('a', 'Foo'), dtype='c,O')
s2 = np.array(('b', 'Bar'), dtype='c,O')
s1['f1'] = s2['f1']
s1['f1'] = 'Baz'
def test_misaligned_dot_product_objects(self):
# Ticket #1267
# This didn't require a fix, but it's worth testing anyway, because
# it may fail if .dot stops enforcing the arrays to be BEHAVED
a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')
b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')
np.dot(a['f0'], b['f0'])
def test_byteswap_complex_scalar(self):
# Ticket #1259 and gh-441
for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]:
z = np.array([2.2-1.1j], dtype)
x = z[0] # always native-endian
y = x.byteswap()
if x.dtype.byteorder == z.dtype.byteorder:
# little-endian machine
assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype.newbyteorder()))
else:
# big-endian machine
assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype))
# double check real and imaginary parts:
assert_equal(x.real, y.real.byteswap())
assert_equal(x.imag, y.imag.byteswap())
def test_structured_arrays_with_objects1(self):
# Ticket #1299
stra = 'aaaa'
strb = 'bbbb'
x = np.array([[(0, stra), (1, strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(x[0, 1] == x[0, 0])
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_structured_arrays_with_objects2(self):
# Ticket #1299 second test
stra = 'aaaa'
strb = 'bbbb'
numb = sys.getrefcount(strb)
numa = sys.getrefcount(stra)
x = np.array([[(0, stra), (1, strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(sys.getrefcount(strb) == numb)
assert_(sys.getrefcount(stra) == numa + 2)
def test_duplicate_title_and_name(self):
# Ticket #1254
dtspec = [(('a', 'a'), 'i'), ('b', 'i')]
assert_raises(ValueError, np.dtype, dtspec)
def test_signed_integer_division_overflow(self):
# Ticket #1317.
def test_type(t):
min = np.array([np.iinfo(t).min])
min //= -1
with np.errstate(over="ignore"):
for t in (np.int8, np.int16, np.int32, np.int64, int):
test_type(t)
def test_buffer_hashlib(self):
from hashlib import sha256
x = np.array([1, 2, 3], dtype=np.dtype('<i4'))
assert_equal(sha256(x).hexdigest(), '4636993d3e1da4e9d6b8f87b79e8f7c6d018580d52661950eabc3845c5897a4d')
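        # The digest is reproducible because the buffer protocol exposes
        # the raw little-endian '<i4' bytes,
        # b'\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00'.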
def test_0d_string_scalar(self):
# Bug #1436; the following should succeed
np.asarray('x', '>c')
def test_log1p_compiler_shenanigans(self):
# Check if log1p is behaving on 32 bit intel systems.
assert_(np.isfinite(np.log1p(np.exp2(-53))))
def test_fromiter_comparison(self):
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_fromstring_crash(self):
# Ticket #1345: the following should not cause a crash
with assert_warns(DeprecationWarning):
np.fromstring(b'aa, aa, 1.0', sep=',')
def test_ticket_1539(self):
dtypes = [x for x in np.sctypeDict.values()
if (issubclass(x, np.number)
and not issubclass(x, np.timedelta64))]
a = np.array([], np.bool_) # not x[0] because it is unordered
failures = []
for x in dtypes:
b = a.astype(x)
for y in dtypes:
c = a.astype(y)
try:
np.dot(b, c)
except TypeError:
failures.append((x, y))
if failures:
raise AssertionError("Failures: %r" % failures)
def test_ticket_1538(self):
x = np.finfo(np.float32)
for name in 'eps epsneg max min resolution tiny'.split():
assert_equal(type(getattr(x, name)), np.float32,
err_msg=name)
def test_ticket_1434(self):
# Check that the out= argument in var and std has an effect
data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9)))
out = np.zeros((3,))
ret = data.var(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.var(axis=1))
ret = data.std(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.std(axis=1))
def test_complex_nan_maximum(self):
cnan = complex(0, np.nan)
assert_equal(np.maximum(1, cnan), cnan)
def test_subclass_int_tuple_assignment(self):
# ticket #1563
class Subclass(np.ndarray):
def __new__(cls, i):
return np.ones((i,)).view(cls)
x = Subclass(5)
x[(0,)] = 2 # shouldn't raise an exception
assert_equal(x[0], 2)
def test_ufunc_no_unnecessary_views(self):
# ticket #1548
class Subclass(np.ndarray):
pass
x = np.array([1, 2, 3]).view(Subclass)
y = np.add(x, x, x)
assert_equal(id(x), id(y))
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_take_refcount(self):
# ticket #939
a = np.arange(16, dtype=float)
a.shape = (4, 4)
lut = np.ones((5 + 3, 4), float)
rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
c1 = sys.getrefcount(rgba)
try:
lut.take(a, axis=0, mode='clip', out=rgba)
except TypeError:
pass
c2 = sys.getrefcount(rgba)
assert_equal(c1, c2)
def test_fromfile_tofile_seeks(self):
# On Python 3, tofile/fromfile used to get (#1610) the Python
# file handle out of sync
f0 = tempfile.NamedTemporaryFile()
f = f0.file
f.write(np.arange(255, dtype='u1').tobytes())
f.seek(20)
ret = np.fromfile(f, count=4, dtype='u1')
assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))
assert_equal(f.tell(), 24)
f.seek(40)
np.array([1, 2, 3], dtype='u1').tofile(f)
assert_equal(f.tell(), 43)
f.seek(40)
data = f.read(3)
assert_equal(data, b"\x01\x02\x03")
f.seek(80)
f.read(4)
data = np.fromfile(f, dtype='u1', count=4)
assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))
f.close()
def test_complex_scalar_warning(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_warns(np.ComplexWarning, float, x)
with suppress_warnings() as sup:
sup.filter(np.ComplexWarning)
assert_equal(float(x), float(x.real))
def test_complex_scalar_complex_cast(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_equal(complex(x), 1+2j)
def test_complex_boolean_cast(self):
# Ticket #2218
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp)
assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool))
assert_(np.any(x))
assert_(np.all(x[1:]))
def test_uint_int_conversion(self):
x = 2**64 - 1
assert_equal(int(np.uint64(x)), x)
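        # 2**64 - 1 does not fit in int64, so this verifies that the
        # uint64 -> Python int conversion takes the unsigned path
        # instead of wrapping to -1.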
def test_duplicate_field_names_assign(self):
ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8')
ra.dtype.names = ('f1', 'f2')
repr(ra) # should not cause a segmentation fault
assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))
def test_eq_string_and_object_array(self):
# From e-mail thread "__eq__ with str and object" (Keith Goodman)
a1 = np.array(['a', 'b'], dtype=object)
a2 = np.array(['a', 'c'])
assert_array_equal(a1 == a2, [True, False])
assert_array_equal(a2 == a1, [True, False])
def test_nonzero_byteswap(self):
a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)
a.dtype = np.float32
assert_equal(a.nonzero()[0], [1])
a = a.byteswap().newbyteorder()
assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap
def test_find_common_type_boolean(self):
# Ticket #1695
assert_(np.find_common_type([], ['?', '?']) == '?')
def test_empty_mul(self):
a = np.array([1.])
a[1:1] *= 2
assert_equal(a, [1.])
def test_array_side_effect(self):
# The second use of itemsize was throwing an exception because in
# ctors.c, discover_itemsize was calling PyObject_Length without
# checking the return code. This failed to get the length of the
# number 2, and the exception hung around until something checked
# PyErr_Occurred() and returned an error.
assert_equal(np.dtype('S10').itemsize, 10)
np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_)
assert_equal(np.dtype('S10').itemsize, 10)
def test_any_float(self):
# all and any for floats
a = np.array([0.1, 0.9])
assert_(np.any(a))
assert_(np.all(a))
def test_large_float_sum(self):
a = np.arange(10000, dtype='f')
assert_equal(a.sum(dtype='d'), a.astype('d').sum())
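        # dtype='d' forces a float64 accumulator, so both sides should
        # agree exactly; a plain float32 reduction of 10000 terms would
        # accumulate rounding error instead.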
def test_ufunc_casting_out(self):
a = np.array(1.0, dtype=np.float32)
b = np.array(1.0, dtype=np.float64)
c = np.array(1.0, dtype=np.float32)
np.add(a, b, out=c)
assert_equal(c, 2.0)
def test_array_scalar_contiguous(self):
# Array scalars are both C and Fortran contiguous
assert_(np.array(1.0).flags.c_contiguous)
assert_(np.array(1.0).flags.f_contiguous)
assert_(np.array(np.float32(1.0)).flags.c_contiguous)
assert_(np.array(np.float32(1.0)).flags.f_contiguous)
def test_squeeze_contiguous(self):
# Similar to GitHub issue #387
a = np.zeros((1, 2)).squeeze()
b = np.zeros((2, 2, 2), order='F')[:, :, ::2].squeeze()
assert_(a.flags.c_contiguous)
assert_(a.flags.f_contiguous)
assert_(b.flags.f_contiguous)
def test_squeeze_axis_handling(self):
# Issue #10779
# Ensure proper handling of objects
# that don't support axis specification
# when squeezing
class OldSqueeze(np.ndarray):
def __new__(cls,
input_array):
obj = np.asarray(input_array).view(cls)
return obj
# it is perfectly reasonable that prior
# to numpy version 1.7.0 a subclass of ndarray
# might have been created that did not expect
# squeeze to have an axis argument
# NOTE: this example is somewhat artificial;
# it is designed to simulate an old API
# expectation to guard against regression
def squeeze(self):
return super().squeeze()
oldsqueeze = OldSqueeze(np.array([[1],[2],[3]]))
# if no axis argument is specified the old API
# expectation should give the correct result
assert_equal(np.squeeze(oldsqueeze),
np.array([1,2,3]))
# likewise, axis=None should work perfectly well
# with the old API expectation
assert_equal(np.squeeze(oldsqueeze, axis=None),
np.array([1,2,3]))
# however, specification of any particular axis
# should raise a TypeError in the context of the
# old API specification, even when using a valid
# axis specification like 1 for this array
with assert_raises(TypeError):
# this would silently succeed for array
# subclasses / objects that did not support
# squeeze axis argument handling before fixing
# Issue #10779
np.squeeze(oldsqueeze, axis=1)
# check for the same behavior when using an invalid
# axis specification -- in this case axis=0 does not
# have size 1, but the priority should be to raise
# a TypeError for the axis argument and NOT a
# ValueError for squeezing a non-empty dimension
with assert_raises(TypeError):
np.squeeze(oldsqueeze, axis=0)
# the new API knows how to handle the axis
# argument and will return a ValueError if
# attempting to squeeze an axis that is not
# of length 1
with assert_raises(ValueError):
np.squeeze(np.array([[1],[2],[3]]), axis=0)
def test_reduce_contiguous(self):
# GitHub issue #387
a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1))
b = np.add.reduce(np.zeros((2, 1, 2)), 1)
assert_(a.flags.c_contiguous)
assert_(a.flags.f_contiguous)
assert_(b.flags.c_contiguous)
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
def test_object_array_self_reference(self):
# Object arrays with references to themselves can cause problems
a = np.array(0, dtype=object)
a[()] = a
assert_raises(RecursionError, int, a)
assert_raises(RecursionError, float, a)
a[()] = None
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
def test_object_array_circular_reference(self):
# Test the same for a circular reference.
a = np.array(0, dtype=object)
b = np.array(0, dtype=object)
a[()] = b
b[()] = a
assert_raises(RecursionError, int, a)
# NumPy has no tp_traverse currently, so circular references
# cannot be detected. So resolve it:
a[()] = None
# This was causing a to become like the above
a = np.array(0, dtype=object)
a[...] += 1
assert_equal(a, 1)
def test_object_array_nested(self):
# but is fine with a reference to a different array
a = np.array(0, dtype=object)
b = np.array(0, dtype=object)
a[()] = b
assert_equal(int(a), int(0))
assert_equal(float(a), float(0))
def test_object_array_self_copy(self):
# An object array being copied into itself DECREF'ed before INCREF'ing
# causing segmentation faults (gh-3787)
a = np.array(object(), dtype=object)
np.copyto(a, a)
if HAS_REFCOUNT:
assert_(sys.getrefcount(a[()]) == 2)
a[()].__class__ # will segfault if object was deleted
def test_zerosize_accumulate(self):
"Ticket #1733"
x = np.array([[42, 0]], dtype=np.uint32)
assert_equal(np.add.accumulate(x[:-1, 0]), [])
def test_objectarray_setfield(self):
# Setfield should not overwrite Object fields with non-Object data
x = np.array([1, 2, 3], dtype=object)
assert_raises(TypeError, x.setfield, 4, np.int32, 0)
def test_setting_rank0_string(self):
"Ticket #1736"
s1 = b"hello1"
s2 = b"hello2"
a = np.zeros((), dtype="S10")
a[()] = s1
assert_equal(a, np.array(s1))
a[()] = np.array(s2)
assert_equal(a, np.array(s2))
a = np.zeros((), dtype='f4')
a[()] = 3
assert_equal(a, np.array(3))
a[()] = np.array(4)
assert_equal(a, np.array(4))
def test_string_astype(self):
"Ticket #1748"
s1 = b'black'
s2 = b'white'
s3 = b'other'
a = np.array([[s1], [s2], [s3]])
assert_equal(a.dtype, np.dtype('S5'))
b = a.astype(np.dtype('S0'))
assert_equal(b.dtype, np.dtype('S5'))
def test_ticket_1756(self):
# Ticket #1756
s = b'0123456789abcdef'
a = np.array([s]*5)
for i in range(1, 17):
a1 = np.array(a, "|S%d" % i)
a2 = np.array([s[:i]]*5)
assert_equal(a1, a2)
def test_fields_strides(self):
"gh-2355"
r = np.frombuffer(b'abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')
assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])
assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])
assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])
assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)
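        # i.e. field access and slicing commute: both orders yield views
        # onto the same bytes with identical strides.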
def test_alignment_update(self):
# Check that alignment flag is updated on stride setting
a = np.arange(10)
assert_(a.flags.aligned)
a.strides = 3
assert_(not a.flags.aligned)
def test_ticket_1770(self):
"Should not segfault on python 3k"
import numpy as np
try:
a = np.zeros((1,), dtype=[('f1', 'f')])
a['f1'] = 1
a['f2'] = 1
except ValueError:
pass
except Exception:
raise AssertionError
def test_ticket_1608(self):
"x.flat shouldn't modify data"
x = np.array([[1, 2], [3, 4]]).T
np.array(x.flat)
assert_equal(x, [[1, 3], [2, 4]])
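        # x is a non-contiguous transposed view; materializing x.flat
        # must copy the iterated values rather than mutate x's buffer.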
def test_pickle_string_overwrite(self):
import re
data = np.array([1], dtype='b')
blob = pickle.dumps(data, protocol=1)
data = pickle.loads(blob)
# Check that loads does not clobber interned strings
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
data[0] = 0xbb
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
def test_pickle_bytes_overwrite(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
data = np.array([1], dtype='b')
data = pickle.loads(pickle.dumps(data, protocol=proto))
data[0] = 0xdd
bytestring = "\x01 ".encode('ascii')
assert_equal(bytestring[0:1], '\x01'.encode('ascii'))
def test_pickle_py2_array_latin1_hack(self):
# Check that unpickling hacks in Py3 that support
# encoding='latin1' work correctly.
# Python2 output for pickle.dumps(numpy.array([129], dtype='b'))
data = (b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n"
b"tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n"
b"I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n"
b"p13\ntp14\nb.")
# This should work:
result = pickle.loads(data, encoding='latin1')
assert_array_equal(result, np.array([129], dtype='b'))
# Should not segfault:
assert_raises(Exception, pickle.loads, data, encoding='koi8-r')
def test_pickle_py2_scalar_latin1_hack(self):
        # Check that the scalar unpickling hack in Py3 that supports
        # encoding='latin1' works correctly.
# Python2 output for pickle.dumps(...)
datas = [
# (original, python2_pickle, koi8r_validity)
(np.unicode_('\u6bd2'),
(b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
b"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n"
b"tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."),
'invalid'),
(np.float64(9e123),
(b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n"
b"p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n"
b"bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."),
'invalid'),
(np.bytes_(b'\x9c'), # different 8-bit code point in KOI8-R vs latin1
(b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n"
b"I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n"
b"tp8\nRp9\n."),
'different'),
]
for original, data, koi8r_validity in datas:
result = pickle.loads(data, encoding='latin1')
assert_equal(result, original)
            # Decoding under a non-latin1 encoding (e.g. KOI8-R) can
            # produce bad results, but should not segfault.
if koi8r_validity == 'different':
                # Unicode code points happen to lie within latin1,
                # but are different in koi8-r, resulting in silent
                # bogus results
result = pickle.loads(data, encoding='koi8-r')
assert_(result != original)
            elif koi8r_validity == 'invalid':
                # Unicode code points outside latin1, so decoding
                # raises an encoding exception
assert_raises(ValueError, pickle.loads, data, encoding='koi8-r')
else:
raise ValueError(koi8r_validity)
def test_structured_type_to_object(self):
a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8')
a_obj = np.empty((2,), dtype=object)
a_obj[0] = (0, 1)
a_obj[1] = (3, 2)
# astype records -> object
assert_equal(a_rec.astype(object), a_obj)
# '=' records -> object
b = np.empty_like(a_obj)
b[...] = a_rec
assert_equal(b, a_obj)
# '=' object -> records
b = np.empty_like(a_rec)
b[...] = a_obj
assert_equal(b, a_rec)
def test_assign_obj_listoflists(self):
# Ticket # 1870
# The inner list should get assigned to the object elements
a = np.zeros(4, dtype=object)
b = a.copy()
a[0] = [1]
a[1] = [2]
a[2] = [3]
a[3] = [4]
b[...] = [[1], [2], [3], [4]]
assert_equal(a, b)
# The first dimension should get broadcast
a = np.zeros((2, 2), dtype=object)
a[...] = [[1, 2]]
assert_equal(a, [[1, 2], [1, 2]])
@pytest.mark.slow_pypy
def test_memoryleak(self):
# Ticket #1917 - ensure that array data doesn't leak
for i in range(1000):
# 100MB times 1000 would give 100GB of memory usage if it leaks
a = np.empty((100000000,), dtype='i1')
del a
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_ufunc_reduce_memoryleak(self):
a = np.arange(6)
acnt = sys.getrefcount(a)
np.add.reduce(a)
assert_equal(sys.getrefcount(a), acnt)
def test_search_sorted_invalid_arguments(self):
# Ticket #2021, should not segfault.
x = np.arange(0, 4, dtype='datetime64[D]')
assert_raises(TypeError, x.searchsorted, 1)
def test_string_truncation(self):
# Ticket #1990 - Data can be truncated in creation of an array from a
# mixed sequence of numeric values and strings (gh-2583)
for val in [True, 1234, 123.4, complex(1, 234)]:
for tostr, dtype in [(asunicode, "U"), (asbytes, "S")]:
b = np.array([val, tostr('xx')], dtype=dtype)
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xx'), val], dtype=dtype)
assert_equal(tostr(b[1]), tostr(val))
# test also with longer strings
b = np.array([val, tostr('xxxxxxxxxx')], dtype=dtype)
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xxxxxxxxxx'), val], dtype=dtype)
assert_equal(tostr(b[1]), tostr(val))
def test_string_truncation_ucs2(self):
# Ticket #2081. Python compiled with two byte unicode
# can lead to truncation if itemsize is not properly
# adjusted for NumPy's four byte unicode.
a = np.array(['abcd'])
assert_equal(a.dtype.itemsize, 16)
def test_unique_stable(self):
        # Ticket #2063: must always choose a stable sort for argsort to
        # get consistent results
v = np.array(([0]*5 + [1]*6 + [2]*6)*4)
res = np.unique(v, return_index=True)
        tgt = (np.array([0, 1, 2]), np.array([0, 5, 11]))
assert_equal(res, tgt)
def test_unicode_alloc_dealloc_match(self):
# Ticket #1578, the mismatch only showed up when running
# python-debug for python versions >= 2.7, and then as
# a core dump and error message.
a = np.array(['abc'], dtype=np.unicode_)[0]
del a
def test_refcount_error_in_clip(self):
# Ticket #1588
a = np.zeros((2,), dtype='>i2').clip(min=0)
x = a + a
# This used to segfault:
y = str(x)
# Check the final string:
assert_(y == "[0 0]")
def test_searchsorted_wrong_dtype(self):
# Ticket #2189, it used to segfault, so we check that it raises the
# proper exception.
a = np.array([('a', 1)], dtype='S1, int')
assert_raises(TypeError, np.searchsorted, a, 1.2)
# Ticket #2066, similar problem:
dtype = np.format_parser(['i4', 'i4'], [], [])
a = np.recarray((2,), dtype)
a[...] = [(1, 2), (3, 4)]
assert_raises(TypeError, np.searchsorted, a, 1)
def test_complex64_alignment(self):
# Issue gh-2668 (trac 2076), segfault on sparc due to misalignment
dtt = np.complex64
arr = np.arange(10, dtype=dtt)
# 2D array
arr2 = np.reshape(arr, (2, 5))
# Fortran write followed by (C or F) read caused bus error
data_str = arr2.tobytes('F')
data_back = np.ndarray(arr2.shape,
arr2.dtype,
buffer=data_str,
order='F')
assert_array_equal(arr2, data_back)
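        # The bytes buffer makes no alignment promises for the 8-byte
        # complex64 items, so the reads here may be misaligned; that is
        # the situation that used to bus-error on sparc.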
def test_structured_count_nonzero(self):
arr = np.array([0, 1]).astype('i4, (2)i4')[:1]
count = np.count_nonzero(arr)
assert_equal(count, 0)
def test_copymodule_preserves_f_contiguity(self):
a = np.empty((2, 2), order='F')
b = copy.copy(a)
c = copy.deepcopy(a)
assert_(b.flags.fortran)
assert_(b.flags.f_contiguous)
assert_(c.flags.fortran)
assert_(c.flags.f_contiguous)
def test_fortran_order_buffer(self):
import numpy as np
a = np.array([['Hello', 'Foob']], dtype='U5', order='F')
arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a)
arr2 = np.array([[[u'H', u'e', u'l', u'l', u'o'],
[u'F', u'o', u'o', u'b', u'']]])
assert_array_equal(arr, arr2)
def test_assign_from_sequence_error(self):
# Ticket #4024.
arr = np.array([1, 2, 3])
assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9])
arr.__setitem__(slice(None), [9])
assert_equal(arr, [9, 9, 9])
def test_format_on_flex_array_element(self):
# Ticket #4369.
dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')])
arr = np.array([('2000-01-01', 1)], dt)
formatted = '{0}'.format(arr[0])
assert_equal(formatted, str(arr[0]))
def test_deepcopy_on_0d_array(self):
# Ticket #3311.
arr = np.array(3)
arr_cp = copy.deepcopy(arr)
assert_equal(arr, arr_cp)
assert_equal(arr.shape, arr_cp.shape)
assert_equal(int(arr), int(arr_cp))
assert_(arr is not arr_cp)
assert_(isinstance(arr_cp, type(arr)))
def test_deepcopy_F_order_object_array(self):
# Ticket #6456.
a = {'a': 1}
b = {'b': 2}
arr = np.array([[a, b], [a, b]], order='F')
arr_cp = copy.deepcopy(arr)
assert_equal(arr, arr_cp)
assert_(arr is not arr_cp)
# Ensure that we have actually copied the item.
assert_(arr[0, 1] is not arr_cp[1, 1])
# Ensure we are allowed to have references to the same object.
assert_(arr[0, 1] is arr[1, 1])
# Check the references hold for the copied objects.
assert_(arr_cp[0, 1] is arr_cp[1, 1])
def test_deepcopy_empty_object_array(self):
# Ticket #8536.
# Deepcopy should succeed
a = np.array([], dtype=object)
b = copy.deepcopy(a)
assert_(a.shape == b.shape)
def test_bool_subscript_crash(self):
# gh-4494
c = np.rec.array([(1, 2, 3), (4, 5, 6)])
masked = c[np.array([True, False])]
base = masked.base
del masked, c
base.dtype
def test_richcompare_crash(self):
# gh-4613
import operator as op
# dummy class where __array__ throws exception
class Foo:
__array_priority__ = 1002
def __array__(self, *args, **kwargs):
raise Exception()
rhs = Foo()
lhs = np.array(1)
for f in [op.lt, op.le, op.gt, op.ge]:
assert_raises(TypeError, f, lhs, rhs)
assert_(not op.eq(lhs, rhs))
assert_(op.ne(lhs, rhs))
def test_richcompare_scalar_and_subclass(self):
# gh-4709
class Foo(np.ndarray):
def __eq__(self, other):
return "OK"
x = np.array([1, 2, 3]).view(Foo)
assert_equal(10 == x, "OK")
assert_equal(np.int32(10) == x, "OK")
assert_equal(np.array([10]) == x, "OK")
def test_pickle_empty_string(self):
# gh-3926
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
test_string = np.string_('')
assert_equal(pickle.loads(
pickle.dumps(test_string, protocol=proto)), test_string)
def test_frompyfunc_many_args(self):
# gh-5672
def passer(*args):
pass
assert_raises(ValueError, np.frompyfunc, passer, 32, 1)
def test_repeat_broadcasting(self):
# gh-5743
a = np.arange(60).reshape(3, 4, 5)
for axis in chain(range(-a.ndim, a.ndim), [None]):
assert_equal(a.repeat(2, axis=axis), a.repeat([2], axis=axis))
def test_frompyfunc_nout_0(self):
# gh-2014
def f(x):
x[0], x[-1] = x[-1], x[0]
uf = np.frompyfunc(f, 1, 0)
a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]], dtype=object)
assert_equal(uf(a), ())
expected = np.array([[3, 2, 1], [5, 4], [9, 7, 8, 6]], dtype=object)
assert_array_equal(a, expected)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_leak_in_structured_dtype_comparison(self):
# gh-6250
recordtype = np.dtype([('a', np.float64),
('b', np.int32),
('d', (str, 5))])
# Simple case
a = np.zeros(2, dtype=recordtype)
for i in range(100):
a == a
assert_(sys.getrefcount(a) < 10)
# The case in the bug report.
before = sys.getrefcount(a)
u, v = a[0], a[1]
u == v
del u, v
gc.collect()
after = sys.getrefcount(a)
assert_equal(before, after)
def test_empty_percentile(self):
# gh-6530 / gh-6553
assert_array_equal(np.percentile(np.arange(10), []), np.array([]))
def test_void_compare_segfault(self):
# gh-6922. The following should not segfault
a = np.ones(3, dtype=[('object', 'O'), ('int', '<i2')])
a.sort()
def test_reshape_size_overflow(self):
# gh-7455
a = np.ones(20)[::2]
if np.dtype(np.intp).itemsize == 8:
# 64 bit. The following are the prime factors of 2**63 + 5,
# plus a leading 2, so when multiplied together as int64,
# the result overflows to a total size of 10.
new_shape = (2, 13, 419, 691, 823, 2977518503)
else:
# 32 bit. The following are the prime factors of 2**31 + 5,
# plus a leading 2, so when multiplied together as int32,
# the result overflows to a total size of 10.
new_shape = (2, 7, 7, 43826197)
assert_raises(ValueError, a.reshape, new_shape)
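        # In both branches the factors multiply to 2**N + 10 for the
        # native N-bit intp (e.g. 2 * (2**63 + 5) == 2**64 + 10), which
        # wraps around to 10; reshape must detect the overflow rather
        # than accept the bogus total size of 10.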
@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
reason="PyPy bug in error formatting")
def test_invalid_structured_dtypes(self):
# gh-2865
# mapping python objects to other dtypes
assert_raises(ValueError, np.dtype, ('O', [('name', 'i8')]))
assert_raises(ValueError, np.dtype, ('i8', [('name', 'O')]))
assert_raises(ValueError, np.dtype,
('i8', [('name', [('name', 'O')])]))
assert_raises(ValueError, np.dtype, ([('a', 'i4'), ('b', 'i4')], 'O'))
assert_raises(ValueError, np.dtype, ('i8', 'O'))
# wrong number/type of tuple elements in dict
assert_raises(ValueError, np.dtype,
('i', {'name': ('i', 0, 'title', 'oops')}))
assert_raises(ValueError, np.dtype,
('i', {'name': ('i', 'wrongtype', 'title')}))
# disallowed as of 1.13
assert_raises(ValueError, np.dtype,
([('a', 'O'), ('b', 'O')], [('c', 'O'), ('d', 'O')]))
# allowed as a special case due to existing use, see gh-2798
a = np.ones(1, dtype=('O', [('name', 'O')]))
assert_equal(a[0], 1)
# In particular, the above union dtype (and union dtypes in general)
# should mainly behave like the main (object) dtype:
assert a[0] is a.item()
assert type(a[0]) is int
def test_correct_hash_dict(self):
# gh-8887 - __hash__ would be None despite tp_hash being set
all_types = set(np.sctypeDict.values()) - {np.void}
for t in all_types:
val = t()
try:
hash(val)
except TypeError as e:
assert_equal(t.__hash__, None)
else:
                assert_(t.__hash__ is not None)
def test_scalar_copy(self):
scalar_types = set(np.sctypeDict.values())
values = {
np.void: b"a",
np.bytes_: b"a",
np.unicode_: "a",
np.datetime64: "2017-08-25",
}
for sctype in scalar_types:
item = sctype(values.get(sctype, 1))
item2 = copy.copy(item)
assert_equal(item, item2)
def test_void_item_memview(self):
va = np.zeros(10, 'V4')
x = va[:1].item()
va[0] = b'\xff\xff\xff\xff'
del va
assert_equal(x, b'\x00\x00\x00\x00')
def test_void_getitem(self):
# Test fix for gh-11668.
assert_(np.array([b'a'], 'V1').astype('O') == b'a')
assert_(np.array([b'ab'], 'V2').astype('O') == b'ab')
assert_(np.array([b'abc'], 'V3').astype('O') == b'abc')
assert_(np.array([b'abcd'], 'V4').astype('O') == b'abcd')
def test_structarray_title(self):
        # The following used to segfault on pypy, due to NPY_TITLE_KEY
        # not working properly and resulting in a double-decref of the
        # structured array field items:
# See: https://bitbucket.org/pypy/pypy/issues/2789
for j in range(5):
structure = np.array([1], dtype=[(('x', 'X'), np.object_)])
structure[0]['x'] = np.array([2])
gc.collect()
def test_dtype_scalar_squeeze(self):
# gh-11384
values = {
'S': b"a",
'M': "2018-06-20",
}
for ch in np.typecodes['All']:
if ch in 'O':
continue
sctype = np.dtype(ch).type
scvalue = sctype(values.get(ch, 3))
for axis in [None, ()]:
squeezed = scvalue.squeeze(axis=axis)
assert_equal(squeezed, scvalue)
assert_equal(type(squeezed), type(scvalue))
def test_field_access_by_title(self):
# gh-11507
s = 'Some long field name'
if HAS_REFCOUNT:
base = sys.getrefcount(s)
t = np.dtype([((s, 'f1'), np.float64)])
data = np.zeros(10, t)
for i in range(10):
str(data[['f1']])
if HAS_REFCOUNT:
assert_(base <= sys.getrefcount(s))
@pytest.mark.parametrize('val', [
# arrays and scalars
np.ones((10, 10), dtype='int32'),
np.uint64(10),
])
@pytest.mark.parametrize('protocol',
range(2, pickle.HIGHEST_PROTOCOL + 1)
)
def test_pickle_module(self, protocol, val):
# gh-12837
s = pickle.dumps(val, protocol)
assert b'_multiarray_umath' not in s
if protocol == 5 and len(val.shape) > 0:
# unpickling ndarray goes through _frombuffer for protocol 5
assert b'numpy.core.numeric' in s
else:
assert b'numpy.core.multiarray' in s
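        # i.e. pickles should reference the stable public paths
        # (numpy.core.multiarray / numpy.core.numeric) rather than the
        # private _multiarray_umath extension, keeping them loadable
        # across numpy builds.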
def test_object_casting_errors(self):
# gh-11993 update to ValueError (see gh-16909), since strings can in
# principle be converted to complex, but this string cannot.
arr = np.array(['AAAAA', 18465886.0, 18465886.0], dtype=object)
assert_raises(ValueError, arr.astype, 'c8')
    def test_ediff1d_casting(self):
# gh-12711
x = np.array([1, 2, 4, 7, 0], dtype=np.int16)
res = np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
assert_equal(res, [-99, 1, 2, 3, -7, 88, 99])
        # The use of safe casting means that 1<<20 is cast unsafely; an
        # error may be better, but currently there is no mechanism for it.
res = np.ediff1d(x, to_begin=(1<<20), to_end=(1<<20))
assert_equal(res, [0, 1, 2, 3, -7, 0])
def test_pickle_datetime64_array(self):
# gh-12745 (would fail with pickle5 installed)
d = np.datetime64('2015-07-04 12:59:59.50', 'ns')
arr = np.array([d])
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
dumped = pickle.dumps(arr, protocol=proto)
assert_equal(pickle.loads(dumped), arr)
def test_bad_array_interface(self):
class T:
__array_interface__ = {}
with assert_raises(ValueError):
np.array([T()])
def test_2d__array__shape(self):
class T:
def __array__(self):
                return np.ndarray(shape=(0, 0))
# Make sure __array__ is used instead of Sequence methods.
def __iter__(self):
return iter([])
def __getitem__(self, idx):
raise AssertionError("__getitem__ was called")
def __len__(self):
return 0
t = T()
# gh-13659, would raise in broadcasting [x=t for x in result]
arr = np.array([t])
assert arr.shape == (1, 0, 0)
@pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')
def test_to_ctypes(self):
        # gh-14214
arr = np.zeros((2 ** 31 + 1,), 'b')
assert arr.size * arr.itemsize > 2 ** 31
c_arr = np.ctypeslib.as_ctypes(arr)
assert_equal(c_arr._length_, arr.size)
def test_complex_conversion_error(self):
# gh-17068
with pytest.raises(TypeError, match=r"Unable to convert dtype.*"):
complex(np.array("now", np.datetime64))
def test__array_interface__descr(self):
# gh-17068
dt = np.dtype(dict(names=['a', 'b'],
offsets=[0, 0],
formats=[np.int64, np.int64]))
descr = np.array((1, 1), dtype=dt).__array_interface__['descr']
assert descr == [('', '|V8')] # instead of [(b'', '|V8')]
@pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')
@requires_memory(free_bytes=9e9)
def test_dot_big_stride(self):
# gh-17111
# blas stride = stride//itemsize > int32 max
int32_max = np.iinfo(np.int32).max
n = int32_max + 3
a = np.empty([n], dtype=np.float32)
b = a[::n-1]
b[...] = 1
assert b.strides[0] > int32_max * b.dtype.itemsize
assert np.dot(b, b) == 2.0
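        # b holds 2 elements spaced n - 1 apart, so its byte stride
        # exceeds INT32_MAX and cannot be passed through a 32-bit BLAS
        # interface without a fallback path.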
def test_frompyfunc_name(self):
        # Name conversion was failing for Python 3 strings, resulting in
        # the default '?' name. Also test utf-8 encoding using a
        # non-ascii name.
def cassé(x):
return x
f = np.frompyfunc(cassé, 1, 1)
assert str(f) == "<ufunc 'cassé (vectorized)'>"
@pytest.mark.parametrize("operation", [
'add', 'subtract', 'multiply', 'floor_divide',
'conjugate', 'fmod', 'square', 'reciprocal',
'power', 'absolute', 'negative', 'positive',
'greater', 'greater_equal', 'less',
'less_equal', 'equal', 'not_equal', 'logical_and',
'logical_not', 'logical_or', 'bitwise_and', 'bitwise_or',
'bitwise_xor', 'invert', 'left_shift', 'right_shift',
'gcd', 'lcm'
]
)
@pytest.mark.parametrize("order", [
('b->', 'B->'),
('h->', 'H->'),
('i->', 'I->'),
('l->', 'L->'),
('q->', 'Q->'),
]
)
def test_ufunc_order(self, operation, order):
# gh-18075
# Ensure signed types before unsigned
def get_idx(string, str_lst):
for i, s in enumerate(str_lst):
if string in s:
return i
raise ValueError(f"{string} not in list")
types = getattr(np, operation).types
        assert get_idx(order[0], types) < get_idx(order[1], types), (
            f"Unexpected types order of ufunc in {operation} "
            f"for {order}. Possible fix: Use signed before unsigned "
            "in generate_umath.py")
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_hashtable.py
import pytest
import random
from numpy.core._multiarray_tests import identityhash_tester
@pytest.mark.parametrize("key_length", [1, 3, 6])
@pytest.mark.parametrize("length", [1, 16, 2000])
def test_identity_hashtable(key_length, length):
    # use a 20 object pool for everything (duplicates will happen)
pool = [object() for i in range(20)]
keys_vals = []
for i in range(length):
keys = tuple(random.choices(pool, k=key_length))
keys_vals.append((keys, random.choice(pool)))
dictionary = dict(keys_vals)
# add a random item at the end:
keys_vals.append(random.choice(keys_vals))
# the expected one could be different with duplicates:
expected = dictionary[keys_vals[-1][0]]
res = identityhash_tester(key_length, keys_vals, replace=True)
assert res is expected
    # check that a guaranteed duplicate key raises without replace=True:
keys_vals.insert(0, keys_vals[-2])
with pytest.raises(RuntimeError):
identityhash_tester(key_length, keys_vals)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_umath_complex.py
import sys
import platform
import pytest
import numpy as np
# import the c-extension module directly since _arg is not exported via umath
import numpy.core._multiarray_umath as ncu
from numpy.testing import (
assert_raises, assert_equal, assert_array_equal, assert_almost_equal, assert_array_max_ulp
)
# TODO: branch cuts (use Pauli code)
# TODO: conj 'symmetry'
# TODO: FPU exceptions
# At least on Windows the results of many complex functions are not conforming
# to the C99 standard. See ticket 1574.
# Ditto for Solaris (ticket 1642) and OS X on PowerPC.
# FIXME: this will probably change when we require full C99 compatibility
with np.errstate(all='ignore'):
functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0)
or (np.log(complex(np.NZERO, 0)).imag != np.pi))
# TODO: replace with a check on whether platform-provided C99 funcs are used
xfail_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky)
# TODO: this can become xfail once the generator functions are removed.
platform_skip = pytest.mark.skipif(xfail_complex_tests,
reason="Inadequate C99 complex support")
class TestCexp:
def test_simple(self):
check = check_complex_value
f = np.exp
check(f, 1, 0, np.exp(1), 0, False)
check(f, 0, 1, np.cos(1), np.sin(1), False)
ref = np.exp(1) * complex(np.cos(1), np.sin(1))
check(f, 1, 1, ref.real, ref.imag, False)
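        # These are instances of Euler's formula,
        # exp(a + b*1j) == exp(a) * (cos(b) + sin(b)*1j),
        # checked at one representative point per branch.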
@platform_skip
def test_special_values(self):
# C99: Section G 6.3.1
check = check_complex_value
f = np.exp
# cexp(+-0 + 0i) is 1 + 0i
check(f, np.PZERO, 0, 1, 0, False)
check(f, np.NZERO, 0, 1, 0, False)
# cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU
# exception
check(f, 1, np.inf, np.nan, np.nan)
check(f, -1, np.inf, np.nan, np.nan)
check(f, 0, np.inf, np.nan, np.nan)
# cexp(inf + 0i) is inf + 0i
check(f, np.inf, 0, np.inf, 0)
# cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y
check(f, -np.inf, 1, np.PZERO, np.PZERO)
check(f, -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO)
# cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y
check(f, np.inf, 1, np.inf, np.inf)
check(f, np.inf, 0.75 * np.pi, -np.inf, np.inf)
# cexp(-inf + inf i) is +-0 +- 0i (signs unspecified)
def _check_ninf_inf(dummy):
msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)"
with np.errstate(invalid='ignore'):
z = f(np.array(complex(-np.inf, np.inf)))
if z.real != 0 or z.imag != 0:
raise AssertionError(msgform % (z.real, z.imag))
_check_ninf_inf(None)
# cexp(inf + inf i) is +-inf + NaNi and raised invalid FPU ex.
def _check_inf_inf(dummy):
msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)"
with np.errstate(invalid='ignore'):
z = f(np.array(complex(np.inf, np.inf)))
if not np.isinf(z.real) or not np.isnan(z.imag):
raise AssertionError(msgform % (z.real, z.imag))
_check_inf_inf(None)
# cexp(-inf + nan i) is +-0 +- 0i
def _check_ninf_nan(dummy):
msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)"
with np.errstate(invalid='ignore'):
z = f(np.array(complex(-np.inf, np.nan)))
if z.real != 0 or z.imag != 0:
raise AssertionError(msgform % (z.real, z.imag))
_check_ninf_nan(None)
# cexp(inf + nan i) is +-inf + nan
def _check_inf_nan(dummy):
msgform = "cexp(-inf, nan) is (%f, %f), expected (+-inf, nan)"
with np.errstate(invalid='ignore'):
z = f(np.array(complex(np.inf, np.nan)))
if not np.isinf(z.real) or not np.isnan(z.imag):
raise AssertionError(msgform % (z.real, z.imag))
_check_inf_nan(None)
# cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU
# ex)
check(f, np.nan, 1, np.nan, np.nan)
check(f, np.nan, -1, np.nan, np.nan)
check(f, np.nan, np.inf, np.nan, np.nan)
check(f, np.nan, -np.inf, np.nan, np.nan)
# cexp(nan + nani) is nan + nani
check(f, np.nan, np.nan, np.nan, np.nan)
    # TODO: this can become xfail once the generator functions are removed.
@pytest.mark.skip(reason="cexp(nan + 0I) is wrong on most platforms")
def test_special_values2(self):
# XXX: most implementations get it wrong here (including glibc <= 2.10)
# cexp(nan + 0i) is nan + 0i
check = check_complex_value
f = np.exp
check(f, np.nan, 0, np.nan, 0)
class TestClog:
def test_simple(self):
x = np.array([1+0j, 1+2j])
y_r = np.log(np.abs(x)) + 1j * np.angle(x)
y = np.log(x)
assert_almost_equal(y, y_r)
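        # The reference uses log(z) == log(|z|) + 1j*arg(z), built from
        # the real log and np.angle.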
@platform_skip
@pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
def test_special_values(self):
xl = []
yl = []
# From C99 std (Sec 6.3.2)
# XXX: check exceptions raised
        # --- raising on 'invalid' currently fails, hence the
        # commented-out assert_raises calls below.
# clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero'
# floating-point exception.
with np.errstate(divide='raise'):
x = np.array([np.NZERO], dtype=complex)
y = complex(-np.inf, np.pi)
assert_raises(FloatingPointError, np.log, x)
with np.errstate(divide='ignore'):
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero'
# floating-point exception.
with np.errstate(divide='raise'):
x = np.array([0], dtype=complex)
y = complex(-np.inf, 0)
assert_raises(FloatingPointError, np.log, x)
with np.errstate(divide='ignore'):
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
        # clog(x + i inf) returns +inf + i pi/2, for finite x.
x = np.array([complex(1, np.inf)], dtype=complex)
y = complex(np.inf, 0.5 * np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
x = np.array([complex(-1, np.inf)], dtype=complex)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(x + iNaN) returns NaN + iNaN and optionally raises the
# 'invalid' floating- point exception, for finite x.
with np.errstate(invalid='raise'):
x = np.array([complex(1., np.nan)], dtype=complex)
y = complex(np.nan, np.nan)
#assert_raises(FloatingPointError, np.log, x)
with np.errstate(invalid='ignore'):
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
with np.errstate(invalid='raise'):
x = np.array([np.inf + 1j * np.nan], dtype=complex)
#assert_raises(FloatingPointError, np.log, x)
with np.errstate(invalid='ignore'):
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
        # clog(-inf + iy) returns +inf + i pi, for finite positive-signed y.
x = np.array([-np.inf + 1j], dtype=complex)
y = complex(np.inf, np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
        # clog(+inf + iy) returns +inf + i0, for finite positive-signed y.
x = np.array([np.inf + 1j], dtype=complex)
y = complex(np.inf, 0)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
        # clog(-inf + i inf) returns +inf + i 3pi/4.
x = np.array([complex(-np.inf, np.inf)], dtype=complex)
y = complex(np.inf, 0.75 * np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
        # clog(+inf + i inf) returns +inf + i pi/4.
x = np.array([complex(np.inf, np.inf)], dtype=complex)
y = complex(np.inf, 0.25 * np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(+/- inf + iNaN) returns +inf + iNaN.
x = np.array([complex(np.inf, np.nan)], dtype=complex)
y = complex(np.inf, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
x = np.array([complex(-np.inf, np.nan)], dtype=complex)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(NaN + iy) returns NaN + iNaN and optionally raises the
# 'invalid' floating-point exception, for finite y.
x = np.array([complex(np.nan, 1)], dtype=complex)
y = complex(np.nan, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(NaN + i inf) returns +inf + iNaN.
x = np.array([complex(np.nan, np.inf)], dtype=complex)
y = complex(np.inf, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(NaN + iNaN) returns NaN + iNaN.
x = np.array([complex(np.nan, np.nan)], dtype=complex)
y = complex(np.nan, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(conj(z)) = conj(clog(z)).
xa = np.array(xl, dtype=complex)
ya = np.array(yl, dtype=complex)
with np.errstate(divide='ignore'):
for i in range(len(xa)):
assert_almost_equal(np.log(xa[i].conj()), ya[i].conj())
class TestCsqrt:
def test_simple(self):
# sqrt(1)
check_complex_value(np.sqrt, 1, 0, 1, 0)
# sqrt(1i)
rres = 0.5*np.sqrt(2)
ires = rres
check_complex_value(np.sqrt, 0, 1, rres, ires, False)
# sqrt(-1)
check_complex_value(np.sqrt, -1, 0, 0, 1)
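        # sqrt(1j) == exp(1j*pi/4) == (1 + 1j)/sqrt(2), hence both parts
        # equal 0.5*sqrt(2) above.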
def test_simple_conjugate(self):
ref = np.conj(np.sqrt(complex(1, 1)))
def f(z):
return np.sqrt(np.conj(z))
check_complex_value(f, 1, 1, ref.real, ref.imag, False)
#def test_branch_cut(self):
# _check_branch_cut(f, -1, 0, 1, -1)
@platform_skip
def test_special_values(self):
# C99: Sec G 6.4.2
check = check_complex_value
f = np.sqrt
# csqrt(+-0 + 0i) is 0 + 0i
check(f, np.PZERO, 0, 0, 0)
check(f, np.NZERO, 0, 0, 0)
# csqrt(x + infi) is inf + infi for any x (including NaN)
check(f, 1, np.inf, np.inf, np.inf)
check(f, -1, np.inf, np.inf, np.inf)
check(f, np.PZERO, np.inf, np.inf, np.inf)
check(f, np.NZERO, np.inf, np.inf, np.inf)
check(f, np.inf, np.inf, np.inf, np.inf)
check(f, -np.inf, np.inf, np.inf, np.inf)
check(f, -np.nan, np.inf, np.inf, np.inf)
# csqrt(x + nani) is nan + nani for any finite x
check(f, 1, np.nan, np.nan, np.nan)
check(f, -1, np.nan, np.nan, np.nan)
check(f, 0, np.nan, np.nan, np.nan)
# csqrt(-inf + yi) is +0 + infi for any finite y > 0
check(f, -np.inf, 1, np.PZERO, np.inf)
# csqrt(inf + yi) is +inf + 0i for any finite y > 0
check(f, np.inf, 1, np.inf, np.PZERO)
        # csqrt(-inf + nani) is nan +- infi (both +infi and -infi are valid)
def _check_ninf_nan(dummy):
msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)"
z = np.sqrt(np.array(complex(-np.inf, np.nan)))
            # FIXME: ugly workaround for isinf bug.
with np.errstate(invalid='ignore'):
if not (np.isnan(z.real) and np.isinf(z.imag)):
raise AssertionError(msgform % (z.real, z.imag))
_check_ninf_nan(None)
# csqrt(+inf + nani) is inf + nani
check(f, np.inf, np.nan, np.inf, np.nan)
        # csqrt(nan + yi) is nan + nani for any finite y (infinite y is
        # handled by the x + infi case above)
check(f, np.nan, 0, np.nan, np.nan)
check(f, np.nan, 1, np.nan, np.nan)
check(f, np.nan, np.nan, np.nan, np.nan)
# XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch
# cuts first)
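def _csqrt_conj_sketch():
    # A minimal sketch of the conj/csqrt identity mentioned in the XXX above,
    # restricted to points with Re(z) > 0, where the branch cut along the
    # negative real axis (and signed-zero subtleties) cannot interfere.
    import numpy as np
    zs = np.array([1 + 1j, 2 - 3j, 0.5 + 0.25j], dtype=complex)
    np.testing.assert_allclose(np.sqrt(zs.conj()), np.sqrt(zs).conj())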
class TestCpow:
def setup_method(self):
self.olderr = np.seterr(invalid='ignore')
def teardown_method(self):
np.seterr(**self.olderr)
def test_simple(self):
x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan])
y_r = x ** 2
y = np.power(x, 2)
assert_almost_equal(y, y_r)
def test_scalar(self):
x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan])
y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3])
lx = list(range(len(x)))
# Hardcode the expected `builtins.complex` values,
# as complex exponentiation is broken as of bpo-44698
p_r = [
1+0j,
0.20787957635076193+0j,
0.35812203996480685+0.6097119028618724j,
0.12659112128185032+0.48847676699581527j,
complex(np.inf, np.nan),
complex(np.nan, np.nan),
]
n_r = [x[i] ** y[i] for i in lx]
for i in lx:
assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i)
def test_array(self):
x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan])
y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3])
lx = list(range(len(x)))
# Hardcode the expected `builtins.complex` values,
# as complex exponentiation is broken as of bpo-44698
p_r = [
1+0j,
0.20787957635076193+0j,
0.35812203996480685+0.6097119028618724j,
0.12659112128185032+0.48847676699581527j,
complex(np.inf, np.nan),
complex(np.nan, np.nan),
]
n_r = x ** y
for i in lx:
assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i)
class TestCabs:
def setup_method(self):
self.olderr = np.seterr(invalid='ignore')
def teardown_method(self):
np.seterr(**self.olderr)
def test_simple(self):
x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan])
y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan])
y = np.abs(x)
assert_almost_equal(y, y_r)
def test_fabs(self):
# Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs)
x = np.array([1+0j], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
x = np.array([complex(1, np.NZERO)], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
x = np.array([complex(np.inf, np.NZERO)], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
x = np.array([complex(np.nan, np.NZERO)], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
def test_cabs_inf_nan(self):
x, y = [], []
# cabs(+-nan + nani) returns nan
x.append(np.nan)
y.append(np.nan)
check_real_value(np.abs, np.nan, np.nan, np.nan)
x.append(np.nan)
y.append(-np.nan)
check_real_value(np.abs, -np.nan, np.nan, np.nan)
        # According to the C99 standard, if exactly one of the real/imag parts
        # is inf and the other is nan, then cabs should return inf
x.append(np.inf)
y.append(np.nan)
check_real_value(np.abs, np.inf, np.nan, np.inf)
x.append(-np.inf)
y.append(np.nan)
check_real_value(np.abs, -np.inf, np.nan, np.inf)
# cabs(conj(z)) == conj(cabs(z)) (= cabs(z))
def f(a):
return np.abs(np.conj(a))
def g(a, b):
return np.abs(complex(a, b))
xa = np.array(x, dtype=complex)
assert len(xa) == len(x) == len(y)
for xi, yi in zip(x, y):
ref = g(xi, yi)
check_real_value(f, xi, yi, ref)
class TestCarg:
def test_simple(self):
check_real_value(ncu._arg, 1, 0, 0, False)
check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False)
check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False)
check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO)
    # TODO: This can be made xfail once the generator functions are removed.
@pytest.mark.skip(
reason="Complex arithmetic with signed zero fails on most platforms")
def test_zero(self):
# carg(-0 +- 0i) returns +- pi
check_real_value(ncu._arg, np.NZERO, np.PZERO, np.pi, False)
check_real_value(ncu._arg, np.NZERO, np.NZERO, -np.pi, False)
# carg(+0 +- 0i) returns +- 0
check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO)
check_real_value(ncu._arg, np.PZERO, np.NZERO, np.NZERO)
# carg(x +- 0i) returns +- 0 for x > 0
check_real_value(ncu._arg, 1, np.PZERO, np.PZERO, False)
check_real_value(ncu._arg, 1, np.NZERO, np.NZERO, False)
# carg(x +- 0i) returns +- pi for x < 0
check_real_value(ncu._arg, -1, np.PZERO, np.pi, False)
check_real_value(ncu._arg, -1, np.NZERO, -np.pi, False)
# carg(+- 0 + yi) returns pi/2 for y > 0
check_real_value(ncu._arg, np.PZERO, 1, 0.5 * np.pi, False)
check_real_value(ncu._arg, np.NZERO, 1, 0.5 * np.pi, False)
# carg(+- 0 + yi) returns -pi/2 for y < 0
        check_real_value(ncu._arg, np.PZERO, -1, -0.5 * np.pi, False)
check_real_value(ncu._arg, np.NZERO, -1, -0.5 * np.pi, False)
#def test_branch_cuts(self):
# _check_branch_cut(ncu._arg, -1, 1j, -1, 1)
def test_special_values(self):
# carg(-np.inf +- yi) returns +-pi for finite y > 0
check_real_value(ncu._arg, -np.inf, 1, np.pi, False)
check_real_value(ncu._arg, -np.inf, -1, -np.pi, False)
# carg(np.inf +- yi) returns +-0 for finite y > 0
check_real_value(ncu._arg, np.inf, 1, np.PZERO, False)
check_real_value(ncu._arg, np.inf, -1, np.NZERO, False)
# carg(x +- np.infi) returns +-pi/2 for finite x
check_real_value(ncu._arg, 1, np.inf, 0.5 * np.pi, False)
check_real_value(ncu._arg, 1, -np.inf, -0.5 * np.pi, False)
# carg(-np.inf +- np.infi) returns +-3pi/4
check_real_value(ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False)
check_real_value(ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False)
# carg(np.inf +- np.infi) returns +-pi/4
check_real_value(ncu._arg, np.inf, np.inf, 0.25 * np.pi, False)
check_real_value(ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False)
# carg(x + yi) returns np.nan if x or y is nan
check_real_value(ncu._arg, np.nan, 0, np.nan, False)
check_real_value(ncu._arg, 0, np.nan, np.nan, False)
check_real_value(ncu._arg, np.nan, np.inf, np.nan, False)
check_real_value(ncu._arg, np.inf, np.nan, np.nan, False)
def check_real_value(f, x1, y1, x, exact=True):
z1 = np.array([complex(x1, y1)])
if exact:
assert_equal(f(z1), x)
else:
assert_almost_equal(f(z1), x)
def check_complex_value(f, x1, y1, x2, y2, exact=True):
z1 = np.array([complex(x1, y1)])
z2 = complex(x2, y2)
with np.errstate(invalid='ignore'):
if exact:
assert_equal(f(z1), z2)
else:
assert_almost_equal(f(z1), z2)
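def _check_helpers_sketch():
    # A minimal usage sketch for the two helpers above (assuming the usual
    # `import numpy as np` at the top of this module): np.log(1j) should be
    # i*pi/2 to within rounding, i.e. an exact=False comparison.
    import numpy as np
    check_complex_value(np.log, 0, 1, 0, 0.5 * np.pi, False)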
class TestSpecialComplexAVX:
@pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4])
@pytest.mark.parametrize("astype", [np.complex64, np.complex128])
def test_array(self, stride, astype):
arr = np.array([complex(np.nan , np.nan),
complex(np.nan , np.inf),
complex(np.inf , np.nan),
complex(np.inf , np.inf),
complex(0. , np.inf),
complex(np.inf , 0.),
complex(0. , 0.),
complex(0. , np.nan),
complex(np.nan , 0.)], dtype=astype)
abs_true = np.array([np.nan, np.inf, np.inf, np.inf, np.inf, np.inf, 0., np.nan, np.nan], dtype=arr.real.dtype)
sq_true = np.array([complex(np.nan, np.nan),
complex(np.nan, np.nan),
complex(np.nan, np.nan),
complex(np.nan, np.inf),
complex(-np.inf, np.nan),
complex(np.inf, np.nan),
complex(0., 0.),
complex(np.nan, np.nan),
complex(np.nan, np.nan)], dtype=astype)
assert_equal(np.abs(arr[::stride]), abs_true[::stride])
with np.errstate(invalid='ignore'):
assert_equal(np.square(arr[::stride]), sq_true[::stride])
class TestComplexAbsoluteAVX:
@pytest.mark.parametrize("arraysize", [1,2,3,4,5,6,7,8,9,10,11,13,15,17,18,19])
@pytest.mark.parametrize("stride", [-4,-3,-2,-1,1,2,3,4])
@pytest.mark.parametrize("astype", [np.complex64, np.complex128])
    # Test that masking and strides work as intended in the AVX implementation.
def test_array(self, arraysize, stride, astype):
arr = np.ones(arraysize, dtype=astype)
abs_true = np.ones(arraysize, dtype=arr.real.dtype)
assert_equal(np.abs(arr[::stride]), abs_true[::stride])
# Test case taken as-is from https://github.com/numpy/numpy/issues/16660
class TestComplexAbsoluteMixedDTypes:
@pytest.mark.parametrize("stride", [-4,-3,-2,-1,1,2,3,4])
@pytest.mark.parametrize("astype", [np.complex64, np.complex128])
@pytest.mark.parametrize("func", ['abs', 'square', 'conjugate'])
def test_array(self, stride, astype, func):
dtype = [('template_id', '<i8'), ('bank_chisq','<f4'),
('bank_chisq_dof','<i8'), ('chisq', '<f4'), ('chisq_dof','<i8'),
('cont_chisq', '<f4'), ('psd_var_val', '<f4'), ('sg_chisq','<f4'),
('mycomplex', astype), ('time_index', '<i8')]
vec = np.array([
(0, 0., 0, -31.666483, 200, 0., 0., 1. , 3.0+4.0j , 613090),
(1, 0., 0, 260.91525 , 42, 0., 0., 1. , 5.0+12.0j , 787315),
(1, 0., 0, 52.15155 , 42, 0., 0., 1. , 8.0+15.0j , 806641),
(1, 0., 0, 52.430195, 42, 0., 0., 1. , 7.0+24.0j , 1363540),
(2, 0., 0, 304.43646 , 58, 0., 0., 1. , 20.0+21.0j , 787323),
(3, 0., 0, 299.42108 , 52, 0., 0., 1. , 12.0+35.0j , 787332),
(4, 0., 0, 39.4836 , 28, 0., 0., 9.182192, 9.0+40.0j , 787304),
(4, 0., 0, 76.83787 , 28, 0., 0., 1. , 28.0+45.0j, 1321869),
(5, 0., 0, 143.26366 , 24, 0., 0., 10.996129, 11.0+60.0j , 787299)], dtype=dtype)
myfunc = getattr(np, func)
a = vec['mycomplex']
g = myfunc(a[::stride])
b = vec['mycomplex'].copy()
h = myfunc(b[::stride])
assert_array_max_ulp(h.real, g.real, 1)
assert_array_max_ulp(h.imag, g.imag, 1)
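def _strided_field_abs_sketch():
    # A minimal reduction of the structured-dtype case above: take a
    # non-contiguous complex field view, then compare the ufunc result
    # against a contiguous copy, allowing 1 ULP as the test above does.
    import numpy as np
    from numpy.testing import assert_array_max_ulp
    rec = np.zeros(8, dtype=[('a', '<i8'), ('z', np.complex128)])
    rec['z'] = np.arange(8) * (3 + 4j)
    view = rec['z']         # strided view into the records
    copy = view.copy()      # contiguous baseline
    assert_array_max_ulp(np.abs(view[::2]), np.abs(copy[::2]), 1)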
| 23,243 | Python | 36.309791 | 119 | 0.531042 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_scalar_ctors.py | """
Test the scalar constructors, which also do type-coercion
"""
import pytest
import numpy as np
from numpy.testing import (
assert_equal, assert_almost_equal, assert_warns,
)
class TestFromString:
def test_floating(self):
# Ticket #640, floats from string
fsingle = np.single('1.234')
fdouble = np.double('1.234')
flongdouble = np.longdouble('1.234')
assert_almost_equal(fsingle, 1.234)
assert_almost_equal(fdouble, 1.234)
assert_almost_equal(flongdouble, 1.234)
def test_floating_overflow(self):
""" Strings containing an unrepresentable float overflow """
fhalf = np.half('1e10000')
assert_equal(fhalf, np.inf)
fsingle = np.single('1e10000')
assert_equal(fsingle, np.inf)
fdouble = np.double('1e10000')
assert_equal(fdouble, np.inf)
flongdouble = assert_warns(RuntimeWarning, np.longdouble, '1e10000')
assert_equal(flongdouble, np.inf)
fhalf = np.half('-1e10000')
assert_equal(fhalf, -np.inf)
fsingle = np.single('-1e10000')
assert_equal(fsingle, -np.inf)
fdouble = np.double('-1e10000')
assert_equal(fdouble, -np.inf)
flongdouble = assert_warns(RuntimeWarning, np.longdouble, '-1e10000')
assert_equal(flongdouble, -np.inf)
class TestExtraArgs:
def test_superclass(self):
# try both positional and keyword arguments
s = np.str_(b'\\x61', encoding='unicode-escape')
assert s == 'a'
s = np.str_(b'\\x61', 'unicode-escape')
assert s == 'a'
# previously this would return '\\xx'
with pytest.raises(UnicodeDecodeError):
np.str_(b'\\xx', encoding='unicode-escape')
with pytest.raises(UnicodeDecodeError):
np.str_(b'\\xx', 'unicode-escape')
# superclass fails, but numpy succeeds
assert np.bytes_(-2) == b'-2'
def test_datetime(self):
dt = np.datetime64('2000-01', ('M', 2))
assert np.datetime_data(dt) == ('M', 2)
with pytest.raises(TypeError):
np.datetime64('2000', garbage=True)
def test_bool(self):
with pytest.raises(TypeError):
np.bool_(False, garbage=True)
def test_void(self):
with pytest.raises(TypeError):
np.void(b'test', garbage=True)
class TestFromInt:
def test_intp(self):
# Ticket #99
assert_equal(1024, np.intp(1024))
def test_uint64_from_negative(self):
assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
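def _uint64_wrap_sketch():
    # A sketch of the conversion rule exercised above: a negative Python int
    # wraps modulo 2**64 when handed to the uint64 constructor (in the NumPy
    # version this suite targets).
    import numpy as np
    assert np.uint64(-2) == np.uint64((-2) % 2**64)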
int_types = [np.byte, np.short, np.intc, np.int_, np.longlong]
uint_types = [np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong]
float_types = [np.half, np.single, np.double, np.longdouble]
cfloat_types = [np.csingle, np.cdouble, np.clongdouble]
class TestArrayFromScalar:
""" gh-15467 """
def _do_test(self, t1, t2):
x = t1(2)
arr = np.array(x, dtype=t2)
# type should be preserved exactly
if t2 is None:
assert arr.dtype.type is t1
else:
assert arr.dtype.type is t2
@pytest.mark.parametrize('t1', int_types + uint_types)
@pytest.mark.parametrize('t2', int_types + uint_types + [None])
def test_integers(self, t1, t2):
return self._do_test(t1, t2)
@pytest.mark.parametrize('t1', float_types)
@pytest.mark.parametrize('t2', float_types + [None])
def test_reals(self, t1, t2):
return self._do_test(t1, t2)
@pytest.mark.parametrize('t1', cfloat_types)
@pytest.mark.parametrize('t2', cfloat_types + [None])
def test_complex(self, t1, t2):
return self._do_test(t1, t2)
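def _scalar_roundtrip_sketch():
    # A minimal sketch of the gh-15467 behavior tested above: wrapping a
    # NumPy scalar in np.array() with no dtype keeps the scalar's exact type.
    import numpy as np
    assert np.array(np.float32(2.0)).dtype.type is np.float32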
| 3,688 | Python | 30.801724 | 77 | 0.610087 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_mem_overlap.py | import itertools
import pytest
import numpy as np
from numpy.core._multiarray_tests import solve_diophantine, internal_overlap
from numpy.core import _umath_tests
from numpy.lib.stride_tricks import as_strided
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_array_equal
)
ndims = 2
size = 10
shape = tuple([size] * ndims)
MAY_SHARE_BOUNDS = 0
MAY_SHARE_EXACT = -1
def _indices_for_nelems(nelems):
"""Returns slices of length nelems, from start onwards, in direction sign."""
if nelems == 0:
return [size // 2] # int index
res = []
for step in (1, 2):
for sign in (-1, 1):
start = size // 2 - nelems * step * sign // 2
stop = start + nelems * step * sign
res.append(slice(start, stop, step * sign))
return res
def _indices_for_axis():
"""Returns (src, dst) pairs of indices."""
res = []
for nelems in (0, 2, 3):
ind = _indices_for_nelems(nelems)
res.extend(itertools.product(ind, ind)) # all assignments of size "nelems"
return res
def _indices(ndims):
"""Returns ((axis0_src, axis0_dst), (axis1_src, axis1_dst), ... ) index pairs."""
ind = _indices_for_axis()
return itertools.product(ind, repeat=ndims)
def _check_assignment(srcidx, dstidx):
"""Check assignment arr[dstidx] = arr[srcidx] works."""
    arr = np.arange(np.prod(shape)).reshape(shape)
cpy = arr.copy()
cpy[dstidx] = arr[srcidx]
arr[dstidx] = arr[srcidx]
assert_(np.all(arr == cpy),
'assigning arr[%s] = arr[%s]' % (dstidx, srcidx))
def test_overlapping_assignments():
# Test automatically generated assignments which overlap in memory.
inds = _indices(ndims)
for ind in inds:
srcidx = tuple([a[0] for a in ind])
dstidx = tuple([a[1] for a in ind])
_check_assignment(srcidx, dstidx)
@pytest.mark.slow
def test_diophantine_fuzz():
# Fuzz test the diophantine solver
rng = np.random.RandomState(1234)
max_int = np.iinfo(np.intp).max
for ndim in range(10):
feasible_count = 0
infeasible_count = 0
min_count = 500//(ndim + 1)
while min(feasible_count, infeasible_count) < min_count:
# Ensure big and small integer problems
A_max = 1 + rng.randint(0, 11, dtype=np.intp)**6
U_max = rng.randint(0, 11, dtype=np.intp)**6
A_max = min(max_int, A_max)
U_max = min(max_int-1, U_max)
A = tuple(int(rng.randint(1, A_max+1, dtype=np.intp))
for j in range(ndim))
U = tuple(int(rng.randint(0, U_max+2, dtype=np.intp))
for j in range(ndim))
b_ub = min(max_int-2, sum(a*ub for a, ub in zip(A, U)))
b = rng.randint(-1, b_ub+2, dtype=np.intp)
if ndim == 0 and feasible_count < min_count:
b = 0
X = solve_diophantine(A, U, b)
if X is None:
# Check the simplified decision problem agrees
X_simplified = solve_diophantine(A, U, b, simplify=1)
assert_(X_simplified is None, (A, U, b, X_simplified))
# Check no solution exists (provided the problem is
# small enough so that brute force checking doesn't
# take too long)
ranges = tuple(range(0, a*ub+1, a) for a, ub in zip(A, U))
size = 1
for r in ranges:
size *= len(r)
if size < 100000:
assert_(not any(sum(w) == b for w in itertools.product(*ranges)))
infeasible_count += 1
else:
# Check the simplified decision problem agrees
X_simplified = solve_diophantine(A, U, b, simplify=1)
assert_(X_simplified is not None, (A, U, b, X_simplified))
# Check validity
assert_(sum(a*x for a, x in zip(A, X)) == b)
assert_(all(0 <= x <= ub for x, ub in zip(X, U)))
feasible_count += 1
def test_diophantine_overflow():
# Smoke test integer overflow detection
max_intp = np.iinfo(np.intp).max
max_int64 = np.iinfo(np.int64).max
if max_int64 <= max_intp:
# Check that the algorithm works internally in 128-bit;
# solving this problem requires large intermediate numbers
A = (max_int64//2, max_int64//2 - 10)
U = (max_int64//2, max_int64//2 - 10)
b = 2*(max_int64//2) - 10
assert_equal(solve_diophantine(A, U, b), (1, 1))
def check_may_share_memory_exact(a, b):
got = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT)
assert_equal(np.may_share_memory(a, b),
np.may_share_memory(a, b, max_work=MAY_SHARE_BOUNDS))
a.fill(0)
b.fill(0)
a.fill(1)
exact = b.any()
err_msg = ""
if got != exact:
err_msg = " " + "\n ".join([
"base_a - base_b = %r" % (a.__array_interface__['data'][0] - b.__array_interface__['data'][0],),
"shape_a = %r" % (a.shape,),
"shape_b = %r" % (b.shape,),
"strides_a = %r" % (a.strides,),
"strides_b = %r" % (b.strides,),
"size_a = %r" % (a.size,),
"size_b = %r" % (b.size,)
])
assert_equal(got, exact, err_msg=err_msg)
def test_may_share_memory_manual():
# Manual test cases for may_share_memory
# Base arrays
xs0 = [
np.zeros([13, 21, 23, 22], dtype=np.int8),
np.zeros([13, 21, 23*2, 22], dtype=np.int8)[:,:,::2,:]
]
# Generate all negative stride combinations
xs = []
for x in xs0:
for ss in itertools.product(*(([slice(None), slice(None, None, -1)],)*4)):
xp = x[ss]
xs.append(xp)
for x in xs:
# The default is a simple extent check
assert_(np.may_share_memory(x[:,0,:], x[:,1,:]))
assert_(np.may_share_memory(x[:,0,:], x[:,1,:], max_work=None))
# Exact checks
check_may_share_memory_exact(x[:,0,:], x[:,1,:])
check_may_share_memory_exact(x[:,::7], x[:,3::3])
try:
xp = x.ravel()
if xp.flags.owndata:
continue
xp = xp.view(np.int16)
except ValueError:
continue
# 0-size arrays cannot overlap
check_may_share_memory_exact(x.ravel()[6:6],
xp.reshape(13, 21, 23, 11)[:,::7])
# Test itemsize is dealt with
check_may_share_memory_exact(x[:,::7],
xp.reshape(13, 21, 23, 11))
check_may_share_memory_exact(x[:,::7],
xp.reshape(13, 21, 23, 11)[:,3::3])
check_may_share_memory_exact(x.ravel()[6:7],
xp.reshape(13, 21, 23, 11)[:,::7])
# Check unit size
x = np.zeros([1], dtype=np.int8)
check_may_share_memory_exact(x, x)
check_may_share_memory_exact(x, x.copy())
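def _bounds_vs_exact_sketch():
    # A minimal sketch of the bounds-vs-exact distinction tested above:
    # interleaved slices overlap in their address bounds, yet share no
    # actual element.
    import numpy as np
    x = np.arange(10)
    assert np.may_share_memory(x[::2], x[1::2])     # bounds check: maybe
    assert not np.shares_memory(x[::2], x[1::2])    # exact check: no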
def iter_random_view_pairs(x, same_steps=True, equal_size=False):
rng = np.random.RandomState(1234)
if equal_size and same_steps:
        raise ValueError("equal_size and same_steps cannot both be True")
def random_slice(n, step):
start = rng.randint(0, n+1, dtype=np.intp)
stop = rng.randint(start, n+1, dtype=np.intp)
if rng.randint(0, 2, dtype=np.intp) == 0:
stop, start = start, stop
step *= -1
return slice(start, stop, step)
def random_slice_fixed_size(n, step, size):
start = rng.randint(0, n+1 - size*step)
stop = start + (size-1)*step + 1
if rng.randint(0, 2) == 0:
stop, start = start-1, stop-1
if stop < 0:
stop = None
step *= -1
return slice(start, stop, step)
# First a few regular views
yield x, x
for j in range(1, 7, 3):
yield x[j:], x[:-j]
yield x[...,j:], x[...,:-j]
# An array with zero stride internal overlap
strides = list(x.strides)
strides[0] = 0
xp = as_strided(x, shape=x.shape, strides=strides)
yield x, xp
yield xp, xp
# An array with non-zero stride internal overlap
strides = list(x.strides)
if strides[0] > 1:
strides[0] = 1
xp = as_strided(x, shape=x.shape, strides=strides)
yield x, xp
yield xp, xp
# Then discontiguous views
while True:
steps = tuple(rng.randint(1, 11, dtype=np.intp)
if rng.randint(0, 5, dtype=np.intp) == 0 else 1
for j in range(x.ndim))
s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps))
t1 = np.arange(x.ndim)
rng.shuffle(t1)
if equal_size:
t2 = t1
else:
t2 = np.arange(x.ndim)
rng.shuffle(t2)
a = x[s1]
if equal_size:
if a.size == 0:
continue
steps2 = tuple(rng.randint(1, max(2, p//(1+pa)))
if rng.randint(0, 5) == 0 else 1
for p, s, pa in zip(x.shape, s1, a.shape))
s2 = tuple(random_slice_fixed_size(p, s, pa)
for p, s, pa in zip(x.shape, steps2, a.shape))
elif same_steps:
steps2 = steps
else:
steps2 = tuple(rng.randint(1, 11, dtype=np.intp)
if rng.randint(0, 5, dtype=np.intp) == 0 else 1
for j in range(x.ndim))
if not equal_size:
s2 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps2))
a = a.transpose(t1)
b = x[s2].transpose(t2)
yield a, b
def check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count):
# Check that overlap problems with common strides are solved with
# little work.
x = np.zeros([17,34,71,97], dtype=np.int16)
feasible = 0
infeasible = 0
pair_iter = iter_random_view_pairs(x, same_steps)
while min(feasible, infeasible) < min_count:
a, b = next(pair_iter)
bounds_overlap = np.may_share_memory(a, b)
may_share_answer = np.may_share_memory(a, b)
easy_answer = np.may_share_memory(a, b, max_work=get_max_work(a, b))
exact_answer = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT)
if easy_answer != exact_answer:
# assert_equal is slow...
assert_equal(easy_answer, exact_answer)
if may_share_answer != bounds_overlap:
assert_equal(may_share_answer, bounds_overlap)
if bounds_overlap:
if exact_answer:
feasible += 1
else:
infeasible += 1
@pytest.mark.slow
def test_may_share_memory_easy_fuzz():
# Check that overlap problems with common strides are always
# solved with little work.
check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: 1,
same_steps=True,
min_count=2000)
@pytest.mark.slow
def test_may_share_memory_harder_fuzz():
# Overlap problems with not necessarily common strides take more
# work.
#
# The work bound below can't be reduced much. Harder problems can
# also exist but not be detected here, as the set of problems
# comes from RNG.
check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: max(a.size, b.size)//2,
same_steps=False,
min_count=2000)
def test_shares_memory_api():
x = np.zeros([4, 5, 6], dtype=np.int8)
assert_equal(np.shares_memory(x, x), True)
assert_equal(np.shares_memory(x, x.copy()), False)
a = x[:,::2,::3]
b = x[:,::3,::2]
assert_equal(np.shares_memory(a, b), True)
assert_equal(np.shares_memory(a, b, max_work=None), True)
assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=1)
def test_may_share_memory_bad_max_work():
x = np.zeros([1])
assert_raises(OverflowError, np.may_share_memory, x, x, max_work=10**100)
assert_raises(OverflowError, np.shares_memory, x, x, max_work=10**100)
def test_internal_overlap_diophantine():
def check(A, U, exists=None):
X = solve_diophantine(A, U, 0, require_ub_nontrivial=1)
if exists is None:
exists = (X is not None)
if X is not None:
assert_(sum(a*x for a, x in zip(A, X)) == sum(a*u//2 for a, u in zip(A, U)))
assert_(all(0 <= x <= u for x, u in zip(X, U)))
assert_(any(x != u//2 for x, u in zip(X, U)))
if exists:
assert_(X is not None, repr(X))
else:
assert_(X is None, repr(X))
# Smoke tests
check((3, 2), (2*2, 3*2), exists=True)
check((3*2, 2), (15*2, (3-1)*2), exists=False)
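def _diophantine_sketch():
    # A minimal sketch of the solver's contract: it answers whether
    # sum(a * x) == b has a solution with 0 <= x <= u, which is the core
    # question behind exact overlap checks. For strides (3, 4), bounds
    # (4, 4) and offset 10 the solution happens to be unique: 3*2 + 4*1 == 10.
    assert_equal(solve_diophantine((3, 4), (4, 4), 10), (2, 1))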
def test_internal_overlap_slices():
# Slicing an array never generates internal overlap
x = np.zeros([17,34,71,97], dtype=np.int16)
rng = np.random.RandomState(1234)
def random_slice(n, step):
start = rng.randint(0, n+1, dtype=np.intp)
stop = rng.randint(start, n+1, dtype=np.intp)
if rng.randint(0, 2, dtype=np.intp) == 0:
stop, start = start, stop
step *= -1
return slice(start, stop, step)
cases = 0
min_count = 5000
while cases < min_count:
steps = tuple(rng.randint(1, 11, dtype=np.intp)
if rng.randint(0, 5, dtype=np.intp) == 0 else 1
for j in range(x.ndim))
t1 = np.arange(x.ndim)
rng.shuffle(t1)
s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps))
a = x[s1].transpose(t1)
assert_(not internal_overlap(a))
cases += 1
def check_internal_overlap(a, manual_expected=None):
got = internal_overlap(a)
# Brute-force check
m = set()
ranges = tuple(range(n) for n in a.shape)
for v in itertools.product(*ranges):
offset = sum(s*w for s, w in zip(a.strides, v))
if offset in m:
expected = True
break
else:
m.add(offset)
else:
expected = False
# Compare
if got != expected:
assert_equal(got, expected, err_msg=repr((a.strides, a.shape)))
if manual_expected is not None and expected != manual_expected:
assert_equal(expected, manual_expected)
return got
def test_internal_overlap_manual():
# Stride tricks can construct arrays with internal overlap
# We don't care about memory bounds, the array is not
# read/write accessed
x = np.arange(1).astype(np.int8)
# Check low-dimensional special cases
check_internal_overlap(x, False) # 1-dim
check_internal_overlap(x.reshape([]), False) # 0-dim
a = as_strided(x, strides=(3, 4), shape=(4, 4))
check_internal_overlap(a, False)
a = as_strided(x, strides=(3, 4), shape=(5, 4))
check_internal_overlap(a, True)
a = as_strided(x, strides=(0,), shape=(0,))
check_internal_overlap(a, False)
a = as_strided(x, strides=(0,), shape=(1,))
check_internal_overlap(a, False)
a = as_strided(x, strides=(0,), shape=(2,))
check_internal_overlap(a, True)
a = as_strided(x, strides=(0, -9993), shape=(87, 22))
check_internal_overlap(a, True)
a = as_strided(x, strides=(0, -9993), shape=(1, 22))
check_internal_overlap(a, False)
a = as_strided(x, strides=(0, -9993), shape=(0, 22))
check_internal_overlap(a, False)
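def _zero_stride_sketch():
    # A minimal sketch of the simplest internally overlapping layout checked
    # above: a zero stride with more than one element maps distinct indices
    # to the same byte offset.
    x = np.arange(1, dtype=np.int8)
    a = as_strided(x, strides=(0,), shape=(3,))
    assert_(internal_overlap(a))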
def test_internal_overlap_fuzz():
# Fuzz check; the brute-force check is fairly slow
x = np.arange(1).astype(np.int8)
overlap = 0
no_overlap = 0
min_count = 100
rng = np.random.RandomState(1234)
while min(overlap, no_overlap) < min_count:
ndim = rng.randint(1, 4, dtype=np.intp)
strides = tuple(rng.randint(-1000, 1000, dtype=np.intp)
for j in range(ndim))
shape = tuple(rng.randint(1, 30, dtype=np.intp)
for j in range(ndim))
a = as_strided(x, strides=strides, shape=shape)
result = check_internal_overlap(a)
if result:
overlap += 1
else:
no_overlap += 1
def test_non_ndarray_inputs():
# Regression check for gh-5604
class MyArray:
def __init__(self, data):
self.data = data
@property
def __array_interface__(self):
return self.data.__array_interface__
class MyArray2:
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
for cls in [MyArray, MyArray2]:
x = np.arange(5)
assert_(np.may_share_memory(cls(x[::2]), x[1::2]))
assert_(not np.shares_memory(cls(x[::2]), x[1::2]))
assert_(np.shares_memory(cls(x[1::3]), x[::2]))
assert_(np.may_share_memory(cls(x[1::3]), x[::2]))
def view_element_first_byte(x):
"""Construct an array viewing the first byte of each element of `x`"""
from numpy.lib.stride_tricks import DummyArray
interface = dict(x.__array_interface__)
interface['typestr'] = '|b1'
interface['descr'] = [('', '|b1')]
return np.asarray(DummyArray(interface, x))
def assert_copy_equivalent(operation, args, out, **kwargs):
"""
Check that operation(*args, out=out) produces results
equivalent to out[...] = operation(*args, out=out.copy())
"""
kwargs['out'] = out
kwargs2 = dict(kwargs)
kwargs2['out'] = out.copy()
out_orig = out.copy()
out[...] = operation(*args, **kwargs2)
expected = out.copy()
out[...] = out_orig
got = operation(*args, **kwargs).copy()
if (got != expected).any():
assert_equal(got, expected)
class TestUFunc:
"""
Test ufunc call memory overlap handling
"""
def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16,
count=5000):
shapes = [7, 13, 8, 21, 29, 32]
rng = np.random.RandomState(1234)
for ndim in range(1, 6):
x = rng.randint(0, 2**16, size=shapes[:ndim]).astype(dtype)
it = iter_random_view_pairs(x, same_steps=False, equal_size=True)
min_count = count // (ndim + 1)**2
overlapping = 0
while overlapping < min_count:
a, b = next(it)
a_orig = a.copy()
b_orig = b.copy()
if get_out_axis_size is None:
assert_copy_equivalent(operation, [a], out=b)
if np.shares_memory(a, b):
overlapping += 1
else:
for axis in itertools.chain(range(ndim), [None]):
a[...] = a_orig
b[...] = b_orig
# Determine size for reduction axis (None if scalar)
outsize, scalarize = get_out_axis_size(a, b, axis)
if outsize == 'skip':
continue
# Slice b to get an output array of the correct size
sl = [slice(None)] * ndim
if axis is None:
if outsize is None:
sl = [slice(0, 1)] + [0]*(ndim - 1)
else:
sl = [slice(0, outsize)] + [0]*(ndim - 1)
else:
if outsize is None:
k = b.shape[axis]//2
if ndim == 1:
sl[axis] = slice(k, k + 1)
else:
sl[axis] = k
else:
assert b.shape[axis] >= outsize
sl[axis] = slice(0, outsize)
b_out = b[tuple(sl)]
if scalarize:
b_out = b_out.reshape([])
if np.shares_memory(a, b_out):
overlapping += 1
# Check result
assert_copy_equivalent(operation, [a], out=b_out, axis=axis)
@pytest.mark.slow
def test_unary_ufunc_call_fuzz(self):
self.check_unary_fuzz(np.invert, None, np.int16)
@pytest.mark.slow
def test_unary_ufunc_call_complex_fuzz(self):
# Complex typically has a smaller alignment than itemsize
self.check_unary_fuzz(np.negative, None, np.complex128, count=500)
def test_binary_ufunc_accumulate_fuzz(self):
def get_out_axis_size(a, b, axis):
if axis is None:
if a.ndim == 1:
return a.size, False
else:
return 'skip', False # accumulate doesn't support this
else:
return a.shape[axis], False
self.check_unary_fuzz(np.add.accumulate, get_out_axis_size,
dtype=np.int16, count=500)
def test_binary_ufunc_reduce_fuzz(self):
def get_out_axis_size(a, b, axis):
return None, (axis is None or a.ndim == 1)
self.check_unary_fuzz(np.add.reduce, get_out_axis_size,
dtype=np.int16, count=500)
def test_binary_ufunc_reduceat_fuzz(self):
def get_out_axis_size(a, b, axis):
if axis is None:
if a.ndim == 1:
return a.size, False
else:
return 'skip', False # reduceat doesn't support this
else:
return a.shape[axis], False
def do_reduceat(a, out, axis):
if axis is None:
size = len(a)
step = size//len(out)
else:
size = a.shape[axis]
step = a.shape[axis] // out.shape[axis]
idx = np.arange(0, size, step)
return np.add.reduceat(a, idx, out=out, axis=axis)
self.check_unary_fuzz(do_reduceat, get_out_axis_size,
dtype=np.int16, count=500)
def test_binary_ufunc_reduceat_manual(self):
def check(ufunc, a, ind, out):
c1 = ufunc.reduceat(a.copy(), ind.copy(), out=out.copy())
c2 = ufunc.reduceat(a, ind, out=out)
assert_array_equal(c1, c2)
# Exactly same input/output arrays
a = np.arange(10000, dtype=np.int16)
check(np.add, a, a[::-1].copy(), a)
# Overlap with index
a = np.arange(10000, dtype=np.int16)
check(np.add, a, a[::-1], a)
@pytest.mark.slow
def test_unary_gufunc_fuzz(self):
shapes = [7, 13, 8, 21, 29, 32]
gufunc = _umath_tests.euclidean_pdist
rng = np.random.RandomState(1234)
for ndim in range(2, 6):
x = rng.rand(*shapes[:ndim])
it = iter_random_view_pairs(x, same_steps=False, equal_size=True)
min_count = 500 // (ndim + 1)**2
overlapping = 0
while overlapping < min_count:
a, b = next(it)
if min(a.shape[-2:]) < 2 or min(b.shape[-2:]) < 2 or a.shape[-1] < 2:
continue
                # Massage the shapes so that euclidean_pdist accepts them
if b.shape[-1] > b.shape[-2]:
b = b[...,0,:]
else:
b = b[...,:,0]
n = a.shape[-2]
p = n * (n - 1) // 2
if p <= b.shape[-1] and p > 0:
b = b[...,:p]
else:
n = max(2, int(np.sqrt(b.shape[-1]))//2)
p = n * (n - 1) // 2
a = a[...,:n,:]
b = b[...,:p]
# Call
if np.shares_memory(a, b):
overlapping += 1
with np.errstate(over='ignore', invalid='ignore'):
assert_copy_equivalent(gufunc, [a], out=b)
def test_ufunc_at_manual(self):
def check(ufunc, a, ind, b=None):
a0 = a.copy()
if b is None:
ufunc.at(a0, ind.copy())
c1 = a0.copy()
ufunc.at(a, ind)
c2 = a.copy()
else:
ufunc.at(a0, ind.copy(), b.copy())
c1 = a0.copy()
ufunc.at(a, ind, b)
c2 = a.copy()
assert_array_equal(c1, c2)
# Overlap with index
a = np.arange(10000, dtype=np.int16)
check(np.invert, a[::-1], a)
# Overlap with second data array
a = np.arange(100, dtype=np.int16)
ind = np.arange(0, 100, 2, dtype=np.int16)
check(np.add, a, ind, a[25:75])
def test_unary_ufunc_1d_manual(self):
# Exercise ufunc fast-paths (that avoid creation of an `np.nditer`)
def check(a, b):
a_orig = a.copy()
b_orig = b.copy()
b0 = b.copy()
c1 = ufunc(a, out=b0)
c2 = ufunc(a, out=b)
assert_array_equal(c1, c2)
# Trigger "fancy ufunc loop" code path
mask = view_element_first_byte(b).view(np.bool_)
a[...] = a_orig
b[...] = b_orig
c1 = ufunc(a, out=b.copy(), where=mask.copy()).copy()
a[...] = a_orig
b[...] = b_orig
c2 = ufunc(a, out=b, where=mask.copy()).copy()
# Also, mask overlapping with output
a[...] = a_orig
b[...] = b_orig
c3 = ufunc(a, out=b, where=mask).copy()
assert_array_equal(c1, c2)
assert_array_equal(c1, c3)
dtypes = [np.int8, np.int16, np.int32, np.int64, np.float32,
np.float64, np.complex64, np.complex128]
dtypes = [np.dtype(x) for x in dtypes]
for dtype in dtypes:
if np.issubdtype(dtype, np.integer):
ufunc = np.invert
else:
ufunc = np.reciprocal
n = 1000
k = 10
indices = [
np.index_exp[:n],
np.index_exp[k:k+n],
np.index_exp[n-1::-1],
np.index_exp[k+n-1:k-1:-1],
np.index_exp[:2*n:2],
np.index_exp[k:k+2*n:2],
np.index_exp[2*n-1::-2],
np.index_exp[k+2*n-1:k-1:-2],
]
for xi, yi in itertools.product(indices, indices):
v = np.arange(1, 1 + n*2 + k, dtype=dtype)
x = v[xi]
y = v[yi]
with np.errstate(all='ignore'):
check(x, y)
# Scalar cases
check(x[:1], y)
check(x[-1:], y)
check(x[:1].reshape([]), y)
check(x[-1:].reshape([]), y)
def test_unary_ufunc_where_same(self):
# Check behavior at wheremask overlap
ufunc = np.invert
def check(a, out, mask):
c1 = ufunc(a, out=out.copy(), where=mask.copy())
c2 = ufunc(a, out=out, where=mask)
assert_array_equal(c1, c2)
# Check behavior with same input and output arrays
x = np.arange(100).astype(np.bool_)
check(x, x, x)
check(x, x.copy(), x)
check(x, x, x.copy())
@pytest.mark.slow
def test_binary_ufunc_1d_manual(self):
ufunc = np.add
def check(a, b, c):
c0 = c.copy()
c1 = ufunc(a, b, out=c0)
c2 = ufunc(a, b, out=c)
assert_array_equal(c1, c2)
for dtype in [np.int8, np.int16, np.int32, np.int64,
np.float32, np.float64, np.complex64, np.complex128]:
# Check different data dependency orders
n = 1000
k = 10
indices = []
for p in [1, 2]:
indices.extend([
np.index_exp[:p*n:p],
np.index_exp[k:k+p*n:p],
np.index_exp[p*n-1::-p],
np.index_exp[k+p*n-1:k-1:-p],
])
for x, y, z in itertools.product(indices, indices, indices):
v = np.arange(6*n).astype(dtype)
x = v[x]
y = v[y]
z = v[z]
check(x, y, z)
# Scalar cases
check(x[:1], y, z)
check(x[-1:], y, z)
check(x[:1].reshape([]), y, z)
check(x[-1:].reshape([]), y, z)
check(x, y[:1], z)
check(x, y[-1:], z)
check(x, y[:1].reshape([]), z)
check(x, y[-1:].reshape([]), z)
def test_inplace_op_simple_manual(self):
rng = np.random.RandomState(1234)
x = rng.rand(200, 200) # bigger than bufsize
x += x.T
assert_array_equal(x - x.T, 0)
| 29,084 | Python | 30.207082 | 108 | 0.500825 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_nditer.py | import sys
import pytest
import textwrap
import subprocess
import numpy as np
import numpy.core._multiarray_tests as _multiarray_tests
from numpy import array, arange, nditer, all
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises,
HAS_REFCOUNT, suppress_warnings, break_cycles
)
def iter_multi_index(i):
ret = []
while not i.finished:
ret.append(i.multi_index)
i.iternext()
return ret
def iter_indices(i):
ret = []
while not i.finished:
ret.append(i.index)
i.iternext()
return ret
def iter_iterindices(i):
ret = []
while not i.finished:
ret.append(i.iterindex)
i.iternext()
return ret
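def _iter_helpers_sketch():
    # A minimal usage sketch for the helpers above: tracking 'multi_index'
    # over a C-contiguous 2x2 array visits the elements in C order.
    it = nditer(np.arange(4).reshape(2, 2), ['multi_index'], [['readonly']])
    assert_equal(iter_multi_index(it), [(0, 0), (0, 1), (1, 0), (1, 1)])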
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_iter_refcount():
# Make sure the iterator doesn't leak
# Basic
a = arange(6)
dt = np.dtype('f4').newbyteorder()
rc_a = sys.getrefcount(a)
rc_dt = sys.getrefcount(dt)
with nditer(a, [],
[['readwrite', 'updateifcopy']],
casting='unsafe',
op_dtypes=[dt]) as it:
assert_(not it.iterationneedsapi)
assert_(sys.getrefcount(a) > rc_a)
assert_(sys.getrefcount(dt) > rc_dt)
# del 'it'
it = None
assert_equal(sys.getrefcount(a), rc_a)
assert_equal(sys.getrefcount(dt), rc_dt)
# With a copy
a = arange(6, dtype='f4')
dt = np.dtype('f4')
rc_a = sys.getrefcount(a)
rc_dt = sys.getrefcount(dt)
it = nditer(a, [],
[['readwrite']],
op_dtypes=[dt])
rc2_a = sys.getrefcount(a)
rc2_dt = sys.getrefcount(dt)
it2 = it.copy()
assert_(sys.getrefcount(a) > rc2_a)
assert_(sys.getrefcount(dt) > rc2_dt)
it = None
assert_equal(sys.getrefcount(a), rc2_a)
assert_equal(sys.getrefcount(dt), rc2_dt)
it2 = None
assert_equal(sys.getrefcount(a), rc_a)
assert_equal(sys.getrefcount(dt), rc_dt)
del it2 # avoid pyflakes unused variable warning
def test_iter_best_order():
# The iterator should always find the iteration order
# with increasing memory addresses
# Test the ordering for 1-D to 5-D shapes
for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
a = arange(np.prod(shape))
# Test each combination of positive and negative strides
for dirs in range(2**len(shape)):
dirs_index = [slice(None)]*len(shape)
for bit in range(len(shape)):
if ((2**bit) & dirs):
dirs_index[bit] = slice(None, None, -1)
dirs_index = tuple(dirs_index)
aview = a.reshape(shape)[dirs_index]
# C-order
i = nditer(aview, [], [['readonly']])
assert_equal([x for x in i], a)
# Fortran-order
i = nditer(aview.T, [], [['readonly']])
assert_equal([x for x in i], a)
# Other order
if len(shape) > 2:
i = nditer(aview.swapaxes(0, 1), [], [['readonly']])
assert_equal([x for x in i], a)
def test_iter_c_order():
# Test forcing C order
# Test the ordering for 1-D to 5-D shapes
for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
a = arange(np.prod(shape))
# Test each combination of positive and negative strides
for dirs in range(2**len(shape)):
dirs_index = [slice(None)]*len(shape)
for bit in range(len(shape)):
if ((2**bit) & dirs):
dirs_index[bit] = slice(None, None, -1)
dirs_index = tuple(dirs_index)
aview = a.reshape(shape)[dirs_index]
# C-order
i = nditer(aview, order='C')
assert_equal([x for x in i], aview.ravel(order='C'))
# Fortran-order
i = nditer(aview.T, order='C')
assert_equal([x for x in i], aview.T.ravel(order='C'))
# Other order
if len(shape) > 2:
i = nditer(aview.swapaxes(0, 1), order='C')
assert_equal([x for x in i],
aview.swapaxes(0, 1).ravel(order='C'))
def test_iter_f_order():
# Test forcing F order
# Test the ordering for 1-D to 5-D shapes
for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
a = arange(np.prod(shape))
# Test each combination of positive and negative strides
for dirs in range(2**len(shape)):
dirs_index = [slice(None)]*len(shape)
for bit in range(len(shape)):
if ((2**bit) & dirs):
dirs_index[bit] = slice(None, None, -1)
dirs_index = tuple(dirs_index)
aview = a.reshape(shape)[dirs_index]
# C-order
i = nditer(aview, order='F')
assert_equal([x for x in i], aview.ravel(order='F'))
# Fortran-order
i = nditer(aview.T, order='F')
assert_equal([x for x in i], aview.T.ravel(order='F'))
# Other order
if len(shape) > 2:
i = nditer(aview.swapaxes(0, 1), order='F')
assert_equal([x for x in i],
aview.swapaxes(0, 1).ravel(order='F'))
def test_iter_c_or_f_order():
# Test forcing any contiguous (C or F) order
# Test the ordering for 1-D to 5-D shapes
for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
a = arange(np.prod(shape))
# Test each combination of positive and negative strides
for dirs in range(2**len(shape)):
dirs_index = [slice(None)]*len(shape)
for bit in range(len(shape)):
if ((2**bit) & dirs):
dirs_index[bit] = slice(None, None, -1)
dirs_index = tuple(dirs_index)
aview = a.reshape(shape)[dirs_index]
# C-order
i = nditer(aview, order='A')
assert_equal([x for x in i], aview.ravel(order='A'))
# Fortran-order
i = nditer(aview.T, order='A')
assert_equal([x for x in i], aview.T.ravel(order='A'))
# Other order
if len(shape) > 2:
i = nditer(aview.swapaxes(0, 1), order='A')
assert_equal([x for x in i],
aview.swapaxes(0, 1).ravel(order='A'))
def test_nditer_multi_index_set():
# Test the multi_index set
a = np.arange(6).reshape(2, 3)
it = np.nditer(a, flags=['multi_index'])
# Removes the iteration on two first elements of a[0]
it.multi_index = (0, 2,)
assert_equal([i for i in it], [2, 3, 4, 5])
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_nditer_multi_index_set_refcount():
# Test if the reference count on index variable is decreased
index = 0
i = np.nditer(np.array([111, 222, 333, 444]), flags=['multi_index'])
start_count = sys.getrefcount(index)
i.multi_index = (index,)
end_count = sys.getrefcount(index)
assert_equal(start_count, end_count)
def test_iter_best_order_multi_index_1d():
# The multi-indices should be correct with any reordering
a = arange(4)
# 1D order
i = nditer(a, ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(0,), (1,), (2,), (3,)])
# 1D reversed order
i = nditer(a[::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(3,), (2,), (1,), (0,)])
def test_iter_best_order_multi_index_2d():
# The multi-indices should be correct with any reordering
a = arange(6)
# 2D C-order
i = nditer(a.reshape(2, 3), ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)])
# 2D Fortran-order
i = nditer(a.reshape(2, 3).copy(order='F'), ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)])
# 2D reversed C-order
i = nditer(a.reshape(2, 3)[::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(1, 0), (1, 1), (1, 2), (0, 0), (0, 1), (0, 2)])
i = nditer(a.reshape(2, 3)[:, ::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(0, 2), (0, 1), (0, 0), (1, 2), (1, 1), (1, 0)])
i = nditer(a.reshape(2, 3)[::-1, ::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(1, 2), (1, 1), (1, 0), (0, 2), (0, 1), (0, 0)])
# 2D reversed Fortran-order
i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(1, 0), (0, 0), (1, 1), (0, 1), (1, 2), (0, 2)])
i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1],
['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(0, 2), (1, 2), (0, 1), (1, 1), (0, 0), (1, 0)])
i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1],
['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(1, 2), (0, 2), (1, 1), (0, 1), (1, 0), (0, 0)])
def test_iter_best_order_multi_index_3d():
# The multi-indices should be correct with any reordering
a = arange(12)
# 3D C-order
i = nditer(a.reshape(2, 3, 2), ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1),
(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)])
# 3D Fortran-order
i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0),
(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)])
# 3D reversed C-order
i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1),
(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)])
i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1),
(1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)])
i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0),
(1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)])
# 3D reversed Fortran-order
i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1],
['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0),
(1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)])
i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1],
['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0),
(0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)])
i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1],
['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1),
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)])
def test_iter_best_order_c_index_1d():
# The C index should be correct with any reordering
a = arange(4)
# 1D order
i = nditer(a, ['c_index'], [['readonly']])
assert_equal(iter_indices(i), [0, 1, 2, 3])
# 1D reversed order
i = nditer(a[::-1], ['c_index'], [['readonly']])
assert_equal(iter_indices(i), [3, 2, 1, 0])
def test_iter_best_order_c_index_2d():
# The C index should be correct with any reordering
a = arange(6)
# 2D C-order
i = nditer(a.reshape(2, 3), ['c_index'], [['readonly']])
assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5])
# 2D Fortran-order
i = nditer(a.reshape(2, 3).copy(order='F'),
['c_index'], [['readonly']])
assert_equal(iter_indices(i), [0, 3, 1, 4, 2, 5])
# 2D reversed C-order
i = nditer(a.reshape(2, 3)[::-1], ['c_index'], [['readonly']])
assert_equal(iter_indices(i), [3, 4, 5, 0, 1, 2])
i = nditer(a.reshape(2, 3)[:, ::-1], ['c_index'], [['readonly']])
assert_equal(iter_indices(i), [2, 1, 0, 5, 4, 3])
i = nditer(a.reshape(2, 3)[::-1, ::-1], ['c_index'], [['readonly']])
assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0])
# 2D reversed Fortran-order
i = nditer(a.reshape(2, 3).copy(order='F')[::-1],
['c_index'], [['readonly']])
assert_equal(iter_indices(i), [3, 0, 4, 1, 5, 2])
i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1],
['c_index'], [['readonly']])
assert_equal(iter_indices(i), [2, 5, 1, 4, 0, 3])
i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1],
['c_index'], [['readonly']])
assert_equal(iter_indices(i), [5, 2, 4, 1, 3, 0])
def test_iter_best_order_c_index_3d():
# The C index should be correct with any reordering
a = arange(12)
# 3D C-order
i = nditer(a.reshape(2, 3, 2), ['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
# 3D Fortran-order
i = nditer(a.reshape(2, 3, 2).copy(order='F'),
['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11])
# 3D reversed C-order
i = nditer(a.reshape(2, 3, 2)[::-1], ['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5])
i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7])
i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10])
# 3D reversed Fortran-order
i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1],
['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5])
i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1],
['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7])
i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1],
['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10])
def test_iter_best_order_f_index_1d():
# The Fortran index should be correct with any reordering
a = arange(4)
# 1D order
i = nditer(a, ['f_index'], [['readonly']])
assert_equal(iter_indices(i), [0, 1, 2, 3])
# 1D reversed order
i = nditer(a[::-1], ['f_index'], [['readonly']])
assert_equal(iter_indices(i), [3, 2, 1, 0])
def test_iter_best_order_f_index_2d():
# The Fortran index should be correct with any reordering
a = arange(6)
# 2D C-order
i = nditer(a.reshape(2, 3), ['f_index'], [['readonly']])
assert_equal(iter_indices(i), [0, 2, 4, 1, 3, 5])
# 2D Fortran-order
i = nditer(a.reshape(2, 3).copy(order='F'),
['f_index'], [['readonly']])
assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5])
# 2D reversed C-order
i = nditer(a.reshape(2, 3)[::-1], ['f_index'], [['readonly']])
assert_equal(iter_indices(i), [1, 3, 5, 0, 2, 4])
i = nditer(a.reshape(2, 3)[:, ::-1], ['f_index'], [['readonly']])
assert_equal(iter_indices(i), [4, 2, 0, 5, 3, 1])
i = nditer(a.reshape(2, 3)[::-1, ::-1], ['f_index'], [['readonly']])
assert_equal(iter_indices(i), [5, 3, 1, 4, 2, 0])
# 2D reversed Fortran-order
i = nditer(a.reshape(2, 3).copy(order='F')[::-1],
['f_index'], [['readonly']])
assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4])
i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1],
['f_index'], [['readonly']])
assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1])
i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1],
['f_index'], [['readonly']])
assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0])
def test_iter_best_order_f_index_3d():
# The Fortran index should be correct with any reordering
a = arange(12)
# 3D C-order
i = nditer(a.reshape(2, 3, 2), ['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11])
# 3D Fortran-order
i = nditer(a.reshape(2, 3, 2).copy(order='F'),
['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
# 3D reversed C-order
i = nditer(a.reshape(2, 3, 2)[::-1], ['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10])
i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7])
i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5])
# 3D reversed Fortran-order
i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1],
['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10])
i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1],
['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7])
i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1],
['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5])
def test_iter_no_inner_full_coalesce():
# Check no_inner iterators which coalesce into a single inner loop
for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
size = np.prod(shape)
a = arange(size)
# Test each combination of forward and backwards indexing
for dirs in range(2**len(shape)):
dirs_index = [slice(None)]*len(shape)
for bit in range(len(shape)):
if ((2**bit) & dirs):
dirs_index[bit] = slice(None, None, -1)
dirs_index = tuple(dirs_index)
aview = a.reshape(shape)[dirs_index]
# C-order
i = nditer(aview, ['external_loop'], [['readonly']])
assert_equal(i.ndim, 1)
assert_equal(i[0].shape, (size,))
# Fortran-order
i = nditer(aview.T, ['external_loop'], [['readonly']])
assert_equal(i.ndim, 1)
assert_equal(i[0].shape, (size,))
# Other order
if len(shape) > 2:
i = nditer(aview.swapaxes(0, 1),
['external_loop'], [['readonly']])
assert_equal(i.ndim, 1)
assert_equal(i[0].shape, (size,))
def test_iter_no_inner_dim_coalescing():
# Check no_inner iterators whose dimensions may not coalesce completely
# Skipping the last element in a dimension prevents coalescing
# with the next-bigger dimension
a = arange(24).reshape(2, 3, 4)[:,:, :-1]
i = nditer(a, ['external_loop'], [['readonly']])
assert_equal(i.ndim, 2)
assert_equal(i[0].shape, (3,))
a = arange(24).reshape(2, 3, 4)[:, :-1,:]
i = nditer(a, ['external_loop'], [['readonly']])
assert_equal(i.ndim, 2)
assert_equal(i[0].shape, (8,))
a = arange(24).reshape(2, 3, 4)[:-1,:,:]
i = nditer(a, ['external_loop'], [['readonly']])
assert_equal(i.ndim, 1)
assert_equal(i[0].shape, (12,))
# Even with lots of 1-sized dimensions, should still coalesce
a = arange(24).reshape(1, 1, 2, 1, 1, 3, 1, 1, 4, 1, 1)
i = nditer(a, ['external_loop'], [['readonly']])
assert_equal(i.ndim, 1)
assert_equal(i[0].shape, (24,))
def test_iter_dim_coalescing():
# Check that the correct number of dimensions are coalesced
# Tracking a multi-index disables coalescing
a = arange(24).reshape(2, 3, 4)
i = nditer(a, ['multi_index'], [['readonly']])
assert_equal(i.ndim, 3)
# A tracked index can allow coalescing if it's compatible with the array
a3d = arange(24).reshape(2, 3, 4)
i = nditer(a3d, ['c_index'], [['readonly']])
assert_equal(i.ndim, 1)
i = nditer(a3d.swapaxes(0, 1), ['c_index'], [['readonly']])
assert_equal(i.ndim, 3)
i = nditer(a3d.T, ['c_index'], [['readonly']])
assert_equal(i.ndim, 3)
i = nditer(a3d.T, ['f_index'], [['readonly']])
assert_equal(i.ndim, 1)
i = nditer(a3d.T.swapaxes(0, 1), ['f_index'], [['readonly']])
assert_equal(i.ndim, 3)
# When C or F order is forced, coalescing may still occur
a3d = arange(24).reshape(2, 3, 4)
i = nditer(a3d, order='C')
assert_equal(i.ndim, 1)
i = nditer(a3d.T, order='C')
assert_equal(i.ndim, 3)
i = nditer(a3d, order='F')
assert_equal(i.ndim, 3)
i = nditer(a3d.T, order='F')
assert_equal(i.ndim, 1)
i = nditer(a3d, order='A')
assert_equal(i.ndim, 1)
i = nditer(a3d.T, order='A')
assert_equal(i.ndim, 1)
def test_iter_broadcasting():
# Standard NumPy broadcasting rules
# 1D with scalar
i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 6)
assert_equal(i.shape, (6,))
# 2D with scalar
i = nditer([arange(6).reshape(2, 3), np.int32(2)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 6)
assert_equal(i.shape, (2, 3))
# 2D with 1D
i = nditer([arange(6).reshape(2, 3), arange(3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 6)
assert_equal(i.shape, (2, 3))
i = nditer([arange(2).reshape(2, 1), arange(3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 6)
assert_equal(i.shape, (2, 3))
# 2D with 2D
i = nditer([arange(2).reshape(2, 1), arange(3).reshape(1, 3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 6)
assert_equal(i.shape, (2, 3))
# 3D with scalar
i = nditer([np.int32(2), arange(24).reshape(4, 2, 3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
# 3D with 1D
i = nditer([arange(3), arange(24).reshape(4, 2, 3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
i = nditer([arange(3), arange(8).reshape(4, 2, 1)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
# 3D with 2D
i = nditer([arange(6).reshape(2, 3), arange(24).reshape(4, 2, 3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
i = nditer([arange(2).reshape(2, 1), arange(24).reshape(4, 2, 3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
i = nditer([arange(3).reshape(1, 3), arange(8).reshape(4, 2, 1)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
# 3D with 3D
i = nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 1, 3),
arange(4).reshape(4, 1, 1)],
['multi_index'], [['readonly']]*3)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
i = nditer([arange(6).reshape(1, 2, 3), arange(4).reshape(4, 1, 1)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
i = nditer([arange(24).reshape(4, 2, 3), arange(12).reshape(4, 1, 3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
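def _broadcast_pairs_sketch():
    # A minimal sketch of what the broadcast iterators above yield: matched
    # element pairs, with the scalar repeated across the broadcast shape.
    pairs = [(int(x), int(y)) for x, y in nditer([arange(3), np.int32(2)])]
    assert_equal(pairs, [(0, 2), (1, 2), (2, 2)])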
def test_iter_itershape():
# Check that allocated outputs work with a specified shape
a = np.arange(6, dtype='i2').reshape(2, 3)
i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
op_axes=[[0, 1, None], None],
itershape=(-1, -1, 4))
assert_equal(i.operands[1].shape, (2, 3, 4))
assert_equal(i.operands[1].strides, (24, 8, 2))
i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']],
op_axes=[[0, 1, None], None],
itershape=(-1, -1, 4))
assert_equal(i.operands[1].shape, (3, 2, 4))
assert_equal(i.operands[1].strides, (8, 24, 2))
i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']],
order='F',
op_axes=[[0, 1, None], None],
itershape=(-1, -1, 4))
assert_equal(i.operands[1].shape, (3, 2, 4))
assert_equal(i.operands[1].strides, (2, 6, 12))
# If we specify 1 in the itershape, it shouldn't allow broadcasting
# of that dimension to a bigger value
assert_raises(ValueError, nditer, [a, None], [],
[['readonly'], ['writeonly', 'allocate']],
op_axes=[[0, 1, None], None],
itershape=(-1, 1, 4))
    # Regression test: with itershape given but no op_axes, the per-op axes
    # are NULLed correctly
i = np.nditer([np.ones(2), None, None], itershape=(2,))
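# Sketch of the itershape/op_axes interaction tested above: -1 entries are
# inferred from the inputs, while concrete entries fix axes that no input
# provides. `_demo_itershape` is an illustrative helper, not collected by
# pytest.
def _demo_itershape():
    a = np.arange(6).reshape(2, 3)
    it = np.nditer([a, None], [],
                   [['readonly'], ['writeonly', 'allocate']],
                   op_axes=[[0, 1, None], None],
                   itershape=(-1, -1, 5))
    # The allocated output gains a trailing axis of length 5
    assert it.operands[1].shape == (2, 3, 5)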
def test_iter_broadcasting_errors():
# Check that errors are thrown for bad broadcasting shapes
# 1D with 1D
assert_raises(ValueError, nditer, [arange(2), arange(3)],
[], [['readonly']]*2)
# 2D with 1D
assert_raises(ValueError, nditer,
[arange(6).reshape(2, 3), arange(2)],
[], [['readonly']]*2)
# 2D with 2D
assert_raises(ValueError, nditer,
[arange(6).reshape(2, 3), arange(9).reshape(3, 3)],
[], [['readonly']]*2)
assert_raises(ValueError, nditer,
[arange(6).reshape(2, 3), arange(4).reshape(2, 2)],
[], [['readonly']]*2)
# 3D with 3D
assert_raises(ValueError, nditer,
[arange(36).reshape(3, 3, 4), arange(24).reshape(2, 3, 4)],
[], [['readonly']]*2)
assert_raises(ValueError, nditer,
[arange(8).reshape(2, 4, 1), arange(24).reshape(2, 3, 4)],
[], [['readonly']]*2)
# Verify that the error message mentions the right shapes
try:
nditer([arange(2).reshape(1, 2, 1),
arange(3).reshape(1, 3),
arange(6).reshape(2, 3)],
[],
[['readonly'], ['readonly'], ['writeonly', 'no_broadcast']])
raise AssertionError('Should have raised a broadcast error')
except ValueError as e:
msg = str(e)
# The message should contain the shape of the 3rd operand
assert_(msg.find('(2,3)') >= 0,
'Message "%s" doesn\'t contain operand shape (2,3)' % msg)
# The message should contain the broadcast shape
assert_(msg.find('(1,2,3)') >= 0,
'Message "%s" doesn\'t contain broadcast shape (1,2,3)' % msg)
try:
nditer([arange(6).reshape(2, 3), arange(2)],
[],
[['readonly'], ['readonly']],
op_axes=[[0, 1], [0, np.newaxis]],
itershape=(4, 3))
raise AssertionError('Should have raised a broadcast error')
except ValueError as e:
msg = str(e)
# The message should contain "shape->remappedshape" for each operand
assert_(msg.find('(2,3)->(2,3)') >= 0,
'Message "%s" doesn\'t contain operand shape (2,3)->(2,3)' % msg)
assert_(msg.find('(2,)->(2,newaxis)') >= 0,
                ('Message "%s" doesn\'t contain remapped operand shape ' +
'(2,)->(2,newaxis)') % msg)
# The message should contain the itershape parameter
assert_(msg.find('(4,3)') >= 0,
'Message "%s" doesn\'t contain itershape parameter (4,3)' % msg)
try:
nditer([np.zeros((2, 1, 1)), np.zeros((2,))],
[],
[['writeonly', 'no_broadcast'], ['readonly']])
raise AssertionError('Should have raised a broadcast error')
except ValueError as e:
msg = str(e)
# The message should contain the shape of the bad operand
assert_(msg.find('(2,1,1)') >= 0,
'Message "%s" doesn\'t contain operand shape (2,1,1)' % msg)
# The message should contain the broadcast shape
assert_(msg.find('(2,1,2)') >= 0,
'Message "%s" doesn\'t contain the broadcast shape (2,1,2)' % msg)
def test_iter_flags_errors():
# Check that bad combinations of flags produce errors
a = arange(6)
# Not enough operands
assert_raises(ValueError, nditer, [], [], [])
# Too many operands
assert_raises(ValueError, nditer, [a]*100, [], [['readonly']]*100)
# Bad global flag
assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']])
# Bad op flag
assert_raises(ValueError, nditer, [a], [], [['readonly', 'bad flag']])
# Bad order parameter
assert_raises(ValueError, nditer, [a], [], [['readonly']], order='G')
# Bad casting parameter
assert_raises(ValueError, nditer, [a], [], [['readonly']], casting='noon')
# op_flags must match ops
assert_raises(ValueError, nditer, [a]*3, [], [['readonly']]*2)
# Cannot track both a C and an F index
assert_raises(ValueError, nditer, a,
['c_index', 'f_index'], [['readonly']])
# Inner iteration and multi-indices/indices are incompatible
assert_raises(ValueError, nditer, a,
['external_loop', 'multi_index'], [['readonly']])
assert_raises(ValueError, nditer, a,
['external_loop', 'c_index'], [['readonly']])
assert_raises(ValueError, nditer, a,
['external_loop', 'f_index'], [['readonly']])
# Must specify exactly one of readwrite/readonly/writeonly per operand
assert_raises(ValueError, nditer, a, [], [[]])
assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly']])
assert_raises(ValueError, nditer, a, [], [['readonly', 'readwrite']])
assert_raises(ValueError, nditer, a, [], [['writeonly', 'readwrite']])
assert_raises(ValueError, nditer, a,
[], [['readonly', 'writeonly', 'readwrite']])
# Python scalars are always readonly
assert_raises(TypeError, nditer, 1.5, [], [['writeonly']])
assert_raises(TypeError, nditer, 1.5, [], [['readwrite']])
# Array scalars are always readonly
assert_raises(TypeError, nditer, np.int32(1), [], [['writeonly']])
assert_raises(TypeError, nditer, np.int32(1), [], [['readwrite']])
# Check readonly array
a.flags.writeable = False
assert_raises(ValueError, nditer, a, [], [['writeonly']])
assert_raises(ValueError, nditer, a, [], [['readwrite']])
a.flags.writeable = True
# Multi-indices available only with the multi_index flag
i = nditer(arange(6), [], [['readonly']])
assert_raises(ValueError, lambda i:i.multi_index, i)
# Index available only with an index flag
assert_raises(ValueError, lambda i:i.index, i)
    # Seeking (assigning multi_index/index/iterindex/iterrange) is
    # incompatible with buffering or an external loop
def assign_multi_index(i):
i.multi_index = (0,)
def assign_index(i):
i.index = 0
def assign_iterindex(i):
i.iterindex = 0
def assign_iterrange(i):
i.iterrange = (0, 1)
i = nditer(arange(6), ['external_loop'])
assert_raises(ValueError, assign_multi_index, i)
assert_raises(ValueError, assign_index, i)
assert_raises(ValueError, assign_iterindex, i)
assert_raises(ValueError, assign_iterrange, i)
i = nditer(arange(6), ['buffered'])
assert_raises(ValueError, assign_multi_index, i)
assert_raises(ValueError, assign_index, i)
assert_raises(ValueError, assign_iterrange, i)
# Can't iterate if size is zero
assert_raises(ValueError, nditer, np.array([]))
def test_iter_slice():
a, b, c = np.arange(3), np.arange(3), np.arange(3.)
i = nditer([a, b, c], [], ['readwrite'])
with i:
i[0:2] = (3, 3)
assert_equal(a, [3, 1, 2])
assert_equal(b, [3, 1, 2])
assert_equal(c, [0, 1, 2])
i[1] = 12
assert_equal(i[0:2], [3, 12])
def test_iter_assign_mapping():
a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
it = np.nditer(a, [], [['readwrite', 'updateifcopy']],
casting='same_kind', op_dtypes=[np.dtype('f4')])
with it:
        # Two writes in a row; only the last value survives the writeback
        it.operands[0][...] = 3
        it.operands[0][...] = 14
assert_equal(a, 14)
it = np.nditer(a, [], [['readwrite', 'updateifcopy']],
casting='same_kind', op_dtypes=[np.dtype('f4')])
with it:
        x = it.operands[0][-1:1]  # an empty view; the write below is a no-op
x[...] = 14
it.operands[0][...] = -1234
assert_equal(a, -1234)
# check for no warnings on dealloc
x = None
it = None
def test_iter_nbo_align_contig():
# Check that byte order, alignment, and contig changes work
# Byte order change by requesting a specific dtype
a = np.arange(6, dtype='f4')
au = a.byteswap().newbyteorder()
assert_(a.dtype.byteorder != au.dtype.byteorder)
i = nditer(au, [], [['readwrite', 'updateifcopy']],
casting='equiv',
op_dtypes=[np.dtype('f4')])
with i:
# context manager triggers WRITEBACKIFCOPY on i at exit
assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
assert_equal(i.operands[0], a)
i.operands[0][:] = 2
assert_equal(au, [2]*6)
del i # should not raise a warning
# Byte order change by requesting NBO
a = np.arange(6, dtype='f4')
au = a.byteswap().newbyteorder()
assert_(a.dtype.byteorder != au.dtype.byteorder)
with nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']],
casting='equiv') as i:
        # context manager triggers WRITEBACKIFCOPY on i at exit
assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
assert_equal(i.operands[0], a)
        # Write twice; only the final value must survive the writeback
        i.operands[0][:] = 12345
        i.operands[0][:] = 2
assert_equal(au, [2]*6)
# Unaligned input
a = np.zeros((6*4+1,), dtype='i1')[1:]
a.dtype = 'f4'
a[:] = np.arange(6, dtype='f4')
assert_(not a.flags.aligned)
# Without 'aligned', shouldn't copy
i = nditer(a, [], [['readonly']])
assert_(not i.operands[0].flags.aligned)
assert_equal(i.operands[0], a)
# With 'aligned', should make a copy
with nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']]) as i:
assert_(i.operands[0].flags.aligned)
        # context manager triggers WRITEBACKIFCOPY on i at exit
assert_equal(i.operands[0], a)
i.operands[0][:] = 3
assert_equal(a, [3]*6)
# Discontiguous input
a = arange(12)
# If it is contiguous, shouldn't copy
i = nditer(a[:6], [], [['readonly']])
assert_(i.operands[0].flags.contiguous)
assert_equal(i.operands[0], a[:6])
# If it isn't contiguous, should buffer
i = nditer(a[::2], ['buffered', 'external_loop'],
[['readonly', 'contig']],
buffersize=10)
assert_(i[0].flags.contiguous)
assert_equal(i[0], a[::2])
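# Sketch of the 'nbo' writeback round trip checked above: a byte-swapped
# operand is presented in native byte order and written back when the
# context manager exits. Illustrative helper only, not collected by pytest.
def _demo_nbo_writeback():
    a = np.arange(4, dtype='f4')
    swapped = a.byteswap().newbyteorder()
    with np.nditer(swapped, [], [['readwrite', 'updateifcopy', 'nbo']],
                   casting='equiv') as it:
        # the working copy is in native byte order
        assert it.operands[0].dtype.byteorder == a.dtype.byteorder
        it.operands[0][:] = 7
    # writeback happened on exit
    assert (swapped == 7).all()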
def test_iter_array_cast():
# Check that arrays are cast as requested
# No cast 'f4' -> 'f4'
a = np.arange(6, dtype='f4').reshape(2, 3)
i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')])
with i:
assert_equal(i.operands[0], a)
assert_equal(i.operands[0].dtype, np.dtype('f4'))
# Byte-order cast '<f4' -> '>f4'
a = np.arange(6, dtype='<f4').reshape(2, 3)
with nditer(a, [], [['readwrite', 'updateifcopy']],
casting='equiv',
op_dtypes=[np.dtype('>f4')]) as i:
assert_equal(i.operands[0], a)
assert_equal(i.operands[0].dtype, np.dtype('>f4'))
# Safe case 'f4' -> 'f8'
a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2)
i = nditer(a, [], [['readonly', 'copy']],
casting='safe',
op_dtypes=[np.dtype('f8')])
assert_equal(i.operands[0], a)
assert_equal(i.operands[0].dtype, np.dtype('f8'))
# The memory layout of the temporary should match a (a is (48,4,16))
# except negative strides get flipped to positive strides.
assert_equal(i.operands[0].strides, (96, 8, 32))
a = a[::-1,:, ::-1]
i = nditer(a, [], [['readonly', 'copy']],
casting='safe',
op_dtypes=[np.dtype('f8')])
assert_equal(i.operands[0], a)
assert_equal(i.operands[0].dtype, np.dtype('f8'))
assert_equal(i.operands[0].strides, (96, 8, 32))
# Same-kind cast 'f8' -> 'f4' -> 'f8'
a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
with nditer(a, [],
[['readwrite', 'updateifcopy']],
casting='same_kind',
op_dtypes=[np.dtype('f4')]) as i:
assert_equal(i.operands[0], a)
assert_equal(i.operands[0].dtype, np.dtype('f4'))
assert_equal(i.operands[0].strides, (4, 16, 48))
# Check that WRITEBACKIFCOPY is activated at exit
i.operands[0][2, 1, 1] = -12.5
assert_(a[2, 1, 1] != -12.5)
assert_equal(a[2, 1, 1], -12.5)
a = np.arange(6, dtype='i4')[::-2]
with nditer(a, [],
[['writeonly', 'updateifcopy']],
casting='unsafe',
op_dtypes=[np.dtype('f4')]) as i:
assert_equal(i.operands[0].dtype, np.dtype('f4'))
# Even though the stride was negative in 'a', it
# becomes positive in the temporary
assert_equal(i.operands[0].strides, (4,))
i.operands[0][:] = [1, 2, 3]
assert_equal(a, [1, 2, 3])
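# Sketch of the same-kind cast round trip: the operand is exposed through a
# lower-precision temporary and the values land back in the original array
# on exit. Illustrative helper only.
def _demo_cast_writeback():
    a = np.arange(3, dtype='f8')
    with np.nditer(a, [], [['readwrite', 'updateifcopy']],
                   casting='same_kind',
                   op_dtypes=[np.dtype('f4')]) as it:
        assert it.operands[0].dtype == np.dtype('f4')
        it.operands[0][:] = [1, 2, 3]
    assert (a == [1, 2, 3]).all()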
def test_iter_array_cast_errors():
# Check that invalid casts are caught
# Need to enable copying for casts to occur
assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
[['readonly']], op_dtypes=[np.dtype('f8')])
# Also need to allow casting for casts to occur
assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
[['readonly', 'copy']], casting='no',
op_dtypes=[np.dtype('f8')])
assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
[['readonly', 'copy']], casting='equiv',
op_dtypes=[np.dtype('f8')])
assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
[['writeonly', 'updateifcopy']],
casting='no',
op_dtypes=[np.dtype('f4')])
assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
[['writeonly', 'updateifcopy']],
casting='equiv',
op_dtypes=[np.dtype('f4')])
# '<f4' -> '>f4' should not work with casting='no'
assert_raises(TypeError, nditer, arange(2, dtype='<f4'), [],
[['readonly', 'copy']], casting='no',
op_dtypes=[np.dtype('>f4')])
# 'f4' -> 'f8' is a safe cast, but 'f8' -> 'f4' isn't
assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
[['readwrite', 'updateifcopy']],
casting='safe',
op_dtypes=[np.dtype('f8')])
assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
[['readwrite', 'updateifcopy']],
casting='safe',
op_dtypes=[np.dtype('f4')])
# 'f4' -> 'i4' is neither a safe nor a same-kind cast
assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
[['readonly', 'copy']],
casting='same_kind',
op_dtypes=[np.dtype('i4')])
assert_raises(TypeError, nditer, arange(2, dtype='i4'), [],
[['writeonly', 'updateifcopy']],
casting='same_kind',
op_dtypes=[np.dtype('f4')])
def test_iter_scalar_cast():
# Check that scalars are cast as requested
# No cast 'f4' -> 'f4'
i = nditer(np.float32(2.5), [], [['readonly']],
op_dtypes=[np.dtype('f4')])
assert_equal(i.dtypes[0], np.dtype('f4'))
assert_equal(i.value.dtype, np.dtype('f4'))
assert_equal(i.value, 2.5)
# Safe cast 'f4' -> 'f8'
i = nditer(np.float32(2.5), [],
[['readonly', 'copy']],
casting='safe',
op_dtypes=[np.dtype('f8')])
assert_equal(i.dtypes[0], np.dtype('f8'))
assert_equal(i.value.dtype, np.dtype('f8'))
assert_equal(i.value, 2.5)
# Same-kind cast 'f8' -> 'f4'
i = nditer(np.float64(2.5), [],
[['readonly', 'copy']],
casting='same_kind',
op_dtypes=[np.dtype('f4')])
assert_equal(i.dtypes[0], np.dtype('f4'))
assert_equal(i.value.dtype, np.dtype('f4'))
assert_equal(i.value, 2.5)
# Unsafe cast 'f8' -> 'i4'
i = nditer(np.float64(3.0), [],
[['readonly', 'copy']],
casting='unsafe',
op_dtypes=[np.dtype('i4')])
assert_equal(i.dtypes[0], np.dtype('i4'))
assert_equal(i.value.dtype, np.dtype('i4'))
assert_equal(i.value, 3)
# Readonly scalars may be cast even without setting COPY or BUFFERED
i = nditer(3, [], [['readonly']], op_dtypes=[np.dtype('f8')])
assert_equal(i[0].dtype, np.dtype('f8'))
assert_equal(i[0], 3.)
def test_iter_scalar_cast_errors():
# Check that invalid casts are caught
# Need to allow copying/buffering for write casts of scalars to occur
assert_raises(TypeError, nditer, np.float32(2), [],
[['readwrite']], op_dtypes=[np.dtype('f8')])
assert_raises(TypeError, nditer, 2.5, [],
[['readwrite']], op_dtypes=[np.dtype('f4')])
# 'f8' -> 'f4' isn't a safe cast if the value would overflow
assert_raises(TypeError, nditer, np.float64(1e60), [],
[['readonly']],
casting='safe',
op_dtypes=[np.dtype('f4')])
# 'f4' -> 'i4' is neither a safe nor a same-kind cast
assert_raises(TypeError, nditer, np.float32(2), [],
[['readonly']],
casting='same_kind',
op_dtypes=[np.dtype('i4')])
def test_iter_object_arrays_basic():
# Check that object arrays work
obj = {'a':3,'b':'d'}
a = np.array([[1, 2, 3], None, obj, None], dtype='O')
if HAS_REFCOUNT:
rc = sys.getrefcount(obj)
# Need to allow references for object arrays
assert_raises(TypeError, nditer, a)
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(obj), rc)
i = nditer(a, ['refs_ok'], ['readonly'])
vals = [x_[()] for x_ in i]
assert_equal(np.array(vals, dtype='O'), a)
vals, i, x = [None]*3
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(obj), rc)
i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'],
['readonly'], order='C')
assert_(i.iterationneedsapi)
vals = [x_[()] for x_ in i]
assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F'))
vals, i, x = [None]*3
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(obj), rc)
i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'],
['readwrite'], order='C')
with i:
for x in i:
x[...] = None
vals, i, x = [None]*3
if HAS_REFCOUNT:
assert_(sys.getrefcount(obj) == rc-1)
assert_equal(a, np.array([None]*4, dtype='O'))
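# Sketch of why 'refs_ok' is required above: iterating an object array moves
# PyObject references through the iterator, which is refused unless
# explicitly allowed. Illustrative helper only.
def _demo_refs_ok():
    a = np.array([{'k': 1}, None], dtype='O')
    it = np.nditer(a, ['refs_ok'], ['readonly'])
    vals = [x[()] for x in it]
    assert vals[0] == {'k': 1} and vals[1] is None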
def test_iter_object_arrays_conversions():
# Conversions to/from objects
a = np.arange(6, dtype='O')
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='i4')
with i:
for x in i:
x[...] += 1
assert_equal(a, np.arange(6)+1)
a = np.arange(6, dtype='i4')
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='O')
with i:
for x in i:
x[...] += 1
assert_equal(a, np.arange(6)+1)
# Non-contiguous object array
a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'O')])
a = a['a']
a[:] = np.arange(6)
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='i4')
with i:
for x in i:
x[...] += 1
assert_equal(a, np.arange(6)+1)
    # Non-contiguous value array
a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')])
a = a['a']
a[:] = np.arange(6) + 98172488
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='O')
with i:
ob = i[0][()]
if HAS_REFCOUNT:
rc = sys.getrefcount(ob)
for x in i:
x[...] += 1
if HAS_REFCOUNT:
assert_(sys.getrefcount(ob) == rc-1)
assert_equal(a, np.arange(6)+98172489)
def test_iter_common_dtype():
# Check that the iterator finds a common data type correctly
i = nditer([array([3], dtype='f4'), array([0], dtype='f8')],
['common_dtype'],
[['readonly', 'copy']]*2,
casting='safe')
assert_equal(i.dtypes[0], np.dtype('f8'))
assert_equal(i.dtypes[1], np.dtype('f8'))
i = nditer([array([3], dtype='i4'), array([0], dtype='f4')],
['common_dtype'],
[['readonly', 'copy']]*2,
casting='safe')
assert_equal(i.dtypes[0], np.dtype('f8'))
assert_equal(i.dtypes[1], np.dtype('f8'))
i = nditer([array([3], dtype='f4'), array(0, dtype='f8')],
['common_dtype'],
[['readonly', 'copy']]*2,
casting='same_kind')
assert_equal(i.dtypes[0], np.dtype('f4'))
assert_equal(i.dtypes[1], np.dtype('f4'))
i = nditer([array([3], dtype='u4'), array(0, dtype='i4')],
['common_dtype'],
[['readonly', 'copy']]*2,
casting='safe')
assert_equal(i.dtypes[0], np.dtype('u4'))
assert_equal(i.dtypes[1], np.dtype('u4'))
i = nditer([array([3], dtype='u4'), array(-12, dtype='i4')],
['common_dtype'],
[['readonly', 'copy']]*2,
casting='safe')
assert_equal(i.dtypes[0], np.dtype('i8'))
assert_equal(i.dtypes[1], np.dtype('i8'))
i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'),
array([2j], dtype='c8'), array([9], dtype='f8')],
['common_dtype'],
[['readonly', 'copy']]*4,
casting='safe')
assert_equal(i.dtypes[0], np.dtype('c16'))
assert_equal(i.dtypes[1], np.dtype('c16'))
assert_equal(i.dtypes[2], np.dtype('c16'))
assert_equal(i.dtypes[3], np.dtype('c16'))
assert_equal(i.value, (3, -12, 2j, 9))
# When allocating outputs, other outputs aren't factored in
i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], [],
[['readonly', 'copy'],
['writeonly', 'allocate'],
['writeonly']],
casting='safe')
assert_equal(i.dtypes[0], np.dtype('i4'))
assert_equal(i.dtypes[1], np.dtype('i4'))
assert_equal(i.dtypes[2], np.dtype('c16'))
# But, if common data types are requested, they are
i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')],
['common_dtype'],
[['readonly', 'copy'],
['writeonly', 'allocate'],
['writeonly']],
casting='safe')
assert_equal(i.dtypes[0], np.dtype('c16'))
assert_equal(i.dtypes[1], np.dtype('c16'))
assert_equal(i.dtypes[2], np.dtype('c16'))
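# Sketch of 'common_dtype' promotion: every operand is presented through the
# promoted type, which agrees with np.result_type on the same inputs.
# Illustrative helper only.
def _demo_common_dtype():
    ops = [np.array([3], dtype='i4'), np.array([0.5], dtype='f4')]
    it = np.nditer(ops, ['common_dtype'], [['readonly', 'copy']]*2,
                   casting='safe')
    assert it.dtypes[0] == it.dtypes[1] == np.result_type(*ops)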
def test_iter_copy_if_overlap():
# Ensure the iterator makes copies on read/write overlap, if requested
# Copy not needed, 1 op
for flag in ['readonly', 'writeonly', 'readwrite']:
a = arange(10)
i = nditer([a], ['copy_if_overlap'], [[flag]])
with i:
assert_(i.operands[0] is a)
# Copy needed, 2 ops, read-write overlap
x = arange(10)
a = x[1:]
b = x[:-1]
with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i:
assert_(not np.shares_memory(*i.operands))
# Copy not needed with elementwise, 2 ops, exactly same arrays
x = arange(10)
a = x
b = x
i = nditer([a, b], ['copy_if_overlap'], [['readonly', 'overlap_assume_elementwise'],
['readwrite', 'overlap_assume_elementwise']])
with i:
assert_(i.operands[0] is a and i.operands[1] is b)
with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i:
assert_(i.operands[0] is a and not np.shares_memory(i.operands[1], b))
# Copy not needed, 2 ops, no overlap
x = arange(10)
a = x[::2]
b = x[1::2]
i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']])
assert_(i.operands[0] is a and i.operands[1] is b)
# Copy needed, 2 ops, read-write overlap
x = arange(4, dtype=np.int8)
a = x[3:]
b = x.view(np.int32)[:1]
with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) as i:
assert_(not np.shares_memory(*i.operands))
# Copy needed, 3 ops, read-write overlap
for flag in ['writeonly', 'readwrite']:
x = np.ones([10, 10])
a = x
b = x.T
c = x
with nditer([a, b, c], ['copy_if_overlap'],
[['readonly'], ['readonly'], [flag]]) as i:
a2, b2, c2 = i.operands
assert_(not np.shares_memory(a2, c2))
assert_(not np.shares_memory(b2, c2))
# Copy not needed, 3 ops, read-only overlap
x = np.ones([10, 10])
a = x
b = x.T
c = x
i = nditer([a, b, c], ['copy_if_overlap'],
[['readonly'], ['readonly'], ['readonly']])
a2, b2, c2 = i.operands
assert_(a is a2)
assert_(b is b2)
assert_(c is c2)
    # Copy not needed, 3 ops, write op does not overlap the read-only ones
x = np.ones([10, 10])
a = x
b = np.ones([10, 10])
c = x.T
i = nditer([a, b, c], ['copy_if_overlap'],
[['readonly'], ['writeonly'], ['readonly']])
a2, b2, c2 = i.operands
assert_(a is a2)
assert_(b is b2)
assert_(c is c2)
# Copy not needed, 3 ops, write-only overlap
x = np.arange(7)
a = x[:3]
b = x[3:6]
c = x[4:7]
i = nditer([a, b, c], ['copy_if_overlap'],
[['readonly'], ['writeonly'], ['writeonly']])
a2, b2, c2 = i.operands
assert_(a is a2)
assert_(b is b2)
assert_(c is c2)
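# Sketch of 'copy_if_overlap': when a read operand overlaps a write operand
# in memory, one of them is copied so the writes cannot clobber values that
# are still to be read. Illustrative helper only.
def _demo_copy_if_overlap():
    x = np.arange(10)
    a, b = x[1:], x[:-1]  # overlapping views of the same buffer
    with np.nditer([a, b], ['copy_if_overlap'],
                   [['readonly'], ['readwrite']]) as it:
        assert not np.shares_memory(*it.operands)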
def test_iter_op_axes():
# Check that custom axes work
# Reverse the axes
a = arange(6).reshape(2, 3)
i = nditer([a, a.T], [], [['readonly']]*2, op_axes=[[0, 1], [1, 0]])
assert_(all([x == y for (x, y) in i]))
a = arange(24).reshape(2, 3, 4)
i = nditer([a.T, a], [], [['readonly']]*2, op_axes=[[2, 1, 0], None])
assert_(all([x == y for (x, y) in i]))
# Broadcast 1D to any dimension
a = arange(1, 31).reshape(2, 3, 5)
b = arange(1, 3)
i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [0, -1, -1]])
assert_equal([x*y for (x, y) in i], (a*b.reshape(2, 1, 1)).ravel())
b = arange(1, 4)
i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [-1, 0, -1]])
assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 3, 1)).ravel())
b = arange(1, 6)
i = nditer([a, b], [], [['readonly']]*2,
op_axes=[None, [np.newaxis, np.newaxis, 0]])
assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 1, 5)).ravel())
# Inner product-style broadcasting
a = arange(24).reshape(2, 3, 4)
b = arange(40).reshape(5, 2, 4)
i = nditer([a, b], ['multi_index'], [['readonly']]*2,
op_axes=[[0, 1, -1, -1], [-1, -1, 0, 1]])
assert_equal(i.shape, (2, 3, 5, 2))
# Matrix product-style broadcasting
a = arange(12).reshape(3, 4)
b = arange(20).reshape(4, 5)
i = nditer([a, b], ['multi_index'], [['readonly']]*2,
op_axes=[[0, -1], [-1, 1]])
assert_equal(i.shape, (3, 5))
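# Sketch of the op_axes pattern above, used here to compute an outer product:
# each input owns one iteration axis and is broadcast (-1) along the other.
# Illustrative helper only.
def _demo_op_axes_outer():
    a, b = np.arange(1, 4), np.arange(1, 5)
    it = np.nditer([a, b, None], [],
                   [['readonly'], ['readonly'], ['writeonly', 'allocate']],
                   op_axes=[[0, -1], [-1, 0], None])
    with it:
        for x, y, z in it:
            z[...] = x * y
        result = it.operands[2]
    assert result.shape == (3, 4)
    assert (result == np.outer(a, b)).all()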
def test_iter_op_axes_errors():
# Check that custom axes throws errors for bad inputs
# Wrong number of items in op_axes
a = arange(6).reshape(2, 3)
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0], [1], [0]])
# Out of bounds items in op_axes
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[2, 1], [0, 1]])
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0, 1], [2, -1]])
# Duplicate items in op_axes
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0, 0], [0, 1]])
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0, 1], [1, 1]])
    # op_axes entries of different lengths
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0, 1], [0, 1, 0]])
# Non-broadcastable dimensions in the result
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0, 1], [1, 0]])
def test_iter_copy():
# Check that copying the iterator works correctly
a = arange(24).reshape(2, 3, 4)
# Simple iterator
i = nditer(a)
j = i.copy()
assert_equal([x[()] for x in i], [x[()] for x in j])
i.iterindex = 3
j = i.copy()
assert_equal([x[()] for x in i], [x[()] for x in j])
# Buffered iterator
i = nditer(a, ['buffered', 'ranged'], order='F', buffersize=3)
j = i.copy()
assert_equal([x[()] for x in i], [x[()] for x in j])
i.iterindex = 3
j = i.copy()
assert_equal([x[()] for x in i], [x[()] for x in j])
i.iterrange = (3, 9)
j = i.copy()
assert_equal([x[()] for x in i], [x[()] for x in j])
i.iterrange = (2, 18)
next(i)
next(i)
j = i.copy()
assert_equal([x[()] for x in i], [x[()] for x in j])
# Casting iterator
with nditer(a, ['buffered'], order='F', casting='unsafe',
op_dtypes='f8', buffersize=5) as i:
j = i.copy()
assert_equal([x[()] for x in j], a.ravel(order='F'))
a = arange(24, dtype='<i4').reshape(2, 3, 4)
with nditer(a, ['buffered'], order='F', casting='unsafe',
op_dtypes='>f8', buffersize=5) as i:
j = i.copy()
assert_equal([x[()] for x in j], a.ravel(order='F'))
@pytest.mark.parametrize("dtype", np.typecodes["All"])
@pytest.mark.parametrize("loop_dtype", np.typecodes["All"])
@pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
def test_iter_copy_casts(dtype, loop_dtype):
# Ensure the dtype is never flexible:
if loop_dtype.lower() == "m":
loop_dtype = loop_dtype + "[ms]"
elif np.dtype(loop_dtype).itemsize == 0:
loop_dtype = loop_dtype + "50"
# Make things a bit more interesting by requiring a byte-swap as well:
arr = np.ones(1000, dtype=np.dtype(dtype).newbyteorder())
try:
expected = arr.astype(loop_dtype)
except Exception:
# Some casts are not possible, do not worry about them
return
it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"],
op_dtypes=[loop_dtype], casting="unsafe")
if np.issubdtype(np.dtype(loop_dtype), np.number):
        # Casting to strings can behave oddly, so only sanity-check the
        # expected values for simple numeric dtypes:
assert_array_equal(expected, np.ones(1000, dtype=loop_dtype))
it_copy = it.copy()
res = next(it)
del it
res_copy = next(it_copy)
del it_copy
assert_array_equal(res, expected)
assert_array_equal(res_copy, expected)
def test_iter_copy_casts_structured():
# Test a complicated structured dtype for casting, as it requires
# both multiple steps and a more complex casting setup.
# Includes a structured -> unstructured (any to object), and many other
# casts, which cause this to require all steps in the casting machinery
# one level down as well as the iterator copy (which uses NpyAuxData clone)
in_dtype = np.dtype([("a", np.dtype("i,")),
("b", np.dtype(">i,<i,>d,S17,>d,(3)f,O,i1"))])
out_dtype = np.dtype([("a", np.dtype("O")),
("b", np.dtype(">i,>i,S17,>d,>U3,(3)d,i1,O"))])
arr = np.ones(1000, dtype=in_dtype)
it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"],
op_dtypes=[out_dtype], casting="unsafe")
it_copy = it.copy()
res1 = next(it)
del it
res2 = next(it_copy)
del it_copy
expected = arr["a"].astype(out_dtype["a"])
assert_array_equal(res1["a"], expected)
assert_array_equal(res2["a"], expected)
for field in in_dtype["b"].names:
        # Note: the .base strips the subarray shape from the field dtype
expected = arr["b"][field].astype(out_dtype["b"][field].base)
assert_array_equal(res1["b"][field], expected)
assert_array_equal(res2["b"][field], expected)
def test_iter_allocate_output_simple():
# Check that the iterator will properly allocate outputs
# Simple case
a = arange(6)
i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
op_dtypes=[None, np.dtype('f4')])
assert_equal(i.operands[1].shape, a.shape)
assert_equal(i.operands[1].dtype, np.dtype('f4'))
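# Sketch of the typical use of an allocated output: iterate, fill it, and
# read it back from the operands. Illustrative helper only.
def _demo_allocate_output():
    a = np.arange(6).reshape(2, 3)
    with np.nditer([a, None], [],
                   [['readonly'], ['writeonly', 'allocate']]) as it:
        for x, y in it:
            y[...] = x * 2
        out = it.operands[1]
    assert (out == a * 2).all()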
def test_iter_allocate_output_buffered_readwrite():
# Allocated output with buffering + delay_bufalloc
a = arange(6)
i = nditer([a, None], ['buffered', 'delay_bufalloc'],
[['readonly'], ['allocate', 'readwrite']])
with i:
i.operands[1][:] = 1
i.reset()
for x in i:
x[1][...] += x[0][...]
assert_equal(i.operands[1], a+1)
def test_iter_allocate_output_itorder():
# The allocated output should match the iteration order
# C-order input, best iteration order
a = arange(6, dtype='i4').reshape(2, 3)
i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
op_dtypes=[None, np.dtype('f4')])
assert_equal(i.operands[1].shape, a.shape)
assert_equal(i.operands[1].strides, a.strides)
assert_equal(i.operands[1].dtype, np.dtype('f4'))
# F-order input, best iteration order
a = arange(24, dtype='i4').reshape(2, 3, 4).T
i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
op_dtypes=[None, np.dtype('f4')])
assert_equal(i.operands[1].shape, a.shape)
assert_equal(i.operands[1].strides, a.strides)
assert_equal(i.operands[1].dtype, np.dtype('f4'))
# Non-contiguous input, C iteration order
a = arange(24, dtype='i4').reshape(2, 3, 4).swapaxes(0, 1)
i = nditer([a, None], [],
[['readonly'], ['writeonly', 'allocate']],
order='C',
op_dtypes=[None, np.dtype('f4')])
assert_equal(i.operands[1].shape, a.shape)
assert_equal(i.operands[1].strides, (32, 16, 4))
assert_equal(i.operands[1].dtype, np.dtype('f4'))
def test_iter_allocate_output_opaxes():
# Specifying op_axes should work
a = arange(24, dtype='i4').reshape(2, 3, 4)
i = nditer([None, a], [], [['writeonly', 'allocate'], ['readonly']],
op_dtypes=[np.dtype('u4'), None],
op_axes=[[1, 2, 0], None])
assert_equal(i.operands[0].shape, (4, 2, 3))
assert_equal(i.operands[0].strides, (4, 48, 16))
assert_equal(i.operands[0].dtype, np.dtype('u4'))
def test_iter_allocate_output_types_promotion():
# Check type promotion of automatic outputs
i = nditer([array([3], dtype='f4'), array([0], dtype='f8'), None], [],
[['readonly']]*2+[['writeonly', 'allocate']])
assert_equal(i.dtypes[2], np.dtype('f8'))
i = nditer([array([3], dtype='i4'), array([0], dtype='f4'), None], [],
[['readonly']]*2+[['writeonly', 'allocate']])
assert_equal(i.dtypes[2], np.dtype('f8'))
i = nditer([array([3], dtype='f4'), array(0, dtype='f8'), None], [],
[['readonly']]*2+[['writeonly', 'allocate']])
assert_equal(i.dtypes[2], np.dtype('f4'))
i = nditer([array([3], dtype='u4'), array(0, dtype='i4'), None], [],
[['readonly']]*2+[['writeonly', 'allocate']])
assert_equal(i.dtypes[2], np.dtype('u4'))
i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), None], [],
[['readonly']]*2+[['writeonly', 'allocate']])
assert_equal(i.dtypes[2], np.dtype('i8'))
def test_iter_allocate_output_types_byte_order():
# Verify the rules for byte order changes
# When there's just one input, the output type exactly matches
a = array([3], dtype='u4').newbyteorder()
i = nditer([a, None], [],
[['readonly'], ['writeonly', 'allocate']])
assert_equal(i.dtypes[0], i.dtypes[1])
# With two or more inputs, the output type is in native byte order
i = nditer([a, a, None], [],
[['readonly'], ['readonly'], ['writeonly', 'allocate']])
assert_(i.dtypes[0] != i.dtypes[2])
assert_equal(i.dtypes[0].newbyteorder('='), i.dtypes[2])
def test_iter_allocate_output_types_scalar():
# If the inputs are all scalars, the output should be a scalar
i = nditer([None, 1, 2.3, np.float32(12), np.complex128(3)], [],
[['writeonly', 'allocate']] + [['readonly']]*4)
assert_equal(i.operands[0].dtype, np.dtype('complex128'))
assert_equal(i.operands[0].ndim, 0)
def test_iter_allocate_output_subtype():
# Make sure that the subtype with priority wins
class MyNDArray(np.ndarray):
__array_priority__ = 15
# subclass vs ndarray
a = np.array([[1, 2], [3, 4]]).view(MyNDArray)
b = np.arange(4).reshape(2, 2).T
i = nditer([a, b, None], [],
[['readonly'], ['readonly'], ['writeonly', 'allocate']])
assert_equal(type(a), type(i.operands[2]))
assert_(type(b) is not type(i.operands[2]))
assert_equal(i.operands[2].shape, (2, 2))
# If subtypes are disabled, we should get back an ndarray.
i = nditer([a, b, None], [],
[['readonly'], ['readonly'],
['writeonly', 'allocate', 'no_subtype']])
assert_equal(type(b), type(i.operands[2]))
assert_(type(a) is not type(i.operands[2]))
assert_equal(i.operands[2].shape, (2, 2))
def test_iter_allocate_output_errors():
# Check that the iterator will throw errors for bad output allocations
# Need an input if no output data type is specified
a = arange(6)
assert_raises(TypeError, nditer, [a, None], [],
[['writeonly'], ['writeonly', 'allocate']])
# Allocated output should be flagged for writing
assert_raises(ValueError, nditer, [a, None], [],
[['readonly'], ['allocate', 'readonly']])
# Allocated output can't have buffering without delayed bufalloc
assert_raises(ValueError, nditer, [a, None], ['buffered'],
['allocate', 'readwrite'])
# Must specify at least one input
assert_raises(ValueError, nditer, [None, None], [],
[['writeonly', 'allocate'],
['writeonly', 'allocate']],
op_dtypes=[np.dtype('f4'), np.dtype('f4')])
# If using op_axes, must specify all the axes
a = arange(24, dtype='i4').reshape(2, 3, 4)
assert_raises(ValueError, nditer, [a, None], [],
[['readonly'], ['writeonly', 'allocate']],
op_dtypes=[None, np.dtype('f4')],
op_axes=[None, [0, np.newaxis, 1]])
# If using op_axes, the axes must be within bounds
assert_raises(ValueError, nditer, [a, None], [],
[['readonly'], ['writeonly', 'allocate']],
op_dtypes=[None, np.dtype('f4')],
op_axes=[None, [0, 3, 1]])
# If using op_axes, there can't be duplicates
assert_raises(ValueError, nditer, [a, None], [],
[['readonly'], ['writeonly', 'allocate']],
op_dtypes=[None, np.dtype('f4')],
op_axes=[None, [0, 2, 1, 0]])
    # For a reduction, not all axes have to be specified, but a hole in
    # the middle of op_axes (here iteration axis 1 is skipped) is an error.
a = arange(24, dtype='i4').reshape(2, 3, 4)
assert_raises(ValueError, nditer, [a, None], ["reduce_ok"],
[['readonly'], ['readwrite', 'allocate']],
op_dtypes=[None, np.dtype('f4')],
op_axes=[None, [0, np.newaxis, 2]])
def test_iter_remove_axis():
a = arange(24).reshape(2, 3, 4)
i = nditer(a, ['multi_index'])
i.remove_axis(1)
assert_equal([x for x in i], a[:, 0,:].ravel())
a = a[::-1,:,:]
i = nditer(a, ['multi_index'])
i.remove_axis(0)
assert_equal([x for x in i], a[0,:,:].ravel())
def test_iter_remove_multi_index_inner_loop():
# Check that removing multi-index support works
a = arange(24).reshape(2, 3, 4)
i = nditer(a, ['multi_index'])
assert_equal(i.ndim, 3)
assert_equal(i.shape, (2, 3, 4))
assert_equal(i.itviews[0].shape, (2, 3, 4))
# Removing the multi-index tracking causes all dimensions to coalesce
before = [x for x in i]
i.remove_multi_index()
after = [x for x in i]
assert_equal(before, after)
assert_equal(i.ndim, 1)
assert_raises(ValueError, lambda i:i.shape, i)
assert_equal(i.itviews[0].shape, (24,))
    # With the external loop enabled, there's just one iteration of 24 elements
i.reset()
assert_equal(i.itersize, 24)
assert_equal(i[0].shape, tuple())
i.enable_external_loop()
assert_equal(i.itersize, 24)
assert_equal(i[0].shape, (24,))
assert_equal(i.value, arange(24))
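# Sketch of what enable_external_loop buys: contiguous data coalesces into
# large inner chunks, so callers touch whole blocks instead of one element
# at a time. Illustrative helper only.
def _demo_external_loop():
    a = np.arange(24).reshape(2, 3, 4)
    chunks = [chunk.copy() for chunk in
              np.nditer(a, ['external_loop'], order='C')]
    # a C-contiguous input coalesces into one chunk of all 24 elements
    assert len(chunks) == 1 and chunks[0].shape == (24,)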
def test_iter_iterindex():
# Make sure iterindex works
buffersize = 5
a = arange(24).reshape(4, 3, 2)
for flags in ([], ['buffered']):
i = nditer(a, flags, buffersize=buffersize)
assert_equal(iter_iterindices(i), list(range(24)))
i.iterindex = 2
assert_equal(iter_iterindices(i), list(range(2, 24)))
i = nditer(a, flags, order='F', buffersize=buffersize)
assert_equal(iter_iterindices(i), list(range(24)))
i.iterindex = 5
assert_equal(iter_iterindices(i), list(range(5, 24)))
i = nditer(a[::-1], flags, order='F', buffersize=buffersize)
assert_equal(iter_iterindices(i), list(range(24)))
i.iterindex = 9
assert_equal(iter_iterindices(i), list(range(9, 24)))
i = nditer(a[::-1, ::-1], flags, order='C', buffersize=buffersize)
assert_equal(iter_iterindices(i), list(range(24)))
i.iterindex = 13
assert_equal(iter_iterindices(i), list(range(13, 24)))
i = nditer(a[::1, ::-1], flags, buffersize=buffersize)
assert_equal(iter_iterindices(i), list(range(24)))
i.iterindex = 23
assert_equal(iter_iterindices(i), list(range(23, 24)))
i.reset()
i.iterindex = 2
assert_equal(iter_iterindices(i), list(range(2, 24)))
def test_iter_iterrange():
# Make sure getting and resetting the iterrange works
buffersize = 5
a = arange(24, dtype='i4').reshape(4, 3, 2)
a_fort = a.ravel(order='F')
i = nditer(a, ['ranged'], ['readonly'], order='F',
buffersize=buffersize)
assert_equal(i.iterrange, (0, 24))
assert_equal([x[()] for x in i], a_fort)
for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]:
i.iterrange = r
assert_equal(i.iterrange, r)
assert_equal([x[()] for x in i], a_fort[r[0]:r[1]])
i = nditer(a, ['ranged', 'buffered'], ['readonly'], order='F',
op_dtypes='f8', buffersize=buffersize)
assert_equal(i.iterrange, (0, 24))
assert_equal([x[()] for x in i], a_fort)
for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]:
i.iterrange = r
assert_equal(i.iterrange, r)
assert_equal([x[()] for x in i], a_fort[r[0]:r[1]])
def get_array(i):
val = np.array([], dtype='f8')
for x in i:
val = np.concatenate((val, x))
return val
i = nditer(a, ['ranged', 'buffered', 'external_loop'],
['readonly'], order='F',
op_dtypes='f8', buffersize=buffersize)
assert_equal(i.iterrange, (0, 24))
assert_equal(get_array(i), a_fort)
for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]:
i.iterrange = r
assert_equal(i.iterrange, r)
assert_equal(get_array(i), a_fort[r[0]:r[1]])
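# Sketch of 'ranged' iteration: iterrange restricts the visited iterindex
# window, which is the natural hook for splitting work into chunks.
# Illustrative helper only.
def _demo_ranged_chunks():
    a = np.arange(10.)
    total = 0.0
    for start, stop in [(0, 5), (5, 10)]:
        it = np.nditer(a, ['ranged'], ['readonly'])
        it.iterrange = (start, stop)
        total += sum(x[()] for x in it)
    assert total == a.sum()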
def test_iter_buffering():
# Test buffering with several buffer sizes and types
arrays = []
# F-order swapped array
arrays.append(np.arange(24,
dtype='c16').reshape(2, 3, 4).T.newbyteorder().byteswap())
# Contiguous 1-dimensional array
arrays.append(np.arange(10, dtype='f4'))
# Unaligned array
a = np.zeros((4*16+1,), dtype='i1')[1:]
a.dtype = 'i4'
a[:] = np.arange(16, dtype='i4')
arrays.append(a)
# 4-D F-order array
arrays.append(np.arange(120, dtype='i4').reshape(5, 3, 2, 4).T)
for a in arrays:
for buffersize in (1, 2, 3, 5, 8, 11, 16, 1024):
vals = []
i = nditer(a, ['buffered', 'external_loop'],
[['readonly', 'nbo', 'aligned']],
order='C',
casting='equiv',
buffersize=buffersize)
while not i.finished:
assert_(i[0].size <= buffersize)
vals.append(i[0].copy())
i.iternext()
assert_equal(np.concatenate(vals), a.ravel(order='C'))
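# Sketch of buffered reads: with 'buffered' + 'external_loop', each visible
# chunk holds at most buffersize elements, already converted to the
# requested dtype. Illustrative helper only.
def _demo_buffered_chunks():
    a = np.arange(10, dtype='f4')
    it = np.nditer(a, ['buffered', 'external_loop'], [['readonly']],
                   casting='same_kind', op_dtypes=[np.dtype('f8')],
                   buffersize=4)
    sizes = [chunk.size for chunk in it]
    assert all(s <= 4 for s in sizes) and sum(sizes) == 10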
def test_iter_write_buffering():
# Test that buffering of writes is working
# F-order swapped array
a = np.arange(24).reshape(2, 3, 4).T.newbyteorder().byteswap()
i = nditer(a, ['buffered'],
[['readwrite', 'nbo', 'aligned']],
casting='equiv',
order='C',
buffersize=16)
x = 0
with i:
while not i.finished:
i[0] = x
x += 1
i.iternext()
assert_equal(a.ravel(order='C'), np.arange(24))
def test_iter_buffering_delayed_alloc():
# Test that delaying buffer allocation works
a = np.arange(6)
b = np.arange(1, dtype='f4')
i = nditer([a, b], ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok'],
['readwrite'],
casting='unsafe',
op_dtypes='f4')
assert_(i.has_delayed_bufalloc)
assert_raises(ValueError, lambda i:i.multi_index, i)
assert_raises(ValueError, lambda i:i[0], i)
assert_raises(ValueError, lambda i:i[0:2], i)
def assign_iter(i):
i[0] = 0
assert_raises(ValueError, assign_iter, i)
i.reset()
assert_(not i.has_delayed_bufalloc)
assert_equal(i.multi_index, (0,))
with i:
assert_equal(i[0], 0)
i[1] = 1
assert_equal(i[0:2], [0, 1])
assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1]*6)))
def test_iter_buffered_cast_simple():
# Test that buffering can handle a simple cast
a = np.arange(10, dtype='f4')
i = nditer(a, ['buffered', 'external_loop'],
[['readwrite', 'nbo', 'aligned']],
casting='same_kind',
op_dtypes=[np.dtype('f8')],
buffersize=3)
with i:
for v in i:
v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='f4'))
def test_iter_buffered_cast_byteswapped():
# Test that buffering can handle a cast which requires swap->cast->swap
a = np.arange(10, dtype='f4').newbyteorder().byteswap()
i = nditer(a, ['buffered', 'external_loop'],
[['readwrite', 'nbo', 'aligned']],
casting='same_kind',
op_dtypes=[np.dtype('f8').newbyteorder()],
buffersize=3)
with i:
for v in i:
v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='f4'))
with suppress_warnings() as sup:
sup.filter(np.ComplexWarning)
a = np.arange(10, dtype='f8').newbyteorder().byteswap()
i = nditer(a, ['buffered', 'external_loop'],
[['readwrite', 'nbo', 'aligned']],
casting='unsafe',
op_dtypes=[np.dtype('c8').newbyteorder()],
buffersize=3)
with i:
for v in i:
v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='f8'))
def test_iter_buffered_cast_byteswapped_complex():
# Test that buffering can handle a cast which requires swap->cast->copy
a = np.arange(10, dtype='c8').newbyteorder().byteswap()
a += 2j
i = nditer(a, ['buffered', 'external_loop'],
[['readwrite', 'nbo', 'aligned']],
casting='same_kind',
op_dtypes=[np.dtype('c16')],
buffersize=3)
with i:
for v in i:
v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)
a = np.arange(10, dtype='c8')
a += 2j
i = nditer(a, ['buffered', 'external_loop'],
[['readwrite', 'nbo', 'aligned']],
casting='same_kind',
op_dtypes=[np.dtype('c16').newbyteorder()],
buffersize=3)
with i:
for v in i:
v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)
a = np.arange(10, dtype=np.clongdouble).newbyteorder().byteswap()
a += 2j
i = nditer(a, ['buffered', 'external_loop'],
[['readwrite', 'nbo', 'aligned']],
casting='same_kind',
op_dtypes=[np.dtype('c16')],
buffersize=3)
with i:
for v in i:
v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j)
a = np.arange(10, dtype=np.longdouble).newbyteorder().byteswap()
i = nditer(a, ['buffered', 'external_loop'],
[['readwrite', 'nbo', 'aligned']],
casting='same_kind',
op_dtypes=[np.dtype('f4')],
buffersize=7)
with i:
for v in i:
v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype=np.longdouble))
def test_iter_buffered_cast_structured_type():
# Tests buffering of structured types
# simple -> struct type (duplicates the value)
sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
a = np.arange(3, dtype='f4') + 0.5
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt)
vals = [np.array(x) for x in i]
assert_equal(vals[0]['a'], 0.5)
assert_equal(vals[0]['b'], 0)
assert_equal(vals[0]['c'], [[(0.5)]*3]*2)
assert_equal(vals[0]['d'], 0.5)
assert_equal(vals[1]['a'], 1.5)
assert_equal(vals[1]['b'], 1)
assert_equal(vals[1]['c'], [[(1.5)]*3]*2)
assert_equal(vals[1]['d'], 1.5)
assert_equal(vals[0].dtype, np.dtype(sdt))
# object -> struct type
sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
a = np.zeros((3,), dtype='O')
a[0] = (0.5, 0.5, [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], 0.5)
a[1] = (1.5, 1.5, [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]], 1.5)
a[2] = (2.5, 2.5, [[2.5, 2.5, 2.5], [2.5, 2.5, 2.5]], 2.5)
if HAS_REFCOUNT:
rc = sys.getrefcount(a[0])
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt)
vals = [x.copy() for x in i]
assert_equal(vals[0]['a'], 0.5)
assert_equal(vals[0]['b'], 0)
assert_equal(vals[0]['c'], [[(0.5)]*3]*2)
assert_equal(vals[0]['d'], 0.5)
assert_equal(vals[1]['a'], 1.5)
assert_equal(vals[1]['b'], 1)
assert_equal(vals[1]['c'], [[(1.5)]*3]*2)
assert_equal(vals[1]['d'], 1.5)
assert_equal(vals[0].dtype, np.dtype(sdt))
vals, i, x = [None]*3
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(a[0]), rc)
# single-field struct type -> simple
sdt = [('a', 'f4')]
a = np.array([(5.5,), (8,)], dtype=sdt)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes='i4')
assert_equal([x_[()] for x_ in i], [5, 8])
# make sure multi-field struct type -> simple doesn't work
sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt)
assert_raises(TypeError, lambda: (
nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes='i4')))
# struct type -> struct type (field-wise copy)
sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')]
a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
assert_equal([np.array(x_) for x_ in i],
[np.array((1, 2, 3), dtype=sdt2),
np.array((4, 5, 6), dtype=sdt2)])
def test_iter_buffered_cast_structured_type_failure_with_cleanup():
# make sure struct type -> struct type with different
# number of fields fails
sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
sdt2 = [('b', 'O'), ('a', 'f8')]
a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1)
for intent in ["readwrite", "readonly", "writeonly"]:
        # This test was initially designed to exercise an error at a different
        # place, but it now raises earlier due to the cast not being possible:
        # `assert np.can_cast(a.dtype, sdt2, casting="unsafe")` fails.
        # Without a faulty DType, there is probably no reliable way to
        # reproduce the originally tested behaviour.
simple_arr = np.array([1, 2], dtype="i,i") # requires clean up
with pytest.raises(TypeError):
nditer((simple_arr, a), ['buffered', 'refs_ok'], [intent, intent],
casting='unsafe', op_dtypes=["f,f", sdt2])
def test_buffered_cast_error_paths():
with pytest.raises(ValueError):
        # The 'S1' input is cast into an 'i' buffer, which fails for "a"
np.nditer((np.array("a", dtype="S1"),), op_dtypes=["i"],
casting="unsafe", flags=["buffered"])
    # On writeback, the 'S1' buffer is cast into the 'i' output
it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"],
op_flags=["writeonly"], casting="unsafe", flags=["buffered"])
with pytest.raises(ValueError):
with it:
buf = next(it)
buf[...] = "a" # cannot be converted to int.
@pytest.mark.skipif(not HAS_REFCOUNT, reason="PyPy seems to not hit this.")
def test_buffered_cast_error_paths_unraisable():
    # The following gives an unraisable error, which pytest sometimes
    # captures (depending on the Python and/or pytest version). With
    # Python >= 3.8 this can probably be reworked to check for
    # pytest.PytestUnraisableExceptionWarning:
code = textwrap.dedent("""
import numpy as np
it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"],
op_flags=["writeonly"], casting="unsafe", flags=["buffered"])
buf = next(it)
buf[...] = "a"
del buf, it # Flushing only happens during deallocate right now.
""")
res = subprocess.check_output([sys.executable, "-c", code],
stderr=subprocess.STDOUT, text=True)
assert "ValueError" in res
def test_iter_buffered_cast_subarray():
# Tests buffering of subarrays
# one element -> many (copies it to all)
sdt1 = [('a', 'f4')]
sdt2 = [('a', 'f8', (3, 2, 2))]
a = np.zeros((6,), dtype=sdt1)
a['a'] = np.arange(6)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
for x, count in zip(i, list(range(6))):
assert_(np.all(x['a'] == count))
# one element -> many -> back (copies it to all)
sdt1 = [('a', 'O', (1, 1))]
sdt2 = [('a', 'O', (3, 2, 2))]
a = np.zeros((6,), dtype=sdt1)
a['a'][:, 0, 0] = np.arange(6)
i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
casting='unsafe',
op_dtypes=sdt2)
with i:
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_(np.all(x['a'] == count))
x['a'][0] += 2
count += 1
assert_equal(a['a'], np.arange(6).reshape(6, 1, 1)+2)
# many -> one element -> back (copies just element 0)
sdt1 = [('a', 'O', (3, 2, 2))]
sdt2 = [('a', 'O', (1,))]
a = np.zeros((6,), dtype=sdt1)
a['a'][:, 0, 0, 0] = np.arange(6)
i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
casting='unsafe',
op_dtypes=sdt2)
with i:
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'], count)
x['a'] += 2
count += 1
assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1)*np.ones((1, 3, 2, 2))+2)
# many -> one element -> back (copies just element 0)
sdt1 = [('a', 'f8', (3, 2, 2))]
sdt2 = [('a', 'O', (1,))]
a = np.zeros((6,), dtype=sdt1)
a['a'][:, 0, 0, 0] = np.arange(6)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'], count)
count += 1
# many -> one element (copies just element 0)
sdt1 = [('a', 'O', (3, 2, 2))]
sdt2 = [('a', 'f4', (1,))]
a = np.zeros((6,), dtype=sdt1)
a['a'][:, 0, 0, 0] = np.arange(6)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'], count)
count += 1
# many -> matching shape (straightforward copy)
sdt1 = [('a', 'O', (3, 2, 2))]
sdt2 = [('a', 'f4', (3, 2, 2))]
a = np.zeros((6,), dtype=sdt1)
a['a'] = np.arange(6*3*2*2).reshape(6, 3, 2, 2)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'], a[count]['a'])
count += 1
# vector -> smaller vector (truncates)
sdt1 = [('a', 'f8', (6,))]
sdt2 = [('a', 'f4', (2,))]
a = np.zeros((6,), dtype=sdt1)
a['a'] = np.arange(6*6).reshape(6, 6)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'], a[count]['a'][:2])
count += 1
# vector -> bigger vector (pads with zeros)
sdt1 = [('a', 'f8', (2,))]
sdt2 = [('a', 'f4', (6,))]
a = np.zeros((6,), dtype=sdt1)
a['a'] = np.arange(6*2).reshape(6, 2)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'][:2], a[count]['a'])
assert_equal(x['a'][2:], [0, 0, 0, 0])
count += 1
# vector -> matrix (broadcasts)
sdt1 = [('a', 'f8', (2,))]
sdt2 = [('a', 'f4', (2, 2))]
a = np.zeros((6,), dtype=sdt1)
a['a'] = np.arange(6*2).reshape(6, 2)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'][0], a[count]['a'])
assert_equal(x['a'][1], a[count]['a'])
count += 1
# vector -> matrix (broadcasts and zero-pads)
sdt1 = [('a', 'f8', (2, 1))]
sdt2 = [('a', 'f4', (3, 2))]
a = np.zeros((6,), dtype=sdt1)
a['a'] = np.arange(6*2).reshape(6, 2, 1)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'][:2, 0], a[count]['a'][:, 0])
assert_equal(x['a'][:2, 1], a[count]['a'][:, 0])
assert_equal(x['a'][2,:], [0, 0])
count += 1
# matrix -> matrix (truncates and zero-pads)
sdt1 = [('a', 'f8', (2, 3))]
sdt2 = [('a', 'f4', (3, 2))]
a = np.zeros((6,), dtype=sdt1)
a['a'] = np.arange(6*2*3).reshape(6, 2, 3)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'][:2, 0], a[count]['a'][:, 0])
assert_equal(x['a'][:2, 1], a[count]['a'][:, 1])
assert_equal(x['a'][2,:], [0, 0])
count += 1
def test_iter_buffering_badwriteback():
    # Writing back from a buffer cannot combine elements
    # a needs write buffering, but has a broadcast dimension
a = np.arange(6).reshape(2, 3, 1)
b = np.arange(12).reshape(2, 3, 2)
assert_raises(ValueError, nditer, [a, b],
['buffered', 'external_loop'],
[['readwrite'], ['writeonly']],
order='C')
# But if a is readonly, it's fine
nditer([a, b], ['buffered', 'external_loop'],
[['readonly'], ['writeonly']],
order='C')
# If a has just one element, it's fine too (constant 0 stride, a reduction)
a = np.arange(1).reshape(1, 1, 1)
nditer([a, b], ['buffered', 'external_loop', 'reduce_ok'],
[['readwrite'], ['writeonly']],
order='C')
# check that it fails on other dimensions too
a = np.arange(6).reshape(1, 3, 2)
assert_raises(ValueError, nditer, [a, b],
['buffered', 'external_loop'],
[['readwrite'], ['writeonly']],
order='C')
a = np.arange(4).reshape(2, 1, 2)
assert_raises(ValueError, nditer, [a, b],
['buffered', 'external_loop'],
[['readwrite'], ['writeonly']],
order='C')
def test_iter_buffering_string():
# Safe casting disallows shrinking strings
a = np.array(['abc', 'a', 'abcd'], dtype=np.bytes_)
assert_equal(a.dtype, np.dtype('S4'))
assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'],
op_dtypes='S2')
i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6')
assert_equal(i[0], b'abc')
assert_equal(i[0].dtype, np.dtype('S6'))
a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode_)
assert_equal(a.dtype, np.dtype('U4'))
assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'],
op_dtypes='U2')
i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6')
assert_equal(i[0], u'abc')
assert_equal(i[0].dtype, np.dtype('U6'))
def test_iter_buffering_growinner():
# Test that the inner loop grows when no buffering is needed
a = np.arange(30)
i = nditer(a, ['buffered', 'growinner', 'external_loop'],
buffersize=5)
# Should end up with just one inner loop here
assert_equal(i[0].size, a.size)
@pytest.mark.slow
def test_iter_buffered_reduce_reuse():
    # A one-element view into a buffer large enough for all the strided
    # views below, including ones with negative strides.
    a = np.arange(2*3**5)[3**5:3**5+1]
flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok']
op_flags = [('readonly',), ('readwrite', 'allocate')]
op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]]
# wrong dtype to force buffering
op_dtypes = [float, a.dtype]
def get_params():
for xs in range(-3**2, 3**2 + 1):
for ys in range(xs, 3**2 + 1):
for op_axes in op_axes_list:
                    # The last stride is the reduced (inner) one and is
                    # therefore not important for this test.
strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize)
arr = np.lib.stride_tricks.as_strided(a, (3, 3, 3), strides)
for skip in [0, 1]:
yield arr, op_axes, skip
for arr, op_axes, skip in get_params():
nditer2 = np.nditer([arr.copy(), None],
op_axes=op_axes, flags=flags, op_flags=op_flags,
op_dtypes=op_dtypes)
with nditer2:
nditer2.operands[-1][...] = 0
nditer2.reset()
nditer2.iterindex = skip
for (a2_in, b2_in) in nditer2:
b2_in += a2_in.astype(np.int_)
comp_res = nditer2.operands[-1]
for bufsize in range(0, 3**3):
nditer1 = np.nditer([arr, None],
op_axes=op_axes, flags=flags, op_flags=op_flags,
buffersize=bufsize, op_dtypes=op_dtypes)
with nditer1:
nditer1.operands[-1][...] = 0
nditer1.reset()
nditer1.iterindex = skip
for (a1_in, b1_in) in nditer1:
b1_in += a1_in.astype(np.int_)
res = nditer1.operands[-1]
assert_array_equal(res, comp_res)
def test_iter_no_broadcast():
# Test that the no_broadcast flag works
a = np.arange(24).reshape(2, 3, 4)
b = np.arange(6).reshape(2, 3, 1)
c = np.arange(12).reshape(3, 4)
nditer([a, b, c], [],
[['readonly', 'no_broadcast'],
['readonly'], ['readonly']])
assert_raises(ValueError, nditer, [a, b, c], [],
[['readonly'], ['readonly', 'no_broadcast'], ['readonly']])
assert_raises(ValueError, nditer, [a, b, c], [],
[['readonly'], ['readonly'], ['readonly', 'no_broadcast']])
class TestIterNested:
def test_basic(self):
# Test nested iteration basic usage
a = arange(12).reshape(2, 3, 2)
i, j = np.nested_iters(a, [[0], [1, 2]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
i, j = np.nested_iters(a, [[0, 1], [2]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
i, j = np.nested_iters(a, [[0, 2], [1]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
def test_reorder(self):
# Test nested iteration basic usage
a = arange(12).reshape(2, 3, 2)
# In 'K' order (default), it gets reordered
i, j = np.nested_iters(a, [[0], [2, 1]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
i, j = np.nested_iters(a, [[1, 0], [2]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
i, j = np.nested_iters(a, [[2, 0], [1]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
# In 'C' order, it doesn't
i, j = np.nested_iters(a, [[0], [2, 1]], order='C')
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]])
i, j = np.nested_iters(a, [[1, 0], [2]], order='C')
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]])
i, j = np.nested_iters(a, [[2, 0], [1]], order='C')
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]])
def test_flip_axes(self):
# Test nested iteration with negative axes
a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1]
# In 'K' order (default), the axes all get flipped
i, j = np.nested_iters(a, [[0], [1, 2]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
i, j = np.nested_iters(a, [[0, 1], [2]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
i, j = np.nested_iters(a, [[0, 2], [1]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
# In 'C' order, flipping axes is disabled
i, j = np.nested_iters(a, [[0], [1, 2]], order='C')
vals = [list(j) for _ in i]
assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]])
i, j = np.nested_iters(a, [[0, 1], [2]], order='C')
vals = [list(j) for _ in i]
assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]])
i, j = np.nested_iters(a, [[0, 2], [1]], order='C')
vals = [list(j) for _ in i]
assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]])
def test_broadcast(self):
# Test nested iteration with broadcasting
a = arange(2).reshape(2, 1)
b = arange(3).reshape(1, 3)
i, j = np.nested_iters([a, b], [[0], [1]])
vals = [list(j) for _ in i]
assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]])
i, j = np.nested_iters([a, b], [[1], [0]])
vals = [list(j) for _ in i]
assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]])
def test_dtype_copy(self):
# Test nested iteration with a copy to change dtype
# copy
a = arange(6, dtype='i4').reshape(2, 3)
i, j = np.nested_iters(a, [[0], [1]],
op_flags=['readonly', 'copy'],
op_dtypes='f8')
assert_equal(j[0].dtype, np.dtype('f8'))
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2], [3, 4, 5]])
vals = None
# writebackifcopy - using context manager
a = arange(6, dtype='f4').reshape(2, 3)
i, j = np.nested_iters(a, [[0], [1]],
op_flags=['readwrite', 'updateifcopy'],
casting='same_kind',
op_dtypes='f8')
with i, j:
assert_equal(j[0].dtype, np.dtype('f8'))
for x in i:
for y in j:
y[...] += 1
assert_equal(a, [[0, 1, 2], [3, 4, 5]])
assert_equal(a, [[1, 2, 3], [4, 5, 6]])
# writebackifcopy - using close()
a = arange(6, dtype='f4').reshape(2, 3)
i, j = np.nested_iters(a, [[0], [1]],
op_flags=['readwrite', 'updateifcopy'],
casting='same_kind',
op_dtypes='f8')
assert_equal(j[0].dtype, np.dtype('f8'))
for x in i:
for y in j:
y[...] += 1
assert_equal(a, [[0, 1, 2], [3, 4, 5]])
i.close()
j.close()
assert_equal(a, [[1, 2, 3], [4, 5, 6]])
def test_dtype_buffered(self):
# Test nested iteration with buffering to change dtype
a = arange(6, dtype='f4').reshape(2, 3)
i, j = np.nested_iters(a, [[0], [1]],
flags=['buffered'],
op_flags=['readwrite'],
casting='same_kind',
op_dtypes='f8')
assert_equal(j[0].dtype, np.dtype('f8'))
for x in i:
for y in j:
y[...] += 1
assert_equal(a, [[1, 2, 3], [4, 5, 6]])
def test_0d(self):
a = np.arange(12).reshape(2, 3, 2)
i, j = np.nested_iters(a, [[], [1, 0, 2]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]])
i, j = np.nested_iters(a, [[1, 0, 2], []])
vals = [list(j) for _ in i]
assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]])
i, j, k = np.nested_iters(a, [[2, 0], [], [1]])
vals = []
for x in i:
for y in j:
vals.append([z for z in k])
assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
def test_iter_nested_iters_dtype_buffered(self):
# Test nested iteration with buffering to change dtype
a = arange(6, dtype='f4').reshape(2, 3)
i, j = np.nested_iters(a, [[0], [1]],
flags=['buffered'],
op_flags=['readwrite'],
casting='same_kind',
op_dtypes='f8')
with i, j:
assert_equal(j[0].dtype, np.dtype('f8'))
for x in i:
for y in j:
y[...] += 1
assert_equal(a, [[1, 2, 3], [4, 5, 6]])
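def _nested_iters_levels_sketch():
    # Illustrative sketch, not part of the original suite: nested_iters is
    # not limited to two levels -- one iterator is returned per axis group,
    # nested outermost first, and advancing an outer iterator resets the
    # inner ones.
    a = arange(12).reshape(2, 3, 2)
    i, j, k = np.nested_iters(a, [[0], [1], [2]])
    vals = [[list(k) for _ in j] for _ in i]
    assert_equal(vals, [[[0, 1], [2, 3], [4, 5]],
                        [[6, 7], [8, 9], [10, 11]]])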
def test_iter_reduction_error():
a = np.arange(6)
assert_raises(ValueError, nditer, [a, None], [],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[0], [-1]])
a = np.arange(6).reshape(2, 3)
assert_raises(ValueError, nditer, [a, None], ['external_loop'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[0, 1], [-1, -1]])
def test_iter_reduction():
# Test doing reductions with the iterator
a = np.arange(6)
i = nditer([a, None], ['reduce_ok'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[0], [-1]])
    # Need to initialize the output operand to the additive identity (zero)
with i:
i.operands[1][...] = 0
# Do the reduction
for x, y in i:
y[...] += x
# Since no axes were specified, should have allocated a scalar
assert_equal(i.operands[1].ndim, 0)
assert_equal(i.operands[1], np.sum(a))
a = np.arange(6).reshape(2, 3)
i = nditer([a, None], ['reduce_ok', 'external_loop'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[0, 1], [-1, -1]])
    # Need to initialize the output operand to the additive identity (zero)
with i:
i.operands[1][...] = 0
# Reduction shape/strides for the output
assert_equal(i[1].shape, (6,))
assert_equal(i[1].strides, (0,))
# Do the reduction
for x, y in i:
# Use a for loop instead of ``y[...] += x``
# (equivalent to ``y[...] = y[...].copy() + x``),
# because y has zero strides we use for the reduction
for j in range(len(y)):
y[j] += x[j]
# Since no axes were specified, should have allocated a scalar
assert_equal(i.operands[1].ndim, 0)
assert_equal(i.operands[1], np.sum(a))
# This is a tricky reduction case for the buffering double loop
# to handle
a = np.ones((2, 3, 5))
it1 = nditer([a, None], ['reduce_ok', 'external_loop'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[None, [0, -1, 1]])
it2 = nditer([a, None], ['reduce_ok', 'external_loop',
'buffered', 'delay_bufalloc'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[None, [0, -1, 1]], buffersize=10)
with it1, it2:
it1.operands[1].fill(0)
it2.operands[1].fill(0)
it2.reset()
for x in it1:
x[1][...] += x[0]
for x in it2:
x[1][...] += x[0]
assert_equal(it1.operands[1], it2.operands[1])
assert_equal(it2.operands[1].sum(), a.size)
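def _nditer_product_reduction_sketch():
    # Illustrative sketch, not part of the original suite: the 'reduce_ok'
    # pattern above works for any binary operation, provided the allocated
    # output is initialized to that operation's identity -- 0 for addition,
    # 1 for multiplication.
    a = np.arange(1, 7)
    i = nditer([a, None], ['reduce_ok'],
               [['readonly'], ['readwrite', 'allocate']],
               op_axes=[[0], [-1]])
    with i:
        i.operands[1][...] = 1  # multiplicative identity
        for x, y in i:
            y[...] *= x
        assert_equal(i.operands[1], np.prod(a))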
def test_iter_buffering_reduction():
# Test doing buffered reductions with the iterator
a = np.arange(6)
b = np.array(0., dtype='f8').byteswap().newbyteorder()
i = nditer([a, b], ['reduce_ok', 'buffered'],
[['readonly'], ['readwrite', 'nbo']],
op_axes=[[0], [-1]])
with i:
assert_equal(i[1].dtype, np.dtype('f8'))
assert_(i[1].dtype != b.dtype)
# Do the reduction
for x, y in i:
y[...] += x
        # All axes were reduced into the provided 0-d output operand
assert_equal(b, np.sum(a))
a = np.arange(6).reshape(2, 3)
b = np.array([0, 0], dtype='f8').byteswap().newbyteorder()
i = nditer([a, b], ['reduce_ok', 'external_loop', 'buffered'],
[['readonly'], ['readwrite', 'nbo']],
op_axes=[[0, 1], [0, -1]])
# Reduction shape/strides for the output
with i:
assert_equal(i[1].shape, (3,))
assert_equal(i[1].strides, (0,))
# Do the reduction
for x, y in i:
# Use a for loop instead of ``y[...] += x``
# (equivalent to ``y[...] = y[...].copy() + x``),
# because y has zero strides we use for the reduction
for j in range(len(y)):
y[j] += x[j]
assert_equal(b, np.sum(a, axis=1))
# Iterator inner double loop was wrong on this one
p = np.arange(2) + 1
it = np.nditer([p, None],
['delay_bufalloc', 'reduce_ok', 'buffered', 'external_loop'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[-1, 0], [-1, -1]],
itershape=(2, 2))
with it:
it.operands[1].fill(0)
it.reset()
assert_equal(it[0], [1, 2, 1, 2])
# Iterator inner loop should take argument contiguity into account
x = np.ones((7, 13, 8), np.int8)[4:6,1:11:6,1:5].transpose(1, 2, 0)
x[...] = np.arange(x.size).reshape(x.shape)
y_base = np.arange(4*4, dtype=np.int8).reshape(4, 4)
y_base_copy = y_base.copy()
y = y_base[::2,:,None]
it = np.nditer([y, x],
['buffered', 'external_loop', 'reduce_ok'],
[['readwrite'], ['readonly']])
with it:
for a, b in it:
a.fill(2)
assert_equal(y_base[1::2], y_base_copy[1::2])
assert_equal(y_base[::2], 2)
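def _zero_stride_alias_sketch():
    # Illustrative sketch, not part of the original suite: a reduction
    # output is handed out with stride 0, so all of its visible elements
    # alias one memory location and writing any element changes them all.
    # That is why the tests above accumulate with an explicit loop instead
    # of ``y[...] += x``.
    y = np.lib.stride_tricks.as_strided(np.zeros(1), shape=(3,), strides=(0,))
    y[1] = 7
    assert_equal(y, [7, 7, 7])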
def test_iter_buffering_reduction_reuse_reduce_loops():
# There was a bug triggering reuse of the reduce loop inappropriately,
# which caused processing to happen in unnecessarily small chunks
    # and to overrun the buffer.
a = np.zeros((2, 7))
b = np.zeros((1, 7))
it = np.nditer([a, b], flags=['reduce_ok', 'external_loop', 'buffered'],
op_flags=[['readonly'], ['readwrite']],
buffersize=5)
with it:
bufsizes = [x.shape[0] for x, y in it]
assert_equal(bufsizes, [5, 2, 5, 2])
assert_equal(sum(bufsizes), a.size)
def test_iter_writemasked_badinput():
a = np.zeros((2, 3))
b = np.zeros((3,))
m = np.array([[True, True, False], [False, True, False]])
m2 = np.array([True, True, False])
m3 = np.array([0, 1, 1], dtype='u1')
mbad1 = np.array([0, 1, 1], dtype='i1')
mbad2 = np.array([0, 1, 1], dtype='f4')
# Need an 'arraymask' if any operand is 'writemasked'
assert_raises(ValueError, nditer, [a, m], [],
[['readwrite', 'writemasked'], ['readonly']])
# A 'writemasked' operand must not be readonly
assert_raises(ValueError, nditer, [a, m], [],
[['readonly', 'writemasked'], ['readonly', 'arraymask']])
# 'writemasked' and 'arraymask' may not be used together
assert_raises(ValueError, nditer, [a, m], [],
[['readonly'], ['readwrite', 'arraymask', 'writemasked']])
# 'arraymask' may only be specified once
assert_raises(ValueError, nditer, [a, m, m2], [],
[['readwrite', 'writemasked'],
['readonly', 'arraymask'],
['readonly', 'arraymask']])
# An 'arraymask' with nothing 'writemasked' also doesn't make sense
assert_raises(ValueError, nditer, [a, m], [],
[['readwrite'], ['readonly', 'arraymask']])
# A writemasked reduction requires a similarly smaller mask
assert_raises(ValueError, nditer, [a, b, m], ['reduce_ok'],
[['readonly'],
['readwrite', 'writemasked'],
['readonly', 'arraymask']])
# But this should work with a smaller/equal mask to the reduction operand
np.nditer([a, b, m2], ['reduce_ok'],
[['readonly'],
['readwrite', 'writemasked'],
['readonly', 'arraymask']])
# The arraymask itself cannot be a reduction
assert_raises(ValueError, nditer, [a, b, m2], ['reduce_ok'],
[['readonly'],
['readwrite', 'writemasked'],
['readwrite', 'arraymask']])
# A uint8 mask is ok too
np.nditer([a, m3], ['buffered'],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']],
op_dtypes=['f4', None],
casting='same_kind')
# An int8 mask isn't ok
assert_raises(TypeError, np.nditer, [a, mbad1], ['buffered'],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']],
op_dtypes=['f4', None],
casting='same_kind')
# A float32 mask isn't ok
assert_raises(TypeError, np.nditer, [a, mbad2], ['buffered'],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']],
op_dtypes=['f4', None],
casting='same_kind')
def _is_buffered(iterator):
try:
iterator.itviews
except ValueError:
return True
return False
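def _is_buffered_sketch():
    # Illustrative sketch, not part of the original suite: ``itviews`` only
    # exists for non-buffered iterators (buffers are not views into the
    # operands), which is what ``_is_buffered`` above relies on.
    it = np.nditer(np.arange(3.), [], [['readonly']])
    assert not _is_buffered(it)
    it = np.nditer(np.arange(3, dtype='f4'), ['buffered'], [['readonly']],
                   op_dtypes=['f8'], casting='same_kind')
    assert _is_buffered(it)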
@pytest.mark.parametrize("a",
[np.zeros((3,), dtype='f8'),
np.zeros((9876, 3*5), dtype='f8')[::2, :],
np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, :],
# Also test with the last dimension strided (so it does not fit if
# there is repeated access)
np.zeros((9,), dtype='f8')[::3],
np.zeros((9876, 3*10), dtype='f8')[::2, ::5],
np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, ::-1]])
def test_iter_writemasked(a):
# Note, the slicing above is to ensure that nditer cannot combine multiple
# axes into one. The repetition is just to make things a bit more
# interesting.
shape = a.shape
reps = shape[-1] // 3
msk = np.empty(shape, dtype=bool)
msk[...] = [True, True, False] * reps
# When buffering is unused, 'writemasked' effectively does nothing.
# It's up to the user of the iterator to obey the requested semantics.
it = np.nditer([a, msk], [],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']])
with it:
for x, m in it:
x[...] = 1
# Because we violated the semantics, all the values became 1
assert_equal(a, np.broadcast_to([1, 1, 1] * reps, shape))
# Even if buffering is enabled, we still may be accessing the array
# directly.
it = np.nditer([a, msk], ['buffered'],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']])
# @seberg: I honestly don't currently understand why a "buffered" iterator
# would end up not using a buffer for the small array here at least when
# "writemasked" is used, that seems confusing... Check by testing for
# actual memory overlap!
is_buffered = True
with it:
for x, m in it:
x[...] = 2.5
if np.may_share_memory(x, a):
is_buffered = False
if not is_buffered:
# Because we violated the semantics, all the values became 2.5
assert_equal(a, np.broadcast_to([2.5, 2.5, 2.5] * reps, shape))
else:
# For large sizes, the iterator may be buffered:
assert_equal(a, np.broadcast_to([2.5, 2.5, 1] * reps, shape))
a[...] = 2.5
    # If buffering is definitely happening, for instance because of
# a cast, only the items selected by the mask will be copied back from
# the buffer.
it = np.nditer([a, msk], ['buffered'],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']],
op_dtypes=['i8', None],
casting='unsafe')
with it:
for x, m in it:
x[...] = 3
# Even though we violated the semantics, only the selected values
# were copied back
assert_equal(a, np.broadcast_to([3, 3, 2.5] * reps, shape))
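def _writemasked_semantics_sketch():
    # Illustrative sketch, not part of the original suite: without buffering,
    # honoring 'writemasked' is the caller's job -- write only where the
    # arraymask element is True.
    a = np.zeros(6)
    msk = np.array([True, True, False] * 2)
    it = np.nditer([a, msk], [],
                   [['readwrite', 'writemasked'], ['readonly', 'arraymask']])
    with it:
        for x, m in it:
            if m:
                x[...] = 1
    assert_equal(a, [1, 1, 0, 1, 1, 0])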
def test_iter_writemasked_decref():
# force casting (to make it interesting) by using a structured dtype.
arr = np.arange(10000).astype(">i,O")
original = arr.copy()
mask = np.random.randint(0, 2, size=10000).astype(bool)
it = np.nditer([arr, mask], ['buffered', "refs_ok"],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']],
op_dtypes=["<i,O", "?"])
singleton = object()
if HAS_REFCOUNT:
count = sys.getrefcount(singleton)
for buf, mask_buf in it:
buf[...] = (3, singleton)
del buf, mask_buf, it # delete everything to ensure correct cleanup
if HAS_REFCOUNT:
# The buffer would have included additional items, they must be
# cleared correctly:
assert sys.getrefcount(singleton) - count == np.count_nonzero(mask)
assert_array_equal(arr[~mask], original[~mask])
assert (arr[mask] == np.array((3, singleton), arr.dtype)).all()
del arr
if HAS_REFCOUNT:
assert sys.getrefcount(singleton) == count
def test_iter_non_writable_attribute_deletion():
it = np.nditer(np.ones(2))
attr = ["value", "shape", "operands", "itviews", "has_delayed_bufalloc",
"iterationneedsapi", "has_multi_index", "has_index", "dtypes",
"ndim", "nop", "itersize", "finished"]
for s in attr:
assert_raises(AttributeError, delattr, it, s)
def test_iter_writable_attribute_deletion():
it = np.nditer(np.ones(2))
attr = [ "multi_index", "index", "iterrange", "iterindex"]
for s in attr:
assert_raises(AttributeError, delattr, it, s)
def test_iter_element_deletion():
it = np.nditer(np.ones(3))
try:
del it[1]
del it[1:2]
except TypeError:
pass
except Exception:
raise AssertionError
def test_iter_allocated_array_dtypes():
# If the dtype of an allocated output has a shape, the shape gets
# tacked onto the end of the result.
it = np.nditer(([1, 3, 20], None), op_dtypes=[None, ('i4', (2,))])
for a, b in it:
b[0] = a - 1
b[1] = a + 1
assert_equal(it.operands[1], [[0, 2], [2, 4], [19, 21]])
# Check the same (less sensitive) thing when `op_axes` with -1 is given.
it = np.nditer(([[1, 3, 20]], None), op_dtypes=[None, ('i4', (2,))],
flags=["reduce_ok"], op_axes=[None, (-1, 0)])
for a, b in it:
b[0] = a - 1
b[1] = a + 1
assert_equal(it.operands[1], [[0, 2], [2, 4], [19, 21]])
# Make sure this works for scalars too
it = np.nditer((10, 2, None), op_dtypes=[None, None, ('i4', (2, 2))])
for a, b, c in it:
c[0, 0] = a - b
c[0, 1] = a + b
c[1, 0] = a * b
c[1, 1] = a / b
assert_equal(it.operands[2], [[8, 12], [20, 5]])
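def _allocated_subarray_dtype_sketch():
    # Illustrative sketch, not part of the original suite: with a subarray
    # output dtype the allocated operand gains the subarray shape as extra
    # trailing dimensions, and each iteration step sees one whole subarray.
    it = np.nditer(([1, 2], None), op_dtypes=[None, ('i4', (3,))])
    for a, b in it:
        b[...] = a
    assert_equal(it.operands[1].shape, (2, 3))
    assert_equal(it.operands[1], [[1, 1, 1], [2, 2, 2]])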
def test_0d_iter():
# Basic test for iteration of 0-d arrays:
i = nditer([2, 3], ['multi_index'], [['readonly']]*2)
assert_equal(i.ndim, 0)
assert_equal(next(i), (2, 3))
assert_equal(i.multi_index, ())
assert_equal(i.iterindex, 0)
assert_raises(StopIteration, next, i)
# test reset:
i.reset()
assert_equal(next(i), (2, 3))
assert_raises(StopIteration, next, i)
# test forcing to 0-d
i = nditer(np.arange(5), ['multi_index'], [['readonly']], op_axes=[()])
assert_equal(i.ndim, 0)
assert_equal(len(i), 1)
i = nditer(np.arange(5), ['multi_index'], [['readonly']],
op_axes=[()], itershape=())
assert_equal(i.ndim, 0)
assert_equal(len(i), 1)
# passing an itershape alone is not enough, the op_axes are also needed
with assert_raises(ValueError):
nditer(np.arange(5), ['multi_index'], [['readonly']], itershape=())
# Test a more complex buffered casting case (same as another test above)
sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
a = np.array(0.5, dtype='f4')
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe', op_dtypes=sdt)
vals = next(i)
assert_equal(vals['a'], 0.5)
assert_equal(vals['b'], 0)
assert_equal(vals['c'], [[(0.5)]*3]*2)
assert_equal(vals['d'], 0.5)
def test_object_iter_cleanup():
# see gh-18450
# object arrays can raise a python exception in ufunc inner loops using
# nditer, which should cause iteration to stop & cleanup. There were bugs
# in the nditer cleanup when decref'ing object arrays.
# This test would trigger valgrind "uninitialized read" before the bugfix.
assert_raises(TypeError, lambda: np.zeros((17000, 2), dtype='f4') * None)
# this more explicit code also triggers the invalid access
arr = np.arange(np.BUFSIZE * 10).reshape(10, -1).astype(str)
oarr = arr.astype(object)
oarr[:, -1] = None
assert_raises(TypeError, lambda: np.add(oarr[:, ::-1], arr[:, ::-1]))
# followup: this tests for a bug introduced in the first pass of gh-18450,
# caused by an incorrect fallthrough of the TypeError
class T:
def __bool__(self):
raise TypeError("Ambiguous")
assert_raises(TypeError, np.logical_or.reduce,
np.array([T(), T()], dtype='O'))
def test_object_iter_cleanup_reduce():
# Similar as above, but a complex reduction case that was previously
# missed (see gh-18810).
# The following array is special in that it cannot be flattened:
arr = np.array([[None, 1], [-1, -1], [None, 2], [-1, -1]])[::2]
with pytest.raises(TypeError):
np.sum(arr)
@pytest.mark.parametrize("arr", [
np.ones((8000, 4, 2), dtype=object)[:, ::2, :],
np.ones((8000, 4, 2), dtype=object, order="F")[:, ::2, :],
np.ones((8000, 4, 2), dtype=object)[:, ::2, :].copy("F")])
def test_object_iter_cleanup_large_reduce(arr):
# More complicated calls are possible for large arrays:
out = np.ones(8000, dtype=np.intp)
# force casting with `dtype=object`
res = np.sum(arr, axis=(1, 2), dtype=object, out=out)
assert_array_equal(res, np.full(8000, 4, dtype=object))
def test_iter_too_large():
# The total size of the iterator must not exceed the maximum intp due
# to broadcasting. Dividing by 1024 will keep it small enough to
# give a legal array.
size = np.iinfo(np.intp).max // 1024
arr = np.lib.stride_tricks.as_strided(np.zeros(1), (size,), (0,))
assert_raises(ValueError, nditer, (arr, arr[:, None]))
# test the same for multiindex. That may get more interesting when
# removing 0 dimensional axis is allowed (since an iterator can grow then)
assert_raises(ValueError, nditer,
(arr, arr[:, None]), flags=['multi_index'])
def test_iter_too_large_with_multiindex():
    # When a multi index is being tracked, the error is delayed. This
    # checks the delayed error messages, and that removing an axis
    # brings the size back below the limit.
base_size = 2**10
num = 1
while base_size**num < np.iinfo(np.intp).max:
num += 1
shape_template = [1, 1] * num
arrays = []
for i in range(num):
shape = shape_template[:]
shape[i * 2] = 2**10
arrays.append(np.empty(shape))
arrays = tuple(arrays)
# arrays are now too large to be broadcast. The different modes test
# different nditer functionality with or without GIL.
for mode in range(6):
with assert_raises(ValueError):
_multiarray_tests.test_nditer_too_large(arrays, -1, mode)
# but if we do nothing with the nditer, it can be constructed:
_multiarray_tests.test_nditer_too_large(arrays, -1, 7)
# When an axis is removed, things should work again (half the time):
for i in range(num):
for mode in range(6):
# an axis with size 1024 is removed:
_multiarray_tests.test_nditer_too_large(arrays, i*2, mode)
# an axis with size 1 is removed:
with assert_raises(ValueError):
_multiarray_tests.test_nditer_too_large(arrays, i*2 + 1, mode)
def test_writebacks():
a = np.arange(6, dtype='f4')
au = a.byteswap().newbyteorder()
assert_(a.dtype.byteorder != au.dtype.byteorder)
it = nditer(au, [], [['readwrite', 'updateifcopy']],
casting='equiv', op_dtypes=[np.dtype('f4')])
with it:
it.operands[0][:] = 100
assert_equal(au, 100)
    # Do it again, this time raising an error:
it = nditer(au, [], [['readwrite', 'updateifcopy']],
casting='equiv', op_dtypes=[np.dtype('f4')])
try:
with it:
assert_equal(au.flags.writeable, False)
it.operands[0][:] = 0
raise ValueError('exit context manager on exception')
    except ValueError:
pass
assert_equal(au, 0)
assert_equal(au.flags.writeable, True)
# cannot reuse i outside context manager
assert_raises(ValueError, getattr, it, 'operands')
it = nditer(au, [], [['readwrite', 'updateifcopy']],
casting='equiv', op_dtypes=[np.dtype('f4')])
with it:
x = it.operands[0]
x[:] = 6
assert_(x.flags.writebackifcopy)
assert_equal(au, 6)
assert_(not x.flags.writebackifcopy)
x[:] = 123 # x.data still valid
assert_equal(au, 6) # but not connected to au
it = nditer(au, [],
[['readwrite', 'updateifcopy']],
casting='equiv', op_dtypes=[np.dtype('f4')])
# reentering works
with it:
with it:
for x in it:
x[...] = 123
it = nditer(au, [],
[['readwrite', 'updateifcopy']],
casting='equiv', op_dtypes=[np.dtype('f4')])
# make sure exiting the inner context manager closes the iterator
with it:
with it:
for x in it:
x[...] = 123
assert_raises(ValueError, getattr, it, 'operands')
# do not crash if original data array is decrefed
it = nditer(au, [],
[['readwrite', 'updateifcopy']],
casting='equiv', op_dtypes=[np.dtype('f4')])
del au
with it:
for x in it:
x[...] = 123
# make sure we cannot reenter the closed iterator
enter = it.__enter__
assert_raises(RuntimeError, enter)
def test_close_equivalent():
    '''Using a context manager and calling nditer.close() are equivalent.
    '''
def add_close(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
for (a, b, c) in it:
addop(a, b, out=c)
ret = it.operands[2]
it.close()
return ret
def add_context(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
with it:
for (a, b, c) in it:
addop(a, b, out=c)
return it.operands[2]
z = add_close(range(5), range(5))
assert_equal(z, range(0, 10, 2))
z = add_context(range(5), range(5))
assert_equal(z, range(0, 10, 2))
def test_close_raises():
it = np.nditer(np.arange(3))
    assert_equal(next(it), 0)
it.close()
assert_raises(StopIteration, next, it)
assert_raises(ValueError, getattr, it, 'operands')
def test_close_parameters():
it = np.nditer(np.arange(3))
assert_raises(TypeError, it.close, 1)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_warn_noclose():
a = np.arange(6, dtype='f4')
au = a.byteswap().newbyteorder()
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
it = np.nditer(au, [], [['readwrite', 'updateifcopy']],
casting='equiv', op_dtypes=[np.dtype('f4')])
del it
assert len(sup.log) == 1
@pytest.mark.skipif(sys.version_info[:2] == (3, 9) and sys.platform == "win32",
reason="Errors with Python 3.9 on Windows")
@pytest.mark.parametrize(["in_dtype", "buf_dtype"],
[("i", "O"), ("O", "i"), # most simple cases
("i,O", "O,O"), # structured partially only copying O
("O,i", "i,O"), # structured casting to and from O
])
@pytest.mark.parametrize("steps", [1, 2, 3])
def test_partial_iteration_cleanup(in_dtype, buf_dtype, steps):
"""
    Checks for reference counting leaks during cleanup. Using explicit
    reference counts leads to occasional false positives (at least in
    parallel test setups). This test should still detect leaks correctly
    when run e.g. with pytest-valgrind or pytest-leaks.
"""
value = 2**30 + 1 # just a random value that Python won't intern
arr = np.full(int(np.BUFSIZE * 2.5), value).astype(in_dtype)
it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)],
flags=["buffered", "external_loop", "refs_ok"], casting="unsafe")
for step in range(steps):
# The iteration finishes in 3 steps, the first two are partial
next(it)
del it # not necessary, but we test the cleanup
# Repeat the test with `iternext`
it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)],
flags=["buffered", "external_loop", "refs_ok"], casting="unsafe")
for step in range(steps):
it.iternext()
del it # not necessary, but we test the cleanup
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
@pytest.mark.parametrize(["in_dtype", "buf_dtype"],
[("O", "i"), # most simple cases
("O,i", "i,O"), # structured casting to and from O
])
def test_partial_iteration_error(in_dtype, buf_dtype):
    value = 123  # relies on Python's small-int cache (a leak check still finds it)
arr = np.full(int(np.BUFSIZE * 2.5), value).astype(in_dtype)
if in_dtype == "O":
arr[int(np.BUFSIZE * 1.5)] = None
else:
arr[int(np.BUFSIZE * 1.5)]["f0"] = None
count = sys.getrefcount(value)
it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)],
flags=["buffered", "external_loop", "refs_ok"], casting="unsafe")
with pytest.raises(TypeError):
# pytest.raises seems to have issues with the error originating
# in the for loop, so manually unravel:
next(it)
next(it) # raises TypeError
# Repeat the test with `iternext` after resetting, the buffers should
# already be cleared from any references, so resetting is sufficient.
it.reset()
with pytest.raises(TypeError):
it.iternext()
it.iternext()
assert count == sys.getrefcount(value)
def test_debug_print(capfd):
"""
Matches the expected output of a debug print with the actual output.
    Note that the iterator dump should not be considered stable API; this
    test mainly ensures the print does not crash. It uses pytest's
    ``capfd`` fixture to capture the C-level ``printf`` output.
"""
# the expected output with all addresses and sizes stripped (they vary
# and/or are platform dependent).
expected = """
------ BEGIN ITERATOR DUMP ------
| Iterator Address:
| ItFlags: BUFFER REDUCE REUSE_REDUCE_LOOPS
| NDim: 2
| NOp: 2
| IterSize: 50
| IterStart: 0
| IterEnd: 50
| IterIndex: 0
| Iterator SizeOf:
| BufferData SizeOf:
| AxisData SizeOf:
|
| Perm: 0 1
| DTypes:
| DTypes: dtype('float64') dtype('int32')
| InitDataPtrs:
| BaseOffsets: 0 0
| Operands:
| Operand DTypes: dtype('int64') dtype('float64')
| OpItFlags:
| Flags[0]: READ CAST ALIGNED
| Flags[1]: READ WRITE CAST ALIGNED REDUCE
|
| BufferData:
| BufferSize: 50
| Size: 5
| BufIterEnd: 5
| REDUCE Pos: 0
| REDUCE OuterSize: 10
| REDUCE OuterDim: 1
| Strides: 8 4
| Ptrs:
| REDUCE Outer Strides: 40 0
| REDUCE Outer Ptrs:
| ReadTransferFn:
| ReadTransferData:
| WriteTransferFn:
| WriteTransferData:
| Buffers:
|
| AxisData[0]:
| Shape: 5
| Index: 0
| Strides: 16 8
| Ptrs:
| AxisData[1]:
| Shape: 10
| Index: 0
| Strides: 80 0
| Ptrs:
------- END ITERATOR DUMP -------
""".strip().splitlines()
arr1 = np.arange(100, dtype=np.int64).reshape(10, 10)[:, ::2]
arr2 = np.arange(5.)
it = np.nditer((arr1, arr2), op_dtypes=["d", "i4"], casting="unsafe",
flags=["reduce_ok", "buffered"],
op_flags=[["readonly"], ["readwrite"]])
it.debug_print()
res = capfd.readouterr().out
res = res.strip().splitlines()
assert len(res) == len(expected)
for res_line, expected_line in zip(res, expected):
# The actual output may have additional pointers listed that are
# stripped from the example output:
assert res_line.startswith(expected_line.strip())
# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_defchararray.py
import numpy as np
from numpy.core.multiarray import _vec_string
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises,
assert_raises_regex
)
kw_unicode_true = {'unicode': True} # make 2to3 work properly
kw_unicode_false = {'unicode': False}
class TestBasic:
def test_from_object_array(self):
A = np.array([['abc', 2],
['long ', '0123456789']], dtype='O')
B = np.char.array(A)
assert_equal(B.dtype.itemsize, 10)
assert_array_equal(B, [[b'abc', b'2'],
[b'long', b'0123456789']])
def test_from_object_array_unicode(self):
A = np.array([['abc', u'Sigma \u03a3'],
['long ', '0123456789']], dtype='O')
assert_raises(ValueError, np.char.array, (A,))
B = np.char.array(A, **kw_unicode_true)
assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize)
assert_array_equal(B, [['abc', u'Sigma \u03a3'],
['long', '0123456789']])
def test_from_string_array(self):
A = np.array([[b'abc', b'foo'],
[b'long ', b'0123456789']])
assert_equal(A.dtype.type, np.string_)
B = np.char.array(A)
assert_array_equal(B, A)
assert_equal(B.dtype, A.dtype)
assert_equal(B.shape, A.shape)
B[0, 0] = 'changed'
assert_(B[0, 0] != A[0, 0])
C = np.char.asarray(A)
assert_array_equal(C, A)
assert_equal(C.dtype, A.dtype)
C[0, 0] = 'changed again'
assert_(C[0, 0] != B[0, 0])
assert_(C[0, 0] == A[0, 0])
def test_from_unicode_array(self):
A = np.array([['abc', u'Sigma \u03a3'],
['long ', '0123456789']])
assert_equal(A.dtype.type, np.unicode_)
B = np.char.array(A)
assert_array_equal(B, A)
assert_equal(B.dtype, A.dtype)
assert_equal(B.shape, A.shape)
B = np.char.array(A, **kw_unicode_true)
assert_array_equal(B, A)
assert_equal(B.dtype, A.dtype)
assert_equal(B.shape, A.shape)
def fail():
np.char.array(A, **kw_unicode_false)
assert_raises(UnicodeEncodeError, fail)
def test_unicode_upconvert(self):
A = np.char.array(['abc'])
B = np.char.array([u'\u03a3'])
assert_(issubclass((A + B).dtype.type, np.unicode_))
def test_from_string(self):
A = np.char.array(b'abc')
assert_equal(len(A), 1)
assert_equal(len(A[0]), 3)
assert_(issubclass(A.dtype.type, np.string_))
def test_from_unicode(self):
A = np.char.array(u'\u03a3')
assert_equal(len(A), 1)
assert_equal(len(A[0]), 1)
assert_equal(A.itemsize, 4)
assert_(issubclass(A.dtype.type, np.unicode_))
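def _chararray_itemsize_sketch():
    # Illustrative sketch, not part of the original suite: np.char.array
    # sizes its dtype to the longest input string, and trailing whitespace
    # is stripped on element access.
    A = np.char.array([b'ab', b'longest'])
    assert_equal(A.dtype.itemsize, 7)
    assert_equal(A[0], b'ab')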
class TestVecString:
def test_non_existent_method(self):
def fail():
_vec_string('a', np.string_, 'bogus')
assert_raises(AttributeError, fail)
def test_non_string_array(self):
def fail():
_vec_string(1, np.string_, 'strip')
assert_raises(TypeError, fail)
def test_invalid_args_tuple(self):
def fail():
_vec_string(['a'], np.string_, 'strip', 1)
assert_raises(TypeError, fail)
def test_invalid_type_descr(self):
def fail():
_vec_string(['a'], 'BOGUS', 'strip')
assert_raises(TypeError, fail)
def test_invalid_function_args(self):
def fail():
_vec_string(['a'], np.string_, 'strip', (1,))
assert_raises(TypeError, fail)
def test_invalid_result_type(self):
def fail():
_vec_string(['a'], np.int_, 'strip')
assert_raises(TypeError, fail)
def test_broadcast_error(self):
def fail():
_vec_string([['abc', 'def']], np.int_, 'find', (['a', 'd', 'j'],))
assert_raises(ValueError, fail)
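def _vec_string_sketch():
    # Illustrative sketch, not part of the original suite: _vec_string is the
    # low-level helper behind np.char -- it applies a string method
    # element-wise and coerces the results to the requested scalar type.
    res = _vec_string(['ab', 'cd'], np.str_, 'upper')
    assert_array_equal(res, ['AB', 'CD'])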
class TestWhitespace:
def setup_method(self):
self.A = np.array([['abc ', '123 '],
['789 ', 'xyz ']]).view(np.chararray)
self.B = np.array([['abc', '123'],
['789', 'xyz']]).view(np.chararray)
def test1(self):
assert_(np.all(self.A == self.B))
assert_(np.all(self.A >= self.B))
assert_(np.all(self.A <= self.B))
assert_(not np.any(self.A > self.B))
assert_(not np.any(self.A < self.B))
assert_(not np.any(self.A != self.B))
class TestChar:
def setup_method(self):
self.A = np.array('abc1', dtype='c').view(np.chararray)
def test_it(self):
assert_equal(self.A.shape, (4,))
assert_equal(self.A.upper()[:2].tobytes(), b'AB')
class TestComparisons:
def setup_method(self):
self.A = np.array([['abc', '123'],
['789', 'xyz']]).view(np.chararray)
self.B = np.array([['efg', '123 '],
['051', 'tuv']]).view(np.chararray)
def test_not_equal(self):
assert_array_equal((self.A != self.B), [[True, False], [True, True]])
def test_equal(self):
assert_array_equal((self.A == self.B), [[False, True], [False, False]])
def test_greater_equal(self):
assert_array_equal((self.A >= self.B), [[False, True], [True, True]])
def test_less_equal(self):
assert_array_equal((self.A <= self.B), [[True, True], [False, False]])
def test_greater(self):
assert_array_equal((self.A > self.B), [[False, False], [True, True]])
def test_less(self):
assert_array_equal((self.A < self.B), [[True, False], [False, False]])
def test_type(self):
out1 = np.char.equal(self.A, self.B)
out2 = np.char.equal('a', 'a')
assert_(isinstance(out1, np.ndarray))
assert_(isinstance(out2, np.ndarray))
class TestComparisonsMixed1(TestComparisons):
"""Ticket #1276"""
def setup_method(self):
TestComparisons.setup_method(self)
self.B = np.array([['efg', '123 '],
['051', 'tuv']], np.unicode_).view(np.chararray)
class TestComparisonsMixed2(TestComparisons):
"""Ticket #1276"""
def setup_method(self):
TestComparisons.setup_method(self)
self.A = np.array([['abc', '123'],
['789', 'xyz']], np.unicode_).view(np.chararray)
class TestInformation:
def setup_method(self):
self.A = np.array([[' abc ', ''],
['12345', 'MixedCase'],
['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)
self.B = np.array([[u' \u03a3 ', u''],
[u'12345', u'MixedCase'],
[u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray)
def test_len(self):
assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer))
assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]])
assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]])
def test_count(self):
assert_(issubclass(self.A.count('').dtype.type, np.integer))
assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]])
assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]])
# Python doesn't seem to like counting NULL characters
# assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]])
assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]])
assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]])
assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]])
# assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]])
def test_endswith(self):
assert_(issubclass(self.A.endswith('').dtype.type, np.bool_))
assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]])
assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]])
def fail():
self.A.endswith('3', 'fdjk')
assert_raises(TypeError, fail)
def test_find(self):
assert_(issubclass(self.A.find('a').dtype.type, np.integer))
assert_array_equal(self.A.find('a'), [[1, -1], [-1, 6], [-1, -1]])
assert_array_equal(self.A.find('3'), [[-1, -1], [2, -1], [2, -1]])
assert_array_equal(self.A.find('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
assert_array_equal(self.A.find(['1', 'P']), [[-1, -1], [0, -1], [0, 1]])
def test_index(self):
def fail():
self.A.index('a')
assert_raises(ValueError, fail)
assert_(np.char.index('abcba', 'b') == 1)
assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer))
def test_isalnum(self):
assert_(issubclass(self.A.isalnum().dtype.type, np.bool_))
assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]])
def test_isalpha(self):
assert_(issubclass(self.A.isalpha().dtype.type, np.bool_))
assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]])
def test_isdigit(self):
assert_(issubclass(self.A.isdigit().dtype.type, np.bool_))
assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]])
def test_islower(self):
assert_(issubclass(self.A.islower().dtype.type, np.bool_))
assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]])
def test_isspace(self):
assert_(issubclass(self.A.isspace().dtype.type, np.bool_))
assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]])
def test_istitle(self):
assert_(issubclass(self.A.istitle().dtype.type, np.bool_))
assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]])
def test_isupper(self):
assert_(issubclass(self.A.isupper().dtype.type, np.bool_))
assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]])
def test_rfind(self):
assert_(issubclass(self.A.rfind('a').dtype.type, np.integer))
assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]])
assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]])
assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]])
def test_rindex(self):
def fail():
self.A.rindex('a')
assert_raises(ValueError, fail)
assert_(np.char.rindex('abcba', 'b') == 3)
assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer))
def test_startswith(self):
assert_(issubclass(self.A.startswith('').dtype.type, np.bool_))
assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]])
assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]])
def fail():
self.A.startswith('3', 'fdjk')
assert_raises(TypeError, fail)
class TestMethods:
def setup_method(self):
self.A = np.array([[' abc ', ''],
['12345', 'MixedCase'],
['123 \t 345 \0 ', 'UPPER']],
dtype='S').view(np.chararray)
self.B = np.array([[u' \u03a3 ', u''],
[u'12345', u'MixedCase'],
[u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray)
def test_capitalize(self):
tgt = [[b' abc ', b''],
[b'12345', b'Mixedcase'],
[b'123 \t 345 \0 ', b'Upper']]
assert_(issubclass(self.A.capitalize().dtype.type, np.string_))
assert_array_equal(self.A.capitalize(), tgt)
tgt = [[u' \u03c3 ', ''],
['12345', 'Mixedcase'],
['123 \t 345 \0 ', 'Upper']]
assert_(issubclass(self.B.capitalize().dtype.type, np.unicode_))
assert_array_equal(self.B.capitalize(), tgt)
def test_center(self):
assert_(issubclass(self.A.center(10).dtype.type, np.string_))
C = self.A.center([10, 20])
assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
C = self.A.center(20, b'#')
assert_(np.all(C.startswith(b'#')))
assert_(np.all(C.endswith(b'#')))
C = np.char.center(b'FOO', [[10, 20], [15, 8]])
tgt = [[b' FOO ', b' FOO '],
[b' FOO ', b' FOO ']]
assert_(issubclass(C.dtype.type, np.string_))
assert_array_equal(C, tgt)
def test_decode(self):
A = np.char.array([b'\\u03a3'])
assert_(A.decode('unicode-escape')[0] == '\u03a3')
def test_encode(self):
B = self.B.encode('unicode_escape')
assert_(B[0][0] == str(' \\u03a3 ').encode('latin1'))
def test_expandtabs(self):
T = self.A.expandtabs()
assert_(T[2, 0] == b'123 345 \0')
def test_join(self):
# NOTE: list(b'123') == [49, 50, 51]
        # so that b','.join(b'123') raises an error on Python 3
A0 = self.A.decode('ascii')
A = np.char.join([',', '#'], A0)
assert_(issubclass(A.dtype.type, np.unicode_))
tgt = np.array([[' ,a,b,c, ', ''],
['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'],
['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']])
assert_array_equal(np.char.join([',', '#'], A0), tgt)
def test_ljust(self):
assert_(issubclass(self.A.ljust(10).dtype.type, np.string_))
C = self.A.ljust([10, 20])
assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
C = self.A.ljust(20, b'#')
assert_array_equal(C.startswith(b'#'), [
[False, True], [False, False], [False, False]])
assert_(np.all(C.endswith(b'#')))
C = np.char.ljust(b'FOO', [[10, 20], [15, 8]])
tgt = [[b'FOO ', b'FOO '],
[b'FOO ', b'FOO ']]
assert_(issubclass(C.dtype.type, np.string_))
assert_array_equal(C, tgt)
def test_lower(self):
tgt = [[b' abc ', b''],
[b'12345', b'mixedcase'],
[b'123 \t 345 \0 ', b'upper']]
assert_(issubclass(self.A.lower().dtype.type, np.string_))
assert_array_equal(self.A.lower(), tgt)
tgt = [[u' \u03c3 ', u''],
[u'12345', u'mixedcase'],
[u'123 \t 345 \0 ', u'upper']]
assert_(issubclass(self.B.lower().dtype.type, np.unicode_))
assert_array_equal(self.B.lower(), tgt)
def test_lstrip(self):
tgt = [[b'abc ', b''],
[b'12345', b'MixedCase'],
[b'123 \t 345 \0 ', b'UPPER']]
assert_(issubclass(self.A.lstrip().dtype.type, np.string_))
assert_array_equal(self.A.lstrip(), tgt)
tgt = [[b' abc', b''],
[b'2345', b'ixedCase'],
[b'23 \t 345 \x00', b'UPPER']]
assert_array_equal(self.A.lstrip([b'1', b'M']), tgt)
tgt = [[u'\u03a3 ', ''],
['12345', 'MixedCase'],
['123 \t 345 \0 ', 'UPPER']]
assert_(issubclass(self.B.lstrip().dtype.type, np.unicode_))
assert_array_equal(self.B.lstrip(), tgt)
def test_partition(self):
P = self.A.partition([b'3', b'M'])
tgt = [[(b' abc ', b'', b''), (b'', b'', b'')],
[(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
[(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]]
assert_(issubclass(P.dtype.type, np.string_))
assert_array_equal(P, tgt)
def test_replace(self):
R = self.A.replace([b'3', b'a'],
[b'##########', b'@'])
tgt = [[b' abc ', b''],
[b'12##########45', b'MixedC@se'],
[b'12########## \t ##########45 \x00', b'UPPER']]
assert_(issubclass(R.dtype.type, np.string_))
assert_array_equal(R, tgt)
def test_rjust(self):
assert_(issubclass(self.A.rjust(10).dtype.type, np.string_))
C = self.A.rjust([10, 20])
assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
C = self.A.rjust(20, b'#')
assert_(np.all(C.startswith(b'#')))
assert_array_equal(C.endswith(b'#'),
[[False, True], [False, False], [False, False]])
C = np.char.rjust(b'FOO', [[10, 20], [15, 8]])
tgt = [[b' FOO', b' FOO'],
[b' FOO', b' FOO']]
assert_(issubclass(C.dtype.type, np.string_))
assert_array_equal(C, tgt)
def test_rpartition(self):
P = self.A.rpartition([b'3', b'M'])
tgt = [[(b'', b'', b' abc '), (b'', b'', b'')],
[(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
[(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]]
assert_(issubclass(P.dtype.type, np.string_))
assert_array_equal(P, tgt)
def test_rsplit(self):
A = self.A.rsplit(b'3')
tgt = [[[b' abc '], [b'']],
[[b'12', b'45'], [b'MixedCase']],
[[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
assert_(issubclass(A.dtype.type, np.object_))
assert_equal(A.tolist(), tgt)
def test_rstrip(self):
assert_(issubclass(self.A.rstrip().dtype.type, np.string_))
tgt = [[b' abc', b''],
[b'12345', b'MixedCase'],
[b'123 \t 345', b'UPPER']]
assert_array_equal(self.A.rstrip(), tgt)
tgt = [[b' abc ', b''],
[b'1234', b'MixedCase'],
[b'123 \t 345 \x00', b'UPP']
]
assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt)
tgt = [[u' \u03a3', ''],
['12345', 'MixedCase'],
['123 \t 345', 'UPPER']]
assert_(issubclass(self.B.rstrip().dtype.type, np.unicode_))
assert_array_equal(self.B.rstrip(), tgt)
def test_strip(self):
tgt = [[b'abc', b''],
[b'12345', b'MixedCase'],
[b'123 \t 345', b'UPPER']]
assert_(issubclass(self.A.strip().dtype.type, np.string_))
assert_array_equal(self.A.strip(), tgt)
tgt = [[b' abc ', b''],
[b'234', b'ixedCas'],
[b'23 \t 345 \x00', b'UPP']]
assert_array_equal(self.A.strip([b'15', b'EReM']), tgt)
tgt = [[u'\u03a3', ''],
['12345', 'MixedCase'],
['123 \t 345', 'UPPER']]
assert_(issubclass(self.B.strip().dtype.type, np.unicode_))
assert_array_equal(self.B.strip(), tgt)
def test_split(self):
A = self.A.split(b'3')
tgt = [
[[b' abc '], [b'']],
[[b'12', b'45'], [b'MixedCase']],
[[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
assert_(issubclass(A.dtype.type, np.object_))
assert_equal(A.tolist(), tgt)
def test_splitlines(self):
A = np.char.array(['abc\nfds\nwer']).splitlines()
assert_(issubclass(A.dtype.type, np.object_))
assert_(A.shape == (1,))
assert_(len(A[0]) == 3)
def test_swapcase(self):
tgt = [[b' ABC ', b''],
[b'12345', b'mIXEDcASE'],
[b'123 \t 345 \0 ', b'upper']]
assert_(issubclass(self.A.swapcase().dtype.type, np.string_))
assert_array_equal(self.A.swapcase(), tgt)
tgt = [[u' \u03c3 ', u''],
[u'12345', u'mIXEDcASE'],
[u'123 \t 345 \0 ', u'upper']]
assert_(issubclass(self.B.swapcase().dtype.type, np.unicode_))
assert_array_equal(self.B.swapcase(), tgt)
def test_title(self):
tgt = [[b' Abc ', b''],
[b'12345', b'Mixedcase'],
[b'123 \t 345 \0 ', b'Upper']]
assert_(issubclass(self.A.title().dtype.type, np.string_))
assert_array_equal(self.A.title(), tgt)
tgt = [[u' \u03a3 ', u''],
[u'12345', u'Mixedcase'],
[u'123 \t 345 \0 ', u'Upper']]
assert_(issubclass(self.B.title().dtype.type, np.unicode_))
assert_array_equal(self.B.title(), tgt)
def test_upper(self):
tgt = [[b' ABC ', b''],
[b'12345', b'MIXEDCASE'],
[b'123 \t 345 \0 ', b'UPPER']]
assert_(issubclass(self.A.upper().dtype.type, np.string_))
assert_array_equal(self.A.upper(), tgt)
tgt = [[u' \u03a3 ', u''],
[u'12345', u'MIXEDCASE'],
[u'123 \t 345 \0 ', u'UPPER']]
assert_(issubclass(self.B.upper().dtype.type, np.unicode_))
assert_array_equal(self.B.upper(), tgt)
def test_isnumeric(self):
def fail():
self.A.isnumeric()
assert_raises(TypeError, fail)
assert_(issubclass(self.B.isnumeric().dtype.type, np.bool_))
assert_array_equal(self.B.isnumeric(), [
[False, False], [True, False], [False, False]])
def test_isdecimal(self):
def fail():
self.A.isdecimal()
assert_raises(TypeError, fail)
assert_(issubclass(self.B.isdecimal().dtype.type, np.bool_))
assert_array_equal(self.B.isdecimal(), [
[False, False], [True, False], [False, False]])
class TestOperations:
def setup_method(self):
self.A = np.array([['abc', '123'],
['789', 'xyz']]).view(np.chararray)
self.B = np.array([['efg', '456'],
['051', 'tuv']]).view(np.chararray)
def test_add(self):
AB = np.array([['abcefg', '123456'],
['789051', 'xyztuv']]).view(np.chararray)
assert_array_equal(AB, (self.A + self.B))
assert_(len((self.A + self.B)[0][0]) == 6)
def test_radd(self):
QA = np.array([['qabc', 'q123'],
['q789', 'qxyz']]).view(np.chararray)
assert_array_equal(QA, ('q' + self.A))
def test_mul(self):
A = self.A
for r in (2, 3, 5, 7, 197):
Ar = np.array([[A[0, 0]*r, A[0, 1]*r],
[A[1, 0]*r, A[1, 1]*r]]).view(np.chararray)
assert_array_equal(Ar, (self.A * r))
for ob in [object(), 'qrs']:
with assert_raises_regex(ValueError,
'Can only multiply by integers'):
A*ob
def test_rmul(self):
A = self.A
for r in (2, 3, 5, 7, 197):
Ar = np.array([[A[0, 0]*r, A[0, 1]*r],
[A[1, 0]*r, A[1, 1]*r]]).view(np.chararray)
assert_array_equal(Ar, (r * self.A))
for ob in [object(), 'qrs']:
with assert_raises_regex(ValueError,
'Can only multiply by integers'):
ob * A
def test_mod(self):
"""Ticket #856"""
F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.chararray)
C = np.array([[3, 7], [19, 1]])
FC = np.array([['3', '7.000000'],
['19', '1']]).view(np.chararray)
assert_array_equal(FC, F % C)
A = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.chararray)
A1 = np.array([['1.000', '1'], ['1', '1']]).view(np.chararray)
assert_array_equal(A1, (A % 1))
A2 = np.array([['1.000', '2'], ['3', '4']]).view(np.chararray)
assert_array_equal(A2, (A % [[1, 2], [3, 4]]))
def test_rmod(self):
assert_(("%s" % self.A) == str(self.A))
assert_(("%r" % self.A) == repr(self.A))
for ob in [42, object()]:
with assert_raises_regex(
TypeError, "unsupported operand type.* and 'chararray'"):
ob % self.A
def test_slice(self):
"""Regression test for https://github.com/numpy/numpy/issues/5982"""
arr = np.array([['abc ', 'def '], ['geh ', 'ijk ']],
dtype='S4').view(np.chararray)
sl1 = arr[:]
assert_array_equal(sl1, arr)
assert_(sl1.base is arr)
assert_(sl1.base.base is arr.base)
sl2 = arr[:, :]
assert_array_equal(sl2, arr)
assert_(sl2.base is arr)
assert_(sl2.base.base is arr.base)
assert_(arr[0, 0] == b'abc')
def test_empty_indexing():
"""Regression test for ticket 1948."""
# Check that indexing a chararray with an empty list/array returns an
# empty chararray instead of a chararray with a single empty string in it.
s = np.chararray((4,))
assert_(s[[]].size == 0)
# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_indexerrors.py
import numpy as np
from numpy.testing import (
assert_raises, assert_raises_regex,
)
class TestIndexErrors:
    '''Tests to exercise IndexErrors not covered by other tests.'''
def test_arraytypes_fasttake(self):
'take from a 0-length dimension'
x = np.empty((2, 3, 0, 4))
assert_raises(IndexError, x.take, [0], axis=2)
assert_raises(IndexError, x.take, [1], axis=2)
assert_raises(IndexError, x.take, [0], axis=2, mode='wrap')
assert_raises(IndexError, x.take, [0], axis=2, mode='clip')
def test_take_from_object(self):
# Check exception taking from object array
d = np.zeros(5, dtype=object)
assert_raises(IndexError, d.take, [6])
# Check exception taking from 0-d array
d = np.zeros((5, 0), dtype=object)
assert_raises(IndexError, d.take, [1], axis=1)
assert_raises(IndexError, d.take, [0], axis=1)
assert_raises(IndexError, d.take, [0])
assert_raises(IndexError, d.take, [0], mode='wrap')
assert_raises(IndexError, d.take, [0], mode='clip')
def test_multiindex_exceptions(self):
a = np.empty(5, dtype=object)
assert_raises(IndexError, a.item, 20)
a = np.empty((5, 0), dtype=object)
assert_raises(IndexError, a.item, (0, 0))
a = np.empty(5, dtype=object)
assert_raises(IndexError, a.itemset, 20, 0)
a = np.empty((5, 0), dtype=object)
assert_raises(IndexError, a.itemset, (0, 0), 0)
def test_put_exceptions(self):
a = np.zeros((5, 5))
assert_raises(IndexError, a.put, 100, 0)
a = np.zeros((5, 5), dtype=object)
assert_raises(IndexError, a.put, 100, 0)
a = np.zeros((5, 5, 0))
assert_raises(IndexError, a.put, 100, 0)
a = np.zeros((5, 5, 0), dtype=object)
assert_raises(IndexError, a.put, 100, 0)
def test_iterators_exceptions(self):
"cases in iterators.c"
def assign(obj, ind, val):
obj[ind] = val
a = np.zeros([1, 2, 3])
assert_raises(IndexError, lambda: a[0, 5, None, 2])
assert_raises(IndexError, lambda: a[0, 5, 0, 2])
assert_raises(IndexError, lambda: assign(a, (0, 5, None, 2), 1))
assert_raises(IndexError, lambda: assign(a, (0, 5, 0, 2), 1))
a = np.zeros([1, 0, 3])
assert_raises(IndexError, lambda: a[0, 0, None, 2])
assert_raises(IndexError, lambda: assign(a, (0, 0, None, 2), 1))
a = np.zeros([1, 2, 3])
assert_raises(IndexError, lambda: a.flat[10])
assert_raises(IndexError, lambda: assign(a.flat, 10, 5))
a = np.zeros([1, 0, 3])
assert_raises(IndexError, lambda: a.flat[10])
assert_raises(IndexError, lambda: assign(a.flat, 10, 5))
a = np.zeros([1, 2, 3])
assert_raises(IndexError, lambda: a.flat[np.array(10)])
assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5))
a = np.zeros([1, 0, 3])
assert_raises(IndexError, lambda: a.flat[np.array(10)])
assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5))
a = np.zeros([1, 2, 3])
assert_raises(IndexError, lambda: a.flat[np.array([10])])
assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5))
a = np.zeros([1, 0, 3])
assert_raises(IndexError, lambda: a.flat[np.array([10])])
assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5))
def test_mapping(self):
"cases from mapping.c"
def assign(obj, ind, val):
obj[ind] = val
a = np.zeros((0, 10))
assert_raises(IndexError, lambda: a[12])
a = np.zeros((3, 5))
assert_raises(IndexError, lambda: a[(10, 20)])
assert_raises(IndexError, lambda: assign(a, (10, 20), 1))
a = np.zeros((3, 0))
assert_raises(IndexError, lambda: a[(1, 0)])
assert_raises(IndexError, lambda: assign(a, (1, 0), 1))
a = np.zeros((10,))
assert_raises(IndexError, lambda: assign(a, 10, 1))
a = np.zeros((0,))
assert_raises(IndexError, lambda: assign(a, 10, 1))
a = np.zeros((3, 5))
assert_raises(IndexError, lambda: a[(1, [1, 20])])
assert_raises(IndexError, lambda: assign(a, (1, [1, 20]), 1))
a = np.zeros((3, 0))
assert_raises(IndexError, lambda: a[(1, [0, 1])])
assert_raises(IndexError, lambda: assign(a, (1, [0, 1]), 1))
def test_mapping_error_message(self):
a = np.zeros((3, 5))
index = (1, 2, 3, 4, 5)
assert_raises_regex(
IndexError,
"too many indices for array: "
"array is 2-dimensional, but 5 were indexed",
lambda: a[index])
def test_methods(self):
"cases from methods.c"
a = np.zeros((3, 3))
assert_raises(IndexError, lambda: a.item(100))
assert_raises(IndexError, lambda: a.itemset(100, 1))
a = np.zeros((0, 3))
assert_raises(IndexError, lambda: a.item(100))
assert_raises(IndexError, lambda: a.itemset(100, 1))
# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_longdouble.py
import warnings
import pytest
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_warns, assert_array_equal,
temppath,
)
from numpy.core.tests._locales import CommaDecimalPointLocale
LD_INFO = np.finfo(np.longdouble)
longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps)
_o = 1 + LD_INFO.eps
string_to_longdouble_inaccurate = (_o != np.longdouble(repr(_o)))
del _o
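# The probe above checks whether repr() of (1 + eps) parses back to the same
# long double; when it does not, string conversion effectively goes through
# double precision, and the string-based tests below skip because they
# cannot be exact.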
def test_scalar_extraction():
"""Confirm that extracting a value doesn't convert to python float"""
o = 1 + LD_INFO.eps
a = np.array([o, o, o])
assert_equal(a[1], o)
# Conversions string -> long double
# 0.1 not exactly representable in base 2 floating point.
repr_precision = len(repr(np.longdouble(0.1)))
# +2 from macro block starting around line 842 in scalartypes.c.src.
@pytest.mark.skipif(LD_INFO.precision + 2 >= repr_precision,
reason="repr precision not enough to show eps")
def test_repr_roundtrip():
# We will only see eps in repr if within printing precision.
o = 1 + LD_INFO.eps
assert_equal(np.longdouble(repr(o)), o, "repr was %s" % repr(o))
@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
def test_repr_roundtrip_bytes():
o = 1 + LD_INFO.eps
assert_equal(np.longdouble(repr(o).encode("ascii")), o)
@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
@pytest.mark.parametrize("strtype", (np.str_, np.bytes_, str, bytes))
def test_array_and_stringlike_roundtrip(strtype):
"""
Test that string representations of long-double roundtrip both
for array casting and scalar coercion, see also gh-15608.
"""
o = 1 + LD_INFO.eps
if strtype in (np.bytes_, bytes):
o_str = strtype(repr(o).encode("ascii"))
else:
o_str = strtype(repr(o))
# Test that `o` is correctly coerced from the string-like
assert o == np.longdouble(o_str)
# Test that arrays also roundtrip correctly:
o_strarr = np.asarray([o] * 3, dtype=strtype)
assert (o == o_strarr.astype(np.longdouble)).all()
# And array coercion and casting to string give the same as scalar repr:
assert (o_strarr == o_str).all()
assert (np.asarray([o] * 3).astype(strtype) == o_str).all()
def test_bogus_string():
assert_raises(ValueError, np.longdouble, "spam")
assert_raises(ValueError, np.longdouble, "1.0 flub")
@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
def test_fromstring():
o = 1 + LD_INFO.eps
s = (" " + repr(o))*5
a = np.array([o]*5)
assert_equal(np.fromstring(s, sep=" ", dtype=np.longdouble), a,
err_msg="reading '%s'" % s)
def test_fromstring_complex():
for ctype in ["complex", "cdouble", "cfloat"]:
# Check spacing between separator
assert_equal(np.fromstring("1, 2 , 3 ,4", sep=",", dtype=ctype),
np.array([1., 2., 3., 4.]))
# Real component not specified
assert_equal(np.fromstring("1j, -2j, 3j, 4e1j", sep=",", dtype=ctype),
np.array([1.j, -2.j, 3.j, 40.j]))
# Both components specified
assert_equal(np.fromstring("1+1j,2-2j, -3+3j, -4e1+4j", sep=",", dtype=ctype),
np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j]))
# Spaces at wrong places
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1+2 j,3", dtype=ctype, sep=","),
np.array([1.]))
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1+ 2j,3", dtype=ctype, sep=","),
np.array([1.]))
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1 +2j,3", dtype=ctype, sep=","),
np.array([1.]))
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1+j", dtype=ctype, sep=","),
np.array([1.]))
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1+", dtype=ctype, sep=","),
np.array([1.]))
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1j+1", dtype=ctype, sep=","),
np.array([1j]))
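# The DeprecationWarning checks above and below reflect that fromstring
# historically stopped parsing silently at the first malformed token;
# NumPy now warns that this truncating behaviour is deprecated.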
def test_fromstring_bogus():
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "),
np.array([1., 2., 3.]))
def test_fromstring_empty():
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("xxxxx", sep="x"),
np.array([]))
def test_fromstring_missing():
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1xx3x4x5x6", sep="x"),
np.array([1]))
class TestFileBased:
ldbl = 1 + LD_INFO.eps
tgt = np.array([ldbl]*5)
out = ''.join([repr(t) + '\n' for t in tgt])
def test_fromfile_bogus(self):
with temppath() as path:
with open(path, 'wt') as f:
f.write("1. 2. 3. flop 4.\n")
with assert_warns(DeprecationWarning):
res = np.fromfile(path, dtype=float, sep=" ")
assert_equal(res, np.array([1., 2., 3.]))
def test_fromfile_complex(self):
for ctype in ["complex", "cdouble", "cfloat"]:
            # Check spacing between separators
with temppath() as path:
with open(path, 'wt') as f:
f.write("1, 2 , 3 ,4\n")
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1., 2., 3., 4.]))
# Real component not specified
with temppath() as path:
with open(path, 'wt') as f:
f.write("1j, -2j, 3j, 4e1j\n")
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1.j, -2.j, 3.j, 40.j]))
# Both components specified
with temppath() as path:
with open(path, 'wt') as f:
f.write("1+1j,2-2j, -3+3j, -4e1+4j\n")
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j]))
# Spaces at wrong places
with temppath() as path:
with open(path, 'wt') as f:
f.write("1+2 j,3\n")
with assert_warns(DeprecationWarning):
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1.]))
# Spaces at wrong places
with temppath() as path:
with open(path, 'wt') as f:
f.write("1+ 2j,3\n")
with assert_warns(DeprecationWarning):
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1.]))
# Spaces at wrong places
with temppath() as path:
with open(path, 'wt') as f:
f.write("1 +2j,3\n")
with assert_warns(DeprecationWarning):
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1.]))
# Spaces at wrong places
with temppath() as path:
with open(path, 'wt') as f:
f.write("1+j\n")
with assert_warns(DeprecationWarning):
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1.]))
# Spaces at wrong places
with temppath() as path:
with open(path, 'wt') as f:
f.write("1+\n")
with assert_warns(DeprecationWarning):
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1.]))
# Spaces at wrong places
with temppath() as path:
with open(path, 'wt') as f:
f.write("1j+1\n")
with assert_warns(DeprecationWarning):
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1.j]))
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_fromfile(self):
with temppath() as path:
with open(path, 'wt') as f:
f.write(self.out)
res = np.fromfile(path, dtype=np.longdouble, sep="\n")
assert_equal(res, self.tgt)
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_genfromtxt(self):
with temppath() as path:
with open(path, 'wt') as f:
f.write(self.out)
res = np.genfromtxt(path, dtype=np.longdouble)
assert_equal(res, self.tgt)
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_loadtxt(self):
with temppath() as path:
with open(path, 'wt') as f:
f.write(self.out)
res = np.loadtxt(path, dtype=np.longdouble)
assert_equal(res, self.tgt)
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_tofile_roundtrip(self):
with temppath() as path:
self.tgt.tofile(path, sep=" ")
res = np.fromfile(path, dtype=np.longdouble, sep=" ")
assert_equal(res, self.tgt)
# Conversions long double -> string
def test_repr_exact():
o = 1 + LD_INFO.eps
assert_(repr(o) != '1')
@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_format():
o = 1 + LD_INFO.eps
assert_("{0:.40g}".format(o) != '1')
@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_percent():
o = 1 + LD_INFO.eps
assert_("%.40g" % o != '1')
@pytest.mark.skipif(longdouble_longer_than_double,
reason="array repr problem")
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_array_repr():
o = 1 + LD_INFO.eps
a = np.array([o])
b = np.array([1], dtype=np.longdouble)
if not np.all(a != b):
raise ValueError("precision loss creating arrays")
assert_(repr(a) != repr(b))
#
# Locale tests: scalar types formatting should be independent of the locale
#
class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
def test_repr_roundtrip_foreign(self):
o = 1.5
assert_equal(o, np.longdouble(repr(o)))
def test_fromstring_foreign_repr(self):
f = 1.234
a = np.fromstring(repr(f), dtype=float, sep=" ")
assert_equal(a[0], f)
def test_fromstring_best_effort_float(self):
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1,234", dtype=float, sep=" "),
np.array([1.]))
def test_fromstring_best_effort(self):
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "),
np.array([1.]))
def test_fromstring_foreign(self):
s = "1.234"
a = np.fromstring(s, dtype=np.longdouble, sep=" ")
assert_equal(a[0], np.longdouble(s))
def test_fromstring_foreign_sep(self):
a = np.array([1, 2, 3, 4])
b = np.fromstring("1,2,3,4,", dtype=np.longdouble, sep=",")
assert_array_equal(a, b)
def test_fromstring_foreign_value(self):
with assert_warns(DeprecationWarning):
b = np.fromstring("1,234", dtype=np.longdouble, sep=" ")
assert_array_equal(b[0], 1)
@pytest.mark.parametrize("int_val", [
# cases discussed in gh-10723
# and gh-9968
2 ** 1024, 0])
def test_longdouble_from_int(int_val):
# for issue gh-9968
str_val = str(int_val)
# we'll expect a RuntimeWarning on platforms
# with np.longdouble equivalent to np.double
# for large integer input
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
# can be inf==inf on some platforms
assert np.longdouble(int_val) == np.longdouble(str_val)
# we can't directly compare the int and
# max longdouble value on all platforms
if np.allclose(np.finfo(np.longdouble).max,
np.finfo(np.double).max) and w:
assert w[0].category is RuntimeWarning
@pytest.mark.parametrize("bool_val", [
True, False])
def test_longdouble_from_bool(bool_val):
assert np.longdouble(bool_val) == np.longdouble(int(bool_val))
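# A minimal sketch (illustrative helper, not part of NumPy's API) of the
# platform distinction the warning checks above hinge on: whether
# np.longdouble actually carries more precision than np.double on this build.
def _longdouble_is_distinct_demo():
    # when longdouble aliases double (e.g. MSVC builds) the eps values match
    return np.finfo(np.longdouble).eps < np.finfo(np.double).eps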
| 13,042 | Python | 34.156334 | 87 | 0.564637 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_mem_policy.py | import asyncio
import gc
import os
import pytest
import numpy as np
import threading
import warnings
from numpy.testing import extbuild, assert_warns
import sys
@pytest.fixture
def get_module(tmp_path):
""" Add a memory policy that returns a false pointer 64 bytes into the
actual allocation, and fill the prefix with some text. Then check at each
memory manipulation that the prefix exists, to make sure all alloc/realloc/
free/calloc go via the functions here.
"""
if sys.platform.startswith('cygwin'):
pytest.skip('link fails on cygwin')
functions = [
("get_default_policy", "METH_NOARGS", """
Py_INCREF(PyDataMem_DefaultHandler);
return PyDataMem_DefaultHandler;
"""),
("set_secret_data_policy", "METH_NOARGS", """
PyObject *secret_data =
PyCapsule_New(&secret_data_handler, "mem_handler", NULL);
if (secret_data == NULL) {
return NULL;
}
PyObject *old = PyDataMem_SetHandler(secret_data);
Py_DECREF(secret_data);
return old;
"""),
("set_old_policy", "METH_O", """
PyObject *old;
if (args != NULL && PyCapsule_CheckExact(args)) {
old = PyDataMem_SetHandler(args);
}
else {
old = PyDataMem_SetHandler(NULL);
}
return old;
"""),
("get_array", "METH_NOARGS", """
char *buf = (char *)malloc(20);
npy_intp dims[1];
dims[0] = 20;
PyArray_Descr *descr = PyArray_DescrNewFromType(NPY_UINT8);
return PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims, NULL,
buf, NPY_ARRAY_WRITEABLE, NULL);
"""),
("set_own", "METH_O", """
if (!PyArray_Check(args)) {
PyErr_SetString(PyExc_ValueError,
"need an ndarray");
return NULL;
}
PyArray_ENABLEFLAGS((PyArrayObject*)args, NPY_ARRAY_OWNDATA);
// Maybe try this too?
             // PyArray_BASE((PyArrayObject *)args) = NULL;
Py_RETURN_NONE;
"""),
("get_array_with_base", "METH_NOARGS", """
char *buf = (char *)malloc(20);
npy_intp dims[1];
dims[0] = 20;
PyArray_Descr *descr = PyArray_DescrNewFromType(NPY_UINT8);
PyObject *arr = PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims,
NULL, buf,
NPY_ARRAY_WRITEABLE, NULL);
if (arr == NULL) return NULL;
PyObject *obj = PyCapsule_New(buf, "buf capsule",
(PyCapsule_Destructor)&warn_on_free);
if (obj == NULL) {
Py_DECREF(arr);
return NULL;
}
if (PyArray_SetBaseObject((PyArrayObject *)arr, obj) < 0) {
Py_DECREF(arr);
Py_DECREF(obj);
return NULL;
}
return arr;
"""),
]
prologue = '''
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include <numpy/arrayobject.h>
/*
* This struct allows the dynamic configuration of the allocator funcs
* of the `secret_data_allocator`. It is provided here for
* demonstration purposes, as a valid `ctx` use-case scenario.
*/
typedef struct {
void *(*malloc)(size_t);
void *(*calloc)(size_t, size_t);
void *(*realloc)(void *, size_t);
void (*free)(void *);
} SecretDataAllocatorFuncs;
NPY_NO_EXPORT void *
shift_alloc(void *ctx, size_t sz) {
SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx;
char *real = (char *)funcs->malloc(sz + 64);
if (real == NULL) {
return NULL;
}
snprintf(real, 64, "originally allocated %ld", (unsigned long)sz);
return (void *)(real + 64);
}
NPY_NO_EXPORT void *
shift_zero(void *ctx, size_t sz, size_t cnt) {
SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx;
char *real = (char *)funcs->calloc(sz + 64, cnt);
if (real == NULL) {
return NULL;
}
snprintf(real, 64, "originally allocated %ld via zero",
(unsigned long)sz);
return (void *)(real + 64);
}
NPY_NO_EXPORT void
shift_free(void *ctx, void * p, npy_uintp sz) {
SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx;
if (p == NULL) {
                return;
}
char *real = (char *)p - 64;
if (strncmp(real, "originally allocated", 20) != 0) {
fprintf(stdout, "uh-oh, unmatched shift_free, "
"no appropriate prefix\\n");
/* Make C runtime crash by calling free on the wrong address */
funcs->free((char *)p + 10);
/* funcs->free(real); */
}
else {
                npy_uintp i = (npy_uintp)atoi(real + 20);
if (i != sz) {
fprintf(stderr, "uh-oh, unmatched shift_free"
"(ptr, %ld) but allocated %ld\\n", sz, i);
/* This happens in some places, only print */
funcs->free(real);
}
else {
funcs->free(real);
}
}
}
NPY_NO_EXPORT void *
shift_realloc(void *ctx, void * p, npy_uintp sz) {
SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx;
if (p != NULL) {
char *real = (char *)p - 64;
if (strncmp(real, "originally allocated", 20) != 0) {
fprintf(stdout, "uh-oh, unmatched shift_realloc\\n");
return realloc(p, sz);
}
return (void *)((char *)funcs->realloc(real, sz + 64) + 64);
}
else {
char *real = (char *)funcs->realloc(p, sz + 64);
if (real == NULL) {
return NULL;
}
snprintf(real, 64, "originally allocated "
"%ld via realloc", (unsigned long)sz);
return (void *)(real + 64);
}
}
/* As an example, we use the standard {m|c|re}alloc/free funcs. */
static SecretDataAllocatorFuncs secret_data_handler_ctx = {
malloc,
calloc,
realloc,
free
};
static PyDataMem_Handler secret_data_handler = {
"secret_data_allocator",
1,
{
&secret_data_handler_ctx, /* ctx */
shift_alloc, /* malloc */
shift_zero, /* calloc */
shift_realloc, /* realloc */
shift_free /* free */
}
};
void warn_on_free(void *capsule) {
PyErr_WarnEx(PyExc_UserWarning, "in warn_on_free", 1);
void * obj = PyCapsule_GetPointer(capsule,
PyCapsule_GetName(capsule));
free(obj);
};
'''
more_init = "import_array();"
try:
import mem_policy
return mem_policy
except ImportError:
pass
# if it does not exist, build and load it
return extbuild.build_and_import_extension('mem_policy',
functions,
prologue=prologue,
include_dirs=[np.get_include()],
build_dir=tmp_path,
more_init=more_init)
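# A pure-Python sketch of the scheme the C handler above implements: hand out
# an address 64 bytes past the real allocation and keep a text prefix there,
# so free/realloc can verify every pointer they receive came from this
# allocator. Names are illustrative; the real logic is in shift_alloc/
# shift_realloc/shift_free.
def _shifted_alloc_demo(sz):
    real = bytearray(64 + sz)
    prefix = b"originally allocated %d" % sz
    real[:len(prefix)] = prefix
    return real, 64  # the "user" pointer is offset 64 into the block
def _shifted_free_demo(block):
    real, offset = block
    assert real.startswith(b"originally allocated"), "unmatched free"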
def test_set_policy(get_module):
get_handler_name = np.core.multiarray.get_handler_name
get_handler_version = np.core.multiarray.get_handler_version
orig_policy_name = get_handler_name()
a = np.arange(10).reshape((2, 5)) # a doesn't own its own data
assert get_handler_name(a) is None
assert get_handler_version(a) is None
assert get_handler_name(a.base) == orig_policy_name
assert get_handler_version(a.base) == 1
orig_policy = get_module.set_secret_data_policy()
b = np.arange(10).reshape((2, 5)) # b doesn't own its own data
assert get_handler_name(b) is None
assert get_handler_version(b) is None
assert get_handler_name(b.base) == 'secret_data_allocator'
assert get_handler_version(b.base) == 1
if orig_policy_name == 'default_allocator':
get_module.set_old_policy(None) # tests PyDataMem_SetHandler(NULL)
assert get_handler_name() == 'default_allocator'
else:
get_module.set_old_policy(orig_policy)
assert get_handler_name() == orig_policy_name
def test_default_policy_singleton(get_module):
get_handler_name = np.core.multiarray.get_handler_name
# set the policy to default
orig_policy = get_module.set_old_policy(None)
assert get_handler_name() == 'default_allocator'
# re-set the policy to default
def_policy_1 = get_module.set_old_policy(None)
assert get_handler_name() == 'default_allocator'
# set the policy to original
def_policy_2 = get_module.set_old_policy(orig_policy)
# since default policy is a singleton,
# these should be the same object
assert def_policy_1 is def_policy_2 is get_module.get_default_policy()
def test_policy_propagation(get_module):
# The memory policy goes hand-in-hand with flags.owndata
class MyArr(np.ndarray):
pass
get_handler_name = np.core.multiarray.get_handler_name
orig_policy_name = get_handler_name()
a = np.arange(10).view(MyArr).reshape((2, 5))
assert get_handler_name(a) is None
assert a.flags.owndata is False
assert get_handler_name(a.base) is None
assert a.base.flags.owndata is False
assert get_handler_name(a.base.base) == orig_policy_name
assert a.base.base.flags.owndata is True
async def concurrent_context1(get_module, orig_policy_name, event):
if orig_policy_name == 'default_allocator':
get_module.set_secret_data_policy()
assert np.core.multiarray.get_handler_name() == 'secret_data_allocator'
else:
get_module.set_old_policy(None)
assert np.core.multiarray.get_handler_name() == 'default_allocator'
event.set()
async def concurrent_context2(get_module, orig_policy_name, event):
await event.wait()
# the policy is not affected by changes in parallel contexts
assert np.core.multiarray.get_handler_name() == orig_policy_name
# change policy in the child context
if orig_policy_name == 'default_allocator':
get_module.set_secret_data_policy()
assert np.core.multiarray.get_handler_name() == 'secret_data_allocator'
else:
get_module.set_old_policy(None)
assert np.core.multiarray.get_handler_name() == 'default_allocator'
async def async_test_context_locality(get_module):
orig_policy_name = np.core.multiarray.get_handler_name()
event = asyncio.Event()
# the child contexts inherit the parent policy
concurrent_task1 = asyncio.create_task(
concurrent_context1(get_module, orig_policy_name, event))
concurrent_task2 = asyncio.create_task(
concurrent_context2(get_module, orig_policy_name, event))
await concurrent_task1
await concurrent_task2
# the parent context is not affected by child policy changes
assert np.core.multiarray.get_handler_name() == orig_policy_name
def test_context_locality(get_module):
if (sys.implementation.name == 'pypy'
and sys.pypy_version_info[:3] < (7, 3, 6)):
pytest.skip('no context-locality support in PyPy < 7.3.6')
asyncio.run(async_test_context_locality(get_module))
def concurrent_thread1(get_module, event):
get_module.set_secret_data_policy()
assert np.core.multiarray.get_handler_name() == 'secret_data_allocator'
event.set()
def concurrent_thread2(get_module, event):
event.wait()
# the policy is not affected by changes in parallel threads
assert np.core.multiarray.get_handler_name() == 'default_allocator'
# change policy in the child thread
get_module.set_secret_data_policy()
def test_thread_locality(get_module):
orig_policy_name = np.core.multiarray.get_handler_name()
event = threading.Event()
# the child threads do not inherit the parent policy
concurrent_task1 = threading.Thread(target=concurrent_thread1,
args=(get_module, event))
concurrent_task2 = threading.Thread(target=concurrent_thread2,
args=(get_module, event))
concurrent_task1.start()
concurrent_task2.start()
concurrent_task1.join()
concurrent_task2.join()
# the parent thread is not affected by child policy changes
assert np.core.multiarray.get_handler_name() == orig_policy_name
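# The locality semantics exercised above match Python's contextvars: an
# asyncio task copies its parent's context, while a new thread starts from a
# fresh context and therefore sees the default. A minimal illustrative sketch
# (the variable below is hypothetical, not NumPy's internal one):
import contextvars
_policy_var = contextvars.ContextVar('policy', default='default_allocator')
def _thread_sees_default_demo():
    _policy_var.set('secret_data_allocator')
    seen = []
    t = threading.Thread(target=lambda: seen.append(_policy_var.get()))
    t.start()
    t.join()
    assert seen == ['default_allocator']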
@pytest.mark.slow
def test_new_policy(get_module):
a = np.arange(10)
orig_policy_name = np.core.multiarray.get_handler_name(a)
orig_policy = get_module.set_secret_data_policy()
b = np.arange(10)
assert np.core.multiarray.get_handler_name(b) == 'secret_data_allocator'
# test array manipulation. This is slow
if orig_policy_name == 'default_allocator':
# when the np.core.test tests recurse into this test, the
# policy will be set so this "if" will be false, preventing
# infinite recursion
#
# if needed, debug this by
# - running tests with -- -s (to not capture stdout/stderr
# - setting extra_argv=['-vv'] here
assert np.core.test('full', verbose=2, extra_argv=['-vv'])
# also try the ma tests, the pickling test is quite tricky
assert np.ma.test('full', verbose=2, extra_argv=['-vv'])
get_module.set_old_policy(orig_policy)
c = np.arange(10)
assert np.core.multiarray.get_handler_name(c) == orig_policy_name
@pytest.mark.xfail(sys.implementation.name == "pypy",
reason=("bad interaction between getenv and "
"os.environ inside pytest"))
@pytest.mark.parametrize("policy", ["0", "1", None])
def test_switch_owner(get_module, policy):
a = get_module.get_array()
assert np.core.multiarray.get_handler_name(a) is None
get_module.set_own(a)
oldval = os.environ.get('NUMPY_WARN_IF_NO_MEM_POLICY', None)
if policy is None:
if 'NUMPY_WARN_IF_NO_MEM_POLICY' in os.environ:
os.environ.pop('NUMPY_WARN_IF_NO_MEM_POLICY')
else:
os.environ['NUMPY_WARN_IF_NO_MEM_POLICY'] = policy
try:
# The policy should be NULL, so we have to assume we can call
# "free". A warning is given if the policy == "1"
if policy == "1":
with assert_warns(RuntimeWarning) as w:
del a
gc.collect()
else:
del a
gc.collect()
finally:
if oldval is None:
if 'NUMPY_WARN_IF_NO_MEM_POLICY' in os.environ:
os.environ.pop('NUMPY_WARN_IF_NO_MEM_POLICY')
else:
os.environ['NUMPY_WARN_IF_NO_MEM_POLICY'] = oldval
def test_owner_is_base(get_module):
a = get_module.get_array_with_base()
with pytest.warns(UserWarning, match='warn_on_free'):
del a
gc.collect()
| 15,869 | Python | 36.429245 | 79 | 0.556494 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_cython.py | import os
import shutil
import subprocess
import sys
import pytest
import numpy as np
# This import is copied from random.tests.test_extending
try:
import cython
from Cython.Compiler.Version import version as cython_version
except ImportError:
cython = None
else:
from numpy.compat import _pep440
# Cython 0.29.30 is required for Python 3.11 and there are
# other fixes in the 0.29 series that are needed even for earlier
# Python versions.
# Note: keep in sync with the one in pyproject.toml
required_version = "0.29.30"
if _pep440.parse(cython_version) < _pep440.Version(required_version):
# too old or wrong cython, skip the test
cython = None
pytestmark = pytest.mark.skipif(cython is None, reason="requires cython")
@pytest.fixture
def install_temp(request, tmp_path):
# Based in part on test_cython from random.tests.test_extending
here = os.path.dirname(__file__)
ext_dir = os.path.join(here, "examples", "cython")
cytest = str(tmp_path / "cytest")
shutil.copytree(ext_dir, cytest)
# build the examples and "install" them into a temporary directory
install_log = str(tmp_path / "tmp_install_log.txt")
subprocess.check_output(
[
sys.executable,
"setup.py",
"build",
"install",
"--prefix", str(tmp_path / "installdir"),
"--single-version-externally-managed",
"--record",
install_log,
],
cwd=cytest,
)
# In order to import the built module, we need its path to sys.path
# so parse that out of the record
with open(install_log) as fid:
for line in fid:
if "checks" in line:
sys.path.append(os.path.dirname(line))
break
else:
raise RuntimeError(f'could not parse "{install_log}"')
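# A sketch of the record-parsing step above in isolation: scan the setuptools
# --record file for the installed extension and return its directory so it
# can be appended to sys.path ("checks" matches the example package; the
# helper name itself is illustrative).
def _module_dir_from_record(install_log, modname="checks"):
    with open(install_log) as fid:
        for line in fid:
            if modname in line:
                return os.path.dirname(line)
    raise RuntimeError(f'could not parse "{install_log}"')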
def test_is_timedelta64_object(install_temp):
import checks
assert checks.is_td64(np.timedelta64(1234))
assert checks.is_td64(np.timedelta64(1234, "ns"))
assert checks.is_td64(np.timedelta64("NaT", "ns"))
assert not checks.is_td64(1)
assert not checks.is_td64(None)
assert not checks.is_td64("foo")
assert not checks.is_td64(np.datetime64("now", "s"))
def test_is_datetime64_object(install_temp):
import checks
assert checks.is_dt64(np.datetime64(1234, "ns"))
assert checks.is_dt64(np.datetime64("NaT", "ns"))
assert not checks.is_dt64(1)
assert not checks.is_dt64(None)
assert not checks.is_dt64("foo")
assert not checks.is_dt64(np.timedelta64(1234))
def test_get_datetime64_value(install_temp):
import checks
dt64 = np.datetime64("2016-01-01", "ns")
result = checks.get_dt64_value(dt64)
expected = dt64.view("i8")
assert result == expected
def test_get_timedelta64_value(install_temp):
import checks
td64 = np.timedelta64(12345, "h")
result = checks.get_td64_value(td64)
expected = td64.view("i8")
assert result == expected
def test_get_datetime64_unit(install_temp):
import checks
dt64 = np.datetime64("2016-01-01", "ns")
result = checks.get_dt64_unit(dt64)
expected = 10
assert result == expected
td64 = np.timedelta64(12345, "h")
result = checks.get_dt64_unit(td64)
expected = 5
assert result == expected
def test_abstract_scalars(install_temp):
import checks
assert checks.is_integer(1)
assert checks.is_integer(np.int8(1))
assert checks.is_integer(np.uint64(1))
| 3,536 | Python | 25.2 | 73 | 0.64819 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_limited_api.py | import os
import shutil
import subprocess
import sys
import sysconfig
import pytest
@pytest.mark.xfail(
sysconfig.get_config_var("Py_DEBUG"),
reason=(
"Py_LIMITED_API is incompatible with Py_DEBUG, Py_TRACE_REFS, "
"and Py_REF_DEBUG"
),
)
def test_limited_api(tmp_path):
"""Test building a third-party C extension with the limited API."""
# Based in part on test_cython from random.tests.test_extending
here = os.path.dirname(__file__)
ext_dir = os.path.join(here, "examples", "limited_api")
cytest = str(tmp_path / "limited_api")
shutil.copytree(ext_dir, cytest)
# build the examples and "install" them into a temporary directory
install_log = str(tmp_path / "tmp_install_log.txt")
subprocess.check_output(
[
sys.executable,
"setup.py",
"build",
"install",
"--prefix", str(tmp_path / "installdir"),
"--single-version-externally-managed",
"--record",
install_log,
],
cwd=cytest,
)
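# For reference, a hedged sketch of what the setup.py built above needs:
# compile against the stable ABI by defining Py_LIMITED_API and tag the
# extension via py_limited_api. Module/source names are illustrative, not
# the shipped examples/limited_api files.
def _limited_api_setup_sketch():
    from setuptools import Extension, setup
    ext = Extension("limited_demo", sources=["limited_demo.c"],
                    define_macros=[("Py_LIMITED_API", "0x03060000")],
                    py_limited_api=True)
    setup(name="limited_demo", ext_modules=[ext])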
| 1,075 | Python | 24.619047 | 71 | 0.596279 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_umath.py | import platform
import warnings
import fnmatch
import itertools
import pytest
import sys
import os
import operator
from fractions import Fraction
from functools import reduce
from collections import namedtuple
import numpy.core.umath as ncu
from numpy.core import _umath_tests as ncu_tests
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_array_max_ulp, assert_allclose, assert_no_warnings, suppress_warnings,
_gen_alignment_data, assert_array_almost_equal_nulp
)
from numpy.testing._private.utils import _glibc_older_than
def interesting_binop_operands(val1, val2, dtype):
"""
Helper to create "interesting" operands to cover common code paths:
* scalar inputs
* only first "values" is an array (e.g. scalar division fast-paths)
* Longer array (SIMD) placing the value of interest at different positions
* Oddly strided arrays which may not be SIMD compatible
It does not attempt to cover unaligned access or mixed dtypes.
These are normally handled by the casting/buffering machinery.
    This is not a fixture (currently), since a fixture normally only
    yields once.
"""
fill_value = 1 # could be a parameter, but maybe not an optional one?
arr1 = np.full(10003, dtype=dtype, fill_value=fill_value)
arr2 = np.full(10003, dtype=dtype, fill_value=fill_value)
arr1[0] = val1
arr2[0] = val2
extractor = lambda res: res
yield arr1[0], arr2[0], extractor, "scalars"
extractor = lambda res: res
yield arr1[0, ...], arr2[0, ...], extractor, "scalar-arrays"
# reset array values to fill_value:
arr1[0] = fill_value
arr2[0] = fill_value
for pos in [0, 1, 2, 3, 4, 5, -1, -2, -3, -4]:
arr1[pos] = val1
arr2[pos] = val2
extractor = lambda res: res[pos]
yield arr1, arr2, extractor, f"off-{pos}"
yield arr1, arr2[pos], extractor, f"off-{pos}-with-scalar"
arr1[pos] = fill_value
arr2[pos] = fill_value
for stride in [-1, 113]:
op1 = arr1[::stride]
op2 = arr2[::stride]
op1[10] = val1
op2[10] = val2
extractor = lambda res: res[10]
yield op1, op2, extractor, f"stride-{stride}"
op1[10] = fill_value
op2[10] = fill_value
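# Minimal usage sketch for the generator above (illustrative only): every
# yielded case must produce val1 + val2 at the position the extractor reads.
def _interesting_operands_demo():
    for op1, op2, extractor, ident in interesting_binop_operands(1, 2, "i8"):
        res = np.add(op1, op2)
        assert extractor(res) == 3, ident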
def on_powerpc():
""" True if we are running on a Power PC platform."""
return platform.processor() == 'powerpc' or \
platform.machine().startswith('ppc')
def bad_arcsinh():
"""The blocklisted trig functions are not accurate on aarch64/PPC for
    complex256. Rather than dig through the actual problem, skip the
    test. This should be fixed when we can move past glibc 2.17,
    which is the version in manylinux2014.
"""
if platform.machine() == 'aarch64':
x = 1.78e-10
elif on_powerpc():
x = 2.16e-10
else:
return False
v1 = np.arcsinh(np.float128(x))
v2 = np.arcsinh(np.complex256(x)).real
# The eps for float128 is 1-e33, so this is way bigger
return abs((v1 / v2) - 1.0) > 1e-23
class _FilterInvalids:
def setup_method(self):
self.olderr = np.seterr(invalid='ignore')
def teardown_method(self):
np.seterr(**self.olderr)
class TestConstants:
def test_pi(self):
assert_allclose(ncu.pi, 3.141592653589793, 1e-15)
def test_e(self):
assert_allclose(ncu.e, 2.718281828459045, 1e-15)
def test_euler_gamma(self):
assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15)
class TestOut:
def test_out_subok(self):
for subok in (True, False):
a = np.array(0.5)
o = np.empty(())
r = np.add(a, 2, o, subok=subok)
assert_(r is o)
r = np.add(a, 2, out=o, subok=subok)
assert_(r is o)
r = np.add(a, 2, out=(o,), subok=subok)
assert_(r is o)
d = np.array(5.7)
o1 = np.empty(())
o2 = np.empty((), dtype=np.int32)
r1, r2 = np.frexp(d, o1, None, subok=subok)
assert_(r1 is o1)
r1, r2 = np.frexp(d, None, o2, subok=subok)
assert_(r2 is o2)
r1, r2 = np.frexp(d, o1, o2, subok=subok)
assert_(r1 is o1)
assert_(r2 is o2)
r1, r2 = np.frexp(d, out=(o1, None), subok=subok)
assert_(r1 is o1)
r1, r2 = np.frexp(d, out=(None, o2), subok=subok)
assert_(r2 is o2)
r1, r2 = np.frexp(d, out=(o1, o2), subok=subok)
assert_(r1 is o1)
assert_(r2 is o2)
with assert_raises(TypeError):
# Out argument must be tuple, since there are multiple outputs.
r1, r2 = np.frexp(d, out=o1, subok=subok)
assert_raises(TypeError, np.add, a, 2, o, o, subok=subok)
assert_raises(TypeError, np.add, a, 2, o, out=o, subok=subok)
assert_raises(TypeError, np.add, a, 2, None, out=o, subok=subok)
assert_raises(ValueError, np.add, a, 2, out=(o, o), subok=subok)
assert_raises(ValueError, np.add, a, 2, out=(), subok=subok)
assert_raises(TypeError, np.add, a, 2, [], subok=subok)
assert_raises(TypeError, np.add, a, 2, out=[], subok=subok)
assert_raises(TypeError, np.add, a, 2, out=([],), subok=subok)
o.flags.writeable = False
assert_raises(ValueError, np.add, a, 2, o, subok=subok)
assert_raises(ValueError, np.add, a, 2, out=o, subok=subok)
assert_raises(ValueError, np.add, a, 2, out=(o,), subok=subok)
def test_out_wrap_subok(self):
class ArrayWrap(np.ndarray):
__array_priority__ = 10
def __new__(cls, arr):
return np.asarray(arr).view(cls).copy()
def __array_wrap__(self, arr, context):
return arr.view(type(self))
for subok in (True, False):
a = ArrayWrap([0.5])
r = np.add(a, 2, subok=subok)
if subok:
assert_(isinstance(r, ArrayWrap))
else:
assert_(type(r) == np.ndarray)
r = np.add(a, 2, None, subok=subok)
if subok:
assert_(isinstance(r, ArrayWrap))
else:
assert_(type(r) == np.ndarray)
r = np.add(a, 2, out=None, subok=subok)
if subok:
assert_(isinstance(r, ArrayWrap))
else:
assert_(type(r) == np.ndarray)
r = np.add(a, 2, out=(None,), subok=subok)
if subok:
assert_(isinstance(r, ArrayWrap))
else:
assert_(type(r) == np.ndarray)
d = ArrayWrap([5.7])
o1 = np.empty((1,))
o2 = np.empty((1,), dtype=np.int32)
r1, r2 = np.frexp(d, o1, subok=subok)
if subok:
assert_(isinstance(r2, ArrayWrap))
else:
assert_(type(r2) == np.ndarray)
r1, r2 = np.frexp(d, o1, None, subok=subok)
if subok:
assert_(isinstance(r2, ArrayWrap))
else:
assert_(type(r2) == np.ndarray)
r1, r2 = np.frexp(d, None, o2, subok=subok)
if subok:
assert_(isinstance(r1, ArrayWrap))
else:
assert_(type(r1) == np.ndarray)
r1, r2 = np.frexp(d, out=(o1, None), subok=subok)
if subok:
assert_(isinstance(r2, ArrayWrap))
else:
assert_(type(r2) == np.ndarray)
r1, r2 = np.frexp(d, out=(None, o2), subok=subok)
if subok:
assert_(isinstance(r1, ArrayWrap))
else:
assert_(type(r1) == np.ndarray)
with assert_raises(TypeError):
# Out argument must be tuple, since there are multiple outputs.
r1, r2 = np.frexp(d, out=o1, subok=subok)
class TestComparisons:
def test_ignore_object_identity_in_equal(self):
# Check comparing identical objects whose comparison
# is not a simple boolean, e.g., arrays that are compared elementwise.
a = np.array([np.array([1, 2, 3]), None], dtype=object)
assert_raises(ValueError, np.equal, a, a)
# Check error raised when comparing identical non-comparable objects.
class FunkyType:
def __eq__(self, other):
raise TypeError("I won't compare")
a = np.array([FunkyType()])
assert_raises(TypeError, np.equal, a, a)
# Check identity doesn't override comparison mismatch.
a = np.array([np.nan], dtype=object)
assert_equal(np.equal(a, a), [False])
def test_ignore_object_identity_in_not_equal(self):
# Check comparing identical objects whose comparison
# is not a simple boolean, e.g., arrays that are compared elementwise.
a = np.array([np.array([1, 2, 3]), None], dtype=object)
assert_raises(ValueError, np.not_equal, a, a)
# Check error raised when comparing identical non-comparable objects.
class FunkyType:
def __ne__(self, other):
raise TypeError("I won't compare")
a = np.array([FunkyType()])
assert_raises(TypeError, np.not_equal, a, a)
# Check identity doesn't override comparison mismatch.
a = np.array([np.nan], dtype=object)
assert_equal(np.not_equal(a, a), [True])
def test_error_in_equal_reduce(self):
# gh-20929
# make sure np.equal.reduce raises a TypeError if an array is passed
# without specifying the dtype
a = np.array([0, 0])
assert_equal(np.equal.reduce(a, dtype=bool), True)
assert_raises(TypeError, np.equal.reduce, a)
class TestAdd:
def test_reduce_alignment(self):
# gh-9876
# make sure arrays with weird strides work with the optimizations in
# pairwise_sum_@TYPE@. On x86, the 'b' field will count as aligned at a
# 4 byte offset, even though its itemsize is 8.
a = np.zeros(2, dtype=[('a', np.int32), ('b', np.float64)])
a['a'] = -1
assert_equal(a['b'].sum(), 0)
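# For reference: in the packed struct dtype used above, 'b' sits at byte
# offset 4, so the float64 field is only 4-byte aligned even though its
# itemsize is 8. An illustrative check (not part of the original suite):
def _unaligned_field_demo():
    dt = np.dtype([('a', np.int32), ('b', np.float64)])
    assert dt.fields['b'][1] == 4  # byte offset of 'b' within the struct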
class TestDivision:
def test_division_int(self):
# int division should follow Python
x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
if 5 / 10 == 0.5:
assert_equal(x / 100, [0.05, 0.1, 0.9, 1,
-0.05, -0.1, -0.9, -1, -1.2])
else:
assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80])
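    @staticmethod
    def _floor_semantics_demo():
        # Illustrative only: Python floor division rounds toward -inf, so
        # -120 // 100 == -2 and the remainder takes the divisor's sign,
        # which is exactly what the expected lists above encode.
        assert (-120) // 100 == -2 and (-120) % 100 == 80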
@pytest.mark.parametrize("dtype,ex_val", itertools.product(
np.sctypes['int'] + np.sctypes['uint'], (
(
# dividend
"np.arange(fo.max-lsize, fo.max, dtype=dtype),"
# divisors
"np.arange(lsize, dtype=dtype),"
# scalar divisors
"range(15)"
),
(
# dividend
"np.arange(fo.min, fo.min+lsize, dtype=dtype),"
# divisors
"np.arange(lsize//-2, lsize//2, dtype=dtype),"
# scalar divisors
"range(fo.min, fo.min + 15)"
), (
# dividend
"np.arange(fo.max-lsize, fo.max, dtype=dtype),"
# divisors
"np.arange(lsize, dtype=dtype),"
# scalar divisors
"[1,3,9,13,neg, fo.min+1, fo.min//2, fo.max//3, fo.max//4]"
)
)
))
def test_division_int_boundary(self, dtype, ex_val):
fo = np.iinfo(dtype)
neg = -1 if fo.min < 0 else 1
        # Large enough to test SIMD loops and the leftover (remainder) elements
lsize = 512 + 7
a, b, divisors = eval(ex_val)
a_lst, b_lst = a.tolist(), b.tolist()
c_div = lambda n, d: (
0 if d == 0 else (
fo.min if (n and n == fo.min and d == -1) else n//d
)
)
with np.errstate(divide='ignore'):
ac = a.copy()
ac //= b
div_ab = a // b
div_lst = [c_div(x, y) for x, y in zip(a_lst, b_lst)]
msg = "Integer arrays floor division check (//)"
assert all(div_ab == div_lst), msg
msg_eq = "Integer arrays floor division check (//=)"
assert all(ac == div_lst), msg_eq
for divisor in divisors:
ac = a.copy()
with np.errstate(divide='ignore', over='ignore'):
div_a = a // divisor
ac //= divisor
div_lst = [c_div(i, divisor) for i in a_lst]
assert all(div_a == div_lst), msg
assert all(ac == div_lst), msg_eq
with np.errstate(divide='raise', over='raise'):
if 0 in b:
# Verify overflow case
with pytest.raises(FloatingPointError,
match="divide by zero encountered in floor_divide"):
a // b
else:
a // b
if fo.min and fo.min in a:
with pytest.raises(FloatingPointError,
match='overflow encountered in floor_divide'):
a // -1
elif fo.min:
a // -1
with pytest.raises(FloatingPointError,
match="divide by zero encountered in floor_divide"):
a // 0
with pytest.raises(FloatingPointError,
match="divide by zero encountered in floor_divide"):
ac = a.copy()
ac //= 0
np.array([], dtype=dtype) // 0
@pytest.mark.parametrize("dtype,ex_val", itertools.product(
np.sctypes['int'] + np.sctypes['uint'], (
"np.array([fo.max, 1, 2, 1, 1, 2, 3], dtype=dtype)",
"np.array([fo.min, 1, -2, 1, 1, 2, -3], dtype=dtype)",
"np.arange(fo.min, fo.min+(100*10), 10, dtype=dtype)",
"np.arange(fo.max-(100*7), fo.max, 7, dtype=dtype)",
)
))
def test_division_int_reduce(self, dtype, ex_val):
fo = np.iinfo(dtype)
a = eval(ex_val)
lst = a.tolist()
c_div = lambda n, d: (
0 if d == 0 or (n and n == fo.min and d == -1) else n//d
)
with np.errstate(divide='ignore'):
div_a = np.floor_divide.reduce(a)
div_lst = reduce(c_div, lst)
msg = "Reduce floor integer division check"
assert div_a == div_lst, msg
with np.errstate(divide='raise', over='raise'):
with pytest.raises(FloatingPointError,
match="divide by zero encountered in reduce"):
np.floor_divide.reduce(np.arange(-100, 100, dtype=dtype))
if fo.min:
with pytest.raises(FloatingPointError,
match='overflow encountered in reduce'):
np.floor_divide.reduce(
np.array([fo.min, 1, -1], dtype=dtype)
)
@pytest.mark.parametrize(
"dividend,divisor,quotient",
[(np.timedelta64(2,'Y'), np.timedelta64(2,'M'), 12),
(np.timedelta64(2,'Y'), np.timedelta64(-2,'M'), -12),
(np.timedelta64(-2,'Y'), np.timedelta64(2,'M'), -12),
(np.timedelta64(-2,'Y'), np.timedelta64(-2,'M'), 12),
(np.timedelta64(2,'M'), np.timedelta64(-2,'Y'), -1),
(np.timedelta64(2,'Y'), np.timedelta64(0,'M'), 0),
(np.timedelta64(2,'Y'), 2, np.timedelta64(1,'Y')),
(np.timedelta64(2,'Y'), -2, np.timedelta64(-1,'Y')),
(np.timedelta64(-2,'Y'), 2, np.timedelta64(-1,'Y')),
(np.timedelta64(-2,'Y'), -2, np.timedelta64(1,'Y')),
(np.timedelta64(-2,'Y'), -3, np.timedelta64(0,'Y')),
(np.timedelta64(-2,'Y'), 0, np.timedelta64('Nat','Y')),
])
def test_division_int_timedelta(self, dividend, divisor, quotient):
# If either divisor is 0 or quotient is Nat, check for division by 0
if divisor and (isinstance(quotient, int) or not np.isnat(quotient)):
msg = "Timedelta floor division check"
assert dividend // divisor == quotient, msg
# Test for arrays as well
msg = "Timedelta arrays floor division check"
dividend_array = np.array([dividend]*5)
quotient_array = np.array([quotient]*5)
assert all(dividend_array // divisor == quotient_array), msg
else:
with np.errstate(divide='raise', invalid='raise'):
with pytest.raises(FloatingPointError):
dividend // divisor
def test_division_complex(self):
# check that implementation is correct
msg = "Complex division implementation check"
x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128)
assert_almost_equal(x**2/x, x, err_msg=msg)
# check overflow, underflow
msg = "Complex division overflow/underflow check"
x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
y = x**2/x
assert_almost_equal(y/x, [1, 1], err_msg=msg)
def test_zero_division_complex(self):
with np.errstate(invalid="ignore", divide="ignore"):
x = np.array([0.0], dtype=np.complex128)
y = 1.0/x
assert_(np.isinf(y)[0])
y = complex(np.inf, np.nan)/x
assert_(np.isinf(y)[0])
y = complex(np.nan, np.inf)/x
assert_(np.isinf(y)[0])
y = complex(np.inf, np.inf)/x
assert_(np.isinf(y)[0])
y = 0.0/x
assert_(np.isnan(y)[0])
def test_floor_division_complex(self):
# check that floor division, divmod and remainder raises type errors
x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128)
with pytest.raises(TypeError):
x // 7
with pytest.raises(TypeError):
np.divmod(x, 7)
with pytest.raises(TypeError):
np.remainder(x, 7)
def test_floor_division_signed_zero(self):
# Check that the sign bit is correctly set when dividing positive and
# negative zero by one.
x = np.zeros(10)
assert_equal(np.signbit(x//1), 0)
assert_equal(np.signbit((-x)//1), 1)
@pytest.mark.parametrize('dtype', np.typecodes['Float'])
def test_floor_division_errors(self, dtype):
fnan = np.array(np.nan, dtype=dtype)
fone = np.array(1.0, dtype=dtype)
fzer = np.array(0.0, dtype=dtype)
finf = np.array(np.inf, dtype=dtype)
# divide by zero error check
with np.errstate(divide='raise', invalid='ignore'):
assert_raises(FloatingPointError, np.floor_divide, fone, fzer)
with np.errstate(divide='ignore', invalid='raise'):
np.floor_divide(fone, fzer)
# The following already contain a NaN and should not warn
with np.errstate(all='raise'):
np.floor_divide(fnan, fone)
np.floor_divide(fone, fnan)
np.floor_divide(fnan, fzer)
np.floor_divide(fzer, fnan)
@pytest.mark.parametrize('dtype', np.typecodes['Float'])
def test_floor_division_corner_cases(self, dtype):
# test corner cases like 1.0//0.0 for errors and return vals
x = np.zeros(10, dtype=dtype)
y = np.ones(10, dtype=dtype)
fnan = np.array(np.nan, dtype=dtype)
fone = np.array(1.0, dtype=dtype)
fzer = np.array(0.0, dtype=dtype)
finf = np.array(np.inf, dtype=dtype)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in floor_divide")
div = np.floor_divide(fnan, fone)
assert(np.isnan(div)), "dt: %s, div: %s" % (dt, div)
div = np.floor_divide(fone, fnan)
assert(np.isnan(div)), "dt: %s, div: %s" % (dt, div)
div = np.floor_divide(fnan, fzer)
assert(np.isnan(div)), "dt: %s, div: %s" % (dt, div)
# verify 1.0//0.0 computations return inf
with np.errstate(divide='ignore'):
z = np.floor_divide(y, x)
assert_(np.isinf(z).all())
def floor_divide_and_remainder(x, y):
return (np.floor_divide(x, y), np.remainder(x, y))
def _signs(dt):
if dt in np.typecodes['UnsignedInteger']:
return (+1,)
else:
return (+1, -1)
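# A minimal sketch of the invariant the remainder tests below verify: for
# floor_divide/remainder the remainder takes the divisor's sign (unlike
# fmod) and div*b + rem reconstructs the dividend. Illustrative only.
def _divmod_invariant_demo():
    a, b = np.array(71), np.array(-19)
    div, rem = np.divmod(a, b)
    assert div * b + rem == a and b < rem <= 0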
class TestRemainder:
def test_remainder_basic(self):
dt = np.typecodes['AllInteger'] + np.typecodes['Float']
for op in [floor_divide_and_remainder, np.divmod]:
for dt1, dt2 in itertools.product(dt, dt):
for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
a = np.array(sg1*71, dtype=dt1)
b = np.array(sg2*19, dtype=dt2)
div, rem = op(a, b)
assert_equal(div*b + rem, a, err_msg=msg)
if sg2 == -1:
assert_(b < rem <= 0, msg)
else:
assert_(b > rem >= 0, msg)
def test_float_remainder_exact(self):
# test that float results are exact for small integers. This also
# holds for the same integers scaled by powers of two.
nlst = list(range(-127, 0))
plst = list(range(1, 128))
dividend = nlst + [0] + plst
divisor = nlst + plst
arg = list(itertools.product(dividend, divisor))
tgt = list(divmod(*t) for t in arg)
a, b = np.array(arg, dtype=int).T
        # Convert the exact integer results from Python to float so that
        # signed zero can be used; it is checked below.
tgtdiv, tgtrem = np.array(tgt, dtype=float).T
tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)
for op in [floor_divide_and_remainder, np.divmod]:
for dt in np.typecodes['Float']:
msg = 'op: %s, dtype: %s' % (op.__name__, dt)
fa = a.astype(dt)
fb = b.astype(dt)
div, rem = op(fa, fb)
assert_equal(div, tgtdiv, err_msg=msg)
assert_equal(rem, tgtrem, err_msg=msg)
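    @staticmethod
    def _exactness_demo():
        # Illustrative only: quotients and remainders of small integers are
        # exactly representable in binary floating point, so the float
        # results above must match Python's integer divmod bit-for-bit.
        div, rem = np.divmod(np.float64(-7.0), np.float64(3.0))
        assert div == -3.0 and rem == 2.0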
def test_float_remainder_roundoff(self):
# gh-6127
dt = np.typecodes['Float']
for op in [floor_divide_and_remainder, np.divmod]:
for dt1, dt2 in itertools.product(dt, dt):
for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
a = np.array(sg1*78*6e-8, dtype=dt1)
b = np.array(sg2*6e-8, dtype=dt2)
div, rem = op(a, b)
# Equal assertion should hold when fmod is used
assert_equal(div*b + rem, a, err_msg=msg)
if sg2 == -1:
assert_(b < rem <= 0, msg)
else:
assert_(b > rem >= 0, msg)
@pytest.mark.xfail(sys.platform.startswith("darwin"),
reason="MacOS seems to not give the correct 'invalid' warning for "
"`fmod`. Hopefully, others always do.")
@pytest.mark.parametrize('dtype', np.typecodes['Float'])
def test_float_divmod_errors(self, dtype):
# Check valid errors raised for divmod and remainder
fzero = np.array(0.0, dtype=dtype)
fone = np.array(1.0, dtype=dtype)
finf = np.array(np.inf, dtype=dtype)
fnan = np.array(np.nan, dtype=dtype)
# since divmod is combination of both remainder and divide
# ops it will set both dividebyzero and invalid flags
with np.errstate(divide='raise', invalid='ignore'):
assert_raises(FloatingPointError, np.divmod, fone, fzero)
with np.errstate(divide='ignore', invalid='raise'):
assert_raises(FloatingPointError, np.divmod, fone, fzero)
with np.errstate(invalid='raise'):
assert_raises(FloatingPointError, np.divmod, fzero, fzero)
with np.errstate(invalid='raise'):
assert_raises(FloatingPointError, np.divmod, finf, finf)
with np.errstate(divide='ignore', invalid='raise'):
assert_raises(FloatingPointError, np.divmod, finf, fzero)
with np.errstate(divide='raise', invalid='ignore'):
# inf / 0 does not set any flags, only the modulo creates a NaN
np.divmod(finf, fzero)
@pytest.mark.xfail(sys.platform.startswith("darwin"),
reason="MacOS seems to not give the correct 'invalid' warning for "
"`fmod`. Hopefully, others always do.")
@pytest.mark.parametrize('dtype', np.typecodes['Float'])
@pytest.mark.parametrize('fn', [np.fmod, np.remainder])
def test_float_remainder_errors(self, dtype, fn):
fzero = np.array(0.0, dtype=dtype)
fone = np.array(1.0, dtype=dtype)
finf = np.array(np.inf, dtype=dtype)
fnan = np.array(np.nan, dtype=dtype)
# The following already contain a NaN and should not warn.
with np.errstate(all='raise'):
with pytest.raises(FloatingPointError,
match="invalid value"):
fn(fone, fzero)
fn(fnan, fzero)
fn(fzero, fnan)
fn(fone, fnan)
fn(fnan, fone)
def test_float_remainder_overflow(self):
a = np.finfo(np.float64).tiny
with np.errstate(over='ignore', invalid='ignore'):
div, mod = np.divmod(4, a)
            assert_(np.isinf(div))
assert_(mod == 0)
with np.errstate(over='raise', invalid='ignore'):
assert_raises(FloatingPointError, np.divmod, 4, a)
with np.errstate(invalid='raise', over='ignore'):
assert_raises(FloatingPointError, np.divmod, 4, a)
def test_float_divmod_corner_cases(self):
# check nan cases
for dt in np.typecodes['Float']:
fnan = np.array(np.nan, dtype=dt)
fone = np.array(1.0, dtype=dt)
fzer = np.array(0.0, dtype=dt)
finf = np.array(np.inf, dtype=dt)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in divmod")
sup.filter(RuntimeWarning, "divide by zero encountered in divmod")
                div, rem = np.divmod(fone, fzer)
                assert np.isinf(div), 'dt: %s, div: %s' % (dt, div)
                assert np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)
                div, rem = np.divmod(fzer, fzer)
                assert np.isnan(div), 'dt: %s, div: %s' % (dt, div)
                assert np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)
                div, rem = np.divmod(finf, finf)
                assert np.isnan(div), 'dt: %s, div: %s' % (dt, div)
                assert np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)
                div, rem = np.divmod(finf, fzer)
                assert np.isinf(div), 'dt: %s, div: %s' % (dt, div)
                assert np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)
                div, rem = np.divmod(fnan, fone)
                assert np.isnan(div), 'dt: %s, div: %s' % (dt, div)
                assert np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)
                div, rem = np.divmod(fone, fnan)
                assert np.isnan(div), 'dt: %s, div: %s' % (dt, div)
                assert np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)
                div, rem = np.divmod(fnan, fzer)
                assert np.isnan(div), 'dt: %s, div: %s' % (dt, div)
                assert np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)
def test_float_remainder_corner_cases(self):
# Check remainder magnitude.
for dt in np.typecodes['Float']:
fone = np.array(1.0, dtype=dt)
fzer = np.array(0.0, dtype=dt)
fnan = np.array(np.nan, dtype=dt)
b = np.array(1.0, dtype=dt)
a = np.nextafter(np.array(0.0, dtype=dt), -b)
rem = np.remainder(a, b)
assert_(rem <= b, 'dt: %s' % dt)
rem = np.remainder(-a, -b)
assert_(rem >= -b, 'dt: %s' % dt)
# Check nans, inf
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in remainder")
sup.filter(RuntimeWarning, "invalid value encountered in fmod")
for dt in np.typecodes['Float']:
fone = np.array(1.0, dtype=dt)
fzer = np.array(0.0, dtype=dt)
finf = np.array(np.inf, dtype=dt)
fnan = np.array(np.nan, dtype=dt)
rem = np.remainder(fone, fzer)
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
# MSVC 2008 returns NaN here, so disable the check.
#rem = np.remainder(fone, finf)
#assert_(rem == fone, 'dt: %s, rem: %s' % (dt, rem))
rem = np.remainder(finf, fone)
fmod = np.fmod(finf, fone)
assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
rem = np.remainder(finf, finf)
fmod = np.fmod(finf, fone)
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
rem = np.remainder(finf, fzer)
fmod = np.fmod(finf, fzer)
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
rem = np.remainder(fone, fnan)
fmod = np.fmod(fone, fnan)
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
rem = np.remainder(fnan, fzer)
fmod = np.fmod(fnan, fzer)
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
                assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
rem = np.remainder(fnan, fone)
fmod = np.fmod(fnan, fone)
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
                assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
class TestDivisionIntegerOverflowsAndDivideByZero:
result_type = namedtuple('result_type',
['nocast', 'casted'])
helper_lambdas = {
'zero': lambda dtype: 0,
'min': lambda dtype: np.iinfo(dtype).min,
'neg_min': lambda dtype: -np.iinfo(dtype).min,
'min-zero': lambda dtype: (np.iinfo(dtype).min, 0),
'neg_min-zero': lambda dtype: (-np.iinfo(dtype).min, 0),
}
overflow_results = {
np.remainder: result_type(
helper_lambdas['zero'], helper_lambdas['zero']),
np.fmod: result_type(
helper_lambdas['zero'], helper_lambdas['zero']),
operator.mod: result_type(
helper_lambdas['zero'], helper_lambdas['zero']),
operator.floordiv: result_type(
helper_lambdas['min'], helper_lambdas['neg_min']),
np.floor_divide: result_type(
helper_lambdas['min'], helper_lambdas['neg_min']),
np.divmod: result_type(
helper_lambdas['min-zero'], helper_lambdas['neg_min-zero'])
}
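    # An illustrative example of the nocast/casted split above (int8 case):
    # the same-dtype quotient of iinfo.min by -1 wraps back to iinfo.min,
    # while a wider divisor dtype promotes first and yields -iinfo.min.
    @staticmethod
    def _overflow_demo():
        with np.errstate(over='ignore'):
            wrapped = np.array([-128], dtype=np.int8) // np.array([-1], dtype=np.int8)
        promoted = np.array([-128], dtype=np.int8) // np.array([-1], dtype=np.int16)
        assert wrapped[0] == -128 and promoted[0] == 128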
@pytest.mark.parametrize("dtype", np.typecodes["Integer"])
def test_signed_division_overflow(self, dtype):
to_check = interesting_binop_operands(np.iinfo(dtype).min, -1, dtype)
for op1, op2, extractor, operand_identifier in to_check:
with pytest.warns(RuntimeWarning, match="overflow encountered"):
res = op1 // op2
assert res.dtype == op1.dtype
assert extractor(res) == np.iinfo(op1.dtype).min
# Remainder is well defined though, and does not warn:
res = op1 % op2
assert res.dtype == op1.dtype
assert extractor(res) == 0
# Check fmod as well:
res = np.fmod(op1, op2)
assert extractor(res) == 0
# Divmod warns for the division part:
with pytest.warns(RuntimeWarning, match="overflow encountered"):
res1, res2 = np.divmod(op1, op2)
assert res1.dtype == res2.dtype == op1.dtype
assert extractor(res1) == np.iinfo(op1.dtype).min
assert extractor(res2) == 0
@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
def test_divide_by_zero(self, dtype):
# Note that the return value cannot be well defined here, but NumPy
# currently uses 0 consistently. This could be changed.
to_check = interesting_binop_operands(1, 0, dtype)
for op1, op2, extractor, operand_identifier in to_check:
with pytest.warns(RuntimeWarning, match="divide by zero"):
res = op1 // op2
assert res.dtype == op1.dtype
assert extractor(res) == 0
with pytest.warns(RuntimeWarning, match="divide by zero"):
res1, res2 = np.divmod(op1, op2)
assert res1.dtype == res2.dtype == op1.dtype
assert extractor(res1) == 0
assert extractor(res2) == 0
@pytest.mark.parametrize("dividend_dtype",
np.sctypes['int'])
@pytest.mark.parametrize("divisor_dtype",
np.sctypes['int'])
@pytest.mark.parametrize("operation",
[np.remainder, np.fmod, np.divmod, np.floor_divide,
operator.mod, operator.floordiv])
@np.errstate(divide='warn', over='warn')
def test_overflows(self, dividend_dtype, divisor_dtype, operation):
        # SIMD processes these operations in chunks that are a multiple of
        # the register size, falling back to the default implementation for
        # the leftover elements, so we try to cover all of those paths here.
arrays = [np.array([np.iinfo(dividend_dtype).min]*i,
dtype=dividend_dtype) for i in range(1, 129)]
divisor = np.array([-1], dtype=divisor_dtype)
# If dividend is a larger type than the divisor (`else` case),
# then, result will be a larger type than dividend and will not
# result in an overflow for `divmod` and `floor_divide`.
if np.dtype(dividend_dtype).itemsize >= np.dtype(
divisor_dtype).itemsize and operation in (
np.divmod, np.floor_divide, operator.floordiv):
with pytest.warns(
RuntimeWarning,
match="overflow encountered in"):
result = operation(
dividend_dtype(np.iinfo(dividend_dtype).min),
divisor_dtype(-1)
)
assert result == self.overflow_results[operation].nocast(
dividend_dtype)
# Arrays
for a in arrays:
# In case of divmod, we need to flatten the result
# column first as we get a column vector of quotient and
# remainder and a normal flatten of the expected result.
with pytest.warns(
RuntimeWarning,
match="overflow encountered in"):
result = np.array(operation(a, divisor)).flatten('f')
expected_array = np.array(
[self.overflow_results[operation].nocast(
dividend_dtype)]*len(a)).flatten()
assert_array_equal(result, expected_array)
else:
# Scalars
result = operation(
dividend_dtype(np.iinfo(dividend_dtype).min),
divisor_dtype(-1)
)
assert result == self.overflow_results[operation].casted(
dividend_dtype)
# Arrays
for a in arrays:
# See above comment on flatten
result = np.array(operation(a, divisor)).flatten('f')
expected_array = np.array(
[self.overflow_results[operation].casted(
dividend_dtype)]*len(a)).flatten()
assert_array_equal(result, expected_array)
class TestCbrt:
def test_cbrt_scalar(self):
assert_almost_equal((np.cbrt(np.float32(-2.5)**3)), -2.5)
def test_cbrt(self):
x = np.array([1., 2., -3., np.inf, -np.inf])
assert_almost_equal(np.cbrt(x**3), x)
assert_(np.isnan(np.cbrt(np.nan)))
assert_equal(np.cbrt(np.inf), np.inf)
assert_equal(np.cbrt(-np.inf), -np.inf)
class TestPower:
def test_power_float(self):
x = np.array([1., 2., 3.])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_equal(x**2, [1., 4., 9.])
y = x.copy()
y **= 2
assert_equal(y, [1., 4., 9.])
assert_almost_equal(x**(-1), [1., 0.5, 1./3])
assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)])
for out, inp, msg in _gen_alignment_data(dtype=np.float32,
type='unary',
max_size=11):
exp = [ncu.sqrt(i) for i in inp]
assert_almost_equal(inp**(0.5), exp, err_msg=msg)
np.sqrt(inp, out=out)
assert_equal(out, exp, err_msg=msg)
for out, inp, msg in _gen_alignment_data(dtype=np.float64,
type='unary',
max_size=7):
exp = [ncu.sqrt(i) for i in inp]
assert_almost_equal(inp**(0.5), exp, err_msg=msg)
np.sqrt(inp, out=out)
assert_equal(out, exp, err_msg=msg)
def test_power_complex(self):
x = np.array([1+2j, 2+3j, 3+4j])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j])
assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3])
assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4])
assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)])
assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2])
assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197,
(-117-44j)/15625])
assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j),
ncu.sqrt(3+4j)])
norm = 1./((x**14)[0])
assert_almost_equal(x**14 * norm,
[i * norm for i in [-76443+16124j, 23161315+58317492j,
5583548873 + 2465133864j]])
# Ticket #836
def assert_complex_equal(x, y):
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
for z in [complex(0, np.inf), complex(1, np.inf)]:
z = np.array([z], dtype=np.complex_)
with np.errstate(invalid="ignore"):
assert_complex_equal(z**1, z)
assert_complex_equal(z**2, z*z)
assert_complex_equal(z**3, z*z*z)
def test_power_zero(self):
# ticket #1271
zero = np.array([0j])
one = np.array([1+0j])
cnan = np.array([complex(np.nan, np.nan)])
# FIXME cinf not tested.
#cinf = np.array([complex(np.inf, 0)])
def assert_complex_equal(x, y):
x, y = np.asarray(x), np.asarray(y)
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
# positive powers
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(np.power(zero, p), zero)
# zero power
assert_complex_equal(np.power(zero, 0), one)
with np.errstate(invalid="ignore"):
assert_complex_equal(np.power(zero, 0+1j), cnan)
# negative power
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(np.power(zero, -p), cnan)
assert_complex_equal(np.power(zero, -1+0.2j), cnan)
def test_fast_power(self):
x = np.array([1, 2, 3], np.int16)
res = x**2.0
assert_((x**2.00001).dtype is res.dtype)
assert_array_equal(res, [1, 4, 9])
# check the inplace operation on the casted copy doesn't mess with x
assert_(not np.may_share_memory(res, x))
assert_array_equal(x, [1, 2, 3])
        # Check that the fast path ignores 1-element arrays that are not 0-d
res = x ** np.array([[[2]]])
assert_equal(res.shape, (1, 1, 3))
def test_integer_power(self):
a = np.array([15, 15], 'i8')
b = np.power(a, a)
assert_equal(b, [437893890380859375, 437893890380859375])
def test_integer_power_with_integer_zero_exponent(self):
dtypes = np.typecodes['Integer']
for dt in dtypes:
arr = np.arange(-10, 10, dtype=dt)
assert_equal(np.power(arr, 0), np.ones_like(arr))
dtypes = np.typecodes['UnsignedInteger']
for dt in dtypes:
arr = np.arange(10, dtype=dt)
assert_equal(np.power(arr, 0), np.ones_like(arr))
def test_integer_power_of_1(self):
dtypes = np.typecodes['AllInteger']
for dt in dtypes:
arr = np.arange(10, dtype=dt)
assert_equal(np.power(1, arr), np.ones_like(arr))
def test_integer_power_of_zero(self):
dtypes = np.typecodes['AllInteger']
for dt in dtypes:
arr = np.arange(1, 10, dtype=dt)
assert_equal(np.power(0, arr), np.zeros_like(arr))
def test_integer_to_negative_power(self):
dtypes = np.typecodes['Integer']
for dt in dtypes:
a = np.array([0, 1, 2, 3], dtype=dt)
b = np.array([0, 1, 2, -3], dtype=dt)
one = np.array(1, dtype=dt)
minusone = np.array(-1, dtype=dt)
assert_raises(ValueError, np.power, a, b)
assert_raises(ValueError, np.power, a, minusone)
assert_raises(ValueError, np.power, one, b)
assert_raises(ValueError, np.power, one, minusone)
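# An illustrative note on the ValueError above: an integer raised to a
# negative integer power has no integer representation, so NumPy refuses
# rather than truncating toward zero (use floats for reciprocal powers):
def _negative_int_power_demo():
    try:
        np.power(np.int64(2), -1)
        raised = False
    except ValueError:
        raised = True
    assert raised and np.power(2.0, -1) == 0.5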
class TestFloat_power:
def test_type_conversion(self):
arg_type = '?bhilBHILefdgFDG'
res_type = 'ddddddddddddgDDG'
for dtin, dtout in zip(arg_type, res_type):
msg = "dtin: %s, dtout: %s" % (dtin, dtout)
arg = np.ones(1, dtype=dtin)
res = np.float_power(arg, arg)
assert_(res.dtype.name == np.dtype(dtout).name, msg)
class TestLog2:
@pytest.mark.parametrize('dt', ['f', 'd', 'g'])
def test_log2_values(self, dt):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_almost_equal(np.log2(xf), yf)
@pytest.mark.parametrize("i", range(1, 65))
def test_log2_ints(self, i):
# a good log2 implementation should provide this,
# might fail on OS with bad libm
v = np.log2(2.**i)
assert_equal(v, float(i), err_msg='at exponent %d' % i)
def test_log2_special(self):
assert_equal(np.log2(1.), 0.)
assert_equal(np.log2(np.inf), np.inf)
assert_(np.isnan(np.log2(np.nan)))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.log2(-1.)))
assert_(np.isnan(np.log2(-np.inf)))
assert_equal(np.log2(0.), -np.inf)
assert_(w[0].category is RuntimeWarning)
assert_(w[1].category is RuntimeWarning)
assert_(w[2].category is RuntimeWarning)
class TestExp2:
def test_exp2_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f', 'd', 'g']:
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_almost_equal(np.exp2(yf), xf)
class TestLogAddExp2(_FilterInvalids):
# Need test for intermediate precisions
def test_logaddexp2_values(self):
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
xf = np.log2(np.array(x, dtype=dt))
yf = np.log2(np.array(y, dtype=dt))
zf = np.log2(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec_)
def test_logaddexp2_range(self):
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for dt in ['f', 'd', 'g']:
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp2(logxf, logyf), logzf)
def test_inf(self):
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
with np.errstate(invalid='raise'):
for dt in ['f', 'd', 'g']:
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp2(logxf, logyf), logzf)
def test_nan(self):
assert_(np.isnan(np.logaddexp2(np.nan, np.inf)))
assert_(np.isnan(np.logaddexp2(np.inf, np.nan)))
assert_(np.isnan(np.logaddexp2(np.nan, 0)))
assert_(np.isnan(np.logaddexp2(0, np.nan)))
assert_(np.isnan(np.logaddexp2(np.nan, np.nan)))
def test_reduce(self):
assert_equal(np.logaddexp2.identity, -np.inf)
assert_equal(np.logaddexp2.reduce([]), -np.inf)
assert_equal(np.logaddexp2.reduce([-np.inf]), -np.inf)
assert_equal(np.logaddexp2.reduce([-np.inf, 0]), 0)
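# The identity behind the tests above: logaddexp2(a, b) evaluates
# log2(2**a + 2**b) while staying finite for large |a| and |b|. A minimal
# illustrative check:
def _logaddexp2_identity_demo():
    a, b = 3.0, 5.0
    assert np.isclose(np.logaddexp2(a, b), np.log2(2.0 ** a + 2.0 ** b))
    assert np.isfinite(np.logaddexp2(1e6, 1e6))  # naive form would overflow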
class TestLog:
def test_log_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f', 'd', 'g']:
log2_ = 0.69314718055994530943
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)*log2_
assert_almost_equal(np.log(xf), yf)
# test aliasing(issue #17761)
x = np.array([2, 0.937500, 3, 0.947500, 1.054697])
xf = np.log(x)
assert_almost_equal(np.log(x, out=x), xf)
# test log() of max for dtype does not raise
for dt in ['f', 'd', 'g']:
with np.errstate(all='raise'):
x = np.finfo(dt).max
np.log(x)
def test_log_strides(self):
np.random.seed(42)
strides = np.array([-4,-3,-2,-1,1,2,3,4])
sizes = np.arange(2,100)
for ii in sizes:
x_f64 = np.float64(np.random.uniform(low=0.01, high=100.0,size=ii))
x_special = x_f64.copy()
x_special[3:-1:4] = 1.0
y_true = np.log(x_f64)
y_special = np.log(x_special)
for jj in strides:
assert_array_almost_equal_nulp(np.log(x_f64[::jj]), y_true[::jj], nulp=2)
assert_array_almost_equal_nulp(np.log(x_special[::jj]), y_special[::jj], nulp=2)
class TestExp:
def test_exp_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f', 'd', 'g']:
log2_ = 0.69314718055994530943
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)*log2_
assert_almost_equal(np.exp(yf), xf)
def test_exp_strides(self):
np.random.seed(42)
strides = np.array([-4,-3,-2,-1,1,2,3,4])
sizes = np.arange(2,100)
for ii in sizes:
x_f64 = np.float64(np.random.uniform(low=0.01, high=709.1,size=ii))
y_true = np.exp(x_f64)
for jj in strides:
assert_array_almost_equal_nulp(np.exp(x_f64[::jj]), y_true[::jj], nulp=2)
class TestSpecialFloats:
def test_exp_values(self):
with np.errstate(under='raise', over='raise'):
x = [np.nan, np.nan, np.inf, 0.]
y = [np.nan, -np.nan, np.inf, -np.inf]
for dt in ['f', 'd', 'g']:
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_equal(np.exp(yf), xf)
# See: https://github.com/numpy/numpy/issues/19192
@pytest.mark.xfail(
_glibc_older_than("2.17"),
reason="Older glibc versions may not raise appropriate FP exceptions"
)
def test_exp_exceptions(self):
with np.errstate(over='raise'):
assert_raises(FloatingPointError, np.exp, np.float32(100.))
assert_raises(FloatingPointError, np.exp, np.float32(1E19))
assert_raises(FloatingPointError, np.exp, np.float64(800.))
assert_raises(FloatingPointError, np.exp, np.float64(1E19))
with np.errstate(under='raise'):
assert_raises(FloatingPointError, np.exp, np.float32(-1000.))
assert_raises(FloatingPointError, np.exp, np.float32(-1E19))
assert_raises(FloatingPointError, np.exp, np.float64(-1000.))
assert_raises(FloatingPointError, np.exp, np.float64(-1E19))
def test_log_values(self):
with np.errstate(all='ignore'):
x = [np.nan, np.nan, np.inf, np.nan, -np.inf, np.nan]
y = [np.nan, -np.nan, np.inf, -np.inf, 0.0, -1.0]
y1p = [np.nan, -np.nan, np.inf, -np.inf, -1.0, -2.0]
for dt in ['f', 'd', 'g']:
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
yf1p = np.array(y1p, dtype=dt)
assert_equal(np.log(yf), xf)
assert_equal(np.log2(yf), xf)
assert_equal(np.log10(yf), xf)
assert_equal(np.log1p(yf1p), xf)
with np.errstate(divide='raise'):
for dt in ['f', 'd']:
assert_raises(FloatingPointError, np.log,
np.array(0.0, dtype=dt))
assert_raises(FloatingPointError, np.log2,
np.array(0.0, dtype=dt))
assert_raises(FloatingPointError, np.log10,
np.array(0.0, dtype=dt))
assert_raises(FloatingPointError, np.log1p,
np.array(-1.0, dtype=dt))
with np.errstate(invalid='raise'):
for dt in ['f', 'd']:
assert_raises(FloatingPointError, np.log,
np.array(-np.inf, dtype=dt))
assert_raises(FloatingPointError, np.log,
np.array(-1.0, dtype=dt))
assert_raises(FloatingPointError, np.log2,
np.array(-np.inf, dtype=dt))
assert_raises(FloatingPointError, np.log2,
np.array(-1.0, dtype=dt))
assert_raises(FloatingPointError, np.log10,
np.array(-np.inf, dtype=dt))
assert_raises(FloatingPointError, np.log10,
np.array(-1.0, dtype=dt))
assert_raises(FloatingPointError, np.log1p,
np.array(-np.inf, dtype=dt))
assert_raises(FloatingPointError, np.log1p,
np.array(-2.0, dtype=dt))
# See https://github.com/numpy/numpy/issues/18005
with assert_no_warnings():
a = np.array(1e9, dtype='float32')
np.log(a)
def test_sincos_values(self):
with np.errstate(all='ignore'):
x = [np.nan, np.nan, np.nan, np.nan]
y = [np.nan, -np.nan, np.inf, -np.inf]
for dt in ['f', 'd', 'g']:
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_equal(np.sin(yf), xf)
assert_equal(np.cos(yf), xf)
with np.errstate(invalid='raise'):
assert_raises(FloatingPointError, np.sin, np.float32(-np.inf))
assert_raises(FloatingPointError, np.sin, np.float32(np.inf))
assert_raises(FloatingPointError, np.cos, np.float32(-np.inf))
assert_raises(FloatingPointError, np.cos, np.float32(np.inf))
@pytest.mark.parametrize('dt', ['f', 'd', 'g'])
def test_sqrt_values(self, dt):
with np.errstate(all='ignore'):
x = [np.nan, np.nan, np.inf, np.nan, 0.]
y = [np.nan, -np.nan, np.inf, -np.inf, 0.]
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_equal(np.sqrt(yf), xf)
# with np.errstate(invalid='raise'):
# assert_raises(
# FloatingPointError, np.sqrt, np.array(-100., dtype=dt)
# )
def test_abs_values(self):
x = [np.nan, np.nan, np.inf, np.inf, 0., 0., 1.0, 1.0]
y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0., -1.0, 1.0]
for dt in ['f', 'd', 'g']:
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_equal(np.abs(yf), xf)
def test_square_values(self):
x = [np.nan, np.nan, np.inf, np.inf]
y = [np.nan, -np.nan, np.inf, -np.inf]
with np.errstate(all='ignore'):
for dt in ['f', 'd', 'g']:
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_equal(np.square(yf), xf)
with np.errstate(over='raise'):
assert_raises(FloatingPointError, np.square,
np.array(1E32, dtype='f'))
assert_raises(FloatingPointError, np.square,
np.array(1E200, dtype='d'))
def test_reciprocal_values(self):
with np.errstate(all='ignore'):
x = [np.nan, np.nan, 0.0, -0.0, np.inf, -np.inf]
y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0.]
for dt in ['f', 'd', 'g']:
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_equal(np.reciprocal(yf), xf)
with np.errstate(divide='raise'):
for dt in ['f', 'd', 'g']:
assert_raises(FloatingPointError, np.reciprocal,
np.array(-0.0, dtype=dt))
def test_tan(self):
with np.errstate(all='ignore'):
in_ = [np.nan, -np.nan, 0.0, -0.0, np.inf, -np.inf]
out = [np.nan, np.nan, 0.0, -0.0, np.nan, np.nan]
for dt in ['f', 'd']:
in_arr = np.array(in_, dtype=dt)
out_arr = np.array(out, dtype=dt)
assert_equal(np.tan(in_arr), out_arr)
with np.errstate(invalid='raise'):
for dt in ['f', 'd']:
assert_raises(FloatingPointError, np.tan,
np.array(np.inf, dtype=dt))
assert_raises(FloatingPointError, np.tan,
np.array(-np.inf, dtype=dt))
def test_arcsincos(self):
with np.errstate(all='ignore'):
in_ = [np.nan, -np.nan, np.inf, -np.inf]
out = [np.nan, np.nan, np.nan, np.nan]
for dt in ['f', 'd']:
in_arr = np.array(in_, dtype=dt)
out_arr = np.array(out, dtype=dt)
assert_equal(np.arcsin(in_arr), out_arr)
assert_equal(np.arccos(in_arr), out_arr)
for callable in [np.arcsin, np.arccos]:
for value in [np.inf, -np.inf, 2.0, -2.0]:
for dt in ['f', 'd']:
with np.errstate(invalid='raise'):
assert_raises(FloatingPointError, callable,
np.array(value, dtype=dt))
def test_arctan(self):
with np.errstate(all='ignore'):
in_ = [np.nan, -np.nan]
out = [np.nan, np.nan]
for dt in ['f', 'd']:
in_arr = np.array(in_, dtype=dt)
out_arr = np.array(out, dtype=dt)
assert_equal(np.arctan(in_arr), out_arr)
def test_sinh(self):
in_ = [np.nan, -np.nan, np.inf, -np.inf]
out = [np.nan, np.nan, np.inf, -np.inf]
for dt in ['f', 'd']:
in_arr = np.array(in_, dtype=dt)
out_arr = np.array(out, dtype=dt)
assert_equal(np.sinh(in_arr), out_arr)
with np.errstate(over='raise'):
assert_raises(FloatingPointError, np.sinh,
np.array(120.0, dtype='f'))
assert_raises(FloatingPointError, np.sinh,
np.array(1200.0, dtype='d'))
def test_cosh(self):
in_ = [np.nan, -np.nan, np.inf, -np.inf]
out = [np.nan, np.nan, np.inf, np.inf]
for dt in ['f', 'd']:
in_arr = np.array(in_, dtype=dt)
out_arr = np.array(out, dtype=dt)
assert_equal(np.cosh(in_arr), out_arr)
with np.errstate(over='raise'):
assert_raises(FloatingPointError, np.cosh,
np.array(120.0, dtype='f'))
assert_raises(FloatingPointError, np.cosh,
np.array(1200.0, dtype='d'))
def test_tanh(self):
in_ = [np.nan, -np.nan, np.inf, -np.inf]
out = [np.nan, np.nan, 1.0, -1.0]
for dt in ['f', 'd']:
in_arr = np.array(in_, dtype=dt)
out_arr = np.array(out, dtype=dt)
assert_equal(np.tanh(in_arr), out_arr)
def test_arcsinh(self):
in_ = [np.nan, -np.nan, np.inf, -np.inf]
out = [np.nan, np.nan, np.inf, -np.inf]
for dt in ['f', 'd']:
in_arr = np.array(in_, dtype=dt)
out_arr = np.array(out, dtype=dt)
assert_equal(np.arcsinh(in_arr), out_arr)
def test_arccosh(self):
with np.errstate(all='ignore'):
in_ = [np.nan, -np.nan, np.inf, -np.inf, 1.0, 0.0]
out = [np.nan, np.nan, np.inf, np.nan, 0.0, np.nan]
for dt in ['f', 'd']:
in_arr = np.array(in_, dtype=dt)
out_arr = np.array(out, dtype=dt)
assert_equal(np.arccosh(in_arr), out_arr)
for value in [0.0, -np.inf]:
with np.errstate(invalid='raise'):
for dt in ['f', 'd']:
assert_raises(FloatingPointError, np.arccosh,
np.array(value, dtype=dt))
def test_arctanh(self):
with np.errstate(all='ignore'):
in_ = [np.nan, -np.nan, np.inf, -np.inf, 1.0, -1.0, 2.0]
out = [np.nan, np.nan, np.nan, np.nan, np.inf, -np.inf, np.nan]
for dt in ['f', 'd']:
in_arr = np.array(in_, dtype=dt)
out_arr = np.array(out, dtype=dt)
assert_equal(np.arctanh(in_arr), out_arr)
for value in [1.01, np.inf, -np.inf, 1.0, -1.0]:
with np.errstate(invalid='raise', divide='raise'):
for dt in ['f', 'd']:
assert_raises(FloatingPointError, np.arctanh,
np.array(value, dtype=dt))
# See: https://github.com/numpy/numpy/issues/20448
@pytest.mark.xfail(
_glibc_older_than("2.17"),
reason="Older glibc versions may not raise appropriate FP exceptions"
)
def test_exp2(self):
with np.errstate(all='ignore'):
in_ = [np.nan, -np.nan, np.inf, -np.inf]
out = [np.nan, np.nan, np.inf, 0.0]
for dt in ['f', 'd']:
in_arr = np.array(in_, dtype=dt)
out_arr = np.array(out, dtype=dt)
assert_equal(np.exp2(in_arr), out_arr)
for value in [2000.0, -2000.0]:
with np.errstate(over='raise', under='raise'):
for dt in ['f', 'd']:
assert_raises(FloatingPointError, np.exp2,
np.array(value, dtype=dt))
def test_expm1(self):
with np.errstate(all='ignore'):
in_ = [np.nan, -np.nan, np.inf, -np.inf]
out = [np.nan, np.nan, np.inf, -1.0]
for dt in ['f', 'd']:
in_arr = np.array(in_, dtype=dt)
out_arr = np.array(out, dtype=dt)
assert_equal(np.expm1(in_arr), out_arr)
for value in [200.0, 2000.0]:
with np.errstate(over='raise'):
assert_raises(FloatingPointError, np.expm1,
np.array(value, dtype='f'))
class TestFPClass:
@pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4])
def test_fpclass(self, stride):
arr_f64 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 2.2251e-308, -2.2251e-308], dtype='d')
arr_f32 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 1.4013e-045, -1.4013e-045], dtype='f')
nan = np.array([True, True, False, False, False, False, False, False, False, False])
inf = np.array([False, False, True, True, False, False, False, False, False, False])
sign = np.array([False, True, False, True, True, False, True, False, False, True])
finite = np.array([False, False, False, False, True, True, True, True, True, True])
assert_equal(np.isnan(arr_f32[::stride]), nan[::stride])
assert_equal(np.isnan(arr_f64[::stride]), nan[::stride])
assert_equal(np.isinf(arr_f32[::stride]), inf[::stride])
assert_equal(np.isinf(arr_f64[::stride]), inf[::stride])
assert_equal(np.signbit(arr_f32[::stride]), sign[::stride])
assert_equal(np.signbit(arr_f64[::stride]), sign[::stride])
assert_equal(np.isfinite(arr_f32[::stride]), finite[::stride])
assert_equal(np.isfinite(arr_f64[::stride]), finite[::stride])
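# Side note (an added, illustrative sketch): the signbit expectations above
# rely on IEEE 754 keeping a sign bit even for zeros, so -0.0 and 0.0 are
# distinguishable by signbit while still comparing equal.
def _signbit_demo():
    assert np.signbit(np.float64(-0.0)) and not np.signbit(np.float64(0.0))
    assert np.float64(-0.0) == np.float64(0.0)   # yet they compare equal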
class TestLDExp:
@pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4])
@pytest.mark.parametrize("dtype", ['f', 'd'])
def test_ldexp(self, dtype, stride):
mant = np.array([0.125, 0.25, 0.5, 1., 1., 2., 4., 8.], dtype=dtype)
exp = np.array([3, 2, 1, 0, 0, -1, -2, -3], dtype='i')
out = np.zeros(8, dtype=dtype)
assert_equal(np.ldexp(mant[::stride], exp[::stride], out=out[::stride]), np.ones(8, dtype=dtype)[::stride])
assert_equal(out[::stride], np.ones(8, dtype=dtype)[::stride])
class TestFRExp:
@pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4])
@pytest.mark.parametrize("dtype", ['f', 'd'])
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="np.frexp gives different answers for NAN/INF on windows and linux")
def test_frexp(self, dtype, stride):
arr = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 1.0, -1.0], dtype=dtype)
mant_true = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 0.5, -0.5], dtype=dtype)
exp_true = np.array([0, 0, 0, 0, 0, 0, 1, 1], dtype='i')
out_mant = np.ones(8, dtype=dtype)
out_exp = 2*np.ones(8, dtype='i')
mant, exp = np.frexp(arr[::stride], out=(out_mant[::stride], out_exp[::stride]))
assert_equal(mant_true[::stride], mant)
assert_equal(exp_true[::stride], exp)
assert_equal(out_mant[::stride], mant_true[::stride])
assert_equal(out_exp[::stride], exp_true[::stride])
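# A minimal round-trip sketch (added for exposition): frexp decomposes a
# finite nonzero x into mant * 2**exp with 0.5 <= |mant| < 1, and ldexp
# reassembles it exactly, which is what the expected arrays above encode.
def _frexp_ldexp_roundtrip_demo():
    x = np.array([0.5, 1.0, -3.0, 1024.0])
    mant, exp = np.frexp(x)
    assert np.all(np.ldexp(mant, exp) == x)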
# func : [maxulperror, low, high]
avx_ufuncs = {'sqrt' :[1, 0., 100.],
'absolute' :[0, -100., 100.],
'reciprocal' :[1, 1., 100.],
'square' :[1, -100., 100.],
'rint' :[0, -100., 100.],
'floor' :[0, -100., 100.],
'ceil' :[0, -100., 100.],
'trunc' :[0, -100., 100.]}
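# A short note on the maxulperror bounds above (added for exposition): one
# ULP ("unit in the last place") at x is the gap np.spacing(x) to the next
# representable float, so maxulp=1 tolerates at most one representable value
# of error.
def _ulp_spacing_demo():
    # float32 has 23 fraction bits, so the spacing just above 1.0 is 2**-23
    assert np.spacing(np.float32(1.0)) == np.float32(2.0**-23)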
class TestAVXUfuncs:
def test_avx_based_ufunc(self):
strides = np.array([-4,-3,-2,-1,1,2,3,4])
np.random.seed(42)
for func, prop in avx_ufuncs.items():
maxulperr = prop[0]
minval = prop[1]
maxval = prop[2]
# various array sizes to ensure masking in AVX is tested
for size in range(1,32):
myfunc = getattr(np, func)
x_f32 = np.float32(np.random.uniform(low=minval, high=maxval,
size=size))
x_f64 = np.float64(x_f32)
x_f128 = np.longdouble(x_f32)
y_true128 = myfunc(x_f128)
if maxulperr == 0:
assert_equal(myfunc(x_f32), np.float32(y_true128))
assert_equal(myfunc(x_f64), np.float64(y_true128))
else:
assert_array_max_ulp(myfunc(x_f32), np.float32(y_true128),
maxulp=maxulperr)
assert_array_max_ulp(myfunc(x_f64), np.float64(y_true128),
maxulp=maxulperr)
# various strides to test gather instruction
if size > 1:
y_true32 = myfunc(x_f32)
y_true64 = myfunc(x_f64)
for jj in strides:
assert_equal(myfunc(x_f64[::jj]), y_true64[::jj])
assert_equal(myfunc(x_f32[::jj]), y_true32[::jj])
class TestAVXFloat32Transcendental:
def test_exp_float32(self):
np.random.seed(42)
x_f32 = np.float32(np.random.uniform(low=0.0,high=88.1,size=1000000))
x_f64 = np.float64(x_f32)
assert_array_max_ulp(np.exp(x_f32), np.float32(np.exp(x_f64)), maxulp=3)
def test_log_float32(self):
np.random.seed(42)
x_f32 = np.float32(np.random.uniform(low=0.0,high=1000,size=1000000))
x_f64 = np.float64(x_f32)
assert_array_max_ulp(np.log(x_f32), np.float32(np.log(x_f64)), maxulp=4)
def test_sincos_float32(self):
np.random.seed(42)
N = 1000000
M = np.int_(N/20)
index = np.random.randint(low=0, high=N, size=M)
x_f32 = np.float32(np.random.uniform(low=-100.,high=100.,size=N))
if not _glibc_older_than("2.17"):
# test coverage for elements > 117435.992f for which glibc is used
# this is known to be problematic on old glibc, so skip it there
x_f32[index] = np.float32(10E+10*np.random.rand(M))
x_f64 = np.float64(x_f32)
assert_array_max_ulp(np.sin(x_f32), np.float32(np.sin(x_f64)), maxulp=2)
assert_array_max_ulp(np.cos(x_f32), np.float32(np.cos(x_f64)), maxulp=2)
        # test aliasing (issue #17761)
tx_f32 = x_f32.copy()
assert_array_max_ulp(np.sin(x_f32, out=x_f32), np.float32(np.sin(x_f64)), maxulp=2)
assert_array_max_ulp(np.cos(tx_f32, out=tx_f32), np.float32(np.cos(x_f64)), maxulp=2)
def test_strided_float32(self):
np.random.seed(42)
strides = np.array([-4,-3,-2,-1,1,2,3,4])
sizes = np.arange(2,100)
for ii in sizes:
x_f32 = np.float32(np.random.uniform(low=0.01,high=88.1,size=ii))
x_f32_large = x_f32.copy()
x_f32_large[3:-1:4] = 120000.0
exp_true = np.exp(x_f32)
log_true = np.log(x_f32)
sin_true = np.sin(x_f32_large)
cos_true = np.cos(x_f32_large)
for jj in strides:
assert_array_almost_equal_nulp(np.exp(x_f32[::jj]), exp_true[::jj], nulp=2)
assert_array_almost_equal_nulp(np.log(x_f32[::jj]), log_true[::jj], nulp=2)
assert_array_almost_equal_nulp(np.sin(x_f32_large[::jj]), sin_true[::jj], nulp=2)
assert_array_almost_equal_nulp(np.cos(x_f32_large[::jj]), cos_true[::jj], nulp=2)
class TestLogAddExp(_FilterInvalids):
def test_logaddexp_values(self):
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
xf = np.log(np.array(x, dtype=dt))
yf = np.log(np.array(y, dtype=dt))
zf = np.log(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_)
def test_logaddexp_range(self):
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for dt in ['f', 'd', 'g']:
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp(logxf, logyf), logzf)
def test_inf(self):
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
with np.errstate(invalid='raise'):
for dt in ['f', 'd', 'g']:
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp(logxf, logyf), logzf)
def test_nan(self):
assert_(np.isnan(np.logaddexp(np.nan, np.inf)))
assert_(np.isnan(np.logaddexp(np.inf, np.nan)))
assert_(np.isnan(np.logaddexp(np.nan, 0)))
assert_(np.isnan(np.logaddexp(0, np.nan)))
assert_(np.isnan(np.logaddexp(np.nan, np.nan)))
def test_reduce(self):
assert_equal(np.logaddexp.identity, -np.inf)
assert_equal(np.logaddexp.reduce([]), -np.inf)
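# Rationale for the identity checked above (an added note): -np.inf is the
# neutral element because logaddexp(-inf, x) == log(exp(-inf) + exp(x)) == x.
def _logaddexp_identity_demo():
    assert np.logaddexp(-np.inf, 3.5) == 3.5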
class TestLog1p:
def test_log1p(self):
assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2))
assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6))
def test_special(self):
with np.errstate(invalid="ignore", divide="ignore"):
assert_equal(ncu.log1p(np.nan), np.nan)
assert_equal(ncu.log1p(np.inf), np.inf)
assert_equal(ncu.log1p(-1.), -np.inf)
assert_equal(ncu.log1p(-2.), np.nan)
assert_equal(ncu.log1p(-np.inf), np.nan)
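# Why log1p exists (an illustrative aside, not from the original suite): for
# tiny x, 1.0 + x rounds to 1.0 and log(1.0 + x) collapses to 0.0, while
# log1p(x) keeps full precision.
def _log1p_precision_demo():
    x = 1e-18
    assert np.log(1.0 + x) == 0.0   # cancellation: 1.0 + 1e-18 rounds to 1.0
    assert np.log1p(x) == 1e-18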
class TestExpm1:
def test_expm1(self):
assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1)
assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1)
def test_special(self):
assert_equal(ncu.expm1(np.inf), np.inf)
assert_equal(ncu.expm1(0.), 0.)
assert_equal(ncu.expm1(-0.), -0.)
assert_equal(ncu.expm1(np.inf), np.inf)
assert_equal(ncu.expm1(-np.inf), -1.)
def test_complex(self):
x = np.asarray(1e-12)
assert_allclose(x, ncu.expm1(x))
x = x.astype(np.complex128)
assert_allclose(x, ncu.expm1(x))
class TestHypot:
def test_simple(self):
assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2))
assert_almost_equal(ncu.hypot(0, 0), 0)
def test_reduce(self):
assert_almost_equal(ncu.hypot.reduce([3.0, 4.0]), 5.0)
assert_almost_equal(ncu.hypot.reduce([3.0, 4.0, 0]), 5.0)
assert_almost_equal(ncu.hypot.reduce([9.0, 12.0, 20.0]), 25.0)
assert_equal(ncu.hypot.reduce([]), 0.0)
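# Hedged aside (illustrative only): a key property of hypot is avoiding the
# spurious overflow of the naive sqrt(x*x + y*y) for large finite inputs.
def _hypot_overflow_demo():
    x = np.float64(1e200)
    with np.errstate(over='ignore'):
        naive = np.sqrt(x * x + x * x)   # x*x overflows to inf
    assert np.isinf(naive)
    assert np.isclose(np.hypot(x, x), x * np.sqrt(2.0))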
def assert_hypot_isnan(x, y):
with np.errstate(invalid='ignore'):
assert_(np.isnan(ncu.hypot(x, y)),
"hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y)))
def assert_hypot_isinf(x, y):
with np.errstate(invalid='ignore'):
assert_(np.isinf(ncu.hypot(x, y)),
"hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y)))
class TestHypotSpecialValues:
def test_nan_outputs(self):
assert_hypot_isnan(np.nan, np.nan)
assert_hypot_isnan(np.nan, 1)
def test_nan_outputs2(self):
assert_hypot_isinf(np.nan, np.inf)
assert_hypot_isinf(np.inf, np.nan)
assert_hypot_isinf(np.inf, 0)
assert_hypot_isinf(0, np.inf)
assert_hypot_isinf(np.inf, np.inf)
assert_hypot_isinf(np.inf, 23.0)
def test_no_fpe(self):
assert_no_warnings(ncu.hypot, np.inf, 0)
def assert_arctan2_isnan(x, y):
    assert_(np.isnan(ncu.arctan2(x, y)),
            "arctan2(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_ispinf(x, y):
    assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0),
            "arctan2(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_isninf(x, y):
    assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0),
            "arctan2(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_ispzero(x, y):
    assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))),
            "arctan2(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_isnzero(x, y):
    assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))),
            "arctan2(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y)))
class TestArctan2SpecialValues:
def test_one_one(self):
# atan2(1, 1) returns pi/4.
assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi)
assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi)
assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi)
def test_zero_nzero(self):
# atan2(+-0, -0) returns +-pi.
assert_almost_equal(ncu.arctan2(np.PZERO, np.NZERO), np.pi)
assert_almost_equal(ncu.arctan2(np.NZERO, np.NZERO), -np.pi)
def test_zero_pzero(self):
# atan2(+-0, +0) returns +-0.
assert_arctan2_ispzero(np.PZERO, np.PZERO)
assert_arctan2_isnzero(np.NZERO, np.PZERO)
def test_zero_negative(self):
# atan2(+-0, x) returns +-pi for x < 0.
assert_almost_equal(ncu.arctan2(np.PZERO, -1), np.pi)
assert_almost_equal(ncu.arctan2(np.NZERO, -1), -np.pi)
def test_zero_positive(self):
# atan2(+-0, x) returns +-0 for x > 0.
assert_arctan2_ispzero(np.PZERO, 1)
assert_arctan2_isnzero(np.NZERO, 1)
def test_positive_zero(self):
# atan2(y, +-0) returns +pi/2 for y > 0.
assert_almost_equal(ncu.arctan2(1, np.PZERO), 0.5 * np.pi)
assert_almost_equal(ncu.arctan2(1, np.NZERO), 0.5 * np.pi)
def test_negative_zero(self):
# atan2(y, +-0) returns -pi/2 for y < 0.
assert_almost_equal(ncu.arctan2(-1, np.PZERO), -0.5 * np.pi)
assert_almost_equal(ncu.arctan2(-1, np.NZERO), -0.5 * np.pi)
def test_any_ninf(self):
# atan2(+-y, -infinity) returns +-pi for finite y > 0.
assert_almost_equal(ncu.arctan2(1, np.NINF), np.pi)
assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi)
def test_any_pinf(self):
# atan2(+-y, +infinity) returns +-0 for finite y > 0.
assert_arctan2_ispzero(1, np.inf)
assert_arctan2_isnzero(-1, np.inf)
def test_inf_any(self):
# atan2(+-infinity, x) returns +-pi/2 for finite x.
assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi)
def test_inf_ninf(self):
# atan2(+-infinity, -infinity) returns +-3*pi/4.
assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi)
def test_inf_pinf(self):
# atan2(+-infinity, +infinity) returns +-pi/4.
assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi)
def test_nan_any(self):
# atan2(nan, x) returns nan for any x, including inf
assert_arctan2_isnan(np.nan, np.inf)
assert_arctan2_isnan(np.inf, np.nan)
assert_arctan2_isnan(np.nan, np.nan)
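# Context for the special values above (an added sketch): arctan2(y, x)
# resolves the quadrant from the signs of both arguments, which arctan(y/x)
# cannot do: (-1, -1) lies at -3*pi/4, yet -1/-1 == 1 points at pi/4.
def _arctan2_quadrant_demo():
    assert np.isclose(np.arctan2(-1.0, -1.0), -0.75 * np.pi)
    assert np.isclose(np.arctan(-1.0 / -1.0), 0.25 * np.pi)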
class TestLdexp:
def _check_ldexp(self, tp):
assert_almost_equal(ncu.ldexp(np.array(2., np.float32),
np.array(3, tp)), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.float64),
np.array(3, tp)), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble),
np.array(3, tp)), 16.)
def test_ldexp(self):
# The default Python int type should work
assert_almost_equal(ncu.ldexp(2., 3), 16.)
# The following int types should all be accepted
self._check_ldexp(np.int8)
self._check_ldexp(np.int16)
self._check_ldexp(np.int32)
self._check_ldexp('i')
self._check_ldexp('l')
def test_ldexp_overflow(self):
# silence warning emitted on overflow
with np.errstate(over="ignore"):
imax = np.iinfo(np.dtype('l')).max
imin = np.iinfo(np.dtype('l')).min
assert_equal(ncu.ldexp(2., imax), np.inf)
assert_equal(ncu.ldexp(2., imin), 0)
class TestMaximum(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.maximum.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), np.nan)
assert_equal(func(tmp2), np.nan)
def test_reduce_complex(self):
assert_equal(np.maximum.reduce([1, 2j]), 1)
assert_equal(np.maximum.reduce([1+3j, 2j]), 1+3j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([nan, nan, nan])
assert_equal(np.maximum(arg1, arg2), out)
def test_object_nans(self):
# Multiple checks to give this a chance to
# fail if cmp is used instead of rich compare.
# Failure cannot be guaranteed.
for i in range(1):
x = np.array(float('nan'), object)
y = 1.0
z = np.array(float('nan'), object)
assert_(np.maximum(x, y) == 1.0)
assert_(np.maximum(z, y) == 1.0)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
arg1 = np.array([0, cnan, cnan], dtype=complex)
arg2 = np.array([cnan, 0, cnan], dtype=complex)
out = np.array([nan, nan, nan], dtype=complex)
assert_equal(np.maximum(arg1, arg2), out)
def test_object_array(self):
arg1 = np.arange(5, dtype=object)
arg2 = arg1 + 1
assert_equal(np.maximum(arg1, arg2), arg2)
def test_strided_array(self):
arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf])
arr2 = np.array([-2.0,-1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0])
maxtrue = np.array([-2.0, 1.0, np.nan, 1.0, np.nan, np.nan, np.inf, -3.0])
out = np.ones(8)
out_maxtrue = np.array([-2.0, 1.0, 1.0, 10.0, 1.0, 1.0, np.nan, 1.0])
assert_equal(np.maximum(arr1,arr2), maxtrue)
assert_equal(np.maximum(arr1[::2],arr2[::2]), maxtrue[::2])
assert_equal(np.maximum(arr1[:4:], arr2[::2]), np.array([-2.0, np.nan, 10.0, 1.0]))
assert_equal(np.maximum(arr1[::3], arr2[:3:]), np.array([-2.0, 0.0, np.nan]))
assert_equal(np.maximum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-2.0, 10., np.nan]))
assert_equal(out, out_maxtrue)
def test_precision(self):
dtypes = [np.float16, np.float32, np.float64, np.longdouble]
for dt in dtypes:
dtmin = np.finfo(dt).min
dtmax = np.finfo(dt).max
d1 = dt(0.1)
d1_next = np.nextafter(d1, np.inf)
test_cases = [
# v1 v2 expected
(dtmin, -np.inf, dtmin),
(dtmax, -np.inf, dtmax),
(d1, d1_next, d1_next),
(dtmax, np.nan, np.nan),
]
for v1, v2, expected in test_cases:
assert_equal(np.maximum([v1], [v2]), [expected])
assert_equal(np.maximum.reduce([v1, v2]), expected)
class TestMinimum(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.minimum.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), np.nan)
assert_equal(func(tmp2), np.nan)
def test_reduce_complex(self):
assert_equal(np.minimum.reduce([1, 2j]), 2j)
assert_equal(np.minimum.reduce([1+3j, 2j]), 2j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([nan, nan, nan])
assert_equal(np.minimum(arg1, arg2), out)
def test_object_nans(self):
# Multiple checks to give this a chance to
# fail if cmp is used instead of rich compare.
# Failure cannot be guaranteed.
for i in range(1):
x = np.array(float('nan'), object)
y = 1.0
z = np.array(float('nan'), object)
assert_(np.minimum(x, y) == 1.0)
assert_(np.minimum(z, y) == 1.0)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
arg1 = np.array([0, cnan, cnan], dtype=complex)
arg2 = np.array([cnan, 0, cnan], dtype=complex)
out = np.array([nan, nan, nan], dtype=complex)
assert_equal(np.minimum(arg1, arg2), out)
def test_object_array(self):
arg1 = np.arange(5, dtype=object)
arg2 = arg1 + 1
assert_equal(np.minimum(arg1, arg2), arg1)
def test_strided_array(self):
arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf])
arr2 = np.array([-2.0,-1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0])
mintrue = np.array([-4.0, -1.0, np.nan, 0.0, np.nan, np.nan, 1.0, -np.inf])
out = np.ones(8)
out_mintrue = np.array([-4.0, 1.0, 1.0, 1.0, 1.0, 1.0, np.nan, 1.0])
assert_equal(np.minimum(arr1,arr2), mintrue)
assert_equal(np.minimum(arr1[::2],arr2[::2]), mintrue[::2])
assert_equal(np.minimum(arr1[:4:], arr2[::2]), np.array([-4.0, np.nan, 0.0, 0.0]))
assert_equal(np.minimum(arr1[::3], arr2[:3:]), np.array([-4.0, -1.0, np.nan]))
assert_equal(np.minimum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-4.0, 1.0, np.nan]))
assert_equal(out, out_mintrue)
def test_precision(self):
dtypes = [np.float16, np.float32, np.float64, np.longdouble]
for dt in dtypes:
dtmin = np.finfo(dt).min
dtmax = np.finfo(dt).max
d1 = dt(0.1)
d1_next = np.nextafter(d1, np.inf)
test_cases = [
# v1 v2 expected
(dtmin, np.inf, dtmin),
(dtmax, np.inf, dtmax),
(d1, d1_next, d1),
(dtmin, np.nan, np.nan),
]
for v1, v2, expected in test_cases:
assert_equal(np.minimum([v1], [v2]), [expected])
assert_equal(np.minimum.reduce([v1, v2]), expected)
class TestFmax(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.fmax.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), 9)
assert_equal(func(tmp2), 9)
def test_reduce_complex(self):
assert_equal(np.fmax.reduce([1, 2j]), 1)
assert_equal(np.fmax.reduce([1+3j, 2j]), 1+3j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([0, 0, nan])
assert_equal(np.fmax(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
arg1 = np.array([0, cnan, cnan], dtype=complex)
arg2 = np.array([cnan, 0, cnan], dtype=complex)
out = np.array([0, 0, nan], dtype=complex)
assert_equal(np.fmax(arg1, arg2), out)
def test_precision(self):
dtypes = [np.float16, np.float32, np.float64, np.longdouble]
for dt in dtypes:
dtmin = np.finfo(dt).min
dtmax = np.finfo(dt).max
d1 = dt(0.1)
d1_next = np.nextafter(d1, np.inf)
test_cases = [
# v1 v2 expected
(dtmin, -np.inf, dtmin),
(dtmax, -np.inf, dtmax),
(d1, d1_next, d1_next),
(dtmax, np.nan, dtmax),
]
for v1, v2, expected in test_cases:
assert_equal(np.fmax([v1], [v2]), [expected])
assert_equal(np.fmax.reduce([v1, v2]), expected)
class TestFmin(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.fmin.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), 1)
assert_equal(func(tmp2), 1)
def test_reduce_complex(self):
assert_equal(np.fmin.reduce([1, 2j]), 2j)
assert_equal(np.fmin.reduce([1+3j, 2j]), 2j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([0, 0, nan])
assert_equal(np.fmin(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
arg1 = np.array([0, cnan, cnan], dtype=complex)
arg2 = np.array([cnan, 0, cnan], dtype=complex)
out = np.array([0, 0, nan], dtype=complex)
assert_equal(np.fmin(arg1, arg2), out)
def test_precision(self):
dtypes = [np.float16, np.float32, np.float64, np.longdouble]
for dt in dtypes:
dtmin = np.finfo(dt).min
dtmax = np.finfo(dt).max
d1 = dt(0.1)
d1_next = np.nextafter(d1, np.inf)
test_cases = [
# v1 v2 expected
(dtmin, np.inf, dtmin),
(dtmax, np.inf, dtmax),
(d1, d1_next, d1),
(dtmin, np.nan, dtmin),
]
for v1, v2, expected in test_cases:
assert_equal(np.fmin([v1], [v2]), [expected])
assert_equal(np.fmin.reduce([v1, v2]), expected)
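# Summary of the split exercised by the four classes above (an added note):
# minimum/maximum propagate NaN, while fmin/fmax return the non-NaN operand
# when only one side is NaN.
def _nan_propagation_demo():
    assert np.isnan(np.minimum(np.nan, 1.0))
    assert np.fmin(np.nan, 1.0) == 1.0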
class TestBool:
def test_exceptions(self):
a = np.ones(1, dtype=np.bool_)
assert_raises(TypeError, np.negative, a)
assert_raises(TypeError, np.positive, a)
assert_raises(TypeError, np.subtract, a, a)
def test_truth_table_logical(self):
        # 2, 3 and 4 serve as true values
input1 = [0, 0, 3, 2]
input2 = [0, 4, 0, 2]
typecodes = (np.typecodes['AllFloat']
+ np.typecodes['AllInteger']
+ '?') # boolean
for dtype in map(np.dtype, typecodes):
arg1 = np.asarray(input1, dtype=dtype)
arg2 = np.asarray(input2, dtype=dtype)
# OR
out = [False, True, True, True]
for func in (np.logical_or, np.maximum):
assert_equal(func(arg1, arg2).astype(bool), out)
# AND
out = [False, False, False, True]
for func in (np.logical_and, np.minimum):
assert_equal(func(arg1, arg2).astype(bool), out)
# XOR
out = [False, True, True, False]
for func in (np.logical_xor, np.not_equal):
assert_equal(func(arg1, arg2).astype(bool), out)
def test_truth_table_bitwise(self):
arg1 = [False, False, True, True]
arg2 = [False, True, False, True]
out = [False, True, True, True]
assert_equal(np.bitwise_or(arg1, arg2), out)
out = [False, False, False, True]
assert_equal(np.bitwise_and(arg1, arg2), out)
out = [False, True, True, False]
assert_equal(np.bitwise_xor(arg1, arg2), out)
def test_reduce(self):
none = np.array([0, 0, 0, 0], bool)
some = np.array([1, 0, 1, 1], bool)
every = np.array([1, 1, 1, 1], bool)
empty = np.array([], bool)
arrs = [none, some, every, empty]
for arr in arrs:
assert_equal(np.logical_and.reduce(arr), all(arr))
for arr in arrs:
assert_equal(np.logical_or.reduce(arr), any(arr))
for arr in arrs:
assert_equal(np.logical_xor.reduce(arr), arr.sum() % 2 == 1)
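# Note (illustrative addition): reducing with logical_xor yields the parity
# of the number of true elements, which is what arr.sum() % 2 == 1 encodes.
def _xor_parity_demo():
    assert not np.logical_xor.reduce([True, True, False])   # even count
    assert np.logical_xor.reduce([True, True, True])        # odd count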
class TestBitwiseUFuncs:
bitwise_types = [np.dtype(c) for c in '?' + 'bBhHiIlLqQ' + 'O']
def test_values(self):
for dt in self.bitwise_types:
zeros = np.array([0], dtype=dt)
ones = np.array([-1], dtype=dt)
msg = "dt = '%s'" % dt.char
assert_equal(np.bitwise_not(zeros), ones, err_msg=msg)
assert_equal(np.bitwise_not(ones), zeros, err_msg=msg)
assert_equal(np.bitwise_or(zeros, zeros), zeros, err_msg=msg)
assert_equal(np.bitwise_or(zeros, ones), ones, err_msg=msg)
assert_equal(np.bitwise_or(ones, zeros), ones, err_msg=msg)
assert_equal(np.bitwise_or(ones, ones), ones, err_msg=msg)
assert_equal(np.bitwise_xor(zeros, zeros), zeros, err_msg=msg)
assert_equal(np.bitwise_xor(zeros, ones), ones, err_msg=msg)
assert_equal(np.bitwise_xor(ones, zeros), ones, err_msg=msg)
assert_equal(np.bitwise_xor(ones, ones), zeros, err_msg=msg)
assert_equal(np.bitwise_and(zeros, zeros), zeros, err_msg=msg)
assert_equal(np.bitwise_and(zeros, ones), zeros, err_msg=msg)
assert_equal(np.bitwise_and(ones, zeros), zeros, err_msg=msg)
assert_equal(np.bitwise_and(ones, ones), ones, err_msg=msg)
def test_types(self):
for dt in self.bitwise_types:
zeros = np.array([0], dtype=dt)
ones = np.array([-1], dtype=dt)
msg = "dt = '%s'" % dt.char
assert_(np.bitwise_not(zeros).dtype == dt, msg)
assert_(np.bitwise_or(zeros, zeros).dtype == dt, msg)
assert_(np.bitwise_xor(zeros, zeros).dtype == dt, msg)
assert_(np.bitwise_and(zeros, zeros).dtype == dt, msg)
def test_identity(self):
assert_(np.bitwise_or.identity == 0, 'bitwise_or')
assert_(np.bitwise_xor.identity == 0, 'bitwise_xor')
assert_(np.bitwise_and.identity == -1, 'bitwise_and')
def test_reduction(self):
binary_funcs = (np.bitwise_or, np.bitwise_xor, np.bitwise_and)
for dt in self.bitwise_types:
zeros = np.array([0], dtype=dt)
ones = np.array([-1], dtype=dt)
for f in binary_funcs:
msg = "dt: '%s', f: '%s'" % (dt, f)
assert_equal(f.reduce(zeros), zeros, err_msg=msg)
assert_equal(f.reduce(ones), ones, err_msg=msg)
# Test empty reduction, no object dtype
for dt in self.bitwise_types[:-1]:
# No object array types
empty = np.array([], dtype=dt)
for f in binary_funcs:
msg = "dt: '%s', f: '%s'" % (dt, f)
tgt = np.array(f.identity, dtype=dt)
res = f.reduce(empty)
assert_equal(res, tgt, err_msg=msg)
assert_(res.dtype == tgt.dtype, msg)
# Empty object arrays use the identity. Note that the types may
# differ, the actual type used is determined by the assign_identity
# function and is not the same as the type returned by the identity
# method.
for f in binary_funcs:
msg = "dt: '%s'" % (f,)
empty = np.array([], dtype=object)
tgt = f.identity
res = f.reduce(empty)
assert_equal(res, tgt, err_msg=msg)
# Non-empty object arrays do not use the identity
for f in binary_funcs:
msg = "dt: '%s'" % (f,)
btype = np.array([True], dtype=object)
assert_(type(f.reduce(btype)) is bool, msg)
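# Why bitwise_and.identity is -1 (an explanatory sketch, added): in two's
# complement, -1 has every bit set, so AND-ing with it changes nothing, just
# as OR-ing with 0 (the bitwise_or identity) changes nothing.
def _bitwise_identity_demo():
    x = np.int8(0b0101)
    assert np.bitwise_and(x, np.int8(-1)) == x   # -1 is 0b11111111 for int8
    assert np.bitwise_or(x, np.int8(0)) == x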
class TestInt:
def test_logical_not(self):
x = np.ones(10, dtype=np.int16)
o = np.ones(10 * 2, dtype=bool)
tgt = o.copy()
tgt[::2] = False
os = o[::2]
assert_array_equal(np.logical_not(x, out=os), False)
assert_array_equal(o, tgt)
class TestFloatingPoint:
def test_floating_point(self):
assert_equal(ncu.FLOATING_POINT_SUPPORT, 1)
class TestDegrees:
def test_degrees(self):
assert_almost_equal(ncu.degrees(np.pi), 180.0)
assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0)
class TestRadians:
def test_radians(self):
assert_almost_equal(ncu.radians(180.0), np.pi)
assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi)
class TestHeaviside:
def test_heaviside(self):
x = np.array([[-30.0, -0.1, 0.0, 0.2], [7.5, np.nan, np.inf, -np.inf]])
expectedhalf = np.array([[0.0, 0.0, 0.5, 1.0], [1.0, np.nan, 1.0, 0.0]])
expected1 = expectedhalf.copy()
expected1[0, 2] = 1
h = ncu.heaviside(x, 0.5)
assert_equal(h, expectedhalf)
h = ncu.heaviside(x, 1.0)
assert_equal(h, expected1)
x = x.astype(np.float32)
h = ncu.heaviside(x, np.float32(0.5))
assert_equal(h, expectedhalf.astype(np.float32))
h = ncu.heaviside(x, np.float32(1.0))
assert_equal(h, expected1.astype(np.float32))
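# For reference (an added note): np.heaviside(x, h0) is 0 for x < 0, h0 at
# x == 0, and 1 for x > 0, which is exactly the difference between the
# h=0.5 and h=1.0 expectations above.
def _heaviside_demo():
    assert np.heaviside(-2.0, 0.5) == 0.0
    assert np.heaviside(0.0, 0.5) == 0.5
    assert np.heaviside(3.0, 0.5) == 1.0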
class TestSign:
def test_sign(self):
a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
out = np.zeros(a.shape)
tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0])
with np.errstate(invalid='ignore'):
res = ncu.sign(a)
assert_equal(res, tgt)
res = ncu.sign(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
def test_sign_dtype_object(self):
# In reference to github issue #6229
foo = np.array([-.1, 0, .1])
a = np.sign(foo.astype(object))
b = np.sign(foo)
assert_array_equal(a, b)
def test_sign_dtype_nan_object(self):
# In reference to github issue #6229
def test_nan():
foo = np.array([np.nan])
# FIXME: a not used
a = np.sign(foo.astype(object))
assert_raises(TypeError, test_nan)
class TestMinMax:
def test_minmax_blocked(self):
# simd tests on max/min, test all alignments, slow but important
# for 2 * vz + 2 * (vs - 1) + 1 (unrolled once)
for dt, sz in [(np.float32, 15), (np.float64, 7)]:
for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',
max_size=sz):
for i in range(inp.size):
inp[:] = np.arange(inp.size, dtype=dt)
inp[i] = np.nan
emsg = lambda: '%r\n%s' % (inp, msg)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"invalid value encountered in reduce")
assert_(np.isnan(inp.max()), msg=emsg)
assert_(np.isnan(inp.min()), msg=emsg)
inp[i] = 1e10
assert_equal(inp.max(), 1e10, err_msg=msg)
inp[i] = -1e10
assert_equal(inp.min(), -1e10, err_msg=msg)
def test_lower_align(self):
# check data that is not aligned to element size
        # i.e. doubles are aligned to 4 bytes on i386
d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
assert_equal(d.max(), d[0])
assert_equal(d.min(), d[0])
def test_reduce_reorder(self):
        # gh-10370, gh-11029: some compilers reorder the call to
        # npy_getfloatstatus and put it before the call to an intrinsic
        # function that causes invalid status to be set. Also make sure
        # warnings are not emitted.
for n in (2, 4, 8, 16, 32):
for dt in (np.float32, np.float16, np.complex64):
for r in np.diagflat(np.array([np.nan] * n, dtype=dt)):
assert_equal(np.min(r), np.nan)
def test_minimize_no_warns(self):
a = np.minimum(np.nan, 1)
assert_equal(a, np.nan)
class TestAbsoluteNegative:
def test_abs_neg_blocked(self):
# simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1
for dt, sz in [(np.float32, 11), (np.float64, 5)]:
for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',
max_size=sz):
tgt = [ncu.absolute(i) for i in inp]
np.absolute(inp, out=out)
assert_equal(out, tgt, err_msg=msg)
assert_((out >= 0).all())
tgt = [-1*(i) for i in inp]
np.negative(inp, out=out)
assert_equal(out, tgt, err_msg=msg)
for v in [np.nan, -np.inf, np.inf]:
for i in range(inp.size):
d = np.arange(inp.size, dtype=dt)
inp[:] = -d
inp[i] = v
d[i] = -v if v == -np.inf else v
assert_array_equal(np.abs(inp), d, err_msg=msg)
np.abs(inp, out=out)
assert_array_equal(out, d, err_msg=msg)
assert_array_equal(-inp, -1*inp, err_msg=msg)
d = -1 * inp
np.negative(inp, out=out)
assert_array_equal(out, d, err_msg=msg)
def test_lower_align(self):
# check data that is not aligned to element size
        # i.e. doubles are aligned to 4 bytes on i386
d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
assert_equal(np.abs(d), d)
assert_equal(np.negative(d), -d)
np.negative(d, out=d)
np.negative(np.ones_like(d), out=d)
np.abs(d, out=d)
np.abs(np.ones_like(d), out=d)
class TestPositive:
def test_valid(self):
valid_dtypes = [int, float, complex, object]
for dtype in valid_dtypes:
x = np.arange(5, dtype=dtype)
result = np.positive(x)
assert_equal(x, result, err_msg=str(dtype))
def test_invalid(self):
with assert_raises(TypeError):
np.positive(True)
with assert_raises(TypeError):
np.positive(np.datetime64('2000-01-01'))
with assert_raises(TypeError):
np.positive(np.array(['foo'], dtype=str))
with assert_raises(TypeError):
np.positive(np.array(['bar'], dtype=object))
class TestSpecialMethods:
def test_wrap(self):
class with_wrap:
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
r = with_wrap()
r.arr = arr
r.context = context
return r
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
func, args, i = x.context
assert_(func is ncu.minimum)
assert_equal(len(args), 2)
assert_equal(args[0], a)
assert_equal(args[1], a)
assert_equal(i, 0)
def test_wrap_and_prepare_out(self):
# Calling convention for out should not affect how special methods are
# called
class StoreArrayPrepareWrap(np.ndarray):
_wrap_args = None
_prepare_args = None
def __new__(cls):
return np.zeros(()).view(cls)
def __array_wrap__(self, obj, context):
self._wrap_args = context[1]
return obj
def __array_prepare__(self, obj, context):
self._prepare_args = context[1]
return obj
@property
def args(self):
# We need to ensure these are fetched at the same time, before
# any other ufuncs are called by the assertions
return (self._prepare_args, self._wrap_args)
def __repr__(self):
return "a" # for short test output
def do_test(f_call, f_expected):
a = StoreArrayPrepareWrap()
f_call(a)
p, w = a.args
expected = f_expected(a)
try:
assert_equal(p, expected)
assert_equal(w, expected)
except AssertionError as e:
# assert_equal produces truly useless error messages
raise AssertionError("\n".join([
"Bad arguments passed in ufunc call",
" expected: {}".format(expected),
" __array_prepare__ got: {}".format(p),
" __array_wrap__ got: {}".format(w)
]))
# method not on the out argument
do_test(lambda a: np.add(a, 0), lambda a: (a, 0))
do_test(lambda a: np.add(a, 0, None), lambda a: (a, 0))
do_test(lambda a: np.add(a, 0, out=None), lambda a: (a, 0))
do_test(lambda a: np.add(a, 0, out=(None,)), lambda a: (a, 0))
# method on the out argument
do_test(lambda a: np.add(0, 0, a), lambda a: (0, 0, a))
do_test(lambda a: np.add(0, 0, out=a), lambda a: (0, 0, a))
do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a))
# Also check the where mask handling:
do_test(lambda a: np.add(a, 0, where=False), lambda a: (a, 0))
do_test(lambda a: np.add(0, 0, a, where=False), lambda a: (0, 0, a))
def test_wrap_with_iterable(self):
# test fix for bug #1026:
class with_wrap(np.ndarray):
__array_priority__ = 10
def __new__(cls):
return np.asarray(1).view(cls).copy()
def __array_wrap__(self, arr, context):
return arr.view(type(self))
a = with_wrap()
x = ncu.multiply(a, (1, 2, 3))
assert_(isinstance(x, with_wrap))
assert_array_equal(x, np.array((1, 2, 3)))
def test_priority_with_scalar(self):
# test fix for bug #826:
class A(np.ndarray):
__array_priority__ = 10
def __new__(cls):
return np.asarray(1.0, 'float64').view(cls).copy()
a = A()
x = np.float64(1)*a
assert_(isinstance(x, A))
assert_array_equal(x, np.array(1))
def test_old_wrap(self):
class with_wrap:
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr):
r = with_wrap()
r.arr = arr
return r
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
def test_priority(self):
class A:
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
r = type(self)()
r.arr = arr
r.context = context
return r
class B(A):
__array_priority__ = 20.
class C(A):
__array_priority__ = 40.
x = np.zeros(1)
a = A()
b = B()
c = C()
f = ncu.minimum
assert_(type(f(x, x)) is np.ndarray)
assert_(type(f(x, a)) is A)
assert_(type(f(x, b)) is B)
assert_(type(f(x, c)) is C)
assert_(type(f(a, x)) is A)
assert_(type(f(b, x)) is B)
assert_(type(f(c, x)) is C)
assert_(type(f(a, a)) is A)
assert_(type(f(a, b)) is B)
assert_(type(f(b, a)) is B)
assert_(type(f(b, b)) is B)
assert_(type(f(b, c)) is C)
assert_(type(f(c, b)) is C)
assert_(type(f(c, c)) is C)
        assert_(type(ncu.exp(a)) is A)
        assert_(type(ncu.exp(b)) is B)
        assert_(type(ncu.exp(c)) is C)
def test_failing_wrap(self):
class A:
def __array__(self):
return np.zeros(2)
def __array_wrap__(self, arr, context):
raise RuntimeError
a = A()
assert_raises(RuntimeError, ncu.maximum, a, a)
assert_raises(RuntimeError, ncu.maximum.reduce, a)
def test_failing_out_wrap(self):
singleton = np.array([1.0])
class Ok(np.ndarray):
def __array_wrap__(self, obj):
return singleton
class Bad(np.ndarray):
def __array_wrap__(self, obj):
raise RuntimeError
ok = np.empty(1).view(Ok)
bad = np.empty(1).view(Bad)
# double-free (segfault) of "ok" if "bad" raises an exception
for i in range(10):
assert_raises(RuntimeError, ncu.frexp, 1, ok, bad)
def test_none_wrap(self):
# Tests that issue #8507 is resolved. Previously, this would segfault
class A:
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context=None):
return None
a = A()
assert_equal(ncu.maximum(a, a), None)
def test_default_prepare(self):
class with_wrap:
__array_priority__ = 10
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
return arr
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x, np.zeros(1))
assert_equal(type(x), np.ndarray)
@pytest.mark.parametrize("use_where", [True, False])
def test_prepare(self, use_where):
class with_prepare(np.ndarray):
__array_priority__ = 10
def __array_prepare__(self, arr, context):
# make sure we can return a new
return np.array(arr).view(type=with_prepare)
a = np.array(1).view(type=with_prepare)
if use_where:
x = np.add(a, a, where=np.array(True))
else:
x = np.add(a, a)
assert_equal(x, np.array(2))
assert_equal(type(x), with_prepare)
@pytest.mark.parametrize("use_where", [True, False])
def test_prepare_out(self, use_where):
class with_prepare(np.ndarray):
__array_priority__ = 10
def __array_prepare__(self, arr, context):
return np.array(arr).view(type=with_prepare)
a = np.array([1]).view(type=with_prepare)
if use_where:
x = np.add(a, a, a, where=[True])
else:
x = np.add(a, a, a)
# Returned array is new, because of the strange
# __array_prepare__ above
assert_(not np.shares_memory(x, a))
assert_equal(x, np.array([2]))
assert_equal(type(x), with_prepare)
def test_failing_prepare(self):
class A:
def __array__(self):
return np.zeros(1)
def __array_prepare__(self, arr, context=None):
raise RuntimeError
a = A()
assert_raises(RuntimeError, ncu.maximum, a, a)
assert_raises(RuntimeError, ncu.maximum, a, a, where=False)
def test_array_too_many_args(self):
class A:
def __array__(self, dtype, context):
return np.zeros(1)
a = A()
assert_raises_regex(TypeError, '2 required positional', np.sum, a)
def test_ufunc_override(self):
# check override works even with instance with high priority.
class A:
def __array_ufunc__(self, func, method, *inputs, **kwargs):
return self, func, method, inputs, kwargs
class MyNDArray(np.ndarray):
__array_priority__ = 100
a = A()
b = np.array([1]).view(MyNDArray)
res0 = np.multiply(a, b)
res1 = np.multiply(b, b, out=a)
# self
assert_equal(res0[0], a)
assert_equal(res1[0], a)
assert_equal(res0[1], np.multiply)
assert_equal(res1[1], np.multiply)
assert_equal(res0[2], '__call__')
assert_equal(res1[2], '__call__')
assert_equal(res0[3], (a, b))
assert_equal(res1[3], (b, b))
assert_equal(res0[4], {})
assert_equal(res1[4], {'out': (a,)})
def test_ufunc_override_mro(self):
# Some multi arg functions for testing.
def tres_mul(a, b, c):
return a * b * c
def quatro_mul(a, b, c, d):
return a * b * c * d
# Make these into ufuncs.
three_mul_ufunc = np.frompyfunc(tres_mul, 3, 1)
four_mul_ufunc = np.frompyfunc(quatro_mul, 4, 1)
class A:
def __array_ufunc__(self, func, method, *inputs, **kwargs):
return "A"
class ASub(A):
def __array_ufunc__(self, func, method, *inputs, **kwargs):
return "ASub"
class B:
def __array_ufunc__(self, func, method, *inputs, **kwargs):
return "B"
class C:
def __init__(self):
self.count = 0
def __array_ufunc__(self, func, method, *inputs, **kwargs):
self.count += 1
return NotImplemented
class CSub(C):
def __array_ufunc__(self, func, method, *inputs, **kwargs):
self.count += 1
return NotImplemented
a = A()
a_sub = ASub()
b = B()
c = C()
# Standard
res = np.multiply(a, a_sub)
assert_equal(res, "ASub")
res = np.multiply(a_sub, b)
assert_equal(res, "ASub")
# With 1 NotImplemented
res = np.multiply(c, a)
assert_equal(res, "A")
assert_equal(c.count, 1)
# Check our counter works, so we can trust tests below.
res = np.multiply(c, a)
assert_equal(c.count, 2)
# Both NotImplemented.
c = C()
c_sub = CSub()
assert_raises(TypeError, np.multiply, c, c_sub)
assert_equal(c.count, 1)
assert_equal(c_sub.count, 1)
c.count = c_sub.count = 0
assert_raises(TypeError, np.multiply, c_sub, c)
assert_equal(c.count, 1)
assert_equal(c_sub.count, 1)
c.count = 0
assert_raises(TypeError, np.multiply, c, c)
assert_equal(c.count, 1)
c.count = 0
assert_raises(TypeError, np.multiply, 2, c)
assert_equal(c.count, 1)
# Ternary testing.
assert_equal(three_mul_ufunc(a, 1, 2), "A")
assert_equal(three_mul_ufunc(1, a, 2), "A")
assert_equal(three_mul_ufunc(1, 2, a), "A")
assert_equal(three_mul_ufunc(a, a, 6), "A")
assert_equal(three_mul_ufunc(a, 2, a), "A")
assert_equal(three_mul_ufunc(a, 2, b), "A")
assert_equal(three_mul_ufunc(a, 2, a_sub), "ASub")
assert_equal(three_mul_ufunc(a, a_sub, 3), "ASub")
c.count = 0
assert_equal(three_mul_ufunc(c, a_sub, 3), "ASub")
assert_equal(c.count, 1)
c.count = 0
assert_equal(three_mul_ufunc(1, a_sub, c), "ASub")
assert_equal(c.count, 0)
c.count = 0
assert_equal(three_mul_ufunc(a, b, c), "A")
assert_equal(c.count, 0)
c_sub.count = 0
assert_equal(three_mul_ufunc(a, b, c_sub), "A")
assert_equal(c_sub.count, 0)
assert_equal(three_mul_ufunc(1, 2, b), "B")
assert_raises(TypeError, three_mul_ufunc, 1, 2, c)
assert_raises(TypeError, three_mul_ufunc, c_sub, 2, c)
assert_raises(TypeError, three_mul_ufunc, c_sub, 2, 3)
# Quaternary testing.
assert_equal(four_mul_ufunc(a, 1, 2, 3), "A")
assert_equal(four_mul_ufunc(1, a, 2, 3), "A")
assert_equal(four_mul_ufunc(1, 1, a, 3), "A")
assert_equal(four_mul_ufunc(1, 1, 2, a), "A")
assert_equal(four_mul_ufunc(a, b, 2, 3), "A")
assert_equal(four_mul_ufunc(1, a, 2, b), "A")
assert_equal(four_mul_ufunc(b, 1, a, 3), "B")
assert_equal(four_mul_ufunc(a_sub, 1, 2, a), "ASub")
assert_equal(four_mul_ufunc(a, 1, 2, a_sub), "ASub")
c = C()
c_sub = CSub()
assert_raises(TypeError, four_mul_ufunc, 1, 2, 3, c)
assert_equal(c.count, 1)
c.count = 0
assert_raises(TypeError, four_mul_ufunc, 1, 2, c_sub, c)
assert_equal(c_sub.count, 1)
assert_equal(c.count, 1)
c2 = C()
c.count = c_sub.count = 0
assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c2)
assert_equal(c_sub.count, 1)
assert_equal(c.count, 1)
assert_equal(c2.count, 0)
c.count = c2.count = c_sub.count = 0
assert_raises(TypeError, four_mul_ufunc, c2, c, c_sub, c)
assert_equal(c_sub.count, 1)
assert_equal(c.count, 0)
assert_equal(c2.count, 1)
def test_ufunc_override_methods(self):
class A:
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return self, ufunc, method, inputs, kwargs
# __call__
a = A()
with assert_raises(TypeError):
np.multiply.__call__(1, a, foo='bar', answer=42)
res = np.multiply.__call__(1, a, subok='bar', where=42)
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], '__call__')
assert_equal(res[3], (1, a))
assert_equal(res[4], {'subok': 'bar', 'where': 42})
# __call__, wrong args
assert_raises(TypeError, np.multiply, a)
assert_raises(TypeError, np.multiply, a, a, a, a)
assert_raises(TypeError, np.multiply, a, a, sig='a', signature='a')
assert_raises(TypeError, ncu_tests.inner1d, a, a, axis=0, axes=[0, 0])
# reduce, positional args
res = np.multiply.reduce(a, 'axis0', 'dtype0', 'out0', 'keep0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduce')
assert_equal(res[3], (a,))
assert_equal(res[4], {'dtype':'dtype0',
'out': ('out0',),
'keepdims': 'keep0',
'axis': 'axis0'})
# reduce, kwargs
res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0',
keepdims='keep0', initial='init0',
where='where0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduce')
assert_equal(res[3], (a,))
assert_equal(res[4], {'dtype':'dtype0',
'out': ('out0',),
'keepdims': 'keep0',
'axis': 'axis0',
'initial': 'init0',
'where': 'where0'})
# reduce, output equal to None removed, but not other explicit ones,
# even if they are at their default value.
res = np.multiply.reduce(a, 0, None, None, False)
assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False})
res = np.multiply.reduce(a, out=None, axis=0, keepdims=True)
assert_equal(res[4], {'axis': 0, 'keepdims': True})
res = np.multiply.reduce(a, None, out=(None,), dtype=None)
assert_equal(res[4], {'axis': None, 'dtype': None})
res = np.multiply.reduce(a, 0, None, None, False, 2, True)
assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False,
'initial': 2, 'where': True})
# np._NoValue ignored for initial
res = np.multiply.reduce(a, 0, None, None, False,
np._NoValue, True)
assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False,
'where': True})
# None kept for initial, True for where.
res = np.multiply.reduce(a, 0, None, None, False, None, True)
assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False,
'initial': None, 'where': True})
# reduce, wrong args
assert_raises(ValueError, np.multiply.reduce, a, out=())
assert_raises(ValueError, np.multiply.reduce, a, out=('out0', 'out1'))
assert_raises(TypeError, np.multiply.reduce, a, 'axis0', axis='axis0')
# accumulate, pos args
res = np.multiply.accumulate(a, 'axis0', 'dtype0', 'out0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'accumulate')
assert_equal(res[3], (a,))
assert_equal(res[4], {'dtype':'dtype0',
'out': ('out0',),
'axis': 'axis0'})
# accumulate, kwargs
res = np.multiply.accumulate(a, axis='axis0', dtype='dtype0',
out='out0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'accumulate')
assert_equal(res[3], (a,))
assert_equal(res[4], {'dtype':'dtype0',
'out': ('out0',),
'axis': 'axis0'})
# accumulate, output equal to None removed.
res = np.multiply.accumulate(a, 0, None, None)
assert_equal(res[4], {'axis': 0, 'dtype': None})
res = np.multiply.accumulate(a, out=None, axis=0, dtype='dtype1')
assert_equal(res[4], {'axis': 0, 'dtype': 'dtype1'})
res = np.multiply.accumulate(a, None, out=(None,), dtype=None)
assert_equal(res[4], {'axis': None, 'dtype': None})
# accumulate, wrong args
assert_raises(ValueError, np.multiply.accumulate, a, out=())
assert_raises(ValueError, np.multiply.accumulate, a,
out=('out0', 'out1'))
assert_raises(TypeError, np.multiply.accumulate, a,
'axis0', axis='axis0')
# reduceat, pos args
res = np.multiply.reduceat(a, [4, 2], 'axis0', 'dtype0', 'out0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduceat')
assert_equal(res[3], (a, [4, 2]))
assert_equal(res[4], {'dtype':'dtype0',
'out': ('out0',),
'axis': 'axis0'})
# reduceat, kwargs
res = np.multiply.reduceat(a, [4, 2], axis='axis0', dtype='dtype0',
out='out0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduceat')
assert_equal(res[3], (a, [4, 2]))
assert_equal(res[4], {'dtype':'dtype0',
'out': ('out0',),
'axis': 'axis0'})
# reduceat, output equal to None removed.
res = np.multiply.reduceat(a, [4, 2], 0, None, None)
assert_equal(res[4], {'axis': 0, 'dtype': None})
res = np.multiply.reduceat(a, [4, 2], axis=None, out=None, dtype='dt')
assert_equal(res[4], {'axis': None, 'dtype': 'dt'})
res = np.multiply.reduceat(a, [4, 2], None, None, out=(None,))
assert_equal(res[4], {'axis': None, 'dtype': None})
# reduceat, wrong args
assert_raises(ValueError, np.multiply.reduce, a, [4, 2], out=())
assert_raises(ValueError, np.multiply.reduce, a, [4, 2],
out=('out0', 'out1'))
assert_raises(TypeError, np.multiply.reduce, a, [4, 2],
'axis0', axis='axis0')
# outer
res = np.multiply.outer(a, 42)
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'outer')
assert_equal(res[3], (a, 42))
assert_equal(res[4], {})
# outer, wrong args
assert_raises(TypeError, np.multiply.outer, a)
assert_raises(TypeError, np.multiply.outer, a, a, a, a)
assert_raises(TypeError, np.multiply.outer, a, a, sig='a', signature='a')
# at
res = np.multiply.at(a, [4, 2], 'b0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'at')
assert_equal(res[3], (a, [4, 2], 'b0'))
# at, wrong args
assert_raises(TypeError, np.multiply.at, a)
assert_raises(TypeError, np.multiply.at, a, a, a, a)
def test_ufunc_override_out(self):
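        # The out argument may be given positionally or as a keyword; either
        # way __array_ufunc__ should receive it normalized to a tuple.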
class A:
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return kwargs
class B:
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return kwargs
a = A()
b = B()
res0 = np.multiply(a, b, 'out_arg')
res1 = np.multiply(a, b, out='out_arg')
res2 = np.multiply(2, b, 'out_arg')
res3 = np.multiply(3, b, out='out_arg')
res4 = np.multiply(a, 4, 'out_arg')
res5 = np.multiply(a, 5, out='out_arg')
assert_equal(res0['out'][0], 'out_arg')
assert_equal(res1['out'][0], 'out_arg')
assert_equal(res2['out'][0], 'out_arg')
assert_equal(res3['out'][0], 'out_arg')
assert_equal(res4['out'][0], 'out_arg')
assert_equal(res5['out'][0], 'out_arg')
        # ufuncs with multiple outputs: modf and frexp.
res6 = np.modf(a, 'out0', 'out1')
res7 = np.frexp(a, 'out0', 'out1')
assert_equal(res6['out'][0], 'out0')
assert_equal(res6['out'][1], 'out1')
assert_equal(res7['out'][0], 'out0')
assert_equal(res7['out'][1], 'out1')
# While we're at it, check that default output is never passed on.
assert_(np.sin(a, None) == {})
assert_(np.sin(a, out=None) == {})
assert_(np.sin(a, out=(None,)) == {})
assert_(np.modf(a, None) == {})
assert_(np.modf(a, None, None) == {})
assert_(np.modf(a, out=(None, None)) == {})
with assert_raises(TypeError):
# Out argument must be tuple, since there are multiple outputs.
np.modf(a, out=None)
        # Don't allow both a positional and a keyword output argument, or too
        # many arguments; the wrong number of entries in the out tuple is an
        # error too.
assert_raises(TypeError, np.multiply, a, b, 'one', out='two')
assert_raises(TypeError, np.multiply, a, b, 'one', 'two')
assert_raises(ValueError, np.multiply, a, b, out=('one', 'two'))
assert_raises(TypeError, np.multiply, a, out=())
assert_raises(TypeError, np.modf, a, 'one', out=('two', 'three'))
assert_raises(TypeError, np.modf, a, 'one', 'two', 'three')
assert_raises(ValueError, np.modf, a, out=('one', 'two', 'three'))
assert_raises(ValueError, np.modf, a, out=('one',))
def test_ufunc_override_exception(self):
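        # An exception raised inside __array_ufunc__ must propagate to the
        # caller, whether the overriding object is an input or the out array.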
class A:
def __array_ufunc__(self, *a, **kwargs):
raise ValueError("oops")
a = A()
assert_raises(ValueError, np.negative, 1, out=a)
assert_raises(ValueError, np.negative, a)
assert_raises(ValueError, np.divide, 1., a)
def test_ufunc_override_not_implemented(self):
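        # When every __array_ufunc__ involved returns NotImplemented, the
        # ufunc call raises a TypeError naming the participating types.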
class A:
def __array_ufunc__(self, *args, **kwargs):
return NotImplemented
msg = ("operand type(s) all returned NotImplemented from "
"__array_ufunc__(<ufunc 'negative'>, '__call__', <*>): 'A'")
with assert_raises_regex(TypeError, fnmatch.translate(msg)):
np.negative(A())
msg = ("operand type(s) all returned NotImplemented from "
"__array_ufunc__(<ufunc 'add'>, '__call__', <*>, <object *>, "
"out=(1,)): 'A', 'object', 'int'")
with assert_raises_regex(TypeError, fnmatch.translate(msg)):
np.add(A(), object(), out=1)
def test_ufunc_override_disabled(self):
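        # Setting __array_ufunc__ = None opts a type out of ufuncs entirely:
        # any ufunc call involving such an object raises TypeError.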
class OptOut:
__array_ufunc__ = None
opt_out = OptOut()
# ufuncs always raise
msg = "operand 'OptOut' does not support ufuncs"
with assert_raises_regex(TypeError, msg):
np.add(opt_out, 1)
with assert_raises_regex(TypeError, msg):
np.add(1, opt_out)
with assert_raises_regex(TypeError, msg):
np.negative(opt_out)
# opt-outs still hold even when other arguments have pathological
# __array_ufunc__ implementations
class GreedyArray:
def __array_ufunc__(self, *args, **kwargs):
return self
greedy = GreedyArray()
assert_(np.negative(greedy) is greedy)
with assert_raises_regex(TypeError, msg):
np.add(greedy, opt_out)
with assert_raises_regex(TypeError, msg):
np.add(greedy, 1, out=opt_out)
def test_gufunc_override(self):
        # gufuncs are just ufunc instances, but they follow a different code
        # path, so check that __array_ufunc__ overrides them properly.
class A:
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return self, ufunc, method, inputs, kwargs
inner1d = ncu_tests.inner1d
a = A()
res = inner1d(a, a)
assert_equal(res[0], a)
assert_equal(res[1], inner1d)
assert_equal(res[2], '__call__')
assert_equal(res[3], (a, a))
assert_equal(res[4], {})
res = inner1d(1, 1, out=a)
assert_equal(res[0], a)
assert_equal(res[1], inner1d)
assert_equal(res[2], '__call__')
assert_equal(res[3], (1, 1))
assert_equal(res[4], {'out': (a,)})
# wrong number of arguments in the tuple is an error too.
assert_raises(TypeError, inner1d, a, out='two')
assert_raises(TypeError, inner1d, a, a, 'one', out='two')
assert_raises(TypeError, inner1d, a, a, 'one', 'two')
assert_raises(ValueError, inner1d, a, a, out=('one', 'two'))
assert_raises(ValueError, inner1d, a, a, out=())
def test_ufunc_override_with_super(self):
# NOTE: this class is used in doc/source/user/basics.subclassing.rst
# if you make any changes here, do update it there too.
class A(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, out=None, **kwargs):
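                # Strategy: unwrap any A inputs/outputs to plain ndarrays,
                # defer to super().__array_ufunc__, then re-wrap the results
                # as A while recording which argument positions held A
                # instances in an ``info`` dict.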
args = []
in_no = []
for i, input_ in enumerate(inputs):
if isinstance(input_, A):
in_no.append(i)
args.append(input_.view(np.ndarray))
else:
args.append(input_)
outputs = out
out_no = []
if outputs:
out_args = []
for j, output in enumerate(outputs):
if isinstance(output, A):
out_no.append(j)
out_args.append(output.view(np.ndarray))
else:
out_args.append(output)
kwargs['out'] = tuple(out_args)
else:
outputs = (None,) * ufunc.nout
info = {}
if in_no:
info['inputs'] = in_no
if out_no:
info['outputs'] = out_no
results = super().__array_ufunc__(ufunc, method,
*args, **kwargs)
if results is NotImplemented:
return NotImplemented
if method == 'at':
if isinstance(inputs[0], A):
inputs[0].info = info
return
if ufunc.nout == 1:
results = (results,)
results = tuple((np.asarray(result).view(A)
if output is None else output)
for result, output in zip(results, outputs))
if results and isinstance(results[0], A):
results[0].info = info
return results[0] if len(results) == 1 else results
class B:
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
if any(isinstance(input_, A) for input_ in inputs):
return "A!"
else:
return NotImplemented
d = np.arange(5.)
# 1 input, 1 output
a = np.arange(5.).view(A)
b = np.sin(a)
check = np.sin(d)
assert_(np.all(check == b))
assert_equal(b.info, {'inputs': [0]})
b = np.sin(d, out=(a,))
assert_(np.all(check == b))
assert_equal(b.info, {'outputs': [0]})
assert_(b is a)
a = np.arange(5.).view(A)
b = np.sin(a, out=a)
assert_(np.all(check == b))
assert_equal(b.info, {'inputs': [0], 'outputs': [0]})
# 1 input, 2 outputs
a = np.arange(5.).view(A)
b1, b2 = np.modf(a)
assert_equal(b1.info, {'inputs': [0]})
b1, b2 = np.modf(d, out=(None, a))
assert_(b2 is a)
assert_equal(b1.info, {'outputs': [1]})
a = np.arange(5.).view(A)
b = np.arange(5.).view(A)
c1, c2 = np.modf(a, out=(a, b))
assert_(c1 is a)
assert_(c2 is b)
assert_equal(c1.info, {'inputs': [0], 'outputs': [0, 1]})
        # 2 inputs, 1 output
a = np.arange(5.).view(A)
b = np.arange(5.).view(A)
c = np.add(a, b, out=a)
assert_(c is a)
assert_equal(c.info, {'inputs': [0, 1], 'outputs': [0]})
# some tests with a non-ndarray subclass
a = np.arange(5.)
b = B()
assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented)
assert_(b.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented)
assert_raises(TypeError, np.add, a, b)
a = a.view(A)
assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented)
assert_(b.__array_ufunc__(np.add, '__call__', a, b) == "A!")
assert_(np.add(a, b) == "A!")
# regression check for gh-9102 -- tests ufunc.reduce implicitly.
d = np.array([[1, 2, 3], [1, 2, 3]])
a = d.view(A)
c = a.any()
check = d.any()
assert_equal(c, check)
        assert_equal(c.info, {'inputs': [0]})
c = a.max()
check = d.max()
assert_equal(c, check)
        assert_equal(c.info, {'inputs': [0]})
b = np.array(0).view(A)
c = a.max(out=b)
assert_equal(c, check)
assert_(c is b)
        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
check = a.max(axis=0)
b = np.zeros_like(check).view(A)
c = a.max(axis=0, out=b)
assert_equal(c, check)
assert_(c is b)
        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
# simple explicit tests of reduce, accumulate, reduceat
check = np.add.reduce(d, axis=1)
c = np.add.reduce(a, axis=1)
assert_equal(c, check)
        assert_equal(c.info, {'inputs': [0]})
b = np.zeros_like(c)
c = np.add.reduce(a, 1, None, b)
assert_equal(c, check)
assert_(c is b)
        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
check = np.add.accumulate(d, axis=0)
c = np.add.accumulate(a, axis=0)
assert_equal(c, check)
        assert_equal(c.info, {'inputs': [0]})
b = np.zeros_like(c)
c = np.add.accumulate(a, 0, None, b)
assert_equal(c, check)
assert_(c is b)
        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
indices = [0, 2, 1]
check = np.add.reduceat(d, indices, axis=1)
c = np.add.reduceat(a, indices, axis=1)
assert_equal(c, check)
        assert_equal(c.info, {'inputs': [0]})
b = np.zeros_like(c)
c = np.add.reduceat(a, indices, 1, None, b)
assert_equal(c, check)
assert_(c is b)
        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
# and a few tests for at
d = np.array([[1, 2, 3], [1, 2, 3]])
check = d.copy()
a = d.copy().view(A)
np.add.at(check, ([0, 1], [0, 2]), 1.)
np.add.at(a, ([0, 1], [0, 2]), 1.)
assert_equal(a, check)
        assert_equal(a.info, {'inputs': [0]})
b = np.array(1.).view(A)
a = d.copy().view(A)
np.add.at(a, ([0, 1], [0, 2]), b)
assert_equal(a, check)
        assert_equal(a.info, {'inputs': [0, 2]})
class TestChoose:
def test_mixed(self):
c = np.array([True, True])
a = np.array([True, True])
assert_equal(np.choose(c, (a, 1)), np.array([1, 1]))
class TestRationalFunctions:
def test_lcm(self):
self._test_lcm_inner(np.int16)
self._test_lcm_inner(np.uint16)
def test_lcm_object(self):
self._test_lcm_inner(np.object_)
    def test_gcd(self):
        self._test_gcd_inner(np.int16)
        self._test_gcd_inner(np.uint16)
def test_gcd_object(self):
self._test_gcd_inner(np.object_)
def _test_lcm_inner(self, dtype):
# basic use
a = np.array([12, 120], dtype=dtype)
b = np.array([20, 200], dtype=dtype)
assert_equal(np.lcm(a, b), [60, 600])
if not issubclass(dtype, np.unsignedinteger):
# negatives are ignored
a = np.array([12, -12, 12, -12], dtype=dtype)
b = np.array([20, 20, -20, -20], dtype=dtype)
assert_equal(np.lcm(a, b), [60]*4)
# reduce
        a = np.array([3, 12, 20], dtype=dtype)
        assert_equal(np.lcm.reduce(a), 60)
# broadcasting, and a test including 0
a = np.arange(6).astype(dtype)
b = 20
assert_equal(np.lcm(a, b), [0, 20, 20, 60, 20, 20])
def _test_gcd_inner(self, dtype):
# basic use
a = np.array([12, 120], dtype=dtype)
b = np.array([20, 200], dtype=dtype)
assert_equal(np.gcd(a, b), [4, 40])
if not issubclass(dtype, np.unsignedinteger):
# negatives are ignored
a = np.array([12, -12, 12, -12], dtype=dtype)
b = np.array([20, 20, -20, -20], dtype=dtype)
assert_equal(np.gcd(a, b), [4]*4)
# reduce
a = np.array([15, 25, 35], dtype=dtype)
assert_equal(np.gcd.reduce(a), 5)
# broadcasting, and a test including 0
a = np.arange(6).astype(dtype)
b = 20
assert_equal(np.gcd(a, b), [20, 1, 2, 1, 4, 5])
def test_lcm_overflow(self):
        # verify that lcm does not overflow even when a*b would overflow
big = np.int32(np.iinfo(np.int32).max // 11)
a = 2*big
b = 5*big
assert_equal(np.lcm(a, b), 10*big)
def test_gcd_overflow(self):
for dtype in (np.int32, np.int64):
# verify that we don't overflow when taking abs(x)
# not relevant for lcm, where the result is unrepresentable anyway
a = dtype(np.iinfo(dtype).min) # negative power of two
q = -(a // 4)
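            # For int32 this gives a = -2**31 and q = 2**29, so gcd(a, 3*q)
            # must return q without overflowing while taking abs(a).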
assert_equal(np.gcd(a, q*3), q)
assert_equal(np.gcd(a, -q*3), q)
def test_decimal(self):
from decimal import Decimal
a = np.array([1, 1, -1, -1]) * Decimal('0.20')
b = np.array([1, -1, 1, -1]) * Decimal('0.12')
assert_equal(np.gcd(a, b), 4*[Decimal('0.04')])
assert_equal(np.lcm(a, b), 4*[Decimal('0.60')])
def test_float(self):
# not well-defined on float due to rounding errors
assert_raises(TypeError, np.gcd, 0.3, 0.4)
assert_raises(TypeError, np.lcm, 0.3, 0.4)
def test_builtin_long(self):
# sanity check that array coercion is alright for builtin longs
assert_equal(np.array(2**200).item(), 2**200)
# expressed as prime factors
a = np.array(2**100 * 3**5)
b = np.array([2**100 * 5**7, 2**50 * 3**10])
assert_equal(np.gcd(a, b), [2**100, 2**50 * 3**5])
assert_equal(np.lcm(a, b), [2**100 * 3**5 * 5**7, 2**100 * 3**10])
assert_equal(np.gcd(2**100, 3**100), 1)
class TestRoundingFunctions:
def test_object_direct(self):
""" test direct implementation of these magic methods """
class C:
def __floor__(self):
return 1
def __ceil__(self):
return 2
def __trunc__(self):
return 3
arr = np.array([C(), C()])
assert_equal(np.floor(arr), [1, 1])
assert_equal(np.ceil(arr), [2, 2])
assert_equal(np.trunc(arr), [3, 3])
def test_object_indirect(self):
""" test implementations via __float__ """
class C:
def __float__(self):
return -2.5
arr = np.array([C(), C()])
assert_equal(np.floor(arr), [-3, -3])
assert_equal(np.ceil(arr), [-2, -2])
with pytest.raises(TypeError):
np.trunc(arr) # consistent with math.trunc
def test_fraction(self):
f = Fraction(-4, 3)
assert_equal(np.floor(f), -2)
assert_equal(np.ceil(f), -1)
assert_equal(np.trunc(f), -1)
class TestComplexFunctions:
funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh,
np.arctanh, np.sin, np.cos, np.tan, np.exp,
np.exp2, np.log, np.sqrt, np.log10, np.log2,
np.log1p]
def test_it(self):
for f in self.funcs:
if f is np.arccosh:
x = 1.5
else:
x = .5
fr = f(x)
fz = f(complex(x))
assert_almost_equal(fz.real, fr, err_msg='real part %s' % f)
assert_almost_equal(fz.imag, 0., err_msg='imag part %s' % f)
def test_precisions_consistent(self):
z = 1 + 1j
for f in self.funcs:
fcf = f(np.csingle(z))
fcd = f(np.cdouble(z))
fcl = f(np.clongdouble(z))
            assert_almost_equal(fcf, fcd, decimal=6, err_msg='fcf-fcd %s' % f)
            assert_almost_equal(fcl, fcd, decimal=15, err_msg='fcl-fcd %s' % f)
def test_branch_cuts(self):
# check branch cuts and continuity on them
_check_branch_cut(np.log, -0.5, 1j, 1, -1, True)
_check_branch_cut(np.log2, -0.5, 1j, 1, -1, True)
_check_branch_cut(np.log10, -0.5, 1j, 1, -1, True)
_check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True)
_check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True)
_check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True)
_check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True)
_check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True)
_check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True)
_check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True)
_check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True)
# check against bogus branch cuts: assert continuity between quadrants
_check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1)
_check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1)
_check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1)
_check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1)
_check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1)
_check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1)
def test_branch_cuts_complex64(self):
# check branch cuts and continuity on them
_check_branch_cut(np.log, -0.5, 1j, 1, -1, True, np.complex64)
_check_branch_cut(np.log2, -0.5, 1j, 1, -1, True, np.complex64)
_check_branch_cut(np.log10, -0.5, 1j, 1, -1, True, np.complex64)
_check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True, np.complex64)
_check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True, np.complex64)
_check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)
_check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)
_check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64)
_check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64)
_check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64)
_check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)
# check against bogus branch cuts: assert continuity between quadrants
_check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64)
_check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64)
_check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64)
_check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64)
_check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64)
_check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64)
def test_against_cmath(self):
import cmath
points = [-1-1j, -1+1j, +1-1j, +1+1j]
name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
atol = 4*np.finfo(complex).eps
for func in self.funcs:
fname = func.__name__.split('.')[-1]
cname = name_map.get(fname, fname)
try:
cfunc = getattr(cmath, cname)
except AttributeError:
continue
for p in points:
a = complex(func(np.complex_(p)))
b = cfunc(p)
assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s" % (fname, p, a, b))
@pytest.mark.parametrize('dtype', [np.complex64, np.complex_, np.longcomplex])
def test_loss_of_precision(self, dtype):
"""Check loss of precision in complex arc* functions"""
# Check against known-good functions
info = np.finfo(dtype)
real_dtype = dtype(0.).real.dtype
eps = info.eps
def check(x, rtol):
x = x.astype(real_dtype)
z = x.astype(dtype)
d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1)
assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arcsinh'))
z = (1j*x).astype(dtype)
d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1)
assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arcsin'))
z = x.astype(dtype)
d = np.absolute(np.arctanh(x)/np.arctanh(z).real - 1)
assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arctanh'))
z = (1j*x).astype(dtype)
d = np.absolute(np.arctanh(x)/np.arctan(z).imag - 1)
assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arctan'))
# The switchover was chosen as 1e-3; hence there can be up to
# ~eps/1e-3 of relative cancellation error before it
x_series = np.logspace(-20, -3.001, 200)
x_basic = np.logspace(-2.999, 0, 10, endpoint=False)
if dtype is np.longcomplex:
if bad_arcsinh():
pytest.skip("Trig functions of np.longcomplex values known "
"to be inaccurate on aarch64 and PPC for some "
"compilation configurations.")
# It's not guaranteed that the system-provided arc functions
# are accurate down to a few epsilons. (Eg. on Linux 64-bit)
# So, give more leeway for long complex tests here:
check(x_series, 50.0*eps)
else:
check(x_series, 2.1*eps)
check(x_basic, 2.0*eps/1e-3)
# Check a few points
z = np.array([1e-5*(1+1j)], dtype=dtype)
p = 9.999999999333333333e-6 + 1.000000000066666666e-5j
d = np.absolute(1-np.arctanh(z)/p)
assert_(np.all(d < 1e-15))
p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j
d = np.absolute(1-np.arcsinh(z)/p)
assert_(np.all(d < 1e-15))
p = 9.999999999333333333e-6j + 1.000000000066666666e-5
d = np.absolute(1-np.arctan(z)/p)
assert_(np.all(d < 1e-15))
p = 1.0000000000333333333e-5j + 9.999999999666666667e-6
d = np.absolute(1-np.arcsin(z)/p)
assert_(np.all(d < 1e-15))
# Check continuity across switchover points
def check(func, z0, d=1):
z0 = np.asarray(z0, dtype=dtype)
zp = z0 + abs(z0) * d * eps * 2
zm = z0 - abs(z0) * d * eps * 2
assert_(np.all(zp != zm), (zp, zm))
# NB: the cancellation error at the switchover is at least eps
good = (abs(func(zp) - func(zm)) < 2*eps)
assert_(np.all(good), (func, z0[~good]))
        for func in (np.arcsinh, np.arcsin, np.arctanh, np.arctan):
            pts = [rp+1j*ip for rp in (-1e-3, 0, 1e-3)
                   for ip in (-1e-3, 0, 1e-3)
                   if rp != 0 or ip != 0]
check(func, pts, 1)
check(func, pts, 1j)
check(func, pts, 1+1j)
class TestAttributes:
def test_attributes(self):
add = ncu.add
assert_equal(add.__name__, 'add')
assert_(add.ntypes >= 18) # don't fail if types added
assert_('ii->i' in add.types)
assert_equal(add.nin, 2)
assert_equal(add.nout, 1)
assert_equal(add.identity, 0)
def test_doc(self):
# don't bother checking the long list of kwargs, which are likely to
# change
assert_(ncu.add.__doc__.startswith(
"add(x1, x2, /, out=None, *, where=True"))
assert_(ncu.frexp.__doc__.startswith(
"frexp(x[, out1, out2], / [, out=(None, None)], *, where=True"))
class TestSubclass:
def test_subclass_op(self):
class simple(np.ndarray):
def __new__(subtype, shape):
self = np.ndarray.__new__(subtype, shape, dtype=object)
self.fill(0)
return self
a = simple((3, 4))
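        # All elements are object-dtype zeros, so a + a compares equal to a.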
assert_equal(a+a, a)
class TestFrompyfunc:
def test_identity(self):
def mul(a, b):
return a * b
# with identity=value
mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=1)
assert_equal(mul_ufunc.reduce([2, 3, 4]), 24)
assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1)
assert_equal(mul_ufunc.reduce([]), 1)
# with identity=None (reorderable)
mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=None)
assert_equal(mul_ufunc.reduce([2, 3, 4]), 24)
assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1)
assert_raises(ValueError, lambda: mul_ufunc.reduce([]))
# with no identity (not reorderable)
mul_ufunc = np.frompyfunc(mul, nin=2, nout=1)
assert_equal(mul_ufunc.reduce([2, 3, 4]), 24)
assert_raises(ValueError, lambda: mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)))
assert_raises(ValueError, lambda: mul_ufunc.reduce([]))
def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False,
dtype=complex):
"""
Check for a branch cut in a function.
Assert that `x0` lies on a branch cut of function `f` and `f` is
continuous from the direction `dx`.
Parameters
----------
f : func
Function to check
x0 : array-like
Point on branch cut
dx : array-like
Direction to check continuity in
re_sign, im_sign : {1, -1}
Change of sign of the real or imaginary part expected
sig_zero_ok : bool
Whether to check if the branch cut respects signed zero (if applicable)
dtype : dtype
Dtype to check (should be complex)
"""
x0 = np.atleast_1d(x0).astype(dtype)
dx = np.atleast_1d(dx).astype(dtype)
if np.dtype(dtype).char == 'F':
scale = np.finfo(dtype).eps * 1e2
atol = np.float32(1e-2)
else:
scale = np.finfo(dtype).eps * 1e3
atol = 1e-4
y0 = f(x0)
yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx))
ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx))
assert_(np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp))
assert_(np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp))
assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym))
assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym))
if sig_zero_ok:
# check that signed zeros also work as a displacement
jr = (x0.real == 0) & (dx.real != 0)
ji = (x0.imag == 0) & (dx.imag != 0)
if np.any(jr):
x = x0[jr]
x.real = np.NZERO
ym = f(x)
assert_(np.all(np.absolute(y0[jr].real - ym.real*re_sign) < atol), (y0[jr], ym))
assert_(np.all(np.absolute(y0[jr].imag - ym.imag*im_sign) < atol), (y0[jr], ym))
if np.any(ji):
x = x0[ji]
x.imag = np.NZERO
ym = f(x)
assert_(np.all(np.absolute(y0[ji].real - ym.real*re_sign) < atol), (y0[ji], ym))
assert_(np.all(np.absolute(y0[ji].imag - ym.imag*im_sign) < atol), (y0[ji], ym))
def test_copysign():
assert_(np.copysign(1, -1) == -1)
with np.errstate(divide="ignore"):
assert_(1 / np.copysign(0, -1) < 0)
assert_(1 / np.copysign(0, 1) > 0)
assert_(np.signbit(np.copysign(np.nan, -1)))
assert_(not np.signbit(np.copysign(np.nan, 1)))
def _test_nextafter(t):
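    # Shared checks for np.nextafter for a given float type t: the step up
    # from 1 is exactly eps, the step toward 0 is negative, and NaNs
    # propagate through either argument.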
one = t(1)
two = t(2)
zero = t(0)
eps = np.finfo(t).eps
assert_(np.nextafter(one, two) - one == eps)
assert_(np.nextafter(one, zero) - one < 0)
assert_(np.isnan(np.nextafter(np.nan, one)))
assert_(np.isnan(np.nextafter(one, np.nan)))
assert_(np.nextafter(one, one) == one)
def test_nextafter():
return _test_nextafter(np.float64)
def test_nextafterf():
return _test_nextafter(np.float32)
@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
reason="long double is same as double")
@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"),
reason="IBM double double")
def test_nextafterl():
return _test_nextafter(np.longdouble)
def test_nextafter_0():
for t, direction in itertools.product(np.sctypes['float'], (1, -1)):
        # The value of tiny for double double is NaN, so we need to skip the
        # assertion in that case.
with suppress_warnings() as sup:
sup.filter(UserWarning)
if not np.isnan(np.finfo(t).tiny):
tiny = np.finfo(t).tiny
assert_(
0. < direction * np.nextafter(t(0), t(direction)) < tiny)
assert_equal(np.nextafter(t(0), t(direction)) / t(2.1), direction * 0.0)
def _test_spacing(t):
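    # np.spacing(x) is the gap to the next representable value: eps at 1.0,
    # NaN for NaN and +/-inf inputs, and nonzero even for huge magnitudes.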
one = t(1)
eps = np.finfo(t).eps
nan = t(np.nan)
inf = t(np.inf)
with np.errstate(invalid='ignore'):
assert_(np.spacing(one) == eps)
assert_(np.isnan(np.spacing(nan)))
assert_(np.isnan(np.spacing(inf)))
assert_(np.isnan(np.spacing(-inf)))
assert_(np.spacing(t(1e30)) != 0)
def test_spacing():
return _test_spacing(np.float64)
def test_spacingf():
return _test_spacing(np.float32)
@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
reason="long double is same as double")
@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"),
reason="IBM double double")
def test_spacingl():
return _test_spacing(np.longdouble)
def test_spacing_gfortran():
# Reference from this fortran file, built with gfortran 4.3.3 on linux
# 32bits:
# PROGRAM test_spacing
# INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37)
# INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200)
#
# WRITE(*,*) spacing(0.00001_DBL)
# WRITE(*,*) spacing(1.0_DBL)
# WRITE(*,*) spacing(1000._DBL)
# WRITE(*,*) spacing(10500._DBL)
#
# WRITE(*,*) spacing(0.00001_SGL)
# WRITE(*,*) spacing(1.0_SGL)
# WRITE(*,*) spacing(1000._SGL)
# WRITE(*,*) spacing(10500._SGL)
# END PROGRAM
ref = {np.float64: [1.69406589450860068E-021,
2.22044604925031308E-016,
1.13686837721616030E-013,
1.81898940354585648E-012],
np.float32: [9.09494702E-13,
1.19209290E-07,
6.10351563E-05,
9.76562500E-04]}
for dt, dec_ in zip([np.float32, np.float64], (10, 20)):
x = np.array([1e-5, 1, 1000, 10500], dtype=dt)
assert_array_almost_equal(np.spacing(x), ref[dt], decimal=dec_)
def test_nextafter_vs_spacing():
# XXX: spacing does not handle long double yet
for t in [np.float32, np.float64]:
for _f in [1, 1e-5, 1000]:
f = t(_f)
f1 = t(_f + 1)
assert_(np.nextafter(f, f1) - f == np.spacing(f))
def test_pos_nan():
"""Check np.nan is a positive nan."""
assert_(np.signbit(np.nan) == 0)
def test_reduceat():
"""Test bug in reduceat when structured arrays are not copied."""
db = np.dtype([('name', 'S11'), ('time', np.int64), ('value', np.float32)])
a = np.empty([100], dtype=db)
a['name'] = 'Simple'
a['time'] = 10
a['value'] = 100
indx = [0, 7, 15, 25]
h2 = []
val1 = indx[0]
for val2 in indx[1:]:
h2.append(np.add.reduce(a['value'][val1:val2]))
val1 = val2
h2.append(np.add.reduce(a['value'][val1:]))
h2 = np.array(h2)
# test buffered -- this should work
h1 = np.add.reduceat(a['value'], indx)
assert_array_almost_equal(h1, h2)
# This is when the error occurs.
# test no buffer
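    # A tiny buffer size forces the unbuffered inner-loop path, which is
    # where the original bug occurred.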
np.setbufsize(32)
h1 = np.add.reduceat(a['value'], indx)
np.setbufsize(np.UFUNC_BUFSIZE_DEFAULT)
assert_array_almost_equal(h1, h2)
def test_reduceat_empty():
"""Reduceat should work with empty arrays"""
indices = np.array([], 'i4')
x = np.array([], 'f8')
result = np.add.reduceat(x, indices)
assert_equal(result.dtype, x.dtype)
assert_equal(result.shape, (0,))
# Another case with a slightly different zero-sized shape
x = np.ones((5, 2))
result = np.add.reduceat(x, [], axis=0)
assert_equal(result.dtype, x.dtype)
assert_equal(result.shape, (0, 2))
result = np.add.reduceat(x, [], axis=1)
assert_equal(result.dtype, x.dtype)
assert_equal(result.shape, (5, 0))
def test_complex_nan_comparisons():
nans = [complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)]
fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1),
complex(1, 1), complex(-1, -1), complex(0, 0)]
with np.errstate(invalid='ignore'):
for x in nans + fins:
x = np.array([x])
for y in nans + fins:
y = np.array([y])
if np.isfinite(x) and np.isfinite(y):
continue
assert_equal(x < y, False, err_msg="%r < %r" % (x, y))
assert_equal(x > y, False, err_msg="%r > %r" % (x, y))
assert_equal(x <= y, False, err_msg="%r <= %r" % (x, y))
assert_equal(x >= y, False, err_msg="%r >= %r" % (x, y))
assert_equal(x == y, False, err_msg="%r == %r" % (x, y))
def test_rint_big_int():
# np.rint bug for large integer values on Windows 32-bit and MKL
# https://github.com/numpy/numpy/issues/6685
val = 4607998452777363968
# This is exactly representable in floating point
assert_equal(val, int(float(val)))
# Rint should not change the value
assert_equal(val, np.rint(val))
@pytest.mark.parametrize('ftype', [np.float32, np.float64])
def test_memoverlap_accumulate(ftype):
# Reproduces bug https://github.com/numpy/numpy/issues/15597
arr = np.array([0.61, 0.60, 0.77, 0.41, 0.19], dtype=ftype)
out_max = np.array([0.61, 0.61, 0.77, 0.77, 0.77], dtype=ftype)
out_min = np.array([0.61, 0.60, 0.60, 0.41, 0.19], dtype=ftype)
assert_equal(np.maximum.accumulate(arr), out_max)
assert_equal(np.minimum.accumulate(arr), out_min)
def test_signaling_nan_exceptions():
with assert_no_warnings():
a = np.ndarray(shape=(), dtype='float32', buffer=b'\x00\xe0\xbf\xff')
np.isnan(a)
@pytest.mark.parametrize("arr", [
np.arange(2),
np.matrix([0, 1]),
np.matrix([[0, 1], [2, 5]]),
])
def test_outer_subclass_preserve(arr):
# for gh-8661
class foo(np.ndarray): pass
actual = np.multiply.outer(arr.view(foo), arr.view(foo))
assert actual.__class__.__name__ == 'foo'
def test_outer_bad_subclass():
class BadArr1(np.ndarray):
def __array_finalize__(self, obj):
# The outer call reshapes to 3 dims, try to do a bad reshape.
if self.ndim == 3:
self.shape = self.shape + (1,)
def __array_prepare__(self, obj, context=None):
return obj
class BadArr2(np.ndarray):
def __array_finalize__(self, obj):
if isinstance(obj, BadArr2):
# outer inserts 1-sized dims. In that case disturb them.
if self.shape[-1] == 1:
self.shape = self.shape[::-1]
def __array_prepare__(self, obj, context=None):
return obj
for cls in [BadArr1, BadArr2]:
arr = np.ones((2, 3)).view(cls)
with assert_raises(TypeError) as a:
# The first array gets reshaped (not the second one)
np.add.outer(arr, [1, 2])
# This actually works, since we only see the reshaping error:
arr = np.ones((2, 3)).view(cls)
assert type(np.add.outer([1, 2], arr)) is cls
def test_outer_exceeds_maxdims():
deep = np.ones((1,) * 17)
with assert_raises(ValueError):
np.add.outer(deep, deep)
def test_bad_legacy_ufunc_silent_errors():
# legacy ufuncs can't report errors and NumPy can't check if the GIL
# is released. So NumPy has to check after the GIL is released just to
# cover all bases. `np.power` uses/used to use this.
arr = np.arange(3).astype(np.float64)
with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
ncu_tests.always_error(arr, arr)
with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
# not contiguous means the fast-path cannot be taken
non_contig = arr.repeat(20).reshape(-1, 6)[:, ::2]
ncu_tests.always_error(non_contig, arr)
with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
ncu_tests.always_error.outer(arr, arr)
with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
ncu_tests.always_error.reduce(arr)
with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
ncu_tests.always_error.reduceat(arr, [0, 1])
with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
ncu_tests.always_error.accumulate(arr)
with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
ncu_tests.always_error.at(arr, [0, 1, 2], arr)
@pytest.mark.parametrize('x1', [np.arange(3.0), [0.0, 1.0, 2.0]])
def test_bad_legacy_gufunc_silent_errors(x1):
# Verify that an exception raised in a gufunc loop propagates correctly.
# The signature of always_error_gufunc is '(i),()->()'.
with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
ncu_tests.always_error_gufunc(x1, 0.0)
| 162,560 | Python | 37.834448 | 135 | 0.521586 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_function_base.py | from numpy import (
logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan,
ndarray, sqrt, nextafter, stack, errstate
)
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose,
)
class PhysicalQuantity(float):
def __new__(cls, value):
return float.__new__(cls, value)
def __add__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(x) + float(self))
__radd__ = __add__
def __sub__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(self) - float(x))
def __rsub__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(x) - float(self))
def __mul__(self, x):
return PhysicalQuantity(float(x) * float(self))
__rmul__ = __mul__
    def __truediv__(self, x):
        return PhysicalQuantity(float(self) / float(x))
    def __rtruediv__(self, x):
        return PhysicalQuantity(float(x) / float(self))
class PhysicalQuantity2(ndarray):
__array_priority__ = 10
class TestLogspace:
def test_basic(self):
y = logspace(0, 6)
assert_(len(y) == 50)
y = logspace(0, 6, num=100)
assert_(y[-1] == 10 ** 6)
y = logspace(0, 6, endpoint=False)
assert_(y[-1] < 10 ** 6)
y = logspace(0, 6, num=7)
assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
def test_start_stop_array(self):
start = array([0., 1.])
stop = array([6., 7.])
t1 = logspace(start, stop, 6)
t2 = stack([logspace(_start, _stop, 6)
for _start, _stop in zip(start, stop)], axis=1)
assert_equal(t1, t2)
t3 = logspace(start, stop[0], 6)
t4 = stack([logspace(_start, stop[0], 6)
for _start in start], axis=1)
assert_equal(t3, t4)
t5 = logspace(start, stop, 6, axis=-1)
assert_equal(t5, t2.T)
def test_dtype(self):
y = logspace(0, 6, dtype='float32')
assert_equal(y.dtype, dtype('float32'))
y = logspace(0, 6, dtype='float64')
assert_equal(y.dtype, dtype('float64'))
y = logspace(0, 6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
def test_physical_quantities(self):
a = PhysicalQuantity(1.0)
b = PhysicalQuantity(5.0)
assert_equal(logspace(a, b), logspace(1.0, 5.0))
def test_subclass(self):
a = array(1).view(PhysicalQuantity2)
b = array(7).view(PhysicalQuantity2)
ls = logspace(a, b)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, logspace(1.0, 7.0))
ls = logspace(a, b, 1)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, logspace(1.0, 7.0, 1))
class TestGeomspace:
def test_basic(self):
y = geomspace(1, 1e6)
assert_(len(y) == 50)
y = geomspace(1, 1e6, num=100)
assert_(y[-1] == 10 ** 6)
y = geomspace(1, 1e6, endpoint=False)
assert_(y[-1] < 10 ** 6)
y = geomspace(1, 1e6, num=7)
assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
y = geomspace(8, 2, num=3)
assert_allclose(y, [8, 4, 2])
assert_array_equal(y.imag, 0)
y = geomspace(-1, -100, num=3)
assert_array_equal(y, [-1, -10, -100])
assert_array_equal(y.imag, 0)
y = geomspace(-100, -1, num=3)
assert_array_equal(y, [-100, -10, -1])
assert_array_equal(y.imag, 0)
def test_boundaries_match_start_and_stop_exactly(self):
# make sure that the boundaries of the returned array exactly
# equal 'start' and 'stop' - this isn't obvious because
# np.exp(np.log(x)) isn't necessarily exactly equal to x
start = 0.3
stop = 20.3
y = geomspace(start, stop, num=1)
assert_equal(y[0], start)
y = geomspace(start, stop, num=1, endpoint=False)
assert_equal(y[0], start)
y = geomspace(start, stop, num=3)
assert_equal(y[0], start)
assert_equal(y[-1], stop)
y = geomspace(start, stop, num=3, endpoint=False)
assert_equal(y[0], start)
def test_nan_interior(self):
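        # Real endpoints of opposite sign have no real geometric path, so the
        # interior points come out NaN while the endpoints are preserved.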
with errstate(invalid='ignore'):
y = geomspace(-3, 3, num=4)
assert_equal(y[0], -3.0)
assert_(isnan(y[1:-1]).all())
assert_equal(y[3], 3.0)
with errstate(invalid='ignore'):
y = geomspace(-3, 3, num=4, endpoint=False)
assert_equal(y[0], -3.0)
assert_(isnan(y[1:]).all())
def test_complex(self):
# Purely imaginary
y = geomspace(1j, 16j, num=5)
assert_allclose(y, [1j, 2j, 4j, 8j, 16j])
assert_array_equal(y.real, 0)
y = geomspace(-4j, -324j, num=5)
assert_allclose(y, [-4j, -12j, -36j, -108j, -324j])
assert_array_equal(y.real, 0)
y = geomspace(1+1j, 1000+1000j, num=4)
assert_allclose(y, [1+1j, 10+10j, 100+100j, 1000+1000j])
y = geomspace(-1+1j, -1000+1000j, num=4)
assert_allclose(y, [-1+1j, -10+10j, -100+100j, -1000+1000j])
# Logarithmic spirals
y = geomspace(-1, 1, num=3, dtype=complex)
assert_allclose(y, [-1, 1j, +1])
y = geomspace(0+3j, -3+0j, 3)
assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
y = geomspace(0+3j, 3+0j, 3)
assert_allclose(y, [0+3j, 3/sqrt(2)+3j/sqrt(2), 3+0j])
y = geomspace(-3+0j, 0-3j, 3)
assert_allclose(y, [-3+0j, -3/sqrt(2)-3j/sqrt(2), 0-3j])
y = geomspace(-2-3j, 5+7j, 7)
assert_allclose(y, [-2-3j, -0.29058977-4.15771027j,
2.08885354-4.34146838j, 4.58345529-3.16355218j,
6.41401745-0.55233457j, 6.75707386+3.11795092j,
5+7j])
# Type promotion should prevent the -5 from becoming a NaN
y = geomspace(3j, -5, 2)
assert_allclose(y, [3j, -5])
y = geomspace(-5, 3j, 2)
assert_allclose(y, [-5, 3j])
def test_dtype(self):
y = geomspace(1, 1e6, dtype='float32')
assert_equal(y.dtype, dtype('float32'))
y = geomspace(1, 1e6, dtype='float64')
assert_equal(y.dtype, dtype('float64'))
y = geomspace(1, 1e6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
# Native types
y = geomspace(1, 1e6, dtype=float)
assert_equal(y.dtype, dtype('float_'))
y = geomspace(1, 1e6, dtype=complex)
assert_equal(y.dtype, dtype('complex'))
def test_start_stop_array_scalar(self):
lim1 = array([120, 100], dtype="int8")
lim2 = array([-120, -100], dtype="int8")
lim3 = array([1200, 1000], dtype="uint16")
t1 = geomspace(lim1[0], lim1[1], 5)
t2 = geomspace(lim2[0], lim2[1], 5)
t3 = geomspace(lim3[0], lim3[1], 5)
t4 = geomspace(120.0, 100.0, 5)
t5 = geomspace(-120.0, -100.0, 5)
t6 = geomspace(1200.0, 1000.0, 5)
# t3 uses float32, t6 uses float64
assert_allclose(t1, t4, rtol=1e-2)
assert_allclose(t2, t5, rtol=1e-2)
assert_allclose(t3, t6, rtol=1e-5)
def test_start_stop_array(self):
# Try to use all special cases.
start = array([1.e0, 32., 1j, -4j, 1+1j, -1])
stop = array([1.e4, 2., 16j, -324j, 10000+10000j, 1])
t1 = geomspace(start, stop, 5)
t2 = stack([geomspace(_start, _stop, 5)
for _start, _stop in zip(start, stop)], axis=1)
assert_equal(t1, t2)
t3 = geomspace(start, stop[0], 5)
t4 = stack([geomspace(_start, stop[0], 5)
for _start in start], axis=1)
assert_equal(t3, t4)
t5 = geomspace(start, stop, 5, axis=-1)
assert_equal(t5, t2.T)
def test_physical_quantities(self):
a = PhysicalQuantity(1.0)
b = PhysicalQuantity(5.0)
assert_equal(geomspace(a, b), geomspace(1.0, 5.0))
def test_subclass(self):
a = array(1).view(PhysicalQuantity2)
b = array(7).view(PhysicalQuantity2)
gs = geomspace(a, b)
assert type(gs) is PhysicalQuantity2
assert_equal(gs, geomspace(1.0, 7.0))
gs = geomspace(a, b, 1)
assert type(gs) is PhysicalQuantity2
assert_equal(gs, geomspace(1.0, 7.0, 1))
def test_bounds(self):
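        # geomspace is undefined when either endpoint is zero: no geometric
        # progression can start at, end at, or pass through 0.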
assert_raises(ValueError, geomspace, 0, 10)
assert_raises(ValueError, geomspace, 10, 0)
assert_raises(ValueError, geomspace, 0, 0)
class TestLinspace:
def test_basic(self):
y = linspace(0, 10)
assert_(len(y) == 50)
y = linspace(2, 10, num=100)
assert_(y[-1] == 10)
y = linspace(2, 10, endpoint=False)
assert_(y[-1] < 10)
assert_raises(ValueError, linspace, 0, 10, num=-1)
def test_corner(self):
y = list(linspace(0, 1, 1))
assert_(y == [0.0], y)
assert_raises(TypeError, linspace, 0, 1, num=2.5)
def test_type(self):
t1 = linspace(0, 1, 0).dtype
t2 = linspace(0, 1, 1).dtype
t3 = linspace(0, 1, 2).dtype
assert_equal(t1, t2)
assert_equal(t2, t3)
def test_dtype(self):
y = linspace(0, 6, dtype='float32')
assert_equal(y.dtype, dtype('float32'))
y = linspace(0, 6, dtype='float64')
assert_equal(y.dtype, dtype('float64'))
y = linspace(0, 6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
def test_start_stop_array_scalar(self):
lim1 = array([-120, 100], dtype="int8")
lim2 = array([120, -100], dtype="int8")
lim3 = array([1200, 1000], dtype="uint16")
t1 = linspace(lim1[0], lim1[1], 5)
t2 = linspace(lim2[0], lim2[1], 5)
t3 = linspace(lim3[0], lim3[1], 5)
t4 = linspace(-120.0, 100.0, 5)
t5 = linspace(120.0, -100.0, 5)
t6 = linspace(1200.0, 1000.0, 5)
assert_equal(t1, t4)
assert_equal(t2, t5)
assert_equal(t3, t6)
def test_start_stop_array(self):
start = array([-120, 120], dtype="int8")
stop = array([100, -100], dtype="int8")
t1 = linspace(start, stop, 5)
t2 = stack([linspace(_start, _stop, 5)
for _start, _stop in zip(start, stop)], axis=1)
assert_equal(t1, t2)
t3 = linspace(start, stop[0], 5)
t4 = stack([linspace(_start, stop[0], 5)
for _start in start], axis=1)
assert_equal(t3, t4)
t5 = linspace(start, stop, 5, axis=-1)
assert_equal(t5, t2.T)
def test_complex(self):
lim1 = linspace(1 + 2j, 3 + 4j, 5)
t1 = array([1.0+2.j, 1.5+2.5j, 2.0+3j, 2.5+3.5j, 3.0+4j])
lim2 = linspace(1j, 10, 5)
t2 = array([0.0+1.j, 2.5+0.75j, 5.0+0.5j, 7.5+0.25j, 10.0+0j])
assert_equal(lim1, t1)
assert_equal(lim2, t2)
def test_physical_quantities(self):
a = PhysicalQuantity(0.0)
b = PhysicalQuantity(1.0)
assert_equal(linspace(a, b), linspace(0.0, 1.0))
def test_subclass(self):
a = array(0).view(PhysicalQuantity2)
b = array(1).view(PhysicalQuantity2)
ls = linspace(a, b)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, linspace(0.0, 1.0))
ls = linspace(a, b, 1)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, linspace(0.0, 1.0, 1))
def test_array_interface(self):
# Regression test for https://github.com/numpy/numpy/pull/6659
# Ensure that start/stop can be objects that implement
# __array_interface__ and are convertible to numeric scalars
class Arrayish:
"""
A generic object that supports the __array_interface__ and hence
            can in principle be converted to a numeric scalar, but is not
            otherwise recognized as numeric, yet happens to support
            multiplication by floats.
Data should be an object that implements the buffer interface,
and contains at least 4 bytes.
"""
def __init__(self, data):
self._data = data
@property
def __array_interface__(self):
return {'shape': (), 'typestr': '<i4', 'data': self._data,
'version': 3}
def __mul__(self, other):
# For the purposes of this test any multiplication is an
# identity operation :)
return self
one = Arrayish(array(1, dtype='<i4'))
five = Arrayish(array(5, dtype='<i4'))
assert_equal(linspace(one, five), linspace(1, 5))
def test_denormal_numbers(self):
# Regression test for gh-5437. Will probably fail when compiled
# with ICC, which flushes denormals to zero
for ftype in sctypes['float']:
stop = nextafter(ftype(0), ftype(1)) * 5 # A denormal number
assert_(any(linspace(0, stop, 10, endpoint=False, dtype=ftype)))
def test_equivalent_to_arange(self):
for j in range(1000):
assert_equal(linspace(0, j, j+1, dtype=int),
arange(j+1, dtype=int))
def test_retstep(self):
for num in [0, 1, 2]:
for ept in [False, True]:
y = linspace(0, 1, num, endpoint=ept, retstep=True)
assert isinstance(y, tuple) and len(y) == 2
if num == 2:
y0_expect = [0.0, 1.0] if ept else [0.0, 0.5]
assert_array_equal(y[0], y0_expect)
assert_equal(y[1], y0_expect[1])
elif num == 1 and not ept:
assert_array_equal(y[0], [0.0])
assert_equal(y[1], 1.0)
else:
assert_array_equal(y[0], [0.0][:num])
assert isnan(y[1])
def test_object(self):
start = array(1, dtype='O')
stop = array(2, dtype='O')
y = linspace(start, stop, 3)
assert_array_equal(y, array([1., 1.5, 2.]))
def test_round_negative(self):
y = linspace(-1, 3, num=8, dtype=int)
t = array([-1, -1, 0, 0, 1, 1, 2, 3], dtype=int)
assert_array_equal(y, t)
| 14,411 | Python | 34.151219 | 78 | 0.538616 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_shape_base.py | import pytest
import numpy as np
from numpy.core import (
array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack,
newaxis, concatenate, stack
)
from numpy.core.shape_base import (_block_dispatcher, _block_setup,
_block_concatenate, _block_slicing)
from numpy.testing import (
assert_, assert_raises, assert_array_equal, assert_equal,
assert_raises_regex, assert_warns, IS_PYPY
)
class TestAtleast1d:
def test_0D_array(self):
a = array(1)
b = array(2)
res = [atleast_1d(a), atleast_1d(b)]
desired = [array([1]), array([2])]
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1, 2])
b = array([2, 3])
res = [atleast_1d(a), atleast_1d(b)]
desired = [array([1, 2]), array([2, 3])]
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
res = [atleast_1d(a), atleast_1d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_3D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
a = array([a, a])
b = array([b, b])
res = [atleast_1d(a), atleast_1d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_r1array(self):
""" Test to make sure equivalent Travis O's r1array function
"""
assert_(atleast_1d(3).shape == (1,))
assert_(atleast_1d(3j).shape == (1,))
assert_(atleast_1d(3.0).shape == (1,))
assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2))
class TestAtleast2d:
def test_0D_array(self):
a = array(1)
b = array(2)
res = [atleast_2d(a), atleast_2d(b)]
desired = [array([[1]]), array([[2]])]
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1, 2])
b = array([2, 3])
res = [atleast_2d(a), atleast_2d(b)]
desired = [array([[1, 2]]), array([[2, 3]])]
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
res = [atleast_2d(a), atleast_2d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_3D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
a = array([a, a])
b = array([b, b])
res = [atleast_2d(a), atleast_2d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_r2array(self):
""" Test to make sure equivalent Travis O's r2array function
"""
assert_(atleast_2d(3).shape == (1, 1))
assert_(atleast_2d([3j, 1]).shape == (1, 2))
assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2))
class TestAtleast3d:
def test_0D_array(self):
a = array(1)
b = array(2)
res = [atleast_3d(a), atleast_3d(b)]
desired = [array([[[1]]]), array([[[2]]])]
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1, 2])
b = array([2, 3])
res = [atleast_3d(a), atleast_3d(b)]
desired = [array([[[1], [2]]]), array([[[2], [3]]])]
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
res = [atleast_3d(a), atleast_3d(b)]
        desired = [a[:, :, newaxis], b[:, :, newaxis]]
assert_array_equal(res, desired)
def test_3D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
a = array([a, a])
b = array([b, b])
res = [atleast_3d(a), atleast_3d(b)]
desired = [a, b]
assert_array_equal(res, desired)
class TestHstack:
def test_non_iterable(self):
assert_raises(TypeError, hstack, 1)
def test_empty_input(self):
assert_raises(ValueError, hstack, ())
def test_0D_array(self):
a = array(1)
b = array(2)
res = hstack([a, b])
desired = array([1, 2])
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1])
b = array([2])
res = hstack([a, b])
desired = array([1, 2])
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1], [2]])
b = array([[1], [2]])
res = hstack([a, b])
desired = array([[1, 1], [2, 2]])
assert_array_equal(res, desired)
def test_generator(self):
with assert_warns(FutureWarning):
hstack((np.arange(3) for _ in range(2)))
with assert_warns(FutureWarning):
hstack(map(lambda x: x, np.ones((3, 2))))
class TestVstack:
def test_non_iterable(self):
assert_raises(TypeError, vstack, 1)
def test_empty_input(self):
assert_raises(ValueError, vstack, ())
def test_0D_array(self):
a = array(1)
b = array(2)
res = vstack([a, b])
desired = array([[1], [2]])
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1])
b = array([2])
res = vstack([a, b])
desired = array([[1], [2]])
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1], [2]])
b = array([[1], [2]])
res = vstack([a, b])
desired = array([[1], [2], [1], [2]])
assert_array_equal(res, desired)
def test_2D_array2(self):
a = array([1, 2])
b = array([1, 2])
res = vstack([a, b])
desired = array([[1, 2], [1, 2]])
assert_array_equal(res, desired)
def test_generator(self):
with assert_warns(FutureWarning):
vstack((np.arange(3) for _ in range(2)))
class TestConcatenate:
def test_returns_copy(self):
a = np.eye(3)
b = np.concatenate([a])
b[0, 0] = 2
assert b[0, 0] != a[0, 0]
def test_exceptions(self):
# test axis must be in bounds
for ndim in [1, 2, 3]:
a = np.ones((1,)*ndim)
np.concatenate((a, a), axis=0) # OK
assert_raises(np.AxisError, np.concatenate, (a, a), axis=ndim)
assert_raises(np.AxisError, np.concatenate, (a, a), axis=-(ndim + 1))
# Scalars cannot be concatenated
assert_raises(ValueError, concatenate, (0,))
assert_raises(ValueError, concatenate, (np.array(0),))
# dimensionality must match
assert_raises_regex(
ValueError,
r"all the input arrays must have same number of dimensions, but "
r"the array at index 0 has 1 dimension\(s\) and the array at "
r"index 1 has 2 dimension\(s\)",
np.concatenate, (np.zeros(1), np.zeros((1, 1))))
# test shapes must match except for concatenation axis
a = np.ones((1, 2, 3))
b = np.ones((2, 2, 3))
axis = list(range(3))
for i in range(3):
np.concatenate((a, b), axis=axis[0]) # OK
assert_raises_regex(
ValueError,
"all the input array dimensions for the concatenation axis "
"must match exactly, but along dimension {}, the array at "
"index 0 has size 1 and the array at index 1 has size 2"
.format(i),
np.concatenate, (a, b), axis=axis[1])
assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2])
a = np.moveaxis(a, -1, 0)
b = np.moveaxis(b, -1, 0)
axis.append(axis.pop(0))
# No arrays to concatenate raises ValueError
assert_raises(ValueError, concatenate, ())
def test_concatenate_axis_None(self):
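        # With axis=None the inputs are flattened before concatenation, so
        # the result is always 1-D.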
a = np.arange(4, dtype=np.float64).reshape((2, 2))
b = list(range(3))
c = ['x']
r = np.concatenate((a, a), axis=None)
assert_equal(r.dtype, a.dtype)
assert_equal(r.ndim, 1)
r = np.concatenate((a, b), axis=None)
assert_equal(r.size, a.size + len(b))
assert_equal(r.dtype, a.dtype)
r = np.concatenate((a, b, c), axis=None, dtype="U")
d = array(['0.0', '1.0', '2.0', '3.0',
'0', '1', '2', 'x'])
assert_array_equal(r, d)
out = np.zeros(a.size + len(b))
r = np.concatenate((a, b), axis=None)
rout = np.concatenate((a, b), axis=None, out=out)
assert_(out is rout)
assert_equal(r, rout)
def test_large_concatenate_axis_None(self):
# When no axis is given, concatenate uses flattened versions.
# This also had a bug with many arrays (see gh-5979).
x = np.arange(1, 100)
r = np.concatenate(x, None)
assert_array_equal(x, r)
# This should probably be deprecated:
r = np.concatenate(x, 100) # axis is >= MAXDIMS
assert_array_equal(x, r)
def test_concatenate(self):
# Test concatenate function
# One sequence returns unmodified (but as array)
r4 = list(range(4))
assert_array_equal(concatenate((r4,)), r4)
# Any sequence
assert_array_equal(concatenate((tuple(r4),)), r4)
assert_array_equal(concatenate((array(r4),)), r4)
# 1D default concatenation
r3 = list(range(3))
assert_array_equal(concatenate((r4, r3)), r4 + r3)
# Mixed sequence types
assert_array_equal(concatenate((tuple(r4), r3)), r4 + r3)
assert_array_equal(concatenate((array(r4), r3)), r4 + r3)
# Explicit axis specification
assert_array_equal(concatenate((r4, r3), 0), r4 + r3)
# Including negative
assert_array_equal(concatenate((r4, r3), -1), r4 + r3)
# 2D
a23 = array([[10, 11, 12], [13, 14, 15]])
a13 = array([[0, 1, 2]])
res = array([[10, 11, 12], [13, 14, 15], [0, 1, 2]])
assert_array_equal(concatenate((a23, a13)), res)
assert_array_equal(concatenate((a23, a13), 0), res)
assert_array_equal(concatenate((a23.T, a13.T), 1), res.T)
assert_array_equal(concatenate((a23.T, a13.T), -1), res.T)
        # Arrays must match shape along the non-concatenation axes
assert_raises(ValueError, concatenate, (a23.T, a13.T), 0)
# 3D
res = arange(2 * 3 * 7).reshape((2, 3, 7))
a0 = res[..., :4]
a1 = res[..., 4:6]
a2 = res[..., 6:]
assert_array_equal(concatenate((a0, a1, a2), 2), res)
assert_array_equal(concatenate((a0, a1, a2), -1), res)
assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T)
out = res.copy()
rout = concatenate((a0, a1, a2), 2, out=out)
assert_(out is rout)
assert_equal(res, rout)
@pytest.mark.skipif(IS_PYPY, reason="PYPY handles sq_concat, nb_add differently than cpython")
def test_operator_concat(self):
import operator
a = array([1, 2])
b = array([3, 4])
n = [1,2]
res = array([1, 2, 3, 4])
assert_raises(TypeError, operator.concat, a, b)
assert_raises(TypeError, operator.concat, a, n)
assert_raises(TypeError, operator.concat, n, a)
assert_raises(TypeError, operator.concat, a, 1)
assert_raises(TypeError, operator.concat, 1, a)
def test_bad_out_shape(self):
a = array([1, 2])
b = array([3, 4])
assert_raises(ValueError, concatenate, (a, b), out=np.empty(5))
assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1)))
assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4)))
concatenate((a, b), out=np.empty(4))
@pytest.mark.parametrize("axis", [None, 0])
@pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8", "S4"])
@pytest.mark.parametrize("casting",
['no', 'equiv', 'safe', 'same_kind', 'unsafe'])
def test_out_and_dtype(self, axis, out_dtype, casting):
# Compare usage of `out=out` with `dtype=out.dtype`
out = np.empty(4, dtype=out_dtype)
to_concat = (array([1.1, 2.2]), array([3.3, 4.4]))
if not np.can_cast(to_concat[0], out_dtype, casting=casting):
with assert_raises(TypeError):
concatenate(to_concat, out=out, axis=axis, casting=casting)
with assert_raises(TypeError):
concatenate(to_concat, dtype=out.dtype,
axis=axis, casting=casting)
else:
res_out = concatenate(to_concat, out=out,
axis=axis, casting=casting)
res_dtype = concatenate(to_concat, dtype=out.dtype,
axis=axis, casting=casting)
assert res_out is out
assert_array_equal(out, res_dtype)
assert res_dtype.dtype == out_dtype
with assert_raises(TypeError):
concatenate(to_concat, out=out, dtype=out_dtype, axis=axis)
@pytest.mark.parametrize("axis", [None, 0])
@pytest.mark.parametrize("string_dt", ["S", "U", "S0", "U0"])
@pytest.mark.parametrize("arrs",
[([0.],), ([0.], [1]), ([0], ["string"], [1.])])
def test_dtype_with_promotion(self, arrs, string_dt, axis):
# Note that U0 and S0 should be deprecated eventually and changed to
# actually give the empty string result (together with `np.array`)
res = np.concatenate(arrs, axis=axis, dtype=string_dt, casting="unsafe")
# The actual dtype should be identical to a cast (of a double array):
assert res.dtype == np.array(1.).astype(string_dt).dtype
@pytest.mark.parametrize("axis", [None, 0])
def test_string_dtype_does_not_inspect(self, axis):
with pytest.raises(TypeError):
np.concatenate(([None], [1]), dtype="S", axis=axis)
with pytest.raises(TypeError):
np.concatenate(([None], [1]), dtype="U", axis=axis)
@pytest.mark.parametrize("axis", [None, 0])
def test_subarray_error(self, axis):
with pytest.raises(TypeError, match=".*subarray dtype"):
np.concatenate(([1], [1]), dtype="(2,)i", axis=axis)
def test_stack():
# non-iterable input
assert_raises(TypeError, stack, 1)
# 0d input
for input_ in [(1, 2, 3),
[np.int32(1), np.int32(2), np.int32(3)],
[np.array(1), np.array(2), np.array(3)]]:
assert_array_equal(stack(input_), [1, 2, 3])
# 1d input examples
a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
r1 = array([[1, 2, 3], [4, 5, 6]])
assert_array_equal(np.stack((a, b)), r1)
assert_array_equal(np.stack((a, b), axis=1), r1.T)
# all input types
assert_array_equal(np.stack(list([a, b])), r1)
assert_array_equal(np.stack(array([a, b])), r1)
# all shapes for 1d input
arrays = [np.random.randn(3) for _ in range(10)]
axes = [0, 1, -1, -2]
expected_shapes = [(10, 3), (3, 10), (3, 10), (10, 3)]
for axis, expected_shape in zip(axes, expected_shapes):
assert_equal(np.stack(arrays, axis).shape, expected_shape)
assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=2)
assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=-3)
# all shapes for 2d input
arrays = [np.random.randn(3, 4) for _ in range(10)]
axes = [0, 1, 2, -1, -2, -3]
expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10),
(3, 4, 10), (3, 10, 4), (10, 3, 4)]
for axis, expected_shape in zip(axes, expected_shapes):
assert_equal(np.stack(arrays, axis).shape, expected_shape)
# empty arrays
assert_(stack([[], [], []]).shape == (3, 0))
assert_(stack([[], [], []], axis=1).shape == (0, 3))
# out
out = np.zeros_like(r1)
np.stack((a, b), out=out)
assert_array_equal(out, r1)
# edge cases
assert_raises_regex(ValueError, 'need at least one array', stack, [])
assert_raises_regex(ValueError, 'must have the same shape',
stack, [1, np.arange(3)])
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.arange(3), 1])
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.arange(3), 1], axis=1)
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.zeros((3, 3)), np.zeros(3)], axis=1)
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.arange(2), np.arange(3)])
# generator is deprecated
with assert_warns(FutureWarning):
result = stack((x for x in range(3)))
assert_array_equal(result, np.array([0, 1, 2]))
class TestBlock:
@pytest.fixture(params=['block', 'force_concatenate', 'force_slicing'])
def block(self, request):
        # Blocking small arrays and large arrays go through different paths.
        # Which path is taken depends on the number of element copies
        # required.
        # We define a test fixture that forces most tests to go through
        # both code paths.
        # Ultimately, this should be removed if a single algorithm is found
        # to be faster for both small and large arrays.
def _block_force_concatenate(arrays):
arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
return _block_concatenate(arrays, list_ndim, result_ndim)
def _block_force_slicing(arrays):
arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
return _block_slicing(arrays, list_ndim, result_ndim)
if request.param == 'force_concatenate':
return _block_force_concatenate
elif request.param == 'force_slicing':
return _block_force_slicing
elif request.param == 'block':
return block
else:
raise ValueError('Unknown blocking request. There is a typo in the tests.')
def test_returns_copy(self, block):
a = np.eye(3)
b = block(a)
b[0, 0] = 2
assert b[0, 0] != a[0, 0]
def test_block_total_size_estimate(self, block):
_, _, _, total_size = _block_setup([1])
assert total_size == 1
_, _, _, total_size = _block_setup([[1]])
assert total_size == 1
_, _, _, total_size = _block_setup([[1, 1]])
assert total_size == 2
_, _, _, total_size = _block_setup([[1], [1]])
assert total_size == 2
_, _, _, total_size = _block_setup([[1, 2], [3, 4]])
assert total_size == 4
def test_block_simple_row_wise(self, block):
a_2d = np.ones((2, 2))
b_2d = 2 * a_2d
desired = np.array([[1, 1, 2, 2],
[1, 1, 2, 2]])
result = block([a_2d, b_2d])
assert_equal(desired, result)
def test_block_simple_column_wise(self, block):
a_2d = np.ones((2, 2))
b_2d = 2 * a_2d
expected = np.array([[1, 1],
[1, 1],
[2, 2],
[2, 2]])
result = block([[a_2d], [b_2d]])
assert_equal(expected, result)
def test_block_with_1d_arrays_row_wise(self, block):
        # 1-D vectors are treated as row arrays
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
expected = np.array([1, 2, 3, 2, 3, 4])
result = block([a, b])
assert_equal(expected, result)
def test_block_with_1d_arrays_multiple_rows(self, block):
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
expected = np.array([[1, 2, 3, 2, 3, 4],
[1, 2, 3, 2, 3, 4]])
result = block([[a, b], [a, b]])
assert_equal(expected, result)
def test_block_with_1d_arrays_column_wise(self, block):
        # 1-D vectors are treated as row arrays
a_1d = np.array([1, 2, 3])
b_1d = np.array([2, 3, 4])
expected = np.array([[1, 2, 3],
[2, 3, 4]])
result = block([[a_1d], [b_1d]])
assert_equal(expected, result)
def test_block_mixed_1d_and_2d(self, block):
a_2d = np.ones((2, 2))
b_1d = np.array([2, 2])
result = block([[a_2d], [b_1d]])
expected = np.array([[1, 1],
[1, 1],
[2, 2]])
assert_equal(expected, result)
def test_block_complicated(self, block):
# a bit more complicated
one_2d = np.array([[1, 1, 1]])
two_2d = np.array([[2, 2, 2]])
three_2d = np.array([[3, 3, 3, 3, 3, 3]])
four_1d = np.array([4, 4, 4, 4, 4, 4])
five_0d = np.array(5)
six_1d = np.array([6, 6, 6, 6, 6])
zero_2d = np.zeros((2, 6))
expected = np.array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 3, 3, 3],
[4, 4, 4, 4, 4, 4],
[5, 6, 6, 6, 6, 6],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
result = block([[one_2d, two_2d],
[three_2d],
[four_1d],
[five_0d, six_1d],
[zero_2d]])
assert_equal(result, expected)
def test_nested(self, block):
one = np.array([1, 1, 1])
two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])
three = np.array([3, 3, 3])
four = np.array([4, 4, 4])
five = np.array(5)
six = np.array([6, 6, 6, 6, 6])
zero = np.zeros((2, 6))
result = block([
[
block([
[one],
[three],
[four]
]),
two
],
[five, six],
[zero]
])
expected = np.array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 2, 2, 2],
[4, 4, 4, 2, 2, 2],
[5, 6, 6, 6, 6, 6],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
assert_equal(result, expected)
def test_3d(self, block):
a000 = np.ones((2, 2, 2), int) * 1
a100 = np.ones((3, 2, 2), int) * 2
a010 = np.ones((2, 3, 2), int) * 3
a001 = np.ones((2, 2, 3), int) * 4
a011 = np.ones((2, 3, 3), int) * 5
a101 = np.ones((3, 2, 3), int) * 6
a110 = np.ones((3, 3, 2), int) * 7
a111 = np.ones((3, 3, 3), int) * 8
result = block([
[
[a000, a001],
[a010, a011],
],
[
[a100, a101],
[a110, a111],
]
])
expected = array([[[1, 1, 4, 4, 4],
[1, 1, 4, 4, 4],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5]],
[[1, 1, 4, 4, 4],
[1, 1, 4, 4, 4],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5]],
[[2, 2, 6, 6, 6],
[2, 2, 6, 6, 6],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8]],
[[2, 2, 6, 6, 6],
[2, 2, 6, 6, 6],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8]],
[[2, 2, 6, 6, 6],
[2, 2, 6, 6, 6],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8]]])
assert_array_equal(result, expected)
def test_block_with_mismatched_shape(self, block):
a = np.array([0, 0])
b = np.eye(2)
assert_raises(ValueError, block, [a, b])
assert_raises(ValueError, block, [b, a])
to_block = [[np.ones((2,3)), np.ones((2,2))],
[np.ones((2,2)), np.ones((2,2))]]
assert_raises(ValueError, block, to_block)
def test_no_lists(self, block):
assert_equal(block(1), np.array(1))
assert_equal(block(np.eye(3)), np.eye(3))
def test_invalid_nesting(self, block):
msg = 'depths are mismatched'
assert_raises_regex(ValueError, msg, block, [1, [2]])
assert_raises_regex(ValueError, msg, block, [1, []])
assert_raises_regex(ValueError, msg, block, [[1], 2])
assert_raises_regex(ValueError, msg, block, [[], 2])
assert_raises_regex(ValueError, msg, block, [
[[1], [2]],
[[3, 4]],
[5] # missing brackets
])
def test_empty_lists(self, block):
assert_raises_regex(ValueError, 'empty', block, [])
assert_raises_regex(ValueError, 'empty', block, [[]])
assert_raises_regex(ValueError, 'empty', block, [[1], []])
def test_tuple(self, block):
assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4]))
assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)])
def test_different_ndims(self, block):
a = 1.
b = 2 * np.ones((1, 2))
c = 3 * np.ones((1, 1, 3))
result = block([a, b, c])
expected = np.array([[[1., 2., 2., 3., 3., 3.]]])
assert_equal(result, expected)
def test_different_ndims_depths(self, block):
a = 1.
b = 2 * np.ones((1, 2))
c = 3 * np.ones((1, 2, 3))
result = block([[a, b], [c]])
expected = np.array([[[1., 2., 2.],
[3., 3., 3.],
[3., 3., 3.]]])
assert_equal(result, expected)
def test_block_memory_order(self, block):
# 3D
arr_c = np.zeros((3,)*3, order='C')
arr_f = np.zeros((3,)*3, order='F')
b_c = [[[arr_c, arr_c],
[arr_c, arr_c]],
[[arr_c, arr_c],
[arr_c, arr_c]]]
b_f = [[[arr_f, arr_f],
[arr_f, arr_f]],
[[arr_f, arr_f],
[arr_f, arr_f]]]
assert block(b_c).flags['C_CONTIGUOUS']
assert block(b_f).flags['F_CONTIGUOUS']
arr_c = np.zeros((3, 3), order='C')
arr_f = np.zeros((3, 3), order='F')
# 2D
b_c = [[arr_c, arr_c],
[arr_c, arr_c]]
b_f = [[arr_f, arr_f],
[arr_f, arr_f]]
assert block(b_c).flags['C_CONTIGUOUS']
assert block(b_f).flags['F_CONTIGUOUS']
def test_block_dispatcher():
class ArrayLike:
pass
a = ArrayLike()
b = ArrayLike()
c = ArrayLike()
assert_equal(list(_block_dispatcher(a)), [a])
assert_equal(list(_block_dispatcher([a])), [a])
assert_equal(list(_block_dispatcher([a, b])), [a, b])
assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c])
# don't recurse into non-lists
assert_equal(list(_block_dispatcher((a, b))), [(a, b)])
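# Illustrative sketch (ad hoc _demo_* helper, not collected by pytest) of
# the np.block layout rules tested above: the innermost lists concatenate
# along the last axis, each extra nesting level moves one axis to the left,
# and 0-d/1-d inputs are promoted to the result's ndim.
def _demo_block_layout():
    A = np.eye(2)
    Z = np.zeros((2, 2))
    row = np.ones(2)
    M = np.block([[A, Z],
                  [row, row]])
    assert M.shape == (3, 4)
    assert M[2].tolist() == [1.0, 1.0, 1.0, 1.0]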
| 27,248 | Python | 34.712975 | 98 | 0.488696 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_scalarprint.py | """ Test printing of scalar types.
"""
import code
import platform
import pytest
import sys
from tempfile import TemporaryFile
import numpy as np
from numpy.testing import assert_, assert_equal, assert_raises
class TestRealScalars:
def test_str(self):
svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan]
styps = [np.float16, np.float32, np.float64, np.longdouble]
wanted = [
['0.0', '0.0', '0.0', '0.0' ],
['-0.0', '-0.0', '-0.0', '-0.0'],
['1.0', '1.0', '1.0', '1.0' ],
['-1.0', '-1.0', '-1.0', '-1.0'],
['inf', 'inf', 'inf', 'inf' ],
['-inf', '-inf', '-inf', '-inf'],
['nan', 'nan', 'nan', 'nan']]
for wants, val in zip(wanted, svals):
for want, styp in zip(wants, styps):
msg = 'for str({}({}))'.format(np.dtype(styp).name, repr(val))
assert_equal(str(styp(val)), want, err_msg=msg)
def test_scalar_cutoffs(self):
        # test that both the str and repr of np.float64 behave
        # like Python floats in python3.
def check(v):
assert_equal(str(np.float64(v)), str(v))
assert_equal(str(np.float64(v)), repr(v))
assert_equal(repr(np.float64(v)), repr(v))
assert_equal(repr(np.float64(v)), str(v))
# check we use the same number of significant digits
check(1.12345678901234567890)
check(0.0112345678901234567890)
# check switch from scientific output to positional and back
check(1e-5)
check(1e-4)
check(1e15)
check(1e16)
def test_py2_float_print(self):
# gh-10753
# In python2, the python float type implements an obsolete method
# tp_print, which overrides tp_repr and tp_str when using "print" to
# output to a "real file" (ie, not a StringIO). Make sure we don't
# inherit it.
x = np.double(0.1999999999999)
with TemporaryFile('r+t') as f:
print(x, file=f)
f.seek(0)
output = f.read()
assert_equal(output, str(x) + '\n')
# In python2 the value float('0.1999999999999') prints with reduced
# precision as '0.2', but we want numpy's np.double('0.1999999999999')
# to print the unique value, '0.1999999999999'.
# gh-11031
# Only in the python2 interactive shell and when stdout is a "real"
# file, the output of the last command is printed to stdout without
# Py_PRINT_RAW (unlike the print statement) so `>>> x` and `>>> print
# x` are potentially different. Make sure they are the same. The only
# way I found to get prompt-like output is using an actual prompt from
# the 'code' module. Again, must use tempfile to get a "real" file.
# dummy user-input which enters one line and then ctrl-Ds.
def userinput():
yield 'np.sqrt(2)'
raise EOFError
gen = userinput()
input_func = lambda prompt="": next(gen)
with TemporaryFile('r+t') as fo, TemporaryFile('r+t') as fe:
orig_stdout, orig_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = fo, fe
code.interact(local={'np': np}, readfunc=input_func, banner='')
sys.stdout, sys.stderr = orig_stdout, orig_stderr
fo.seek(0)
capture = fo.read().strip()
assert_equal(capture, repr(np.sqrt(2)))
def test_dragon4(self):
# these tests are adapted from Ryan Juckett's dragon4 implementation,
# see dragon4.c for details.
fpos32 = lambda x, **k: np.format_float_positional(np.float32(x), **k)
fsci32 = lambda x, **k: np.format_float_scientific(np.float32(x), **k)
fpos64 = lambda x, **k: np.format_float_positional(np.float64(x), **k)
fsci64 = lambda x, **k: np.format_float_scientific(np.float64(x), **k)
preckwd = lambda prec: {'unique': False, 'precision': prec}
assert_equal(fpos32('1.0'), "1.")
assert_equal(fsci32('1.0'), "1.e+00")
assert_equal(fpos32('10.234'), "10.234")
assert_equal(fpos32('-10.234'), "-10.234")
assert_equal(fsci32('10.234'), "1.0234e+01")
assert_equal(fsci32('-10.234'), "-1.0234e+01")
assert_equal(fpos32('1000.0'), "1000.")
assert_equal(fpos32('1.0', precision=0), "1.")
assert_equal(fsci32('1.0', precision=0), "1.e+00")
assert_equal(fpos32('10.234', precision=0), "10.")
assert_equal(fpos32('-10.234', precision=0), "-10.")
assert_equal(fsci32('10.234', precision=0), "1.e+01")
assert_equal(fsci32('-10.234', precision=0), "-1.e+01")
assert_equal(fpos32('10.234', precision=2), "10.23")
assert_equal(fsci32('-10.234', precision=2), "-1.02e+01")
assert_equal(fsci64('9.9999999999999995e-08', **preckwd(16)),
'9.9999999999999995e-08')
assert_equal(fsci64('9.8813129168249309e-324', **preckwd(16)),
'9.8813129168249309e-324')
assert_equal(fsci64('9.9999999999999694e-311', **preckwd(16)),
'9.9999999999999694e-311')
# test rounding
# 3.1415927410 is closest float32 to np.pi
assert_equal(fpos32('3.14159265358979323846', **preckwd(10)),
"3.1415927410")
assert_equal(fsci32('3.14159265358979323846', **preckwd(10)),
"3.1415927410e+00")
assert_equal(fpos64('3.14159265358979323846', **preckwd(10)),
"3.1415926536")
assert_equal(fsci64('3.14159265358979323846', **preckwd(10)),
"3.1415926536e+00")
# 299792448 is closest float32 to 299792458
assert_equal(fpos32('299792458.0', **preckwd(5)), "299792448.00000")
assert_equal(fsci32('299792458.0', **preckwd(5)), "2.99792e+08")
assert_equal(fpos64('299792458.0', **preckwd(5)), "299792458.00000")
assert_equal(fsci64('299792458.0', **preckwd(5)), "2.99792e+08")
assert_equal(fpos32('3.14159265358979323846', **preckwd(25)),
"3.1415927410125732421875000")
assert_equal(fpos64('3.14159265358979323846', **preckwd(50)),
"3.14159265358979311599796346854418516159057617187500")
assert_equal(fpos64('3.14159265358979323846'), "3.141592653589793")
# smallest numbers
assert_equal(fpos32(0.5**(126 + 23), unique=False, precision=149),
"0.00000000000000000000000000000000000000000000140129846432"
"4817070923729583289916131280261941876515771757068283889791"
"08268586060148663818836212158203125")
assert_equal(fpos64(5e-324, unique=False, precision=1074),
"0.00000000000000000000000000000000000000000000000000000000"
"0000000000000000000000000000000000000000000000000000000000"
"0000000000000000000000000000000000000000000000000000000000"
"0000000000000000000000000000000000000000000000000000000000"
"0000000000000000000000000000000000000000000000000000000000"
"0000000000000000000000000000000000049406564584124654417656"
"8792868221372365059802614324764425585682500675507270208751"
"8652998363616359923797965646954457177309266567103559397963"
"9877479601078187812630071319031140452784581716784898210368"
"8718636056998730723050006387409153564984387312473397273169"
"6151400317153853980741262385655911710266585566867681870395"
"6031062493194527159149245532930545654440112748012970999954"
"1931989409080416563324524757147869014726780159355238611550"
"1348035264934720193790268107107491703332226844753335720832"
"4319360923828934583680601060115061698097530783422773183292"
"4790498252473077637592724787465608477820373446969953364701"
"7972677717585125660551199131504891101451037862738167250955"
"8373897335989936648099411642057026370902792427675445652290"
"87538682506419718265533447265625")
# largest numbers
f32x = np.finfo(np.float32).max
assert_equal(fpos32(f32x, **preckwd(0)),
"340282346638528859811704183484516925440.")
assert_equal(fpos64(np.finfo(np.float64).max, **preckwd(0)),
"1797693134862315708145274237317043567980705675258449965989"
"1747680315726078002853876058955863276687817154045895351438"
"2464234321326889464182768467546703537516986049910576551282"
"0762454900903893289440758685084551339423045832369032229481"
"6580855933212334827479782620414472316873817718091929988125"
"0404026184124858368.")
# Warning: In unique mode only the integer digits necessary for
# uniqueness are computed, the rest are 0.
assert_equal(fpos32(f32x),
"340282350000000000000000000000000000000.")
# Further tests of zero-padding vs rounding in different combinations
# of unique, fractional, precision, min_digits
# precision can only reduce digits, not add them.
# min_digits can only extend digits, not reduce them.
assert_equal(fpos32(f32x, unique=True, fractional=True, precision=0),
"340282350000000000000000000000000000000.")
assert_equal(fpos32(f32x, unique=True, fractional=True, precision=4),
"340282350000000000000000000000000000000.")
assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=0),
"340282346638528859811704183484516925440.")
assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=4),
"340282346638528859811704183484516925440.0000")
assert_equal(fpos32(f32x, unique=True, fractional=True,
min_digits=4, precision=4),
"340282346638528859811704183484516925440.0000")
assert_raises(ValueError, fpos32, f32x, unique=True, fractional=False,
precision=0)
assert_equal(fpos32(f32x, unique=True, fractional=False, precision=4),
"340300000000000000000000000000000000000.")
assert_equal(fpos32(f32x, unique=True, fractional=False, precision=20),
"340282350000000000000000000000000000000.")
assert_equal(fpos32(f32x, unique=True, fractional=False, min_digits=4),
"340282350000000000000000000000000000000.")
assert_equal(fpos32(f32x, unique=True, fractional=False,
min_digits=20),
"340282346638528859810000000000000000000.")
assert_equal(fpos32(f32x, unique=True, fractional=False,
min_digits=15),
"340282346638529000000000000000000000000.")
assert_equal(fpos32(f32x, unique=False, fractional=False, precision=4),
"340300000000000000000000000000000000000.")
# test that unique rounding is preserved when precision is supplied
# but no extra digits need to be printed (gh-18609)
a = np.float64.fromhex('-1p-97')
assert_equal(fsci64(a, unique=True), '-6.310887241768095e-30')
assert_equal(fsci64(a, unique=False, precision=15),
'-6.310887241768094e-30')
assert_equal(fsci64(a, unique=True, precision=15),
'-6.310887241768095e-30')
assert_equal(fsci64(a, unique=True, min_digits=15),
'-6.310887241768095e-30')
assert_equal(fsci64(a, unique=True, precision=15, min_digits=15),
'-6.310887241768095e-30')
        # add/remove digits in unique mode with unbiased rounding
assert_equal(fsci64(a, unique=True, precision=14),
'-6.31088724176809e-30')
assert_equal(fsci64(a, unique=True, min_digits=16),
'-6.3108872417680944e-30')
assert_equal(fsci64(a, unique=True, precision=16),
'-6.310887241768095e-30')
assert_equal(fsci64(a, unique=True, min_digits=14),
'-6.310887241768095e-30')
# test min_digits in unique mode with different rounding cases
assert_equal(fsci64('1e120', min_digits=3), '1.000e+120')
assert_equal(fsci64('1e100', min_digits=3), '1.000e+100')
# test trailing zeros
assert_equal(fpos32('1.0', unique=False, precision=3), "1.000")
assert_equal(fpos64('1.0', unique=False, precision=3), "1.000")
assert_equal(fsci32('1.0', unique=False, precision=3), "1.000e+00")
assert_equal(fsci64('1.0', unique=False, precision=3), "1.000e+00")
assert_equal(fpos32('1.5', unique=False, precision=3), "1.500")
assert_equal(fpos64('1.5', unique=False, precision=3), "1.500")
assert_equal(fsci32('1.5', unique=False, precision=3), "1.500e+00")
assert_equal(fsci64('1.5', unique=False, precision=3), "1.500e+00")
# gh-10713
assert_equal(fpos64('324', unique=False, precision=5,
fractional=False), "324.00")
def test_dragon4_interface(self):
tps = [np.float16, np.float32, np.float64]
if hasattr(np, 'float128'):
tps.append(np.float128)
fpos = np.format_float_positional
fsci = np.format_float_scientific
for tp in tps:
# test padding
assert_equal(fpos(tp('1.0'), pad_left=4, pad_right=4), " 1. ")
assert_equal(fpos(tp('-1.0'), pad_left=4, pad_right=4), " -1. ")
assert_equal(fpos(tp('-10.2'),
pad_left=4, pad_right=4), " -10.2 ")
# test exp_digits
assert_equal(fsci(tp('1.23e1'), exp_digits=5), "1.23e+00001")
# test fixed (non-unique) mode
assert_equal(fpos(tp('1.0'), unique=False, precision=4), "1.0000")
assert_equal(fsci(tp('1.0'), unique=False, precision=4),
"1.0000e+00")
# test trimming
# trim of 'k' or '.' only affects non-unique mode, since unique
# mode will not output trailing 0s.
assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='k'),
"1.0000")
assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='.'),
"1.")
assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='.'),
"1.2" if tp != np.float16 else "1.2002")
assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='0'),
"1.0")
assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='0'),
"1.2" if tp != np.float16 else "1.2002")
assert_equal(fpos(tp('1.'), trim='0'), "1.0")
assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='-'),
"1")
assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'),
"1.2" if tp != np.float16 else "1.2002")
assert_equal(fpos(tp('1.'), trim='-'), "1")
assert_equal(fpos(tp('1.001'), precision=1, trim='-'), "1")
@pytest.mark.skipif(not platform.machine().startswith("ppc64"),
reason="only applies to ppc float128 values")
def test_ppc64_ibm_double_double128(self):
# check that the precision decreases once we get into the subnormal
# range. Unlike float64, this starts around 1e-292 instead of 1e-308,
# which happens when the first double is normal and the second is
# subnormal.
x = np.float128('2.123123123123123123123123123123123e-286')
got = [str(x/np.float128('2e' + str(i))) for i in range(0,40)]
expected = [
"1.06156156156156156156156156156157e-286",
"1.06156156156156156156156156156158e-287",
"1.06156156156156156156156156156159e-288",
"1.0615615615615615615615615615616e-289",
"1.06156156156156156156156156156157e-290",
"1.06156156156156156156156156156156e-291",
"1.0615615615615615615615615615616e-292",
"1.0615615615615615615615615615615e-293",
"1.061561561561561561561561561562e-294",
"1.06156156156156156156156156155e-295",
"1.0615615615615615615615615616e-296",
"1.06156156156156156156156156e-297",
"1.06156156156156156156156157e-298",
"1.0615615615615615615615616e-299",
"1.06156156156156156156156e-300",
"1.06156156156156156156155e-301",
"1.0615615615615615615616e-302",
"1.061561561561561561562e-303",
"1.06156156156156156156e-304",
"1.0615615615615615618e-305",
"1.06156156156156156e-306",
"1.06156156156156157e-307",
"1.0615615615615616e-308",
"1.06156156156156e-309",
"1.06156156156157e-310",
"1.0615615615616e-311",
"1.06156156156e-312",
"1.06156156154e-313",
"1.0615615616e-314",
"1.06156156e-315",
"1.06156155e-316",
"1.061562e-317",
"1.06156e-318",
"1.06155e-319",
"1.0617e-320",
"1.06e-321",
"1.04e-322",
"1e-323",
"0.0",
"0.0"]
assert_equal(got, expected)
# Note: we follow glibc behavior, but it (or gcc) might not be right.
# In particular we can get two values that print the same but are not
# equal:
a = np.float128('2')/np.float128('3')
b = np.float128(str(a))
assert_equal(str(a), str(b))
assert_(a != b)
    def test_float32_roundtrip(self):
# gh-9360
x = np.float32(1024 - 2**-14)
y = np.float32(1024 - 2**-13)
assert_(repr(x) != repr(y))
assert_equal(np.float32(repr(x)), x)
assert_equal(np.float32(repr(y)), y)
    def test_float64_vs_python(self):
# gh-2643, gh-6136, gh-6908
assert_equal(repr(np.float64(0.1)), repr(0.1))
assert_(repr(np.float64(0.20000000000000004)) != repr(0.2))
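# Illustrative sketch (ad hoc _demo_* helper, not collected by pytest) of
# the unique/precision/min_digits interplay tested above: unique=True
# prints the shortest string that round-trips, precision can only trim
# those digits, and min_digits can only pad them.
def _demo_unique_roundtrip():
    x = np.float64(0.1)
    s = np.format_float_positional(x, unique=True)
    assert np.float64(s) == x  # shortest round-tripping representation
    assert np.format_float_positional(x, unique=True, precision=1) == '0.1'
    assert np.format_float_positional(x, unique=True, min_digits=5) == '0.10000'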
| 18,694 | Python | 47.81201 | 80 | 0.583824 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_abc.py | from numpy.testing import assert_
import numbers
import numpy as np
from numpy.core.numerictypes import sctypes
class TestABC:
def test_abstract(self):
assert_(issubclass(np.number, numbers.Number))
assert_(issubclass(np.inexact, numbers.Complex))
assert_(issubclass(np.complexfloating, numbers.Complex))
assert_(issubclass(np.floating, numbers.Real))
assert_(issubclass(np.integer, numbers.Integral))
assert_(issubclass(np.signedinteger, numbers.Integral))
assert_(issubclass(np.unsignedinteger, numbers.Integral))
def test_floats(self):
for t in sctypes['float']:
assert_(isinstance(t(), numbers.Real),
"{0} is not instance of Real".format(t.__name__))
assert_(issubclass(t, numbers.Real),
"{0} is not subclass of Real".format(t.__name__))
assert_(not isinstance(t(), numbers.Rational),
"{0} is instance of Rational".format(t.__name__))
assert_(not issubclass(t, numbers.Rational),
"{0} is subclass of Rational".format(t.__name__))
def test_complex(self):
for t in sctypes['complex']:
assert_(isinstance(t(), numbers.Complex),
"{0} is not instance of Complex".format(t.__name__))
assert_(issubclass(t, numbers.Complex),
"{0} is not subclass of Complex".format(t.__name__))
assert_(not isinstance(t(), numbers.Real),
"{0} is instance of Real".format(t.__name__))
assert_(not issubclass(t, numbers.Real),
"{0} is subclass of Real".format(t.__name__))
def test_int(self):
for t in sctypes['int']:
assert_(isinstance(t(), numbers.Integral),
"{0} is not instance of Integral".format(t.__name__))
assert_(issubclass(t, numbers.Integral),
"{0} is not subclass of Integral".format(t.__name__))
def test_uint(self):
for t in sctypes['uint']:
assert_(isinstance(t(), numbers.Integral),
"{0} is not instance of Integral".format(t.__name__))
assert_(issubclass(t, numbers.Integral),
"{0} is not subclass of Integral".format(t.__name__))
| 2,328 | Python | 41.345454 | 73 | 0.567869 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_multiarray.py | import collections.abc
import tempfile
import sys
import warnings
import operator
import io
import itertools
import functools
import ctypes
import os
import gc
import weakref
import pytest
from contextlib import contextmanager
from numpy.compat import pickle
import pathlib
import builtins
from decimal import Decimal
import mmap
import numpy as np
import numpy.core._multiarray_tests as _multiarray_tests
from numpy.core._rational_tests import rational
from numpy.testing import (
assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
assert_array_equal, assert_raises_regex, assert_array_almost_equal,
assert_allclose, IS_PYPY, IS_PYSTON, HAS_REFCOUNT, assert_array_less,
runstring, temppath, suppress_warnings, break_cycles,
)
from numpy.testing._private.utils import requires_memory, _no_tracing
from numpy.core.tests._locales import CommaDecimalPointLocale
from numpy.lib.recfunctions import repack_fields
# Need to test an object that does not fully implement math interface
from datetime import timedelta, datetime
def _aligned_zeros(shape, dtype=float, order="C", align=None):
"""
Allocate a new ndarray with aligned memory.
The ndarray is guaranteed *not* aligned to twice the requested alignment.
    E.g., if align=4, the result is guaranteed not to be aligned to 8. If
    align=None, dtype.alignment is used."""
dtype = np.dtype(dtype)
if dtype == np.dtype(object):
# Can't do this, fall back to standard allocation (which
# should always be sufficiently aligned)
if align is not None:
raise ValueError("object array alignment not supported")
return np.zeros(shape, dtype=dtype, order=order)
if align is None:
align = dtype.alignment
if not hasattr(shape, '__len__'):
shape = (shape,)
size = functools.reduce(operator.mul, shape) * dtype.itemsize
buf = np.empty(size + 2*align + 1, np.uint8)
ptr = buf.__array_interface__['data'][0]
offset = ptr % align
if offset != 0:
offset = align - offset
if (ptr % (2*align)) == 0:
offset += align
    # Note: slices producing 0-size arrays do not necessarily change the
    # data pointer --- so we use and allocate size+1
buf = buf[offset:offset+size+1][:-1]
data = np.ndarray(shape, dtype, buf, order=order)
data.fill(0)
return data
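# Illustrative sketch (ad hoc _demo_* helper, not collected by pytest) of
# the guarantee documented above: the data pointer is a multiple of
# `align` but deliberately not of `2*align`.
def _demo_aligned_zeros_guarantee():
    data = _aligned_zeros((3, 3), dtype=np.float64, align=16)
    ptr = data.__array_interface__['data'][0]
    assert ptr % 16 == 0 and ptr % 32 != 0
    assert data.shape == (3, 3) and not data.any()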
class TestFlags:
def setup_method(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict)
assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_writeable_any_base(self):
# Ensure that any base being writeable is sufficient to change flag;
# this is especially interesting for arrays from an array interface.
arr = np.arange(10)
class subclass(np.ndarray):
pass
# Create subclass so base will not be collapsed, this is OK to change
view1 = arr.view(subclass)
view2 = view1[...]
arr.flags.writeable = False
view2.flags.writeable = False
view2.flags.writeable = True # Can be set to True again.
arr = np.arange(10)
class frominterface:
def __init__(self, arr):
self.arr = arr
self.__array_interface__ = arr.__array_interface__
        view1 = np.asarray(frominterface(arr))
view2 = view1[...]
view2.flags.writeable = False
view2.flags.writeable = True
view1.flags.writeable = False
view2.flags.writeable = False
with assert_raises(ValueError):
# Must assume not writeable, since only base is not:
view2.flags.writeable = True
def test_writeable_from_readonly(self):
# gh-9440 - make sure fromstring, from buffer on readonly buffers
# set writeable False
data = b'\x00' * 100
vals = np.frombuffer(data, 'B')
assert_raises(ValueError, vals.setflags, write=True)
types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
values = np.core.records.fromstring(data, types)
vals = values['vals']
assert_raises(ValueError, vals.setflags, write=True)
def test_writeable_from_buffer(self):
data = bytearray(b'\x00' * 100)
vals = np.frombuffer(data, 'B')
assert_(vals.flags.writeable)
vals.setflags(write=False)
assert_(vals.flags.writeable is False)
vals.setflags(write=True)
assert_(vals.flags.writeable)
types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
values = np.core.records.fromstring(data, types)
vals = values['vals']
assert_(vals.flags.writeable)
vals.setflags(write=False)
assert_(vals.flags.writeable is False)
vals.setflags(write=True)
assert_(vals.flags.writeable)
@pytest.mark.skipif(IS_PYPY, reason="PyPy always copies")
def test_writeable_pickle(self):
import pickle
# Small arrays will be copied without setting base.
# See condition for using PyArray_SetBaseObject in
# array_setstate.
a = np.arange(1000)
for v in range(pickle.HIGHEST_PROTOCOL):
vals = pickle.loads(pickle.dumps(a, v))
assert_(vals.flags.writeable)
assert_(isinstance(vals.base, bytes))
def test_writeable_from_c_data(self):
# Test that the writeable flag can be changed for an array wrapping
# low level C-data, but not owning its data.
# Also see that this is deprecated to change from python.
from numpy.core._multiarray_tests import get_c_wrapping_array
arr_writeable = get_c_wrapping_array(True)
assert not arr_writeable.flags.owndata
assert arr_writeable.flags.writeable
view = arr_writeable[...]
# Toggling the writeable flag works on the view:
view.flags.writeable = False
assert not view.flags.writeable
view.flags.writeable = True
assert view.flags.writeable
# Flag can be unset on the arr_writeable:
arr_writeable.flags.writeable = False
arr_readonly = get_c_wrapping_array(False)
assert not arr_readonly.flags.owndata
assert not arr_readonly.flags.writeable
for arr in [arr_writeable, arr_readonly]:
view = arr[...]
view.flags.writeable = False # make sure it is readonly
arr.flags.writeable = False
assert not arr.flags.writeable
with assert_raises(ValueError):
view.flags.writeable = True
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
with assert_raises(DeprecationWarning):
arr.flags.writeable = True
with assert_warns(DeprecationWarning):
arr.flags.writeable = True
def test_warnonwrite(self):
a = np.arange(10)
a.flags._warn_on_write = True
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always')
a[1] = 10
a[2] = 10
# only warn once
assert_(len(w) == 1)
@pytest.mark.parametrize(["flag", "flag_value", "writeable"],
[("writeable", True, True),
# Delete _warn_on_write after deprecation and simplify
# the parameterization:
("_warn_on_write", True, False),
("writeable", False, False)])
def test_readonly_flag_protocols(self, flag, flag_value, writeable):
a = np.arange(10)
setattr(a.flags, flag, flag_value)
class MyArr():
__array_struct__ = a.__array_struct__
assert memoryview(a).readonly is not writeable
assert a.__array_interface__['data'][1] is not writeable
assert np.asarray(MyArr()).flags.writeable is writeable
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags['C'], True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.writebackifcopy, False)
assert_equal(self.a.flags['X'], False)
assert_equal(self.a.flags['WRITEBACKIFCOPY'], False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
        # itemsizes that are not a power of two are accessed byte-wise
        # and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
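# Illustrative sketch (ad hoc _demo_* helper, not collected by pytest) of
# the buffer-backed flag semantics above: an array over a readonly Python
# buffer can never be flipped to writeable, while one over a bytearray can
# be toggled freely.
def _demo_frombuffer_writeable():
    ro = np.frombuffer(b'\x00' * 8, dtype=np.uint8)
    try:
        ro.setflags(write=True)
        raise AssertionError("readonly buffer must not become writeable")
    except ValueError:
        pass
    rw = np.frombuffer(bytearray(8), dtype=np.uint8)
    rw.setflags(write=False)
    rw.setflags(write=True)
    rw[0] = 1
    assert rw[0] == 1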
class TestHash:
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
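# Illustrative sketch (ad hoc _demo_* helper, not collected by pytest) of
# the invariant behind gh-3793: objects that compare equal must hash
# equal, so numpy scalars can be used interchangeably with Python ints as
# dict/set keys.
def _demo_scalar_hash_invariant():
    assert hash(np.uint8(7)) == hash(7)
    d = {42: "python"}
    d[np.int32(42)] = "numpy"  # same key, since __eq__ and __hash__ agree
    assert d == {42: "numpy"}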
class TestAttributes:
def setup_method(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
assert_(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
        # int_ doesn't inherit from Python int, since Python 3's int is
        # arbitrary-precision rather than fixed-width
assert_(not isinstance(numpy_int, int))
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_raises(ValueError, make_array, 4, 4, -2)
assert_raises(ValueError, make_array, 4, 2, -1)
assert_raises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x,
offset=offset*x.itemsize)
except Exception as e:
raise RuntimeError(e)
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
assert_raises(ValueError, make_array, 4, 4, -2)
assert_raises(ValueError, make_array, 4, 2, -1)
assert_raises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
assert_raises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
# test 0d
arr_0d = np.array(0)
arr_0d.strides = ()
assert_raises(TypeError, set_strides, arr_0d, None)
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction:
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2], dtype=object)
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_0d_array_shape(self):
assert np.ones(np.array(3)).shape == (3,)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
@pytest.mark.parametrize("func",
[np.array,
np.asarray,
np.asanyarray,
np.ascontiguousarray,
np.asfortranarray])
def test_bad_arguments_error(self, func):
with pytest.raises(TypeError):
func(3, dtype="bad dtype")
with pytest.raises(TypeError):
func() # missing arguments
with pytest.raises(TypeError):
func(1, 2, 3, 4, 5, 6, 7, 8) # too many arguments
@pytest.mark.parametrize("func",
[np.array,
np.asarray,
np.asanyarray,
np.ascontiguousarray,
np.asfortranarray])
def test_array_as_keyword(self, func):
# This should likely be made positional only, but do not change
# the name accidentally.
if func is np.array:
func(object=3)
else:
func(a=3)
class TestAssignment:
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
def test_unicode_assignment(self):
# gh-5049
from numpy.core.numeric import set_string_function
@contextmanager
def inject_str(s):
""" replace ndarray.__str__ temporarily """
set_string_function(lambda x: s, repr=False)
try:
yield
finally:
set_string_function(None, repr=False)
a1d = np.array([u'test'])
a0d = np.array(u'done')
with inject_str(u'bad'):
a1d[0] = a0d # previously this would invoke __str__
assert_equal(a1d[0], u'done')
# this would crash for the same reason
np.array([np.array(u'\xe5\xe4\xf6')])
def test_stringlike_empty_list(self):
# gh-8902
u = np.array([u'done'])
b = np.array([b'done'])
class bad_sequence:
def __getitem__(self): pass
def __len__(self): raise RuntimeError
assert_raises(ValueError, operator.setitem, u, 0, [])
assert_raises(ValueError, operator.setitem, b, 0, [])
assert_raises(ValueError, operator.setitem, u, 0, bad_sequence())
assert_raises(ValueError, operator.setitem, b, 0, bad_sequence())
def test_longdouble_assignment(self):
# only relevant if longdouble is larger than float
# we're looking for loss of precision
for dtype in (np.longdouble, np.longcomplex):
# gh-8902
tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype)
tinya = np.nextafter(np.longdouble(0), -1).astype(dtype)
# construction
tiny1d = np.array([tinya])
assert_equal(tiny1d[0], tinya)
# scalar = scalar
tiny1d[0] = tinyb
assert_equal(tiny1d[0], tinyb)
# 0d = scalar
tiny1d[0, ...] = tinya
assert_equal(tiny1d[0], tinya)
# 0d = 0d
tiny1d[0, ...] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
# scalar = 0d
tiny1d[0] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
arr = np.array([np.array(tinya)])
assert_equal(arr[0], tinya)
def test_cast_to_string(self):
# cast to str should do "str(scalar)", not "str(scalar.item())"
# Example: In python2, str(float) is truncated, so we want to avoid
# str(np.float64(...).item()) as this would incorrectly truncate.
a = np.zeros(1, dtype='S20')
a[:] = np.array(['1.12345678901234567890'], dtype='f8')
assert_equal(a[0], b"1.1234567890123457")
class TestDtypedescr:
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
assert_(np.dtype('<i4') != np.dtype('>i4'))
assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')]))
def test_structured_non_void(self):
fields = [('a', '<i2'), ('b', '<i2')]
dt_int = np.dtype(('i4', fields))
assert_equal(str(dt_int), "(numpy.int32, [('a', '<i2'), ('b', '<i2')])")
# gh-9821
arr_int = np.zeros(4, dt_int)
assert_equal(repr(arr_int),
"array([0, 0, 0, 0], dtype=(numpy.int32, [('a', '<i2'), ('b', '<i2')]))")
class TestZeroRank:
def setup_method(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
assert_equal(a[...], 0)
assert_equal(b[...], 'x')
assert_(a[...].base is a) # `a[...] is a` in numpy <1.9.
assert_(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
assert_equal(a[()], 0)
assert_equal(b[()], 'x')
assert_(type(a[()]) is a.dtype.type)
assert_(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
assert_raises(IndexError, lambda x: x[0], a)
assert_raises(IndexError, lambda x: x[0], b)
assert_raises(IndexError, lambda x: x[np.array([], int)], a)
assert_raises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
assert_equal(a, 42)
b[...] = ''
assert_equal(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
assert_equal(a, 42)
b[()] = ''
assert_equal(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
assert_raises(IndexError, assign, a, 0, 42)
assert_raises(IndexError, assign, b, 0, '')
assert_raises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
assert_equal(a[np.newaxis].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ...].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
assert_raises(IndexError, subscript, a, (np.newaxis, 0))
assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
assert_equal(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
assert_equal(x[()], 6)
# strides and shape must be the same length
with pytest.raises(ValueError):
np.ndarray((2,), strides=())
with pytest.raises(ValueError):
np.ndarray((), strides=(2,))
def test_output(self):
x = np.array(2)
assert_raises(ValueError, np.add, x, [1], x)
def test_real_imag(self):
# contiguity checks are for gh-11245
x = np.array(1j)
xr = x.real
xi = x.imag
assert_equal(xr, np.array(0))
assert_(type(xr) is np.ndarray)
assert_equal(xr.flags.contiguous, True)
assert_equal(xr.flags.f_contiguous, True)
assert_equal(xi, np.array(1))
assert_(type(xi) is np.ndarray)
assert_equal(xi.flags.contiguous, True)
assert_equal(xi.flags.f_contiguous, True)
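# Illustrative sketch (ad hoc _demo_* helper, not collected by pytest) of
# the 0-d distinction exercised above: `a[...]` yields a 0-d *view* whose
# base is `a`, while `a[()]` extracts a numpy *scalar*.
def _demo_zero_rank_indexing():
    a = np.array(5.0)
    view = a[...]
    assert isinstance(view, np.ndarray) and view.base is a
    scalar = a[()]
    assert isinstance(scalar, np.float64)
    assert not isinstance(scalar, np.ndarray)
    view[...] = 7.0  # writes through to `a`
    assert a == 7.0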
class TestScalarIndexing:
def setup_method(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
assert_equal(a[...], 0)
assert_equal(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
assert_equal(a[()], 0)
assert_equal(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
assert_raises(IndexError, lambda x: x[0], a)
assert_raises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
assert_raises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
assert_equal(a[np.newaxis].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ...].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
assert_raises(IndexError, subscript, a, (np.newaxis, 0))
assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation:
"""
Test the np.array constructor
"""
def test_from_attribute(self):
class x:
def __array__(self, dtype=None):
pass
assert_raises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert arr.dtype == 'V8' # current default
# Same length scalars (those that go to the same void) work:
arr = np.array([b"1234", b"1234"], dtype="V")
assert arr.dtype == "V4"
# Promoting different lengths will fail (pre 1.20 this worked)
# by going via S5 and casting to V5.
with pytest.raises(TypeError):
np.array([b"1234", b"12345"], dtype="V")
with pytest.raises(TypeError):
np.array([b"12345", b"1234"], dtype="V")
# Check the same for the casting path:
arr = np.array([b"1234", b"1234"], dtype="O").astype("V")
assert arr.dtype == "V4"
with pytest.raises(TypeError):
np.array([b"1234", b"12345"], dtype="O").astype("V")
@pytest.mark.parametrize("idx",
[pytest.param(Ellipsis, id="arr"), pytest.param((), id="scalar")])
def test_structured_void_promotion(self, idx):
arr = np.array(
[np.array(1, dtype="i,i")[idx], np.array(2, dtype='i,i')[idx]],
dtype="V")
assert_array_equal(arr, np.array([(1, 1), (2, 2)], dtype="i,i"))
# The following fails to promote the two dtypes, resulting in an error
with pytest.raises(TypeError):
np.array(
[np.array(1, dtype="i,i")[idx], np.array(2, dtype='i,i,i')[idx]],
dtype="V")
def test_too_big_error(self):
        # 46341 is the smallest integer greater than sqrt(2**31 - 1).
# 3037000500 is the smallest integer greater than sqrt(2**63 - 1).
# We want to make sure that the square byte array with those dimensions
# is too big on 32 or 64 bit systems respectively.
if np.iinfo('intp').max == 2**31 - 1:
shape = (46341, 46341)
elif np.iinfo('intp').max == 2**63 - 1:
shape = (3037000500, 3037000500)
else:
return
assert_raises(ValueError, np.empty, shape, dtype=np.int8)
assert_raises(ValueError, np.zeros, shape, dtype=np.int8)
assert_raises(ValueError, np.ones, shape, dtype=np.int8)
@pytest.mark.skipif(np.dtype(np.intp).itemsize != 8,
reason="malloc may not fail on 32 bit systems")
def test_malloc_fails(self):
# This test is guaranteed to fail due to a too large allocation
with assert_raises(np.core._exceptions._ArrayMemoryError):
np.empty(np.iinfo(np.intp).max, dtype=np.uint8)
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@pytest.mark.slow
def test_zeros_big(self):
# test big array as they might be allocated different by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
# This test can fail on 32-bit systems due to insufficient
# contiguous memory. Deallocating the previous array increases the
# chance of success.
            del d
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogeneous(self):
assert_equal(np.array([4, 2**80]).dtype, object)
assert_equal(np.array([4, 2**80, 4]).dtype, object)
assert_equal(np.array([2**80, 4]).dtype, object)
assert_equal(np.array([2**80] * 3).dtype, object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, complex)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
those derived from object. Class Map is a mapping type indicated by
raising a ValueError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail:
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map:
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
def test_false_len_sequence(self):
# gh-7264, segfault for this example
class C:
def __getitem__(self, i):
raise IndexError
def __len__(self):
return 42
a = np.array(C()) # segfault?
assert_equal(len(a), 0)
def test_false_len_iterable(self):
# Special case where a bad __getitem__ makes us fall back on __iter__:
class C:
def __getitem__(self, x):
raise Exception
def __iter__(self):
return iter(())
def __len__(self):
return 2
a = np.empty(2)
with assert_raises(ValueError):
a[:] = C() # Segfault!
np.array(C()) == list(C())
def test_failed_len_sequence(self):
# gh-7393
class A:
def __init__(self, data):
self._data = data
def __getitem__(self, item):
return type(self)(self._data[item])
def __len__(self):
return len(self._data)
# len(d) should give 3, but len(d[0]) will fail
d = A([1,2,3])
assert_equal(len(np.array(d)), 3)
def test_array_too_big(self):
# Test that array creation succeeds for arrays addressable by intp
# on the byte level and fails for too large arrays.
buf = np.zeros(100)
max_bytes = np.iinfo(np.intp).max
for dtype in ["intp", "S20", "b"]:
dtype = np.dtype(dtype)
itemsize = dtype.itemsize
np.ndarray(buffer=buf, strides=(0,),
shape=(max_bytes//itemsize,), dtype=dtype)
assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
shape=(max_bytes//itemsize + 1,), dtype=dtype)
def _ragged_creation(self, seq):
# without dtype=object, the ragged object should raise
with assert_warns(np.VisibleDeprecationWarning):
a = np.array(seq)
b = np.array(seq, dtype=object)
assert_equal(a, b)
return b
def test_ragged_ndim_object(self):
# Lists of mismatching depths are treated as object arrays
a = self._ragged_creation([[1], 2, 3])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = self._ragged_creation([1, [2], 3])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = self._ragged_creation([1, 2, [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
def test_ragged_shape_object(self):
# The ragged dimension of a list is turned into an object array
a = self._ragged_creation([[1, 1], [2], [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = self._ragged_creation([[1], [2, 2], [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = self._ragged_creation([[1], [2], [3, 3]])
assert a.shape == (3,)
assert a.dtype == object
def test_array_of_ragged_array(self):
outer = np.array([None, None])
outer[0] = outer[1] = np.array([1, 2, 3])
assert np.array(outer).shape == (2,)
assert np.array([outer]).shape == (1, 2)
outer_ragged = np.array([None, None])
outer_ragged[0] = np.array([1, 2, 3])
outer_ragged[1] = np.array([1, 2, 3, 4])
# should both of these emit deprecation warnings?
assert np.array(outer_ragged).shape == (2,)
assert np.array([outer_ragged]).shape == (1, 2,)
def test_deep_nonragged_object(self):
# None of these should raise, even though they are missing dtype=object
a = np.array([[[Decimal(1)]]])
a = np.array([1, Decimal(1)])
a = np.array([[1], [Decimal(1)]])
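# Illustrative sketch (ad hoc _demo_* helper, not collected by pytest) of
# the ragged-creation rule tested above: mismatched lengths only produce
# an object array when dtype=object is explicit.
def _demo_ragged_needs_object_dtype():
    a = np.array([[1, 1], [2], [3]], dtype=object)
    assert a.shape == (3,)
    assert a.dtype == object
    assert a[0] == [1, 1]  # elements stay Python lists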
class TestStructured:
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
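    # Hedged sketch of the comment above: a field's (2, 2) subarray keeps
    # its own C layout, so transposing the outer array only permutes the
    # outer axes of the field view, never the subarray axes.
    def test_subarray_transpose_sketch(self):
        a = np.zeros((2, 3), dtype=[('v', 'i4', (2, 2))])
        assert_equal(a['v'].shape, (2, 3, 2, 2))
        assert_equal(a.T['v'].shape, (3, 2, 2, 2))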
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works, including cases that
# require promotion to work:
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that incompatible sub-array shapes don't result to broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# The main importance is that it does not return True:
with pytest.raises(TypeError):
x == y
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# The main importance is that it does not return True:
with pytest.raises(TypeError):
x == y
def test_structured_comparisons_with_promotion(self):
# Check that structured arrays can be compared so long as their
# dtypes promote fine:
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>f8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>i8')])
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Including with embedded subarray dtype (although subarray comparison
# itself may still be a bit weird and compare the raw data)
a = np.array([(5, 42), (10, 1)], dtype=[('a', '10>f8'), ('b', '5<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '10<i8'), ('b', '5>i8')])
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
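    # Small sketch of the promotion rule exercised above: matching field
    # names let structured arrays compare across byte orders, because both
    # sides are first promoted to a common native-order dtype.
    def test_structured_promote_compare_sketch(self):
        a = np.array([(1,)], dtype=[('x', '<i4')])
        b = np.array([(1,)], dtype=[('x', '>i4')])
        assert_equal(a == b, [True])
        assert_equal(np.promote_types(a.dtype, b.dtype),
                     np.dtype([('x', 'i4')]))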
def test_void_comparison_failures(self):
        # In principle, one could decide to return an array of False when
        # comparisons are impossible. But right now we raise TypeError
        # whenever "void" dtypes are involved.
x = np.zeros(3, dtype=[('a', 'i1')])
y = np.zeros(3)
# Cannot compare non-structured to structured:
with pytest.raises(TypeError):
x == y
# Added title prevents promotion, but casts are OK:
y = np.zeros(3, dtype=[(('title', 'a'), 'i1')])
assert np.can_cast(y.dtype, x.dtype)
with pytest.raises(TypeError):
x == y
x = np.zeros(3, dtype="V7")
y = np.zeros(3, dtype="V8")
with pytest.raises(TypeError):
x == y
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(5, 42), (10, 1)], dtype=[('a', '<i4'), ('b', '>f8')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can change byte order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
# check that broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
def test_zero_width_string(self):
# Test for PR #6430 / issues #473, #4955, #2585
dt = np.dtype([('I', int), ('S', 'S0')])
x = np.zeros(4, dtype=dt)
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['S'].itemsize, 0)
x['S'] = ['a', 'b', 'c', 'd']
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #4955
x['S'][x['I'] == 0] = 'hello'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #2585
x['S'] = 'A'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Allow zero-width dtypes in ndarray constructor
y = np.ndarray(4, dtype=x['S'].dtype)
assert_equal(y.itemsize, 0)
assert_equal(x['S'], y)
# More tests for indexing an array with zero-width fields
assert_equal(np.zeros(4, dtype=[('a', 'S0,S0'),
('b', 'u1')])['a'].itemsize, 0)
assert_equal(np.empty(3, dtype='S0,S0').itemsize, 0)
assert_equal(np.zeros(4, dtype='S0,u1')['f0'].itemsize, 0)
xx = x['S'].reshape((2, 2))
assert_equal(xx.itemsize, 0)
assert_equal(xx, [[b'', b''], [b'', b'']])
# check for no uninitialized memory due to viewing S0 array
assert_equal(xx[:].dtype, xx.dtype)
assert_array_equal(eval(repr(xx), dict(array=np.array)), xx)
b = io.BytesIO()
np.save(b, xx)
b.seek(0)
yy = np.load(b)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
with temppath(suffix='.npy') as tmp:
np.save(tmp, xx)
yy = np.load(tmp)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
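    # Minimal sketch of the zero-width behaviour above: an 'S0' field has
    # itemsize 0, so assignments into it are no-ops and reads always give
    # the empty byte string.
    def test_zero_width_assignment_sketch(self):
        z = np.zeros(2, dtype=[('s', 'S0')])
        z['s'] = 'ignored'
        assert_equal(z['s'], [b'', b''])
        assert_equal(z['s'].itemsize, 0)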
def test_base_attr(self):
a = np.zeros(3, dtype='i4,f4')
b = a[0]
assert_(b.base is a)
def test_assignment(self):
def testassign(arr, v):
c = arr.copy()
c[0] = v # assign using setitem
c[1:] = v # assign using "dtype_transfer" code paths
return c
dt = np.dtype([('foo', 'i8'), ('bar', 'i8')])
arr = np.ones(2, dt)
v1 = np.array([(2,3)], dtype=[('foo', 'i8'), ('bar', 'i8')])
v2 = np.array([(2,3)], dtype=[('bar', 'i8'), ('foo', 'i8')])
v3 = np.array([(2,3)], dtype=[('bar', 'i8'), ('baz', 'i8')])
v4 = np.array([(2,)], dtype=[('bar', 'i8')])
v5 = np.array([(2,3)], dtype=[('foo', 'f8'), ('bar', 'f8')])
w = arr.view({'names': ['bar'], 'formats': ['i8'], 'offsets': [8]})
ans = np.array([(2,3),(2,3)], dtype=dt)
assert_equal(testassign(arr, v1), ans)
assert_equal(testassign(arr, v2), ans)
assert_equal(testassign(arr, v3), ans)
assert_raises(TypeError, lambda: testassign(arr, v4))
assert_equal(testassign(arr, v5), ans)
w[:] = 4
assert_equal(arr, np.array([(1,4),(1,4)], dtype=dt))
# test field-reordering, assignment by position, and self-assignment
a = np.array([(1,2,3)],
dtype=[('foo', 'i8'), ('bar', 'i8'), ('baz', 'f4')])
a[['foo', 'bar']] = a[['bar', 'foo']]
assert_equal(a[0].item(), (2,1,3))
# test that this works even for 'simple_unaligned' structs
# (ie, that PyArray_EquivTypes cares about field order too)
a = np.array([(1,2)], dtype=[('a', 'i4'), ('b', 'i4')])
a[['a', 'b']] = a[['b', 'a']]
assert_equal(a[0].item(), (2,1))
def test_scalar_assignment(self):
with assert_raises(ValueError):
arr = np.arange(25).reshape(5, 5)
arr.itemset(3)
def test_structuredscalar_indexing(self):
# test gh-7262
x = np.empty(shape=1, dtype="(2)3S,(2)3U")
assert_equal(x[["f0","f1"]][0], x[0][["f0","f1"]])
assert_equal(x[0], x[0][()])
def test_multiindex_titles(self):
a = np.zeros(4, dtype=[(('a', 'b'), 'i'), ('c', 'i'), ('d', 'i')])
assert_raises(KeyError, lambda : a[['a','c']])
assert_raises(KeyError, lambda : a[['a','a']])
assert_raises(ValueError, lambda : a[['b','b']]) # field exists, but repeated
a[['b','c']] # no exception
def test_structured_cast_promotion_fieldorder(self):
# gh-15494
# dtypes with different field names are not promotable
A = ("a", "<i8")
B = ("b", ">i8")
ab = np.array([(1, 2)], dtype=[A, B])
ba = np.array([(1, 2)], dtype=[B, A])
assert_raises(TypeError, np.concatenate, ab, ba)
assert_raises(TypeError, np.result_type, ab.dtype, ba.dtype)
assert_raises(TypeError, np.promote_types, ab.dtype, ba.dtype)
# dtypes with same field names/order but different memory offsets
# and byte-order are promotable to packed nbo.
assert_equal(np.promote_types(ab.dtype, ba[['a', 'b']].dtype),
repack_fields(ab.dtype.newbyteorder('N')))
# gh-13667
# dtypes with different fieldnames but castable field types are castable
assert_equal(np.can_cast(ab.dtype, ba.dtype), True)
assert_equal(ab.astype(ba.dtype).dtype, ba.dtype)
assert_equal(np.can_cast('f8,i8', [('f0', 'f8'), ('f1', 'i8')]), True)
assert_equal(np.can_cast('f8,i8', [('f1', 'f8'), ('f0', 'i8')]), True)
assert_equal(np.can_cast('f8,i8', [('f1', 'i8'), ('f0', 'f8')]), False)
assert_equal(np.can_cast('f8,i8', [('f1', 'i8'), ('f0', 'f8')],
casting='unsafe'), True)
ab[:] = ba # make sure assignment still works
# tests of type-promotion of corresponding fields
dt1 = np.dtype([("", "i4")])
dt2 = np.dtype([("", "i8")])
assert_equal(np.promote_types(dt1, dt2), np.dtype([('f0', 'i8')]))
assert_equal(np.promote_types(dt2, dt1), np.dtype([('f0', 'i8')]))
assert_raises(TypeError, np.promote_types, dt1, np.dtype([("", "V3")]))
assert_equal(np.promote_types('i4,f8', 'i8,f4'),
np.dtype([('f0', 'i8'), ('f1', 'f8')]))
# test nested case
dt1nest = np.dtype([("", dt1)])
dt2nest = np.dtype([("", dt2)])
assert_equal(np.promote_types(dt1nest, dt2nest),
np.dtype([('f0', np.dtype([('f0', 'i8')]))]))
# note that offsets are lost when promoting:
dt = np.dtype({'names': ['x'], 'formats': ['i4'], 'offsets': [8]})
a = np.ones(3, dtype=dt)
assert_equal(np.concatenate([a, a]).dtype, np.dtype([('x', 'i4')]))
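    # Sketch (an editorial illustration, not original test coverage) of the
    # offset-dropping noted above: promoting a padded structured dtype with
    # itself yields the packed form without the leading padding bytes.
    def test_promotion_drops_offsets_sketch(self):
        padded = np.dtype({'names': ['x'], 'formats': ['i4'], 'offsets': [4]})
        assert padded.itemsize == 8
        packed = np.promote_types(padded, padded)
        assert packed == np.dtype([('x', 'i4')])
        assert packed.itemsize == 4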
@pytest.mark.parametrize("dtype_dict", [
dict(names=["a", "b"], formats=["i4", "f"], itemsize=100),
dict(names=["a", "b"], formats=["i4", "f"],
offsets=[0, 12])])
@pytest.mark.parametrize("align", [True, False])
def test_structured_promotion_packs(self, dtype_dict, align):
# Structured dtypes are packed when promoted (we consider the packed
        # form to be "canonical"), so there is no extra padding.
dtype = np.dtype(dtype_dict, align=align)
# Remove non "canonical" dtype options:
dtype_dict.pop("itemsize", None)
dtype_dict.pop("offsets", None)
expected = np.dtype(dtype_dict, align=align)
res = np.promote_types(dtype, dtype)
assert res.itemsize == expected.itemsize
assert res.fields == expected.fields
# But the "expected" one, should just be returned unchanged:
res = np.promote_types(expected, expected)
assert res is expected
def test_structured_asarray_is_view(self):
        # A scalar viewing an array preserves its view even when creating a
        # new array. This test documents that behaviour; it may not be the
        # desired behaviour.
arr = np.array([1], dtype="i,i")
scalar = arr[0]
assert not scalar.flags.owndata # view into the array
assert np.asarray(scalar).base is scalar
# But never when a dtype is passed in:
assert np.asarray(scalar, dtype=scalar.dtype).base is None
# A scalar which owns its data does not have this property.
# It is not easy to create one, one method is to use pickle:
scalar = pickle.loads(pickle.dumps(scalar))
assert scalar.flags.owndata
assert np.asarray(scalar).base is None
class TestBool:
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
assert_(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
assert_(a1 is b1)
assert_(np.array([True])[0] is a1)
assert_(np.array(True)[()] is a1)
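    # Sketch of the interning contract checked above: np.bool_ keeps exactly
    # two cached instances, so identity is interchangeable with equality for
    # scalars coming out of boolean arrays.
    def test_interning_identity_sketch(self):
        assert np.bool_(1) is np.True_
        assert np.bool_(0) is np.False_
        assert np.array([True, False])[0] is np.True_
        assert np.array([True, False])[1] is np.False_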
def test_sum(self):
d = np.ones(101, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=bool)
c = builtins.sum(l)
assert_equal(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
assert_equal(np.count_nonzero(a), c)
av *= 4
assert_equal(np.count_nonzero(a), c)
av[av != 0] = 0xFF
assert_equal(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@pytest.mark.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=bool)[o+1:]
a[:o] = True
assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=bool)[o+1:]
a[:o] = False
assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
def _test_cast_from_flexible(self, dtype):
# empty string -> false
for n in range(3):
v = np.array(b'', (dtype, n))
assert_equal(bool(v), False)
assert_equal(bool(v[()]), False)
assert_equal(v.astype(bool), False)
assert_(isinstance(v.astype(bool), np.ndarray))
assert_(v[()].astype(bool) is np.False_)
# anything else -> true
for n in range(1, 4):
for val in [b'a', b'0', b' ']:
v = np.array(val, (dtype, n))
assert_equal(bool(v), True)
assert_equal(bool(v[()]), True)
assert_equal(v.astype(bool), True)
assert_(isinstance(v.astype(bool), np.ndarray))
assert_(v[()].astype(bool) is np.True_)
def test_cast_from_void(self):
self._test_cast_from_flexible(np.void)
@pytest.mark.xfail(reason="See gh-9847")
def test_cast_from_unicode(self):
self._test_cast_from_flexible(np.unicode_)
@pytest.mark.xfail(reason="See gh-9847")
def test_cast_from_bytes(self):
self._test_cast_from_flexible(np.bytes_)
class TestZeroSizeFlexible:
@staticmethod
def _zeros(shape, dtype=str):
dtype = np.dtype(dtype)
if dtype == np.void:
return np.zeros(shape, dtype=(dtype, 0))
# not constructable directly
dtype = np.dtype([('x', dtype, 0)])
return np.zeros(shape, dtype=dtype)['x']
def test_create(self):
zs = self._zeros(10, bytes)
assert_equal(zs.itemsize, 0)
zs = self._zeros(10, np.void)
assert_equal(zs.itemsize, 0)
zs = self._zeros(10, str)
assert_equal(zs.itemsize, 0)
def _test_sort_partition(self, name, kinds, **kwargs):
# Previously, these would all hang
for dt in [bytes, np.void, str]:
zs = self._zeros(10, dt)
sort_method = getattr(zs, name)
sort_func = getattr(np, name)
for kind in kinds:
sort_method(kind=kind, **kwargs)
sort_func(zs, kind=kind, **kwargs)
def test_sort(self):
self._test_sort_partition('sort', kinds='qhs')
def test_argsort(self):
self._test_sort_partition('argsort', kinds='qhs')
def test_partition(self):
self._test_sort_partition('partition', kinds=['introselect'], kth=2)
def test_argpartition(self):
self._test_sort_partition('argpartition', kinds=['introselect'], kth=2)
def test_resize(self):
# previously an error
for dt in [bytes, np.void, str]:
zs = self._zeros(10, dt)
zs.resize(25)
zs.resize((10, 10))
def test_view(self):
for dt in [bytes, np.void, str]:
zs = self._zeros(10, dt)
# viewing as itself should be allowed
assert_equal(zs.view(dt).dtype, np.dtype(dt))
# viewing as any non-empty type gives an empty result
assert_equal(zs.view((dt, 1)).shape, (0,))
def test_dumps(self):
zs = self._zeros(10, int)
assert_equal(zs, pickle.loads(zs.dumps()))
def test_pickle(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
for dt in [bytes, np.void, str]:
zs = self._zeros(10, dt)
p = pickle.dumps(zs, protocol=proto)
zs2 = pickle.loads(p)
assert_equal(zs.dtype, zs2.dtype)
def test_pickle_empty(self):
"""Checking if an empty array pickled and un-pickled will not cause a
segmentation fault"""
arr = np.array([]).reshape(999999, 0)
pk_dmp = pickle.dumps(arr)
pk_load = pickle.loads(pk_dmp)
assert pk_load.size == 0
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
reason="requires pickle protocol 5")
def test_pickle_with_buffercallback(self):
array = np.arange(10)
buffers = []
bytes_string = pickle.dumps(array, buffer_callback=buffers.append,
protocol=5)
array_from_buffer = pickle.loads(bytes_string, buffers=buffers)
# when using pickle protocol 5 with buffer callbacks,
# array_from_buffer is reconstructed from a buffer holding a view
# to the initial array's data, so modifying an element in array
# should modify it in array_from_buffer too.
array[0] = -1
assert array_from_buffer[0] == -1, array_from_buffer[0]
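    # Hedged sketch of the protocol-5 mechanism used above: serializing with
    # a buffer_callback keeps the array payload out-of-band, so the pickled
    # byte string stays small and deserialization can reuse the collected
    # buffers without copying.
    @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
                        reason="requires pickle protocol 5")
    def test_pickle_out_of_band_sketch(self):
        arr = np.zeros(1000, dtype=np.uint8)
        buffers = []
        payload = pickle.dumps(arr, protocol=5,
                               buffer_callback=buffers.append)
        # the raw data travels in `buffers`, not in `payload`
        assert len(payload) < arr.nbytes
        restored = pickle.loads(payload, buffers=buffers)
        assert_equal(restored, arr)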
class TestMethods:
sort_kinds = ['quicksort', 'heapsort', 'stable']
def test_all_where(self):
a = np.array([[True, False, True],
[False, False, False],
[True, True, True]])
wh_full = np.array([[True, False, True],
[False, False, False],
[True, False, True]])
wh_lower = np.array([[False],
[False],
[True]])
for _ax in [0, None]:
assert_equal(a.all(axis=_ax, where=wh_lower),
np.all(a[wh_lower[:,0],:], axis=_ax))
assert_equal(np.all(a, axis=_ax, where=wh_lower),
a[wh_lower[:,0],:].all(axis=_ax))
assert_equal(a.all(where=wh_full), True)
assert_equal(np.all(a, where=wh_full), True)
assert_equal(a.all(where=False), True)
assert_equal(np.all(a, where=False), True)
def test_any_where(self):
a = np.array([[True, False, True],
[False, False, False],
[True, True, True]])
wh_full = np.array([[False, True, False],
[True, True, True],
[False, False, False]])
wh_middle = np.array([[False],
[True],
[False]])
for _ax in [0, None]:
assert_equal(a.any(axis=_ax, where=wh_middle),
np.any(a[wh_middle[:,0],:], axis=_ax))
assert_equal(np.any(a, axis=_ax, where=wh_middle),
a[wh_middle[:,0],:].any(axis=_ax))
assert_equal(a.any(where=wh_full), False)
assert_equal(np.any(a, where=wh_full), False)
assert_equal(a.any(where=False), False)
assert_equal(np.any(a, where=False), False)
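    # Sketch of the reduction identities the where= tests above rely on:
    # with where=False nothing is inspected, so `all` returns its identity
    # (True) and `any` returns its identity (False) regardless of the data.
    def test_where_identity_sketch(self):
        a = np.array([False, True])
        assert np.all(a, where=False)
        assert not np.any(a, where=False)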
def test_compress(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = arr.compress([0, 1, 0, 1, 0], axis=1)
assert_equal(out, tgt)
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=1)
assert_equal(out, tgt)
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1])
assert_equal(out, 1)
def test_choose(self):
x = 2*np.ones((3,), dtype=int)
y = 3*np.ones((3,), dtype=int)
x2 = 2*np.ones((2, 3), dtype=int)
y2 = 3*np.ones((2, 3), dtype=int)
ind = np.array([0, 0, 1])
A = ind.choose((x, y))
assert_equal(A, [2, 2, 3])
A = ind.choose((x2, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
A = ind.choose((x, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
oned = np.ones(1)
# gh-12031, caused SEGFAULT
assert_raises(TypeError, oned.choose,np.void(0), [oned])
out = np.array(0)
ret = np.choose(np.array(1), [10, 20, 30], out=out)
assert out is ret
assert_equal(out[()], 20)
# gh-6272 check overlap on out
x = np.arange(5)
y = np.choose([0,0,0], [x[:3], x[:3], x[:3]], out=x[1:4], mode='wrap')
assert_equal(y, np.array([0, 1, 2]))
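    # Minimal sketch of the choose semantics tested above: each entry of the
    # index array selects, element-wise, from the broadcast choice arrays.
    def test_choose_elementwise_sketch(self):
        ind = np.array([0, 1, 0])
        choices = (np.array([10, 11, 12]), np.array([20, 21, 22]))
        assert_equal(ind.choose(choices), [10, 21, 12])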
def test_prod(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int16, np.uint16, np.int32, np.uint32,
np.float32, np.float64, np.complex64, np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
            if np.dtype(ctype).char in ['1', 'b']:
assert_raises(ArithmeticError, a.prod)
assert_raises(ArithmeticError, a2.prod, axis=1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
np.array([50, 36, 84, 180], ctype))
assert_array_equal(a2.prod(axis=-1),
np.array([24, 1890, 600], ctype))
def test_repeat(self):
m = np.array([1, 2, 3, 4, 5, 6])
m_rect = m.reshape((2, 3))
A = m.repeat([1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
A = m.repeat(2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
A = m_rect.repeat([2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = m_rect.repeat([1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
A = m_rect.repeat(2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = m_rect.repeat(2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
def test_reshape(self):
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(arr.reshape(2, 6), tgt)
tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
assert_equal(arr.reshape(3, 4), tgt)
tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]]
assert_equal(arr.reshape((3, 4), order='F'), tgt)
tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
assert_equal(arr.T.reshape((3, 4), order='C'), tgt)
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert out is res
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_squeeze(self):
a = np.array([[[1], [2], [3]]])
assert_equal(a.squeeze(), [1, 2, 3])
assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]])
assert_raises(ValueError, a.squeeze, axis=(1,))
assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]])
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
assert_raises(ValueError, lambda: a.transpose(0))
assert_raises(ValueError, lambda: a.transpose(0, 0))
assert_raises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the less-than comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
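    # Sketch of the ordering convention the nan tests above assume: NaNs
    # sort to the end for real dtypes, and complex values sort
    # lexicographically on (real, imag) with NaNs again placed last.
    def test_nan_sorts_last_sketch(self):
        b = np.sort(np.array([2.0, np.nan, 1.0]))
        assert_equal(b[:2], [1.0, 2.0])
        assert np.isnan(b[2])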
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
@pytest.mark.parametrize('dtype', [np.uint8, np.uint16, np.uint32, np.uint64,
np.float16, np.float32, np.float64,
np.longdouble])
def test_sort_unsigned(self, dtype):
a = np.arange(101, dtype=dtype)
b = a[::-1].copy()
for kind in self.sort_kinds:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
@pytest.mark.parametrize('dtype',
[np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64, np.longdouble])
def test_sort_signed(self, dtype):
a = np.arange(-50, 51, dtype=dtype)
b = a[::-1].copy()
for kind in self.sort_kinds:
msg = "scalar sort, kind=%s" % (kind)
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
@pytest.mark.parametrize('dtype', [np.float32, np.float64, np.longdouble])
@pytest.mark.parametrize('part', ['real', 'imag'])
def test_sort_complex(self, part, dtype):
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
cdtype = {
np.single: np.csingle,
np.double: np.cdouble,
np.longdouble: np.clongdouble,
}[dtype]
a = np.arange(-50, 51, dtype=dtype)
b = a[::-1].copy()
ai = (a * (1+1j)).astype(cdtype)
bi = (b * (1+1j)).astype(cdtype)
setattr(ai, part, 1)
setattr(bi, part, 1)
for kind in self.sort_kinds:
msg = "complex sort, %s part == 1, kind=%s" % (part, kind)
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
def test_sort_complex_byte_swapping(self):
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
@pytest.mark.parametrize('dtype', [np.bytes_, np.unicode_])
def test_sort_string(self, dtype):
# np.array will perform the encoding to bytes for us in the bytes test
a = np.array(['aaaaaaaa' + chr(i) for i in range(101)], dtype=dtype)
b = a[::-1].copy()
for kind in self.sort_kinds:
msg = "kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
def test_sort_object(self):
# test object array sorts.
a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
def test_sort_structured(self):
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
@pytest.mark.parametrize('dtype', ['datetime64[D]', 'timedelta64[D]'])
def test_sort_time(self, dtype):
# test datetime64 and timedelta64 sorts.
a = np.arange(0, 101, dtype=dtype)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
def test_sort_axis(self):
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
def test_sort_size_0(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
def test_sort_bad_ordering(self):
# test generic class with bogus ordering,
# should not segfault.
class Boom:
def __lt__(self, other):
return True
a = np.array([Boom()] * 100, dtype=object)
for kind in self.sort_kinds:
msg = "kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
def test_void_sort(self):
# gh-8210 - previously segfaulted
for i in range(4):
rand = np.random.randint(256, size=4000, dtype=np.uint8)
arr = rand.view('V4')
arr[::-1].sort()
dt = np.dtype([('val', 'i4', (1,))])
for i in range(4):
rand = np.random.randint(256, size=4000, dtype=np.uint8)
arr = rand.view(dt)
arr[::-1].sort()
def test_sort_raises(self):
#gh-9404
arr = np.array([0, datetime.now(), 1], dtype=object)
for kind in self.sort_kinds:
assert_raises(TypeError, arr.sort, kind=kind)
#gh-3879
class Raiser:
def raises_anything(*args, **kwargs):
raise TypeError("SOMETHING ERRORED")
__eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything
arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1)
np.random.shuffle(arr)
for kind in self.sort_kinds:
assert_raises(TypeError, arr.sort, kind=kind)
def test_sort_degraded(self):
        # test a degraded dataset that would take minutes to run with plain quicksort
d = np.arange(1000000)
do = d.copy()
x = d
        # create a median-of-3 killer input where each median is the sorted
        # second-to-last element of the quicksort partition
while x.size > 3:
mid = x.size // 2
x[mid], x[-2] = x[-2], x[mid]
x = x[:-2]
assert_equal(np.sort(d), do)
assert_equal(d[np.argsort(d)], do)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
@pytest.mark.parametrize("dtype", ['O', np.int32, 'i,O'])
def test__deepcopy__(self, dtype):
# Force the entry of NULLs into array
a = np.empty(4, dtype=dtype)
ctypes.memset(a.ctypes.data, 0, a.nbytes)
# Ensure no error is raised, see gh-21833
b = a.__deepcopy__({})
a[0] = 42
with pytest.raises(AssertionError):
assert_array_equal(a, b)
def test__deepcopy__catches_failure(self):
class MyObj:
def __deepcopy__(self, *args, **kwargs):
raise RuntimeError
arr = np.array([1, MyObj(), 3], dtype='O')
with pytest.raises(RuntimeError):
arr.__deepcopy__({})
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
assert_raises_regex(ValueError, 'duplicate',
lambda: r.sort(order=['id', 'id']))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', 'U5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
for dtype in [np.int32, np.uint32, np.float32]:
a = np.arange(101, dtype=dtype)
b = a[::-1].copy()
for kind in self.sort_kinds:
msg = "scalar argsort, kind=%s, dtype=%s" % (kind, dtype)
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in self.sort_kinds:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in self.sort_kinds:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in self.sort_kinds:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode_)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in self.sort_kinds:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in self.sort_kinds:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in self.sort_kinds:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode_)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
@pytest.mark.parametrize('a', [
np.array([0, 1, np.nan], dtype=np.float16),
np.array([0, 1, np.nan], dtype=np.float32),
np.array([0, 1, np.nan]),
])
def test_searchsorted_floats(self, a):
# test for floats arrays containing nans. Explicitly test
# half, single, and double precision floats to verify that
# the NaN-handling is correct.
msg = "Test real (%s) searchsorted with nans, side='l'" % a.dtype
b = a.searchsorted(a, side='left')
assert_equal(b, np.arange(3), msg)
msg = "Test real (%s) searchsorted with nans, side='r'" % a.dtype
b = a.searchsorted(a, side='right')
assert_equal(b, np.arange(1, 4), msg)
# check keyword arguments
a.searchsorted(v=1)
x = np.array([0, 1, np.nan], dtype='float32')
y = np.searchsorted(x, x[-1])
assert_equal(y, 2)
def test_searchsorted_complex(self):
# test for complex arrays containing nans.
        # The searchsorted routines use the type-specific compare functions,
        # so this checks that they are consistent with the sort order.
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='left')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='right')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
def test_searchsorted_n_elements(self):
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'left')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'right')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'left')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'right')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'left')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'right')
assert_equal(b, [0, 2, 2])
def test_searchsorted_unaligned_array(self):
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'left')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'right')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'left')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'right')
assert_equal(b, a + 1)
def test_searchsorted_resetting(self):
# Test smart resetting of binsearch indices
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'left')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'right')
assert_equal(b, [5, 5, 5])
def test_searchsorted_type_specific(self):
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'left')
assert_equal(b, out)
b = a.searchsorted(a, 'right')
assert_equal(b, out + 1)
# Test empty array, use a fresh array to get warnings in
# valgrind if access happens.
e = np.ndarray(shape=0, buffer=b'', dtype=dt)
b = e.searchsorted(a, 'left')
assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
b = a.searchsorted(e, 'left')
assert_array_equal(b, np.zeros(0, dtype=np.intp))
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode_)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_invalid_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0,
sorter=np.array((1, (2, 3)), dtype=object))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
def test_searchsorted_with_sorter(self):
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='left', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='right', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
        np.random.shuffle(a)
        s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'left', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'right', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'left', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'right', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
                # We want the sorter array to be of a type that is different
                # from np.intp on all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
                # We want the sorter array to be of a type that is different
                # from np.intp on all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'left', s)
assert_equal(b, out)
b = a.searchsorted(a, 'right', s)
assert_equal(b, out + 1)
# Test empty array, use a fresh array to get warnings in
# valgrind if access happens.
e = np.ndarray(shape=0, buffer=b'', dtype=dt)
b = e.searchsorted(a, 'left', s[:0])
assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
b = a.searchsorted(e, 'left', s)
assert_array_equal(b, np.zeros(0, dtype=np.intp))
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'left', s)
assert_equal(b, out)
b = a.searchsorted(a, 'right', s)
assert_equal(b, out + 1)
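    # Minimal sketch of the sorter contract exercised above:
    # a.searchsorted(v, sorter=s) must agree with searching the explicitly
    # sorted array, np.sort(a).searchsorted(v).
    def test_searchsorted_sorter_equivalence_sketch(self):
        a = np.array([30, 10, 20])
        s = a.argsort()
        v = [5, 10, 25]
        assert_equal(a.searchsorted(v, sorter=s),
                     np.sort(a).searchsorted(v))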
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'left'), A))
assert_(not isinstance(a.searchsorted(b, 'right'), A))
assert_(not isinstance(a.searchsorted(b, 'left', s), A))
assert_(not isinstance(a.searchsorted(b, 'right', s), A))
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_argpartition_out_of_range(self, dtype):
        # Test that out-of-range values in kth raise an error, gh-5469
d = np.arange(10).astype(dtype=dtype)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_partition_out_of_range(self, dtype):
        # Test that out-of-range values in kth raise an error, gh-5469
d = np.arange(10).astype(dtype=dtype)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
def test_argpartition_integer(self):
        # Test that non-integer values in kth raise an error
d = np.arange(10)
assert_raises(TypeError, d.argpartition, 9.)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.argpartition, 9.)
def test_partition_integer(self):
        # Test that non-integer values in kth raise an error
d = np.arange(10)
assert_raises(TypeError, d.partition, 9.)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.partition, 9.)
@pytest.mark.parametrize("kth_dtype", np.typecodes["AllInteger"])
def test_partition_empty_array(self, kth_dtype):
# check axis handling for multidimensional empty arrays
kth = np.array(0, dtype=kth_dtype)[()]
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, kth, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, kth, axis=None), a.ravel(), msg)
@pytest.mark.parametrize("kth_dtype", np.typecodes["AllInteger"])
def test_argpartition_empty_array(self, kth_dtype):
# check axis handling for multidimensional empty arrays
kth = np.array(0, dtype=kth_dtype)[()]
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
            assert_equal(np.argpartition(a, kth, axis=axis),
                         np.zeros_like(a, dtype=np.intp), msg)
        msg = 'test empty array argpartition with axis=None'
        assert_equal(np.argpartition(a, kth, axis=None),
                     np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones(1)
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones(50)
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange(49)
assert_equal(np.partition(d, 5, kind=k)[5], 5)
assert_equal(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange(47)[::-1]
assert_equal(np.partition(d, 6, kind=k)[6], 6)
assert_equal(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
# median of 3 killer, O(n^2) on pure median 3 pivot quickselect
# exercises the median of median of 5 code used to keep O(n)
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange(47) % 7
tgt = np.sort(np.arange(47) % 7)
np.random.shuffle(d)
for i in range(d.size):
assert_equal(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(np.AxisError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(np.AxisError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(np.AxisError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(np.AxisError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = assert_
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
            # map() is lazy in Python 3, so shuffle each row explicitly
            for row in d1:
                np.random.shuffle(row)
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
assert_equal(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:i, :] <= p[i, :]).all(),
msg="%d: %r <= %r" % (i, p[i, :], p[:i, :]))
at((p[i + 1:, :] > p[i, :]).all(),
msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None, :]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
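    # Hedged illustration of the invariant assert_partitioned encodes: after
    # np.partition(d, k) the element at k is in its sorted position, with
    # everything before it <= d[k] and everything after >= d[k]; no other
    # ordering is guaranteed.
    def test_partition_invariant_sketch(self):
        d = np.array([9, 1, 8, 2, 7, 3])
        k = 2
        p = np.partition(d, k)
        assert_equal(p[k], np.sort(d)[k])
        assert (p[:k] <= p[k]).all()
        assert (p[k + 1:] >= p[k]).all()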
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
        # map() is lazy on Python 3, so shuffle each row explicitly
        for row in d1:
            np.random.shuffle(row)
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
@pytest.mark.parametrize("kth_dtype", np.typecodes["AllInteger"])
def test_argpartition_gh5524(self, kth_dtype):
# A test for functionality of argpartition on lists.
kth = np.array(1, dtype=kth_dtype)[()]
d = [6, 7, 3, 2, 9, 0]
p = np.argpartition(d, kth)
        self.assert_partitioned(np.array(d)[p], [1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
@pytest.mark.parametrize('func', (np.dot, np.matmul))
def test_arr_mult(self, func):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
d = np.arange(24).reshape(4, 6)
ddt = np.array(
[[ 55, 145, 235, 325],
[ 145, 451, 757, 1063],
[ 235, 757, 1279, 1801],
[ 325, 1063, 1801, 2539]]
)
dtd = np.array(
[[504, 540, 576, 612, 648, 684],
[540, 580, 620, 660, 700, 740],
[576, 620, 664, 708, 752, 796],
[612, 660, 708, 756, 804, 852],
[648, 700, 752, 804, 856, 908],
[684, 740, 796, 852, 908, 964]]
)
# gemm vs syrk optimizations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
assert_equal(func(eaf, eaf), eaf)
assert_equal(func(eaf.T, eaf), eaf)
assert_equal(func(eaf, eaf.T), eaf)
assert_equal(func(eaf.T, eaf.T), eaf)
assert_equal(func(eaf.T.copy(), eaf), eaf)
assert_equal(func(eaf, eaf.T.copy()), eaf)
assert_equal(func(eaf.T.copy(), eaf.T.copy()), eaf)
# syrk validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
ebf = b.astype(et)
assert_equal(func(ebf, ebf), eaf)
assert_equal(func(ebf.T, ebf), eaf)
assert_equal(func(ebf, ebf.T), eaf)
assert_equal(func(ebf.T, ebf.T), eaf)
# syrk - different shape, stride, and view validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
assert_equal(
func(edf[::-1, :], edf.T),
func(edf[::-1, :].copy(), edf.T.copy())
)
assert_equal(
func(edf[:, ::-1], edf.T),
func(edf[:, ::-1].copy(), edf.T.copy())
)
assert_equal(
func(edf, edf[::-1, :].T),
func(edf, edf[::-1, :].T.copy())
)
assert_equal(
func(edf, edf[:, ::-1].T),
func(edf, edf[:, ::-1].T.copy())
)
assert_equal(
func(edf[:edf.shape[0] // 2, :], edf[::2, :].T),
func(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())
)
assert_equal(
func(edf[::2, :], edf[:edf.shape[0] // 2, :].T),
func(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())
)
# syrk - different shape
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
eddtf = ddt.astype(et)
edtdf = dtd.astype(et)
assert_equal(func(edf, edf.T), eddtf)
assert_equal(func(edf.T, edf), edtdf)
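    # Illustrative sketch (ours): the syrk-style product of a matrix with
    # its own transpose is symmetric by construction, which is the
    # property the optimized BLAS paths exercised above rely on.
    def test_syrk_symmetry_example(self):
        m = np.random.RandomState(42).rand(5, 3)
        prod = np.dot(m, m.T)
        assert_array_almost_equal(prod, prod.T)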
@pytest.mark.parametrize('func', (np.dot, np.matmul))
@pytest.mark.parametrize('dtype', 'ifdFD')
def test_no_dgemv(self, func, dtype):
        # check that the vector argument is made contiguous before
        # calling gemv
        # gh-12156
a = np.arange(8.0, dtype=dtype).reshape(2, 4)
b = np.broadcast_to(1., (4, 1))
ret1 = func(a, b)
ret2 = func(a, b.copy())
assert_equal(ret1, ret2)
ret1 = func(b.T, a.T)
ret2 = func(b.T.copy(), a.T)
assert_equal(ret1, ret2)
# check for unaligned data
dt = np.dtype(dtype)
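        # allocate 8 elements' worth of bytes as int16 plus one extra
        # int16, then drop the leading int16: the view below is offset by
        # 2 bytes, which is not a multiple of the itemsize of any of the
        # 4-byte-and-wider dtypes tested here (descriptive note, ours)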
a = np.zeros(8 * dt.itemsize // 2 + 1, dtype='int16')[1:].view(dtype)
a = a.reshape(2, 4)
b = a[0]
# make sure it is not aligned
assert_(a.__array_interface__['data'][0] % dt.itemsize != 0)
ret1 = func(a, b)
ret2 = func(a.copy(), b.copy())
assert_equal(ret1, ret2)
ret1 = func(b.T, a.T)
ret2 = func(b.T.copy(), a.T.copy())
assert_equal(ret1, ret2)
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
# function versus methods
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.dot, c, A)
assert_raises(TypeError, np.dot, A, c)
def test_dot_out_mem_overlap(self):
np.random.seed(1)
# Test BLAS and non-BLAS code paths, including all dtypes
# that dot() supports
dtypes = [np.dtype(code) for code in np.typecodes['All']
if code not in 'USVM']
for dtype in dtypes:
a = np.random.rand(3, 3).astype(dtype)
# Valid dot() output arrays must be aligned
b = _aligned_zeros((3, 3), dtype=dtype)
b[...] = np.random.rand(3, 3)
y = np.dot(a, b)
x = np.dot(a, b, out=b)
assert_equal(x, y, err_msg=repr(dtype))
# Check invalid output array
assert_raises(ValueError, np.dot, a, b, out=b[::2])
assert_raises(ValueError, np.dot, a, b, out=b.T)
def test_dot_matmul_out(self):
# gh-9641
class Sub(np.ndarray):
pass
a = np.ones((2, 2)).view(Sub)
b = np.ones((2, 2)).view(Sub)
out = np.ones((2, 2))
# make sure out can be any ndarray (not only subclass of inputs)
np.dot(a, b, out=out)
np.matmul(a, b, out=out)
def test_dot_matmul_inner_array_casting_fails(self):
class A:
def __array__(self, *args, **kwargs):
raise NotImplementedError
# Don't override the error from calling __array__()
assert_raises(NotImplementedError, np.dot, A(), A())
assert_raises(NotImplementedError, np.matmul, A(), A())
assert_raises(NotImplementedError, np.inner, A(), A())
def test_matmul_out(self):
# overlapping memory
a = np.arange(18).reshape(2, 3, 3)
b = np.matmul(a, a)
c = np.matmul(a, a, out=a)
assert_(c is a)
assert_equal(c, b)
a = np.arange(18).reshape(2, 3, 3)
c = np.matmul(a, a, out=a[::-1, ...])
assert_(c.base is a.base)
assert_equal(c, b)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
assert_raises(np.AxisError, a.diagonal, axis1=0, axis2=5)
assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=0)
assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=5)
assert_raises(ValueError, a.diagonal, axis1=1, axis2=1)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
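    # Clarifying sketch (ours): since the diagonal comes back as a
    # read-only view, writing a diagonal in place goes through
    # np.fill_diagonal instead.
    def test_fill_diagonal_example(self):
        a = np.zeros((3, 3))
        np.fill_diagonal(a, 1.0)
        assert_array_equal(a, np.eye(3))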
def test_diagonal_memleak(self):
        # Regression test for a reference leak that crept in at one point
a = np.zeros((100, 100))
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
def test_size_zero_memleak(self):
# Regression test for issue 9615
# Exercises a special-case code path for dot products of length
        # zero in cblasfuncs (making it specific to floating dtypes).
a = np.array([], dtype=np.float64)
x = np.array(2.0)
for _ in range(100):
np.dot(a, a, out=x)
if HAS_REFCOUNT:
assert_(sys.getrefcount(x) < 50)
def test_trace(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.trace(), 15)
assert_equal(a.trace(0), 15)
assert_equal(a.trace(1), 18)
assert_equal(a.trace(-1), 13)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.trace(), [6, 8])
assert_equal(b.trace(0), [6, 8])
assert_equal(b.trace(1), [2, 3])
assert_equal(b.trace(-1), [4, 5])
assert_equal(b.trace(0, 0, 1), [6, 8])
assert_equal(b.trace(0, 0, 2), [5, 9])
assert_equal(b.trace(0, 1, 2), [3, 11])
assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3])
out = np.array(1)
ret = a.trace(out=out)
assert ret is out
def test_trace_subclass(self):
# The class would need to overwrite trace to ensure single-element
# output also has the right subclass.
class MyArray(np.ndarray):
pass
b = np.arange(8).reshape((2, 2, 2)).view(MyArray)
t = b.trace()
assert_(isinstance(t, MyArray))
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
# when calling np.put, make sure a
# TypeError is raised if the object
# isn't an ndarray
bad_array = [1, 2, 3]
assert_raises(TypeError, np.put, bad_array, [0, 2], 5)
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
# Test simple 1-d copy behaviour:
a = np.arange(10)[::2]
assert_(a.ravel('K').flags.owndata)
assert_(a.ravel('C').flags.owndata)
assert_(a.ravel('F').flags.owndata)
        # Not contiguous and 1-sized axis with non-matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
        # contiguous and 1-sized axis with non-matching stride works:
a = np.arange(2**3)
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel(order='K'), np.arange(2**3))
# Test negative strides (not very interesting since non-contiguous):
a = np.arange(4)[::-1].reshape(2, 2)
assert_(a.ravel(order='C').flags.owndata)
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# 1-element tidy strides test:
a = np.array([[1]])
a.strides = (123, 432)
# If the following stride is not 8, NPY_RELAXED_STRIDES_DEBUG is
# messing them up on purpose:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
# Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_ravel_subclass(self):
class ArraySubclass(np.ndarray):
pass
a = np.arange(10).view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
a = np.arange(10)[::2].view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(np.AxisError, a.swapaxes, -5, 0)
assert_raises(np.AxisError, a.swapaxes, 4, 0)
assert_raises(np.AxisError, a.swapaxes, 0, -5)
assert_raises(np.AxisError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(TypeError, lambda: a.conj())
assert_raises(TypeError, lambda: a.conjugate())
def test_conjugate_out(self):
# Minimal test for the out argument being passed on correctly
# NOTE: The ability to pass `out` is currently undocumented!
a = np.array([1-1j, 1+1j, 23+23.0j])
out = np.empty_like(a)
res = a.conjugate(out)
assert res is out
assert_array_equal(out, a.conjugate())
def test__complex__(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array(7, dtype=dt)
b = np.array([7], dtype=dt)
c = np.array([[[[[7]]]]], dtype=dt)
msg = 'dtype: {0}'.format(dt)
ap = complex(a)
assert_equal(ap, a, msg)
bp = complex(b)
assert_equal(bp, b, msg)
cp = complex(c)
assert_equal(cp, c, msg)
def test__complex__should_not_work(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array([1, 2, 3], dtype=dt)
assert_raises(TypeError, complex, a)
dt = np.dtype([('a', 'f8'), ('b', 'i1')])
b = np.array((1.0, 3), dtype=dt)
assert_raises(TypeError, complex, b)
c = np.array([(1.0, 3), (2e-3, 7)], dtype=dt)
assert_raises(TypeError, complex, c)
d = np.array('1+1j')
assert_raises(TypeError, complex, d)
e = np.array(['1+1j'], 'U')
assert_raises(TypeError, complex, e)
class TestSequenceMethods:
    def test_array_contains(self):
        assert_(4.0 in np.arange(16.).reshape(4, 4))
        assert_(20.0 not in np.arange(16.).reshape(4, 4))
class TestBinop:
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
# ndarray.__rop__ always calls ufunc
# ndarray.__iop__ always calls ufunc
# ndarray.__op__, __rop__:
# - defer if other has __array_ufunc__ and it is None
# or other is not a subclass and has higher array priority
# - else, call ufunc
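    # Minimal sketch of the rules above (our own example, not part of the
    # original matrix of checks): __array_ufunc__ = None makes
    # ndarray.__add__ return NotImplemented, so Python falls back to the
    # other operand's __radd__.
    def test_array_ufunc_none_defers_example(self):
        class Deferring:
            __array_ufunc__ = None
            def __radd__(self, other):
                return "deferred"
        assert_equal(np.arange(3) + Deferring(), "deferred")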
def test_ufunc_binop_interaction(self):
# Python method name (without underscores)
# -> (numpy ufunc, has_in_place_version, preferred_dtype)
ops = {
'add': (np.add, True, float),
'sub': (np.subtract, True, float),
'mul': (np.multiply, True, float),
'truediv': (np.true_divide, True, float),
'floordiv': (np.floor_divide, True, float),
'mod': (np.remainder, True, float),
'divmod': (np.divmod, False, float),
'pow': (np.power, True, int),
'lshift': (np.left_shift, True, int),
'rshift': (np.right_shift, True, int),
'and': (np.bitwise_and, True, int),
'xor': (np.bitwise_xor, True, int),
'or': (np.bitwise_or, True, int),
'matmul': (np.matmul, False, float),
            # 'ge': (np.greater_equal, False),
            # 'gt': (np.greater, False),
            # 'le': (np.less_equal, False),
            # 'lt': (np.less, False),
            # 'eq': (np.equal, False),
            # 'ne': (np.not_equal, False),
}
class Coerced(Exception):
pass
def array_impl(self):
raise Coerced
def op_impl(self, other):
return "forward"
def rop_impl(self, other):
return "reverse"
def iop_impl(self, other):
return "in-place"
def array_ufunc_impl(self, ufunc, method, *args, **kwargs):
return ("__array_ufunc__", ufunc, method, args, kwargs)
# Create an object with the given base, in the given module, with a
# bunch of placeholder __op__ methods, and optionally a
# __array_ufunc__ and __array_priority__.
def make_obj(base, array_priority=False, array_ufunc=False,
alleged_module="__main__"):
class_namespace = {"__array__": array_impl}
if array_priority is not False:
class_namespace["__array_priority__"] = array_priority
for op in ops:
class_namespace["__{0}__".format(op)] = op_impl
class_namespace["__r{0}__".format(op)] = rop_impl
class_namespace["__i{0}__".format(op)] = iop_impl
if array_ufunc is not False:
class_namespace["__array_ufunc__"] = array_ufunc
eval_namespace = {"base": base,
"class_namespace": class_namespace,
"__name__": alleged_module,
}
MyType = eval("type('MyType', (base,), class_namespace)",
eval_namespace)
if issubclass(MyType, np.ndarray):
# Use this range to avoid special case weirdnesses around
# divide-by-0, pow(x, 2), overflow due to pow(big, big), etc.
return np.arange(3, 7).reshape(2, 2).view(MyType)
else:
return MyType()
def check(obj, binop_override_expected, ufunc_override_expected,
inplace_override_expected, check_scalar=True):
for op, (ufunc, has_inplace, dtype) in ops.items():
err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s'
% (op, ufunc, has_inplace, dtype))
check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)]
if check_scalar:
check_objs.append(check_objs[0][0])
for arr in check_objs:
arr_method = getattr(arr, "__{0}__".format(op))
def first_out_arg(result):
if op == "divmod":
assert_(isinstance(result, tuple))
return result[0]
else:
return result
# arr __op__ obj
if binop_override_expected:
assert_equal(arr_method(obj), NotImplemented, err_msg)
elif ufunc_override_expected:
assert_equal(arr_method(obj)[0], "__array_ufunc__",
err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
res = first_out_arg(arr_method(obj))
assert_(res.__class__ is obj.__class__, err_msg)
else:
                            assert_raises((TypeError, Coerced),
                                          arr_method, obj)
# obj __op__ arr
arr_rmethod = getattr(arr, "__r{0}__".format(op))
if ufunc_override_expected:
res = arr_rmethod(obj)
assert_equal(res[0], "__array_ufunc__",
err_msg=err_msg)
assert_equal(res[1], ufunc, err_msg=err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
res = first_out_arg(arr_rmethod(obj))
assert_(res.__class__ is obj.__class__, err_msg)
else:
# __array_ufunc__ = "asdf" creates a TypeError
                            assert_raises((TypeError, Coerced),
                                          arr_rmethod, obj)
# arr __iop__ obj
# array scalars don't have in-place operators
if has_inplace and isinstance(arr, np.ndarray):
arr_imethod = getattr(arr, "__i{0}__".format(op))
if inplace_override_expected:
                            assert_equal(arr_imethod(obj), NotImplemented,
                                         err_msg=err_msg)
elif ufunc_override_expected:
res = arr_imethod(obj)
assert_equal(res[0], "__array_ufunc__", err_msg)
assert_equal(res[1], ufunc, err_msg)
assert_(type(res[-1]["out"]) is tuple, err_msg)
assert_(res[-1]["out"][0] is arr, err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
assert_(arr_imethod(obj) is arr, err_msg)
else:
                                assert_raises((TypeError, Coerced),
                                              arr_imethod, obj)
op_fn = getattr(operator, op, None)
if op_fn is None:
op_fn = getattr(operator, op + "_", None)
if op_fn is None:
op_fn = getattr(builtins, op)
assert_equal(op_fn(obj, arr), "forward", err_msg)
if not isinstance(obj, np.ndarray):
if binop_override_expected:
assert_equal(op_fn(arr, obj), "reverse", err_msg)
elif ufunc_override_expected:
assert_equal(op_fn(arr, obj)[0], "__array_ufunc__",
err_msg)
if ufunc_override_expected:
assert_equal(ufunc(obj, arr)[0], "__array_ufunc__",
err_msg)
# No array priority, no array_ufunc -> nothing called
check(make_obj(object), False, False, False)
# Negative array priority, no array_ufunc -> nothing called
# (has to be very negative, because scalar priority is -1000000.0)
check(make_obj(object, array_priority=-2**30), False, False, False)
# Positive array priority, no array_ufunc -> binops and iops only
check(make_obj(object, array_priority=1), True, False, True)
# ndarray ignores array_priority for ndarray subclasses
check(make_obj(np.ndarray, array_priority=1), False, False, False,
check_scalar=False)
# Positive array_priority and array_ufunc -> array_ufunc only
check(make_obj(object, array_priority=1,
array_ufunc=array_ufunc_impl), False, True, False)
check(make_obj(np.ndarray, array_priority=1,
array_ufunc=array_ufunc_impl), False, True, False)
# array_ufunc set to None -> defer binops only
check(make_obj(object, array_ufunc=None), True, False, False)
check(make_obj(np.ndarray, array_ufunc=None), True, False, False,
check_scalar=False)
@pytest.mark.parametrize("priority", [None, "runtime error"])
def test_ufunc_binop_bad_array_priority(self, priority):
        # Mainly checks that this does not crash.  The LowPriority array
        # reports a priority below -1 (the "error value"); if BadPriority's
        # __radd__ actually runs, bad things can happen (likely via the
        # scalar paths).
        # In principle, both of these cases could simply become errors in
        # the future.
class BadPriority:
@property
def __array_priority__(self):
if priority == "runtime error":
raise RuntimeError("RuntimeError in __array_priority__!")
return priority
def __radd__(self, other):
return "result"
class LowPriority(np.ndarray):
__array_priority__ = -1000
        # A failing __array_priority__ falls back to the scalar default,
        # which is below LowPriority's -1000, so the addition happens
        # element-wise and BadPriority's __radd__ yields 'result' for
        # each element (inner operation).
res = np.arange(3).view(LowPriority) + BadPriority()
assert res.shape == (3,)
assert res[0] == 'result'
def test_ufunc_override_normalize_signature(self):
# gh-5674
class SomeClass:
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
def test_array_ufunc_index(self):
        # Check that the index is set appropriately, also if only an
        # output is passed on (the latter is another regression test for
        # github bug 4753)
# This also checks implicitly that 'out' is always a tuple.
class CheckIndex:
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
for i, a in enumerate(inputs):
if a is self:
return i
# calls below mean we must be in an output.
for j, a in enumerate(kw['out']):
if a is self:
return (j,)
a = CheckIndex()
dummy = np.arange(2.)
# 1 input, 1 output
assert_equal(np.sin(a), 0)
assert_equal(np.sin(dummy, a), (0,))
assert_equal(np.sin(dummy, out=a), (0,))
assert_equal(np.sin(dummy, out=(a,)), (0,))
assert_equal(np.sin(a, a), 0)
assert_equal(np.sin(a, out=a), 0)
assert_equal(np.sin(a, out=(a,)), 0)
# 1 input, 2 outputs
assert_equal(np.modf(dummy, a), (0,))
assert_equal(np.modf(dummy, None, a), (1,))
assert_equal(np.modf(dummy, dummy, a), (1,))
assert_equal(np.modf(dummy, out=(a, None)), (0,))
assert_equal(np.modf(dummy, out=(a, dummy)), (0,))
assert_equal(np.modf(dummy, out=(None, a)), (1,))
assert_equal(np.modf(dummy, out=(dummy, a)), (1,))
assert_equal(np.modf(a, out=(dummy, a)), 0)
with assert_raises(TypeError):
# Out argument must be tuple, since there are multiple outputs
np.modf(dummy, out=a)
assert_raises(ValueError, np.modf, dummy, out=(a,))
# 2 inputs, 1 output
assert_equal(np.add(a, dummy), 0)
assert_equal(np.add(dummy, a), 1)
assert_equal(np.add(dummy, dummy, a), (0,))
assert_equal(np.add(dummy, a, a), 1)
assert_equal(np.add(dummy, dummy, out=a), (0,))
assert_equal(np.add(dummy, dummy, out=(a,)), (0,))
assert_equal(np.add(a, dummy, out=a), 0)
def test_out_override(self):
# regression test for github bug 4753
class OutClass(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
if 'out' in kw:
tmp_kw = kw.copy()
tmp_kw.pop('out')
func = getattr(ufunc, method)
kw['out'][0][...] = func(*inputs, **tmp_kw)
A = np.array([0]).view(OutClass)
B = np.array([5])
C = np.array([6])
np.multiply(C, B, A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
A[0] = 0
np.multiply(C, B, out=A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
def test_pow_override_with_errors(self):
# regression test for gh-9112
class PowerOnly(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
if ufunc is not np.power:
raise NotImplementedError
return "POWER!"
# explicit cast to float, to ensure the fast power path is taken.
a = np.array(5., dtype=np.float64).view(PowerOnly)
assert_equal(a ** 2.5, "POWER!")
with assert_raises(NotImplementedError):
a ** 0.5
with assert_raises(NotImplementedError):
a ** 0
with assert_raises(NotImplementedError):
a ** 1
with assert_raises(NotImplementedError):
a ** -1
with assert_raises(NotImplementedError):
a ** 2
def test_pow_array_object_dtype(self):
# test pow on arrays of object dtype
class SomeClass:
def __init__(self, num=None):
self.num = num
# want to ensure a fast pow path is not taken
def __mul__(self, other):
raise AssertionError('__mul__ should not be called')
def __div__(self, other):
raise AssertionError('__div__ should not be called')
def __pow__(self, exp):
return SomeClass(num=self.num ** exp)
def __eq__(self, other):
if isinstance(other, SomeClass):
return self.num == other.num
__rpow__ = __pow__
def pow_for(exp, arr):
return np.array([x ** exp for x in arr])
obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)])
assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr))
assert_equal(obj_arr ** 0, pow_for(0, obj_arr))
assert_equal(obj_arr ** 1, pow_for(1, obj_arr))
assert_equal(obj_arr ** -1, pow_for(-1, obj_arr))
assert_equal(obj_arr ** 2, pow_for(2, obj_arr))
def test_pos_array_ufunc_override(self):
class A(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return getattr(ufunc, method)(*[i.view(np.ndarray) for
i in inputs], **kwargs)
tst = np.array('foo').view(A)
with assert_raises(TypeError):
+tst
class TestTemporaryElide:
# elision is only triggered on relatively large arrays
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
# d = input.copy() # refcount 1
# return d, d + d # PyNumber_Add without increasing refcount
from numpy.core._multiarray_tests import incref_elide
d = np.ones(100000)
orig, res = incref_elide(d)
d + d
        # the returned original must not have been modified by an
        # in-place operation
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
        # Scanning the Python stack for the refcount-1 object, to verify
        # that we were called directly from Python, is flawed: the object
        # may still sit above the stack pointer, and we have no access to
        # the top of the stack.
#
# def incref_elide_l(d):
# return l[4] + l[4] # PyNumber_Add without increasing refcount
from numpy.core._multiarray_tests import incref_elide_l
# padding with 1 makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(100000)]
res = incref_elide_l(l)
        # the returned original must not have been modified by an
        # in-place operation
assert_array_equal(l[4], np.ones(100000))
assert_array_equal(res, l[4] + l[4])
def test_temporary_with_cast(self):
# check that we don't elide into a temporary which would need casting
d = np.ones(200000, dtype=np.int64)
assert_equal(((d + d) + 2**222).dtype, np.dtype('O'))
r = ((d + d) / 2)
assert_equal(r.dtype, np.dtype('f8'))
r = np.true_divide((d + d), 2)
assert_equal(r.dtype, np.dtype('f8'))
r = ((d + d) / 2.)
assert_equal(r.dtype, np.dtype('f8'))
r = ((d + d) // 2)
assert_equal(r.dtype, np.dtype(np.int64))
# commutative elision into the astype result
f = np.ones(100000, dtype=np.float32)
assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8'))
# no elision into lower type
d = f.astype(np.float64)
assert_equal(((f + f) + d).dtype, d.dtype)
l = np.ones(100000, dtype=np.longdouble)
assert_equal(((d + d) + l).dtype, l.dtype)
# test unary abs with different output dtype
for dt in (np.complex64, np.complex128, np.clongdouble):
c = np.ones(100000, dtype=dt)
r = abs(c * 2.0)
assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2)))
def test_elide_broadcast(self):
# test no elision on broadcast to higher dimension
        # this only triggers the elision code path in debug mode, since
        # triggering it in normal mode needs a matching dimension of
        # 256 KiB or more, i.e. a lot of memory
d = np.ones((2000, 1), dtype=int)
b = np.ones((2000), dtype=bool)
r = (1 - d) + b
assert_equal(r, 1)
assert_equal(r.shape, (2000, 2000))
def test_elide_scalar(self):
# check inplace op does not create ndarray from scalars
a = np.bool_()
assert_(type(~(a & a)) is np.bool_)
def test_elide_scalar_readonly(self):
        # The imaginary part of a real array is read-only.  This needs to
        # go through fast_scalar_power, which is only called for powers of
        # +1, -1, 0, 0.5, and 2, so use 2.  Elision also needs a valid
        # refcount, which the imaginary part of a real array provides.
        # Should not error.
a = np.empty(100000, dtype=np.float64)
a.imag ** 2
def test_elide_readonly(self):
# don't try to elide readonly temporaries
r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0
assert_equal(r, 0)
def test_elide_updateifcopy(self):
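        # the temporary from a.flat.__array__() on a non-contiguous array
        # has writeback-if-copy semantics; eliding the addition into it
        # would corrupt `a` when the buffer writes back (descriptive
        # note, ours)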
a = np.ones(2**20)[::2]
b = a.flat.__array__() + 1
del b
assert_equal(a, 1)
class TestCAPI:
def test_IsPythonScalar(self):
from numpy.core._multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
@pytest.mark.parametrize("converter",
[_multiarray_tests.run_scalar_intp_converter,
_multiarray_tests.run_scalar_intp_from_sequence])
def test_intp_sequence_converters(self, converter):
# Test simple values (-1 is special for error return paths)
assert converter(10) == (10,)
assert converter(-1) == (-1,)
# A 0-D array looks a bit like a sequence but must take the integer
# path:
assert converter(np.array(123)) == (123,)
# Test simple sequences (intp_from_sequence only supports length 1):
assert converter((10,)) == (10,)
assert converter(np.array([11])) == (11,)
@pytest.mark.parametrize("converter",
[_multiarray_tests.run_scalar_intp_converter,
_multiarray_tests.run_scalar_intp_from_sequence])
@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
reason="PyPy bug in error formatting")
def test_intp_sequence_converters_errors(self, converter):
with pytest.raises(TypeError,
match="expected a sequence of integers or a single integer, "):
converter(object())
with pytest.raises(TypeError,
match="expected a sequence of integers or a single integer, "
"got '32.0'"):
converter(32.)
with pytest.raises(TypeError,
match="'float' object cannot be interpreted as an integer"):
converter([32.])
with pytest.raises(ValueError,
match="Maximum allowed dimension"):
# These converters currently convert overflows to a ValueError
converter(2**64)
class TestSubscripting:
    def test_zero_rank(self):
x = np.array([1, 2, 3])
assert_(isinstance(x[0], np.int_))
assert_(type(x[0, ...]) is np.ndarray)
class TestPickling:
    @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5,
                        reason=('this tests the error messages when '
                                'trying to use protocol 5 although it '
                                'is not available'))
    def test_correct_protocol5_error_message(self):
        array = np.arange(10)
        # The original body appears truncated here; a minimal sketch of
        # the intended check (our assumption: requesting the unavailable
        # protocol surfaces as an exception):
        with pytest.raises((ImportError, ValueError)):
            array.__reduce_ex__(5)
def test_record_array_with_object_dtype(self):
my_object = object()
arr_with_object = np.array(
[(my_object, 1, 2.0)],
dtype=[('a', object), ('b', int), ('c', float)])
arr_without_object = np.array(
[('xxx', 1, 2.0)],
dtype=[('a', str), ('b', int), ('c', float)])
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
depickled_arr_with_object = pickle.loads(
pickle.dumps(arr_with_object, protocol=proto))
depickled_arr_without_object = pickle.loads(
pickle.dumps(arr_without_object, protocol=proto))
assert_equal(arr_with_object.dtype,
depickled_arr_with_object.dtype)
assert_equal(arr_without_object.dtype,
depickled_arr_without_object.dtype)
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
reason="requires pickle protocol 5")
def test_f_contiguous_array(self):
f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F')
buffers = []
# When using pickle protocol 5, Fortran-contiguous arrays can be
# serialized using out-of-band buffers
bytes_string = pickle.dumps(f_contiguous_array, protocol=5,
buffer_callback=buffers.append)
assert len(buffers) > 0
depickled_f_contiguous_array = pickle.loads(bytes_string,
buffers=buffers)
assert_equal(f_contiguous_array, depickled_f_contiguous_array)
def test_non_contiguous_array(self):
non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2]
assert not non_contiguous_array.flags.c_contiguous
assert not non_contiguous_array.flags.f_contiguous
# make sure non-contiguous arrays can be pickled-depickled
# using any protocol
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
depickled_non_contiguous_array = pickle.loads(
pickle.dumps(non_contiguous_array, protocol=proto))
assert_equal(non_contiguous_array, depickled_non_contiguous_array)
def test_roundtrip(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
refs = [weakref.ref(a) for a in DATA]
for a in DATA:
assert_equal(
a, pickle.loads(pickle.dumps(a, protocol=proto)),
err_msg="%r" % a)
del a, DATA, carray
break_cycles()
# check for reference leaks (gh-12793)
for ref in refs:
assert ref() is None
def _loads(self, obj):
return pickle.loads(obj, encoding='latin1')
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(s)
assert_equal(a, p)
def test_version0_float32(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(s)
assert_equal(a, p)
def test_version0_object(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(s)
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(s)
assert_equal(a, p)
def test_version1_float32(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(s)
assert_equal(a, p)
def test_version1_object(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(s)
assert_equal(a, p)
def test_subarray_int_shape(self):
s = b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(s)
assert_equal(a, p)
def test_datetime64_byteorder(self):
original = np.array([['2015-02-24T00:00:00.000000000']], dtype='datetime64[ns]')
original_byte_reversed = original.copy(order='K')
original_byte_reversed.dtype = original_byte_reversed.dtype.newbyteorder('S')
original_byte_reversed.byteswap(inplace=True)
new = pickle.loads(pickle.dumps(original_byte_reversed))
assert_equal(original.dtype, new.dtype)
class TestFancyIndexing:
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare:
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([u"This", u"is", u"example"])
g2 = np.array([u"This", u"was", u"example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmaxArgminCommon:
sizes = [(), (3,), (3, 2), (2, 3),
(3, 3), (2, 3, 4), (4, 3, 2),
(1, 2, 3, 4), (2, 3, 4, 1),
(3, 4, 1, 2), (4, 1, 2, 3),
(64,), (128,), (256,)]
@pytest.mark.parametrize("size, axis", itertools.chain(*[[(size, axis)
for axis in list(range(-len(size), len(size))) + [None]]
for size in sizes]))
@pytest.mark.parametrize('method', [np.argmax, np.argmin])
def test_np_argmin_argmax_keepdims(self, size, axis, method):
arr = np.random.normal(size=size)
# contiguous arrays
if axis is None:
new_shape = [1 for _ in range(len(size))]
else:
new_shape = list(size)
new_shape[axis] = 1
new_shape = tuple(new_shape)
_res_orig = method(arr, axis=axis)
res_orig = _res_orig.reshape(new_shape)
res = method(arr, axis=axis, keepdims=True)
assert_equal(res, res_orig)
assert_(res.shape == new_shape)
outarray = np.empty(res.shape, dtype=res.dtype)
res1 = method(arr, axis=axis, out=outarray,
keepdims=True)
assert_(res1 is outarray)
assert_equal(res, outarray)
if len(size) > 0:
wrong_shape = list(new_shape)
if axis is not None:
wrong_shape[axis] = 2
else:
wrong_shape[0] = 2
wrong_outarray = np.empty(wrong_shape, dtype=res.dtype)
with pytest.raises(ValueError):
method(arr.T, axis=axis,
out=wrong_outarray, keepdims=True)
# non-contiguous arrays
if axis is None:
new_shape = [1 for _ in range(len(size))]
else:
new_shape = list(size)[::-1]
new_shape[axis] = 1
new_shape = tuple(new_shape)
_res_orig = method(arr.T, axis=axis)
res_orig = _res_orig.reshape(new_shape)
res = method(arr.T, axis=axis, keepdims=True)
assert_equal(res, res_orig)
assert_(res.shape == new_shape)
outarray = np.empty(new_shape[::-1], dtype=res.dtype)
outarray = outarray.T
res1 = method(arr.T, axis=axis, out=outarray,
keepdims=True)
assert_(res1 is outarray)
assert_equal(res, outarray)
if len(size) > 0:
            # for a non-zero sized array, an out with one dimension
            # fewer should raise an error
with pytest.raises(ValueError):
method(arr[0], axis=axis,
out=outarray, keepdims=True)
if len(size) > 0:
wrong_shape = list(new_shape)
if axis is not None:
wrong_shape[axis] = 2
else:
wrong_shape[0] = 2
wrong_outarray = np.empty(wrong_shape, dtype=res.dtype)
with pytest.raises(ValueError):
method(arr.T, axis=axis,
out=wrong_outarray, keepdims=True)
@pytest.mark.parametrize('method', ['max', 'min'])
def test_all(self, method):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
arg_method = getattr(a, 'arg' + method)
val_method = getattr(a, method)
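        # for each axis, feeding the arg-result into choose() must
        # reconstruct the max/min values themselves; transpose moves the
        # reduced axis to the front so choose() can select along it
        # (descriptive note, ours)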
for i in range(a.ndim):
a_maxmin = val_method(i)
aarg_maxmin = arg_method(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(a_maxmin == aarg_maxmin.choose(
*a.transpose(i, *axes))))
@pytest.mark.parametrize('method', ['argmax', 'argmin'])
def test_output_shape(self, method):
# see also gh-616
a = np.ones((10, 5))
arg_method = getattr(a, method)
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, arg_method, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, arg_method, -1, out)
        # these could possibly be relaxed (the previous cases used to be
        # allowed as well)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, arg_method, -1, out)
out = np.ones(10, dtype=np.int_)
arg_method(-1, out=out)
assert_equal(out, arg_method(-1))
@pytest.mark.parametrize('ndim', [0, 1])
@pytest.mark.parametrize('method', ['argmax', 'argmin'])
def test_ret_is_out(self, ndim, method):
a = np.ones((4,) + (256,)*ndim)
arg_method = getattr(a, method)
out = np.empty((256,)*ndim, dtype=np.intp)
ret = arg_method(axis=0, out=out)
assert ret is out
@pytest.mark.parametrize('np_array, method, idx, val',
[(np.zeros, 'argmax', 5942, "as"),
(np.ones, 'argmin', 6001, "0")])
def test_unicode(self, np_array, method, idx, val):
d = np_array(6031, dtype='<U9')
arg_method = getattr(d, method)
d[idx] = val
assert_equal(arg_method(), idx)
@pytest.mark.parametrize('arr_method, np_method',
[('argmax', np.argmax),
('argmin', np.argmin)])
def test_np_vs_ndarray(self, arr_method, np_method):
# make sure both ndarray.argmax/argmin and
# numpy.argmax/argmin support out/axis args
a = np.random.normal(size=(2, 3))
arg_method = getattr(a, arr_method)
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(arg_method(1, out1), np_method(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(arg_method(out=out1, axis=0),
np_method(a, out=out2, axis=0))
assert_equal(out1, out2)
@pytest.mark.leaks_references(reason="replaces None with NULL.")
@pytest.mark.parametrize('method, vals',
[('argmax', (10, 30)),
('argmin', (30, 10))])
def test_object_with_NULLs(self, method, vals):
# See gh-6032
a = np.empty(4, dtype='O')
arg_method = getattr(a, method)
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(arg_method(), 0)
a[3] = vals[0]
assert_equal(arg_method(), 3)
a[1] = vals[1]
assert_equal(arg_method(), 1)
class TestArgmax:
usg_data = [
([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], 0),
([3, 3, 3, 3, 2, 2, 2, 2], 0),
([0, 1, 2, 3, 4, 5, 6, 7], 7),
([7, 6, 5, 4, 3, 2, 1, 0], 0)
]
sg_data = usg_data + [
([1, 2, 3, 4, -4, -3, -2, -1], 3),
([1, 2, 3, 4, -1, -2, -3, -4], 3)
]
darr = [(np.array(d[0], dtype=t), d[1]) for d, t in (
itertools.product(usg_data, (
np.uint8, np.uint16, np.uint32, np.uint64
))
)]
darr = darr + [(np.array(d[0], dtype=t), d[1]) for d, t in (
itertools.product(sg_data, (
np.int8, np.int16, np.int32, np.int64, np.float32, np.float64
))
)]
darr = darr + [(np.array(d[0], dtype=t), d[1]) for d, t in (
itertools.product((
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
            # To hit the tail of SIMD multi-level (x4, x1) inner loops
            # on variant SIMD widths
([1] * (2*5-1) + [np.nan], 2*5-1),
([1] * (4*5-1) + [np.nan], 4*5-1),
([1] * (8*5-1) + [np.nan], 8*5-1),
([1] * (16*5-1) + [np.nan], 16*5-1),
([1] * (32*5-1) + [np.nan], 32*5-1)
), (
np.float32, np.float64
))
)]
nan_arr = darr + [
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 0),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 2),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 2),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
]
@pytest.mark.parametrize('data', nan_arr)
def test_combinations(self, data):
arr, pos = data
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"invalid value encountered in reduce")
val = np.max(arr)
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], val, err_msg="%r" % arr)
# add padding to test SIMD loops
rarr = np.repeat(arr, 129)
rpos = pos * 129
assert_equal(np.argmax(rarr), rpos, err_msg="%r" % rarr)
assert_equal(rarr[np.argmax(rarr)], val, err_msg="%r" % rarr)
padd = np.repeat(np.min(arr), 513)
rarr = np.concatenate((arr, padd))
rpos = pos
assert_equal(np.argmax(rarr), rpos, err_msg="%r" % rarr)
assert_equal(rarr[np.argmax(rarr)], val, err_msg="%r" % rarr)
    def test_maximum_signed_integers(self):
        a = np.array([1, 2**7 - 1, -2**7], dtype=np.int8)
        assert_equal(np.argmax(a), 1)
        # repeat each element 129 times to also exercise the SIMD loops;
        # the maximum then starts at index 1 * 129
        assert_equal(np.argmax(a.repeat(129)), 129)
        a = np.array([1, 2**15 - 1, -2**15], dtype=np.int16)
        assert_equal(np.argmax(a), 1)
        assert_equal(np.argmax(a.repeat(129)), 129)
        a = np.array([1, 2**31 - 1, -2**31], dtype=np.int32)
        assert_equal(np.argmax(a), 1)
        assert_equal(np.argmax(a.repeat(129)), 129)
        a = np.array([1, 2**63 - 1, -2**63], dtype=np.int64)
        assert_equal(np.argmax(a), 1)
        assert_equal(np.argmax(a.repeat(129)), 129)
class TestArgmin:
usg_data = [
([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], 8),
([3, 3, 3, 3, 2, 2, 2, 2], 4),
([0, 1, 2, 3, 4, 5, 6, 7], 0),
([7, 6, 5, 4, 3, 2, 1, 0], 7)
]
sg_data = usg_data + [
([1, 2, 3, 4, -4, -3, -2, -1], 4),
([1, 2, 3, 4, -1, -2, -3, -4], 7)
]
darr = [(np.array(d[0], dtype=t), d[1]) for d, t in (
itertools.product(usg_data, (
np.uint8, np.uint16, np.uint32, np.uint64
))
)]
darr = darr + [(np.array(d[0], dtype=t), d[1]) for d, t in (
itertools.product(sg_data, (
np.int8, np.int16, np.int32, np.int64, np.float32, np.float64
))
)]
darr = darr + [(np.array(d[0], dtype=t), d[1]) for d, t in (
itertools.product((
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
            # To hit the tail of SIMD multi-level (x4, x1) inner loops
            # on variant SIMD widths
([1] * (2*5-1) + [np.nan], 2*5-1),
([1] * (4*5-1) + [np.nan], 4*5-1),
([1] * (8*5-1) + [np.nan], 8*5-1),
([1] * (16*5-1) + [np.nan], 16*5-1),
([1] * (32*5-1) + [np.nan], 32*5-1)
), (
np.float32, np.float64
))
)]
nan_arr = darr + [
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 0),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 2),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 2),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
]
@pytest.mark.parametrize('data', nan_arr)
def test_combinations(self, data):
arr, pos = data
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"invalid value encountered in reduce")
min_val = np.min(arr)
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], min_val, err_msg="%r" % arr)
# add padding to test SIMD loops
rarr = np.repeat(arr, 129)
rpos = pos * 129
assert_equal(np.argmin(rarr), rpos, err_msg="%r" % rarr)
assert_equal(rarr[np.argmin(rarr)], min_val, err_msg="%r" % rarr)
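        # pad with the maximum so the argmin position is unchanged; 513
        # repeats also exercise the scalar tail of the SIMD loop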
padd = np.repeat(np.max(arr), 513)
rarr = np.concatenate((arr, padd))
rpos = pos
assert_equal(np.argmin(rarr), rpos, err_msg="%r" % rarr)
assert_equal(rarr[np.argmin(rarr)], min_val, err_msg="%r" % rarr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1, 2**7 - 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
        a = a.repeat(129)
        assert_equal(np.argmin(a), 129)
a = np.array([1, -2**15, -2**15 + 1, 2**15 - 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
        a = a.repeat(129)
        assert_equal(np.argmin(a), 129)
a = np.array([1, -2**31, -2**31 + 1, 2**31 - 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
        a = a.repeat(129)
        assert_equal(np.argmin(a), 129)
a = np.array([1, -2**63, -2**63 + 1, 2**63 - 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
        a = a.repeat(129)
        assert_equal(np.argmin(a), 129)
class TestMinMax:
def test_scalar(self):
assert_raises(np.AxisError, np.amax, 1, 1)
assert_raises(np.AxisError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(np.AxisError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# Do not ignore NaT
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[3] = 'NaT'
assert_equal(np.amin(a), a[3])
assert_equal(np.amax(a), a[3])
class TestNewaxis:
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip:
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
# The tests that call us pass clip_min and clip_max that
# might not fit in the destination dtype. They were written
# assuming the previous unsafe casting, which now must be
# passed explicitly to avoid a warning.
x.clip(clip_min, clip_max, x, casting='unsafe')
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
def test_nan(self):
input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan])
result = input_arr.clip(-1, 1)
expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan])
assert_array_equal(result, expected)
class TestCompress:
def test_axis(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = np.compress([0, 1, 0, 1, 0], arr, axis=1)
assert_equal(out, tgt)
def test_truncate(self):
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=1)
assert_equal(out, tgt)
def test_flatten(self):
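        # without an axis argument, compress works on the flattened array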
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr)
assert_equal(out, 1)
class TestPutmask:
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_equal(x[mask], np.array(val, T))
def test_ip_types(self):
unchecked_types = [bytes, str, np.void]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
self.tst_basic(x.copy().astype(T), T, mask, val)
        # Also test a string dtype with an atypical length
dt = np.dtype("S3")
self.tst_basic(x.astype(dt), dt.type, mask, dt.type(val)[:3])
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
@pytest.mark.parametrize('dtype', ('>i4', '<i4'))
def test_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
def test_overlaps(self):
# gh-6272 check overlap
x = np.array([True, False, True, False])
np.putmask(x[1:4], [True, True, True], x[:3])
assert_equal(x, np.array([True, True, False, True]))
x = np.array([True, False, True, False])
np.putmask(x[1:4], x[:3], [True, False, True])
assert_equal(x, np.array([True, True, True, True]))
def test_writeable(self):
a = np.arange(5)
a.flags.writeable = False
with pytest.raises(ValueError):
np.putmask(a, a >= 2, 3)
class TestTake:
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [bytes, str, np.void]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
self.tst_basic(x.copy().astype(T))
        # Also test a string dtype with an atypical length
self.tst_basic(x.astype("S3"))
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
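        # mode='clip' clamps out-of-range indices to the nearest valid index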
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
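        # mode='wrap' wraps out-of-range indices modulo the axis length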
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
@pytest.mark.parametrize('dtype', ('>i4', '<i4'))
def test_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
def test_out_overlap(self):
# gh-6272 check overlap on out
x = np.arange(5)
y = np.take(x, [1, 2, 3], out=x[2:5], mode='wrap')
assert_equal(y, np.array([1, 2, 3]))
@pytest.mark.parametrize('shape', [(1, 2), (1,), ()])
def test_ret_is_out(self, shape):
# 0d arrays should not be an exception to this rule
x = np.arange(5)
inds = np.zeros(shape, dtype=np.intp)
out = np.zeros(shape, dtype=x.dtype)
ret = np.take(x, inds, out=out)
assert ret is out
class TestLexsort:
@pytest.mark.parametrize('dtype',[
np.uint8, np.uint16, np.uint32, np.uint64,
np.int8, np.int16, np.int32, np.int64,
np.float16, np.float32, np.float64
])
def test_basic(self, dtype):
a = np.array([1, 2, 1, 3, 1, 5], dtype=dtype)
b = np.array([0, 4, 5, 6, 2, 3], dtype=dtype)
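        # the last key passed to lexsort (here a) is the primary sort key;
        # b is only used to break ties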
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
assert_array_equal(a[idx], np.sort(a))
def test_mixed(self):
a = np.array([1, 2, 1, 3, 1, 5])
b = np.array([0, 4, 5, 6, 2, 3], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
def test_datetime(self):
        a = np.array([0, 0, 0], dtype='datetime64[D]')
        b = np.array([2, 1, 0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
        a = np.array([0, 0, 0], dtype='timedelta64[D]')
        b = np.array([2, 1, 0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
def test_object(self): # gh-6312
a = np.random.choice(10, 1000)
b = np.random.choice(['abc', 'xy', 'wz', 'efghi', 'qwst', 'x'], 1000)
for u in a, b:
left = np.lexsort((u.astype('O'),))
right = np.argsort(u, kind='mergesort')
assert_array_equal(left, right)
for u, v in (a, b), (b, a):
idx = np.lexsort((u, v))
assert_array_equal(idx, np.lexsort((u.astype('O'), v)))
assert_array_equal(idx, np.lexsort((u, v.astype('O'))))
u, v = np.array(u, dtype='object'), np.array(v, dtype='object')
assert_array_equal(idx, np.lexsort((u, v)))
def test_invalid_axis(self): # gh-7528
x = np.linspace(0., 1., 42*3).reshape(42, 3)
assert_raises(np.AxisError, np.lexsort, x, axis=2)
class TestIO:
"""Test tofile, fromfile, tobytes, and fromstring"""
@pytest.fixture()
def x(self):
shape = (2, 4, 3)
rand = np.random.random
x = rand(shape) + rand(shape).astype(complex) * 1j
x[0, :, 1] = [np.nan, np.inf, -np.inf, np.nan]
return x
@pytest.fixture(params=["string", "path_obj"])
def tmp_filename(self, tmp_path, request):
# This fixture covers two cases:
# one where the filename is a string and
# another where it is a pathlib object
filename = tmp_path / "file"
if request.param == "string":
filename = str(filename)
yield filename
def test_nofile(self):
# this should probably be supported as a file
# but for now test for proper errors
b = io.BytesIO()
assert_raises(OSError, np.fromfile, b, np.uint8, 80)
d = np.ones(7)
assert_raises(OSError, lambda x: x.tofile(b), d)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_fromstring_count0(self):
d = np.fromstring("1,2", sep=",", dtype=np.int64, count=0)
assert d.shape == (0,)
def test_empty_files_text(self, tmp_filename):
with open(tmp_filename, 'w') as f:
pass
y = np.fromfile(tmp_filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_binary(self, tmp_filename):
with open(tmp_filename, 'wb') as f:
pass
y = np.fromfile(tmp_filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self, x, tmp_filename):
with open(tmp_filename, 'wb') as f:
x.tofile(f)
# NB. doesn't work with flush+seek, due to use of C stdio
with open(tmp_filename, 'rb') as f:
y = np.fromfile(f, dtype=x.dtype)
assert_array_equal(y, x.flat)
def test_roundtrip(self, x, tmp_filename):
x.tofile(tmp_filename)
y = np.fromfile(tmp_filename, dtype=x.dtype)
assert_array_equal(y, x.flat)
def test_roundtrip_dump_pathlib(self, x, tmp_filename):
p = pathlib.Path(tmp_filename)
x.dump(p)
y = np.load(p, allow_pickle=True)
assert_array_equal(y, x)
def test_roundtrip_binary_str(self, x):
s = x.tobytes()
y = np.frombuffer(s, dtype=x.dtype)
assert_array_equal(y, x.flat)
s = x.tobytes('F')
y = np.frombuffer(s, dtype=x.dtype)
assert_array_equal(y, x.flatten('F'))
def test_roundtrip_str(self, x):
x = x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
        # NB. str conversion loses precision
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self, x):
x = x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_unseekable_fromfile(self, x, tmp_filename):
# gh-6246
x.tofile(tmp_filename)
def fail(*args, **kwargs):
raise OSError('Can not tell or seek')
with io.open(tmp_filename, 'rb', buffering=0) as f:
f.seek = fail
f.tell = fail
assert_raises(OSError, np.fromfile, f, dtype=x.dtype)
def test_io_open_unbuffered_fromfile(self, x, tmp_filename):
# gh-6632
x.tofile(tmp_filename)
with io.open(tmp_filename, 'rb', buffering=0) as f:
y = np.fromfile(f, dtype=x.dtype)
assert_array_equal(y, x.flat)
def test_largish_file(self, tmp_filename):
# check the fallocate path on files > 16MB
d = np.zeros(4 * 1024 ** 2)
d.tofile(tmp_filename)
assert_equal(os.path.getsize(tmp_filename), d.nbytes)
assert_array_equal(d, np.fromfile(tmp_filename))
# check offset
with open(tmp_filename, "r+b") as f:
f.seek(d.nbytes)
d.tofile(f)
assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2)
# check append mode (gh-8329)
open(tmp_filename, "w").close() # delete file contents
with open(tmp_filename, "ab") as f:
d.tofile(f)
assert_array_equal(d, np.fromfile(tmp_filename))
with open(tmp_filename, "ab") as f:
d.tofile(f)
assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2)
def test_io_open_buffered_fromfile(self, x, tmp_filename):
# gh-6632
x.tofile(tmp_filename)
with io.open(tmp_filename, 'rb', buffering=-1) as f:
y = np.fromfile(f, dtype=x.dtype)
assert_array_equal(y, x.flat)
def test_file_position_after_fromfile(self, tmp_filename):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
with open(tmp_filename, 'wb') as f:
f.seek(size-1)
f.write(b'\0')
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
with open(tmp_filename, mode) as f:
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
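                    # 2 bytes consumed by read() plus 8 bytes for one
                    # float64 leave the position at 10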
pos = f.tell()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self, tmp_filename):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
with open(tmp_filename, 'wb') as f:
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
with open(tmp_filename, 'r+b') as f:
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
assert_equal(pos, 10, err_msg=err_msg)
def test_load_object_array_fromfile(self, tmp_filename):
# gh-12300
with open(tmp_filename, 'w') as f:
# Ensure we have a file with consistent contents
pass
with open(tmp_filename, 'rb') as f:
assert_raises_regex(ValueError, "Cannot read into object array",
np.fromfile, f, dtype=object)
assert_raises_regex(ValueError, "Cannot read into object array",
np.fromfile, tmp_filename, dtype=object)
def test_fromfile_offset(self, x, tmp_filename):
with open(tmp_filename, 'wb') as f:
x.tofile(f)
with open(tmp_filename, 'rb') as f:
y = np.fromfile(f, dtype=x.dtype, offset=0)
assert_array_equal(y, x.flat)
with open(tmp_filename, 'rb') as f:
count_items = len(x.flat) // 8
offset_items = len(x.flat) // 4
offset_bytes = x.dtype.itemsize * offset_items
y = np.fromfile(
f, dtype=x.dtype, count=count_items, offset=offset_bytes
)
assert_array_equal(
y, x.flat[offset_items:offset_items+count_items]
)
# subsequent seeks should stack
offset_bytes = x.dtype.itemsize
z = np.fromfile(f, dtype=x.dtype, offset=offset_bytes)
assert_array_equal(z, x.flat[offset_items+count_items+1:])
with open(tmp_filename, 'wb') as f:
x.tofile(f, sep=",")
with open(tmp_filename, 'rb') as f:
assert_raises_regex(
TypeError,
"'offset' argument only permitted for binary files",
np.fromfile, tmp_filename, dtype=x.dtype,
sep=",", offset=1)
@pytest.mark.skipif(IS_PYPY, reason="bug in PyPy's PyNumber_AsSsize_t")
def test_fromfile_bad_dup(self, x, tmp_filename):
def dup_str(fd):
return 'abc'
def dup_bigint(fd):
return 2**68
old_dup = os.dup
try:
with open(tmp_filename, 'wb') as f:
x.tofile(f)
for dup, exc in ((dup_str, TypeError), (dup_bigint, OSError)):
os.dup = dup
assert_raises(exc, np.fromfile, f)
finally:
os.dup = old_dup
def _check_from(self, s, value, filename, **kw):
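        # Parse `s` both from memory (frombuffer/fromstring) and from a
        # real file via fromfile, checking both results against `value`.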
if 'sep' not in kw:
y = np.frombuffer(s, **kw)
else:
y = np.fromstring(s, **kw)
assert_array_equal(y, value)
with open(filename, 'wb') as f:
f.write(s)
y = np.fromfile(filename, **kw)
assert_array_equal(y, value)
@pytest.fixture(params=["period", "comma"])
def decimal_sep_localization(self, request):
"""
Including this fixture in a test will automatically
execute it with both types of decimal separator.
So::
def test_decimal(decimal_sep_localization):
pass
is equivalent to the following two tests::
def test_decimal_period_separator():
pass
def test_decimal_comma_separator():
with CommaDecimalPointLocale():
pass
"""
if request.param == "period":
yield
elif request.param == "comma":
with CommaDecimalPointLocale():
yield
else:
assert False, request.param
def test_nan(self, tmp_filename, decimal_sep_localization):
self._check_from(
b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
tmp_filename,
sep=' ')
def test_inf(self, tmp_filename, decimal_sep_localization):
self._check_from(
b"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
tmp_filename,
sep=' ')
def test_numbers(self, tmp_filename, decimal_sep_localization):
self._check_from(
b"1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133],
tmp_filename,
sep=' ')
def test_binary(self, tmp_filename):
self._check_from(
b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
tmp_filename,
dtype='<f4')
@pytest.mark.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self, tmp_filename):
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, sep=',')
def test_counted_string(self, tmp_filename, decimal_sep_localization):
self._check_from(
b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, count=4, sep=',')
self._check_from(
b'1,2,3,4', [1., 2., 3.], tmp_filename, count=3, sep=',')
self._check_from(
b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, count=-1, sep=',')
def test_string_with_ws(self, tmp_filename):
self._check_from(
b'1 2 3 4 ', [1, 2, 3, 4], tmp_filename, dtype=int, sep=' ')
def test_counted_string_with_ws(self, tmp_filename):
self._check_from(
b'1 2 3 4 ', [1, 2, 3], tmp_filename, count=3, dtype=int,
sep=' ')
def test_ascii(self, tmp_filename, decimal_sep_localization):
self._check_from(
b'1 , 2 , 3 , 4', [1., 2., 3., 4.], tmp_filename, sep=',')
self._check_from(
b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, dtype=float, sep=',')
def test_malformed(self, tmp_filename, decimal_sep_localization):
with assert_warns(DeprecationWarning):
self._check_from(
b'1.234 1,234', [1.234, 1.], tmp_filename, sep=' ')
def test_long_sep(self, tmp_filename):
self._check_from(
b'1_x_3_x_4_x_5', [1, 3, 4, 5], tmp_filename, sep='_x_')
def test_dtype(self, tmp_filename):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from(b'1,2,3,4', v, tmp_filename, sep=',', dtype=np.int_)
def test_dtype_bool(self, tmp_filename):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = b'1,0,-2.3,0'
with open(tmp_filename, 'wb') as f:
f.write(s)
y = np.fromfile(tmp_filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self, tmp_filename, decimal_sep_localization):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
with open(tmp_filename, 'w') as f:
x.tofile(f, sep=',')
with open(tmp_filename, 'r') as f:
s = f.read()
        # assert_equal(s, '1.51,2.0,3.51,4.0')
        y = np.array([float(p) for p in s.split(',')])
        assert_array_equal(x, y)
def test_tofile_format(self, tmp_filename, decimal_sep_localization):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
with open(tmp_filename, 'w') as f:
x.tofile(f, sep=',', format='%.2f')
with open(tmp_filename, 'r') as f:
s = f.read()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_tofile_cleanup(self, tmp_filename):
x = np.zeros((10), dtype=object)
with open(tmp_filename, 'wb') as f:
assert_raises(OSError, lambda: x.tofile(f, sep=''))
# Dup-ed file handle should be closed or remove will fail on Windows OS
os.remove(tmp_filename)
# Also make sure that we close the Python handle
assert_raises(OSError, lambda: x.tofile(tmp_filename))
os.remove(tmp_filename)
def test_fromfile_subarray_binary(self, tmp_filename):
# Test subarray dtypes which are absorbed into the shape
x = np.arange(24, dtype="i4").reshape(2, 3, 4)
x.tofile(tmp_filename)
res = np.fromfile(tmp_filename, dtype="(3,4)i4")
assert_array_equal(x, res)
x_str = x.tobytes()
with assert_warns(DeprecationWarning):
# binary fromstring is deprecated
res = np.fromstring(x_str, dtype="(3,4)i4")
assert_array_equal(x, res)
def test_parsing_subarray_unsupported(self, tmp_filename):
# We currently do not support parsing subarray dtypes
data = "12,42,13," * 50
with pytest.raises(ValueError):
expected = np.fromstring(data, dtype="(3,)i", sep=",")
with open(tmp_filename, "w") as f:
f.write(data)
with pytest.raises(ValueError):
np.fromfile(tmp_filename, dtype="(3,)i", sep=",")
def test_read_shorter_than_count_subarray(self, tmp_filename):
# Test that requesting more values does not cause any problems
# in conjunction with subarray dimensions being absorbed into the
# array dimension.
expected = np.arange(511 * 10, dtype="i").reshape(-1, 10)
binary = expected.tobytes()
with pytest.raises(ValueError):
with pytest.warns(DeprecationWarning):
np.fromstring(binary, dtype="(10,)i", count=10000)
expected.tofile(tmp_filename)
res = np.fromfile(tmp_filename, dtype="(10,)i", count=10000)
assert_array_equal(res, expected)
class TestFromBuffer:
@pytest.mark.parametrize('byteorder', ['<', '>'])
@pytest.mark.parametrize('dtype', [float, int, complex])
def test_basic(self, byteorder, dtype):
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7)) * 5).astype(dt)
buf = x.tobytes()
assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat)
@pytest.mark.parametrize("obj", [np.arange(10), b"12345678"])
def test_array_base(self, obj):
# Objects (including NumPy arrays), which do not use the
# `release_buffer` slot should be directly used as a base object.
# See also gh-21612
new = np.frombuffer(obj)
assert new.base is obj
def test_empty(self):
assert_array_equal(np.frombuffer(b''), np.array([]))
@pytest.mark.skipif(IS_PYPY,
reason="PyPy's memoryview currently does not track exports. See: "
"https://foss.heptapod.net/pypy/pypy/-/issues/3724")
def test_mmap_close(self):
# The old buffer protocol was not safe for some things that the new
# one is. But `frombuffer` always used the old one for a long time.
# Checks that it is safe with the new one (using memoryviews)
with tempfile.TemporaryFile(mode='wb') as tmp:
tmp.write(b"asdf")
tmp.flush()
mm = mmap.mmap(tmp.fileno(), 0)
arr = np.frombuffer(mm, dtype=np.uint8)
with pytest.raises(BufferError):
mm.close() # cannot close while array uses the buffer
del arr
mm.close()
class TestFlat:
def setup_method(self):
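        # a: read-only 2-D array, b: read-only non-contiguous view of it;
        # a0/b0: writeable counterparts with the same shapes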
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.a.flat[12] == 12.0)
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.b.flat[4] == 12.0)
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert_(c.flags.writeable is False)
assert_(d.flags.writeable is False)
assert_(e.flags.writeable is True)
assert_(f.flags.writeable is False)
assert_(c.flags.writebackifcopy is False)
assert_(d.flags.writebackifcopy is False)
assert_(e.flags.writebackifcopy is False)
assert_(f.flags.writebackifcopy is False)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_refcount(self):
# includes regression test for reference count error gh-13165
inds = [np.intp(0), np.array([True]*self.a.size), np.array([0]), None]
indtype = np.dtype(np.intp)
rc_indtype = sys.getrefcount(indtype)
for ind in inds:
rc_ind = sys.getrefcount(ind)
for _ in range(100):
try:
self.a.flat[ind]
except IndexError:
pass
assert_(abs(sys.getrefcount(ind) - rc_ind) < 50)
assert_(abs(sys.getrefcount(indtype) - rc_indtype) < 50)
def test_index_getset(self):
it = np.arange(10).reshape(2, 1, 5).flat
with pytest.raises(AttributeError):
it.index = 10
for _ in it:
pass
# Check the value of `.index` is updated correctly (see also gh-19153)
# If the type was incorrect, this would show up on big-endian machines
assert it.index == it.base.size
class TestResize:
@_no_tracing
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
if IS_PYPY:
x.resize((5, 5), refcheck=False)
else:
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat, 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
assert_raises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
@_no_tracing
def test_int_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, refcheck=False)
else:
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
def test_0d_shape(self):
        # do it multiple times to test it does not break the alloc cache (gh-9216)
for i in range(10):
x = np.empty((1,))
x.resize(())
assert_equal(x.shape, ())
assert_equal(x.size, 1)
x = np.empty(())
x.resize((1,))
assert_equal(x.shape, (1,))
assert_equal(x.size, 1)
def test_invalid_arguments(self):
assert_raises(TypeError, np.eye(3).resize, 'hi')
assert_raises(ValueError, np.eye(3).resize, -1)
assert_raises(TypeError, np.eye(3).resize, order=1)
assert_raises(TypeError, np.eye(3).resize, refcheck='hi')
@_no_tracing
def test_freeform_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, 2, 1, refcheck=False)
else:
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
@_no_tracing
def test_zeros_appended(self):
x = np.eye(3)
if IS_PYPY:
x.resize(2, 3, 3, refcheck=False)
else:
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
@_no_tracing
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
if IS_PYPY:
a.resize(15, refcheck=False)
else:
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
def test_empty_view(self):
# check that sizes containing a zero don't trigger a reallocate for
# already empty arrays
x = np.zeros((10, 0), int)
x_view = x[...]
x_view.resize((0, 10))
x_view.resize((0, 100))
def test_check_weakref(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
xref = weakref.ref(x)
assert_raises(ValueError, x.resize, (5, 1))
del xref # avoid pyflakes unused variable warning.
class TestRecord:
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
def test_multiple_field_name_occurrence(self):
def test_dtype_init():
np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
# Error raised when multiple fields have the same name
assert_raises(ValueError, test_dtype_init)
def test_bytes_fields(self):
        # Bytes are not allowed in field names and are not recognized in
        # titles
assert_raises(TypeError, np.dtype, [(b'a', int)])
assert_raises(TypeError, np.dtype, [(('b', b'a'), int)])
dt = np.dtype([((b'a', 'b'), int)])
assert_raises(TypeError, dt.__getitem__, b'a')
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, b'a')
y = x[0]
assert_raises(IndexError, y.__getitem__, b'a')
def test_multiple_field_name_unicode(self):
def test_dtype_unicode():
np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")])
        # Error raised when multiple fields have the same name (unicode included)
assert_raises(ValueError, test_dtype_unicode)
def test_fromarrays_unicode(self):
        # A single name string provided to fromarrays() is allowed to be
        # unicode:
x = np.core.records.fromarrays([[0], [1]], names=u'a,b', formats=u'i4,i4')
assert_equal(x['a'][0], 0)
assert_equal(x['b'][0], 1)
def test_unicode_order(self):
        # Test that we can sort with order as a unicode field name:
name = u'b'
x = np.array([1, 3, 2], dtype=[(name, int)])
x.sort(order=name)
assert_equal(x[u'b'], np.array([1, 2, 3]))
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, b'f1', 1)
assert_raises(IndexError, a.__getitem__, b'f1')
assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1)
assert_raises(IndexError, a['f1'].__getitem__, b'sf1')
b = a.copy()
fn1 = str('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = str('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(ValueError, b[0].__setitem__, fnn, 1)
assert_raises(ValueError, b[0].__getitem__, fnn)
# Subfield
fn3 = str('f3')
sfn1 = str('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple subfields
fn2 = str('f2')
b[fn2] = 3
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# non-ascii unicode field indexing is well behaved
assert_raises(ValueError, a.__setitem__, u'\u03e0', 1)
assert_raises(ValueError, a.__getitem__, u'\u03e0')
def test_record_hash(self):
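        # void scalars are only hashable when the owning array is read-only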
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
assert_(hash(a[0]) == hash(a[1]))
assert_(hash(a[0]) == hash(b[0]))
assert_(hash(a[0]) != hash(b[1]))
assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
assert_raises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
np.array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
def test_multifield_indexing_view(self):
a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')])
v = a[['a', 'c']]
assert_(v.base is a)
assert_(v.dtype == np.dtype({'names': ['a', 'c'],
'formats': ['i4', 'u4'],
'offsets': [0, 8]}))
v[:] = (4,5)
assert_equal(a[0].item(), (4, 1, 5))
class TestView:
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
assert_array_equal(y, [67305985, 134678021])
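# Thin wrappers around the ndarray methods so TestStats can iterate over
# mean/var/std uniformly via self.funcs.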
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats:
funcs = [_mean, _var, _std]
def setup_method(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_python_type(self):
for x in (np.float16(1.), 1, 1., 1+0j):
assert_equal(np.mean([x]), 1.)
assert_equal(np.std([x]), 0.)
assert_equal(np.var([x]), 0.)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
# stats for integer types
# FIXME:
# this needs definition as there are lots places along the line
# where type casting may take place.
# for f in self.funcs:
# for c in np.typecodes['AllInteger']:
# tgt = np.dtype(c).type
# res = f(mat, axis=1, dtype=c).dtype.type
# assert_(res is tgt)
# # scalar case
# res = f(mat, axis=None, dtype=c).dtype.type
# assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in np.typecodes['AllFloat']:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
    def test_ddof(self):
        for f in [_var]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * dim
                res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
                assert_almost_equal(res, tgt)
        for f in [_std]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * np.sqrt(dim)
                res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
                assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_mean_float16(self):
        # This fails if the sum inside mean is done in float16 instead
        # of float32.
assert_(_mean(np.ones(100000, dtype='float16')) == 1)
def test_mean_axis_error(self):
# Ensure that AxisError is raised instead of IndexError when axis is
# out of bounds, see gh-15817.
with assert_raises(np.core._exceptions.AxisError):
np.arange(10).mean(axis=2)
def test_mean_where(self):
a = np.arange(16).reshape((4, 4))
wh_full = np.array([[False, True, False, True],
[True, False, True, False],
[True, True, False, False],
[False, False, True, True]])
wh_partial = np.array([[False],
[True],
[True],
[False]])
_cases = [(1, True, [1.5, 5.5, 9.5, 13.5]),
(0, wh_full, [6., 5., 10., 9.]),
(1, wh_full, [2., 5., 8.5, 14.5]),
(0, wh_partial, [6., 7., 8., 9.])]
for _ax, _wh, _res in _cases:
assert_allclose(a.mean(axis=_ax, where=_wh),
np.array(_res))
assert_allclose(np.mean(a, axis=_ax, where=_wh),
np.array(_res))
a3d = np.arange(16).reshape((2, 2, 4))
_wh_partial = np.array([False, True, True, False])
_res = [[1.5, 5.5], [9.5, 13.5]]
assert_allclose(a3d.mean(axis=2, where=_wh_partial),
np.array(_res))
assert_allclose(np.mean(a3d, axis=2, where=_wh_partial),
np.array(_res))
with pytest.warns(RuntimeWarning) as w:
assert_allclose(a.mean(axis=1, where=wh_partial),
np.array([np.nan, 5.5, 9.5, np.nan]))
with pytest.warns(RuntimeWarning) as w:
assert_equal(a.mean(where=False), np.nan)
with pytest.warns(RuntimeWarning) as w:
assert_equal(np.mean(a, where=False), np.nan)
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
@pytest.mark.parametrize(('complex_dtype', 'ndec'), (
('complex64', 6),
('complex128', 7),
('clongdouble', 7),
))
def test_var_complex_values(self, complex_dtype, ndec):
# Test fast-paths for every builtin complex type
for axis in [0, 1, None]:
mat = self.cmat.copy().astype(complex_dtype)
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt, decimal=ndec)
def test_var_dimensions(self):
        # _var paths for complex numbers introduce additions on views that
        # increase dimensions. Ensure this generalizes to higher dims
mat = np.stack([self.cmat]*3)
for axis in [0, 1, 2, -1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_var_complex_byteorder(self):
# Test that var fast-path does not cause failures for complex arrays
# with non-native byteorder
cmat = self.cmat.copy().astype('complex128')
cmat_swapped = cmat.astype(cmat.dtype.newbyteorder())
assert_almost_equal(cmat.var(), cmat_swapped.var())
def test_var_axis_error(self):
# Ensure that AxisError is raised instead of IndexError when axis is
# out of bounds, see gh-15817.
with assert_raises(np.core._exceptions.AxisError):
np.arange(10).var(axis=2)
def test_var_where(self):
a = np.arange(25).reshape((5, 5))
wh_full = np.array([[False, True, False, True, True],
[True, False, True, True, False],
[True, True, False, False, True],
[False, True, True, False, True],
[True, False, True, True, False]])
wh_partial = np.array([[False],
[True],
[True],
[False],
[True]])
_cases = [(0, True, [50., 50., 50., 50., 50.]),
(1, True, [2., 2., 2., 2., 2.])]
for _ax, _wh, _res in _cases:
assert_allclose(a.var(axis=_ax, where=_wh),
np.array(_res))
assert_allclose(np.var(a, axis=_ax, where=_wh),
np.array(_res))
a3d = np.arange(16).reshape((2, 2, 4))
_wh_partial = np.array([False, True, True, False])
_res = [[0.25, 0.25], [0.25, 0.25]]
assert_allclose(a3d.var(axis=2, where=_wh_partial),
np.array(_res))
assert_allclose(np.var(a3d, axis=2, where=_wh_partial),
np.array(_res))
assert_allclose(np.var(a, axis=1, where=wh_full),
np.var(a[wh_full].reshape((5, 3)), axis=1))
assert_allclose(np.var(a, axis=0, where=wh_partial),
np.var(a[wh_partial[:,0]], axis=0))
with pytest.warns(RuntimeWarning) as w:
assert_equal(a.var(where=False), np.nan)
with pytest.warns(RuntimeWarning) as w:
assert_equal(np.var(a, where=False), np.nan)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_where(self):
a = np.arange(25).reshape((5,5))[::-1]
whf = np.array([[False, True, False, True, True],
[True, False, True, False, True],
[True, True, False, True, False],
[True, False, True, True, False],
[False, True, False, True, True]])
whp = np.array([[False],
[False],
[True],
[True],
[False]])
_cases = [
(0, True, 7.07106781*np.ones((5))),
(1, True, 1.41421356*np.ones((5))),
(0, whf,
             np.array([4.0824829, 8.16496581, 5., 7.39509973, 8.49836586])),
(0, whp, 2.5*np.ones((5)))
]
for _ax, _wh, _res in _cases:
assert_allclose(a.std(axis=_ax, where=_wh), _res)
assert_allclose(np.std(a, axis=_ax, where=_wh), _res)
a3d = np.arange(16).reshape((2, 2, 4))
_wh_partial = np.array([False, True, True, False])
_res = [[0.5, 0.5], [0.5, 0.5]]
assert_allclose(a3d.std(axis=2, where=_wh_partial),
np.array(_res))
assert_allclose(np.std(a3d, axis=2, where=_wh_partial),
np.array(_res))
assert_allclose(a.std(axis=1, where=whf),
np.std(a[whf].reshape((5,3)), axis=1))
assert_allclose(np.std(a, axis=1, where=whf),
(a[whf].reshape((5,3))).std(axis=1))
assert_allclose(a.std(axis=0, where=whp),
np.std(a[whp[:,0]], axis=0))
assert_allclose(np.std(a, axis=0, where=whp),
(a[whp[:,0]]).std(axis=0))
with pytest.warns(RuntimeWarning) as w:
assert_equal(a.std(where=False), np.nan)
with pytest.warns(RuntimeWarning) as w:
assert_equal(np.std(a, where=False), np.nan)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
class TestVdot:
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
def test_vdot_uncontiguous(self):
for size in [2, 1000]:
# Different sizes match different branches in vdot.
a = np.zeros((size, 2, 2))
b = np.zeros((size, 2, 2))
a[:, 0, 0] = np.arange(size)
b[:, 0, 0] = np.arange(size) + 1
# Make a and b uncontiguous:
a = a[..., 0]
b = b[..., 0]
assert_equal(np.vdot(a, b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy()),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy(), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy('F'), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy('F')),
np.vdot(a.flatten(), b.flatten()))
class TestDot:
def setup_method(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
self.b2 = np.random.rand(2)
self.b3 = np.random.rand(1, 2)
self.b4 = np.random.rand(4)
self.N = 7
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
        tgt = np.array([[0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
        tgt = np.array([[0.00256425], [0.00131359], [0.00200324], [0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
dims = [(), (1,), (1, 1)]
dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
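        # dout[k] is the expected result shape for the k-th (dim1, dim2)
        # pair produced by itertools.product(dims, dims)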
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
class Vec:
def __init__(self, sequence=None):
if sequence is None:
sequence = []
self.array = np.array(sequence)
def __add__(self, other):
out = Vec()
out.array = self.array + other.array
return out
def __sub__(self, other):
out = Vec()
out.array = self.array - other.array
return out
def __mul__(self, other): # with scalar
out = Vec(self.array.copy())
out.array *= other
return out
def __rmul__(self, other):
return self*other
U_non_cont = np.transpose([[1., 1.], [1., 2.]])
U_cont = np.ascontiguousarray(U_non_cont)
x = np.array([Vec([1., 0.]), Vec([0., 1.])])
zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
dot(f, v, r)
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
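            # Over-allocate a byte buffer, find the first offset whose
            # address is a multiple of `align`, and view it as `dtype`.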
d = dtype(0)
N = np.prod(shape)
tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
address = tmp.__array_interface__["data"][0]
for offset in range(align):
if (address + offset) % align == 0:
break
tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
return tmp.reshape(shape, order=order)
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
testdata = itertools.product((15, 32), (10000,), (200, 89), ('C', 'F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
@pytest.mark.slow
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
    @requires_memory(free_bytes=18e9)  # complex case needs about 18 GB
def test_huge_vectordot(self, dtype):
# Large vector multiplications are chunked with 32bit BLAS
# Test that the chunking does the right thing, see also gh-22262
data = np.ones(2**30+100, dtype=dtype)
res = np.dot(data, data)
assert res == 2**30+100
class MatmulCommon:
"""Common tests for '@' operator and numpy.matmul.
"""
    # Should work with these types.
types = "?bhilqBHILQefdgFDGO"
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
((), (1)), # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
if dt != "O":
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
def test_scalar_output(self):
vec1 = np.array([2])
vec2 = np.array([3, 4]).reshape(1, -1)
tgt = np.array([6, 8])
for dt in self.types[1:]:
v1 = vec1.astype(dt)
v2 = vec2.astype(dt)
res = self.matmul(v1, v2)
assert_equal(res, tgt)
res = self.matmul(v2.T, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?').reshape(1, -1)
res = self.matmul(vec[:, 0], vec)
assert_equal(res, True)
def test_vector_vector_values(self):
vec1 = np.array([1, 2])
vec2 = np.array([3, 4]).reshape(-1, 1)
tgt1 = np.array([11])
tgt2 = np.array([[3, 6], [4, 8]])
for dt in self.types[1:]:
v1 = vec1.astype(dt)
v2 = vec2.astype(dt)
res = self.matmul(v1, v2)
assert_equal(res, tgt1)
# no broadcast, we must make v1 into a 2d ndarray
res = self.matmul(v2, v1.reshape(1, -1))
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
        res = self.matmul(mat1, vec)
        assert_equal(res, tgt1)
        res = self.matmul(mat2, vec)
        assert_equal(res, tgt2)
def test_matrix_matrix_values(self):
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
tgt11 = np.array([[7, 10], [15, 22]])
tgt12 = np.array([[3, 2], [7, 4]])
tgt21 = np.array([[1, 2], [4, 6]])
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
for dt in self.types[1:]:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
# boolean type
m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
m12 = np.stack([m1, m2], axis=0)
m21 = np.stack([m2, m1], axis=0)
tgt11 = m1
tgt12 = m1
tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
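# Hypothetical helper summarizing the shape rules the MatmulCommon cases
# exercise: 1-d operands gain and then lose a core dimension, and stacked
# matrices broadcast over the leading axes like ordinary ufunc operands.
def _demo_matmul_shape_rules():
    stack = np.ones((2, 3, 4))   # a stack of two 3x4 matrices
    vec = np.ones(4)             # promoted to (4, 1), result dim then dropped
    assert np.matmul(stack, vec).shape == (2, 3)
    # Leading (batch) dimensions broadcast: (1, ...) against (2, ...)
    assert np.matmul(np.ones((1, 4, 5)), np.ones((2, 5, 3))).shape == (2, 4, 3)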
class TestMatmul(MatmulCommon):
matmul = np.matmul
def test_out_arg(self):
a = np.ones((5, 2), dtype=float)
b = np.array([[1, 3], [5, 7]], dtype=float)
tgt = np.dot(a, b)
# test as positional argument
msg = "out positional argument"
out = np.zeros((5, 2), dtype=float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((5, 2), dtype=float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
msg = "Cannot cast ufunc .* output"
out = np.zeros((5, 2), dtype=np.int32)
assert_raises_regex(TypeError, msg, self.matmul, a, b, out=out)
# test out with type upcast to complex
out = np.zeros((5, 2), dtype=np.complex128)
c = self.matmul(a, b, out=out)
assert_(c is out)
with suppress_warnings() as sup:
sup.filter(np.ComplexWarning, '')
c = c.astype(tgt.dtype)
assert_array_equal(c, tgt)
def test_empty_out(self):
        # Check that the output cannot be broadcast, so that it cannot
        # become size zero when the outer dimensions (the iterator size)
        # have size zero.
arr = np.ones((0, 1, 1))
out = np.ones((1, 1, 1))
assert self.matmul(arr, arr).shape == (0, 1, 1)
with pytest.raises(ValueError, match=r"non-broadcastable"):
self.matmul(arr, arr, out=out)
def test_out_contiguous(self):
a = np.ones((5, 2), dtype=float)
b = np.array([[1, 3], [5, 7]], dtype=float)
v = np.array([1, 3], dtype=float)
tgt = np.dot(a, b)
tgt_mv = np.dot(a, v)
# test out non-contiguous
out = np.ones((5, 2, 2), dtype=float)
c = self.matmul(a, b, out=out[..., 0])
assert c.base is out
assert_array_equal(c, tgt)
c = self.matmul(a, v, out=out[:, 0, 0])
assert_array_equal(c, tgt_mv)
c = self.matmul(v, a.T, out=out[:, 0, 0])
assert_array_equal(c, tgt_mv)
# test out contiguous in only last dim
out = np.ones((10, 2), dtype=float)
c = self.matmul(a, b, out=out[::2, :])
assert_array_equal(c, tgt)
# test transposes of out, args
out = np.ones((5, 2), dtype=float)
c = self.matmul(b.T, a.T, out=out.T)
assert_array_equal(out, tgt)
m1 = np.arange(15.).reshape(5, 3)
m2 = np.arange(21.).reshape(3, 7)
m3 = np.arange(30.).reshape(5, 6)[:, ::2] # non-contiguous
vc = np.arange(10.)
vr = np.arange(6.)
m0 = np.zeros((3, 0))
@pytest.mark.parametrize('args', (
# matrix-matrix
(m1, m2), (m2.T, m1.T), (m2.T.copy(), m1.T), (m2.T, m1.T.copy()),
# matrix-matrix-transpose, contiguous and non
(m1, m1.T), (m1.T, m1), (m1, m3.T), (m3, m1.T),
(m3, m3.T), (m3.T, m3),
# matrix-matrix non-contiguous
(m3, m2), (m2.T, m3.T), (m2.T.copy(), m3.T),
# vector-matrix, matrix-vector, contiguous
(m1, vr[:3]), (vc[:5], m1), (m1.T, vc[:5]), (vr[:3], m1.T),
# vector-matrix, matrix-vector, vector non-contiguous
(m1, vr[::2]), (vc[::2], m1), (m1.T, vc[::2]), (vr[::2], m1.T),
# vector-matrix, matrix-vector, matrix non-contiguous
(m3, vr[:3]), (vc[:5], m3), (m3.T, vc[:5]), (vr[:3], m3.T),
# vector-matrix, matrix-vector, both non-contiguous
(m3, vr[::2]), (vc[::2], m3), (m3.T, vc[::2]), (vr[::2], m3.T),
# size == 0
(m0, m0.T), (m0.T, m0), (m1, m0), (m0.T, m1.T),
))
def test_dot_equivalent(self, args):
r1 = np.matmul(*args)
r2 = np.dot(*args)
assert_equal(r1, r2)
r3 = np.matmul(args[0].copy(), args[1].copy())
assert_equal(r1, r3)
def test_matmul_object(self):
import fractions
f = np.vectorize(fractions.Fraction)
def random_ints():
return np.random.randint(1, 1000, size=(10, 3, 3))
M1 = f(random_ints(), random_ints())
M2 = f(random_ints(), random_ints())
M3 = self.matmul(M1, M2)
[N1, N2, N3] = [a.astype(float) for a in [M1, M2, M3]]
assert_allclose(N3, self.matmul(N1, N2))
def test_matmul_object_type_scalar(self):
from fractions import Fraction as F
v = np.array([F(2,3), F(5,7)])
res = self.matmul(v, v)
assert_(type(res) is F)
def test_matmul_empty(self):
a = np.empty((3, 0), dtype=object)
b = np.empty((0, 3), dtype=object)
c = np.zeros((3, 3))
assert_array_equal(np.matmul(a, b), c)
def test_matmul_exception_multiply(self):
# test that matmul fails if `__mul__` is missing
class add_not_multiply():
def __add__(self, other):
return self
a = np.full((3,3), add_not_multiply())
with assert_raises(TypeError):
b = np.matmul(a, a)
def test_matmul_exception_add(self):
# test that matmul fails if `__add__` is missing
class multiply_not_add():
def __mul__(self, other):
return self
a = np.full((3,3), multiply_not_add())
with assert_raises(TypeError):
b = np.matmul(a, a)
def test_matmul_bool(self):
# gh-14439
a = np.array([[1, 0],[1, 1]], dtype=bool)
assert np.max(a.view(np.uint8)) == 1
b = np.matmul(a, a)
# matmul with boolean output should always be 0, 1
assert np.max(b.view(np.uint8)) == 1
rg = np.random.default_rng(np.random.PCG64(43))
d = rg.integers(2, size=4*5, dtype=np.int8)
d = d.reshape(4, 5) > 0
out1 = np.matmul(d, d.reshape(5, 4))
out2 = np.dot(d, d.reshape(5, 4))
assert_equal(out1, out2)
c = np.matmul(np.zeros((2, 0), dtype=bool), np.zeros(0, dtype=bool))
assert not np.any(c)
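# Sketch of the gh-14439 property tested above, using only documented NumPy
# operations: boolean matmul is an "any of ANDs" reduction, so the result
# buffer only ever holds 0 or 1.
def _demo_bool_matmul_is_any_of_ands():
    rng = np.random.default_rng(0)
    a = rng.integers(2, size=(4, 5)).astype(bool)
    b = rng.integers(2, size=(5, 3)).astype(bool)
    ref = np.logical_or.reduce(a[:, :, None] & b[None, :, :], axis=1)
    assert_array_equal(np.matmul(a, b), ref)
    assert np.matmul(a, b).view(np.uint8).max() <= 1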
class TestMatmulOperator(MatmulCommon):
import operator
matmul = operator.matmul
def test_array_priority_override(self):
class A:
__array_priority__ = 1000
def __matmul__(self, other):
return "A"
def __rmatmul__(self, other):
return "A"
a = A()
b = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
def test_matmul_raises(self):
assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5))
assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc'))
assert_raises(TypeError, self.matmul, np.arange(10), np.void(b'abc'))
def test_matmul_inplace():
    # It would be nice to support in-place matmul eventually, but for now
    # we don't have a working implementation, so it is better to error out
    # and nudge people toward writing "a = a @ b".
a = np.eye(3)
b = np.eye(3)
assert_raises(TypeError, a.__imatmul__, b)
import operator
assert_raises(TypeError, operator.imatmul, a, b)
assert_raises(TypeError, exec, "a @= b", globals(), locals())
def test_matmul_axes():
a = np.arange(3*4*5).reshape(3, 4, 5)
c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)])
assert c.shape == (3, 4, 4)
d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)])
assert d.shape == (4, 4, 3)
e = np.swapaxes(d, 0, 2)
assert_array_equal(e, c)
f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)])
assert f.shape == (4, 5)
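# Worked companion to test_matmul_axes above: axes=[(-2, -1), (-1, -2), (1, 2)]
# names the core (matrix) dimensions of each operand, so the call multiplies
# each stacked matrix by its own transpose and stores the 4x4 result in output
# axes 1 and 2.
def _demo_matmul_axes_equivalence():
    a = np.arange(3*4*5).reshape(3, 4, 5)
    c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)])
    assert_array_equal(c, np.matmul(a, a.transpose(0, 2, 1)))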
class TestInner:
def test_inner_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.inner, c, A)
assert_raises(TypeError, np.inner, A, c)
def test_inner_scalar_and_vector(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
sca = np.array(3, dtype=dt)[()]
vec = np.array([1, 2], dtype=dt)
desired = np.array([3, 6], dtype=dt)
assert_equal(np.inner(vec, sca), desired)
assert_equal(np.inner(sca, vec), desired)
def test_vecself(self):
        # Ticket 844.
        # Inner product of a vector with itself used to segfault or give
        # a meaningless result.
a = np.zeros(shape=(1, 80), dtype=np.float64)
p = np.inner(a, a)
assert_almost_equal(p, 0, decimal=14)
def test_inner_product_with_various_contiguities(self):
# github issue 6532
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
# check an inner product involving a matrix transpose
A = np.array([[1, 2], [3, 4]], dtype=dt)
B = np.array([[1, 3], [2, 4]], dtype=dt)
C = np.array([1, 1], dtype=dt)
desired = np.array([4, 6], dtype=dt)
assert_equal(np.inner(A.T, C), desired)
assert_equal(np.inner(C, A.T), desired)
assert_equal(np.inner(B, C), desired)
assert_equal(np.inner(C, B), desired)
# check a matrix product
desired = np.array([[7, 10], [15, 22]], dtype=dt)
assert_equal(np.inner(A, B), desired)
# check the syrk vs. gemm paths
desired = np.array([[5, 11], [11, 25]], dtype=dt)
assert_equal(np.inner(A, A), desired)
assert_equal(np.inner(A, A.copy()), desired)
# check an inner product involving an aliased and reversed view
a = np.arange(5).astype(dt)
b = a[::-1]
desired = np.array(10, dtype=dt).item()
assert_equal(np.inner(b, a), desired)
def test_3d_tensor(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
a = np.arange(24).reshape(2,3,4).astype(dt)
b = np.arange(24, 48).reshape(2,3,4).astype(dt)
desired = np.array(
[[[[ 158, 182, 206],
[ 230, 254, 278]],
[[ 566, 654, 742],
[ 830, 918, 1006]],
[[ 974, 1126, 1278],
[1430, 1582, 1734]]],
[[[1382, 1598, 1814],
[2030, 2246, 2462]],
[[1790, 2070, 2350],
[2630, 2910, 3190]],
[[2198, 2542, 2886],
[3230, 3574, 3918]]]],
dtype=dt
)
assert_equal(np.inner(a, b), desired)
assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)
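# Sketch of the identity the 3-d inner-product test relies on: np.inner
# contracts the last axis of each operand, which is a tensordot over the
# final axes.
def _demo_inner_contracts_last_axes():
    a = np.arange(24.).reshape(2, 3, 4)
    b = np.arange(24., 48.).reshape(2, 3, 4)
    assert_array_equal(np.inner(a, b), np.tensordot(a, b, axes=(-1, -1)))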
class TestChoose:
def setup_method(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
self.y2 = 3*np.ones((2, 3), dtype=int)
self.ind = [0, 0, 1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2, 2, 3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
@pytest.mark.parametrize("ops",
[(1000, np.array([1], dtype=np.uint8)),
(-1, np.array([1], dtype=np.uint8)),
(1., np.float32(3)),
(1., np.array([3], dtype=np.float32))],)
def test_output_dtype(self, ops):
expected_dt = np.result_type(*ops)
        assert np.choose([0], ops).dtype == expected_dt
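# Minimal restatement of the choose semantics exercised above:
# out[i] == choices[ind[i]][i] once ind and the choice arrays broadcast.
def _demo_choose_semantics():
    ind = [0, 0, 1]
    choices = (2 * np.ones(3, dtype=int), 3 * np.ones(3, dtype=int))
    ref = np.array([choices[k][i] for i, k in enumerate(ind)])
    assert_array_equal(np.choose(ind, choices), ref)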
class TestRepeat:
def setup_method(self):
self.m = np.array([1, 2, 3, 4, 5, 6])
self.m_rect = self.m.reshape((2, 3))
def test_basic(self):
A = np.repeat(self.m, [1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
def test_broadcast1(self):
A = np.repeat(self.m, 2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
def test_axis_spec(self):
A = np.repeat(self.m_rect, [2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = np.repeat(self.m_rect, [1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
def test_broadcast2(self):
A = np.repeat(self.m_rect, 2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = np.repeat(self.m_rect, 2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
# TODO: test for multidimensional
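# Companion sketch for TestRepeat: np.repeat(m, counts) emits m[i] exactly
# counts[i] times, so the output length is the sum of the counts.
def _demo_repeat_counts():
    m = np.array([1, 2, 3])
    counts = [2, 0, 3]
    assert_array_equal(np.repeat(m, counts), [1, 1, 3, 3, 3])
    assert np.repeat(m, counts).size == sum(counts)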
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
@pytest.mark.parametrize('dt', [float, Decimal], ids=['float', 'object'])
class TestNeighborhoodIter:
# Simple, 2d tests
def test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'])
assert_array_equal(l, r)
# Test with start in the middle
r = [np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'], 2)
assert_array_equal(l, r)
def test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Simple, 1d tests
def test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
# Test mirror modes
def test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = _multiarray_tests.test_neighborhood_iterator(
x, [-2, 2], x[1], NEIGH_MODE['mirror'])
assert_([i.dtype == dt for i in l])
assert_array_equal(l, r)
# Circular mode
def test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = _multiarray_tests.test_neighborhood_iterator(
x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
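# Cross-check sketch (assumes np.pad's 'symmetric' and 'wrap' modes match the
# iterator's 'mirror' and 'circular' fills): sliding a window over padded
# data reproduces the first rows the tests above expect.
def _demo_neighborhood_padding_equivalents():
    x = np.linspace(1, 5, 5)
    mirrored = np.pad(x, 2, mode='symmetric')
    assert_array_equal(mirrored[0:5], [2, 1, 1, 2, 3])
    wrapped = np.pad(x, 2, mode='wrap')
    assert_array_equal(wrapped[0:5], [4, 5, 1, 2, 3])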
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter:
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-2, 4], NEIGH_MODE['zero'], [0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['mirror'], [-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
        # Stacking zero on top of circular
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['circular'], [-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
        # Stacking circular on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
        # Stacking circular on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
        # Stacking circular on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
        # Stacking circular on top of zero, first neighborhood strictly
        # inside the array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
class TestWarnings:
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1-2j, 1+2j])
with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
class TestMinScalarType:
def test_usigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
def test_usigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
assert_equal(wanted, dt)
def test_usigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
assert_equal(wanted, dt)
def test_usigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
assert_equal(wanted, dt)
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
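# Boundary sketch for TestMinScalarType: min_scalar_type picks the smallest
# dtype able to hold the value, stepping up at each power-of-two boundary and
# switching to signed types for negative input.
def _demo_min_scalar_type_boundaries():
    assert np.min_scalar_type(2**8 - 1) == np.dtype('uint8')
    assert np.min_scalar_type(2**8) == np.dtype('uint16')
    assert np.min_scalar_type(-1) == np.dtype('int8')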
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype:
def _check(self, spec, wanted):
dt = np.dtype(wanted)
actual = _dtype_from_pep3118(spec)
assert_equal(actual, dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
def test_native_padding_2(self):
# Native padding should work also for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and*, the item size
# should match the alignment if in aligned mode
align = np.dtype('i').alignment
size = np.dtype('i').itemsize
def aligned(n):
return align*(1 + (n-1)//align)
base = dict(formats=['i'], names=['f0'])
self._check('ix', dict(itemsize=aligned(size + 1), **base))
self._check('ixx', dict(itemsize=aligned(size + 2), **base))
self._check('ixxx', dict(itemsize=aligned(size + 3), **base))
self._check('ixxxx', dict(itemsize=aligned(size + 4), **base))
self._check('i7x', dict(itemsize=aligned(size + 7), **base))
self._check('^ix', dict(itemsize=size + 1, **base))
self._check('^ixx', dict(itemsize=size + 2, **base))
self._check('^ixxx', dict(itemsize=size + 3, **base))
self._check('^ixxxx', dict(itemsize=size + 4, **base))
self._check('^i7x', dict(itemsize=size + 7, **base))
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
size = np.dtype('i').itemsize
def aligned(n):
return (align*(1 + (n-1)//align))
self._check('(3)T{ix}', (dict(
names=['f0'],
formats=['i'],
offsets=[0],
itemsize=aligned(size + 1)
), (3,)))
def test_char_vs_string(self):
dt = np.dtype('c')
self._check('c', dt)
dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')])
self._check('4c4s', dt)
def test_field_order(self):
# gh-9053 - previously, we relied on dictionary key order
self._check("(0)I:a:f:b:", [('a', 'I', (0,)), ('b', 'f')])
self._check("(0)I:b:f:a:", [('b', 'I', (0,)), ('a', 'f')])
def test_unnamed_fields(self):
self._check('ii', [('f0', 'i'), ('f1', 'i')])
self._check('ii:f0:', [('f1', 'i'), ('f0', 'i')])
self._check('i', 'i')
self._check('i:f0:', [('f0', 'i')])
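# Hedged round-trip sketch: exporting an aligned struct dtype through a
# memoryview and parsing the PEP 3118 format back should recover the dtype,
# which is the property the hand-written _check specs above encode.
def _demo_pep3118_roundtrip():
    dt = np.dtype([('a', 'i1'), ('b', 'i4')], align=True)
    spec = memoryview(np.zeros(1, dt)).format
    assert _dtype_from_pep3118(spec) == dt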
class TestNewBufferProtocol:
""" Test PEP3118 buffers """
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
b'aaaa', 'bbbb', b'xxx', True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.sctypeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_invalid_buffer_format(self):
        # datetime64 cannot be used fully in a buffer yet
        # Should be fixed in the next NumPy major release
dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
a = np.empty(3, dt)
assert_raises((ValueError, BufferError), memoryview, a)
assert_raises((ValueError, BufferError), memoryview, np.array((3), 'M8[D]'))
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, ())
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, ())
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, ())
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
b'aaaa', 'bbbb', b' ', True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, ())
sz = sum([np.dtype(b).itemsize for a, b in dt])
if np.dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_DEBUG changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, ())
assert_equal(y.ndim, 0)
assert_equal(y.strides, ())
assert_equal(y.suboffsets, ())
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError,
_multiarray_tests.get_buffer_info,
np.arange(5)[::2], ('SIMPLE',))
@pytest.mark.parametrize(["obj", "error"], [
pytest.param(np.array([1, 2], dtype=rational), ValueError, id="array"),
pytest.param(rational(1, 2), TypeError, id="scalar")])
def test_export_and_pickle_user_dtype(self, obj, error):
        # User dtypes should export successfully when FORMAT is not requested.
with pytest.raises(error):
_multiarray_tests.get_buffer_info(obj, ("STRIDED_RO", "FORMAT"))
_multiarray_tests.get_buffer_info(obj, ("STRIDED_RO",))
# This is currently also necessary to implement pickling:
pickle_obj = pickle.dumps(obj)
res = pickle.loads(pickle_obj)
assert_array_equal(res, obj)
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
if HAS_REFCOUNT:
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
if HAS_REFCOUNT:
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
del c # avoid pyflakes unused variable warning.
def test_padded_struct_array(self):
dt1 = np.dtype(
[('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
@pytest.mark.valgrind_error(reason="leaks buffer info cache temporarily.")
def test_relaxed_strides(self, c=np.ones((1, 10, 10), dtype='i8')):
# Note: c defined as parameter so that it is persistent and leak
# checks will notice gh-16934 (buffer info cache leak).
c.strides = (-1, 80, 8) # strides need to be fixed at export
assert_(memoryview(c).strides == (800, 80, 8))
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
assert_(memoryview(fortran).strides == (8, 80, 800))
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
shape, strides = _multiarray_tests.get_buffer_info(
arr, ['F_CONTIGUOUS'])
assert_(strides[0] == 8)
arr = np.ones((10, 1), order='F')
shape, strides = _multiarray_tests.get_buffer_info(
arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
@pytest.mark.valgrind_error(reason="leaks buffer info cache temporarily.")
@pytest.mark.skipif(not np.ones((10, 1), order="C").flags.f_contiguous,
reason="Test is unnecessary (but fails) without relaxed strides.")
def test_relaxed_strides_buffer_info_leak(self, arr=np.ones((1, 10))):
"""Test that alternating export of C- and F-order buffers from
an array which is both C- and F-order when relaxed strides is
active works.
This test defines array in the signature to ensure leaking more
references every time the test is run (catching the leak with
pytest-leaks).
"""
for i in range(10):
_, s = _multiarray_tests.get_buffer_info(arr, ['F_CONTIGUOUS'])
assert s == (8, 8)
_, s = _multiarray_tests.get_buffer_info(arr, ['C_CONTIGUOUS'])
assert s == (80, 8)
def test_out_of_order_fields(self):
dt = np.dtype(dict(
formats=['<i4', '<i4'],
names=['one', 'two'],
offsets=[4, 0],
itemsize=8
))
# overlapping fields cannot be represented by PEP3118
arr = np.empty(1, dt)
with assert_raises(ValueError):
memoryview(arr)
def test_max_dims(self):
a = np.ones((1,) * 32)
self._check_roundtrip(a)
@pytest.mark.slow
def test_error_too_many_dims(self):
def make_ctype(shape, scalar_type):
t = scalar_type
for dim in shape[::-1]:
t = dim * t
return t
# construct a memoryview with 33 dimensions
c_u8_33d = make_ctype((1,)*33, ctypes.c_uint8)
m = memoryview(c_u8_33d())
assert_equal(m.ndim, 33)
assert_raises_regex(
RuntimeError, "ndim",
np.array, m)
# The above seems to create some deep cycles, clean them up for
# easier reference count debugging:
del c_u8_33d, m
for i in range(33):
if gc.collect() == 0:
break
def test_error_pointer_type(self):
# gh-6741
m = memoryview(ctypes.pointer(ctypes.c_uint8()))
assert_('&' in m.format)
assert_raises_regex(
ValueError, "format string",
np.array, m)
def test_error_message_unsupported(self):
# wchar has no corresponding numpy type - if this changes in future, we
# need a better way to construct an invalid memoryview format.
t = ctypes.c_wchar * 4
with assert_raises(ValueError) as cm:
np.array(t())
exc = cm.exception
with assert_raises_regex(
NotImplementedError,
r"Unrepresentable .* 'u' \(UCS-2 strings\)"
):
raise exc.__cause__
def test_ctypes_integer_via_memoryview(self):
# gh-11150, due to bpo-10746
for c_integer in {ctypes.c_int, ctypes.c_long, ctypes.c_longlong}:
value = c_integer(42)
with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning)
np.asarray(value)
def test_ctypes_struct_via_memoryview(self):
# gh-10528
class foo(ctypes.Structure):
_fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint32)]
f = foo(a=1, b=2)
with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning)
arr = np.asarray(f)
assert_equal(arr['a'], 1)
assert_equal(arr['b'], 2)
f.a = 3
assert_equal(arr['a'], 3)
@pytest.mark.parametrize("obj", [np.ones(3), np.ones(1, dtype="i,i")[()]])
def test_error_if_stored_buffer_info_is_corrupted(self, obj):
"""
        If a user extended a NumPy array before 1.20 and then runs it
        on NumPy 1.20+, a C-subclassed array might in theory modify
        the new buffer-info field. This checks that an error is raised
        if that happens during buffer export, and that an error is
        written on delete.
        This is a sanity check to help users transition to safe code;
        it may be deleted at any point.
"""
# corrupt buffer info:
_multiarray_tests.corrupt_or_fix_bufferinfo(obj)
name = type(obj)
with pytest.raises(RuntimeError,
match=f".*{name} appears to be C subclassed"):
memoryview(obj)
# Fix buffer info again before we delete (or we lose the memory)
_multiarray_tests.corrupt_or_fix_bufferinfo(obj)
def test_no_suboffsets(self):
try:
import _testbuffer
except ImportError:
raise pytest.skip("_testbuffer is not available")
for shape in [(2, 3), (2, 3, 4)]:
data = list(range(np.prod(shape)))
buffer = _testbuffer.ndarray(data, shape, format='i',
flags=_testbuffer.ND_PIL)
msg = "NumPy currently does not support.*suboffsets"
with pytest.raises(BufferError, match=msg):
np.asarray(buffer)
with pytest.raises(BufferError, match=msg):
np.asarray([buffer])
# Also check (unrelated and more limited but similar) frombuffer:
with pytest.raises(BufferError):
np.frombuffer(buffer)
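# Small sketch of the export rule the class above relies on: a memoryview
# reports the exporting array's byte strides directly, including for
# non-contiguous slices.
def _demo_memoryview_reports_strides():
    x = np.zeros((4, 4), dtype=np.float64)[::2]
    m = memoryview(x)
    assert m.shape == (2, 4)
    assert m.strides == (64, 8)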
class TestArrayCreationCopyArgument(object):
class RaiseOnBool:
def __bool__(self):
raise ValueError
true_vals = [True, np._CopyMode.ALWAYS, np.True_]
false_vals = [False, np._CopyMode.IF_NEEDED, np.False_]
def test_scalars(self):
# Test both numpy and python scalars
for dtype in np.typecodes["All"]:
arr = np.zeros((), dtype=dtype)
scalar = arr[()]
pyscalar = arr.item(0)
# Test never-copy raises error:
assert_raises(ValueError, np.array, scalar,
copy=np._CopyMode.NEVER)
assert_raises(ValueError, np.array, pyscalar,
copy=np._CopyMode.NEVER)
assert_raises(ValueError, np.array, pyscalar,
copy=self.RaiseOnBool())
assert_raises(ValueError, _multiarray_tests.npy_ensurenocopy,
[1])
            # Casting with an explicit dtype requires a conversion, which
            # never-copy must reject:
with pytest.raises(ValueError):
np.array(pyscalar, dtype=np.int64, copy=np._CopyMode.NEVER)
def test_compatible_cast(self):
        # Some types are compatible even though they are different, so no
        # copy is necessary for them. This is mostly true for some integers
def int_types(byteswap=False):
int_types = (np.typecodes["Integer"] +
np.typecodes["UnsignedInteger"])
for int_type in int_types:
yield np.dtype(int_type)
if byteswap:
yield np.dtype(int_type).newbyteorder()
for int1 in int_types():
for int2 in int_types(True):
arr = np.arange(10, dtype=int1)
for copy in self.true_vals:
res = np.array(arr, copy=copy, dtype=int2)
assert res is not arr and res.flags.owndata
assert_array_equal(res, arr)
if int1 == int2:
# Casting is not necessary, base check is sufficient here
for copy in self.false_vals:
res = np.array(arr, copy=copy, dtype=int2)
assert res is arr or res.base is arr
res = np.array(arr,
copy=np._CopyMode.NEVER,
dtype=int2)
assert res is arr or res.base is arr
else:
# Casting is necessary, assert copy works:
for copy in self.false_vals:
res = np.array(arr, copy=copy, dtype=int2)
assert res is not arr and res.flags.owndata
assert_array_equal(res, arr)
assert_raises(ValueError, np.array,
arr, copy=np._CopyMode.NEVER,
dtype=int2)
assert_raises(ValueError, np.array,
arr, copy=None,
dtype=int2)
def test_buffer_interface(self):
# Buffer interface gives direct memory access (no copy)
arr = np.arange(10)
view = memoryview(arr)
# Checking bases is a bit tricky since numpy creates another
# memoryview, so use may_share_memory.
for copy in self.true_vals:
res = np.array(view, copy=copy)
assert not np.may_share_memory(arr, res)
for copy in self.false_vals:
res = np.array(view, copy=copy)
assert np.may_share_memory(arr, res)
res = np.array(view, copy=np._CopyMode.NEVER)
assert np.may_share_memory(arr, res)
def test_array_interfaces(self):
# Array interface gives direct memory access (much like a memoryview)
base_arr = np.arange(10)
class ArrayLike:
__array_interface__ = base_arr.__array_interface__
arr = ArrayLike()
for copy, val in [(True, None), (np._CopyMode.ALWAYS, None),
(False, arr), (np._CopyMode.IF_NEEDED, arr),
(np._CopyMode.NEVER, arr)]:
res = np.array(arr, copy=copy)
assert res.base is val
def test___array__(self):
base_arr = np.arange(10)
class ArrayLike:
def __array__(self):
                # __array__ should return a copy; numpy cannot know
                # this, however.
return base_arr
arr = ArrayLike()
for copy in self.true_vals:
res = np.array(arr, copy=copy)
assert_array_equal(res, base_arr)
            # An additional copy is currently forced by numpy in this case
            # (you could argue that numpy does not trust the ArrayLike).
            # This may be open to change:
assert res is not base_arr
for copy in self.false_vals:
            res = np.array(arr, copy=copy)
assert_array_equal(res, base_arr)
assert res is base_arr # numpy trusts the ArrayLike
with pytest.raises(ValueError):
np.array(arr, copy=np._CopyMode.NEVER)
@pytest.mark.parametrize(
"arr", [np.ones(()), np.arange(81).reshape((9, 9))])
@pytest.mark.parametrize("order1", ["C", "F", None])
@pytest.mark.parametrize("order2", ["C", "F", "A", "K"])
def test_order_mismatch(self, arr, order1, order2):
# The order is the main (python side) reason that can cause
# a never-copy to fail.
# Prepare C-order, F-order and non-contiguous arrays:
arr = arr.copy(order1)
if order1 == "C":
assert arr.flags.c_contiguous
elif order1 == "F":
assert arr.flags.f_contiguous
elif arr.ndim != 0:
# Make array non-contiguous
arr = arr[::2, ::2]
assert not arr.flags.forc
# Whether a copy is necessary depends on the order of arr:
if order2 == "C":
no_copy_necessary = arr.flags.c_contiguous
elif order2 == "F":
no_copy_necessary = arr.flags.f_contiguous
else:
# Keeporder and Anyorder are OK with non-contiguous output.
# This is not consistent with the `astype` behaviour which
# enforces contiguity for "A". It is probably historic from when
# "K" did not exist.
no_copy_necessary = True
# Test it for both the array and a memoryview
for view in [arr, memoryview(arr)]:
for copy in self.true_vals:
res = np.array(view, copy=copy, order=order2)
assert res is not arr and res.flags.owndata
assert_array_equal(arr, res)
if no_copy_necessary:
for copy in self.false_vals:
res = np.array(view, copy=copy, order=order2)
# res.base.obj refers to the memoryview
if not IS_PYPY:
assert res is arr or res.base.obj is arr
res = np.array(view, copy=np._CopyMode.NEVER,
order=order2)
if not IS_PYPY:
assert res is arr or res.base.obj is arr
else:
for copy in self.false_vals:
res = np.array(arr, copy=copy, order=order2)
assert_array_equal(arr, res)
assert_raises(ValueError, np.array,
view, copy=np._CopyMode.NEVER,
order=order2)
assert_raises(ValueError, np.array,
view, copy=None,
order=order2)
def test_striding_not_ok(self):
arr = np.array([[1, 2, 4], [3, 4, 5]])
assert_raises(ValueError, np.array,
arr.T, copy=np._CopyMode.NEVER,
order='C')
assert_raises(ValueError, np.array,
arr.T, copy=np._CopyMode.NEVER,
order='C', dtype=np.int64)
assert_raises(ValueError, np.array,
arr, copy=np._CopyMode.NEVER,
order='F')
assert_raises(ValueError, np.array,
arr, copy=np._CopyMode.NEVER,
order='F', dtype=np.int64)
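# Condensed sketch of the copy-mode semantics tested above: IF_NEEDED passes
# an existing array straight through, ALWAYS duplicates it, and NEVER raises
# whenever a cast or re-layout would force a copy.
def _demo_copy_mode_semantics():
    arr = np.arange(3)
    assert np.array(arr, copy=np._CopyMode.IF_NEEDED) is arr
    assert np.array(arr, copy=np._CopyMode.ALWAYS) is not arr
    assert_raises(ValueError, np.array, arr,
                  copy=np._CopyMode.NEVER, dtype=np.float64)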
class TestArrayAttributeDeletion:
def test_multiarray_writable_attributes_deletion(self):
        # ticket #2046: should not segfault but raise AttributeError
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "Assigning the 'data' attribute")
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
"ctypes", "T", "__array_interface__", "__array_struct__",
"__array_priority__", "__array_finalize__"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ['writebackifcopy', 'updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_not_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
"owndata", "fnc", "forc", "behaved", "carray", "farray",
"num"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
class TestArrayInterface():
class Foo:
def __init__(self, value):
self.value = value
self.iface = {'typestr': 'f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
@pytest.mark.parametrize('val, iface, expected', [
(f, {}, 0.5),
([f], {}, [0.5]),
([f, f], {}, [0.5, 0.5]),
(f, {'shape': ()}, 0.5),
(f, {'shape': None}, TypeError),
(f, {'shape': (1, 1)}, [[0.5]]),
(f, {'shape': (2,)}, ValueError),
(f, {'strides': ()}, 0.5),
(f, {'strides': (2,)}, ValueError),
(f, {'strides': 16}, TypeError),
])
def test_scalar_interface(self, val, iface, expected):
# Test scalar coercion within the array interface
self.f.iface = {'typestr': 'f8'}
self.f.iface.update(iface)
if HAS_REFCOUNT:
pre_cnt = sys.getrefcount(np.dtype('f8'))
if isinstance(expected, type):
assert_raises(expected, np.array, val)
else:
result = np.array(val)
assert_equal(np.array(val), expected)
assert result.dtype == 'f8'
del result
if HAS_REFCOUNT:
post_cnt = sys.getrefcount(np.dtype('f8'))
assert_equal(pre_cnt, post_cnt)
def test_interface_no_shape():
class ArrayLike:
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_array_interface_itemsize():
# See gh-6361
my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'],
'offsets': [0, 8], 'itemsize': 16})
a = np.ones(10, dtype=my_dtype)
descr_t = np.dtype(a.__array_interface__['descr'])
typestr_t = np.dtype(a.__array_interface__['typestr'])
assert_equal(descr_t.itemsize, typestr_t.itemsize)
def test_array_interface_empty_shape():
# See gh-7994
arr = np.array([1, 2, 3])
interface1 = dict(arr.__array_interface__)
interface1['shape'] = ()
class DummyArray1:
__array_interface__ = interface1
    # NOTE: Because bytes objects support the buffer interface, setting the
    # interface data to bytes invokes the bug this tests for: an
    # __array_interface__ with shape=() was not allowed when the data is an
    # object exposing the buffer interface.
interface2 = dict(interface1)
interface2['data'] = arr[0].tobytes()
class DummyArray2:
__array_interface__ = interface2
arr1 = np.asarray(DummyArray1())
arr2 = np.asarray(DummyArray2())
arr3 = arr[:1].reshape(())
assert_equal(arr1, arr2)
assert_equal(arr1, arr3)
def test_array_interface_offset():
arr = np.array([1, 2, 3], dtype='int32')
interface = dict(arr.__array_interface__)
interface['data'] = memoryview(arr)
interface['shape'] = (2,)
interface['offset'] = 4
class DummyArray:
__array_interface__ = interface
arr1 = np.asarray(DummyArray())
assert_equal(arr1, arr[1:])
def test_array_interface_unicode_typestr():
arr = np.array([1, 2, 3], dtype='int32')
interface = dict(arr.__array_interface__)
interface['typestr'] = '\N{check mark}'
class DummyArray:
__array_interface__ = interface
# should not be UnicodeEncodeError
with pytest.raises(TypeError):
np.asarray(DummyArray())
def test_flat_element_deletion():
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except Exception:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMapIter:
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/_multiarray_tests.c.src
a = np.arange(12).reshape((3, 4)).astype(float)
index = ([1, 1, 2, 0],
[0, 0, 2, 3])
vals = [50, 50, 30, 16]
_multiarray_tests.test_inplace_increment(a, index, vals)
assert_equal(a, [[0.00, 1., 2.0, 19.],
[104., 5., 6.0, 7.0],
[8.00, 9., 40., 11.]])
b = np.arange(6).astype(float)
index = (np.array([1, 2, 0]),)
vals = [50, 4, 100.1]
_multiarray_tests.test_inplace_increment(b, index, vals)
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
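# Pure-NumPy counterpart to the C helper above: np.add.at performs the same
# unbuffered fancy-index increment, so repeated indices accumulate instead of
# overwriting each other.
def _demo_add_at_matches_inplace_increment():
    a = np.arange(12, dtype=float).reshape(3, 4)
    np.add.at(a, ([1, 1, 2, 0], [0, 0, 2, 3]), [50, 50, 30, 16])
    assert_equal(a, [[0., 1., 2., 19.],
                     [104., 5., 6., 7.],
                     [8., 9., 40., 11.]])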
class TestAsCArray:
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = _multiarray_tests.test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
from_c = _multiarray_tests.test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
from_c = _multiarray_tests.test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
class TestConversion:
def test_array_scalar_relational_operation(self):
# All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
# Unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Signed integers and floats
        for dt1 in 'bhilqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
            for dt2 in 'bhilqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
def test_to_bool_scalar(self):
assert_equal(bool(np.array([False])), False)
assert_equal(bool(np.array([True])), True)
assert_equal(bool(np.array([[42]])), True)
assert_raises(ValueError, bool, np.array([1, 2]))
class NotConvertible:
def __bool__(self):
raise NotImplementedError
assert_raises(NotImplementedError, bool, np.array(NotConvertible()))
assert_raises(NotImplementedError, bool, np.array([NotConvertible()]))
if IS_PYSTON:
pytest.skip("Pyston disables recursion checking")
self_containing = np.array([None])
self_containing[0] = self_containing
Error = RecursionError
assert_raises(Error, bool, self_containing) # previously stack overflow
self_containing[0] = None # resolve circular reference
def test_to_int_scalar(self):
# gh-9972 means that these aren't always the same
int_funcs = (int, lambda x: x.__int__())
for int_func in int_funcs:
assert_equal(int_func(np.array(0)), 0)
assert_equal(int_func(np.array([1])), 1)
assert_equal(int_func(np.array([[42]])), 42)
assert_raises(TypeError, int_func, np.array([1, 2]))
# gh-9972
assert_equal(4, int_func(np.array('4')))
assert_equal(5, int_func(np.bytes_(b'5')))
assert_equal(6, int_func(np.unicode_(u'6')))
# The delegation of int() to __trunc__ was deprecated in
# Python 3.11.
if sys.version_info < (3, 11):
class HasTrunc:
def __trunc__(self):
return 3
assert_equal(3, int_func(np.array(HasTrunc())))
assert_equal(3, int_func(np.array([HasTrunc()])))
class NotConvertible:
def __int__(self):
raise NotImplementedError
assert_raises(NotImplementedError,
int_func, np.array(NotConvertible()))
assert_raises(NotImplementedError,
int_func, np.array([NotConvertible()]))
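# One-line restatement of the conversion rule above: only size-one arrays
# convert to Python scalars; anything larger is ambiguous and raises.
def _demo_size_one_conversion():
    assert int(np.array([[7]])) == 7
    assert_raises(TypeError, int, np.arange(2))
    assert_raises(ValueError, bool, np.arange(2))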
class TestWhere:
def test_basic(self):
dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
c = np.ones(53, dtype=bool)
            assert_equal(np.where(c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def test_exotic(self):
# object
assert_array_equal(np.where(True, None, None), np.array(None))
# zero sized
m = np.array([], dtype=bool).reshape(0, 3)
b = np.array([], dtype=np.float64).reshape(0, 3)
assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
# object cast
d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
1.267, 0.229, -1.39, 0.487])
nan = float('NaN')
e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
dtype=object)
m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1,
0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool)
        r = e.copy()
r[np.where(m)] = d[np.where(m)]
assert_array_equal(np.where(m, d, e), r)
        r = e.copy()
r[np.where(~m)] = d[np.where(~m)]
assert_array_equal(np.where(m, e, d), r)
assert_array_equal(np.where(m, e, e), e)
        # minimal dtype result with NaN scalar (e.g. required by pandas)
d = np.array([1., 2.], dtype=np.float32)
e = float('NaN')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('-Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = float(1e150)
assert_equal(np.where(True, d, e).dtype, np.float64)
def test_ndim(self):
c = [True, False]
a = np.zeros((2, 25))
b = np.ones((2, 25))
r = np.where(np.array(c)[:,np.newaxis], a, b)
assert_array_equal(r[0], a[0])
assert_array_equal(r[1], b[0])
a = a.T
b = b.T
r = np.where(c, a, b)
assert_array_equal(r[:,0], a[:,0])
assert_array_equal(r[:,1], b[:,0])
def test_dtype_mix(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
a = np.uint32(1)
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
a = a.astype(np.float32)
b = b.astype(np.int64)
assert_equal(np.where(c, a, b), r)
# non bool mask
c = c.astype(int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
tmpmask = c != 0
c[c == 0] = 41247212
c[tmpmask] = 0
assert_equal(np.where(c, b, a), r)
def test_foreign(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
a = np.ones(1, dtype='>i4')
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
b = b.astype('>f8')
assert_equal(np.where(c, a, b), r)
a = a.astype('<i4')
assert_equal(np.where(c, a, b), r)
c = c.astype('>i4')
assert_equal(np.where(c, a, b), r)
def test_error(self):
c = [True, True]
a = np.ones((4, 5))
b = np.ones((5, 5))
assert_raises(ValueError, np.where, c, a, a)
assert_raises(ValueError, np.where, c[0], a, b)
def test_string(self):
# gh-4778 check strings are properly filled with nulls
a = np.array("abc")
b = np.array("x" * 753)
assert_equal(np.where(True, a, b), "abc")
assert_equal(np.where(False, b, a), "abc")
# check native datatype sized strings
a = np.array("abcd")
b = np.array("x" * 8)
assert_equal(np.where(True, a, b), "abcd")
assert_equal(np.where(False, b, a), "abcd")
def test_empty_result(self):
        # Pass an empty `where` result through an assignment which reads the
        # data of empty arrays; the error was detectable with valgrind,
        # see gh-8922.
x = np.zeros((1, 1))
ibad = np.vstack(np.where(x == 99.))
assert_array_equal(ibad,
np.atleast_2d(np.array([[],[]], dtype=np.intp)))
def test_largedim(self):
# invalid read regression gh-9304
shape = [10, 2, 3, 4, 5, 6]
np.random.seed(2)
array = np.random.rand(*shape)
for i in range(10):
benchmark = array.nonzero()
result = array.nonzero()
assert_array_equal(benchmark, result)
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
class TestSizeOf:
def test_empty_array(self):
x = np.array([])
assert_(sys.getsizeof(x) > 0)
def check_array(self, dtype):
elem_size = dtype(0).itemsize
for length in [10, 50, 100, 500]:
x = np.arange(length, dtype=dtype)
assert_(sys.getsizeof(x) > length * elem_size)
def test_array_int32(self):
self.check_array(np.int32)
def test_array_int64(self):
self.check_array(np.int64)
def test_array_float32(self):
self.check_array(np.float32)
def test_array_float64(self):
self.check_array(np.float64)
def test_view(self):
d = np.ones(100)
assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
def test_reshape(self):
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
@_no_tracing
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
d.resize(50)
assert_(old > sys.getsizeof(d))
d.resize(150)
assert_(old < sys.getsizeof(d))
def test_error(self):
d = np.ones(100)
assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing:
def test_arrays_not_hashable(self):
x = np.ones(3)
assert_raises(TypeError, hash, x)
def test_collections_hashable(self):
x = np.array([])
assert_(not isinstance(x, collections.abc.Hashable))
class TestArrayPriority:
    # This will go away when __array_priority__ is settled; meanwhile
    # it serves to check unintended changes.
op = operator
binary_ops = [
        op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
        op.and_, op.or_, op.xor, op.lshift, op.rshift, op.gt,
        op.ge, op.lt, op.le, op.ne, op.eq
]
class Foo(np.ndarray):
__array_priority__ = 100.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Bar(np.ndarray):
__array_priority__ = 101.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Other:
__array_priority__ = 1000.
def _all(self, other):
return self.__class__()
__add__ = __radd__ = _all
__sub__ = __rsub__ = _all
__mul__ = __rmul__ = _all
__pow__ = __rpow__ = _all
__div__ = __rdiv__ = _all
__mod__ = __rmod__ = _all
__truediv__ = __rtruediv__ = _all
__floordiv__ = __rfloordiv__ = _all
__and__ = __rand__ = _all
__xor__ = __rxor__ = _all
__or__ = __ror__ = _all
__lshift__ = __rlshift__ = _all
__rshift__ = __rrshift__ = _all
__eq__ = _all
__ne__ = _all
__gt__ = _all
__ge__ = _all
__lt__ = _all
__le__ = _all
def test_ndarray_subclass(self):
a = np.array([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_ndarray_other(self):
a = np.array([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
def test_subclass_subclass(self):
a = self.Foo([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_subclass_other(self):
a = self.Foo([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero:
def test_empty_bstring_array_is_falsey(self):
assert_(not np.array([''], dtype=str))
def test_whitespace_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=str)
a[0] = ' \0\0'
assert_(not a)
def test_all_null_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=str)
a[0] = '\0\0\0\0'
assert_(not a)
def test_null_inside_bstring_array_is_truthy(self):
a = np.array(['spam'], dtype=str)
a[0] = ' \0 \0'
assert_(a)
class TestUnicodeEncoding:
"""
    Tests for encoding-related bugs, such as UCS2 vs UCS4, round-tripping
issues, etc
"""
def test_round_trip(self):
""" Tests that GETITEM, SETITEM, and PyArray_Scalar roundtrip """
# gh-15363
arr = np.zeros(shape=(), dtype="U1")
for i in range(1, sys.maxunicode + 1):
expected = chr(i)
arr[()] = expected
assert arr[()] == expected
assert arr.item() == expected
def test_assign_scalar(self):
# gh-3258
l = np.array(['aa', 'bb'])
l[:] = np.unicode_('cc')
assert_equal(l, ['cc', 'cc'])
def test_fill_scalar(self):
# gh-7227
l = np.array(['aa', 'bb'])
l.fill(np.unicode_('cc'))
assert_equal(l, ['cc', 'cc'])
class TestUnicodeArrayNonzero:
def test_empty_ustring_array_is_falsey(self):
assert_(not np.array([''], dtype=np.unicode_))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode_)
a[0] = ' \0\0'
assert_(not a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode_)
a[0] = '\0\0\0\0'
assert_(not a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode_)
a[0] = ' \0 \0'
assert_(a)
class TestFormat:
def test_0d(self):
a = np.array(np.pi)
assert_equal('{:0.3g}'.format(a), '3.14')
assert_equal('{:0.3g}'.format(a[()]), '3.14')
def test_1d_no_format(self):
a = np.array([np.pi])
assert_equal('{}'.format(a), str(a))
def test_1d_format(self):
# until gh-5543, ensure that the behaviour matches what it used to be
a = np.array([np.pi])
assert_raises(TypeError, '{:30}'.format, a)
from numpy.testing import IS_PYPY
class TestCTypes:
def test_ctypes_is_available(self):
test_arr = np.array([[1, 2, 3], [4, 5, 6]])
assert_equal(ctypes, test_arr.ctypes._ctypes)
assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
def test_ctypes_is_not_available(self):
from numpy.core import _internal
_internal.ctypes = None
try:
test_arr = np.array([[1, 2, 3], [4, 5, 6]])
assert_(isinstance(test_arr.ctypes._ctypes,
_internal._missing_ctypes))
assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
finally:
_internal.ctypes = ctypes
def _make_readonly(x):
x.flags.writeable = False
return x
@pytest.mark.parametrize('arr', [
np.array([1, 2, 3]),
np.array([['one', 'two'], ['three', 'four']]),
np.array((1, 2), dtype='i4,i4'),
np.zeros((2,), dtype=
np.dtype(dict(
formats=['<i4', '<i4'],
names=['a', 'b'],
offsets=[0, 2],
itemsize=6
))
),
np.array([None], dtype=object),
np.array([]),
np.empty((0, 0)),
_make_readonly(np.array([1, 2, 3])),
], ids=[
'1d',
'2d',
'structured',
'overlapping',
'object',
'empty',
'empty-2d',
'readonly'
])
def test_ctypes_data_as_holds_reference(self, arr):
# gh-9647
# create a copy to ensure that pytest does not mess with the refcounts
arr = arr.copy()
arr_ref = weakref.ref(arr)
ctypes_ptr = arr.ctypes.data_as(ctypes.c_void_p)
# `ctypes_ptr` should hold onto `arr`
del arr
break_cycles()
assert_(arr_ref() is not None, "ctypes pointer did not hold onto a reference")
# but when the `ctypes_ptr` object dies, so should `arr`
del ctypes_ptr
if IS_PYPY:
# Pypy does not recycle arr objects immediately. Trigger gc to
# release arr. Cpython uses refcounts. An explicit call to gc
# should not be needed here.
break_cycles()
        assert_(arr_ref() is None, "ctypes pointer kept the array alive")
def test_ctypes_as_parameter_holds_reference(self):
arr = np.array([None]).copy()
arr_ref = weakref.ref(arr)
ctypes_ptr = arr.ctypes._as_parameter_
# `ctypes_ptr` should hold onto `arr`
del arr
break_cycles()
assert_(arr_ref() is not None, "ctypes pointer did not hold onto a reference")
# but when the `ctypes_ptr` object dies, so should `arr`
del ctypes_ptr
if IS_PYPY:
break_cycles()
        assert_(arr_ref() is None, "ctypes pointer kept the array alive")
class TestWritebackIfCopy:
# all these tests use the WRITEBACKIFCOPY mechanism
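    # Roughly, the mechanism works like this: when an operation needs a
    # contiguous or aligned buffer (e.g. for an out= argument) but is handed
    # one that is not, numpy operates on a temporary copy flagged
    # WRITEBACKIFCOPY and copies the result back into the original array once
    # the temporary is resolved; discarding the temporary instead leaves the
    # original untouched (see test_view_assign and test_view_discard_refcount
    # below).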
def test_argmax_with_out(self):
mat = np.eye(5)
out = np.empty(5, dtype='i2')
res = np.argmax(mat, 0, out=out)
assert_equal(res, range(5))
def test_argmin_with_out(self):
mat = -np.eye(5)
out = np.empty(5, dtype='i2')
res = np.argmin(mat, 0, out=out)
assert_equal(res, range(5))
def test_insert_noncontiguous(self):
a = np.arange(6).reshape(2,3).T # force non-c-contiguous
# uses arr_insert
np.place(a, a>2, [44, 55])
assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]]))
# hit one of the failing paths
assert_raises(ValueError, np.place, a, a>20, [])
def test_put_noncontiguous(self):
a = np.arange(6).reshape(2,3).T # force non-c-contiguous
np.put(a, [0, 2], [44, 55])
assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]]))
def test_putmask_noncontiguous(self):
a = np.arange(6).reshape(2,3).T # force non-c-contiguous
# uses arr_putmask
np.putmask(a, a>2, a**2)
assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]]))
def test_take_mode_raise(self):
a = np.arange(6, dtype='int')
out = np.empty(2, dtype='int')
np.take(a, [0, 2], out=out, mode='raise')
assert_equal(out, np.array([0, 2]))
def test_choose_mod_raise(self):
a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]])
out = np.empty((3,3), dtype='int')
choices = [-10, 10]
np.choose(a, choices, out=out, mode='raise')
assert_equal(out, np.array([[ 10, -10, 10],
[-10, 10, -10],
[ 10, -10, 10]]))
def test_flatiter__array__(self):
a = np.arange(9).reshape(3,3)
b = a.T.flat
c = b.__array__()
# triggers the WRITEBACKIFCOPY resolution, assuming refcount semantics
del c
def test_dot_out(self):
# if HAVE_CBLAS, will use WRITEBACKIFCOPY
a = np.arange(9, dtype=float).reshape(3,3)
b = np.dot(a, a, out=a)
assert_equal(b, np.array([[15, 18, 21], [42, 54, 66], [69, 90, 111]]))
def test_view_assign(self):
from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_resolve
arr = np.arange(9).reshape(3, 3).T
arr_wb = npy_create_writebackifcopy(arr)
assert_(arr_wb.flags.writebackifcopy)
assert_(arr_wb.base is arr)
arr_wb[...] = -100
npy_resolve(arr_wb)
# arr changes after resolve, even though we assigned to arr_wb
assert_equal(arr, -100)
# after resolve, the two arrays no longer reference each other
assert_(arr_wb.ctypes.data != 0)
assert_equal(arr_wb.base, None)
# assigning to arr_wb does not get transferred to arr
arr_wb[...] = 100
assert_equal(arr, -100)
@pytest.mark.leaks_references(
reason="increments self in dealloc; ignore since deprecated path.")
def test_dealloc_warning(self):
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
arr = np.arange(9).reshape(3, 3)
v = arr.T
_multiarray_tests.npy_abuse_writebackifcopy(v)
assert len(sup.log) == 1
def test_view_discard_refcount(self):
from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_discard
arr = np.arange(9).reshape(3, 3).T
orig = arr.copy()
if HAS_REFCOUNT:
arr_cnt = sys.getrefcount(arr)
arr_wb = npy_create_writebackifcopy(arr)
assert_(arr_wb.flags.writebackifcopy)
assert_(arr_wb.base is arr)
arr_wb[...] = -100
npy_discard(arr_wb)
# arr remains unchanged after discard
assert_equal(arr, orig)
# after discard, the two arrays no longer reference each other
assert_(arr_wb.ctypes.data != 0)
assert_equal(arr_wb.base, None)
if HAS_REFCOUNT:
assert_equal(arr_cnt, sys.getrefcount(arr))
# assigning to arr_wb does not get transferred to arr
arr_wb[...] = 100
assert_equal(arr, orig)
class TestArange:
def test_infinite(self):
assert_raises_regex(
ValueError, "size exceeded",
np.arange, 0, np.inf
)
def test_nan_step(self):
assert_raises_regex(
ValueError, "cannot compute length",
np.arange, 0, 1, np.nan
)
def test_zero_step(self):
assert_raises(ZeroDivisionError, np.arange, 0, 10, 0)
assert_raises(ZeroDivisionError, np.arange, 0.0, 10.0, 0.0)
# empty range
assert_raises(ZeroDivisionError, np.arange, 0, 0, 0)
assert_raises(ZeroDivisionError, np.arange, 0.0, 0.0, 0.0)
def test_require_range(self):
assert_raises(TypeError, np.arange)
assert_raises(TypeError, np.arange, step=3)
assert_raises(TypeError, np.arange, dtype='int64')
assert_raises(TypeError, np.arange, start=4)
def test_start_stop_kwarg(self):
keyword_stop = np.arange(stop=3)
keyword_zerotostop = np.arange(start=0, stop=3)
keyword_start_stop = np.arange(start=3, stop=9)
assert len(keyword_stop) == 3
assert len(keyword_zerotostop) == 3
assert len(keyword_start_stop) == 6
assert_array_equal(keyword_stop, keyword_zerotostop)
class TestArrayFinalize:
""" Tests __array_finalize__ """
def test_receives_base(self):
# gh-11237
class SavesBase(np.ndarray):
def __array_finalize__(self, obj):
self.saved_base = self.base
a = np.array(1).view(SavesBase)
assert_(a.saved_base is a.base)
def test_bad_finalize1(self):
class BadAttributeArray(np.ndarray):
@property
def __array_finalize__(self):
raise RuntimeError("boohoo!")
with pytest.raises(TypeError, match="not callable"):
np.arange(10).view(BadAttributeArray)
def test_bad_finalize2(self):
class BadAttributeArray(np.ndarray):
def __array_finalize__(self):
raise RuntimeError("boohoo!")
with pytest.raises(TypeError, match="takes 1 positional"):
np.arange(10).view(BadAttributeArray)
def test_bad_finalize3(self):
class BadAttributeArray(np.ndarray):
def __array_finalize__(self, obj):
raise RuntimeError("boohoo!")
with pytest.raises(RuntimeError, match="boohoo!"):
np.arange(10).view(BadAttributeArray)
def test_lifetime_on_error(self):
# gh-11237
class RaisesInFinalize(np.ndarray):
def __array_finalize__(self, obj):
# crash, but keep this object alive
raise Exception(self)
# a plain object can't be weakref'd
class Dummy: pass
# get a weak reference to an object within an array
obj_arr = np.array(Dummy())
obj_ref = weakref.ref(obj_arr[()])
# get an array that crashed in __array_finalize__
with assert_raises(Exception) as e:
obj_arr.view(RaisesInFinalize)
obj_subarray = e.exception.args[0]
del e
assert_(isinstance(obj_subarray, RaisesInFinalize))
# reference should still be held by obj_arr
break_cycles()
assert_(obj_ref() is not None, "object should not already be dead")
del obj_arr
break_cycles()
assert_(obj_ref() is not None, "obj_arr should not hold the last reference")
del obj_subarray
break_cycles()
assert_(obj_ref() is None, "no references should remain")
def test_can_use_super(self):
class SuperFinalize(np.ndarray):
def __array_finalize__(self, obj):
self.saved_result = super().__array_finalize__(obj)
a = np.array(1).view(SuperFinalize)
assert_(a.saved_result is None)
def test_orderconverter_with_nonASCII_unicode_ordering():
# gh-7475
a = np.arange(5)
assert_raises(ValueError, a.flatten, order=u'\xe2')
def test_equal_override():
# gh-9153: ndarray.__eq__ uses special logic for structured arrays, which
# did not respect overrides with __array_priority__ or __array_ufunc__.
# The PR fixed this for __array_priority__ and __array_ufunc__ = None.
class MyAlwaysEqual:
def __eq__(self, other):
return "eq"
def __ne__(self, other):
return "ne"
class MyAlwaysEqualOld(MyAlwaysEqual):
__array_priority__ = 10000
class MyAlwaysEqualNew(MyAlwaysEqual):
__array_ufunc__ = None
array = np.array([(0, 1), (2, 3)], dtype='i4,i4')
for my_always_equal_cls in MyAlwaysEqualOld, MyAlwaysEqualNew:
my_always_equal = my_always_equal_cls()
assert_equal(my_always_equal == array, 'eq')
assert_equal(array == my_always_equal, 'eq')
assert_equal(my_always_equal != array, 'ne')
assert_equal(array != my_always_equal, 'ne')
@pytest.mark.parametrize(
["fun", "npfun"],
[
(_multiarray_tests.npy_cabs, np.absolute),
(_multiarray_tests.npy_carg, np.angle)
]
)
@pytest.mark.parametrize("x", [1, np.inf, -np.inf, np.nan])
@pytest.mark.parametrize("y", [1, np.inf, -np.inf, np.nan])
@pytest.mark.parametrize("test_dtype", np.complexfloating.__subclasses__())
def test_npymath_complex(fun, npfun, x, y, test_dtype):
# Smoketest npymath functions
z = test_dtype(complex(x, y))
got = fun(z)
expected = npfun(z)
assert_allclose(got, expected)
def test_npymath_real():
# Smoketest npymath functions
from numpy.core._multiarray_tests import (
npy_log10, npy_cosh, npy_sinh, npy_tan, npy_tanh)
funcs = {npy_log10: np.log10,
npy_cosh: np.cosh,
npy_sinh: np.sinh,
npy_tan: np.tan,
npy_tanh: np.tanh}
vals = (1, np.inf, -np.inf, np.nan)
types = (np.float32, np.float64, np.longdouble)
with np.errstate(all='ignore'):
for fun, npfun in funcs.items():
for x, t in itertools.product(vals, types):
z = t(x)
got = fun(z)
expected = npfun(z)
assert_allclose(got, expected)
def test_uintalignment_and_alignment():
# alignment code needs to satisfy these requirements:
# 1. numpy structs match C struct layout
    # 2. ufuncs/casting is safe wrt aligned access
    # 3. copy code is safe wrt "uint aligned" access
#
# Complex types are the main problem, whose alignment may not be the same
# as their "uint alignment".
#
    # This test might only fail on certain platforms, where uint64 alignment is
    # not equal to complex64 alignment. The last two checks below will only
    # fail when numpy is built with DEBUG=1.
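    # As a concrete (platform-dependent) illustration: on some 32-bit ABIs
    # alignof(complex64) is 4 while alignof(uint64) is 8, so copy loops that
    # move complex64 items through uint64 loads must not assume 8-byte
    # alignment.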
d1 = np.dtype('u1,c8', align=True)
d2 = np.dtype('u4,c8', align=True)
d3 = np.dtype({'names': ['a', 'b'], 'formats': ['u1', d1]}, align=True)
assert_equal(np.zeros(1, dtype=d1)['f1'].flags['ALIGNED'], True)
assert_equal(np.zeros(1, dtype=d2)['f1'].flags['ALIGNED'], True)
assert_equal(np.zeros(1, dtype='u1,c8')['f1'].flags['ALIGNED'], False)
# check that C struct matches numpy struct size
s = _multiarray_tests.get_struct_alignments()
for d, (alignment, size) in zip([d1,d2,d3], s):
assert_equal(d.alignment, alignment)
assert_equal(d.itemsize, size)
# check that ufuncs don't complain in debug mode
# (this is probably OK if the aligned flag is true above)
src = np.zeros((2,2), dtype=d1)['f1'] # 4-byte aligned, often
np.exp(src) # assert fails?
# check that copy code doesn't complain in debug mode
dst = np.zeros((2,2), dtype='c8')
dst[:,1] = src[:,1] # assert in lowlevel_strided_loops fails?
class TestAlignment:
# adapted from scipy._lib.tests.test__util.test__aligned_zeros
# Checks that unusual memory alignments don't trip up numpy.
    # In particular, check that RELAXED_STRIDES doesn't trip alignment
    # assertions in NDEBUG mode for size-0 arrays (gh-12503)
def check(self, shape, dtype, order, align):
err_msg = repr((shape, dtype, order, align))
x = _aligned_zeros(shape, dtype, order, align=align)
if align is None:
align = np.dtype(dtype).alignment
assert_equal(x.__array_interface__['data'][0] % align, 0)
if hasattr(shape, '__len__'):
assert_equal(x.shape, shape, err_msg)
else:
assert_equal(x.shape, (shape,), err_msg)
assert_equal(x.dtype, dtype)
if order == "C":
assert_(x.flags.c_contiguous, err_msg)
elif order == "F":
if x.size > 0:
assert_(x.flags.f_contiguous, err_msg)
elif order is None:
assert_(x.flags.c_contiguous, err_msg)
else:
raise ValueError()
def test_various_alignments(self):
for align in [1, 2, 3, 4, 8, 12, 16, 32, 64, None]:
for n in [0, 1, 3, 11]:
for order in ["C", "F", None]:
for dtype in list(np.typecodes["All"]) + ['i4,i4,i4']:
if dtype == 'O':
# object dtype can't be misaligned
continue
for shape in [n, (1, 2, 3, n)]:
self.check(shape, np.dtype(dtype), order, align)
def test_strided_loop_alignments(self):
        # specifically test that complex64 and float128 use the right
        # alignment code-paths, since these are particularly problematic. It
        # is useful to turn on USE_DEBUG for this test, so lowlevel-loop
        # asserts are run.
for align in [1, 2, 4, 8, 12, 16, None]:
xf64 = _aligned_zeros(3, np.float64)
xc64 = _aligned_zeros(3, np.complex64, align=align)
xf128 = _aligned_zeros(3, np.longdouble, align=align)
# test casting, both to and from misaligned
with suppress_warnings() as sup:
sup.filter(np.ComplexWarning, "Casting complex values")
xc64.astype('f8')
xf64.astype(np.complex64)
test = xc64 + xf64
xf128.astype('f8')
xf64.astype(np.longdouble)
test = xf128 + xf64
test = xf128 + xc64
# test copy, both to and from misaligned
# contig copy
xf64[:] = xf64.copy()
xc64[:] = xc64.copy()
xf128[:] = xf128.copy()
# strided copy
xf64[::2] = xf64[::2].copy()
xc64[::2] = xc64[::2].copy()
xf128[::2] = xf128[::2].copy()
def test_getfield():
a = np.arange(32, dtype='uint16')
if sys.byteorder == 'little':
i = 0
j = 1
else:
i = 1
j = 0
b = a.getfield('int8', i)
assert_equal(b, a)
b = a.getfield('int8', j)
assert_equal(b, 0)
pytest.raises(ValueError, a.getfield, 'uint8', -1)
pytest.raises(ValueError, a.getfield, 'uint8', 16)
pytest.raises(ValueError, a.getfield, 'uint64', 0)
class TestViewDtype:
"""
Verify that making a view of a non-contiguous array works as expected.
"""
def test_smaller_dtype_multiple(self):
# x is non-contiguous
x = np.arange(10, dtype='<i4')[::2]
with pytest.raises(ValueError,
match='the last axis must be contiguous'):
x.view('<i2')
expected = [[0, 0], [2, 0], [4, 0], [6, 0], [8, 0]]
assert_array_equal(x[:, np.newaxis].view('<i2'), expected)
def test_smaller_dtype_not_multiple(self):
# x is non-contiguous
x = np.arange(5, dtype='<i4')[::2]
with pytest.raises(ValueError,
match='the last axis must be contiguous'):
x.view('S3')
with pytest.raises(ValueError,
match='When changing to a smaller dtype'):
x[:, np.newaxis].view('S3')
# Make sure the problem is because of the dtype size
expected = [[b''], [b'\x02'], [b'\x04']]
assert_array_equal(x[:, np.newaxis].view('S4'), expected)
def test_larger_dtype_multiple(self):
# x is non-contiguous in the first dimension, contiguous in the last
x = np.arange(20, dtype='<i2').reshape(10, 2)[::2, :]
expected = np.array([[65536], [327684], [589832],
[851980], [1114128]], dtype='<i4')
assert_array_equal(x.view('<i4'), expected)
def test_larger_dtype_not_multiple(self):
# x is non-contiguous in the first dimension, contiguous in the last
x = np.arange(20, dtype='<i2').reshape(10, 2)[::2, :]
with pytest.raises(ValueError,
match='When changing to a larger dtype'):
x.view('S3')
# Make sure the problem is because of the dtype size
expected = [[b'\x00\x00\x01'], [b'\x04\x00\x05'], [b'\x08\x00\t'],
[b'\x0c\x00\r'], [b'\x10\x00\x11']]
assert_array_equal(x.view('S4'), expected)
def test_f_contiguous(self):
# x is F-contiguous
x = np.arange(4 * 3, dtype='<i4').reshape(4, 3).T
with pytest.raises(ValueError,
match='the last axis must be contiguous'):
x.view('<i2')
def test_non_c_contiguous(self):
# x is contiguous in axis=-1, but not C-contiguous in other axes
x = np.arange(2 * 3 * 4, dtype='i1').\
reshape(2, 3, 4).transpose(1, 0, 2)
expected = [[[256, 770], [3340, 3854]],
[[1284, 1798], [4368, 4882]],
[[2312, 2826], [5396, 5910]]]
assert_array_equal(x.view('<i2'), expected)
# Test various array sizes that hit different code paths in quicksort-avx512
@pytest.mark.parametrize("N", [8, 16, 24, 32, 48, 64, 96, 128, 151, 191,
256, 383, 512, 1023, 2047])
def test_sort_float(N):
# Regular data with nan sprinkled
np.random.seed(42)
arr = -0.5 + np.random.sample(N).astype('f')
arr[np.random.choice(arr.shape[0], 3)] = np.nan
assert_equal(np.sort(arr, kind='quick'), np.sort(arr, kind='heap'))
# (2) with +INF
infarr = np.inf*np.ones(N, dtype='f')
infarr[np.random.choice(infarr.shape[0], 5)] = -1.0
assert_equal(np.sort(infarr, kind='quick'), np.sort(infarr, kind='heap'))
# (3) with -INF
neginfarr = -np.inf*np.ones(N, dtype='f')
neginfarr[np.random.choice(neginfarr.shape[0], 5)] = 1.0
assert_equal(np.sort(neginfarr, kind='quick'),
np.sort(neginfarr, kind='heap'))
# (4) with +/-INF
infarr = np.inf*np.ones(N, dtype='f')
    infarr[np.random.choice(infarr.shape[0], N // 2)] = -np.inf
assert_equal(np.sort(infarr, kind='quick'), np.sort(infarr, kind='heap'))
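# Note: heapsort acts as the scalar reference in these sort tests; the
# AVX-512 specialization only applies to the quicksort path, so any mismatch
# between kind='quick' and kind='heap' implicates the vectorized code.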
def test_sort_int():
# Random data with NPY_MAX_INT32 and NPY_MIN_INT32 sprinkled
rng = np.random.default_rng(42)
N = 2047
minv = np.iinfo(np.int32).min
maxv = np.iinfo(np.int32).max
arr = rng.integers(low=minv, high=maxv, size=N).astype('int32')
arr[np.random.choice(arr.shape[0], 10)] = minv
arr[np.random.choice(arr.shape[0], 10)] = maxv
assert_equal(np.sort(arr, kind='quick'), np.sort(arr, kind='heap'))
def test_sort_uint():
# Random data with NPY_MAX_UINT32 sprinkled
rng = np.random.default_rng(42)
N = 2047
maxv = np.iinfo(np.uint32).max
arr = rng.integers(low=0, high=maxv, size=N).astype('uint32')
arr[np.random.choice(arr.shape[0], 10)] = maxv
assert_equal(np.sort(arr, kind='quick'), np.sort(arr, kind='heap'))
| 366,430 | Python | 36.885753 | 588 | 0.514508 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_half.py | import platform
import pytest
import numpy as np
from numpy import uint16, float16, float32, float64
from numpy.testing import assert_, assert_equal
def assert_raises_fpe(strmatch, callable, *args, **kwargs):
try:
callable(*args, **kwargs)
except FloatingPointError as exc:
assert_(str(exc).find(strmatch) >= 0,
"Did not raise floating point %s error" % strmatch)
else:
assert_(False,
"Did not raise floating point %s error" % strmatch)
class TestHalf:
def setup_method(self):
# An array of all possible float16 values
self.all_f16 = np.arange(0x10000, dtype=uint16)
self.all_f16.dtype = float16
self.all_f32 = np.array(self.all_f16, dtype=float32)
self.all_f64 = np.array(self.all_f16, dtype=float64)
# An array of all non-NaN float16 values, in sorted order
self.nonan_f16 = np.concatenate(
(np.arange(0xfc00, 0x7fff, -1, dtype=uint16),
np.arange(0x0000, 0x7c01, 1, dtype=uint16)))
self.nonan_f16.dtype = float16
self.nonan_f32 = np.array(self.nonan_f16, dtype=float32)
self.nonan_f64 = np.array(self.nonan_f16, dtype=float64)
# An array of all finite float16 values, in sorted order
self.finite_f16 = self.nonan_f16[1:-1]
self.finite_f32 = self.nonan_f32[1:-1]
self.finite_f64 = self.nonan_f64[1:-1]
def test_half_conversions(self):
"""Checks that all 16-bit values survive conversion
to/from 32-bit and 64-bit float"""
# Because the underlying routines preserve the NaN bits, every
# value is preserved when converting to/from other floats.
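        # For instance, the quiet-NaN pattern 0x7e00 in float16 maps to
        # 0x7fc00000 in float32 (the 10 mantissa bits are shifted into the
        # top of the 23-bit float32 mantissa) and converts back unchanged.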
# Convert from float32 back to float16
b = np.array(self.all_f32, dtype=float16)
assert_equal(self.all_f16.view(dtype=uint16),
b.view(dtype=uint16))
# Convert from float64 back to float16
b = np.array(self.all_f64, dtype=float16)
assert_equal(self.all_f16.view(dtype=uint16),
b.view(dtype=uint16))
# Convert float16 to longdouble and back
# This doesn't necessarily preserve the extra NaN bits,
# so exclude NaNs.
a_ld = np.array(self.nonan_f16, dtype=np.longdouble)
b = np.array(a_ld, dtype=float16)
assert_equal(self.nonan_f16.view(dtype=uint16),
b.view(dtype=uint16))
# Check the range for which all integers can be represented
i_int = np.arange(-2048, 2049)
i_f16 = np.array(i_int, dtype=float16)
j = np.array(i_f16, dtype=int)
assert_equal(i_int, j)
@pytest.mark.parametrize("string_dt", ["S", "U"])
def test_half_conversion_to_string(self, string_dt):
# Currently uses S/U32 (which is sufficient for float32)
expected_dt = np.dtype(f"{string_dt}32")
assert np.promote_types(np.float16, string_dt) == expected_dt
assert np.promote_types(string_dt, np.float16) == expected_dt
arr = np.ones(3, dtype=np.float16).astype(string_dt)
assert arr.dtype == expected_dt
@pytest.mark.parametrize("string_dt", ["S", "U"])
def test_half_conversion_from_string(self, string_dt):
string = np.array("3.1416", dtype=string_dt)
assert string.astype(np.float16) == np.array(3.1416, dtype=np.float16)
@pytest.mark.parametrize("offset", [None, "up", "down"])
@pytest.mark.parametrize("shift", [None, "up", "down"])
@pytest.mark.parametrize("float_t", [np.float32, np.float64])
def test_half_conversion_rounding(self, float_t, shift, offset):
# Assumes that round to even is used during casting.
max_pattern = np.float16(np.finfo(np.float16).max).view(np.uint16)
        # Test all (positive) finite numbers; denormals are the most
        # interesting, however:
f16s_patterns = np.arange(0, max_pattern+1, dtype=np.uint16)
f16s_float = f16s_patterns.view(np.float16).astype(float_t)
        # Shift the values by half a bit up or down (or do not shift):
if shift == "up":
f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[1:]
elif shift == "down":
f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[:-1]
else:
f16s_float = f16s_float[1:-1]
        # Offset the float by the smallest possible amount up or down:
if offset == "up":
f16s_float = np.nextafter(f16s_float, float_t(1e50))
elif offset == "down":
f16s_float = np.nextafter(f16s_float, float_t(-1e50))
# Convert back to float16 and its bit pattern:
res_patterns = f16s_float.astype(np.float16).view(np.uint16)
        # The above calculation tries the original values, or the exact
        # mid points between the float16 values. It then further offsets them
        # by as little as possible. If no offset occurs, "round to even"
        # logic is necessary; an arbitrarily small offset should always cause
        # normal up/down rounding.
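        # A small worked example of the round-to-even case: halfway between
        # 1.0 (pattern 0x3c00, even mantissa) and the next float16 (0x3c01),
        # the value rounds down to 0x3c00, while halfway between 0x3c01 and
        # 0x3c02 it rounds up to 0x3c02; ties always go to the even pattern.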
# Calculate the expected pattern:
cmp_patterns = f16s_patterns[1:-1].copy()
if shift == "down" and offset != "up":
shift_pattern = -1
elif shift == "up" and offset != "down":
shift_pattern = 1
else:
            # There cannot be a shift: either shift is None, so all rounding
            # goes back to the original values, or the shift is undone by the
            # offset.
# If rounding occurs, is it normal rounding or round to even?
if offset is None:
            # Round to even occurs: modify only the odd patterns (the int16
            # view allows adding -1 to the unsigned patterns)
cmp_patterns[0::2].view(np.int16)[...] += shift_pattern
else:
cmp_patterns.view(np.int16)[...] += shift_pattern
assert_equal(res_patterns, cmp_patterns)
@pytest.mark.parametrize(["float_t", "uint_t", "bits"],
[(np.float32, np.uint32, 23),
(np.float64, np.uint64, 52)])
def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits):
        # Test specifically that all bits are considered when deciding
        # whether round to even should occur (i.e. no bits are lost at the
        # end). Compare also gh-12721. The most bits can get lost for the
        # smallest denormal:
smallest_value = np.uint16(1).view(np.float16).astype(float_t)
assert smallest_value == 2**-24
# Will be rounded to zero based on round to even rule:
rounded_to_zero = smallest_value / float_t(2)
assert rounded_to_zero.astype(np.float16) == 0
        # The significand will be all 0 for the float_t; test that we do not
        # lose any of its low bits:
for i in range(bits):
# slightly increasing the value should make it round up:
larger_pattern = rounded_to_zero.view(uint_t) | uint_t(1 << i)
larger_value = larger_pattern.view(float_t)
assert larger_value.astype(np.float16) == smallest_value
def test_nans_infs(self):
with np.errstate(all='ignore'):
# Check some of the ufuncs
assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32))
assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32))
assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32))
assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32))
assert_equal(np.spacing(float16(65504)), np.inf)
# Check comparisons of all values with NaN
nan = float16(np.nan)
assert_(not (self.all_f16 == nan).any())
assert_(not (nan == self.all_f16).any())
assert_((self.all_f16 != nan).all())
assert_((nan != self.all_f16).all())
assert_(not (self.all_f16 < nan).any())
assert_(not (nan < self.all_f16).any())
assert_(not (self.all_f16 <= nan).any())
assert_(not (nan <= self.all_f16).any())
assert_(not (self.all_f16 > nan).any())
assert_(not (nan > self.all_f16).any())
assert_(not (self.all_f16 >= nan).any())
assert_(not (nan >= self.all_f16).any())
def test_half_values(self):
"""Confirms a small number of known half values"""
a = np.array([1.0, -1.0,
2.0, -2.0,
0.0999755859375, 0.333251953125, # 1/10, 1/3
65504, -65504, # Maximum magnitude
2.0**(-14), -2.0**(-14), # Minimum normal
2.0**(-24), -2.0**(-24), # Minimum subnormal
0, -1/1e1000, # Signed zeros
np.inf, -np.inf])
b = np.array([0x3c00, 0xbc00,
0x4000, 0xc000,
0x2e66, 0x3555,
0x7bff, 0xfbff,
0x0400, 0x8400,
0x0001, 0x8001,
0x0000, 0x8000,
0x7c00, 0xfc00], dtype=uint16)
b.dtype = float16
assert_equal(a, b)
def test_half_rounding(self):
"""Checks that rounding when converting to half is correct"""
a = np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal
2.0**-25, # Underflows to zero (nearest even mode)
2.0**-26, # Underflows to zero
1.0+2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10)
1.0+2.0**-11, # rounds to 1.0 (nearest even mode)
1.0+2.0**-12, # rounds to 1.0
65519, # rounds to 65504
65520], # rounds to inf
dtype=float64)
rounded = [2.0**-24,
0.0,
0.0,
1.0+2.0**(-10),
1.0,
1.0,
65504,
np.inf]
# Check float64->float16 rounding
b = np.array(a, dtype=float16)
assert_equal(b, rounded)
# Check float32->float16 rounding
a = np.array(a, dtype=float32)
b = np.array(a, dtype=float16)
assert_equal(b, rounded)
def test_half_correctness(self):
"""Take every finite float16, and check the casting functions with
a manual conversion."""
# Create an array of all finite float16s
a_bits = self.finite_f16.view(dtype=uint16)
# Convert to 64-bit float manually
a_sgn = (-1.0)**((a_bits & 0x8000) >> 15)
a_exp = np.array((a_bits & 0x7c00) >> 10, dtype=np.int32) - 15
a_man = (a_bits & 0x03ff) * 2.0**(-10)
# Implicit bit of normalized floats
a_man[a_exp != -15] += 1
# Denormalized exponent is -14
a_exp[a_exp == -15] = -14
a_manual = a_sgn * a_man * 2.0**a_exp
a32_fail = np.nonzero(self.finite_f32 != a_manual)[0]
if len(a32_fail) != 0:
bad_index = a32_fail[0]
assert_equal(self.finite_f32, a_manual,
"First non-equal is half value %x -> %g != %g" %
(self.finite_f16[bad_index],
self.finite_f32[bad_index],
a_manual[bad_index]))
a64_fail = np.nonzero(self.finite_f64 != a_manual)[0]
if len(a64_fail) != 0:
bad_index = a64_fail[0]
assert_equal(self.finite_f64, a_manual,
"First non-equal is half value %x -> %g != %g" %
(self.finite_f16[bad_index],
self.finite_f64[bad_index],
a_manual[bad_index]))
def test_half_ordering(self):
"""Make sure comparisons are working right"""
# All non-NaN float16 values in reverse order
a = self.nonan_f16[::-1].copy()
# 32-bit float copy
b = np.array(a, dtype=float32)
# Should sort the same
a.sort()
b.sort()
assert_equal(a, b)
# Comparisons should work
assert_((a[:-1] <= a[1:]).all())
assert_(not (a[:-1] > a[1:]).any())
assert_((a[1:] >= a[:-1]).all())
assert_(not (a[1:] < a[:-1]).any())
# All != except for +/-0
assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size-2)
assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size-2)
def test_half_funcs(self):
"""Test the various ArrFuncs"""
# fill
assert_equal(np.arange(10, dtype=float16),
np.arange(10, dtype=float32))
# fillwithscalar
a = np.zeros((5,), dtype=float16)
a.fill(1)
assert_equal(a, np.ones((5,), dtype=float16))
# nonzero and copyswap
a = np.array([0, 0, -1, -1/1e20, 0, 2.0**-24, 7.629e-6], dtype=float16)
assert_equal(a.nonzero()[0],
[2, 5, 6])
a = a.byteswap().newbyteorder()
assert_equal(a.nonzero()[0],
[2, 5, 6])
# dot
a = np.arange(0, 10, 0.5, dtype=float16)
b = np.ones((20,), dtype=float16)
assert_equal(np.dot(a, b),
95)
# argmax
a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16)
assert_equal(a.argmax(),
4)
a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16)
assert_equal(a.argmax(),
5)
# getitem
a = np.arange(10, dtype=float16)
for i in range(10):
assert_equal(a.item(i), i)
def test_spacing_nextafter(self):
"""Test np.spacing and np.nextafter"""
# All non-negative finite #'s
a = np.arange(0x7c00, dtype=uint16)
hinf = np.array((np.inf,), dtype=float16)
hnan = np.array((np.nan,), dtype=float16)
a_f16 = a.view(dtype=float16)
assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1])
assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:])
assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1])
assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1])
assert_equal(np.nextafter(hinf, a_f16), a_f16[-1])
assert_equal(np.nextafter(-hinf, a_f16), -a_f16[-1])
assert_equal(np.nextafter(hinf, hinf), hinf)
assert_equal(np.nextafter(hinf, -hinf), a_f16[-1])
assert_equal(np.nextafter(-hinf, hinf), -a_f16[-1])
assert_equal(np.nextafter(-hinf, -hinf), -hinf)
assert_equal(np.nextafter(a_f16, hnan), hnan[0])
assert_equal(np.nextafter(hnan, a_f16), hnan[0])
assert_equal(np.nextafter(hnan, hnan), hnan)
assert_equal(np.nextafter(hinf, hnan), hnan)
assert_equal(np.nextafter(hnan, hinf), hnan)
# switch to negatives
a |= 0x8000
assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1]))
assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:])
assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1])
assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1])
assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:])
assert_equal(np.nextafter(hinf, a_f16), -a_f16[-1])
assert_equal(np.nextafter(-hinf, a_f16), a_f16[-1])
assert_equal(np.nextafter(a_f16, hnan), hnan[0])
assert_equal(np.nextafter(hnan, a_f16), hnan[0])
def test_half_ufuncs(self):
"""Test the various ufuncs"""
a = np.array([0, 1, 2, 4, 2], dtype=float16)
b = np.array([-2, 5, 1, 4, 3], dtype=float16)
c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)
assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])
assert_equal(np.equal(a, b), [False, False, False, True, False])
assert_equal(np.not_equal(a, b), [True, True, True, False, True])
assert_equal(np.less(a, b), [False, True, False, False, True])
assert_equal(np.less_equal(a, b), [False, True, False, True, True])
assert_equal(np.greater(a, b), [True, False, True, False, False])
assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
assert_equal(np.logical_and(a, b), [False, True, True, True, True])
assert_equal(np.logical_or(a, b), [True, True, True, True, True])
assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
assert_equal(np.logical_not(a), [True, False, False, False, False])
assert_equal(np.isnan(c), [False, False, False, True, False])
assert_equal(np.isinf(c), [False, False, True, False, False])
assert_equal(np.isfinite(c), [True, True, False, False, True])
assert_equal(np.signbit(b), [True, False, False, False, False])
assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])
assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
x = np.maximum(b, c)
assert_(np.isnan(x[3]))
x[3] = 0
assert_equal(x, [0, 5, 1, 0, 6])
assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
x = np.minimum(b, c)
assert_(np.isnan(x[3]))
x[3] = 0
assert_equal(x, [-2, -1, -np.inf, 0, 3])
assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])
assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
assert_equal(np.square(b), [4, 25, 1, 16, 9])
assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
assert_equal(np.conjugate(b), b)
assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
assert_equal(np.negative(b), [2, -5, -1, -4, -3])
assert_equal(np.positive(b), b)
assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
def test_half_coercion(self):
"""Test that half gets coerced properly with the other types"""
a16 = np.array((1,), dtype=float16)
a32 = np.array((1,), dtype=float32)
b16 = float16(1)
b32 = float32(1)
assert_equal(np.power(a16, 2).dtype, float16)
assert_equal(np.power(a16, 2.0).dtype, float16)
assert_equal(np.power(a16, b16).dtype, float16)
assert_equal(np.power(a16, b32).dtype, float16)
assert_equal(np.power(a16, a16).dtype, float16)
assert_equal(np.power(a16, a32).dtype, float32)
assert_equal(np.power(b16, 2).dtype, float64)
assert_equal(np.power(b16, 2.0).dtype, float64)
assert_equal(np.power(b16, b16).dtype, float16)
assert_equal(np.power(b16, b32).dtype, float32)
assert_equal(np.power(b16, a16).dtype, float16)
assert_equal(np.power(b16, a32).dtype, float32)
assert_equal(np.power(a32, a16).dtype, float32)
assert_equal(np.power(a32, b16).dtype, float32)
assert_equal(np.power(b32, a16).dtype, float16)
assert_equal(np.power(b32, b16).dtype, float32)
@pytest.mark.skipif(platform.machine() == "armv5tel",
reason="See gh-413.")
def test_half_fpe(self):
with np.errstate(all='raise'):
sx16 = np.array((1e-4,), dtype=float16)
bx16 = np.array((1e4,), dtype=float16)
sy16 = float16(1e-4)
by16 = float16(1e4)
# Underflow errors
assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sx16)
assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sy16)
assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sx16)
assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sy16)
assert_raises_fpe('underflow', lambda a, b:a/b, sx16, bx16)
assert_raises_fpe('underflow', lambda a, b:a/b, sx16, by16)
assert_raises_fpe('underflow', lambda a, b:a/b, sy16, bx16)
assert_raises_fpe('underflow', lambda a, b:a/b, sy16, by16)
assert_raises_fpe('underflow', lambda a, b:a/b,
float16(2.**-14), float16(2**11))
assert_raises_fpe('underflow', lambda a, b:a/b,
float16(-2.**-14), float16(2**11))
assert_raises_fpe('underflow', lambda a, b:a/b,
float16(2.**-14+2**-24), float16(2))
assert_raises_fpe('underflow', lambda a, b:a/b,
float16(-2.**-14-2**-24), float16(2))
assert_raises_fpe('underflow', lambda a, b:a/b,
float16(2.**-14+2**-23), float16(4))
# Overflow errors
assert_raises_fpe('overflow', lambda a, b:a*b, bx16, bx16)
assert_raises_fpe('overflow', lambda a, b:a*b, bx16, by16)
assert_raises_fpe('overflow', lambda a, b:a*b, by16, bx16)
assert_raises_fpe('overflow', lambda a, b:a*b, by16, by16)
assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sx16)
assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sy16)
assert_raises_fpe('overflow', lambda a, b:a/b, by16, sx16)
assert_raises_fpe('overflow', lambda a, b:a/b, by16, sy16)
assert_raises_fpe('overflow', lambda a, b:a+b,
float16(65504), float16(17))
assert_raises_fpe('overflow', lambda a, b:a-b,
float16(-65504), float16(17))
assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf))
assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf))
assert_raises_fpe('overflow', np.spacing, float16(65504))
# Invalid value errors
assert_raises_fpe('invalid', np.divide, float16(np.inf), float16(np.inf))
assert_raises_fpe('invalid', np.spacing, float16(np.inf))
assert_raises_fpe('invalid', np.spacing, float16(np.nan))
# These should not raise
float16(65472)+float16(32)
float16(2**-13)/float16(2)
float16(2**-14)/float16(2**10)
np.spacing(float16(-65504))
np.nextafter(float16(65504), float16(-np.inf))
np.nextafter(float16(-65504), float16(np.inf))
np.nextafter(float16(np.inf), float16(0))
np.nextafter(float16(-np.inf), float16(0))
np.nextafter(float16(0), float16(np.nan))
np.nextafter(float16(np.nan), float16(0))
float16(2**-14)/float16(2**10)
float16(-2**-14)/float16(2**10)
float16(2**-14+2**-23)/float16(2)
float16(-2**-14-2**-23)/float16(2)
def test_half_array_interface(self):
"""Test that half is compatible with __array_interface__"""
class Dummy:
pass
a = np.ones((1,), dtype=float16)
b = Dummy()
b.__array_interface__ = a.__array_interface__
c = np.array(b)
assert_(c.dtype == float16)
assert_equal(a, c)
| 23,823 | Python | 41.926126 | 90 | 0.539731 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_scalarbuffer.py | """
Test scalar buffer interface adheres to PEP 3118
"""
import numpy as np
from numpy.core._rational_tests import rational
from numpy.core._multiarray_tests import get_buffer_info
import pytest
from numpy.testing import assert_, assert_equal, assert_raises
# PEP3118 format strings for native (standard alignment and byteorder) types
scalars_and_codes = [
(np.bool_, '?'),
(np.byte, 'b'),
(np.short, 'h'),
(np.intc, 'i'),
(np.int_, 'l'),
(np.longlong, 'q'),
(np.ubyte, 'B'),
(np.ushort, 'H'),
(np.uintc, 'I'),
(np.uint, 'L'),
(np.ulonglong, 'Q'),
(np.half, 'e'),
(np.single, 'f'),
(np.double, 'd'),
(np.longdouble, 'g'),
(np.csingle, 'Zf'),
(np.cdouble, 'Zd'),
(np.clongdouble, 'Zg'),
]
scalars_only, codes_only = zip(*scalars_and_codes)
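# For example, memoryview(np.double(0)).format is 'd' everywhere, while
# np.int_ maps to 'l', whose byte width is platform dependent; comparing
# scalar buffers against same-dtype array buffers (as test_scalar_match_array
# does below) keeps such platform differences out of the expectations.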
class TestScalarPEP3118:
@pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
def test_scalar_match_array(self, scalar):
x = scalar()
a = np.array([], dtype=np.dtype(scalar))
mv_x = memoryview(x)
mv_a = memoryview(a)
assert_equal(mv_x.format, mv_a.format)
@pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
def test_scalar_dim(self, scalar):
x = scalar()
mv_x = memoryview(x)
assert_equal(mv_x.itemsize, np.dtype(scalar).itemsize)
assert_equal(mv_x.ndim, 0)
assert_equal(mv_x.shape, ())
assert_equal(mv_x.strides, ())
assert_equal(mv_x.suboffsets, ())
@pytest.mark.parametrize('scalar, code', scalars_and_codes, ids=codes_only)
def test_scalar_code_and_properties(self, scalar, code):
x = scalar()
expected = dict(strides=(), itemsize=x.dtype.itemsize, ndim=0,
shape=(), format=code, readonly=True)
mv_x = memoryview(x)
assert self._as_dict(mv_x) == expected
@pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
def test_scalar_buffers_readonly(self, scalar):
x = scalar()
with pytest.raises(BufferError, match="scalar buffer is readonly"):
get_buffer_info(x, ["WRITABLE"])
def test_void_scalar_structured_data(self):
dt = np.dtype([('name', np.unicode_, 16), ('grades', np.float64, (2,))])
x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()]
assert_(isinstance(x, np.void))
mv_x = memoryview(x)
expected_size = 16 * np.dtype((np.unicode_, 1)).itemsize
expected_size += 2 * np.dtype(np.float64).itemsize
assert_equal(mv_x.itemsize, expected_size)
assert_equal(mv_x.ndim, 0)
assert_equal(mv_x.shape, ())
assert_equal(mv_x.strides, ())
assert_equal(mv_x.suboffsets, ())
# check scalar format string against ndarray format string
a = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
assert_(isinstance(a, np.ndarray))
mv_a = memoryview(a)
assert_equal(mv_x.itemsize, mv_a.itemsize)
assert_equal(mv_x.format, mv_a.format)
# Check that we do not allow writeable buffer export (technically
# we could allow it sometimes here...)
with pytest.raises(BufferError, match="scalar buffer is readonly"):
get_buffer_info(x, ["WRITABLE"])
def _as_dict(self, m):
return dict(strides=m.strides, shape=m.shape, itemsize=m.itemsize,
ndim=m.ndim, format=m.format, readonly=m.readonly)
def test_datetime_memoryview(self):
# gh-11656
# Values verified with v1.13.3, shape is not () as in test_scalar_dim
dt1 = np.datetime64('2016-01-01')
dt2 = np.datetime64('2017-01-01')
expected = dict(strides=(1,), itemsize=1, ndim=1, shape=(8,),
format='B', readonly=True)
v = memoryview(dt1)
assert self._as_dict(v) == expected
v = memoryview(dt2 - dt1)
assert self._as_dict(v) == expected
dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
a = np.empty(1, dt)
# Fails to create a PEP 3118 valid buffer
assert_raises((ValueError, BufferError), memoryview, a[0])
# Check that we do not allow writeable buffer export
with pytest.raises(BufferError, match="scalar buffer is readonly"):
get_buffer_info(dt1, ["WRITABLE"])
@pytest.mark.parametrize('s', [
pytest.param("\x32\x32", id="ascii"),
pytest.param("\uFE0F\uFE0F", id="basic multilingual"),
pytest.param("\U0001f4bb\U0001f4bb", id="non-BMP"),
])
def test_str_ucs4(self, s):
s = np.str_(s) # only our subclass implements the buffer protocol
# all the same, characters always encode as ucs4
expected = dict(strides=(), itemsize=8, ndim=0, shape=(), format='2w',
readonly=True)
v = memoryview(s)
assert self._as_dict(v) == expected
        # integers of the platform-appropriate endianness
code_points = np.frombuffer(v, dtype='i4')
assert_equal(code_points, [ord(c) for c in s])
# Check that we do not allow writeable buffer export
with pytest.raises(BufferError, match="scalar buffer is readonly"):
get_buffer_info(s, ["WRITABLE"])
def test_user_scalar_fails_buffer(self):
r = rational(1)
with assert_raises(TypeError):
memoryview(r)
# Check that we do not allow writeable buffer export
with pytest.raises(BufferError, match="scalar buffer is readonly"):
get_buffer_info(r, ["WRITABLE"])
| 5,588 | Python | 35.292208 | 80 | 0.594488 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_umath_accuracy.py | import numpy as np
import os
from os import path
import sys
import pytest
from ctypes import c_longlong, c_double, c_float, c_int, cast, pointer, POINTER
from numpy.testing import assert_array_max_ulp
from numpy.testing._private.utils import _glibc_older_than
from numpy.core._multiarray_umath import __cpu_features__
IS_AVX = __cpu_features__.get('AVX512F', False) or \
(__cpu_features__.get('FMA3', False) and __cpu_features__.get('AVX2', False))
# only run on linux with AVX, also avoid old glibc (numpy/numpy#20448).
runtest = (sys.platform.startswith('linux')
and IS_AVX and not _glibc_older_than("2.17"))
platform_skip = pytest.mark.skipif(not runtest,
reason="avoid testing inconsistent platform "
"library implementations")
# hex string to float conversion, taken from:
# https://stackoverflow.com/questions/1592158/convert-hex-to-float #
def convert(s, datatype="np.float32"):
i = int(s, 16) # convert from hex to a Python int
if (datatype == "np.float64"):
cp = pointer(c_longlong(i)) # make this into a c long long integer
fp = cast(cp, POINTER(c_double)) # cast the int pointer to a double pointer
else:
cp = pointer(c_int(i)) # make this into a c integer
fp = cast(cp, POINTER(c_float)) # cast the int pointer to a float pointer
return fp.contents.value # dereference the pointer, get the float
str_to_float = np.vectorize(convert)
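# For example, convert("40490fdb") recovers float32 pi (~3.14159274) and
# convert("400921fb54442d18", "np.float64") recovers float64 pi; the CSV
# files consumed below store their inputs and outputs in this raw hex form.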
class TestAccuracy:
@platform_skip
def test_validate_transcendentals(self):
with np.errstate(all='ignore'):
data_dir = path.join(path.dirname(__file__), 'data')
files = os.listdir(data_dir)
files = list(filter(lambda f: f.endswith('.csv'), files))
for filename in files:
filepath = path.join(data_dir, filename)
with open(filepath) as fid:
                    file_without_comments = (r for r in fid if r[0] not in ('$', '#'))
data = np.genfromtxt(file_without_comments,
dtype=('|S39','|S39','|S39',int),
names=('type','input','output','ulperr'),
delimiter=',',
skip_header=1)
npname = path.splitext(filename)[0].split('-')[3]
npfunc = getattr(np, npname)
for datatype in np.unique(data['type']):
data_subset = data[data['type'] == datatype]
inval = np.array(str_to_float(data_subset['input'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype))
outval = np.array(str_to_float(data_subset['output'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype))
perm = np.random.permutation(len(inval))
inval = inval[perm]
outval = outval[perm]
maxulperr = data_subset['ulperr'].max()
assert_array_max_ulp(npfunc(inval), outval, maxulperr)
| 3,229 | Python | 51.096773 | 145 | 0.557138 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_arrayprint.py | import sys
import gc
from hypothesis import given
from hypothesis.extra import numpy as hynp
import pytest
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT,
assert_raises_regex,
)
import textwrap
class TestArrayRepr:
def test_nan_inf(self):
x = np.array([np.nan, np.inf])
assert_equal(repr(x), 'array([nan, inf])')
def test_subclass(self):
class sub(np.ndarray): pass
# one dimensional
x1d = np.array([1, 2]).view(sub)
assert_equal(repr(x1d), 'sub([1, 2])')
# two dimensional
x2d = np.array([[1, 2], [3, 4]]).view(sub)
assert_equal(repr(x2d),
'sub([[1, 2],\n'
' [3, 4]])')
# two dimensional with flexible dtype
xstruct = np.ones((2,2), dtype=[('a', '<i4')]).view(sub)
assert_equal(repr(xstruct),
"sub([[(1,), (1,)],\n"
" [(1,), (1,)]], dtype=[('a', '<i4')])"
)
@pytest.mark.xfail(reason="See gh-10544")
def test_object_subclass(self):
class sub(np.ndarray):
def __new__(cls, inp):
obj = np.asarray(inp).view(cls)
return obj
def __getitem__(self, ind):
ret = super().__getitem__(ind)
return sub(ret)
# test that object + subclass is OK:
x = sub([None, None])
assert_equal(repr(x), 'sub([None, None], dtype=object)')
assert_equal(str(x), '[None None]')
x = sub([None, sub([None, None])])
assert_equal(repr(x),
'sub([None, sub([None, None], dtype=object)], dtype=object)')
assert_equal(str(x), '[None sub([None, None], dtype=object)]')
def test_0d_object_subclass(self):
# make sure that subclasses which return 0ds instead
# of scalars don't cause infinite recursion in str
class sub(np.ndarray):
def __new__(cls, inp):
obj = np.asarray(inp).view(cls)
return obj
def __getitem__(self, ind):
ret = super().__getitem__(ind)
return sub(ret)
x = sub(1)
assert_equal(repr(x), 'sub(1)')
assert_equal(str(x), '1')
x = sub([1, 1])
assert_equal(repr(x), 'sub([1, 1])')
assert_equal(str(x), '[1 1]')
# check it works properly with object arrays too
x = sub(None)
assert_equal(repr(x), 'sub(None, dtype=object)')
assert_equal(str(x), 'None')
# plus recursive object arrays (even depth > 1)
y = sub(None)
x[()] = y
y[()] = x
assert_equal(repr(x),
'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)')
assert_equal(str(x), '...')
x[()] = 0 # resolve circular references for garbage collector
# nested 0d-subclass-object
x = sub(None)
x[()] = sub(None)
assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)')
assert_equal(str(x), 'None')
# gh-10663
class DuckCounter(np.ndarray):
def __getitem__(self, item):
result = super().__getitem__(item)
if not isinstance(result, DuckCounter):
result = result[...].view(DuckCounter)
return result
def to_string(self):
return {0: 'zero', 1: 'one', 2: 'two'}.get(self.item(), 'many')
def __str__(self):
if self.shape == ():
return self.to_string()
else:
fmt = {'all': lambda x: x.to_string()}
return np.array2string(self, formatter=fmt)
dc = np.arange(5).view(DuckCounter)
assert_equal(str(dc), "[zero one two many many]")
assert_equal(str(dc[0]), "zero")
def test_self_containing(self):
arr0d = np.array(None)
arr0d[()] = arr0d
assert_equal(repr(arr0d),
'array(array(..., dtype=object), dtype=object)')
arr0d[()] = 0 # resolve recursion for garbage collector
arr1d = np.array([None, None])
arr1d[1] = arr1d
assert_equal(repr(arr1d),
'array([None, array(..., dtype=object)], dtype=object)')
arr1d[1] = 0 # resolve recursion for garbage collector
first = np.array(None)
second = np.array(None)
first[()] = second
second[()] = first
assert_equal(repr(first),
'array(array(array(..., dtype=object), dtype=object), dtype=object)')
first[()] = 0 # resolve circular references for garbage collector
def test_containing_list(self):
        # printing square brackets directly would be ambiguous
arr1d = np.array([None, None])
arr1d[0] = [1, 2]
arr1d[1] = [3]
assert_equal(repr(arr1d),
'array([list([1, 2]), list([3])], dtype=object)')
def test_void_scalar_recursion(self):
# gh-9345
repr(np.void(b'test')) # RecursionError ?
def test_fieldless_structured(self):
# gh-10366
no_fields = np.dtype([])
arr_no_fields = np.empty(4, dtype=no_fields)
assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])')
class TestComplexArray:
def test_str(self):
rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
dtypes = [np.complex64, np.cdouble, np.clongdouble]
actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes]
wanted = [
'[0.+0.j]', '[0.+0.j]', '[0.+0.j]',
'[0.+1.j]', '[0.+1.j]', '[0.+1.j]',
'[0.-1.j]', '[0.-1.j]', '[0.-1.j]',
'[0.+infj]', '[0.+infj]', '[0.+infj]',
'[0.-infj]', '[0.-infj]', '[0.-infj]',
'[0.+nanj]', '[0.+nanj]', '[0.+nanj]',
'[1.+0.j]', '[1.+0.j]', '[1.+0.j]',
'[1.+1.j]', '[1.+1.j]', '[1.+1.j]',
'[1.-1.j]', '[1.-1.j]', '[1.-1.j]',
'[1.+infj]', '[1.+infj]', '[1.+infj]',
'[1.-infj]', '[1.-infj]', '[1.-infj]',
'[1.+nanj]', '[1.+nanj]', '[1.+nanj]',
'[-1.+0.j]', '[-1.+0.j]', '[-1.+0.j]',
'[-1.+1.j]', '[-1.+1.j]', '[-1.+1.j]',
'[-1.-1.j]', '[-1.-1.j]', '[-1.-1.j]',
'[-1.+infj]', '[-1.+infj]', '[-1.+infj]',
'[-1.-infj]', '[-1.-infj]', '[-1.-infj]',
'[-1.+nanj]', '[-1.+nanj]', '[-1.+nanj]',
'[inf+0.j]', '[inf+0.j]', '[inf+0.j]',
'[inf+1.j]', '[inf+1.j]', '[inf+1.j]',
'[inf-1.j]', '[inf-1.j]', '[inf-1.j]',
'[inf+infj]', '[inf+infj]', '[inf+infj]',
'[inf-infj]', '[inf-infj]', '[inf-infj]',
'[inf+nanj]', '[inf+nanj]', '[inf+nanj]',
'[-inf+0.j]', '[-inf+0.j]', '[-inf+0.j]',
'[-inf+1.j]', '[-inf+1.j]', '[-inf+1.j]',
'[-inf-1.j]', '[-inf-1.j]', '[-inf-1.j]',
'[-inf+infj]', '[-inf+infj]', '[-inf+infj]',
'[-inf-infj]', '[-inf-infj]', '[-inf-infj]',
'[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]',
'[nan+0.j]', '[nan+0.j]', '[nan+0.j]',
'[nan+1.j]', '[nan+1.j]', '[nan+1.j]',
'[nan-1.j]', '[nan-1.j]', '[nan-1.j]',
'[nan+infj]', '[nan+infj]', '[nan+infj]',
'[nan-infj]', '[nan-infj]', '[nan-infj]',
'[nan+nanj]', '[nan+nanj]', '[nan+nanj]']
for res, val in zip(actual, wanted):
assert_equal(res, val)
class TestArray2String:
def test_basic(self):
"""Basic test of array2string."""
a = np.arange(3)
assert_(np.array2string(a) == '[0 1 2]')
assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]')
assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]')
def test_unexpected_kwarg(self):
        # ensure that an appropriate TypeError
# is raised when array2string receives
# an unexpected kwarg
with assert_raises_regex(TypeError, 'nonsense'):
np.array2string(np.array([1, 2, 3]),
nonsense=None)
def test_format_function(self):
"""Test custom format function for each element in array."""
def _format_function(x):
if np.abs(x) < 1:
return '.'
elif np.abs(x) < 2:
return 'o'
else:
return 'O'
x = np.arange(3)
x_hex = "[0x0 0x1 0x2]"
x_oct = "[0o0 0o1 0o2]"
assert_(np.array2string(x, formatter={'all':_format_function}) ==
"[. o O]")
assert_(np.array2string(x, formatter={'int_kind':_format_function}) ==
"[. o O]")
assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) ==
"[0.0000 1.0000 2.0000]")
assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}),
x_hex)
assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}),
x_oct)
x = np.arange(3.)
assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) ==
"[0.00 1.00 2.00]")
assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) ==
"[0.00 1.00 2.00]")
s = np.array(['abc', 'def'])
assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) ==
'[abcabc defdef]')
def test_structure_format(self):
dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
assert_equal(np.array2string(x),
"[('Sarah', [8., 7.]) ('John', [6., 7.])]")
np.set_printoptions(legacy='1.13')
try:
# for issue #5692
A = np.zeros(shape=10, dtype=[("A", "M8[s]")])
A[5:].fill(np.datetime64('NaT'))
assert_equal(
np.array2string(A),
textwrap.dedent("""\
[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',)
('NaT',) ('NaT',) ('NaT',)]""")
)
finally:
np.set_printoptions(legacy=False)
# same again, but with non-legacy behavior
assert_equal(
np.array2string(A),
textwrap.dedent("""\
[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
('1970-01-01T00:00:00',) ( 'NaT',)
( 'NaT',) ( 'NaT',)
( 'NaT',) ( 'NaT',)]""")
)
# and again, with timedeltas
A = np.full(10, 123456, dtype=[("A", "m8[s]")])
A[5:].fill(np.datetime64('NaT'))
assert_equal(
np.array2string(A),
textwrap.dedent("""\
[(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',)
( 'NaT',) ( 'NaT',) ( 'NaT',)]""")
)
# See #8160
struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)])
assert_equal(np.array2string(struct_int),
"[([ 1, -1],) ([123, 1],)]")
struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)],
dtype=[('B', 'i4', (2, 2))])
assert_equal(np.array2string(struct_2dint),
"[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]")
# See #8172
array_scalar = np.array(
(1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8'))
assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)")
def test_unstructured_void_repr(self):
a = np.array([27, 91, 50, 75, 7, 65, 10, 8,
27, 91, 51, 49,109, 82,101,100], dtype='u1').view('V8')
assert_equal(repr(a[0]), r"void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')")
assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'")
assert_equal(repr(a),
r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," "\n"
r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')")
assert_equal(eval(repr(a), vars(np)), a)
assert_equal(eval(repr(a[0]), vars(np)), a[0])
def test_edgeitems_kwarg(self):
        # previously the global print options would take precedence over the kwarg
arr = np.zeros(3, int)
assert_equal(
np.array2string(arr, edgeitems=1, threshold=0),
"[0 ... 0]"
)
def test_summarize_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ... 998 999 1000]'
assert_equal(str(A), strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_equal(repr(A), reprA)
def test_summarize_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ... 498 499 500]\n' \
' [ 501 502 503 ... 999 1000 1001]]'
assert_equal(str(A), strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_equal(repr(A), reprA)
def test_linewidth(self):
a = np.full(6, 1)
def make_str(a, width, **kw):
return np.array2string(a, separator="", max_line_width=width, **kw)
assert_equal(make_str(a, 8, legacy='1.13'), '[111111]')
assert_equal(make_str(a, 7, legacy='1.13'), '[111111]')
assert_equal(make_str(a, 5, legacy='1.13'), '[1111\n'
' 11]')
assert_equal(make_str(a, 8), '[111111]')
assert_equal(make_str(a, 7), '[11111\n'
' 1]')
assert_equal(make_str(a, 5), '[111\n'
' 111]')
b = a[None,None,:]
assert_equal(make_str(b, 12, legacy='1.13'), '[[[111111]]]')
assert_equal(make_str(b, 9, legacy='1.13'), '[[[111111]]]')
assert_equal(make_str(b, 8, legacy='1.13'), '[[[11111\n'
' 1]]]')
assert_equal(make_str(b, 12), '[[[111111]]]')
assert_equal(make_str(b, 9), '[[[111\n'
' 111]]]')
assert_equal(make_str(b, 8), '[[[11\n'
' 11\n'
' 11]]]')
def test_wide_element(self):
a = np.array(['xxxxx'])
assert_equal(
np.array2string(a, max_line_width=5),
"['xxxxx']"
)
assert_equal(
np.array2string(a, max_line_width=5, legacy='1.13'),
"[ 'xxxxx']"
)
def test_multiline_repr(self):
class MultiLine:
def __repr__(self):
return "Line 1\nLine 2"
a = np.array([[None, MultiLine()], [MultiLine(), None]])
assert_equal(
np.array2string(a),
'[[None Line 1\n'
' Line 2]\n'
' [Line 1\n'
' Line 2 None]]'
)
assert_equal(
np.array2string(a, max_line_width=5),
'[[None\n'
' Line 1\n'
' Line 2]\n'
' [Line 1\n'
' Line 2\n'
' None]]'
)
assert_equal(
repr(a),
'array([[None, Line 1\n'
' Line 2],\n'
' [Line 1\n'
' Line 2, None]], dtype=object)'
)
class MultiLineLong:
def __repr__(self):
return "Line 1\nLooooooooooongestLine2\nLongerLine 3"
a = np.array([[None, MultiLineLong()], [MultiLineLong(), None]])
assert_equal(
repr(a),
'array([[None, Line 1\n'
' LooooooooooongestLine2\n'
' LongerLine 3 ],\n'
' [Line 1\n'
' LooooooooooongestLine2\n'
' LongerLine 3 , None]], dtype=object)'
)
assert_equal(
np.array_repr(a, 20),
'array([[None,\n'
' Line 1\n'
' LooooooooooongestLine2\n'
' LongerLine 3 ],\n'
' [Line 1\n'
' LooooooooooongestLine2\n'
' LongerLine 3 ,\n'
' None]],\n'
' dtype=object)'
)
def test_nested_array_repr(self):
a = np.empty((2, 2), dtype=object)
a[0, 0] = np.eye(2)
a[0, 1] = np.eye(3)
a[1, 0] = None
a[1, 1] = np.ones((3, 1))
assert_equal(
repr(a),
'array([[array([[1., 0.],\n'
' [0., 1.]]), array([[1., 0., 0.],\n'
' [0., 1., 0.],\n'
' [0., 0., 1.]])],\n'
' [None, array([[1.],\n'
' [1.],\n'
' [1.]])]], dtype=object)'
)
@given(hynp.from_dtype(np.dtype("U")))
def test_any_text(self, text):
# This test checks that, given any value that can be represented in an
# array of dtype("U") (i.e. unicode string), ...
a = np.array([text, text, text])
# casting a list of them to an array does not e.g. truncate the value
assert_equal(a[0], text)
# and that np.array2string puts a newline in the expected location
expected_repr = "[{0!r} {0!r}\n {0!r}]".format(text)
result = np.array2string(a, max_line_width=len(repr(text)) * 2 + 3)
assert_equal(result, expected_repr)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_refcount(self):
# make sure we do not hold references to the array due to a recursive
# closure (gh-10620)
gc.disable()
a = np.arange(2)
r1 = sys.getrefcount(a)
np.array2string(a)
np.array2string(a)
r2 = sys.getrefcount(a)
gc.collect()
gc.enable()
assert_(r1 == r2)
class TestPrintOptions:
"""Test getting and setting global print options."""
def setup_method(self):
self.oldopts = np.get_printoptions()
def teardown_method(self):
np.set_printoptions(**self.oldopts)
def test_basic(self):
x = np.array([1.5, 0, 1.234567890])
assert_equal(repr(x), "array([1.5 , 0. , 1.23456789])")
np.set_printoptions(precision=4)
assert_equal(repr(x), "array([1.5 , 0. , 1.2346])")
def test_precision_zero(self):
np.set_printoptions(precision=0)
for values, string in (
([0.], "0."), ([.3], "0."), ([-.3], "-0."), ([.7], "1."),
([1.5], "2."), ([-1.5], "-2."), ([-15.34], "-15."),
([100.], "100."), ([.2, -1, 122.51], " 0., -1., 123."),
([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], "0.-1.j")):
x = np.array(values)
assert_equal(repr(x), "array([%s])" % string)
def test_formatter(self):
x = np.arange(3)
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
def test_formatter_reset(self):
x = np.arange(3)
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'int':None})
assert_equal(repr(x), "array([0, 1, 2])")
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'all':None})
assert_equal(repr(x), "array([0, 1, 2])")
np.set_printoptions(formatter={'int':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'int_kind':None})
assert_equal(repr(x), "array([0, 1, 2])")
x = np.arange(3.)
np.set_printoptions(formatter={'float':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1.0, 0.0, 1.0])")
np.set_printoptions(formatter={'float_kind':None})
assert_equal(repr(x), "array([0., 1., 2.])")
def test_0d_arrays(self):
assert_equal(str(np.array(u'café', '<U4')), u'café')
assert_equal(repr(np.array('café', '<U4')),
"array('café', dtype='<U4')")
assert_equal(str(np.array('test', np.str_)), 'test')
a = np.zeros(1, dtype=[('a', '<i4', (3,))])
assert_equal(str(a[0]), '([0, 0, 0],)')
assert_equal(repr(np.datetime64('2005-02-25')[...]),
"array('2005-02-25', dtype='datetime64[D]')")
assert_equal(repr(np.timedelta64('10', 'Y')[...]),
"array(10, dtype='timedelta64[Y]')")
# repr of 0d arrays is affected by printoptions
x = np.array(1)
np.set_printoptions(formatter={'all':lambda x: "test"})
assert_equal(repr(x), "array(test)")
# str is unaffected
assert_equal(str(x), "1")
# check `style` arg raises
assert_warns(DeprecationWarning, np.array2string,
np.array(1.), style=repr)
# but not in legacy mode
np.array2string(np.array(1.), style=repr, legacy='1.13')
# gh-10934 style was broken in legacy mode, check it works
np.array2string(np.array(1.), legacy='1.13')
def test_float_spacing(self):
x = np.array([1., 2., 3.])
y = np.array([1., 2., -10.])
z = np.array([100., 2., -1.])
w = np.array([-100., 2., 1.])
assert_equal(repr(x), 'array([1., 2., 3.])')
assert_equal(repr(y), 'array([ 1., 2., -10.])')
assert_equal(repr(np.array(y[0])), 'array(1.)')
assert_equal(repr(np.array(y[-1])), 'array(-10.)')
assert_equal(repr(z), 'array([100., 2., -1.])')
assert_equal(repr(w), 'array([-100., 2., 1.])')
assert_equal(repr(np.array([np.nan, np.inf])), 'array([nan, inf])')
assert_equal(repr(np.array([np.nan, -np.inf])), 'array([ nan, -inf])')
x = np.array([np.inf, 100000, 1.1234])
y = np.array([np.inf, 100000, -1.1234])
z = np.array([np.inf, 1.1234, -1e120])
np.set_printoptions(precision=2)
assert_equal(repr(x), 'array([ inf, 1.00e+05, 1.12e+00])')
assert_equal(repr(y), 'array([ inf, 1.00e+05, -1.12e+00])')
assert_equal(repr(z), 'array([ inf, 1.12e+000, -1.00e+120])')
def test_bool_spacing(self):
assert_equal(repr(np.array([True, True])),
'array([ True, True])')
assert_equal(repr(np.array([True, False])),
'array([ True, False])')
assert_equal(repr(np.array([True])),
'array([ True])')
assert_equal(repr(np.array(True)),
'array(True)')
assert_equal(repr(np.array(False)),
'array(False)')
def test_sign_spacing(self):
a = np.arange(4.)
b = np.array([1.234e9])
c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
assert_equal(repr(a), 'array([0., 1., 2., 3.])')
assert_equal(repr(np.array(1.)), 'array(1.)')
assert_equal(repr(b), 'array([1.234e+09])')
assert_equal(repr(np.array([0.])), 'array([0.])')
assert_equal(repr(c),
"array([1. +1.j , 1.12345679+1.12345679j])")
assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
np.set_printoptions(sign=' ')
assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
assert_equal(repr(np.array(1.)), 'array( 1.)')
assert_equal(repr(b), 'array([ 1.234e+09])')
assert_equal(repr(c),
"array([ 1. +1.j , 1.12345679+1.12345679j])")
assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
np.set_printoptions(sign='+')
assert_equal(repr(a), 'array([+0., +1., +2., +3.])')
assert_equal(repr(np.array(1.)), 'array(+1.)')
assert_equal(repr(b), 'array([+1.234e+09])')
assert_equal(repr(c),
"array([+1. +1.j , +1.12345679+1.12345679j])")
np.set_printoptions(legacy='1.13')
assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
assert_equal(repr(b), 'array([ 1.23400000e+09])')
assert_equal(repr(-b), 'array([ -1.23400000e+09])')
assert_equal(repr(np.array(1.)), 'array(1.0)')
assert_equal(repr(np.array([0.])), 'array([ 0.])')
assert_equal(repr(c),
"array([ 1.00000000+1.j , 1.12345679+1.12345679j])")
# gh-10383
assert_equal(str(np.array([-1., 10])), "[ -1. 10.]")
assert_raises(TypeError, np.set_printoptions, wrongarg=True)
def test_float_overflow_nowarn(self):
# make sure internal computations in FloatingFormat don't
# warn about overflow
repr(np.array([1e4, 0.1], dtype='f2'))
def test_sign_spacing_structured(self):
a = np.ones(2, dtype='<f,<f')
assert_equal(repr(a),
"array([(1., 1.), (1., 1.)], dtype=[('f0', '<f4'), ('f1', '<f4')])")
assert_equal(repr(a[0]), "(1., 1.)")
def test_floatmode(self):
x = np.array([0.6104, 0.922, 0.457, 0.0906, 0.3733, 0.007244,
0.5933, 0.947, 0.2383, 0.4226], dtype=np.float16)
y = np.array([0.2918820979355541, 0.5064172631089138,
0.2848750619642916, 0.4342965294660567,
0.7326538397312751, 0.3459503329096204,
0.0862072768214508, 0.39112753029631175],
dtype=np.float64)
z = np.arange(6, dtype=np.float16)/10
c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
        # also make sure 1e23 is right (it lies between two representable floats)
w = np.array(['1e{}'.format(i) for i in range(25)], dtype=np.float64)
# note: we construct w from the strings `1eXX` instead of doing
# `10.**arange(24)` because it turns out the two are not equivalent in
# python. On some architectures `1e23 != 10.**23`.
wp = np.array([1.234e1, 1e2, 1e123])
# unique mode
np.set_printoptions(floatmode='unique')
assert_equal(repr(x),
"array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
" 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
assert_equal(repr(y),
"array([0.2918820979355541 , 0.5064172631089138 , 0.2848750619642916 ,\n"
" 0.4342965294660567 , 0.7326538397312751 , 0.3459503329096204 ,\n"
" 0.0862072768214508 , 0.39112753029631175])")
assert_equal(repr(z),
"array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
assert_equal(repr(w),
"array([1.e+00, 1.e+01, 1.e+02, 1.e+03, 1.e+04, 1.e+05, 1.e+06, 1.e+07,\n"
" 1.e+08, 1.e+09, 1.e+10, 1.e+11, 1.e+12, 1.e+13, 1.e+14, 1.e+15,\n"
" 1.e+16, 1.e+17, 1.e+18, 1.e+19, 1.e+20, 1.e+21, 1.e+22, 1.e+23,\n"
" 1.e+24])")
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
assert_equal(repr(c),
"array([1. +1.j , 1.123456789+1.123456789j])")
# maxprec mode, precision=8
np.set_printoptions(floatmode='maxprec', precision=8)
assert_equal(repr(x),
"array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
" 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
assert_equal(repr(y),
"array([0.2918821 , 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
" 0.34595033, 0.08620728, 0.39112753])")
assert_equal(repr(z),
"array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
assert_equal(repr(w[::5]),
"array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
assert_equal(repr(c),
"array([1. +1.j , 1.12345679+1.12345679j])")
# fixed mode, precision=4
np.set_printoptions(floatmode='fixed', precision=4)
assert_equal(repr(x),
"array([0.6104, 0.9219, 0.4570, 0.0906, 0.3733, 0.0072, 0.5933, 0.9468,\n"
" 0.2383, 0.4226], dtype=float16)")
assert_equal(repr(y),
"array([0.2919, 0.5064, 0.2849, 0.4343, 0.7327, 0.3460, 0.0862, 0.3911])")
assert_equal(repr(z),
"array([0.0000, 0.1000, 0.2000, 0.3000, 0.3999, 0.5000], dtype=float16)")
assert_equal(repr(w[::5]),
"array([1.0000e+00, 1.0000e+05, 1.0000e+10, 1.0000e+15, 1.0000e+20])")
assert_equal(repr(wp), "array([1.2340e+001, 1.0000e+002, 1.0000e+123])")
assert_equal(repr(np.zeros(3)), "array([0.0000, 0.0000, 0.0000])")
assert_equal(repr(c),
"array([1.0000+1.0000j, 1.1235+1.1235j])")
# for larger precision, representation error becomes more apparent:
np.set_printoptions(floatmode='fixed', precision=8)
assert_equal(repr(z),
"array([0.00000000, 0.09997559, 0.19995117, 0.30004883, 0.39990234,\n"
" 0.50000000], dtype=float16)")
# maxprec_equal mode, precision=8
np.set_printoptions(floatmode='maxprec_equal', precision=8)
assert_equal(repr(x),
"array([0.610352, 0.921875, 0.457031, 0.090576, 0.373291, 0.007244,\n"
" 0.593262, 0.946777, 0.238281, 0.422607], dtype=float16)")
assert_equal(repr(y),
"array([0.29188210, 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
" 0.34595033, 0.08620728, 0.39112753])")
assert_equal(repr(z),
"array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
assert_equal(repr(w[::5]),
"array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
assert_equal(repr(c),
"array([1.00000000+1.00000000j, 1.12345679+1.12345679j])")
# test unique special case (gh-18609)
a = np.float64.fromhex('-1p-97')
assert_equal(np.float64(np.array2string(a, floatmode='unique')), a)
def test_legacy_mode_scalars(self):
# in legacy mode, str of floats get truncated, and complex scalars
# use * for non-finite imaginary part
np.set_printoptions(legacy='1.13')
assert_equal(str(np.float64(1.123456789123456789)), '1.12345678912')
assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nan*j)')
np.set_printoptions(legacy=False)
assert_equal(str(np.float64(1.123456789123456789)),
'1.1234567891234568')
assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nanj)')
def test_legacy_stray_comma(self):
np.set_printoptions(legacy='1.13')
assert_equal(str(np.arange(10000)), '[ 0 1 2 ..., 9997 9998 9999]')
np.set_printoptions(legacy=False)
assert_equal(str(np.arange(10000)), '[ 0 1 2 ... 9997 9998 9999]')
def test_dtype_linewidth_wrapping(self):
np.set_printoptions(linewidth=75)
assert_equal(repr(np.arange(10,20., dtype='f4')),
"array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19.], dtype=float32)")
assert_equal(repr(np.arange(10,23., dtype='f4')), textwrap.dedent("""\
array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22.],
dtype=float32)"""))
styp = '<U4'
assert_equal(repr(np.ones(3, dtype=styp)),
"array(['1', '1', '1'], dtype='{}')".format(styp))
assert_equal(repr(np.ones(12, dtype=styp)), textwrap.dedent("""\
array(['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'],
dtype='{}')""".format(styp)))
def test_linewidth_repr(self):
a = np.full(7, fill_value=2)
np.set_printoptions(linewidth=17)
assert_equal(
repr(a),
textwrap.dedent("""\
array([2, 2, 2,
2, 2, 2,
2])""")
)
np.set_printoptions(linewidth=17, legacy='1.13')
assert_equal(
repr(a),
textwrap.dedent("""\
array([2, 2, 2,
2, 2, 2, 2])""")
)
a = np.full(8, fill_value=2)
np.set_printoptions(linewidth=18, legacy=False)
assert_equal(
repr(a),
textwrap.dedent("""\
array([2, 2, 2,
2, 2, 2,
2, 2])""")
)
np.set_printoptions(linewidth=18, legacy='1.13')
assert_equal(
repr(a),
textwrap.dedent("""\
array([2, 2, 2, 2,
2, 2, 2, 2])""")
)
def test_linewidth_str(self):
a = np.full(18, fill_value=2)
np.set_printoptions(linewidth=18)
assert_equal(
str(a),
textwrap.dedent("""\
[2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2
2 2]""")
)
np.set_printoptions(linewidth=18, legacy='1.13')
assert_equal(
str(a),
textwrap.dedent("""\
[2 2 2 2 2 2 2 2 2
2 2 2 2 2 2 2 2 2]""")
)
def test_edgeitems(self):
np.set_printoptions(edgeitems=1, threshold=1)
a = np.arange(27).reshape((3, 3, 3))
assert_equal(
repr(a),
textwrap.dedent("""\
array([[[ 0, ..., 2],
...,
[ 6, ..., 8]],
...,
[[18, ..., 20],
...,
[24, ..., 26]]])""")
)
b = np.zeros((3, 3, 1, 1))
assert_equal(
repr(b),
textwrap.dedent("""\
array([[[[0.]],
...,
[[0.]]],
...,
[[[0.]],
...,
[[0.]]]])""")
)
# 1.13 had extra trailing spaces, and was missing newlines
np.set_printoptions(legacy='1.13')
assert_equal(
repr(a),
textwrap.dedent("""\
array([[[ 0, ..., 2],
...,
[ 6, ..., 8]],
...,
[[18, ..., 20],
...,
[24, ..., 26]]])""")
)
assert_equal(
repr(b),
textwrap.dedent("""\
array([[[[ 0.]],
...,
[[ 0.]]],
...,
[[[ 0.]],
...,
[[ 0.]]]])""")
)
def test_bad_args(self):
assert_raises(ValueError, np.set_printoptions, threshold=float('nan'))
assert_raises(TypeError, np.set_printoptions, threshold='1')
assert_raises(TypeError, np.set_printoptions, threshold=b'1')
assert_raises(TypeError, np.set_printoptions, precision='1')
assert_raises(TypeError, np.set_printoptions, precision=1.5)
def test_unicode_object_array():
expected = "array(['é'], dtype=object)"
x = np.array([u'\xe9'], dtype=object)
assert_equal(repr(x), expected)
class TestContextManager:
def test_ctx_mgr(self):
# test that context manager actually works
with np.printoptions(precision=2):
s = str(np.array([2.0]) / 3)
assert_equal(s, '[0.67]')
def test_ctx_mgr_restores(self):
        # test that print options are actually restored
opts = np.get_printoptions()
with np.printoptions(precision=opts['precision'] - 1,
linewidth=opts['linewidth'] - 4):
pass
assert_equal(np.get_printoptions(), opts)
def test_ctx_mgr_exceptions(self):
# test that print options are restored even if an exception is raised
opts = np.get_printoptions()
try:
with np.printoptions(precision=2, linewidth=11):
raise ValueError
except ValueError:
pass
assert_equal(np.get_printoptions(), opts)
def test_ctx_mgr_as_smth(self):
opts = {"precision": 2}
with np.printoptions(**opts) as ctx:
saved_opts = ctx.copy()
assert_equal({k: saved_opts[k] for k in opts}, opts)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_casting_unittests.py
"""
The tests exercise the casting machinery in a more low-level manner.
The reason is mostly to test a new implementation of the casting machinery.
Unlike most tests in NumPy, these are closer to unit-tests rather
than integration tests.
"""
import pytest
import textwrap
import enum
import random
import numpy as np
from numpy.lib.stride_tricks import as_strided
from numpy.testing import assert_array_equal
from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl
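# For orientation: the private API exercised throughout this file looks
# roughly like the sketch below. `_resolve_descriptors` returns the cast
# safety, the resolved descriptors, and a view offset (names reflect the
# current private layout and may change):
#
#     cast = get_castingimpl(type(np.dtype("i8")), type(np.dtype("f8")))
#     casting, (from_dt, to_dt), view_off = cast._resolve_descriptors(
#         (np.dtype("i8"), None))
#     # `to_dt` is the default target (float64); `view_off` is None since
#     # an int64 -> float64 cast can never be a view.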
# "Simple" dtypes skip object, parametric and long double (unsupported by struct)
simple_dtypes = "?bhilqBHILQefdFD"
if np.dtype("l").itemsize != np.dtype("q").itemsize:
# Remove l and L, the table was generated with 64bit linux in mind.
simple_dtypes = simple_dtypes.replace("l", "").replace("L", "")
simple_dtypes = [type(np.dtype(c)) for c in simple_dtypes]
def simple_dtype_instances():
for dtype_class in simple_dtypes:
dt = dtype_class()
yield pytest.param(dt, id=str(dt))
if dt.byteorder != "|":
dt = dt.newbyteorder()
yield pytest.param(dt, id=str(dt))
def get_expected_stringlength(dtype):
"""Returns the string length when casting the basic dtypes to strings.
"""
if dtype == np.bool_:
return 5
if dtype.kind in "iu":
if dtype.itemsize == 1:
length = 3
elif dtype.itemsize == 2:
length = 5
elif dtype.itemsize == 4:
length = 10
elif dtype.itemsize == 8:
length = 20
else:
raise AssertionError(f"did not find expected length for {dtype}")
if dtype.kind == "i":
length += 1 # adds one character for the sign
return length
# Note: Can't do dtype comparison for longdouble on windows
if dtype.char == "g":
return 48
elif dtype.char == "G":
return 48 * 2
elif dtype.kind == "f":
return 32 # also for half apparently.
elif dtype.kind == "c":
return 32 * 2
raise AssertionError(f"did not find expected length for {dtype}")
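# For example: int8 values span "-128".."127", so the expected length for
# np.dtype(np.int8) is 4 (three digits plus a sign), matching the "S4"
# descriptor resolved for an int8 -> bytes cast.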
class Casting(enum.IntEnum):
no = 0
equiv = 1
safe = 2
same_kind = 3
unsafe = 4
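# These values mirror the NPY_CASTING levels; the same levels are what the
# `casting` keyword accepts as strings, e.g.:
#
#     np.can_cast("i4", "i8", casting="safe")       # -> True
#     np.can_cast("i8", "i4", casting="same_kind")  # -> True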
def _get_cancast_table():
table = textwrap.dedent("""
X ? b h i l q B H I L Q e f d g F D G S U V O M m
? # = = = = = = = = = = = = = = = = = = = = = . =
b . # = = = = . . . . . = = = = = = = = = = = . =
h . ~ # = = = . . . . . ~ = = = = = = = = = = . =
i . ~ ~ # = = . . . . . ~ ~ = = ~ = = = = = = . =
l . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
q . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
B . ~ = = = = # = = = = = = = = = = = = = = = . =
H . ~ ~ = = = ~ # = = = ~ = = = = = = = = = = . =
I . ~ ~ ~ = = ~ ~ # = = ~ ~ = = ~ = = = = = = . =
L . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
Q . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
e . . . . . . . . . . . # = = = = = = = = = = . .
f . . . . . . . . . . . ~ # = = = = = = = = = . .
d . . . . . . . . . . . ~ ~ # = ~ = = = = = = . .
g . . . . . . . . . . . ~ ~ ~ # ~ ~ = = = = = . .
F . . . . . . . . . . . . . . . # = = = = = = . .
D . . . . . . . . . . . . . . . ~ # = = = = = . .
G . . . . . . . . . . . . . . . ~ ~ # = = = = . .
S . . . . . . . . . . . . . . . . . . # = = = . .
U . . . . . . . . . . . . . . . . . . . # = = . .
V . . . . . . . . . . . . . . . . . . . . # = . .
O . . . . . . . . . . . . . . . . . . . . = # . .
M . . . . . . . . . . . . . . . . . . . . = = # .
m . . . . . . . . . . . . . . . . . . . . = = . #
""").strip().split("\n")
dtypes = [type(np.dtype(c)) for c in table[0][2::2]]
convert_cast = {".": Casting.unsafe, "~": Casting.same_kind,
"=": Casting.safe, "#": Casting.equiv,
" ": -1}
cancast = {}
for from_dt, row in zip(dtypes, table[1:]):
cancast[from_dt] = {}
for to_dt, c in zip(dtypes, row[2::2]):
cancast[from_dt][to_dt] = convert_cast[c]
return cancast
CAST_TABLE = _get_cancast_table()
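# Reading the table: CAST_TABLE[from_DType][to_DType] gives the minimal
# casting level, e.g. CAST_TABLE[type(np.dtype("b"))][type(np.dtype("h"))]
# is Casting.safe (the "=" entry in row "b", column "h").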
class TestChanges:
"""
These test cases exercise some behaviour changes
"""
@pytest.mark.parametrize("string", ["S", "U"])
@pytest.mark.parametrize("floating", ["e", "f", "d", "g"])
def test_float_to_string(self, floating, string):
assert np.can_cast(floating, string)
# 100 is long enough to hold any formatted floating
assert np.can_cast(floating, f"{string}100")
def test_to_void(self):
        # In general, we consider these casts safe:
assert np.can_cast("d", "V")
assert np.can_cast("S20", "V")
# Do not consider it a safe cast if the void is too smaller:
assert not np.can_cast("d", "V1")
assert not np.can_cast("S20", "V1")
assert not np.can_cast("U1", "V1")
# Structured to unstructured is just like any other:
assert np.can_cast("d,i", "V", casting="same_kind")
# Unstructured void to unstructured is actually no cast at all:
assert np.can_cast("V3", "V", casting="no")
assert np.can_cast("V0", "V", casting="no")
class TestCasting:
    size = 1500  # Best if larger than NPY_LOWLEVEL_BUFFER_BLOCKSIZE * itemsize
def get_data(self, dtype1, dtype2):
if dtype2 is None or dtype1.itemsize >= dtype2.itemsize:
length = self.size // dtype1.itemsize
else:
length = self.size // dtype2.itemsize
# Assume that the base array is well enough aligned for all inputs.
arr1 = np.empty(length, dtype=dtype1)
assert arr1.flags.c_contiguous
assert arr1.flags.aligned
values = [random.randrange(-128, 128) for _ in range(length)]
for i, value in enumerate(values):
# Use item assignment to ensure this is not using casting:
arr1[i] = value
if dtype2 is None:
if dtype1.char == "?":
values = [bool(v) for v in values]
return arr1, values
if dtype2.char == "?":
values = [bool(v) for v in values]
arr2 = np.empty(length, dtype=dtype2)
assert arr2.flags.c_contiguous
assert arr2.flags.aligned
for i, value in enumerate(values):
# Use item assignment to ensure this is not using casting:
arr2[i] = value
return arr1, arr2, values
def get_data_variation(self, arr1, arr2, aligned=True, contig=True):
"""
Returns a copy of arr1 that may be non-contiguous or unaligned, and a
matching array for arr2 (although not a copy).
"""
if contig:
stride1 = arr1.dtype.itemsize
stride2 = arr2.dtype.itemsize
elif aligned:
stride1 = 2 * arr1.dtype.itemsize
stride2 = 2 * arr2.dtype.itemsize
else:
stride1 = arr1.dtype.itemsize + 1
stride2 = arr2.dtype.itemsize + 1
max_size1 = len(arr1) * 3 * arr1.dtype.itemsize + 1
max_size2 = len(arr2) * 3 * arr2.dtype.itemsize + 1
from_bytes = np.zeros(max_size1, dtype=np.uint8)
to_bytes = np.zeros(max_size2, dtype=np.uint8)
# Sanity check that the above is large enough:
assert stride1 * len(arr1) <= from_bytes.nbytes
assert stride2 * len(arr2) <= to_bytes.nbytes
if aligned:
new1 = as_strided(from_bytes[:-1].view(arr1.dtype),
arr1.shape, (stride1,))
new2 = as_strided(to_bytes[:-1].view(arr2.dtype),
arr2.shape, (stride2,))
else:
new1 = as_strided(from_bytes[1:].view(arr1.dtype),
arr1.shape, (stride1,))
new2 = as_strided(to_bytes[1:].view(arr2.dtype),
arr2.shape, (stride2,))
new1[...] = arr1
if not contig:
# Ensure we did not overwrite bytes that should not be written:
offset = arr1.dtype.itemsize if aligned else 0
buf = from_bytes[offset::stride1].tobytes()
assert buf.count(b"\0") == len(buf)
if contig:
assert new1.flags.c_contiguous
assert new2.flags.c_contiguous
else:
assert not new1.flags.c_contiguous
assert not new2.flags.c_contiguous
if aligned:
assert new1.flags.aligned
assert new2.flags.aligned
else:
assert not new1.flags.aligned or new1.dtype.alignment == 1
assert not new2.flags.aligned or new2.dtype.alignment == 1
return new1, new2
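    # A sketch of the unaligned layout produced above (itemsize 4, three
    # elements): the byte buffer is offset by one and strided by
    # itemsize + 1, so no element starts on a 4-byte boundary:
    #
    #     buf = np.zeros(3 * 3 * 4 + 1, dtype=np.uint8)
    #     unaligned = as_strided(buf[1:].view("i4"), (3,), (5,))
    #     assert not unaligned.flags.aligned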
@pytest.mark.parametrize("from_Dt", simple_dtypes)
def test_simple_cancast(self, from_Dt):
for to_Dt in simple_dtypes:
cast = get_castingimpl(from_Dt, to_Dt)
for from_dt in [from_Dt(), from_Dt().newbyteorder()]:
default = cast._resolve_descriptors((from_dt, None))[1][1]
assert default == to_Dt()
del default
for to_dt in [to_Dt(), to_Dt().newbyteorder()]:
casting, (from_res, to_res), view_off = (
cast._resolve_descriptors((from_dt, to_dt)))
assert(type(from_res) == from_Dt)
assert(type(to_res) == to_Dt)
if view_off is not None:
# If a view is acceptable, this is "no" casting
# and byte order must be matching.
assert casting == Casting.no
# The above table lists this as "equivalent"
assert Casting.equiv == CAST_TABLE[from_Dt][to_Dt]
# Note that to_res may not be the same as from_dt
assert from_res.isnative == to_res.isnative
else:
if from_Dt == to_Dt:
# Note that to_res may not be the same as from_dt
assert from_res.isnative != to_res.isnative
assert casting == CAST_TABLE[from_Dt][to_Dt]
if from_Dt is to_Dt:
assert(from_dt is from_res)
assert(to_dt is to_res)
@pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
@pytest.mark.parametrize("from_dt", simple_dtype_instances())
def test_simple_direct_casts(self, from_dt):
"""
This test checks numeric direct casts for dtypes supported also by the
        struct module (plus complex). It tries to test a wide range of
inputs, but skips over possibly undefined behaviour (e.g. int rollover).
Longdouble and CLongdouble are tested, but only using double precision.
If this test creates issues, it should possibly just be simplified
or even removed (checking whether unaligned/non-contiguous casts give
the same results is useful, though).
"""
for to_dt in simple_dtype_instances():
to_dt = to_dt.values[0]
cast = get_castingimpl(type(from_dt), type(to_dt))
casting, (from_res, to_res), view_off = cast._resolve_descriptors(
(from_dt, to_dt))
if from_res is not from_dt or to_res is not to_dt:
# Do not test this case, it is handled in multiple steps,
                # each of which is tested individually.
                continue
safe = casting <= Casting.safe
del from_res, to_res, casting
arr1, arr2, values = self.get_data(from_dt, to_dt)
cast._simple_strided_call((arr1, arr2))
# Check via python list
assert arr2.tolist() == values
# Check that the same results are achieved for strided loops
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
# Check if alignment makes a difference, but only if supported
# and only if the alignment can be wrong
if ((from_dt.alignment == 1 and to_dt.alignment == 1) or
not cast._supports_unaligned):
                continue
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, True)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, False)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
del arr1_o, arr2_o, cast
@pytest.mark.parametrize("from_Dt", simple_dtypes)
def test_numeric_to_times(self, from_Dt):
# We currently only implement contiguous loops, so only need to
# test those.
from_dt = from_Dt()
time_dtypes = [np.dtype("M8"), np.dtype("M8[ms]"), np.dtype("M8[4D]"),
np.dtype("m8"), np.dtype("m8[ms]"), np.dtype("m8[4D]")]
for time_dt in time_dtypes:
cast = get_castingimpl(type(from_dt), type(time_dt))
casting, (from_res, to_res), view_off = cast._resolve_descriptors(
(from_dt, time_dt))
assert from_res is from_dt
assert to_res is time_dt
del from_res, to_res
assert casting & CAST_TABLE[from_Dt][type(time_dt)]
assert view_off is None
int64_dt = np.dtype(np.int64)
arr1, arr2, values = self.get_data(from_dt, int64_dt)
arr2 = arr2.view(time_dt)
arr2[...] = np.datetime64("NaT")
if time_dt == np.dtype("M8"):
# This is a bit of a strange path, and could probably be removed
arr1[-1] = 0 # ensure at least one value is not NaT
# The cast currently succeeds, but the values are invalid:
cast._simple_strided_call((arr1, arr2))
with pytest.raises(ValueError):
str(arr2[-1]) # e.g. conversion to string fails
                continue
cast._simple_strided_call((arr1, arr2))
assert [int(v) for v in arr2.tolist()] == values
# Check that the same results are achieved for strided loops
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
@pytest.mark.parametrize(
["from_dt", "to_dt", "expected_casting", "expected_view_off",
"nom", "denom"],
[("M8[ns]", None, Casting.no, 0, 1, 1),
(str(np.dtype("M8[ns]").newbyteorder()), None,
Casting.equiv, None, 1, 1),
("M8", "M8[ms]", Casting.safe, 0, 1, 1),
# should be invalid cast:
("M8[ms]", "M8", Casting.unsafe, None, 1, 1),
("M8[5ms]", "M8[5ms]", Casting.no, 0, 1, 1),
("M8[ns]", "M8[ms]", Casting.same_kind, None, 1, 10**6),
("M8[ms]", "M8[ns]", Casting.safe, None, 10**6, 1),
("M8[ms]", "M8[7ms]", Casting.same_kind, None, 1, 7),
("M8[4D]", "M8[1M]", Casting.same_kind, None, None,
# give full values based on NumPy 1.19.x
[-2**63, 0, -1, 1314, -1315, 564442610]),
("m8[ns]", None, Casting.no, 0, 1, 1),
(str(np.dtype("m8[ns]").newbyteorder()), None,
Casting.equiv, None, 1, 1),
("m8", "m8[ms]", Casting.safe, 0, 1, 1),
# should be invalid cast:
("m8[ms]", "m8", Casting.unsafe, None, 1, 1),
("m8[5ms]", "m8[5ms]", Casting.no, 0, 1, 1),
("m8[ns]", "m8[ms]", Casting.same_kind, None, 1, 10**6),
("m8[ms]", "m8[ns]", Casting.safe, None, 10**6, 1),
("m8[ms]", "m8[7ms]", Casting.same_kind, None, 1, 7),
("m8[4D]", "m8[1M]", Casting.unsafe, None, None,
# give full values based on NumPy 1.19.x
[-2**63, 0, 0, 1314, -1315, 564442610])])
def test_time_to_time(self, from_dt, to_dt,
expected_casting, expected_view_off,
nom, denom):
from_dt = np.dtype(from_dt)
if to_dt is not None:
to_dt = np.dtype(to_dt)
# Test a few values for casting (results generated with NumPy 1.19)
values = np.array([-2**63, 1, 2**63-1, 10000, -10000, 2**32])
values = values.astype(np.dtype("int64").newbyteorder(from_dt.byteorder))
assert values.dtype.byteorder == from_dt.byteorder
assert np.isnat(values.view(from_dt)[0])
DType = type(from_dt)
cast = get_castingimpl(DType, DType)
casting, (from_res, to_res), view_off = cast._resolve_descriptors(
(from_dt, to_dt))
assert from_res is from_dt
assert to_res is to_dt or to_dt is None
assert casting == expected_casting
assert view_off == expected_view_off
if nom is not None:
expected_out = (values * nom // denom).view(to_res)
expected_out[0] = "NaT"
else:
expected_out = np.empty_like(values)
expected_out[...] = denom
expected_out = expected_out.view(to_dt)
orig_arr = values.view(from_dt)
orig_out = np.empty_like(expected_out)
if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"):
# Casting from non-generic to generic units is an error and should
# probably be reported as an invalid cast earlier.
with pytest.raises(ValueError):
cast._simple_strided_call((orig_arr, orig_out))
return
        # Run the aligned/contiguous variations; skip unaligned data when
        # the cast implementation does not support it.
        for aligned in [True, False]:
            for contig in [True, False]:
                if not aligned and not cast._supports_unaligned:
                    continue
arr, out = self.get_data_variation(
orig_arr, orig_out, aligned, contig)
out[...] = 0
cast._simple_strided_call((arr, out))
assert_array_equal(out.view("int64"), expected_out.view("int64"))
def string_with_modified_length(self, dtype, change_length):
fact = 1 if dtype.char == "S" else 4
length = dtype.itemsize // fact + change_length
return np.dtype(f"{dtype.byteorder}{dtype.char}{length}")
@pytest.mark.parametrize("other_DT", simple_dtypes)
@pytest.mark.parametrize("string_char", ["S", "U"])
def test_string_cancast(self, other_DT, string_char):
fact = 1 if string_char == "S" else 4
string_DT = type(np.dtype(string_char))
cast = get_castingimpl(other_DT, string_DT)
other_dt = other_DT()
expected_length = get_expected_stringlength(other_dt)
string_dt = np.dtype(f"{string_char}{expected_length}")
safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors(
(other_dt, None))
assert res_dt.itemsize == expected_length * fact
assert safety == Casting.safe # we consider to string casts "safe"
assert view_off is None
assert isinstance(res_dt, string_DT)
# These casts currently implement changing the string length, so
        # check the cast-safety for shorter/equal/longer string lengths:
for change_length in [-1, 0, 1]:
if change_length >= 0:
expected_safety = Casting.safe
else:
expected_safety = Casting.same_kind
to_dt = self.string_with_modified_length(string_dt, change_length)
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(other_dt, to_dt))
assert res_dt is to_dt
assert safety == expected_safety
assert view_off is None
# The opposite direction is always considered unsafe:
cast = get_castingimpl(string_DT, other_DT)
safety, _, view_off = cast._resolve_descriptors((string_dt, other_dt))
assert safety == Casting.unsafe
assert view_off is None
cast = get_castingimpl(string_DT, other_DT)
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(string_dt, None))
assert safety == Casting.unsafe
assert view_off is None
assert other_dt is res_dt # returns the singleton for simple dtypes
@pytest.mark.parametrize("string_char", ["S", "U"])
@pytest.mark.parametrize("other_dt", simple_dtype_instances())
def test_simple_string_casts_roundtrip(self, other_dt, string_char):
"""
Tests casts from and to string by checking the roundtripping property.
The test also covers some string to string casts (but not all).
If this test creates issues, it should possibly just be simplified
or even removed (checking whether unaligned/non-contiguous casts give
the same results is useful, though).
"""
string_DT = type(np.dtype(string_char))
cast = get_castingimpl(type(other_dt), string_DT)
cast_back = get_castingimpl(string_DT, type(other_dt))
_, (res_other_dt, string_dt), _ = cast._resolve_descriptors(
(other_dt, None))
if res_other_dt is not other_dt:
# do not support non-native byteorder, skip test in that case
assert other_dt.byteorder != res_other_dt.byteorder
return
orig_arr, values = self.get_data(other_dt, None)
str_arr = np.zeros(len(orig_arr), dtype=string_dt)
string_dt_short = self.string_with_modified_length(string_dt, -1)
str_arr_short = np.zeros(len(orig_arr), dtype=string_dt_short)
string_dt_long = self.string_with_modified_length(string_dt, 1)
str_arr_long = np.zeros(len(orig_arr), dtype=string_dt_long)
assert not cast._supports_unaligned # if support is added, should test
assert not cast_back._supports_unaligned
for contig in [True, False]:
other_arr, str_arr = self.get_data_variation(
orig_arr, str_arr, True, contig)
_, str_arr_short = self.get_data_variation(
orig_arr, str_arr_short.copy(), True, contig)
_, str_arr_long = self.get_data_variation(
orig_arr, str_arr_long, True, contig)
cast._simple_strided_call((other_arr, str_arr))
cast._simple_strided_call((other_arr, str_arr_short))
assert_array_equal(str_arr.astype(string_dt_short), str_arr_short)
cast._simple_strided_call((other_arr, str_arr_long))
assert_array_equal(str_arr, str_arr_long)
if other_dt.kind == "b":
# Booleans do not roundtrip
continue
other_arr[...] = 0
cast_back._simple_strided_call((str_arr, other_arr))
assert_array_equal(orig_arr, other_arr)
other_arr[...] = 0
cast_back._simple_strided_call((str_arr_long, other_arr))
assert_array_equal(orig_arr, other_arr)
@pytest.mark.parametrize("other_dt", ["S8", "<U8", ">U8"])
@pytest.mark.parametrize("string_char", ["S", "U"])
def test_string_to_string_cancast(self, other_dt, string_char):
other_dt = np.dtype(other_dt)
fact = 1 if string_char == "S" else 4
div = 1 if other_dt.char == "S" else 4
string_DT = type(np.dtype(string_char))
cast = get_castingimpl(type(other_dt), string_DT)
expected_length = other_dt.itemsize // div
string_dt = np.dtype(f"{string_char}{expected_length}")
safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors(
(other_dt, None))
assert res_dt.itemsize == expected_length * fact
assert isinstance(res_dt, string_DT)
expected_view_off = None
if other_dt.char == string_char:
if other_dt.isnative:
expected_safety = Casting.no
expected_view_off = 0
else:
expected_safety = Casting.equiv
elif string_char == "U":
expected_safety = Casting.safe
else:
expected_safety = Casting.unsafe
assert view_off == expected_view_off
assert expected_safety == safety
for change_length in [-1, 0, 1]:
to_dt = self.string_with_modified_length(string_dt, change_length)
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(other_dt, to_dt))
assert res_dt is to_dt
if change_length <= 0:
assert view_off == expected_view_off
else:
assert view_off is None
if expected_safety == Casting.unsafe:
assert safety == expected_safety
elif change_length < 0:
assert safety == Casting.same_kind
elif change_length == 0:
assert safety == expected_safety
elif change_length > 0:
assert safety == Casting.safe
@pytest.mark.parametrize("order1", [">", "<"])
@pytest.mark.parametrize("order2", [">", "<"])
def test_unicode_byteswapped_cast(self, order1, order2):
        # Very specific tests (not using the castingimpl directly) that
        # exercise unicode byteswaps, including for unaligned array data.
dtype1 = np.dtype(f"{order1}U30")
dtype2 = np.dtype(f"{order2}U30")
data1 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype1)
data2 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype2)
if dtype1.alignment != 1:
# alignment should always be >1, but skip the check if not
assert not data1.flags.aligned
assert not data2.flags.aligned
element = "this is a ünicode string‽"
data1[()] = element
# Test both `data1` and `data1.copy()` (which should be aligned)
for data in [data1, data1.copy()]:
data2[...] = data1
assert data2[()] == element
assert data2.copy()[()] == element
def test_void_to_string_special_case(self):
# Cover a small special case in void to string casting that could
# probably just as well be turned into an error (compare
# `test_object_to_parametric_internal_error` below).
assert np.array([], dtype="V5").astype("S").dtype.itemsize == 5
assert np.array([], dtype="V5").astype("U").dtype.itemsize == 4 * 5
def test_object_to_parametric_internal_error(self):
# We reject casting from object to a parametric type, without
# figuring out the correct instance first.
object_dtype = type(np.dtype(object))
other_dtype = type(np.dtype(str))
cast = get_castingimpl(object_dtype, other_dtype)
with pytest.raises(TypeError,
match="casting from object to the parametric DType"):
cast._resolve_descriptors((np.dtype("O"), None))
@pytest.mark.parametrize("dtype", simple_dtype_instances())
def test_object_and_simple_resolution(self, dtype):
# Simple test to exercise the cast when no instance is specified
object_dtype = type(np.dtype(object))
cast = get_castingimpl(object_dtype, type(dtype))
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(np.dtype("O"), dtype))
assert safety == Casting.unsafe
assert view_off is None
assert res_dt is dtype
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(np.dtype("O"), None))
assert safety == Casting.unsafe
assert view_off is None
assert res_dt == dtype.newbyteorder("=")
@pytest.mark.parametrize("dtype", simple_dtype_instances())
def test_simple_to_object_resolution(self, dtype):
# Simple test to exercise the cast when no instance is specified
object_dtype = type(np.dtype(object))
cast = get_castingimpl(type(dtype), object_dtype)
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(dtype, None))
assert safety == Casting.safe
assert view_off is None
assert res_dt is np.dtype("O")
@pytest.mark.parametrize("casting", ["no", "unsafe"])
def test_void_and_structured_with_subarray(self, casting):
# test case corresponding to gh-19325
dtype = np.dtype([("foo", "<f4", (3, 2))])
expected = casting == "unsafe"
assert np.can_cast("V4", dtype, casting=casting) == expected
assert np.can_cast(dtype, "V4", casting=casting) == expected
@pytest.mark.parametrize(["to_dt", "expected_off"],
[ # Same as `from_dt` but with both fields shifted:
(np.dtype({"names": ["a", "b"], "formats": ["i4", "f4"],
"offsets": [0, 4]}), 2),
# Additional change of the names
(np.dtype({"names": ["b", "a"], "formats": ["i4", "f4"],
"offsets": [0, 4]}), 2),
# Incompatible field offset change
(np.dtype({"names": ["b", "a"], "formats": ["i4", "f4"],
"offsets": [0, 6]}), None)])
def test_structured_field_offsets(self, to_dt, expected_off):
# This checks the cast-safety and view offset for swapped and "shifted"
# fields which are viewable
from_dt = np.dtype({"names": ["a", "b"],
"formats": ["i4", "f4"],
"offsets": [2, 6]})
cast = get_castingimpl(type(from_dt), type(to_dt))
safety, _, view_off = cast._resolve_descriptors((from_dt, to_dt))
if from_dt.names == to_dt.names:
assert safety == Casting.equiv
else:
assert safety == Casting.safe
# Shifting the original data pointer by -2 will align both by
# effectively adding 2 bytes of spacing before `from_dt`.
assert view_off == expected_off
@pytest.mark.parametrize(("from_dt", "to_dt", "expected_off"), [
# Subarray cases:
("i", "(1,1)i", 0),
("(1,1)i", "i", 0),
("(2,1)i", "(2,1)i", 0),
# field cases (field to field is tested explicitly also):
# Not considered viewable, because a negative offset would allow
        # a structured dtype to indirectly access invalid memory.
("i", dict(names=["a"], formats=["i"], offsets=[2]), None),
(dict(names=["a"], formats=["i"], offsets=[2]), "i", 2),
# Currently considered not viewable, due to multiple fields
# even though they overlap (maybe we should not allow that?)
("i", dict(names=["a", "b"], formats=["i", "i"], offsets=[2, 2]),
None),
# different number of fields can't work, should probably just fail
# so it never reports "viewable":
("i,i", "i,i,i", None),
# Unstructured void cases:
("i4", "V3", 0), # void smaller or equal
("i4", "V4", 0), # void smaller or equal
("i4", "V10", None), # void is larger (no view)
("O", "V4", None), # currently reject objects for view here.
("O", "V8", None), # currently reject objects for view here.
("V4", "V3", 0),
("V4", "V4", 0),
("V3", "V4", None),
# Note that currently void-to-other cast goes via byte-strings
# and is not a "view" based cast like the opposite direction:
("V4", "i4", None),
# completely invalid/impossible cast:
("i,i", "i,i,i", None),
])
    def test_structured_view_offsets_parametric(
self, from_dt, to_dt, expected_off):
# TODO: While this test is fairly thorough, right now, it does not
# really test some paths that may have nonzero offsets (they don't
        # really exist).
from_dt = np.dtype(from_dt)
to_dt = np.dtype(to_dt)
cast = get_castingimpl(type(from_dt), type(to_dt))
_, _, view_off = cast._resolve_descriptors((from_dt, to_dt))
assert view_off == expected_off
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_object_casts_NULL_None_equivalence(self, dtype):
# None to <other> casts may succeed or fail, but a NULL'ed array must
# behave the same as one filled with None's.
arr_normal = np.array([None] * 5)
arr_NULLs = np.empty_like([None] * 5)
# If the check fails (maybe it should) the test would lose its purpose:
assert arr_NULLs.tobytes() == b"\x00" * arr_NULLs.nbytes
try:
expected = arr_normal.astype(dtype)
except TypeError:
with pytest.raises(TypeError):
                arr_NULLs.astype(dtype)
else:
assert_array_equal(expected, arr_NULLs.astype(dtype))
@pytest.mark.parametrize("dtype",
np.typecodes["AllInteger"] + np.typecodes["AllFloat"])
def test_nonstandard_bool_to_other(self, dtype):
# simple test for casting bool_ to numeric types, which should not
# expose the detail that NumPy bools can sometimes take values other
# than 0 and 1. See also gh-19514.
nonstandard_bools = np.array([0, 3, -7], dtype=np.int8).view(bool)
res = nonstandard_bools.astype(dtype)
expected = [0, 1, 1]
assert_array_equal(res, expected)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_arraymethod.py
"""
This file tests the generic aspects of ArrayMethod. At the time of writing
this is private API, but when added, public API may be added here.
"""
from __future__ import annotations
import sys
import types
from typing import Any
import pytest
import numpy as np
from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl
class TestResolveDescriptors:
# Test mainly error paths of the resolve_descriptors function,
    # note that the `casting_unittests` tests exercise the non-error paths.
# Casting implementations are the main/only current user:
method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f")))
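    # For contrast with the error cases below, a valid call looks like this
    # (sketch):
    #
    #     casting, (from_dt, to_dt), view_off = \
    #         method._resolve_descriptors((np.dtype("d"), np.dtype("f")))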
@pytest.mark.parametrize("args", [
(True,), # Not a tuple.
((None,)), # Too few elements
((None, None, None),), # Too many
((None, None),), # Input dtype is None, which is invalid.
((np.dtype("d"), True),), # Output dtype is not a dtype
((np.dtype("f"), None),), # Input dtype does not match method
])
def test_invalid_arguments(self, args):
with pytest.raises(TypeError):
self.method._resolve_descriptors(*args)
class TestSimpleStridedCall:
    # Test mainly error paths of the simple_strided_call function;
    # note that the `casting_unittests` tests exercise the non-error paths.
# Casting implementations are the main/only current user:
method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f")))
@pytest.mark.parametrize(["args", "error"], [
((True,), TypeError), # Not a tuple
(((None,),), TypeError), # Too few elements
((None, None), TypeError), # Inputs are not arrays.
(((None, None, None),), TypeError), # Too many
(((np.arange(3), np.arange(3)),), TypeError), # Incorrect dtypes
(((np.ones(3, dtype=">d"), np.ones(3, dtype="<f")),),
TypeError), # Does not support byte-swapping
(((np.ones((2, 2), dtype="d"), np.ones((2, 2), dtype="f")),),
ValueError), # not 1-D
(((np.ones(3, dtype="d"), np.ones(4, dtype="f")),),
ValueError), # different length
        (((np.frombuffer(b"\x00" * 8 * 3, dtype="d"),
           np.frombuffer(b"\x00" * 4 * 3, dtype="f")),),
ValueError), # output not writeable
])
def test_invalid_arguments(self, args, error):
# This is private API, which may be modified freely
with pytest.raises(error):
self.method._simple_strided_call(*args)
@pytest.mark.skipif(sys.version_info < (3, 9), reason="Requires python 3.9")
@pytest.mark.parametrize(
"cls", [np.ndarray, np.recarray, np.chararray, np.matrix, np.memmap]
)
class TestClassGetItem:
def test_class_getitem(self, cls: type[np.ndarray]) -> None:
"""Test `ndarray.__class_getitem__`."""
alias = cls[Any, Any]
assert isinstance(alias, types.GenericAlias)
assert alias.__origin__ is cls
@pytest.mark.parametrize("arg_len", range(4))
def test_subscript_tup(self, cls: type[np.ndarray], arg_len: int) -> None:
arg_tup = (Any,) * arg_len
if arg_len in (1, 2):
assert cls[arg_tup]
else:
match = f"Too {'few' if arg_len == 0 else 'many'} arguments"
with pytest.raises(TypeError, match=match):
cls[arg_tup]
@pytest.mark.skipif(sys.version_info >= (3, 9), reason="Requires python 3.8")
def test_class_getitem_38() -> None:
match = "Type subscription requires python >= 3.9"
with pytest.raises(TypeError, match=match):
np.ndarray[Any, Any]
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/_locales.py
"""Provide class for testing in French locale
"""
import sys
import locale
import pytest
__ALL__ = ['CommaDecimalPointLocale']
def find_comma_decimal_point_locale():
"""See if platform has a decimal point as comma locale.
Find a locale that uses a comma instead of a period as the
decimal point.
Returns
-------
old_locale: str
Locale when the function was called.
    new_locale: {str, None}
First French locale found, None if none found.
"""
if sys.platform == 'win32':
locales = ['FRENCH']
else:
locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8']
old_locale = locale.getlocale(locale.LC_NUMERIC)
new_locale = None
try:
for loc in locales:
try:
locale.setlocale(locale.LC_NUMERIC, loc)
new_locale = loc
break
except locale.Error:
pass
finally:
locale.setlocale(locale.LC_NUMERIC, locale=old_locale)
return old_locale, new_locale
class CommaDecimalPointLocale:
"""Sets LC_NUMERIC to a locale with comma as decimal point.
Classes derived from this class have setup and teardown methods that run
tests with locale.LC_NUMERIC set to a locale where commas (',') are used as
the decimal point instead of periods ('.'). On exit the locale is restored
to the initial locale. It also serves as context manager with the same
effect. If no such locale is available, the test is skipped.
.. versionadded:: 1.15.0
"""
(cur_locale, tst_locale) = find_comma_decimal_point_locale()
def setup(self):
if self.tst_locale is None:
pytest.skip("No French locale available")
locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)
def teardown(self):
locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)
def __enter__(self):
if self.tst_locale is None:
pytest.skip("No French locale available")
locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)
def __exit__(self, type, value, traceback):
locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_simd_module.py
import pytest
from numpy.core._simd import targets
"""
This testing unit only checks the sanity of common functionality, so it is
enough to take one submodule representing any enabled SIMD extension and
run the tests on it. A second submodule is required only for the single
check related to mixing data types across submodules.
"""
npyvs = [npyv_mod for npyv_mod in targets.values() if npyv_mod and npyv_mod.simd]
npyv, npyv2 = (npyvs + [None, None])[:2]
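# The intrinsic call style exercised below looks like this (sketch; requires
# a build with at least one SIMD target enabled):
#
#     v = npyv.setall_u32(1)   # broadcast 1 to all lanes
#     w = npyv.add_u32(v, v)   # lane-wise addition
#     assert list(w) == [2] * npyv.nlanes_u32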
unsigned_sfx = ["u8", "u16", "u32", "u64"]
signed_sfx = ["s8", "s16", "s32", "s64"]
fp_sfx = ["f32"]
if npyv and npyv.simd_f64:
fp_sfx.append("f64")
int_sfx = unsigned_sfx + signed_sfx
all_sfx = int_sfx  # unsigned_sfx is already included in int_sfx
@pytest.mark.skipif(not npyv, reason="could not find any SIMD extension with NPYV support")
class Test_SIMD_MODULE:
@pytest.mark.parametrize('sfx', all_sfx)
def test_num_lanes(self, sfx):
nlanes = getattr(npyv, "nlanes_" + sfx)
vector = getattr(npyv, "setall_" + sfx)(1)
assert len(vector) == nlanes
@pytest.mark.parametrize('sfx', all_sfx)
def test_type_name(self, sfx):
vector = getattr(npyv, "setall_" + sfx)(1)
assert vector.__name__ == "npyv_" + sfx
def test_raises(self):
a, b = [npyv.setall_u32(1)]*2
for sfx in all_sfx:
vcb = lambda intrin: getattr(npyv, f"{intrin}_{sfx}")
pytest.raises(TypeError, vcb("add"), a)
pytest.raises(TypeError, vcb("add"), a, b, a)
pytest.raises(TypeError, vcb("setall"))
pytest.raises(TypeError, vcb("setall"), [1])
pytest.raises(TypeError, vcb("load"), 1)
pytest.raises(ValueError, vcb("load"), [1])
pytest.raises(ValueError, vcb("store"), [1], getattr(npyv, f"reinterpret_{sfx}_u32")(a))
@pytest.mark.skipif(not npyv2, reason=(
"could not find a second SIMD extension with NPYV support"
))
def test_nomix(self):
# mix among submodules isn't allowed
a = npyv.setall_u32(1)
a2 = npyv2.setall_u32(1)
pytest.raises(TypeError, npyv.add_u32, a2, a2)
pytest.raises(TypeError, npyv2.add_u32, a, a)
@pytest.mark.parametrize('sfx', unsigned_sfx)
def test_unsigned_overflow(self, sfx):
nlanes = getattr(npyv, "nlanes_" + sfx)
maxu = (1 << int(sfx[1:])) - 1
maxu_72 = (1 << 72) - 1
lane = getattr(npyv, "setall_" + sfx)(maxu_72)[0]
assert lane == maxu
lanes = getattr(npyv, "load_" + sfx)([maxu_72] * nlanes)
assert lanes == [maxu] * nlanes
lane = getattr(npyv, "setall_" + sfx)(-1)[0]
assert lane == maxu
lanes = getattr(npyv, "load_" + sfx)([-1] * nlanes)
assert lanes == [maxu] * nlanes
@pytest.mark.parametrize('sfx', signed_sfx)
def test_signed_overflow(self, sfx):
nlanes = getattr(npyv, "nlanes_" + sfx)
maxs_72 = (1 << 71) - 1
lane = getattr(npyv, "setall_" + sfx)(maxs_72)[0]
assert lane == -1
lanes = getattr(npyv, "load_" + sfx)([maxs_72] * nlanes)
assert lanes == [-1] * nlanes
mins_72 = -1 << 71
lane = getattr(npyv, "setall_" + sfx)(mins_72)[0]
assert lane == 0
lanes = getattr(npyv, "load_" + sfx)([mins_72] * nlanes)
assert lanes == [0] * nlanes
def test_truncate_f32(self):
f32 = npyv.setall_f32(0.1)[0]
assert f32 != 0.1
assert round(f32, 1) == 0.1
def test_compare(self):
data_range = range(0, npyv.nlanes_u32)
vdata = npyv.load_u32(data_range)
assert vdata == list(data_range)
assert vdata == tuple(data_range)
for i in data_range:
assert vdata[i] == data_range[i]
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_conversion_utils.py
"""
Tests for numpy/core/src/multiarray/conversion_utils.c
"""
import re
import sys
import pytest
import numpy as np
import numpy.core._multiarray_tests as mt
from numpy.testing import assert_warns, IS_PYPY
class StringConverterTestCase:
allow_bytes = True
case_insensitive = True
exact_match = False
warn = True
def _check_value_error(self, val):
pattern = r'\(got {}\)'.format(re.escape(repr(val)))
with pytest.raises(ValueError, match=pattern) as exc:
self.conv(val)
def _check_conv_assert_warn(self, val, expected):
if self.warn:
with assert_warns(DeprecationWarning) as exc:
assert self.conv(val) == expected
else:
assert self.conv(val) == expected
def _check(self, val, expected):
"""Takes valid non-deprecated inputs for converters,
runs converters on inputs, checks correctness of outputs,
warnings and errors"""
assert self.conv(val) == expected
if self.allow_bytes:
assert self.conv(val.encode('ascii')) == expected
else:
with pytest.raises(TypeError):
self.conv(val.encode('ascii'))
if len(val) != 1:
if self.exact_match:
self._check_value_error(val[:1])
self._check_value_error(val + '\0')
else:
self._check_conv_assert_warn(val[:1], expected)
if self.case_insensitive:
if val != val.lower():
self._check_conv_assert_warn(val.lower(), expected)
if val != val.upper():
self._check_conv_assert_warn(val.upper(), expected)
else:
if val != val.lower():
self._check_value_error(val.lower())
if val != val.upper():
self._check_value_error(val.upper())
def test_wrong_type(self):
# common cases which apply to all the below
with pytest.raises(TypeError):
self.conv({})
with pytest.raises(TypeError):
self.conv([])
def test_wrong_value(self):
# nonsense strings
self._check_value_error('')
self._check_value_error('\N{greek small letter pi}')
if self.allow_bytes:
self._check_value_error(b'')
# bytes which can't be converted to strings via utf8
self._check_value_error(b"\xFF")
if self.exact_match:
self._check_value_error("there's no way this is supported")
class TestByteorderConverter(StringConverterTestCase):
""" Tests of PyArray_ByteorderConverter """
conv = mt.run_byteorder_converter
warn = False
def test_valid(self):
for s in ['big', '>']:
self._check(s, 'NPY_BIG')
for s in ['little', '<']:
self._check(s, 'NPY_LITTLE')
for s in ['native', '=']:
self._check(s, 'NPY_NATIVE')
for s in ['ignore', '|']:
self._check(s, 'NPY_IGNORE')
for s in ['swap']:
self._check(s, 'NPY_SWAP')
class TestSortkindConverter(StringConverterTestCase):
""" Tests of PyArray_SortkindConverter """
conv = mt.run_sortkind_converter
warn = False
def test_valid(self):
self._check('quicksort', 'NPY_QUICKSORT')
self._check('heapsort', 'NPY_HEAPSORT')
self._check('mergesort', 'NPY_STABLESORT') # alias
self._check('stable', 'NPY_STABLESORT')
class TestSelectkindConverter(StringConverterTestCase):
""" Tests of PyArray_SelectkindConverter """
conv = mt.run_selectkind_converter
case_insensitive = False
exact_match = True
def test_valid(self):
self._check('introselect', 'NPY_INTROSELECT')
class TestSearchsideConverter(StringConverterTestCase):
""" Tests of PyArray_SearchsideConverter """
conv = mt.run_searchside_converter
def test_valid(self):
self._check('left', 'NPY_SEARCHLEFT')
self._check('right', 'NPY_SEARCHRIGHT')
class TestOrderConverter(StringConverterTestCase):
""" Tests of PyArray_OrderConverter """
conv = mt.run_order_converter
warn = False
def test_valid(self):
self._check('c', 'NPY_CORDER')
self._check('f', 'NPY_FORTRANORDER')
self._check('a', 'NPY_ANYORDER')
self._check('k', 'NPY_KEEPORDER')
def test_flatten_invalid_order(self):
# invalid after gh-14596
with pytest.raises(ValueError):
self.conv('Z')
for order in [False, True, 0, 8]:
with pytest.raises(TypeError):
self.conv(order)
class TestClipmodeConverter(StringConverterTestCase):
""" Tests of PyArray_ClipmodeConverter """
conv = mt.run_clipmode_converter
def test_valid(self):
self._check('clip', 'NPY_CLIP')
self._check('wrap', 'NPY_WRAP')
self._check('raise', 'NPY_RAISE')
# integer values allowed here
assert self.conv(np.CLIP) == 'NPY_CLIP'
assert self.conv(np.WRAP) == 'NPY_WRAP'
assert self.conv(np.RAISE) == 'NPY_RAISE'
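        # Illustrative sketch (assumption: standard NumPy API): the same clip
        # modes are accepted by e.g. np.take for out-of-range indices.
        arr = np.arange(3)
        assert list(np.take(arr, [0, 5], mode='clip')) == [0, 2]  # 5 -> last index
        assert list(np.take(arr, [0, 5], mode='wrap')) == [0, 2]  # 5 % 3 == 2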
class TestCastingConverter(StringConverterTestCase):
""" Tests of PyArray_CastingConverter """
conv = mt.run_casting_converter
case_insensitive = False
exact_match = True
def test_valid(self):
self._check("no", "NPY_NO_CASTING")
self._check("equiv", "NPY_EQUIV_CASTING")
self._check("safe", "NPY_SAFE_CASTING")
self._check("same_kind", "NPY_SAME_KIND_CASTING")
self._check("unsafe", "NPY_UNSAFE_CASTING")
class TestIntpConverter:
""" Tests of PyArray_IntpConverter """
conv = mt.run_intp_converter
def test_basic(self):
assert self.conv(1) == (1,)
assert self.conv((1, 2)) == (1, 2)
assert self.conv([1, 2]) == (1, 2)
assert self.conv(()) == ()
def test_none(self):
# once the warning expires, this will raise TypeError
with pytest.warns(DeprecationWarning):
assert self.conv(None) == ()
@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
reason="PyPy bug in error formatting")
def test_float(self):
with pytest.raises(TypeError):
self.conv(1.0)
with pytest.raises(TypeError):
self.conv([1, 1.0])
def test_too_large(self):
with pytest.raises(ValueError):
self.conv(2**64)
def test_too_many_dims(self):
assert self.conv([1]*32) == (1,)*32
with pytest.raises(ValueError):
self.conv([1]*33)
| 6,559 | Python | 30.38756 | 76 | 0.589267 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_getlimits.py | """ Test functions for limits module.
"""
import warnings
import numpy as np
from numpy.core import finfo, iinfo
from numpy import half, single, double, longdouble
from numpy.testing import assert_equal, assert_, assert_raises
from numpy.core.getlimits import _discovered_machar, _float_ma
##################################################
class TestPythonFloat:
def test_singleton(self):
ftype = finfo(float)
ftype2 = finfo(float)
assert_equal(id(ftype), id(ftype2))
class TestHalf:
def test_singleton(self):
ftype = finfo(half)
ftype2 = finfo(half)
assert_equal(id(ftype), id(ftype2))
class TestSingle:
def test_singleton(self):
ftype = finfo(single)
ftype2 = finfo(single)
assert_equal(id(ftype), id(ftype2))
class TestDouble:
def test_singleton(self):
ftype = finfo(double)
ftype2 = finfo(double)
assert_equal(id(ftype), id(ftype2))
class TestLongdouble:
def test_singleton(self):
ftype = finfo(longdouble)
ftype2 = finfo(longdouble)
assert_equal(id(ftype), id(ftype2))
class TestFinfo:
def test_basic(self):
dts = list(zip(['f2', 'f4', 'f8', 'c8', 'c16'],
[np.float16, np.float32, np.float64, np.complex64,
np.complex128]))
for dt1, dt2 in dts:
for attr in ('bits', 'eps', 'epsneg', 'iexp', 'machep',
'max', 'maxexp', 'min', 'minexp', 'negep', 'nexp',
'nmant', 'precision', 'resolution', 'tiny',
'smallest_normal', 'smallest_subnormal'):
assert_equal(getattr(finfo(dt1), attr),
getattr(finfo(dt2), attr), attr)
assert_raises(ValueError, finfo, 'i4')
class TestIinfo:
def test_basic(self):
dts = list(zip(['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8'],
[np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64]))
for dt1, dt2 in dts:
for attr in ('bits', 'min', 'max'):
assert_equal(getattr(iinfo(dt1), attr),
getattr(iinfo(dt2), attr), attr)
assert_raises(ValueError, iinfo, 'f4')
def test_unsigned_max(self):
types = np.sctypes['uint']
for T in types:
assert_equal(iinfo(T).max, T(-1))
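        # Illustrative sketch (not part of the upstream suite): T(-1) wraps
        # around to the maximum, which for an n-bit unsigned type is 2**n - 1.
        assert_equal(iinfo(np.uint8).max, 2**8 - 1)
        assert_equal(iinfo(np.uint64).max, 2**64 - 1)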
class TestRepr:
def test_iinfo_repr(self):
expected = "iinfo(min=-32768, max=32767, dtype=int16)"
assert_equal(repr(np.iinfo(np.int16)), expected)
def test_finfo_repr(self):
expected = "finfo(resolution=1e-06, min=-3.4028235e+38," + \
" max=3.4028235e+38, dtype=float32)"
assert_equal(repr(np.finfo(np.float32)), expected)
def test_instances():
iinfo(10)
finfo(3.0)
def assert_ma_equal(discovered, ma_like):
    # Check that MachAr-like objects match the calculated MachAr instances
for key, value in discovered.__dict__.items():
assert_equal(value, getattr(ma_like, key))
if hasattr(value, 'shape'):
assert_equal(value.shape, getattr(ma_like, key).shape)
assert_equal(value.dtype, getattr(ma_like, key).dtype)
def test_known_types():
# Test we are correctly compiling parameters for known types
for ftype, ma_like in ((np.float16, _float_ma[16]),
(np.float32, _float_ma[32]),
(np.float64, _float_ma[64])):
assert_ma_equal(_discovered_machar(ftype), ma_like)
# Suppress warning for broken discovery of double double on PPC
with np.errstate(all='ignore'):
ld_ma = _discovered_machar(np.longdouble)
bytes = np.dtype(np.longdouble).itemsize
if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16):
# 80-bit extended precision
assert_ma_equal(ld_ma, _float_ma[80])
elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16:
        # IEEE 754 128-bit
assert_ma_equal(ld_ma, _float_ma[128])
def test_subnormal_warning():
"""Test that the subnormal is zero warning is not being raised."""
with np.errstate(all='ignore'):
ld_ma = _discovered_machar(np.longdouble)
bytes = np.dtype(np.longdouble).itemsize
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16):
# 80-bit extended precision
ld_ma.smallest_subnormal
assert len(w) == 0
elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16:
            # IEEE 754 128-bit
ld_ma.smallest_subnormal
assert len(w) == 0
else:
# Double double
ld_ma.smallest_subnormal
# This test may fail on some platforms
assert len(w) == 0
def test_plausible_finfo():
# Assert that finfo returns reasonable results for all types
for ftype in np.sctypes['float'] + np.sctypes['complex']:
info = np.finfo(ftype)
assert_(info.nmant > 1)
assert_(info.minexp < -1)
assert_(info.maxexp > 1)
| 5,207 | Python | 34.671233 | 75 | 0.571346 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_cpu_dispatcher.py | from numpy.core._multiarray_umath import __cpu_features__, __cpu_baseline__, __cpu_dispatch__
from numpy.core import _umath_tests
from numpy.testing import assert_equal
def test_dispatcher():
"""
Testing the utilities of the CPU dispatcher
"""
targets = (
"SSE2", "SSE41", "AVX2",
"VSX", "VSX2", "VSX3",
"NEON", "ASIMD", "ASIMDHP"
)
highest_sfx = "" # no suffix for the baseline
all_sfx = []
for feature in reversed(targets):
        # skip baseline features: by default `CCompilerOpt` does not generate
        # separate objects for the baseline; it combines all of them into one
        # object via the 'baseline' option within the configuration statements.
if feature in __cpu_baseline__:
continue
# check compiler and running machine support
if feature not in __cpu_dispatch__ or not __cpu_features__[feature]:
continue
if not highest_sfx:
highest_sfx = "_" + feature
all_sfx.append("func" + "_" + feature)
test = _umath_tests.test_dispatch()
assert_equal(test["func"], "func" + highest_sfx)
assert_equal(test["var"], "var" + highest_sfx)
if highest_sfx:
assert_equal(test["func_xb"], "func" + highest_sfx)
assert_equal(test["var_xb"], "var" + highest_sfx)
else:
assert_equal(test["func_xb"], "nobase")
assert_equal(test["var_xb"], "nobase")
all_sfx.append("func") # add the baseline
assert_equal(test["all"], all_sfx)
| 1,521 | Python | 34.395348 | 97 | 0.604865 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_unicode.py | import numpy as np
from numpy.testing import assert_, assert_equal, assert_array_equal
def buffer_length(arr):
if isinstance(arr, str):
if not arr:
charmax = 0
else:
charmax = max([ord(c) for c in arr])
if charmax < 256:
size = 1
elif charmax < 65536:
size = 2
else:
size = 4
return size * len(arr)
v = memoryview(arr)
if v.shape is None:
return len(v) * v.itemsize
else:
return np.prod(v.shape) * v.itemsize
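# Illustrative sketch (not part of the upstream suite): NumPy stores unicode
# arrays as UCS4, so each character occupies 4 bytes in the array buffer,
# independent of the interpreter's internal str representation.
assert buffer_length(np.array(u'ab', dtype='U2')) == 8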
# In both cases below we need to make sure that the byte swapped value (as
# UCS4) is still a valid unicode:
# Value that can be represented in UCS2 interpreters
ucs2_value = u'\u0900'
# Value that cannot be represented in UCS2 interpreters (but can in UCS4)
ucs4_value = u'\U00100900'
def test_string_cast():
str_arr = np.array(["1234", "1234\0\0"], dtype='S')
uni_arr1 = str_arr.astype('>U')
uni_arr2 = str_arr.astype('<U')
assert_(str_arr != uni_arr1)
assert_(str_arr != uni_arr2)
assert_array_equal(uni_arr1, uni_arr2)
############################################################
# Creation tests
############################################################
class CreateZeros:
"""Check the creation of zero-valued arrays"""
def content_check(self, ua, ua_scalar, nbytes):
# Check the length of the unicode base type
assert_(int(ua.dtype.str[2:]) == self.ulen)
# Check the length of the data buffer
assert_(buffer_length(ua) == nbytes)
# Small check that data in array element is ok
assert_(ua_scalar == u'')
# Encode to ascii and double check
assert_(ua_scalar.encode('ascii') == b'')
# Check buffer lengths for scalars
assert_(buffer_length(ua_scalar) == 0)
def test_zeros0D(self):
# Check creation of 0-dimensional objects
ua = np.zeros((), dtype='U%s' % self.ulen)
self.content_check(ua, ua[()], 4*self.ulen)
def test_zerosSD(self):
# Check creation of single-dimensional objects
ua = np.zeros((2,), dtype='U%s' % self.ulen)
self.content_check(ua, ua[0], 4*self.ulen*2)
self.content_check(ua, ua[1], 4*self.ulen*2)
def test_zerosMD(self):
# Check creation of multi-dimensional objects
ua = np.zeros((2, 3, 4), dtype='U%s' % self.ulen)
self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4)
self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
class TestCreateZeros_1(CreateZeros):
"""Check the creation of zero-valued arrays (size 1)"""
ulen = 1
class TestCreateZeros_2(CreateZeros):
"""Check the creation of zero-valued arrays (size 2)"""
ulen = 2
class TestCreateZeros_1009(CreateZeros):
"""Check the creation of zero-valued arrays (size 1009)"""
ulen = 1009
class CreateValues:
"""Check the creation of unicode arrays with values"""
def content_check(self, ua, ua_scalar, nbytes):
# Check the length of the unicode base type
assert_(int(ua.dtype.str[2:]) == self.ulen)
# Check the length of the data buffer
assert_(buffer_length(ua) == nbytes)
# Small check that data in array element is ok
assert_(ua_scalar == self.ucs_value*self.ulen)
# Encode to UTF-8 and double check
assert_(ua_scalar.encode('utf-8') ==
(self.ucs_value*self.ulen).encode('utf-8'))
# Check buffer lengths for scalars
if self.ucs_value == ucs4_value:
            # In a UCS2 interpreter, a non-BMP code point such as
            # \U00100900 is represented using a surrogate *pair*
assert_(buffer_length(ua_scalar) == 2*2*self.ulen)
else:
            # In a UCS2 interpreter, a BMP code point such as \u0900
            # is represented using a regular 2-byte word
assert_(buffer_length(ua_scalar) == 2*self.ulen)
def test_values0D(self):
# Check creation of 0-dimensional objects with values
ua = np.array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen)
self.content_check(ua, ua[()], 4*self.ulen)
def test_valuesSD(self):
# Check creation of single-dimensional objects with values
ua = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
self.content_check(ua, ua[0], 4*self.ulen*2)
self.content_check(ua, ua[1], 4*self.ulen*2)
def test_valuesMD(self):
# Check creation of multi-dimensional objects with values
ua = np.array([[[self.ucs_value*self.ulen]*2]*3]*4, dtype='U%s' % self.ulen)
self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4)
self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
class TestCreateValues_1_UCS2(CreateValues):
"""Check the creation of valued arrays (size 1, UCS2 values)"""
ulen = 1
ucs_value = ucs2_value
class TestCreateValues_1_UCS4(CreateValues):
"""Check the creation of valued arrays (size 1, UCS4 values)"""
ulen = 1
ucs_value = ucs4_value
class TestCreateValues_2_UCS2(CreateValues):
"""Check the creation of valued arrays (size 2, UCS2 values)"""
ulen = 2
ucs_value = ucs2_value
class TestCreateValues_2_UCS4(CreateValues):
"""Check the creation of valued arrays (size 2, UCS4 values)"""
ulen = 2
ucs_value = ucs4_value
class TestCreateValues_1009_UCS2(CreateValues):
"""Check the creation of valued arrays (size 1009, UCS2 values)"""
ulen = 1009
ucs_value = ucs2_value
class TestCreateValues_1009_UCS4(CreateValues):
"""Check the creation of valued arrays (size 1009, UCS4 values)"""
ulen = 1009
ucs_value = ucs4_value
############################################################
# Assignment tests
############################################################
class AssignValues:
"""Check the assignment of unicode arrays with values"""
def content_check(self, ua, ua_scalar, nbytes):
# Check the length of the unicode base type
assert_(int(ua.dtype.str[2:]) == self.ulen)
# Check the length of the data buffer
assert_(buffer_length(ua) == nbytes)
# Small check that data in array element is ok
assert_(ua_scalar == self.ucs_value*self.ulen)
# Encode to UTF-8 and double check
assert_(ua_scalar.encode('utf-8') ==
(self.ucs_value*self.ulen).encode('utf-8'))
# Check buffer lengths for scalars
if self.ucs_value == ucs4_value:
            # In a UCS2 interpreter, a non-BMP code point such as
            # \U00100900 is represented using a surrogate *pair*
assert_(buffer_length(ua_scalar) == 2*2*self.ulen)
else:
            # In a UCS2 interpreter, a BMP code point such as \u0900
            # is represented using a regular 2-byte word
assert_(buffer_length(ua_scalar) == 2*self.ulen)
def test_values0D(self):
# Check assignment of 0-dimensional objects with values
ua = np.zeros((), dtype='U%s' % self.ulen)
ua[()] = self.ucs_value*self.ulen
self.content_check(ua, ua[()], 4*self.ulen)
def test_valuesSD(self):
# Check assignment of single-dimensional objects with values
ua = np.zeros((2,), dtype='U%s' % self.ulen)
ua[0] = self.ucs_value*self.ulen
self.content_check(ua, ua[0], 4*self.ulen*2)
ua[1] = self.ucs_value*self.ulen
self.content_check(ua, ua[1], 4*self.ulen*2)
def test_valuesMD(self):
# Check assignment of multi-dimensional objects with values
ua = np.zeros((2, 3, 4), dtype='U%s' % self.ulen)
ua[0, 0, 0] = self.ucs_value*self.ulen
self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4)
ua[-1, -1, -1] = self.ucs_value*self.ulen
self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
class TestAssignValues_1_UCS2(AssignValues):
"""Check the assignment of valued arrays (size 1, UCS2 values)"""
ulen = 1
ucs_value = ucs2_value
class TestAssignValues_1_UCS4(AssignValues):
"""Check the assignment of valued arrays (size 1, UCS4 values)"""
ulen = 1
ucs_value = ucs4_value
class TestAssignValues_2_UCS2(AssignValues):
"""Check the assignment of valued arrays (size 2, UCS2 values)"""
ulen = 2
ucs_value = ucs2_value
class TestAssignValues_2_UCS4(AssignValues):
"""Check the assignment of valued arrays (size 2, UCS4 values)"""
ulen = 2
ucs_value = ucs4_value
class TestAssignValues_1009_UCS2(AssignValues):
"""Check the assignment of valued arrays (size 1009, UCS2 values)"""
ulen = 1009
ucs_value = ucs2_value
class TestAssignValues_1009_UCS4(AssignValues):
"""Check the assignment of valued arrays (size 1009, UCS4 values)"""
ulen = 1009
ucs_value = ucs4_value
############################################################
# Byteorder tests
############################################################
class ByteorderValues:
"""Check the byteorder of unicode arrays in round-trip conversions"""
def test_values0D(self):
# Check byteorder of 0-dimensional objects
ua = np.array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen)
ua2 = ua.newbyteorder()
# This changes the interpretation of the data region (but not the
# actual data), therefore the returned scalars are not
# the same (they are byte-swapped versions of each other).
assert_(ua[()] != ua2[()])
ua3 = ua2.newbyteorder()
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
def test_valuesSD(self):
# Check byteorder of single-dimensional objects
ua = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
ua2 = ua.newbyteorder()
assert_((ua != ua2).all())
assert_(ua[-1] != ua2[-1])
ua3 = ua2.newbyteorder()
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
def test_valuesMD(self):
# Check byteorder of multi-dimensional objects
ua = np.array([[[self.ucs_value*self.ulen]*2]*3]*4,
dtype='U%s' % self.ulen)
ua2 = ua.newbyteorder()
assert_((ua != ua2).all())
assert_(ua[-1, -1, -1] != ua2[-1, -1, -1])
ua3 = ua2.newbyteorder()
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
def test_values_cast(self):
        # Check byteorder when casting the array, for a strided and a
        # contiguous array:
test1 = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
test2 = np.repeat(test1, 2)[::2]
for ua in (test1, test2):
ua2 = ua.astype(dtype=ua.dtype.newbyteorder())
assert_((ua == ua2).all())
assert_(ua[-1] == ua2[-1])
ua3 = ua2.astype(dtype=ua.dtype)
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
def test_values_updowncast(self):
        # Check byteorder when casting the array to longer and shorter
        # string lengths, for strided and contiguous arrays
test1 = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
test2 = np.repeat(test1, 2)[::2]
for ua in (test1, test2):
# Cast to a longer type with zero padding
longer_type = np.dtype('U%s' % (self.ulen+1)).newbyteorder()
ua2 = ua.astype(dtype=longer_type)
assert_((ua == ua2).all())
assert_(ua[-1] == ua2[-1])
# Cast back again with truncating:
ua3 = ua2.astype(dtype=ua.dtype)
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
class TestByteorder_1_UCS2(ByteorderValues):
"""Check the byteorder in unicode (size 1, UCS2 values)"""
ulen = 1
ucs_value = ucs2_value
class TestByteorder_1_UCS4(ByteorderValues):
"""Check the byteorder in unicode (size 1, UCS4 values)"""
ulen = 1
ucs_value = ucs4_value
class TestByteorder_2_UCS2(ByteorderValues):
"""Check the byteorder in unicode (size 2, UCS2 values)"""
ulen = 2
ucs_value = ucs2_value
class TestByteorder_2_UCS4(ByteorderValues):
"""Check the byteorder in unicode (size 2, UCS4 values)"""
ulen = 2
ucs_value = ucs4_value
class TestByteorder_1009_UCS2(ByteorderValues):
"""Check the byteorder in unicode (size 1009, UCS2 values)"""
ulen = 1009
ucs_value = ucs2_value
class TestByteorder_1009_UCS4(ByteorderValues):
"""Check the byteorder in unicode (size 1009, UCS4 values)"""
ulen = 1009
ucs_value = ucs4_value
| 12,553 | Python | 33.584022 | 84 | 0.594201 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_custom_dtypes.py | import pytest
import numpy as np
from numpy.testing import assert_array_equal
from numpy.core._multiarray_umath import (
_discover_array_parameters as discover_array_params, _get_sfloat_dtype)
SF = _get_sfloat_dtype()
class TestSFloat:
def _get_array(self, scaling, aligned=True):
if not aligned:
a = np.empty(3*8 + 1, dtype=np.uint8)[1:]
a = a.view(np.float64)
a[:] = [1., 2., 3.]
else:
a = np.array([1., 2., 3.])
a *= 1./scaling # the casting code also uses the reciprocal.
return a.view(SF(scaling))
def test_sfloat_rescaled(self):
sf = SF(1.)
sf2 = sf.scaled_by(2.)
assert sf2.get_scaling() == 2.
sf6 = sf2.scaled_by(3.)
assert sf6.get_scaling() == 6.
def test_class_discovery(self):
        # This does not test much, since we always discover the scaling as 1.
        # But most of NumPy (at the time of writing) does not understand
        # DType classes
dt, _ = discover_array_params([1., 2., 3.], dtype=SF)
assert dt == SF(1.)
@pytest.mark.parametrize("scaling", [1., -1., 2.])
def test_scaled_float_from_floats(self, scaling):
a = np.array([1., 2., 3.], dtype=SF(scaling))
assert a.dtype.get_scaling() == scaling
assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.])
def test_repr(self):
# Check the repr, mainly to cover the code paths:
assert repr(SF(scaling=1.)) == "_ScaledFloatTestDType(scaling=1.0)"
@pytest.mark.parametrize("scaling", [1., -1., 2.])
def test_sfloat_from_float(self, scaling):
a = np.array([1., 2., 3.]).astype(dtype=SF(scaling))
assert a.dtype.get_scaling() == scaling
assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.])
@pytest.mark.parametrize("aligned", [True, False])
@pytest.mark.parametrize("scaling", [1., -1., 2.])
def test_sfloat_getitem(self, aligned, scaling):
a = self._get_array(1., aligned)
assert a.tolist() == [1., 2., 3.]
@pytest.mark.parametrize("aligned", [True, False])
def test_sfloat_casts(self, aligned):
a = self._get_array(1., aligned)
assert np.can_cast(a, SF(-1.), casting="equiv")
assert not np.can_cast(a, SF(-1.), casting="no")
na = a.astype(SF(-1.))
assert_array_equal(-1 * na.view(np.float64), a.view(np.float64))
assert np.can_cast(a, SF(2.), casting="same_kind")
assert not np.can_cast(a, SF(2.), casting="safe")
a2 = a.astype(SF(2.))
assert_array_equal(2 * a2.view(np.float64), a.view(np.float64))
@pytest.mark.parametrize("aligned", [True, False])
def test_sfloat_cast_internal_errors(self, aligned):
a = self._get_array(2e300, aligned)
with pytest.raises(TypeError,
match="error raised inside the core-loop: non-finite factor!"):
a.astype(SF(2e-300))
def test_sfloat_promotion(self):
assert np.result_type(SF(2.), SF(3.)) == SF(3.)
assert np.result_type(SF(3.), SF(2.)) == SF(3.)
# Float64 -> SF(1.) and then promotes normally, so both of this work:
assert np.result_type(SF(3.), np.float64) == SF(3.)
assert np.result_type(np.float64, SF(0.5)) == SF(1.)
# Test an undefined promotion:
with pytest.raises(TypeError):
np.result_type(SF(1.), np.int64)
def test_basic_multiply(self):
a = self._get_array(2.)
b = self._get_array(4.)
res = a * b
# multiplies dtype scaling and content separately:
assert res.dtype.get_scaling() == 8.
expected_view = a.view(np.float64) * b.view(np.float64)
assert_array_equal(res.view(np.float64), expected_view)
def test_possible_and_impossible_reduce(self):
# For reductions to work, the first and last operand must have the
# same dtype. For this parametric DType that is not necessarily true.
a = self._get_array(2.)
        # Addition reduction works (as of writing it requires passing
        # `initial` because setting a scaled-float from the default `0` fails).
res = np.add.reduce(a, initial=0.)
assert res == a.astype(np.float64).sum()
# But each multiplication changes the factor, so a reduction is not
# possible (the relaxed version of the old refusal to handle any
# flexible dtype).
with pytest.raises(TypeError,
match="the resolved dtypes are not compatible"):
np.multiply.reduce(a)
def test_basic_ufunc_at(self):
float_a = np.array([1., 2., 3.])
b = self._get_array(2.)
float_b = b.view(np.float64).copy()
np.multiply.at(float_b, [1, 1, 1], float_a)
np.multiply.at(b, [1, 1, 1], float_a)
assert_array_equal(b.view(np.float64), float_b)
def test_basic_multiply_promotion(self):
float_a = np.array([1., 2., 3.])
b = self._get_array(2.)
res1 = float_a * b
res2 = b * float_a
# one factor is one, so we get the factor of b:
assert res1.dtype == res2.dtype == b.dtype
expected_view = float_a * b.view(np.float64)
assert_array_equal(res1.view(np.float64), expected_view)
assert_array_equal(res2.view(np.float64), expected_view)
# Check that promotion works when `out` is used:
np.multiply(b, float_a, out=res2)
with pytest.raises(TypeError):
# The promoter accepts this (maybe it should not), but the SFloat
# result cannot be cast to integer:
np.multiply(b, float_a, out=np.arange(3))
def test_basic_addition(self):
a = self._get_array(2.)
b = self._get_array(4.)
res = a + b
# addition uses the type promotion rules for the result:
assert res.dtype == np.result_type(a.dtype, b.dtype)
expected_view = (a.astype(res.dtype).view(np.float64) +
b.astype(res.dtype).view(np.float64))
assert_array_equal(res.view(np.float64), expected_view)
def test_addition_cast_safety(self):
"""The addition method is special for the scaled float, because it
includes the "cast" between different factors, thus cast-safety
is influenced by the implementation.
"""
a = self._get_array(2.)
b = self._get_array(-2.)
c = self._get_array(3.)
# sign change is "equiv":
np.add(a, b, casting="equiv")
with pytest.raises(TypeError):
np.add(a, b, casting="no")
# Different factor is "same_kind" (default) so check that "safe" fails
with pytest.raises(TypeError):
np.add(a, c, casting="safe")
# Check that casting the output fails also (done by the ufunc here)
with pytest.raises(TypeError):
np.add(a, a, out=c, casting="safe")
@pytest.mark.parametrize("ufunc",
[np.logical_and, np.logical_or, np.logical_xor])
def test_logical_ufuncs_casts_to_bool(self, ufunc):
a = self._get_array(2.)
a[0] = 0. # make sure first element is considered False.
float_equiv = a.astype(float)
expected = ufunc(float_equiv, float_equiv)
res = ufunc(a, a)
assert_array_equal(res, expected)
# also check that the same works for reductions:
expected = ufunc.reduce(float_equiv)
res = ufunc.reduce(a)
assert_array_equal(res, expected)
# The output casting does not match the bool, bool -> bool loop:
with pytest.raises(TypeError):
ufunc(a, a, out=np.empty(a.shape, dtype=int), casting="equiv")
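def _example_scaled_float():
    # Illustrative sketch (not part of the upstream suite): an SF(2.) array
    # stores value/scaling in its float64 buffer, so scaling the raw view
    # recovers the original values.
    a = np.array([2., 4.], dtype=SF(2.))
    assert_array_equal(2. * a.view(np.float64), [2., 4.])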
| 7,677 | Python | 37.009901 | 79 | 0.589814 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_extint128.py | import itertools
import contextlib
import operator
import pytest
import numpy as np
import numpy.core._multiarray_tests as mt
from numpy.testing import assert_raises, assert_equal
INT64_MAX = np.iinfo(np.int64).max
INT64_MIN = np.iinfo(np.int64).min
INT64_MID = 2**32
# int128 is not two's complement; the sign bit is separate
INT128_MAX = 2**128 - 1
INT128_MIN = -INT128_MAX
INT128_MID = 2**64
INT64_VALUES = (
[INT64_MIN + j for j in range(20)] +
[INT64_MAX - j for j in range(20)] +
[INT64_MID + j for j in range(-20, 20)] +
[2*INT64_MID + j for j in range(-20, 20)] +
[INT64_MID//2 + j for j in range(-20, 20)] +
list(range(-70, 70))
)
INT128_VALUES = (
[INT128_MIN + j for j in range(20)] +
[INT128_MAX - j for j in range(20)] +
[INT128_MID + j for j in range(-20, 20)] +
[2*INT128_MID + j for j in range(-20, 20)] +
[INT128_MID//2 + j for j in range(-20, 20)] +
list(range(-70, 70)) +
[False] # negative zero
)
INT64_POS_VALUES = [x for x in INT64_VALUES if x > 0]
@contextlib.contextmanager
def exc_iter(*args):
"""
    Iterate over the Cartesian product of *args; if an exception is raised,
    add information about the current iterate to the error message.
"""
value = [None]
def iterate():
for v in itertools.product(*args):
value[0] = v
yield v
try:
yield iterate()
except Exception:
import traceback
msg = "At: %r\n%s" % (repr(value[0]),
traceback.format_exc())
raise AssertionError(msg)
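# Illustrative usage sketch (not part of the upstream suite): iterate the
# Cartesian product; had the loop body raised, the AssertionError would
# report the offending (a, b) pair in its message.
with exc_iter([1, 2], [10, 20]) as _demo_it:
    for _a, _b in _demo_it:
        assert _a < _b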
def test_safe_binop():
# Test checked arithmetic routines
ops = [
(operator.add, 1),
(operator.sub, 2),
(operator.mul, 3)
]
with exc_iter(ops, INT64_VALUES, INT64_VALUES) as it:
for xop, a, b in it:
pyop, op = xop
c = pyop(a, b)
if not (INT64_MIN <= c <= INT64_MAX):
assert_raises(OverflowError, mt.extint_safe_binop, a, b, op)
else:
d = mt.extint_safe_binop(a, b, op)
if c != d:
# assert_equal is slow
assert_equal(d, c)
def test_to_128():
with exc_iter(INT64_VALUES) as it:
for a, in it:
b = mt.extint_to_128(a)
if a != b:
assert_equal(b, a)
def test_to_64():
with exc_iter(INT128_VALUES) as it:
for a, in it:
if not (INT64_MIN <= a <= INT64_MAX):
assert_raises(OverflowError, mt.extint_to_64, a)
else:
b = mt.extint_to_64(a)
if a != b:
assert_equal(b, a)
def test_mul_64_64():
with exc_iter(INT64_VALUES, INT64_VALUES) as it:
for a, b in it:
c = a * b
d = mt.extint_mul_64_64(a, b)
if c != d:
assert_equal(d, c)
def test_add_128():
with exc_iter(INT128_VALUES, INT128_VALUES) as it:
for a, b in it:
c = a + b
if not (INT128_MIN <= c <= INT128_MAX):
assert_raises(OverflowError, mt.extint_add_128, a, b)
else:
d = mt.extint_add_128(a, b)
if c != d:
assert_equal(d, c)
def test_sub_128():
with exc_iter(INT128_VALUES, INT128_VALUES) as it:
for a, b in it:
c = a - b
if not (INT128_MIN <= c <= INT128_MAX):
assert_raises(OverflowError, mt.extint_sub_128, a, b)
else:
d = mt.extint_sub_128(a, b)
if c != d:
assert_equal(d, c)
def test_neg_128():
with exc_iter(INT128_VALUES) as it:
for a, in it:
b = -a
c = mt.extint_neg_128(a)
if b != c:
assert_equal(c, b)
def test_shl_128():
with exc_iter(INT128_VALUES) as it:
for a, in it:
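            # the 128-bit value is sign-magnitude, so shift the magnitude and
            # reapply the sign rather than shifting the Python int directly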
if a < 0:
b = -(((-a) << 1) & (2**128-1))
else:
b = (a << 1) & (2**128-1)
c = mt.extint_shl_128(a)
if b != c:
assert_equal(c, b)
def test_shr_128():
with exc_iter(INT128_VALUES) as it:
for a, in it:
if a < 0:
b = -((-a) >> 1)
else:
b = a >> 1
c = mt.extint_shr_128(a)
if b != c:
assert_equal(c, b)
def test_gt_128():
with exc_iter(INT128_VALUES, INT128_VALUES) as it:
for a, b in it:
c = a > b
d = mt.extint_gt_128(a, b)
if c != d:
assert_equal(d, c)
@pytest.mark.slow
def test_divmod_128_64():
with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
for a, b in it:
if a >= 0:
c, cr = divmod(a, b)
else:
c, cr = divmod(-a, b)
c = -c
cr = -cr
d, dr = mt.extint_divmod_128_64(a, b)
            if c != d or cr != dr or b*d + dr != a:
assert_equal(d, c)
assert_equal(dr, cr)
assert_equal(b*d + dr, a)
def test_floordiv_128_64():
with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
for a, b in it:
c = a // b
d = mt.extint_floordiv_128_64(a, b)
if c != d:
assert_equal(d, c)
def test_ceildiv_128_64():
with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
for a, b in it:
c = (a + b - 1) // b
d = mt.extint_ceildiv_128_64(a, b)
if c != d:
assert_equal(d, c)
| 5,643 | Python | 24.654545 | 76 | 0.473684 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_overrides.py | import inspect
import sys
import os
import tempfile
from io import StringIO
from unittest import mock
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex)
from numpy.core.overrides import (
_get_implementing_args, array_function_dispatch,
verify_matching_signatures, ARRAY_FUNCTION_ENABLED)
from numpy.compat import pickle
import pytest
requires_array_function = pytest.mark.skipif(
not ARRAY_FUNCTION_ENABLED,
reason="__array_function__ dispatch not enabled.")
def _return_not_implemented(self, *args, **kwargs):
return NotImplemented
# need to define this at the top level to test pickling
@array_function_dispatch(lambda array: (array,))
def dispatched_one_arg(array):
"""Docstring."""
return 'original'
@array_function_dispatch(lambda array1, array2: (array1, array2))
def dispatched_two_arg(array1, array2):
"""Docstring."""
return 'original'
class TestGetImplementingArgs:
def test_ndarray(self):
array = np.array(1)
args = _get_implementing_args([array])
assert_equal(list(args), [array])
args = _get_implementing_args([array, array])
assert_equal(list(args), [array])
args = _get_implementing_args([array, 1])
assert_equal(list(args), [array])
args = _get_implementing_args([1, array])
assert_equal(list(args), [array])
def test_ndarray_subclasses(self):
class OverrideSub(np.ndarray):
__array_function__ = _return_not_implemented
class NoOverrideSub(np.ndarray):
pass
array = np.array(1).view(np.ndarray)
override_sub = np.array(1).view(OverrideSub)
no_override_sub = np.array(1).view(NoOverrideSub)
args = _get_implementing_args([array, override_sub])
assert_equal(list(args), [override_sub, array])
args = _get_implementing_args([array, no_override_sub])
assert_equal(list(args), [no_override_sub, array])
args = _get_implementing_args(
[override_sub, no_override_sub])
assert_equal(list(args), [override_sub, no_override_sub])
def test_ndarray_and_duck_array(self):
class Other:
__array_function__ = _return_not_implemented
array = np.array(1)
other = Other()
args = _get_implementing_args([other, array])
assert_equal(list(args), [other, array])
args = _get_implementing_args([array, other])
assert_equal(list(args), [array, other])
def test_ndarray_subclass_and_duck_array(self):
class OverrideSub(np.ndarray):
__array_function__ = _return_not_implemented
class Other:
__array_function__ = _return_not_implemented
array = np.array(1)
subarray = np.array(1).view(OverrideSub)
other = Other()
assert_equal(_get_implementing_args([array, subarray, other]),
[subarray, array, other])
assert_equal(_get_implementing_args([array, other, subarray]),
[subarray, array, other])
def test_many_duck_arrays(self):
class A:
__array_function__ = _return_not_implemented
class B(A):
__array_function__ = _return_not_implemented
class C(A):
__array_function__ = _return_not_implemented
class D:
__array_function__ = _return_not_implemented
a = A()
b = B()
c = C()
d = D()
assert_equal(_get_implementing_args([1]), [])
assert_equal(_get_implementing_args([a]), [a])
assert_equal(_get_implementing_args([a, 1]), [a])
assert_equal(_get_implementing_args([a, a, a]), [a])
assert_equal(_get_implementing_args([a, d, a]), [a, d])
assert_equal(_get_implementing_args([a, b]), [b, a])
assert_equal(_get_implementing_args([b, a]), [b, a])
assert_equal(_get_implementing_args([a, b, c]), [b, c, a])
assert_equal(_get_implementing_args([a, c, b]), [c, b, a])
def test_too_many_duck_arrays(self):
namespace = dict(__array_function__=_return_not_implemented)
types = [type('A' + str(i), (object,), namespace) for i in range(33)]
relevant_args = [t() for t in types]
actual = _get_implementing_args(relevant_args[:32])
assert_equal(actual, relevant_args[:32])
with assert_raises_regex(TypeError, 'distinct argument types'):
_get_implementing_args(relevant_args)
class TestNDArrayArrayFunction:
@requires_array_function
def test_method(self):
class Other:
__array_function__ = _return_not_implemented
class NoOverrideSub(np.ndarray):
pass
class OverrideSub(np.ndarray):
__array_function__ = _return_not_implemented
array = np.array([1])
other = Other()
no_override_sub = array.view(NoOverrideSub)
override_sub = array.view(OverrideSub)
result = array.__array_function__(func=dispatched_two_arg,
types=(np.ndarray,),
args=(array, 1.), kwargs={})
assert_equal(result, 'original')
result = array.__array_function__(func=dispatched_two_arg,
types=(np.ndarray, Other),
args=(array, other), kwargs={})
assert_(result is NotImplemented)
result = array.__array_function__(func=dispatched_two_arg,
types=(np.ndarray, NoOverrideSub),
args=(array, no_override_sub),
kwargs={})
assert_equal(result, 'original')
result = array.__array_function__(func=dispatched_two_arg,
types=(np.ndarray, OverrideSub),
args=(array, override_sub),
kwargs={})
assert_equal(result, 'original')
with assert_raises_regex(TypeError, 'no implementation found'):
np.concatenate((array, other))
expected = np.concatenate((array, array))
result = np.concatenate((array, no_override_sub))
assert_equal(result, expected.view(NoOverrideSub))
result = np.concatenate((array, override_sub))
assert_equal(result, expected.view(OverrideSub))
def test_no_wrapper(self):
# This shouldn't happen unless a user intentionally calls
# __array_function__ with invalid arguments, but check that we raise
# an appropriate error all the same.
array = np.array(1)
func = lambda x: x
with assert_raises_regex(AttributeError, '_implementation'):
array.__array_function__(func=func, types=(np.ndarray,),
args=(array,), kwargs={})
@requires_array_function
class TestArrayFunctionDispatch:
def test_pickle(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
roundtripped = pickle.loads(
pickle.dumps(dispatched_one_arg, protocol=proto))
assert_(roundtripped is dispatched_one_arg)
def test_name_and_docstring(self):
assert_equal(dispatched_one_arg.__name__, 'dispatched_one_arg')
if sys.flags.optimize < 2:
assert_equal(dispatched_one_arg.__doc__, 'Docstring.')
def test_interface(self):
class MyArray:
def __array_function__(self, func, types, args, kwargs):
return (self, func, types, args, kwargs)
original = MyArray()
(obj, func, types, args, kwargs) = dispatched_one_arg(original)
assert_(obj is original)
assert_(func is dispatched_one_arg)
assert_equal(set(types), {MyArray})
# assert_equal uses the overloaded np.iscomplexobj() internally
assert_(args == (original,))
assert_equal(kwargs, {})
def test_not_implemented(self):
class MyArray:
def __array_function__(self, func, types, args, kwargs):
return NotImplemented
array = MyArray()
with assert_raises_regex(TypeError, 'no implementation found'):
dispatched_one_arg(array)
@requires_array_function
class TestVerifyMatchingSignatures:
def test_verify_matching_signatures(self):
verify_matching_signatures(lambda x: 0, lambda x: 0)
verify_matching_signatures(lambda x=None: 0, lambda x=None: 0)
verify_matching_signatures(lambda x=1: 0, lambda x=None: 0)
with assert_raises(RuntimeError):
verify_matching_signatures(lambda a: 0, lambda b: 0)
with assert_raises(RuntimeError):
verify_matching_signatures(lambda x: 0, lambda x=None: 0)
with assert_raises(RuntimeError):
verify_matching_signatures(lambda x=None: 0, lambda y=None: 0)
with assert_raises(RuntimeError):
verify_matching_signatures(lambda x=1: 0, lambda y=1: 0)
def test_array_function_dispatch(self):
with assert_raises(RuntimeError):
@array_function_dispatch(lambda x: (x,))
def f(y):
pass
# should not raise
@array_function_dispatch(lambda x: (x,), verify=False)
def f(y):
pass
def _new_duck_type_and_implements():
"""Create a duck array type and implements functions."""
HANDLED_FUNCTIONS = {}
class MyArray:
def __array_function__(self, func, types, args, kwargs):
if func not in HANDLED_FUNCTIONS:
return NotImplemented
if not all(issubclass(t, MyArray) for t in types):
return NotImplemented
return HANDLED_FUNCTIONS[func](*args, **kwargs)
def implements(numpy_function):
"""Register an __array_function__ implementations."""
def decorator(func):
HANDLED_FUNCTIONS[numpy_function] = func
return func
return decorator
return (MyArray, implements)
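def _example_duck_registration():
    # Illustrative sketch (not part of the upstream suite): how the duck type
    # and registry produced above are wired to override a NumPy function.
    MyArray, implements = _new_duck_type_and_implements()
    @implements(np.concatenate)
    def _(arrays, axis=0):
        return 'concatenated'
    assert np.concatenate([MyArray(), MyArray()]) == 'concatenated'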
@requires_array_function
class TestArrayFunctionImplementation:
def test_one_arg(self):
MyArray, implements = _new_duck_type_and_implements()
@implements(dispatched_one_arg)
def _(array):
return 'myarray'
assert_equal(dispatched_one_arg(1), 'original')
assert_equal(dispatched_one_arg(MyArray()), 'myarray')
def test_optional_args(self):
MyArray, implements = _new_duck_type_and_implements()
@array_function_dispatch(lambda array, option=None: (array,))
def func_with_option(array, option='default'):
return option
@implements(func_with_option)
def my_array_func_with_option(array, new_option='myarray'):
return new_option
# we don't need to implement every option on __array_function__
# implementations
assert_equal(func_with_option(1), 'default')
assert_equal(func_with_option(1, option='extra'), 'extra')
assert_equal(func_with_option(MyArray()), 'myarray')
with assert_raises(TypeError):
func_with_option(MyArray(), option='extra')
        # but new options on implementations can't be passed through the
        # dispatched NumPy function
result = my_array_func_with_option(MyArray(), new_option='yes')
assert_equal(result, 'yes')
with assert_raises(TypeError):
func_with_option(MyArray(), new_option='no')
def test_not_implemented(self):
MyArray, implements = _new_duck_type_and_implements()
@array_function_dispatch(lambda array: (array,), module='my')
def func(array):
return array
array = np.array(1)
assert_(func(array) is array)
assert_equal(func.__module__, 'my')
with assert_raises_regex(
TypeError, "no implementation found for 'my.func'"):
func(MyArray())
class TestNDArrayMethods:
def test_repr(self):
# gh-12162: should still be defined even if __array_function__ doesn't
# implement np.array_repr()
class MyArray(np.ndarray):
def __array_function__(*args, **kwargs):
return NotImplemented
array = np.array(1).view(MyArray)
assert_equal(repr(array), 'MyArray(1)')
assert_equal(str(array), '1')
class TestNumPyFunctions:
def test_set_module(self):
assert_equal(np.sum.__module__, 'numpy')
assert_equal(np.char.equal.__module__, 'numpy.char')
assert_equal(np.fft.fft.__module__, 'numpy.fft')
assert_equal(np.linalg.solve.__module__, 'numpy.linalg')
def test_inspect_sum(self):
signature = inspect.signature(np.sum)
assert_('axis' in signature.parameters)
@requires_array_function
def test_override_sum(self):
MyArray, implements = _new_duck_type_and_implements()
@implements(np.sum)
def _(array):
return 'yes'
assert_equal(np.sum(MyArray()), 'yes')
@requires_array_function
def test_sum_on_mock_array(self):
# We need a proxy for mocks because __array_function__ is only looked
# up in the class dict
class ArrayProxy:
def __init__(self, value):
self.value = value
def __array_function__(self, *args, **kwargs):
return self.value.__array_function__(*args, **kwargs)
def __array__(self, *args, **kwargs):
return self.value.__array__(*args, **kwargs)
proxy = ArrayProxy(mock.Mock(spec=ArrayProxy))
proxy.value.__array_function__.return_value = 1
result = np.sum(proxy)
assert_equal(result, 1)
proxy.value.__array_function__.assert_called_once_with(
np.sum, (ArrayProxy,), (proxy,), {})
proxy.value.__array__.assert_not_called()
@requires_array_function
def test_sum_forwarding_implementation(self):
class MyArray(np.ndarray):
def sum(self, axis, out):
return 'summed'
def __array_function__(self, func, types, args, kwargs):
return super().__array_function__(func, types, args, kwargs)
# note: the internal implementation of np.sum() calls the .sum() method
array = np.array(1).view(MyArray)
assert_equal(np.sum(array), 'summed')
class TestArrayLike:
def setup_method(self):
class MyArray():
def __init__(self, function=None):
self.function = function
def __array_function__(self, func, types, args, kwargs):
assert func is getattr(np, func.__name__)
try:
my_func = getattr(self, func.__name__)
except AttributeError:
return NotImplemented
return my_func(*args, **kwargs)
self.MyArray = MyArray
class MyNoArrayFunctionArray():
def __init__(self, function=None):
self.function = function
self.MyNoArrayFunctionArray = MyNoArrayFunctionArray
def add_method(self, name, arr_class, enable_value_error=False):
def _definition(*args, **kwargs):
# Check that `like=` isn't propagated downstream
assert 'like' not in kwargs
if enable_value_error and 'value_error' in kwargs:
raise ValueError
return arr_class(getattr(arr_class, name))
setattr(arr_class, name, _definition)
def func_args(*args, **kwargs):
return args, kwargs
@requires_array_function
def test_array_like_not_implemented(self):
self.add_method('array', self.MyArray)
ref = self.MyArray.array()
with assert_raises_regex(TypeError, 'no implementation found'):
array_like = np.asarray(1, like=ref)
_array_tests = [
('array', *func_args((1,))),
('asarray', *func_args((1,))),
('asanyarray', *func_args((1,))),
('ascontiguousarray', *func_args((2, 3))),
('asfortranarray', *func_args((2, 3))),
('require', *func_args((np.arange(6).reshape(2, 3),),
requirements=['A', 'F'])),
('empty', *func_args((1,))),
('full', *func_args((1,), 2)),
('ones', *func_args((1,))),
('zeros', *func_args((1,))),
('arange', *func_args(3)),
('frombuffer', *func_args(b'\x00' * 8, dtype=int)),
('fromiter', *func_args(range(3), dtype=int)),
('fromstring', *func_args('1,2', dtype=int, sep=',')),
('loadtxt', *func_args(lambda: StringIO('0 1\n2 3'))),
('genfromtxt', *func_args(lambda: StringIO(u'1,2.1'),
dtype=[('int', 'i8'), ('float', 'f8')],
delimiter=',')),
]
@pytest.mark.parametrize('function, args, kwargs', _array_tests)
@pytest.mark.parametrize('numpy_ref', [True, False])
@requires_array_function
def test_array_like(self, function, args, kwargs, numpy_ref):
self.add_method('array', self.MyArray)
self.add_method(function, self.MyArray)
np_func = getattr(np, function)
my_func = getattr(self.MyArray, function)
if numpy_ref is True:
ref = np.array(1)
else:
ref = self.MyArray.array()
like_args = tuple(a() if callable(a) else a for a in args)
array_like = np_func(*like_args, **kwargs, like=ref)
if numpy_ref is True:
assert type(array_like) is np.ndarray
np_args = tuple(a() if callable(a) else a for a in args)
np_arr = np_func(*np_args, **kwargs)
# Special-case np.empty to ensure values match
if function == "empty":
np_arr.fill(1)
array_like.fill(1)
assert_equal(array_like, np_arr)
else:
assert type(array_like) is self.MyArray
assert array_like.function is my_func
@pytest.mark.parametrize('function, args, kwargs', _array_tests)
@pytest.mark.parametrize('ref', [1, [1], "MyNoArrayFunctionArray"])
@requires_array_function
def test_no_array_function_like(self, function, args, kwargs, ref):
self.add_method('array', self.MyNoArrayFunctionArray)
self.add_method(function, self.MyNoArrayFunctionArray)
np_func = getattr(np, function)
# Instantiate ref if it's the MyNoArrayFunctionArray class
if ref == "MyNoArrayFunctionArray":
ref = self.MyNoArrayFunctionArray.array()
like_args = tuple(a() if callable(a) else a for a in args)
with assert_raises_regex(TypeError,
'The `like` argument must be an array-like that implements'):
np_func(*like_args, **kwargs, like=ref)
@pytest.mark.parametrize('numpy_ref', [True, False])
def test_array_like_fromfile(self, numpy_ref):
self.add_method('array', self.MyArray)
self.add_method("fromfile", self.MyArray)
if numpy_ref is True:
ref = np.array(1)
else:
ref = self.MyArray.array()
data = np.random.random(5)
with tempfile.TemporaryDirectory() as tmpdir:
fname = os.path.join(tmpdir, "testfile")
data.tofile(fname)
array_like = np.fromfile(fname, like=ref)
if numpy_ref is True:
assert type(array_like) is np.ndarray
np_res = np.fromfile(fname, like=ref)
assert_equal(np_res, data)
assert_equal(array_like, np_res)
else:
assert type(array_like) is self.MyArray
assert array_like.function is self.MyArray.fromfile
@requires_array_function
def test_exception_handling(self):
self.add_method('array', self.MyArray, enable_value_error=True)
ref = self.MyArray.array()
with assert_raises(TypeError):
# Raises the error about `value_error` being invalid first
np.array(1, value_error=True, like=ref)
| 20,200 | Python | 33.472696 | 79 | 0.582178 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_dtype.py | import sys
import operator
import pytest
import ctypes
import gc
import types
from typing import Any
import numpy as np
from numpy.core._rational_tests import rational
from numpy.core._multiarray_tests import create_custom_field_dtype
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT,
IS_PYSTON)
from numpy.compat import pickle
from itertools import permutations
import random
import hypothesis
from hypothesis.extra import numpy as hynp
def assert_dtype_equal(a, b):
assert_equal(a, b)
assert_equal(hash(a), hash(b),
"two equivalent types do not hash to the same value !")
def assert_dtype_not_equal(a, b):
assert_(a != b)
assert_(hash(a) != hash(b),
"two different types hash to the same value !")
class TestBuiltin:
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
np.compat.unicode])
def test_run(self, t):
"""Only test hash runs at all."""
dt = np.dtype(t)
hash(dt)
@pytest.mark.parametrize('t', [int, float])
def test_dtype(self, t):
        # Make sure equivalent byte-order chars hash the same (e.g. '<' and
        # '=' on little endian)
dt = np.dtype(t)
dt2 = dt.newbyteorder("<")
dt3 = dt.newbyteorder(">")
if dt == dt2:
assert_(dt.byteorder != dt2.byteorder, "bogus test")
assert_dtype_equal(dt, dt2)
else:
assert_(dt.byteorder != dt3.byteorder, "bogus test")
assert_dtype_equal(dt, dt3)
def test_equivalent_dtype_hashing(self):
# Make sure equivalent dtypes with different type num hash equal
uintp = np.dtype(np.uintp)
if uintp.itemsize == 4:
left = uintp
right = np.dtype(np.uint32)
else:
left = uintp
right = np.dtype(np.ulonglong)
assert_(left == right)
assert_(hash(left) == hash(right))
def test_invalid_types(self):
# Make sure invalid type strings raise an error
assert_raises(TypeError, np.dtype, 'O3')
assert_raises(TypeError, np.dtype, 'O5')
assert_raises(TypeError, np.dtype, 'O7')
assert_raises(TypeError, np.dtype, 'b3')
assert_raises(TypeError, np.dtype, 'h4')
assert_raises(TypeError, np.dtype, 'I5')
assert_raises(TypeError, np.dtype, 'e3')
assert_raises(TypeError, np.dtype, 'f5')
if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:
assert_raises(TypeError, np.dtype, 'g12')
elif np.dtype('g').itemsize == 12:
assert_raises(TypeError, np.dtype, 'g16')
if np.dtype('l').itemsize == 8:
assert_raises(TypeError, np.dtype, 'l4')
assert_raises(TypeError, np.dtype, 'L4')
else:
assert_raises(TypeError, np.dtype, 'l8')
assert_raises(TypeError, np.dtype, 'L8')
if np.dtype('q').itemsize == 8:
assert_raises(TypeError, np.dtype, 'q4')
assert_raises(TypeError, np.dtype, 'Q4')
else:
assert_raises(TypeError, np.dtype, 'q8')
assert_raises(TypeError, np.dtype, 'Q8')
def test_richcompare_invalid_dtype_equality(self):
        # Make sure objects that cannot be converted to valid
        # dtypes result in False/True when compared to valid dtypes.
        # Here 7 cannot be converted to a dtype. No exceptions should be raised
assert not np.dtype(np.int32) == 7, "dtype richcompare failed for =="
assert np.dtype(np.int32) != 7, "dtype richcompare failed for !="
@pytest.mark.parametrize(
'operation',
[operator.le, operator.lt, operator.ge, operator.gt])
def test_richcompare_invalid_dtype_comparison(self, operation):
# Make sure TypeError is raised for comparison operators
# for invalid dtypes. Here 7 is an invalid dtype.
with pytest.raises(TypeError):
operation(np.dtype(np.int32), 7)
@pytest.mark.parametrize("dtype",
['Bool', 'Bytes0', 'Complex32', 'Complex64',
'Datetime64', 'Float16', 'Float32', 'Float64',
'Int8', 'Int16', 'Int32', 'Int64',
'Object0', 'Str0', 'Timedelta64',
'UInt8', 'UInt16', 'Uint32', 'UInt32',
'Uint64', 'UInt64', 'Void0',
"Float128", "Complex128"])
def test_numeric_style_types_are_invalid(self, dtype):
with assert_raises(TypeError):
np.dtype(dtype)
@pytest.mark.parametrize(
'value',
['m8', 'M8', 'datetime64', 'timedelta64',
'i4, (2,3)f8, f4', 'a3, 3u8, (3,4)a10',
'>f', '<f', '=f', '|f',
])
def test_dtype_bytes_str_equivalence(self, value):
bytes_value = value.encode('ascii')
from_bytes = np.dtype(bytes_value)
from_str = np.dtype(value)
assert_dtype_equal(from_bytes, from_str)
def test_dtype_from_bytes(self):
# Empty bytes object
assert_raises(TypeError, np.dtype, b'')
# Byte order indicator, but no type
assert_raises(TypeError, np.dtype, b'|')
# Single character with ordinal < NPY_NTYPES returns
# type by index into _builtin_descrs
assert_dtype_equal(np.dtype(bytes([0])), np.dtype('bool'))
assert_dtype_equal(np.dtype(bytes([17])), np.dtype(object))
# Single character where value is a valid type code
assert_dtype_equal(np.dtype(b'f'), np.dtype('float32'))
# Bytes with non-ascii values raise errors
assert_raises(TypeError, np.dtype, b'\xff')
assert_raises(TypeError, np.dtype, b's\xff')
def test_bad_param(self):
# Can't give a size that's too small
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'i1'],
'offsets':[0, 4],
'itemsize':4})
# If alignment is enabled, the alignment (4) must divide the itemsize
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'i1'],
'offsets':[0, 4],
'itemsize':9}, align=True)
# If alignment is enabled, the individual fields must be aligned
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i1', 'f4'],
'offsets':[0, 2]}, align=True)
def test_field_order_equality(self):
x = np.dtype({'names': ['A', 'B'],
'formats': ['i4', 'f4'],
'offsets': [0, 4]})
y = np.dtype({'names': ['B', 'A'],
'formats': ['i4', 'f4'],
'offsets': [4, 0]})
assert_equal(x == y, False)
        # This is a safe cast (not equiv) due to the different names:
assert np.can_cast(x, y, casting="safe")
class TestRecord:
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
a = np.dtype([('yo', int)])
b = np.dtype([('yo', int)])
assert_dtype_equal(a, b)
def test_different_names(self):
        # In theory, they may hash the same (collision)?
a = np.dtype([('yo', int)])
b = np.dtype([('ye', int)])
assert_dtype_not_equal(a, b)
def test_different_titles(self):
        # In theory, they may hash the same (collision)?
a = np.dtype({'names': ['r', 'b'],
'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
b = np.dtype({'names': ['r', 'b'],
'formats': ['u1', 'u1'],
'titles': ['RRed pixel', 'Blue pixel']})
assert_dtype_not_equal(a, b)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_refcount_dictionary_setting(self):
names = ["name1"]
formats = ["f8"]
titles = ["t1"]
offsets = [0]
d = dict(names=names, formats=formats, titles=titles, offsets=offsets)
refcounts = {k: sys.getrefcount(i) for k, i in d.items()}
np.dtype(d)
refcounts_new = {k: sys.getrefcount(i) for k, i in d.items()}
assert refcounts == refcounts_new
def test_mutate(self):
# Mutating a dtype should reset the cached hash value
a = np.dtype([('yo', int)])
b = np.dtype([('yo', int)])
c = np.dtype([('ye', int)])
assert_dtype_equal(a, b)
assert_dtype_not_equal(a, c)
a.names = ['ye']
assert_dtype_equal(a, c)
assert_dtype_not_equal(a, b)
state = b.__reduce__()[2]
a.__setstate__(state)
assert_dtype_equal(a, b)
assert_dtype_not_equal(a, c)
def test_not_lists(self):
"""Test if an appropriate exception is raised when passing bad values to
the dtype constructor.
"""
assert_raises(TypeError, np.dtype,
dict(names={'A', 'B'}, formats=['f8', 'i4']))
assert_raises(TypeError, np.dtype,
dict(names=['A', 'B'], formats={'f8', 'i4'}))
def test_aligned_size(self):
# Check that structured dtypes get padded to an aligned size
dt = np.dtype('i4, i1', align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype({'names':['f0', 'f1'],
'formats':['i4', 'u1'],
'offsets':[0, 4]}, align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True)
assert_equal(dt.itemsize, 8)
# Nesting should preserve that alignment
dt1 = np.dtype([('f0', 'i4'),
('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
('f2', 'i1')], align=True)
assert_equal(dt1.itemsize, 20)
dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
'formats':['i4',
[('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
'i1'],
'offsets':[0, 4, 16]}, align=True)
assert_equal(dt2.itemsize, 20)
dt3 = np.dtype({'f0': ('i4', 0),
'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
'f2': ('i1', 16)}, align=True)
assert_equal(dt3.itemsize, 20)
assert_equal(dt1, dt2)
assert_equal(dt2, dt3)
# Nesting should preserve packing
dt1 = np.dtype([('f0', 'i4'),
('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
('f2', 'i1')], align=False)
assert_equal(dt1.itemsize, 11)
dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
'formats':['i4',
[('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
'i1'],
'offsets':[0, 4, 10]}, align=False)
assert_equal(dt2.itemsize, 11)
dt3 = np.dtype({'f0': ('i4', 0),
'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
'f2': ('i1', 10)}, align=False)
assert_equal(dt3.itemsize, 11)
assert_equal(dt1, dt2)
assert_equal(dt2, dt3)
# Array of subtype should preserve alignment
dt1 = np.dtype([('a', '|i1'),
('b', [('f0', '<i2'),
('f1', '<f4')], 2)], align=True)
assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),
('b', [('f0', '<i2'), ('', '|V2'),
('f1', '<f4')], (2,))])
def test_union_struct(self):
# Should be able to create union dtypes
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
'offsets':[0, 0, 2]}, align=True)
assert_equal(dt.itemsize, 4)
a = np.array([3], dtype='<u4').view(dt)
a['f1'] = 10
a['f2'] = 36
assert_equal(a['f0'], 10 + 36*256*256)
# Should be able to specify fields out of order
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
'offsets':[4, 0, 2]}, align=True)
assert_equal(dt.itemsize, 8)
# field name should not matter: assignment is by position
dt2 = np.dtype({'names':['f2', 'f0', 'f1'],
'formats':['<u4', '<u2', '<u2'],
'offsets':[4, 0, 2]}, align=True)
vals = [(0, 1, 2), (3, -1, 4)]
vals2 = [(0, 1, 2), (3, -1, 4)]
a = np.array(vals, dt)
b = np.array(vals2, dt2)
assert_equal(a.astype(dt2), b)
assert_equal(b.astype(dt), a)
assert_equal(a.view(dt2), b)
assert_equal(b.view(dt), a)
# Should not be able to overlap objects with other types
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['O', 'i1'],
'offsets':[0, 2]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'O'],
'offsets':[0, 3]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':[[('a', 'O')], 'i1'],
'offsets':[0, 2]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', [('a', 'O')]],
'offsets':[0, 3]})
# Out of order should still be ok, however
dt = np.dtype({'names':['f0', 'f1'],
'formats':['i1', 'O'],
'offsets':[np.dtype('intp').itemsize, 0]})
@pytest.mark.parametrize(["obj", "dtype", "expected"],
[([], ("(2)f4,"), np.empty((0, 2), dtype="f4")),
(3, "(3)f4,", [3, 3, 3]),
(np.float64(2), "(2)f4,", [2, 2]),
([((0, 1), (1, 2)), ((2,),)], '(2,2)f4', None),
(["1", "2"], "(2)i,", None)])
def test_subarray_list(self, obj, dtype, expected):
dtype = np.dtype(dtype)
res = np.array(obj, dtype=dtype)
if expected is None:
# iterate the 1-d list to fill the array
expected = np.empty(len(obj), dtype=dtype)
for i in range(len(expected)):
expected[i] = obj[i]
assert_array_equal(res, expected)
def test_comma_datetime(self):
dt = np.dtype('M8[D],datetime64[Y],i8')
assert_equal(dt, np.dtype([('f0', 'M8[D]'),
('f1', 'datetime64[Y]'),
('f2', 'i8')]))
def test_from_dictproxy(self):
# Tests for PR #5920
dt = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'f4']})
assert_dtype_equal(dt, np.dtype(dt.fields))
dt2 = np.dtype((np.void, dt.fields))
assert_equal(dt2.fields, dt.fields)
def test_from_dict_with_zero_width_field(self):
# Regression test for #6430 / #2196
dt = np.dtype([('val1', np.float32, (0,)), ('val2', int)])
dt2 = np.dtype({'names': ['val1', 'val2'],
'formats': [(np.float32, (0,)), int]})
assert_dtype_equal(dt, dt2)
assert_equal(dt.fields['val1'][0].itemsize, 0)
assert_equal(dt.itemsize, dt.fields['val2'][0].itemsize)
def test_bool_commastring(self):
d = np.dtype('?,?,?') # raises?
assert_equal(len(d.names), 3)
for n in d.names:
assert_equal(d.fields[n][0], np.dtype('?'))
def test_nonint_offsets(self):
# gh-8059
def make_dtype(off):
return np.dtype({'names': ['A'], 'formats': ['i4'],
'offsets': [off]})
assert_raises(TypeError, make_dtype, 'ASD')
assert_raises(OverflowError, make_dtype, 2**70)
assert_raises(TypeError, make_dtype, 2.3)
assert_raises(ValueError, make_dtype, -10)
# no errors here:
dt = make_dtype(np.uint32(0))
np.zeros(1, dtype=dt)[0].item()
def test_fields_by_index(self):
dt = np.dtype([('a', np.int8), ('b', np.float32, 3)])
assert_dtype_equal(dt[0], np.dtype(np.int8))
assert_dtype_equal(dt[1], np.dtype((np.float32, 3)))
assert_dtype_equal(dt[-1], dt[1])
assert_dtype_equal(dt[-2], dt[0])
assert_raises(IndexError, lambda: dt[-3])
assert_raises(TypeError, operator.getitem, dt, 3.0)
assert_equal(dt[1], dt[np.int8(1)])
@pytest.mark.parametrize('align_flag',[False, True])
def test_multifield_index(self, align_flag):
# Indexing with a list produces subfields;
# the align flag should be preserved.
dt = np.dtype([
(('title', 'col1'), '<U20'), ('A', '<f8'), ('B', '<f8')
], align=align_flag)
dt_sub = dt[['B', 'col1']]
assert_equal(
dt_sub,
np.dtype({
'names': ['B', 'col1'],
'formats': ['<f8', '<U20'],
'offsets': [88, 0],
'titles': [None, 'title'],
'itemsize': 96
})
)
assert_equal(dt_sub.isalignedstruct, align_flag)
dt_sub = dt[['B']]
assert_equal(
dt_sub,
np.dtype({
'names': ['B'],
'formats': ['<f8'],
'offsets': [88],
'itemsize': 96
})
)
assert_equal(dt_sub.isalignedstruct, align_flag)
dt_sub = dt[[]]
assert_equal(
dt_sub,
np.dtype({
'names': [],
'formats': [],
'offsets': [],
'itemsize': 96
})
)
assert_equal(dt_sub.isalignedstruct, align_flag)
assert_raises(TypeError, operator.getitem, dt, ())
assert_raises(TypeError, operator.getitem, dt, [1, 2, 3])
assert_raises(TypeError, operator.getitem, dt, ['col1', 2])
assert_raises(KeyError, operator.getitem, dt, ['fake'])
assert_raises(KeyError, operator.getitem, dt, ['title'])
assert_raises(ValueError, operator.getitem, dt, ['col1', 'col1'])
def test_partial_dict(self):
# 'names' is missing
assert_raises(ValueError, np.dtype,
{'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)})
def test_fieldless_views(self):
a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[],
'itemsize':8})
assert_raises(ValueError, a.view, np.dtype([]))
d = np.dtype((np.dtype([]), 10))
assert_equal(d.shape, (10,))
assert_equal(d.itemsize, 0)
assert_equal(d.base, np.dtype([]))
arr = np.fromiter((() for i in range(10)), [])
assert_equal(arr.dtype, np.dtype([]))
assert_raises(ValueError, np.frombuffer, b'', dtype=[])
assert_equal(np.frombuffer(b'', dtype=[], count=2),
np.empty(2, dtype=[]))
assert_raises(ValueError, np.dtype, ([], 'f8'))
assert_raises(ValueError, np.zeros(1, dtype='i4').view, [])
assert_equal(np.zeros(2, dtype=[]) == np.zeros(2, dtype=[]),
np.ones(2, dtype=bool))
assert_equal(np.zeros((1, 2), dtype=[]) == a,
np.ones((1, 2), dtype=bool))
class TestSubarray:
def test_single_subarray(self):
a = np.dtype((int, (2)))
b = np.dtype((int, (2,)))
assert_dtype_equal(a, b)
assert_equal(type(a.subdtype[1]), tuple)
assert_equal(type(b.subdtype[1]), tuple)
def test_equivalent_record(self):
"""Test whether equivalent subarray dtypes hash the same."""
a = np.dtype((int, (2, 3)))
b = np.dtype((int, (2, 3)))
assert_dtype_equal(a, b)
def test_nonequivalent_record(self):
"""Test whether different subarray dtypes hash differently."""
a = np.dtype((int, (2, 3)))
b = np.dtype((int, (3, 2)))
assert_dtype_not_equal(a, b)
a = np.dtype((int, (2, 3)))
b = np.dtype((int, (2, 2)))
assert_dtype_not_equal(a, b)
a = np.dtype((int, (1, 2, 3)))
b = np.dtype((int, (1, 2)))
assert_dtype_not_equal(a, b)
def test_shape_equal(self):
"""Test some data types that are equal"""
assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple())))
# FutureWarning during deprecation period; after it is passed this
# should instead check that "(1)f8" == "1f8" == ("f8", 1).
with pytest.warns(FutureWarning):
assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1)))
assert_dtype_equal(np.dtype((int, 2)), np.dtype((int, (2,))))
assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2))))
d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2))
assert_dtype_equal(np.dtype(d), np.dtype(d))
def test_shape_simple(self):
"""Test some simple cases that shouldn't be equal"""
assert_dtype_not_equal(np.dtype('f8'), np.dtype(('f8', (1,))))
assert_dtype_not_equal(np.dtype(('f8', (1,))), np.dtype(('f8', (1, 1))))
assert_dtype_not_equal(np.dtype(('f4', (3, 2))), np.dtype(('f4', (2, 3))))
def test_shape_monster(self):
"""Test some more complicated cases that shouldn't be equal"""
assert_dtype_not_equal(
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('a', 'f4', (1, 2)), ('b', 'f8', (1, 3))], (2, 2))))
assert_dtype_not_equal(
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('a', 'f4', (2, 1)), ('b', 'i8', (1, 3))], (2, 2))))
assert_dtype_not_equal(
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('e', 'f8', (1, 3)), ('d', 'f4', (2, 1))], (2, 2))))
assert_dtype_not_equal(
np.dtype(([('a', [('a', 'i4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('a', [('a', 'u4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))))
def test_shape_sequence(self):
# Any sequence of integers should work as shape, but the result
# should be a tuple (immutable) of base type integers.
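# For example, np.dtype([('a', 'f4', np.array([2, 2]))])['a'].shape
# should come out as the builtin tuple (2, 2) with builtin int entries.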
a = np.array([1, 2, 3], dtype=np.int16)
l = [1, 2, 3]
# Array gets converted
dt = np.dtype([('a', 'f4', a)])
assert_(isinstance(dt['a'].shape, tuple))
assert_(isinstance(dt['a'].shape[0], int))
# List gets converted
dt = np.dtype([('a', 'f4', l)])
assert_(isinstance(dt['a'].shape, tuple))
# An object implementing __index__ also works:
class IntLike:
def __index__(self):
return 3
def __int__(self):
# (PyNumber_Check fails without __int__)
return 3
dt = np.dtype([('a', 'f4', IntLike())])
assert_(isinstance(dt['a'].shape, tuple))
assert_(isinstance(dt['a'].shape[0], int))
dt = np.dtype([('a', 'f4', (IntLike(),))])
assert_(isinstance(dt['a'].shape, tuple))
assert_(isinstance(dt['a'].shape[0], int))
def test_shape_matches_ndim(self):
dt = np.dtype([('a', 'f4', ())])
assert_equal(dt['a'].shape, ())
assert_equal(dt['a'].ndim, 0)
dt = np.dtype([('a', 'f4')])
assert_equal(dt['a'].shape, ())
assert_equal(dt['a'].ndim, 0)
dt = np.dtype([('a', 'f4', 4)])
assert_equal(dt['a'].shape, (4,))
assert_equal(dt['a'].ndim, 1)
dt = np.dtype([('a', 'f4', (1, 2, 3))])
assert_equal(dt['a'].shape, (1, 2, 3))
assert_equal(dt['a'].ndim, 3)
def test_shape_invalid(self):
# Check that the shape is valid.
max_int = np.iinfo(np.intc).max
max_intp = np.iinfo(np.intp).max
# Too large values (the datatype is part of this)
assert_raises(ValueError, np.dtype, [('a', 'f4', max_int // 4 + 1)])
assert_raises(ValueError, np.dtype, [('a', 'f4', max_int + 1)])
assert_raises(ValueError, np.dtype, [('a', 'f4', (max_int, 2))])
# Takes a different code path (fails earlier):
assert_raises(ValueError, np.dtype, [('a', 'f4', max_intp + 1)])
# Negative values
assert_raises(ValueError, np.dtype, [('a', 'f4', -1)])
assert_raises(ValueError, np.dtype, [('a', 'f4', (-1, -1))])
def test_alignment(self):
# Check that subarrays are aligned
t1 = np.dtype('(1,)i4', align=True)
t2 = np.dtype('2i4', align=True)
assert_equal(t1.alignment, t2.alignment)
def test_aligned_empty(self):
# Mainly regression test for gh-19696: construction failed completely
dt = np.dtype([], align=True)
assert dt == np.dtype([])
dt = np.dtype({"names": [], "formats": [], "itemsize": 0}, align=True)
assert dt == np.dtype([])
def test_subarray_base_item(self):
arr = np.ones(3, dtype=[("f", "i", 3)])
# Extracting the field "absorbs" the subarray into a view:
assert arr["f"].base is arr
# Extract the structured item, and then check the tuple component:
item = arr.item(0)
assert type(item) is tuple and len(item) == 1
assert item[0].base is arr
def test_subarray_cast_copies(self):
# Older versions of NumPy did NOT copy, but they got the ownership
# wrong (not actually knowing the correct base!). Versions since 1.21
# (I think) crashed fairly reliably. This defines the correct behavior
# as a copy. Keeping the ownership would be possible (but harder).
arr = np.ones(3, dtype=[("f", "i", 3)])
cast = arr.astype(object)
for fields in cast:
assert type(fields) == tuple and len(fields) == 1
subarr = fields[0]
assert subarr.base is None
assert subarr.flags.owndata
def iter_struct_object_dtypes():
"""
Iterates over a few complex dtypes and object patterns which
fill the array with a given object (defaults to a singleton).
Yields
------
dtype : dtype
pattern : tuple
Structured tuple for use with `np.array`.
count : int
Number of objects stored in the dtype.
singleton : object
A singleton object. The returned pattern is constructed so that
all objects inside the datatype are set to the singleton.
"""
obj = object()
dt = np.dtype([('b', 'O', (2, 3))])
p = ([[obj] * 3] * 2,)
yield pytest.param(dt, p, 6, obj, id="<subarray>")
dt = np.dtype([('a', 'i4'), ('b', 'O', (2, 3))])
p = (0, [[obj] * 3] * 2)
yield pytest.param(dt, p, 6, obj, id="<subarray in field>")
dt = np.dtype([('a', 'i4'),
('b', [('ba', 'O'), ('bb', 'i1')], (2, 3))])
p = (0, [[(obj, 0)] * 3] * 2)
yield pytest.param(dt, p, 6, obj, id="<structured subarray 1>")
dt = np.dtype([('a', 'i4'),
('b', [('ba', 'O'), ('bb', 'O')], (2, 3))])
p = (0, [[(obj, obj)] * 3] * 2)
yield pytest.param(dt, p, 12, obj, id="<structured subarray 2>")
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
class TestStructuredObjectRefcounting:
"""These tests cover various uses of complicated structured types which
include objects and thus require reference counting.
"""
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
iter_struct_object_dtypes())
@pytest.mark.parametrize(["creation_func", "creation_obj"], [
pytest.param(np.empty, None,
# None is probably used for too many things
marks=pytest.mark.skip("unreliable due to python's behaviour")),
(np.ones, 1),
(np.zeros, 0)])
def test_structured_object_create_delete(self, dt, pat, count, singleton,
creation_func, creation_obj):
"""Structured object reference counting in creation and deletion"""
# The test assumes that 0, 1, and None are singletons.
gc.collect()
before = sys.getrefcount(creation_obj)
arr = creation_func(3, dt)
now = sys.getrefcount(creation_obj)
assert now - before == count * 3
del arr
now = sys.getrefcount(creation_obj)
assert now == before
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
iter_struct_object_dtypes())
def test_structured_object_item_setting(self, dt, pat, count, singleton):
"""Structured object reference counting for simple item setting"""
one = 1
gc.collect()
before = sys.getrefcount(singleton)
arr = np.array([pat] * 3, dt)
assert sys.getrefcount(singleton) - before == count * 3
# Fill with `1` and check that it was replaced correctly:
before2 = sys.getrefcount(one)
arr[...] = one
after2 = sys.getrefcount(one)
assert after2 - before2 == count * 3
del arr
gc.collect()
assert sys.getrefcount(one) == before2
assert sys.getrefcount(singleton) == before
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
iter_struct_object_dtypes())
@pytest.mark.parametrize(
['shape', 'index', 'items_changed'],
[((3,), ([0, 2],), 2),
((3, 2), ([0, 2], slice(None)), 4),
((3, 2), ([0, 2], [1]), 2),
((3,), ([True, False, True]), 2)])
def test_structured_object_indexing(self, shape, index, items_changed,
dt, pat, count, singleton):
"""Structured object reference counting for advanced indexing."""
# Use two small negative values (should be singletons, but less likely
# to run into race-conditions). This failed in some threaded envs
# when using 0 and 1. If it fails again, we should remove all explicit
# checks, and rely on `pytest-leaks` reference count checker only.
val0 = -4
val1 = -5
arr = np.full(shape, val0, dt)
gc.collect()
before_val0 = sys.getrefcount(val0)
before_val1 = sys.getrefcount(val1)
# Test item getting:
part = arr[index]
after_val0 = sys.getrefcount(val0)
assert after_val0 - before_val0 == count * items_changed
del part
# Test item setting:
arr[index] = val1
gc.collect()
after_val0 = sys.getrefcount(val0)
after_val1 = sys.getrefcount(val1)
assert before_val0 - after_val0 == count * items_changed
assert after_val1 - before_val1 == count * items_changed
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
iter_struct_object_dtypes())
def test_structured_object_take_and_repeat(self, dt, pat, count, singleton):
"""Structured object reference counting for specialized functions.
The older functions such as take and repeat use different code paths
than item setting (at the time of writing).
"""
indices = [0, 1]
arr = np.array([pat] * 3, dt)
gc.collect()
before = sys.getrefcount(singleton)
res = arr.take(indices)
after = sys.getrefcount(singleton)
assert after - before == count * 2
new = res.repeat(10)
gc.collect()
after_repeat = sys.getrefcount(singleton)
assert after_repeat - after == count * 2 * 10
class TestStructuredDtypeSparseFields:
"""Tests subarray fields which contain sparse dtypes so that
not all memory is used by the dtype work. Such dtype's should
leave the underlying memory unchanged.
"""
dtype = np.dtype([('a', {'names':['aa', 'ab'], 'formats':['f', 'f'],
'offsets':[0, 4]}, (2, 3))])
sparse_dtype = np.dtype([('a', {'names':['ab'], 'formats':['f'],
'offsets':[4]}, (2, 3))])
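# The sparse view exposes only the "ab" half (offset 4) of each 8-byte
# struct; assignments through it must leave the "aa" half untouched
# (still zero in the tests below).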
def test_sparse_field_assignment(self):
arr = np.zeros(3, self.dtype)
sparse_arr = arr.view(self.sparse_dtype)
sparse_arr[...] = np.finfo(np.float32).max
# dtype is reduced when accessing the field, so shape is (3, 2, 3):
assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
def test_sparse_field_assignment_fancy(self):
# Fancy assignment goes to the copyswap function for complex types:
arr = np.zeros(3, self.dtype)
sparse_arr = arr.view(self.sparse_dtype)
sparse_arr[[0, 1, 2]] = np.finfo(np.float32).max
# dtype is reduced when accessing the field, so shape is (3, 2, 3):
assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
class TestMonsterType:
"""Test deeply nested subtypes."""
def test1(self):
simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
a = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((int, (3, 2))))])
b = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((int, (3, 2))))])
assert_dtype_equal(a, b)
c = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((a, (3, 2))))])
d = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((a, (3, 2))))])
assert_dtype_equal(c, d)
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
def test_list_recursion(self):
l = list()
l.append(('f', l))
with pytest.raises(RecursionError):
np.dtype(l)
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
def test_tuple_recursion(self):
d = np.int32
for i in range(100000):
d = (d, (1,))
with pytest.raises(RecursionError):
np.dtype(d)
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
def test_dict_recursion(self):
d = dict(names=['self'], formats=[None], offsets=[0])
d['formats'][0] = d
with pytest.raises(RecursionError):
np.dtype(d)
class TestMetadata:
def test_no_metadata(self):
d = np.dtype(int)
assert_(d.metadata is None)
def test_metadata_takes_dict(self):
d = np.dtype(int, metadata={'datum': 1})
assert_(d.metadata == {'datum': 1})
def test_metadata_rejects_nondict(self):
assert_raises(TypeError, np.dtype, int, metadata='datum')
assert_raises(TypeError, np.dtype, int, metadata=1)
assert_raises(TypeError, np.dtype, int, metadata=None)
def test_nested_metadata(self):
d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))])
assert_(d['a'].metadata == {'datum': 1})
def test_base_metadata_copied(self):
d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1})))
assert_(d.metadata == {'datum': 1})
class TestString:
def test_complex_dtype_str(self):
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))], (3,)),
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
('bright', '>f4', (8, 36))])])
assert_equal(str(dt),
"[('top', [('tiles', ('>f4', (64, 64)), (1,)), "
"('rtile', '>f4', (64, 36))], (3,)), "
"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
"('bright', '>f4', (8, 36))])]")
# If the sticky aligned flag is set to True, it makes the
# str() function use a dict representation with an 'aligned' flag
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))],
(3,)),
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
('bright', '>f4', (8, 36))])],
align=True)
assert_equal(str(dt),
"{'names': ['top', 'bottom'],"
" 'formats': [([('tiles', ('>f4', (64, 64)), (1,)), "
"('rtile', '>f4', (64, 36))], (3,)), "
"[('bleft', ('>f4', (8, 64)), (1,)), "
"('bright', '>f4', (8, 36))]],"
" 'offsets': [0, 76800],"
" 'itemsize': 80000,"
" 'aligned': True}")
with np.printoptions(legacy='1.21'):
assert_equal(str(dt),
"{'names':['top','bottom'], "
"'formats':[([('tiles', ('>f4', (64, 64)), (1,)), "
"('rtile', '>f4', (64, 36))], (3,)),"
"[('bleft', ('>f4', (8, 64)), (1,)), "
"('bright', '>f4', (8, 36))]], "
"'offsets':[0,76800], "
"'itemsize':80000, "
"'aligned':True}")
assert_equal(np.dtype(eval(str(dt))), dt)
dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
'offsets': [0, 1, 2],
'titles': ['Red pixel', 'Green pixel', 'Blue pixel']})
assert_equal(str(dt),
"[(('Red pixel', 'r'), 'u1'), "
"(('Green pixel', 'g'), 'u1'), "
"(('Blue pixel', 'b'), 'u1')]")
dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
'formats': ['<u4', 'u1', 'u1', 'u1'],
'offsets': [0, 0, 1, 2],
'titles': ['Color', 'Red pixel',
'Green pixel', 'Blue pixel']})
assert_equal(str(dt),
"{'names': ['rgba', 'r', 'g', 'b'],"
" 'formats': ['<u4', 'u1', 'u1', 'u1'],"
" 'offsets': [0, 0, 1, 2],"
" 'titles': ['Color', 'Red pixel', "
"'Green pixel', 'Blue pixel'],"
" 'itemsize': 4}")
dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
'offsets': [0, 2],
'titles': ['Red pixel', 'Blue pixel']})
assert_equal(str(dt),
"{'names': ['r', 'b'],"
" 'formats': ['u1', 'u1'],"
" 'offsets': [0, 2],"
" 'titles': ['Red pixel', 'Blue pixel'],"
" 'itemsize': 3}")
dt = np.dtype([('a', '<m8[D]'), ('b', '<M8[us]')])
assert_equal(str(dt),
"[('a', '<m8[D]'), ('b', '<M8[us]')]")
def test_repr_structured(self):
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))], (3,)),
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
('bright', '>f4', (8, 36))])])
assert_equal(repr(dt),
"dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), "
"('rtile', '>f4', (64, 36))], (3,)), "
"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
"('bright', '>f4', (8, 36))])])")
dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
'offsets': [0, 1, 2],
'titles': ['Red pixel', 'Green pixel', 'Blue pixel']},
align=True)
assert_equal(repr(dt),
"dtype([(('Red pixel', 'r'), 'u1'), "
"(('Green pixel', 'g'), 'u1'), "
"(('Blue pixel', 'b'), 'u1')], align=True)")
def test_repr_structured_not_packed(self):
dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
'formats': ['<u4', 'u1', 'u1', 'u1'],
'offsets': [0, 0, 1, 2],
'titles': ['Color', 'Red pixel',
'Green pixel', 'Blue pixel']}, align=True)
assert_equal(repr(dt),
"dtype({'names': ['rgba', 'r', 'g', 'b'],"
" 'formats': ['<u4', 'u1', 'u1', 'u1'],"
" 'offsets': [0, 0, 1, 2],"
" 'titles': ['Color', 'Red pixel', "
"'Green pixel', 'Blue pixel'],"
" 'itemsize': 4}, align=True)")
dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
'offsets': [0, 2],
'titles': ['Red pixel', 'Blue pixel'],
'itemsize': 4})
assert_equal(repr(dt),
"dtype({'names': ['r', 'b'], "
"'formats': ['u1', 'u1'], "
"'offsets': [0, 2], "
"'titles': ['Red pixel', 'Blue pixel'], "
"'itemsize': 4})")
def test_repr_structured_datetime(self):
dt = np.dtype([('a', '<M8[D]'), ('b', '<m8[us]')])
assert_equal(repr(dt),
"dtype([('a', '<M8[D]'), ('b', '<m8[us]')])")
def test_repr_str_subarray(self):
dt = np.dtype(('<i2', (1,)))
assert_equal(repr(dt), "dtype(('<i2', (1,)))")
assert_equal(str(dt), "('<i2', (1,))")
def test_base_dtype_with_object_type(self):
# Issue gh-2798, should not error.
np.array(['a'], dtype="O").astype(("O", [("name", "O")]))
def test_empty_string_to_object(self):
# Pull request #4722
np.array(["", ""]).astype(object)
def test_void_subclass_unsized(self):
dt = np.dtype(np.record)
assert_equal(repr(dt), "dtype('V')")
assert_equal(str(dt), '|V0')
assert_equal(dt.name, 'record')
def test_void_subclass_sized(self):
dt = np.dtype((np.record, 2))
assert_equal(repr(dt), "dtype('V2')")
assert_equal(str(dt), '|V2')
assert_equal(dt.name, 'record16')
def test_void_subclass_fields(self):
dt = np.dtype((np.record, [('a', '<u2')]))
assert_equal(repr(dt), "dtype((numpy.record, [('a', '<u2')]))")
assert_equal(str(dt), "(numpy.record, [('a', '<u2')])")
assert_equal(dt.name, 'record16')
class TestDtypeAttributeDeletion:
def test_dtype_non_writable_attributes_deletion(self):
dt = np.dtype(np.double)
attr = ["subdtype", "descr", "str", "name", "base", "shape",
"isbuiltin", "isnative", "isalignedstruct", "fields",
"metadata", "hasobject"]
for s in attr:
assert_raises(AttributeError, delattr, dt, s)
def test_dtype_writable_attributes_deletion(self):
dt = np.dtype(np.double)
attr = ["names"]
for s in attr:
assert_raises(AttributeError, delattr, dt, s)
class TestDtypeAttributes:
def test_descr_has_trailing_void(self):
# see gh-6359
dtype = np.dtype({
'names': ['A', 'B'],
'formats': ['f4', 'f4'],
'offsets': [0, 8],
'itemsize': 16})
new_dtype = np.dtype(dtype.descr)
assert_equal(new_dtype.itemsize, 16)
def test_name_dtype_subclass(self):
# Ticket #4357
class user_def_subcls(np.void):
pass
assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls')
def test_zero_stride(self):
arr = np.ones(1, dtype="i8")
arr = np.broadcast_to(arr, 10)
assert arr.strides == (0,)
with pytest.raises(ValueError):
arr.dtype = "i1"
class TestDTypeMakeCanonical:
def check_canonical(self, dtype, canonical):
"""
Check most properties relevant to "canonical" versions of a dtype,
which is mainly native byte order for datatypes supporting this.
The main work is checking structured dtypes with fields, where we
reproduce most of the actual logic used in the C-code.
"""
assert type(dtype) is type(canonical)
# a canonical DType should always have equivalent casting (both ways)
assert np.can_cast(dtype, canonical, casting="equiv")
assert np.can_cast(canonical, dtype, casting="equiv")
# a canonical dtype (and its fields) is always native (checks fields):
assert canonical.isnative
# Check that canonical of canonical is the same (no casting):
assert np.result_type(canonical) == canonical
if not dtype.names:
# The flags currently never change for unstructured dtypes
assert dtype.flags == canonical.flags
return
# Must have the needs-API flag set:
assert dtype.flags & 0b10000
# Check that the fields are identical (including titles):
assert dtype.fields.keys() == canonical.fields.keys()
def aligned_offset(offset, alignment):
# round up offset:
return - (-offset // alignment) * alignment
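# e.g. aligned_offset(5, 4) == 8, since -(-5 // 4) == 2 and 2 * 4 == 8.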
totalsize = 0
max_alignment = 1
for name in dtype.names:
# each field is also canonical:
new_field_descr = canonical.fields[name][0]
self.check_canonical(dtype.fields[name][0], new_field_descr)
# Must have the "inherited" object related flags:
expected = 0b11011 & new_field_descr.flags
assert (canonical.flags & expected) == expected
if canonical.isalignedstruct:
totalsize = aligned_offset(totalsize, new_field_descr.alignment)
max_alignment = max(new_field_descr.alignment, max_alignment)
assert canonical.fields[name][1] == totalsize
# if a title exists, they must match (otherwise empty tuple):
assert dtype.fields[name][2:] == canonical.fields[name][2:]
totalsize += new_field_descr.itemsize
if canonical.isalignedstruct:
totalsize = aligned_offset(totalsize, max_alignment)
assert canonical.itemsize == totalsize
assert canonical.alignment == max_alignment
def test_simple(self):
dt = np.dtype(">i4")
assert np.result_type(dt).isnative
assert np.result_type(dt).num == dt.num
# dtype with empty space:
struct_dt = np.dtype(">i4,<i1,i8,V3")[["f0", "f2"]]
canonical = np.result_type(struct_dt)
assert canonical.itemsize == 4+8
assert canonical.isnative
# aligned struct dtype with empty space:
struct_dt = np.dtype(">i1,<i4,i8,V3", align=True)[["f0", "f2"]]
canonical = np.result_type(struct_dt)
assert canonical.isalignedstruct
assert canonical.itemsize == np.dtype("i8").alignment + 8
assert canonical.isnative
def test_object_flag_not_inherited(self):
# The following dtype still indicates "object", because it is included
# in the inaccessible space (maybe this could change at some point):
arr = np.ones(3, "i,O,i")[["f0", "f2"]]
assert arr.dtype.hasobject
canonical_dt = np.result_type(arr.dtype)
assert not canonical_dt.hasobject
@pytest.mark.slow
@hypothesis.given(dtype=hynp.nested_dtypes())
def test_make_canonical_hypothesis(self, dtype):
canonical = np.result_type(dtype)
self.check_canonical(dtype, canonical)
# result_type with two arguments should always give identical results:
two_arg_result = np.result_type(dtype, dtype)
assert np.can_cast(two_arg_result, canonical, casting="no")
@pytest.mark.slow
@hypothesis.given(
dtype=hypothesis.extra.numpy.array_dtypes(
subtype_strategy=hypothesis.extra.numpy.array_dtypes(),
min_size=5, max_size=10, allow_subarrays=True))
def test_structured(self, dtype):
# Pick 4 of the fields at random. This will leave empty space in the
# dtype (since we do not canonicalize it here).
field_subset = random.sample(dtype.names, k=4)
dtype_with_empty_space = dtype[field_subset]
assert dtype_with_empty_space.itemsize == dtype.itemsize
canonicalized = np.result_type(dtype_with_empty_space)
self.check_canonical(dtype_with_empty_space, canonicalized)
# promotion with two arguments should always give identical results:
two_arg_result = np.promote_types(
dtype_with_empty_space, dtype_with_empty_space)
assert np.can_cast(two_arg_result, canonicalized, casting="no")
# Ensure that we also check an aligned struct (we check the opposite, in
# case hypothesis grows support for `align`). Then repeat the test:
dtype_aligned = np.dtype(dtype.descr, align=not dtype.isalignedstruct)
dtype_with_empty_space = dtype_aligned[field_subset]
assert dtype_with_empty_space.itemsize == dtype_aligned.itemsize
canonicalized = np.result_type(dtype_with_empty_space)
self.check_canonical(dtype_with_empty_space, canonicalized)
# promotion with two arguments should always give identical results:
two_arg_result = np.promote_types(
dtype_with_empty_space, dtype_with_empty_space)
assert np.can_cast(two_arg_result, canonicalized, casting="no")
class TestPickling:
def check_pickling(self, dtype):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
buf = pickle.dumps(dtype, proto)
# The dtype pickling itself pickles `np.dtype`; if it is pickled
# as a singleton, "dtype" should be stored in the buffer:
assert b"_DType_reconstruct" not in buf
assert b"dtype" in buf
pickled = pickle.loads(buf)
assert_equal(pickled, dtype)
assert_equal(pickled.descr, dtype.descr)
if dtype.metadata is not None:
assert_equal(pickled.metadata, dtype.metadata)
# Check the reconstructed dtype is functional
x = np.zeros(3, dtype=dtype)
y = np.zeros(3, dtype=pickled)
assert_equal(x, y)
assert_equal(x[0], y[0])
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
np.compat.unicode, bool])
def test_builtin(self, t):
self.check_pickling(np.dtype(t))
def test_structured(self):
dt = np.dtype(([('a', '>f4', (2, 1)), ('b', '<f8', (1, 3))], (2, 2)))
self.check_pickling(dt)
def test_structured_aligned(self):
dt = np.dtype('i4, i1', align=True)
self.check_pickling(dt)
def test_structured_unaligned(self):
dt = np.dtype('i4, i1', align=False)
self.check_pickling(dt)
def test_structured_padded(self):
dt = np.dtype({
'names': ['A', 'B'],
'formats': ['f4', 'f4'],
'offsets': [0, 8],
'itemsize': 16})
self.check_pickling(dt)
def test_structured_titles(self):
dt = np.dtype({'names': ['r', 'b'],
'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
self.check_pickling(dt)
@pytest.mark.parametrize('base', ['m8', 'M8'])
@pytest.mark.parametrize('unit', ['', 'Y', 'M', 'W', 'D', 'h', 'm', 's',
'ms', 'us', 'ns', 'ps', 'fs', 'as'])
def test_datetime(self, base, unit):
dt = np.dtype('%s[%s]' % (base, unit) if unit else base)
self.check_pickling(dt)
if unit:
dt = np.dtype('%s[7%s]' % (base, unit))
self.check_pickling(dt)
def test_metadata(self):
dt = np.dtype(int, metadata={'datum': 1})
self.check_pickling(dt)
@pytest.mark.parametrize("DType",
[type(np.dtype(t)) for t in np.typecodes['All']] +
[np.dtype(rational), np.dtype])
def test_pickle_types(self, DType):
# Check that DTypes (the classes/types) roundtrip when pickling
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
roundtrip_DType = pickle.loads(pickle.dumps(DType, proto))
assert roundtrip_DType is DType
class TestPromotion:
"""Test cases related to more complex DType promotions. Further promotion
tests are defined in `test_numeric.py`.
"""
@pytest.mark.parametrize(["other", "expected"],
[(2**16-1, np.complex64),
(2**32-1, np.complex128),
(np.float16(2), np.complex64),
(np.float32(2), np.complex64),
(np.longdouble(2), np.complex64),
# Base of the double value to sidestep any rounding issues:
(np.longdouble(np.nextafter(1.7e308, 0.)), np.complex128),
# Additionally use "nextafter" so the cast can't round down:
(np.longdouble(np.nextafter(1.7e308, np.inf)), np.clongdouble),
# repeat for complex scalars:
(np.complex64(2), np.complex64),
(np.clongdouble(2), np.complex64),
# Base of the double value to sidestep any rounding issues:
(np.clongdouble(np.nextafter(1.7e308, 0.) * 1j), np.complex128),
# Additionally use "nextafter" so the cast can't round down:
(np.clongdouble(np.nextafter(1.7e308, np.inf)), np.clongdouble),
])
def test_complex_other_value_based(self, other, expected):
# This would change if we modify the value based promotion
min_complex = np.dtype(np.complex64)
res = np.result_type(other, min_complex)
assert res == expected
# Check the same for a simple ufunc call that uses the same logic:
res = np.minimum(other, np.ones(3, dtype=min_complex)).dtype
assert res == expected
@pytest.mark.parametrize(["other", "expected"],
[(np.bool_, np.complex128),
(np.int64, np.complex128),
(np.float16, np.complex64),
(np.float32, np.complex64),
(np.float64, np.complex128),
(np.longdouble, np.clongdouble),
(np.complex64, np.complex64),
(np.complex128, np.complex128),
(np.clongdouble, np.clongdouble),
])
def test_complex_scalar_value_based(self, other, expected):
# This would change if we modify the value based promotion
complex_scalar = 1j
res = np.result_type(other, complex_scalar)
assert res == expected
# Check the same for a simple ufunc call that uses the same logic:
res = np.minimum(np.ones(3, dtype=other), complex_scalar).dtype
assert res == expected
def test_complex_pyscalar_promote_rational(self):
with pytest.raises(TypeError,
match=r".* do not have a common DType"):
np.result_type(1j, rational)
with pytest.raises(TypeError,
match=r".* no common DType exists for the given inputs"):
np.result_type(1j, rational(1, 2))
@pytest.mark.parametrize("val", [2, 2**32, 2**63, 2**64, 2*100])
def test_python_integer_promotion(self, val):
# If we only pass scalars (mainly Python ones!), the result must take
# into account that the integer may be considered int32, int64, uint64,
# or object depending on the input value. So test those paths!
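# For instance, 2 fits the default integer, 2**63 requires uint64,
# and 2**100 only fits an object array.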
expected_dtype = np.result_type(np.array(val).dtype, np.array(0).dtype)
assert np.result_type(val, 0) == expected_dtype
# For completeness sake, also check with a NumPy scalar as second arg:
assert np.result_type(val, np.int8(0)) == expected_dtype
@pytest.mark.parametrize(["other", "expected"],
[(1, rational), (1., np.float64)])
def test_float_int_pyscalar_promote_rational(self, other, expected):
# Note that rationals are a bit awkward: they promote with float64
# or default ints, but not with float16 or uint8/int8 (which looks
# inconsistent here).
with pytest.raises(TypeError,
match=r".* do not have a common DType"):
np.result_type(other, rational)
assert np.result_type(other, rational(1, 2)) == expected
@pytest.mark.parametrize(["dtypes", "expected"], [
# These promotions are not associative/commutative:
([np.uint16, np.int16, np.float16], np.float32),
([np.uint16, np.int8, np.float16], np.float32),
([np.uint8, np.int16, np.float16], np.float32),
# The following promotions are not ambiguous, but cover code
# paths of abstract promotion (no particular logic being tested)
([1, 1, np.float64], np.float64),
([1, 1., np.complex128], np.complex128),
([1, 1j, np.float64], np.complex128),
([1., 1., np.int64], np.float64),
([1., 1j, np.float64], np.complex128),
([1j, 1j, np.float64], np.complex128),
([1, True, np.bool_], np.int_),
])
def test_permutations_do_not_influence_result(self, dtypes, expected):
# Tests that most permutations do not influence the result. In the
# above, some uint and int combinations promote to a larger integer
# type, which would then promote to a larger than necessary float.
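# Pairwise, uint16 with int16 promotes to int32, and int32 with
# float16 to float64 -- larger than the float32 expected when
# promoting all three together.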
for perm in permutations(dtypes):
assert np.result_type(*perm) == expected
def test_rational_dtype():
# test for bug gh-5719
a = np.array([1111], dtype=rational).astype
assert_raises(OverflowError, a, 'int8')
# test that dtype detection finds user-defined types
x = rational(1)
assert_equal(np.array([x,x]).dtype, np.dtype(rational))
def test_dtypes_are_true():
# test for gh-6294
assert bool(np.dtype('f8'))
assert bool(np.dtype('i8'))
assert bool(np.dtype([('a', 'i8'), ('b', 'f4')]))
def test_invalid_dtype_string():
# test for gh-10440
assert_raises(TypeError, np.dtype, 'f8,i8,[f8,i8]')
assert_raises(TypeError, np.dtype, u'Fl\xfcgel')
def test_keyword_argument():
# test for https://github.com/numpy/numpy/pull/16574#issuecomment-642660971
assert np.dtype(dtype=np.float64) == np.dtype(np.float64)
def test_ulong_dtype():
# test for gh-21063
assert np.dtype("ulong") == np.dtype(np.uint)
class TestFromDTypeAttribute:
def test_simple(self):
class dt:
dtype = np.dtype("f8")
assert np.dtype(dt) == np.float64
assert np.dtype(dt()) == np.float64
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
def test_recursion(self):
class dt:
pass
dt.dtype = dt
with pytest.raises(RecursionError):
np.dtype(dt)
dt_instance = dt()
dt_instance.dtype = dt
with pytest.raises(RecursionError):
np.dtype(dt_instance)
def test_void_subtype(self):
class dt(np.void):
# This code path was previously fully untested, so it is unclear
# what it should be useful for. Note that if np.void is used,
# numpy will think we are deallocating a base type [1.17, 2019-02].
dtype = np.dtype("f,f")
np.dtype(dt)
np.dtype(dt(1))
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
def test_void_subtype_recursion(self):
class vdt(np.void):
pass
vdt.dtype = vdt
with pytest.raises(RecursionError):
np.dtype(vdt)
with pytest.raises(RecursionError):
np.dtype(vdt(1))
class TestDTypeClasses:
@pytest.mark.parametrize("dtype", list(np.typecodes['All']) + [rational])
def test_basic_dtypes_subclass_properties(self, dtype):
# Note: Except for the isinstance and type checks, these attributes
# are considered currently private and may change.
dtype = np.dtype(dtype)
assert isinstance(dtype, np.dtype)
assert type(dtype) is not np.dtype
assert type(dtype).__name__ == f"dtype[{dtype.type.__name__}]"
assert type(dtype).__module__ == "numpy"
assert not type(dtype)._abstract
# the flexible dtypes and datetime/timedelta have additional parameters
# which are more than just storage information, these would need to be
# given when creating a dtype:
parametric = (np.void, np.str_, np.bytes_, np.datetime64, np.timedelta64)
if dtype.type not in parametric:
assert not type(dtype)._parametric
assert type(dtype)() is dtype
else:
assert type(dtype)._parametric
with assert_raises(TypeError):
type(dtype)()
def test_dtype_superclass(self):
assert type(np.dtype) is not type
assert isinstance(np.dtype, type)
assert type(np.dtype).__name__ == "_DTypeMeta"
assert type(np.dtype).__module__ == "numpy"
assert np.dtype._abstract
class TestFromCTypes:
@staticmethod
def check(ctype, dtype):
dtype = np.dtype(dtype)
assert_equal(np.dtype(ctype), dtype)
assert_equal(np.dtype(ctype()), dtype)
def test_array(self):
c8 = ctypes.c_uint8
self.check( 3 * c8, (np.uint8, (3,)))
self.check( 1 * c8, (np.uint8, (1,)))
self.check( 0 * c8, (np.uint8, (0,)))
self.check(1 * (3 * c8), ((np.uint8, (3,)), (1,)))
self.check(3 * (1 * c8), ((np.uint8, (1,)), (3,)))
def test_padded_structure(self):
class PaddedStruct(ctypes.Structure):
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16)
]
expected = np.dtype([
('a', np.uint8),
('b', np.uint16)
], align=True)
self.check(PaddedStruct, expected)
def test_bit_fields(self):
class BitfieldStruct(ctypes.Structure):
_fields_ = [
('a', ctypes.c_uint8, 7),
('b', ctypes.c_uint8, 1)
]
assert_raises(TypeError, np.dtype, BitfieldStruct)
assert_raises(TypeError, np.dtype, BitfieldStruct())
def test_pointer(self):
p_uint8 = ctypes.POINTER(ctypes.c_uint8)
assert_raises(TypeError, np.dtype, p_uint8)
def test_void_pointer(self):
self.check(ctypes.c_void_p, np.uintp)
def test_union(self):
class Union(ctypes.Union):
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16),
]
expected = np.dtype(dict(
names=['a', 'b'],
formats=[np.uint8, np.uint16],
offsets=[0, 0],
itemsize=2
))
self.check(Union, expected)
def test_union_with_struct_packed(self):
class Struct(ctypes.Structure):
_pack_ = 1
_fields_ = [
('one', ctypes.c_uint8),
('two', ctypes.c_uint32)
]
class Union(ctypes.Union):
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16),
('c', ctypes.c_uint32),
('d', Struct),
]
expected = np.dtype(dict(
names=['a', 'b', 'c', 'd'],
formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
offsets=[0, 0, 0, 0],
itemsize=ctypes.sizeof(Union)
))
self.check(Union, expected)
def test_union_packed(self):
class Struct(ctypes.Structure):
_fields_ = [
('one', ctypes.c_uint8),
('two', ctypes.c_uint32)
]
_pack_ = 1
class Union(ctypes.Union):
_pack_ = 1
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16),
('c', ctypes.c_uint32),
('d', Struct),
]
expected = np.dtype(dict(
names=['a', 'b', 'c', 'd'],
formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
offsets=[0, 0, 0, 0],
itemsize=ctypes.sizeof(Union)
))
self.check(Union, expected)
def test_packed_structure(self):
class PackedStructure(ctypes.Structure):
_pack_ = 1
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16)
]
expected = np.dtype([
('a', np.uint8),
('b', np.uint16)
])
self.check(PackedStructure, expected)
def test_large_packed_structure(self):
class PackedStructure(ctypes.Structure):
_pack_ = 2
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16),
('c', ctypes.c_uint8),
('d', ctypes.c_uint16),
('e', ctypes.c_uint32),
('f', ctypes.c_uint32),
('g', ctypes.c_uint8)
]
expected = np.dtype(dict(
formats=[np.uint8, np.uint16, np.uint8, np.uint16, np.uint32, np.uint32, np.uint8 ],
offsets=[0, 2, 4, 6, 8, 12, 16],
names=['a', 'b', 'c', 'd', 'e', 'f', 'g'],
itemsize=18))
self.check(PackedStructure, expected)
def test_big_endian_structure_packed(self):
class BigEndStruct(ctypes.BigEndianStructure):
_fields_ = [
('one', ctypes.c_uint8),
('two', ctypes.c_uint32)
]
_pack_ = 1
expected = np.dtype([('one', 'u1'), ('two', '>u4')])
self.check(BigEndStruct, expected)
def test_little_endian_structure_packed(self):
class LittleEndStruct(ctypes.LittleEndianStructure):
_fields_ = [
('one', ctypes.c_uint8),
('two', ctypes.c_uint32)
]
_pack_ = 1
expected = np.dtype([('one', 'u1'), ('two', '<u4')])
self.check(LittleEndStruct, expected)
def test_little_endian_structure(self):
class PaddedStruct(ctypes.LittleEndianStructure):
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16)
]
expected = np.dtype([
('a', '<B'),
('b', '<H')
], align=True)
self.check(PaddedStruct, expected)
def test_big_endian_structure(self):
class PaddedStruct(ctypes.BigEndianStructure):
_fields_ = [
('a', ctypes.c_uint8),
('b', ctypes.c_uint16)
]
expected = np.dtype([
('a', '>B'),
('b', '>H')
], align=True)
self.check(PaddedStruct, expected)
def test_simple_endian_types(self):
self.check(ctypes.c_uint16.__ctype_le__, np.dtype('<u2'))
self.check(ctypes.c_uint16.__ctype_be__, np.dtype('>u2'))
self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1'))
self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1'))
all_types = set(np.typecodes['All'])
all_pairs = permutations(all_types, 2)
@pytest.mark.parametrize("pair", all_pairs)
def test_pairs(self, pair):
"""
Check that np.dtype('x,y') matches [np.dtype('x'), np.dtype('y')]
Example: np.dtype('d,I') -> dtype([('f0', '<f8'), ('f1', '<u4')])
"""
# gh-5645: check that np.dtype('i,L') can be used
pair_type = np.dtype('{},{}'.format(*pair))
expected = np.dtype([('f0', pair[0]), ('f1', pair[1])])
assert_equal(pair_type, expected)
class TestUserDType:
@pytest.mark.leaks_references(reason="dynamically creates custom dtype.")
def test_custom_structured_dtype(self):
class mytype:
pass
blueprint = np.dtype([("field", object)])
dt = create_custom_field_dtype(blueprint, mytype, 0)
assert dt.type == mytype
# We cannot (currently) *create* this dtype with `np.dtype` because
# mytype does not inherit from `np.generic`. This seems like an
# unnecessary restriction, but one that has been around forever:
assert np.dtype(mytype) == np.dtype("O")
def test_custom_structured_dtype_errors(self):
class mytype:
pass
blueprint = np.dtype([("field", object)])
with pytest.raises(ValueError):
# Tests what happens if fields are unset during creation
# which is currently rejected due to the containing object
# (see PyArray_RegisterDataType).
create_custom_field_dtype(blueprint, mytype, 1)
with pytest.raises(RuntimeError):
# Tests that a dtype must have its type field set up to np.dtype
# (or, in this case, to a builtin scalar-type instance).
create_custom_field_dtype(blueprint, mytype, 2)
@pytest.mark.skipif(sys.version_info < (3, 9), reason="Requires python 3.9")
class TestClassGetItem:
def test_dtype(self) -> None:
alias = np.dtype[Any]
assert isinstance(alias, types.GenericAlias)
assert alias.__origin__ is np.dtype
@pytest.mark.parametrize("code", np.typecodes["All"])
def test_dtype_subclass(self, code: str) -> None:
cls = type(np.dtype(code))
alias = cls[Any]
assert isinstance(alias, types.GenericAlias)
assert alias.__origin__ is cls
@pytest.mark.parametrize("arg_len", range(4))
def test_subscript_tuple(self, arg_len: int) -> None:
arg_tup = (Any,) * arg_len
if arg_len == 1:
assert np.dtype[arg_tup]
else:
with pytest.raises(TypeError):
np.dtype[arg_tup]
def test_subscript_scalar(self) -> None:
assert np.dtype[Any]
def test_result_type_integers_and_unitless_timedelta64():
# Regression test for gh-20077. The following call of `result_type`
# would cause a seg. fault.
td = np.timedelta64(4)
result = np.result_type(0, td)
assert_dtype_equal(result, td.dtype)
@pytest.mark.skipif(sys.version_info >= (3, 9), reason="Requires python 3.8")
def test_class_getitem_38() -> None:
match = "Type subscription requires python >= 3.9"
with pytest.raises(TypeError, match=match):
np.dtype[Any]
| 70,825 | Python | 38.545505 | 96 | 0.518532 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_numerictypes.py | import sys
import itertools
import pytest
import numpy as np
from numpy.testing import assert_, assert_equal, assert_raises, IS_PYPY
# This is the structure of the table used for plain objects:
#
# +-+-+-+
# |x|y|z|
# +-+-+-+
# Structure of a plain array description:
Pdescr = [
('x', 'i4', (2,)),
('y', 'f8', (2, 2)),
('z', 'u1')]
# A plain list of tuples with values for testing:
PbufferT = [
# x y z
([3, 2], [[6., 4.], [6., 4.]], 8),
([4, 3], [[7., 5.], [7., 5.]], 9),
]
# This is the structure of the table used for nested objects (DON'T PANIC!):
#
# +-+---------------------------------+-----+----------+-+-+
# |x|Info |color|info |y|z|
# | +-----+--+----------------+----+--+ +----+-----+ | |
# | |value|y2|Info2 |name|z2| |Name|Value| | |
# | | | +----+-----+--+--+ | | | | | | |
# | | | |name|value|y3|z3| | | | | | | |
# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+
#
# The corresponding nested array description:
Ndescr = [
('x', 'i4', (2,)),
('Info', [
('value', 'c16'),
('y2', 'f8'),
('Info2', [
('name', 'S2'),
('value', 'c16', (2,)),
('y3', 'f8', (2,)),
('z3', 'u4', (2,))]),
('name', 'S2'),
('z2', 'b1')]),
('color', 'S2'),
('info', [
('Name', 'U8'),
('Value', 'c16')]),
('y', 'f8', (2, 2)),
('z', 'u1')]
NbufferT = [
# x Info color info y z
# value y2 Info2 name z2 Name Value
# name value y3 z3
([3, 2], (6j, 6., (b'nn', [6j, 4j], [6., 4.], [1, 2]), b'NN', True), b'cc', (u'NN', 6j), [[6., 4.], [6., 4.]], 8),
([4, 3], (7j, 7., (b'oo', [7j, 5j], [7., 5.], [2, 1]), b'OO', False), b'dd', (u'OO', 7j), [[7., 5.], [7., 5.]], 9),
]
byteorder = {'little':'<', 'big':'>'}[sys.byteorder]
def normalize_descr(descr):
"Normalize a description adding the platform byteorder."
out = []
for item in descr:
dtype = item[1]
if isinstance(dtype, str):
if dtype[0] not in ['|', '<', '>']:
onebyte = dtype[1:] == "1"
if onebyte or dtype[0] in ['S', 'V', 'b']:
dtype = "|" + dtype
else:
dtype = byteorder + dtype
if len(item) > 2 and np.prod(item[2]) > 1:
nitem = (item[0], dtype, item[2])
else:
nitem = (item[0], dtype)
out.append(nitem)
elif isinstance(dtype, list):
l = normalize_descr(dtype)
out.append((item[0], l))
else:
raise ValueError("Expected a str or list and got %s" %
(type(item)))
return out
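# A quick illustration (assuming a little-endian platform):
# normalize_descr([('x', 'i4', (2,)), ('z', 'u1')])
# -> [('x', '<i4', (2,)), ('z', '|u1')]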
############################################################
# Creation tests
############################################################
class CreateZeros:
"""Check the creation of heterogeneous arrays zero-valued"""
def test_zeros0D(self):
"""Check creation of 0-dimensional objects"""
h = np.zeros((), dtype=self._descr)
assert_(normalize_descr(self._descr) == h.dtype.descr)
assert_(h.dtype.fields['x'][0].name[:4] == 'void')
assert_(h.dtype.fields['x'][0].char == 'V')
assert_(h.dtype.fields['x'][0].type == np.void)
# A small check that data is ok
assert_equal(h['z'], np.zeros((), dtype='u1'))
def test_zerosSD(self):
"""Check creation of single-dimensional objects"""
h = np.zeros((2,), dtype=self._descr)
assert_(normalize_descr(self._descr) == h.dtype.descr)
assert_(h.dtype['y'].name[:4] == 'void')
assert_(h.dtype['y'].char == 'V')
assert_(h.dtype['y'].type == np.void)
# A small check that data is ok
assert_equal(h['z'], np.zeros((2,), dtype='u1'))
def test_zerosMD(self):
"""Check creation of multi-dimensional objects"""
h = np.zeros((2, 3), dtype=self._descr)
assert_(normalize_descr(self._descr) == h.dtype.descr)
assert_(h.dtype['z'].name == 'uint8')
assert_(h.dtype['z'].char == 'B')
assert_(h.dtype['z'].type == np.uint8)
# A small check that data is ok
assert_equal(h['z'], np.zeros((2, 3), dtype='u1'))
class TestCreateZerosPlain(CreateZeros):
"""Check the creation of heterogeneous arrays zero-valued (plain)"""
_descr = Pdescr
class TestCreateZerosNested(CreateZeros):
"""Check the creation of heterogeneous arrays zero-valued (nested)"""
_descr = Ndescr
class CreateValues:
"""Check the creation of heterogeneous arrays with values"""
def test_tuple(self):
"""Check creation from tuples"""
h = np.array(self._buffer, dtype=self._descr)
assert_(normalize_descr(self._descr) == h.dtype.descr)
if self.multiple_rows:
assert_(h.shape == (2,))
else:
assert_(h.shape == ())
def test_list_of_tuple(self):
"""Check creation from list of tuples"""
h = np.array([self._buffer], dtype=self._descr)
assert_(normalize_descr(self._descr) == h.dtype.descr)
if self.multiple_rows:
assert_(h.shape == (1, 2))
else:
assert_(h.shape == (1,))
def test_list_of_list_of_tuple(self):
"""Check creation from list of list of tuples"""
h = np.array([[self._buffer]], dtype=self._descr)
assert_(normalize_descr(self._descr) == h.dtype.descr)
if self.multiple_rows:
assert_(h.shape == (1, 1, 2))
else:
assert_(h.shape == (1, 1))
class TestCreateValuesPlainSingle(CreateValues):
"""Check the creation of heterogeneous arrays (plain, single row)"""
_descr = Pdescr
multiple_rows = 0
_buffer = PbufferT[0]
class TestCreateValuesPlainMultiple(CreateValues):
"""Check the creation of heterogeneous arrays (plain, multiple rows)"""
_descr = Pdescr
multiple_rows = 1
_buffer = PbufferT
class TestCreateValuesNestedSingle(CreateValues):
"""Check the creation of heterogeneous arrays (nested, single row)"""
_descr = Ndescr
multiple_rows = 0
_buffer = NbufferT[0]
class TestCreateValuesNestedMultiple(CreateValues):
"""Check the creation of heterogeneous arrays (nested, multiple rows)"""
_descr = Ndescr
multiple_rows = 1
_buffer = NbufferT
############################################################
# Reading tests
############################################################
class ReadValuesPlain:
"""Check the reading of values in heterogeneous arrays (plain)"""
def test_access_fields(self):
h = np.array(self._buffer, dtype=self._descr)
if not self.multiple_rows:
assert_(h.shape == ())
assert_equal(h['x'], np.array(self._buffer[0], dtype='i4'))
assert_equal(h['y'], np.array(self._buffer[1], dtype='f8'))
assert_equal(h['z'], np.array(self._buffer[2], dtype='u1'))
else:
assert_(len(h) == 2)
assert_equal(h['x'], np.array([self._buffer[0][0],
self._buffer[1][0]], dtype='i4'))
assert_equal(h['y'], np.array([self._buffer[0][1],
self._buffer[1][1]], dtype='f8'))
assert_equal(h['z'], np.array([self._buffer[0][2],
self._buffer[1][2]], dtype='u1'))
class TestReadValuesPlainSingle(ReadValuesPlain):
"""Check the creation of heterogeneous arrays (plain, single row)"""
_descr = Pdescr
multiple_rows = 0
_buffer = PbufferT[0]
class TestReadValuesPlainMultiple(ReadValuesPlain):
"""Check the values of heterogeneous arrays (plain, multiple rows)"""
_descr = Pdescr
multiple_rows = 1
_buffer = PbufferT
class ReadValuesNested:
"""Check the reading of values in heterogeneous arrays (nested)"""
def test_access_top_fields(self):
"""Check reading the top fields of a nested array"""
h = np.array(self._buffer, dtype=self._descr)
if not self.multiple_rows:
assert_(h.shape == ())
assert_equal(h['x'], np.array(self._buffer[0], dtype='i4'))
assert_equal(h['y'], np.array(self._buffer[4], dtype='f8'))
assert_equal(h['z'], np.array(self._buffer[5], dtype='u1'))
else:
assert_(len(h) == 2)
assert_equal(h['x'], np.array([self._buffer[0][0],
self._buffer[1][0]], dtype='i4'))
assert_equal(h['y'], np.array([self._buffer[0][4],
self._buffer[1][4]], dtype='f8'))
assert_equal(h['z'], np.array([self._buffer[0][5],
self._buffer[1][5]], dtype='u1'))
def test_nested1_accessors(self):
"""Check reading the nested fields of a nested array (1st level)"""
h = np.array(self._buffer, dtype=self._descr)
if not self.multiple_rows:
assert_equal(h['Info']['value'],
np.array(self._buffer[1][0], dtype='c16'))
assert_equal(h['Info']['y2'],
np.array(self._buffer[1][1], dtype='f8'))
assert_equal(h['info']['Name'],
np.array(self._buffer[3][0], dtype='U2'))
assert_equal(h['info']['Value'],
np.array(self._buffer[3][1], dtype='c16'))
else:
assert_equal(h['Info']['value'],
np.array([self._buffer[0][1][0],
self._buffer[1][1][0]],
dtype='c16'))
assert_equal(h['Info']['y2'],
np.array([self._buffer[0][1][1],
self._buffer[1][1][1]],
dtype='f8'))
assert_equal(h['info']['Name'],
np.array([self._buffer[0][3][0],
self._buffer[1][3][0]],
dtype='U2'))
assert_equal(h['info']['Value'],
np.array([self._buffer[0][3][1],
self._buffer[1][3][1]],
dtype='c16'))
def test_nested2_accessors(self):
"""Check reading the nested fields of a nested array (2nd level)"""
h = np.array(self._buffer, dtype=self._descr)
if not self.multiple_rows:
assert_equal(h['Info']['Info2']['value'],
np.array(self._buffer[1][2][1], dtype='c16'))
assert_equal(h['Info']['Info2']['z3'],
np.array(self._buffer[1][2][3], dtype='u4'))
else:
assert_equal(h['Info']['Info2']['value'],
np.array([self._buffer[0][1][2][1],
self._buffer[1][1][2][1]],
dtype='c16'))
assert_equal(h['Info']['Info2']['z3'],
np.array([self._buffer[0][1][2][3],
self._buffer[1][1][2][3]],
dtype='u4'))
def test_nested1_descriptor(self):
"""Check access nested descriptors of a nested array (1st level)"""
h = np.array(self._buffer, dtype=self._descr)
assert_(h.dtype['Info']['value'].name == 'complex128')
assert_(h.dtype['Info']['y2'].name == 'float64')
assert_(h.dtype['info']['Name'].name == 'str256')
assert_(h.dtype['info']['Value'].name == 'complex128')
def test_nested2_descriptor(self):
"""Check access nested descriptors of a nested array (2nd level)"""
h = np.array(self._buffer, dtype=self._descr)
assert_(h.dtype['Info']['Info2']['value'].name == 'void256')
assert_(h.dtype['Info']['Info2']['z3'].name == 'void64')
class TestReadValuesNestedSingle(ReadValuesNested):
"""Check the values of heterogeneous arrays (nested, single row)"""
_descr = Ndescr
multiple_rows = False
_buffer = NbufferT[0]
class TestReadValuesNestedMultiple(ReadValuesNested):
"""Check the values of heterogeneous arrays (nested, multiple rows)"""
_descr = Ndescr
multiple_rows = True
_buffer = NbufferT
class TestEmptyField:
def test_assign(self):
a = np.arange(10, dtype=np.float32)
a.dtype = [("int", "<0i4"), ("float", "<2f4")]
assert_(a['int'].shape == (5, 0))
assert_(a['float'].shape == (5, 2))
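# Editor's sketch (not part of the original suite; names are hypothetical):
# the buffer arithmetic behind TestEmptyField above. The compound dtype has
# itemsize 0*4 + 2*4 = 8 bytes, so the 40-byte float32 buffer reinterprets
# as 5 records, and the zero-width "int" field contributes an axis of
# length 0.
def _empty_field_layout_sketch():
    a = np.arange(10, dtype=np.float32)  # 10 * 4 = 40 bytes
    new_dtype = np.dtype([("int", "<0i4"), ("float", "<2f4")])
    assert new_dtype.itemsize == 8  # the zero-width field adds no bytes
    a.dtype = new_dtype
    assert a.shape == (5,)  # 40 bytes / 8 bytes per record
    assert a["int"].shape == (5, 0)
    assert a["float"].shape == (5, 2)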
class TestCommonType:
def test_scalar_loses1(self):
res = np.find_common_type(['f4', 'f4', 'i2'], ['f8'])
assert_(res == 'f4')
def test_scalar_loses2(self):
res = np.find_common_type(['f4', 'f4'], ['i8'])
assert_(res == 'f4')
def test_scalar_wins(self):
res = np.find_common_type(['f4', 'f4', 'i2'], ['c8'])
assert_(res == 'c8')
def test_scalar_wins2(self):
res = np.find_common_type(['u4', 'i4', 'i4'], ['f4'])
assert_(res == 'f8')
def test_scalar_wins3(self): # doesn't go up to 'f16' on purpose
res = np.find_common_type(['u8', 'i8', 'i8'], ['f8'])
assert_(res == 'f8')
class TestMultipleFields:
def setup_method(self):
self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8')
def _bad_call(self):
return self.ary['f0', 'f1']
def test_no_tuple(self):
assert_raises(IndexError, self._bad_call)
def test_return(self):
res = self.ary[['f0', 'f2']].tolist()
assert_(res == [(1, 3), (5, 7)])
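# Editor's sketch (illustrative, not original test code): multi-field
# indexing requires a *list* of field names; a bare tuple such as
# ary['f0', 'f1'] is parsed as a multi-dimensional index instead, which is
# exactly what test_no_tuple above asserts.
def _multiple_fields_sketch():
    ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8')
    sub = ary[['f0', 'f2']]  # a list selects the named fields
    assert sub.tolist() == [(1, 3), (5, 7)]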
class TestIsSubDType:
# scalar types can be promoted into dtypes
wrappers = [np.dtype, lambda x: x]
def test_both_abstract(self):
assert_(np.issubdtype(np.floating, np.inexact))
assert_(not np.issubdtype(np.inexact, np.floating))
def test_same(self):
for cls in (np.float32, np.int32):
for w1, w2 in itertools.product(self.wrappers, repeat=2):
assert_(np.issubdtype(w1(cls), w2(cls)))
def test_subclass(self):
# note we cannot promote floating to a dtype, as it would turn into a
# concrete type
for w in self.wrappers:
assert_(np.issubdtype(w(np.float32), np.floating))
assert_(np.issubdtype(w(np.float64), np.floating))
def test_subclass_backwards(self):
for w in self.wrappers:
assert_(not np.issubdtype(np.floating, w(np.float32)))
assert_(not np.issubdtype(np.floating, w(np.float64)))
def test_sibling_class(self):
for w1, w2 in itertools.product(self.wrappers, repeat=2):
assert_(not np.issubdtype(w1(np.float32), w2(np.float64)))
assert_(not np.issubdtype(w1(np.float64), w2(np.float32)))
def test_nondtype_nonscalartype(self):
# See gh-14619 and gh-9505 which introduced the deprecation to fix
# this. These tests are directly taken from gh-9505
assert not np.issubdtype(np.float32, 'float64')
assert not np.issubdtype(np.float32, 'f8')
assert not np.issubdtype(np.int32, str)
assert not np.issubdtype(np.int32, 'int64')
assert not np.issubdtype(np.str_, 'void')
# for the following the correct spellings are
# np.integer, np.floating, or np.complexfloating respectively:
assert not np.issubdtype(np.int8, int) # np.int8 is never np.int_
assert not np.issubdtype(np.float32, float)
assert not np.issubdtype(np.complex64, complex)
assert not np.issubdtype(np.float32, "float")
assert not np.issubdtype(np.float64, "f")
# Test the same for the correct first datatype and abstract one
# in the case of int, float, complex:
assert np.issubdtype(np.float64, 'float64')
assert np.issubdtype(np.float64, 'f8')
assert np.issubdtype(np.str_, str)
assert np.issubdtype(np.int64, 'int64')
assert np.issubdtype(np.void, 'void')
assert np.issubdtype(np.int8, np.integer)
assert np.issubdtype(np.float32, np.floating)
assert np.issubdtype(np.complex64, np.complexfloating)
assert np.issubdtype(np.float64, "float")
assert np.issubdtype(np.float32, "f")
class TestSctypeDict:
def test_longdouble(self):
assert_(np.sctypeDict['f8'] is not np.longdouble)
assert_(np.sctypeDict['c16'] is not np.clongdouble)
def test_ulong(self):
# Test that 'ulong' behaves like 'long'. np.sctypeDict['long'] is an
# alias for np.int_, but np.long is not supported for historical
# reasons (gh-21063)
assert_(np.sctypeDict['ulong'] is np.uint)
assert_(not hasattr(np, 'ulong'))
class TestBitName:
def test_abstract(self):
assert_raises(ValueError, np.core.numerictypes.bitname, np.floating)
class TestMaximumSctype:
# note that parametrizing with sctype['int'] and similar would skip types
# with the same size (gh-11923)
@pytest.mark.parametrize('t', [np.byte, np.short, np.intc, np.int_, np.longlong])
def test_int(self, t):
assert_equal(np.maximum_sctype(t), np.sctypes['int'][-1])
@pytest.mark.parametrize('t', [np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong])
def test_uint(self, t):
assert_equal(np.maximum_sctype(t), np.sctypes['uint'][-1])
@pytest.mark.parametrize('t', [np.half, np.single, np.double, np.longdouble])
def test_float(self, t):
assert_equal(np.maximum_sctype(t), np.sctypes['float'][-1])
@pytest.mark.parametrize('t', [np.csingle, np.cdouble, np.clongdouble])
def test_complex(self, t):
assert_equal(np.maximum_sctype(t), np.sctypes['complex'][-1])
@pytest.mark.parametrize('t', [np.bool_, np.object_, np.unicode_, np.bytes_, np.void])
def test_other(self, t):
assert_equal(np.maximum_sctype(t), t)
class Test_sctype2char:
# This function is old enough that we're really just documenting the quirks
# at this point.
def test_scalar_type(self):
assert_equal(np.sctype2char(np.double), 'd')
assert_equal(np.sctype2char(np.int_), 'l')
assert_equal(np.sctype2char(np.unicode_), 'U')
assert_equal(np.sctype2char(np.bytes_), 'S')
def test_other_type(self):
assert_equal(np.sctype2char(float), 'd')
assert_equal(np.sctype2char(list), 'O')
assert_equal(np.sctype2char(np.ndarray), 'O')
def test_third_party_scalar_type(self):
from numpy.core._rational_tests import rational
assert_raises(KeyError, np.sctype2char, rational)
assert_raises(KeyError, np.sctype2char, rational(1))
def test_array_instance(self):
assert_equal(np.sctype2char(np.array([1.0, 2.0])), 'd')
def test_abstract_type(self):
assert_raises(KeyError, np.sctype2char, np.floating)
def test_non_type(self):
assert_raises(ValueError, np.sctype2char, 1)
@pytest.mark.parametrize("rep, expected", [
(np.int32, True),
(list, False),
(1.1, False),
(str, True),
(np.dtype(np.float64), True),
(np.dtype((np.int16, (3, 4))), True),
(np.dtype([('a', np.int8)]), True),
])
def test_issctype(rep, expected):
# ensure proper identification of scalar
# data-types by issctype()
actual = np.issctype(rep)
assert_equal(actual, expected)
@pytest.mark.skipif(sys.flags.optimize > 1,
reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1")
@pytest.mark.xfail(IS_PYPY,
reason="PyPy cannot modify tp_doc after PyType_Ready")
class TestDocStrings:
def test_platform_dependent_aliases(self):
if np.int64 is np.int_:
assert_('int64' in np.int_.__doc__)
elif np.int64 is np.longlong:
assert_('int64' in np.longlong.__doc__)
class TestScalarTypeNames:
# gh-9799
numeric_types = [
np.byte, np.short, np.intc, np.int_, np.longlong,
np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong,
np.half, np.single, np.double, np.longdouble,
np.csingle, np.cdouble, np.clongdouble,
]
def test_names_are_unique(self):
# none of the above may be aliases for each other
assert len(set(self.numeric_types)) == len(self.numeric_types)
# names must be unique
names = [t.__name__ for t in self.numeric_types]
assert len(set(names)) == len(names)
@pytest.mark.parametrize('t', numeric_types)
def test_names_reflect_attributes(self, t):
""" Test that names correspond to where the type is under ``np.`` """
assert getattr(np, t.__name__) is t
@pytest.mark.parametrize('t', numeric_types)
    def test_names_are_understood_by_dtype(self, t):
""" Test the dtype constructor maps names back to the type """
assert np.dtype(t.__name__).type is t
| 21,152 | Python | 36.63879 | 119 | 0.536781 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_array_coercion.py | """
Tests for array coercion, mainly through testing `np.array` results directly.
Note that other such tests exist e.g. in `test_api.py` and many corner-cases
are tested (sometimes indirectly) elsewhere.
"""
import pytest
from pytest import param
from itertools import product
import numpy as np
from numpy.core._rational_tests import rational
from numpy.core._multiarray_umath import _discover_array_parameters
from numpy.testing import (
assert_array_equal, assert_warns, IS_PYPY)
def arraylikes():
"""
Generator for functions converting an array into various array-likes.
    Some of the array-likes yielded cannot handle all dtypes.
"""
# base array:
def ndarray(a):
return a
yield param(ndarray, id="ndarray")
# subclass:
class MyArr(np.ndarray):
pass
def subclass(a):
return a.view(MyArr)
yield subclass
    class _SequenceLike:
        # We are giving a warning that array-likes were also expected to be
        # sequence-like in `np.array([array_like])`; this can be removed
        # once the deprecation expires (it started in NumPy 1.20).
def __len__(self):
raise TypeError
def __getitem__(self):
raise TypeError
# Array-interface
class ArrayDunder(_SequenceLike):
def __init__(self, a):
self.a = a
def __array__(self, dtype=None):
return self.a
yield param(ArrayDunder, id="__array__")
# memory-view
yield param(memoryview, id="memoryview")
# Array-interface
class ArrayInterface(_SequenceLike):
def __init__(self, a):
self.a = a # need to hold on to keep interface valid
self.__array_interface__ = a.__array_interface__
yield param(ArrayInterface, id="__array_interface__")
# Array-Struct
class ArrayStruct(_SequenceLike):
def __init__(self, a):
self.a = a # need to hold on to keep struct valid
self.__array_struct__ = a.__array_struct__
yield param(ArrayStruct, id="__array_struct__")
def scalar_instances(times=True, extended_precision=True, user_dtype=True):
# Hard-coded list of scalar instances.
# Floats:
yield param(np.sqrt(np.float16(5)), id="float16")
yield param(np.sqrt(np.float32(5)), id="float32")
yield param(np.sqrt(np.float64(5)), id="float64")
if extended_precision:
yield param(np.sqrt(np.longdouble(5)), id="longdouble")
# Complex:
yield param(np.sqrt(np.complex64(2+3j)), id="complex64")
yield param(np.sqrt(np.complex128(2+3j)), id="complex128")
if extended_precision:
yield param(np.sqrt(np.longcomplex(2+3j)), id="clongdouble")
# Bool:
# XFAIL: Bool should be added, but has some bad properties when it
# comes to strings, see also gh-9875
# yield param(np.bool_(0), id="bool")
# Integers:
yield param(np.int8(2), id="int8")
yield param(np.int16(2), id="int16")
yield param(np.int32(2), id="int32")
yield param(np.int64(2), id="int64")
yield param(np.uint8(2), id="uint8")
yield param(np.uint16(2), id="uint16")
yield param(np.uint32(2), id="uint32")
yield param(np.uint64(2), id="uint64")
# Rational:
if user_dtype:
yield param(rational(1, 2), id="rational")
# Cannot create a structured void scalar directly:
structured = np.array([(1, 3)], "i,i")[0]
assert isinstance(structured, np.void)
assert structured.dtype == np.dtype("i,i")
yield param(structured, id="structured")
if times:
# Datetimes and timedelta
yield param(np.timedelta64(2), id="timedelta64[generic]")
yield param(np.timedelta64(23, "s"), id="timedelta64[s]")
yield param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)")
yield param(np.datetime64("NaT"), id="datetime64[generic](NaT)")
yield param(np.datetime64("2020-06-07 12:43", "ms"), id="datetime64[ms]")
# Strings and unstructured void:
yield param(np.bytes_(b"1234"), id="bytes")
yield param(np.unicode_("2345"), id="unicode")
yield param(np.void(b"4321"), id="unstructured_void")
def is_parametric_dtype(dtype):
"""Returns True if the the dtype is a parametric legacy dtype (itemsize
is 0, or a datetime without units)
"""
if dtype.itemsize == 0:
return True
if issubclass(dtype.type, (np.datetime64, np.timedelta64)):
if dtype.name.endswith("64"):
# Generic time units
return True
return False
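# Editor's illustration of the helper above (a hedged sketch, not
# exhaustive): flexible dtypes with itemsize 0 and unit-less datetimes count
# as parametric because their concrete layout is only fixed during discovery.
def _is_parametric_dtype_sketch():
    assert is_parametric_dtype(np.dtype("S0"))  # itemsize is 0
    assert is_parametric_dtype(np.dtype("M8"))  # generic datetime unit
    assert not is_parametric_dtype(np.dtype("M8[s]"))  # unit is fixed
    assert not is_parametric_dtype(np.dtype("float64"))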
class TestStringDiscovery:
@pytest.mark.parametrize("obj",
[object(), 1.2, 10**43, None, "string"],
ids=["object", "1.2", "10**43", "None", "string"])
def test_basic_stringlength(self, obj):
length = len(str(obj))
expected = np.dtype(f"S{length}")
assert np.array(obj, dtype="S").dtype == expected
assert np.array([obj], dtype="S").dtype == expected
# A nested array is also discovered correctly
arr = np.array(obj, dtype="O")
assert np.array(arr, dtype="S").dtype == expected
# Check that .astype() behaves identical
assert arr.astype("S").dtype == expected
@pytest.mark.parametrize("obj",
[object(), 1.2, 10**43, None, "string"],
ids=["object", "1.2", "10**43", "None", "string"])
def test_nested_arrays_stringlength(self, obj):
length = len(str(obj))
expected = np.dtype(f"S{length}")
arr = np.array(obj, dtype="O")
assert np.array([arr, arr], dtype="S").dtype == expected
@pytest.mark.parametrize("arraylike", arraylikes())
def test_unpack_first_level(self, arraylike):
        # We unpack exactly one level of array-likes
obj = np.array([None])
obj[0] = np.array(1.2)
# the length of the included item, not of the float dtype
length = len(str(obj[0]))
expected = np.dtype(f"S{length}")
obj = arraylike(obj)
# casting to string usually calls str(obj)
arr = np.array([obj], dtype="S")
assert arr.shape == (1, 1)
assert arr.dtype == expected
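# Editor's sketch (illustrative only): string-length discovery computes
# len(str(obj)) per element and keeps the maximum, as the tests above
# exercise for scalars and nested object arrays alike.
def _string_length_discovery_sketch():
    assert np.array(10**43, dtype="S").dtype == np.dtype("S44")  # 44 digits
    assert np.array(["a", "abc"], dtype="S").dtype == np.dtype("S3")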
class TestScalarDiscovery:
def test_void_special_case(self):
# Void dtypes with structures discover tuples as elements
arr = np.array((1, 2, 3), dtype="i,i,i")
assert arr.shape == ()
arr = np.array([(1, 2, 3)], dtype="i,i,i")
assert arr.shape == (1,)
def test_char_special_case(self):
arr = np.array("string", dtype="c")
assert arr.shape == (6,)
assert arr.dtype.char == "c"
arr = np.array(["string"], dtype="c")
assert arr.shape == (1, 6)
assert arr.dtype.char == "c"
def test_char_special_case_deep(self):
# Check that the character special case errors correctly if the
# array is too deep:
nested = ["string"] # 2 dimensions (due to string being sequence)
for i in range(np.MAXDIMS - 2):
nested = [nested]
arr = np.array(nested, dtype='c')
assert arr.shape == (1,) * (np.MAXDIMS - 1) + (6,)
with pytest.raises(ValueError):
np.array([nested], dtype="c")
def test_unknown_object(self):
arr = np.array(object())
assert arr.shape == ()
assert arr.dtype == np.dtype("O")
@pytest.mark.parametrize("scalar", scalar_instances())
def test_scalar(self, scalar):
arr = np.array(scalar)
assert arr.shape == ()
assert arr.dtype == scalar.dtype
arr = np.array([[scalar, scalar]])
assert arr.shape == (1, 2)
assert arr.dtype == scalar.dtype
    # In addition to strings, this test also runs into a corner case
# with datetime promotion (the difference is the promotion order).
@pytest.mark.filterwarnings("ignore:Promotion of numbers:FutureWarning")
def test_scalar_promotion(self):
for sc1, sc2 in product(scalar_instances(), scalar_instances()):
sc1, sc2 = sc1.values[0], sc2.values[0]
# test all combinations:
try:
arr = np.array([sc1, sc2])
except (TypeError, ValueError):
# The promotion between two times can fail
# XFAIL (ValueError): Some object casts are currently undefined
continue
assert arr.shape == (2,)
try:
dt1, dt2 = sc1.dtype, sc2.dtype
expected_dtype = np.promote_types(dt1, dt2)
assert arr.dtype == expected_dtype
            except TypeError:
# Will currently always go to object dtype
assert arr.dtype == np.dtype("O")
@pytest.mark.parametrize("scalar", scalar_instances())
def test_scalar_coercion(self, scalar):
# This tests various scalar coercion paths, mainly for the numerical
# types. It includes some paths not directly related to `np.array`
if isinstance(scalar, np.inexact):
# Ensure we have a full-precision number if available
scalar = type(scalar)((scalar * 2)**0.5)
if type(scalar) is rational:
# Rational generally fails due to a missing cast. In the future
# object casts should automatically be defined based on `setitem`.
pytest.xfail("Rational to object cast is undefined currently.")
# Use casting from object:
arr = np.array(scalar, dtype=object).astype(scalar.dtype)
# Test various ways to create an array containing this scalar:
arr1 = np.array(scalar).reshape(1)
arr2 = np.array([scalar])
arr3 = np.empty(1, dtype=scalar.dtype)
arr3[0] = scalar
arr4 = np.empty(1, dtype=scalar.dtype)
arr4[:] = [scalar]
# All of these methods should yield the same results
assert_array_equal(arr, arr1)
assert_array_equal(arr, arr2)
assert_array_equal(arr, arr3)
assert_array_equal(arr, arr4)
@pytest.mark.xfail(IS_PYPY, reason="`int(np.complex128(3))` fails on PyPy")
@pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
@pytest.mark.parametrize("cast_to", scalar_instances())
def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to):
"""
Test that in most cases:
* `np.array(scalar, dtype=dtype)`
* `np.empty((), dtype=dtype)[()] = scalar`
* `np.array(scalar).astype(dtype)`
        should behave the same. The only exceptions are parametric dtypes
(mainly datetime/timedelta without unit) and void without fields.
"""
dtype = cast_to.dtype # use to parametrize only the target dtype
for scalar in scalar_instances(times=False):
scalar = scalar.values[0]
if dtype.type == np.void:
if scalar.dtype.fields is not None and dtype.fields is None:
# Here, coercion to "V6" works, but the cast fails.
# Since the types are identical, SETITEM takes care of
# this, but has different rules than the cast.
with pytest.raises(TypeError):
np.array(scalar).astype(dtype)
np.array(scalar, dtype=dtype)
np.array([scalar], dtype=dtype)
continue
# The main test, we first try to use casting and if it succeeds
# continue below testing that things are the same, otherwise
# test that the alternative paths at least also fail.
try:
cast = np.array(scalar).astype(dtype)
except (TypeError, ValueError, RuntimeError):
# coercion should also raise (error type may change)
with pytest.raises(Exception):
np.array(scalar, dtype=dtype)
if (isinstance(scalar, rational) and
np.issubdtype(dtype, np.signedinteger)):
return
with pytest.raises(Exception):
np.array([scalar], dtype=dtype)
# assignment should also raise
res = np.zeros((), dtype=dtype)
with pytest.raises(Exception):
res[()] = scalar
return
# Non error path:
arr = np.array(scalar, dtype=dtype)
assert_array_equal(arr, cast)
# assignment behaves the same
ass = np.zeros((), dtype=dtype)
ass[()] = scalar
assert_array_equal(ass, cast)
@pytest.mark.parametrize("pyscalar", [10, 10.32, 10.14j, 10**100])
def test_pyscalar_subclasses(self, pyscalar):
"""NumPy arrays are read/write which means that anything but invariant
behaviour is on thin ice. However, we currently are happy to discover
subclasses of Python float, int, complex the same as the base classes.
This should potentially be deprecated.
"""
class MyScalar(type(pyscalar)):
pass
res = np.array(MyScalar(pyscalar))
expected = np.array(pyscalar)
assert_array_equal(res, expected)
@pytest.mark.parametrize("dtype_char", np.typecodes["All"])
def test_default_dtype_instance(self, dtype_char):
if dtype_char in "SU":
dtype = np.dtype(dtype_char + "1")
elif dtype_char == "V":
            # Legacy behaviour was to use V8, because float64 is the default
            # dtype and has an itemsize of 8 bytes.
dtype = np.dtype("V8")
else:
dtype = np.dtype(dtype_char)
discovered_dtype, _ = _discover_array_parameters([], type(dtype))
assert discovered_dtype == dtype
assert discovered_dtype.itemsize == dtype.itemsize
@pytest.mark.parametrize("dtype", np.typecodes["Integer"])
def test_scalar_to_int_coerce_does_not_cast(self, dtype):
"""
        Signed integers are currently different in that they do not cast
        other NumPy scalars, but instead use scalar.__int__(). The hardcoded
exception to this rule is `np.array(scalar, dtype=integer)`.
"""
dtype = np.dtype(dtype)
invalid_int = np.ulonglong(-1)
float_nan = np.float64(np.nan)
for scalar in [float_nan, invalid_int]:
# This is a special case using casting logic and thus not failing:
coerced = np.array(scalar, dtype=dtype)
cast = np.array(scalar).astype(dtype)
assert_array_equal(coerced, cast)
# However these fail:
with pytest.raises((ValueError, OverflowError)):
np.array([scalar], dtype=dtype)
with pytest.raises((ValueError, OverflowError)):
cast[()] = scalar
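# Editor's sketch mirroring the rule documented above (hedged; exact error
# types may vary by version): np.array(scalar, dtype=...) takes the casting
# path, while list coercion goes through scalar.__int__() and raises.
def _int_coercion_exception_sketch():
    nan = np.float64(np.nan)
    np.array(nan, dtype=np.int64)  # casting path: does not raise
    with pytest.raises((ValueError, OverflowError)):
        np.array([nan], dtype=np.int64)  # __int__ path: raises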
class TestTimeScalars:
@pytest.mark.parametrize("dtype", [np.int64, np.float32])
@pytest.mark.parametrize("scalar",
[param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)"),
param(np.timedelta64(123, "s"), id="timedelta64[s]"),
param(np.datetime64("NaT", "generic"), id="datetime64[generic](NaT)"),
param(np.datetime64(1, "D"), id="datetime64[D]")],)
def test_coercion_basic(self, dtype, scalar):
# Note the `[scalar]` is there because np.array(scalar) uses stricter
# `scalar.__int__()` rules for backward compatibility right now.
arr = np.array(scalar, dtype=dtype)
cast = np.array(scalar).astype(dtype)
assert_array_equal(arr, cast)
ass = np.ones((), dtype=dtype)
if issubclass(dtype, np.integer):
with pytest.raises(TypeError):
                # raises, as would np.array([scalar], dtype=dtype); this is
                # a conversion from times, but follows integer behaviour.
ass[()] = scalar
else:
ass[()] = scalar
assert_array_equal(ass, cast)
@pytest.mark.parametrize("dtype", [np.int64, np.float32])
@pytest.mark.parametrize("scalar",
[param(np.timedelta64(123, "ns"), id="timedelta64[ns]"),
param(np.timedelta64(12, "generic"), id="timedelta64[generic]")])
def test_coercion_timedelta_convert_to_number(self, dtype, scalar):
# Only "ns" and "generic" timedeltas can be converted to numbers
# so these are slightly special.
arr = np.array(scalar, dtype=dtype)
cast = np.array(scalar).astype(dtype)
ass = np.ones((), dtype=dtype)
        ass[()] = scalar  # succeeds here; other timedelta units would raise
        assert_array_equal(arr, cast)
        assert_array_equal(ass, cast)
@pytest.mark.parametrize("dtype", ["S6", "U6"])
@pytest.mark.parametrize(["val", "unit"],
[param(123, "s", id="[s]"), param(123, "D", id="[D]")])
def test_coercion_assignment_datetime(self, val, unit, dtype):
# String from datetime64 assignment is currently special cased to
# never use casting. This is because casting will error in this
# case, and traditionally in most cases the behaviour is maintained
# like this. (`np.array(scalar, dtype="U6")` would have failed before)
# TODO: This discrepancy _should_ be resolved, either by relaxing the
# cast, or by deprecating the first part.
scalar = np.datetime64(val, unit)
dtype = np.dtype(dtype)
cut_string = dtype.type(str(scalar)[:6])
arr = np.array(scalar, dtype=dtype)
assert arr[()] == cut_string
ass = np.ones((), dtype=dtype)
ass[()] = scalar
assert ass[()] == cut_string
with pytest.raises(RuntimeError):
            # However, unlike the assignment above, which goes through
            # `str(scalar)[:6]` via the string DType rather than casting,
            # the explicit cast fails:
np.array(scalar).astype(dtype)
@pytest.mark.parametrize(["val", "unit"],
[param(123, "s", id="[s]"), param(123, "D", id="[D]")])
def test_coercion_assignment_timedelta(self, val, unit):
scalar = np.timedelta64(val, unit)
# Unlike datetime64, timedelta allows the unsafe cast:
np.array(scalar, dtype="S6")
cast = np.array(scalar).astype("S6")
ass = np.ones((), dtype="S6")
ass[()] = scalar
expected = scalar.astype("S")[:6]
assert cast[()] == expected
assert ass[()] == expected
class TestNested:
def test_nested_simple(self):
initial = [1.2]
nested = initial
for i in range(np.MAXDIMS - 1):
nested = [nested]
arr = np.array(nested, dtype="float64")
assert arr.shape == (1,) * np.MAXDIMS
with pytest.raises(ValueError):
np.array([nested], dtype="float64")
# We discover object automatically at this time:
with assert_warns(np.VisibleDeprecationWarning):
arr = np.array([nested])
assert arr.dtype == np.dtype("O")
assert arr.shape == (1,) * np.MAXDIMS
assert arr.item() is initial
def test_pathological_self_containing(self):
# Test that this also works for two nested sequences
l = []
l.append(l)
arr = np.array([l, l, l], dtype=object)
assert arr.shape == (3,) + (1,) * (np.MAXDIMS - 1)
# Also check a ragged case:
arr = np.array([l, [None], l], dtype=object)
assert arr.shape == (3, 1)
@pytest.mark.parametrize("arraylike", arraylikes())
def test_nested_arraylikes(self, arraylike):
# We try storing an array like into an array, but the array-like
# will have too many dimensions. This means the shape discovery
# decides that the array-like must be treated as an object (a special
# case of ragged discovery). The result will be an array with one
# dimension less than the maximum dimensions, and the array being
# assigned to it (which does work for object or if `float(arraylike)`
# works).
initial = arraylike(np.ones((1, 1)))
nested = initial
for i in range(np.MAXDIMS - 1):
nested = [nested]
with pytest.warns(DeprecationWarning):
            # It will refuse to assign the array-like into the float array
np.array(nested, dtype="float64")
# If this is object, we end up assigning a (1, 1) array into (1,)
# (due to running out of dimensions), this is currently supported but
# a special case which is not ideal.
arr = np.array(nested, dtype=object)
assert arr.shape == (1,) * np.MAXDIMS
assert arr.item() == np.array(initial).item()
@pytest.mark.parametrize("arraylike", arraylikes())
def test_uneven_depth_ragged(self, arraylike):
arr = np.arange(4).reshape((2, 2))
arr = arraylike(arr)
# Array is ragged in the second dimension already:
out = np.array([arr, [arr]], dtype=object)
assert out.shape == (2,)
assert out[0] is arr
assert type(out[1]) is list
# Array is ragged in the third dimension:
with pytest.raises(ValueError):
# This is a broadcast error during assignment, because
# the array shape would be (2, 2, 2) but `arr[0, 0] = arr` fails.
np.array([arr, [arr, arr]], dtype=object)
def test_empty_sequence(self):
arr = np.array([[], [1], [[1]]], dtype=object)
assert arr.shape == (3,)
# The empty sequence stops further dimension discovery, so the
# result shape will be (0,) which leads to an error during:
with pytest.raises(ValueError):
np.array([[], np.empty((0, 1))], dtype=object)
def test_array_of_different_depths(self):
# When multiple arrays (or array-likes) are included in a
# sequences and have different depth, we currently discover
# as many dimensions as they share. (see also gh-17224)
arr = np.zeros((3, 2))
mismatch_first_dim = np.zeros((1, 2))
mismatch_second_dim = np.zeros((3, 3))
dtype, shape = _discover_array_parameters(
[arr, mismatch_second_dim], dtype=np.dtype("O"))
assert shape == (2, 3)
dtype, shape = _discover_array_parameters(
[arr, mismatch_first_dim], dtype=np.dtype("O"))
assert shape == (2,)
# The second case is currently supported because the arrays
# can be stored as objects:
res = np.asarray([arr, mismatch_first_dim], dtype=np.dtype("O"))
assert res[0] is arr
assert res[1] is mismatch_first_dim
class TestBadSequences:
# These are tests for bad objects passed into `np.array`, in general
    # these have undefined behaviour. In the old code they partially worked,
    # whereas now they will fail. We could (and maybe should) create a copy
# of all sequences to be safe against bad-actors.
def test_growing_list(self):
# List to coerce, `mylist` will append to it during coercion
obj = []
class mylist(list):
def __len__(self):
obj.append([1, 2])
return super().__len__()
obj.append(mylist([1, 2]))
with pytest.raises(RuntimeError):
np.array(obj)
# Note: We do not test a shrinking list. These do very evil things
    # and the only way to fix them would be to copy all sequences
    # (which may be a real option in the future).
def test_mutated_list(self):
# List to coerce, `mylist` will mutate the first element
obj = []
class mylist(list):
def __len__(self):
obj[0] = [2, 3] # replace with a different list.
return super().__len__()
obj.append([2, 3])
obj.append(mylist([1, 2]))
with pytest.raises(RuntimeError):
np.array(obj)
def test_replace_0d_array(self):
        # List to coerce; `baditem` will replace the cached 0-d array entry
obj = []
class baditem:
def __len__(self):
                obj[0][0] = 2  # replace the cached array with an int
raise ValueError("not actually a sequence!")
def __getitem__(self):
pass
# Runs into a corner case in the new code, the `array(2)` is cached
# so replacing it invalidates the cache.
obj.append([np.array(2), baditem()])
with pytest.raises(RuntimeError):
np.array(obj)
class TestArrayLikes:
@pytest.mark.parametrize("arraylike", arraylikes())
def test_0d_object_special_case(self, arraylike):
arr = np.array(0.)
obj = arraylike(arr)
# A single array-like is always converted:
res = np.array(obj, dtype=object)
assert_array_equal(arr, res)
        # But a single 0-D array-like nested in a list is never converted:
res = np.array([obj], dtype=object)
assert res[0] is obj
def test_0d_generic_special_case(self):
class ArraySubclass(np.ndarray):
def __float__(self):
raise TypeError("e.g. quantities raise on this")
arr = np.array(0.)
obj = arr.view(ArraySubclass)
res = np.array(obj)
# The subclass is simply cast:
assert_array_equal(arr, res)
# If the 0-D array-like is included, __float__ is currently
        # guaranteed to be used. We may want to change that; quantities
        # and masked arrays partially rely on this.
with pytest.raises(TypeError):
np.array([obj])
# The same holds for memoryview:
obj = memoryview(arr)
res = np.array(obj)
assert_array_equal(arr, res)
with pytest.raises(ValueError):
# The error type does not matter much here.
np.array([obj])
def test_arraylike_classes(self):
# The classes of array-likes should generally be acceptable to be
# stored inside a numpy (object) array. This tests all of the
# special attributes (since all are checked during coercion).
arr = np.array(np.int64)
assert arr[()] is np.int64
arr = np.array([np.int64])
assert arr[0] is np.int64
# This also works for properties/unbound methods:
class ArrayLike:
@property
def __array_interface__(self):
pass
@property
def __array_struct__(self):
pass
def __array__(self):
pass
arr = np.array(ArrayLike)
assert arr[()] is ArrayLike
arr = np.array([ArrayLike])
assert arr[0] is ArrayLike
@pytest.mark.skipif(
np.dtype(np.intp).itemsize < 8, reason="Needs 64bit platform")
def test_too_large_array_error_paths(self):
"""Test the error paths, including for memory leaks"""
arr = np.array(0, dtype="uint8")
# Guarantees that a contiguous copy won't work:
arr = np.broadcast_to(arr, 2**62)
for i in range(5):
# repeat, to ensure caching cannot have an effect:
with pytest.raises(MemoryError):
np.array(arr)
with pytest.raises(MemoryError):
np.array([arr])
@pytest.mark.parametrize("attribute",
["__array_interface__", "__array__", "__array_struct__"])
@pytest.mark.parametrize("error", [RecursionError, MemoryError])
def test_bad_array_like_attributes(self, attribute, error):
# RecursionError and MemoryError are considered fatal. All errors
# (except AttributeError) should probably be raised in the future,
# but shapely made use of it, so it will require a deprecation.
class BadInterface:
def __getattr__(self, attr):
if attr == attribute:
raise error
super().__getattr__(attr)
with pytest.raises(error):
np.array(BadInterface())
@pytest.mark.parametrize("error", [RecursionError, MemoryError])
def test_bad_array_like_bad_length(self, error):
# RecursionError and MemoryError are considered "critical" in
# sequences. We could expand this more generally though. (NumPy 1.20)
class BadSequence:
def __len__(self):
raise error
def __getitem__(self):
# must have getitem to be a Sequence
return 1
with pytest.raises(error):
np.array(BadSequence())
class TestSpecialAttributeLookupFailure:
# An exception was raised while fetching the attribute
class WeirdArrayLike:
@property
def __array__(self):
raise RuntimeError("oops!")
class WeirdArrayInterface:
@property
def __array_interface__(self):
raise RuntimeError("oops!")
    def test_error_raised(self):
with pytest.raises(RuntimeError):
np.array(self.WeirdArrayLike())
with pytest.raises(RuntimeError):
np.array(self.WeirdArrayInterface())
| 29,075 | Python | 36.859375 | 83 | 0.589888 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_datetime.py |
import numpy
import numpy as np
import datetime
import pytest
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_warns, suppress_warnings,
assert_raises_regex, assert_array_equal,
)
from numpy.compat import pickle
# Use pytz to test out various time zones if available
try:
from pytz import timezone as tz
_has_pytz = True
except ImportError:
_has_pytz = False
try:
RecursionError
except NameError:
RecursionError = RuntimeError # python < 3.5
class TestDateTime:
def test_datetime_dtype_creation(self):
for unit in ['Y', 'M', 'W', 'D',
'h', 'm', 's', 'ms', 'us',
'μs', # alias for us
'ns', 'ps', 'fs', 'as']:
dt1 = np.dtype('M8[750%s]' % unit)
assert_(dt1 == np.dtype('datetime64[750%s]' % unit))
dt2 = np.dtype('m8[%s]' % unit)
assert_(dt2 == np.dtype('timedelta64[%s]' % unit))
# Generic units shouldn't add [] to the end
assert_equal(str(np.dtype("M8")), "datetime64")
# Should be possible to specify the endianness
assert_equal(np.dtype("=M8"), np.dtype("M8"))
assert_equal(np.dtype("=M8[s]"), np.dtype("M8[s]"))
assert_(np.dtype(">M8") == np.dtype("M8") or
np.dtype("<M8") == np.dtype("M8"))
assert_(np.dtype(">M8[D]") == np.dtype("M8[D]") or
np.dtype("<M8[D]") == np.dtype("M8[D]"))
assert_(np.dtype(">M8") != np.dtype("<M8"))
assert_equal(np.dtype("=m8"), np.dtype("m8"))
assert_equal(np.dtype("=m8[s]"), np.dtype("m8[s]"))
assert_(np.dtype(">m8") == np.dtype("m8") or
np.dtype("<m8") == np.dtype("m8"))
assert_(np.dtype(">m8[D]") == np.dtype("m8[D]") or
np.dtype("<m8[D]") == np.dtype("m8[D]"))
assert_(np.dtype(">m8") != np.dtype("<m8"))
# Check that the parser rejects bad datetime types
assert_raises(TypeError, np.dtype, 'M8[badunit]')
assert_raises(TypeError, np.dtype, 'm8[badunit]')
assert_raises(TypeError, np.dtype, 'M8[YY]')
assert_raises(TypeError, np.dtype, 'm8[YY]')
assert_raises(TypeError, np.dtype, 'm4')
assert_raises(TypeError, np.dtype, 'M7')
assert_raises(TypeError, np.dtype, 'm7')
assert_raises(TypeError, np.dtype, 'M16')
assert_raises(TypeError, np.dtype, 'm16')
assert_raises(TypeError, np.dtype, 'M8[3000000000ps]')
def test_datetime_casting_rules(self):
# Cannot cast safely/same_kind between timedelta and datetime
assert_(not np.can_cast('m8', 'M8', casting='same_kind'))
assert_(not np.can_cast('M8', 'm8', casting='same_kind'))
assert_(not np.can_cast('m8', 'M8', casting='safe'))
assert_(not np.can_cast('M8', 'm8', casting='safe'))
# Can cast safely/same_kind from integer to timedelta
assert_(np.can_cast('i8', 'm8', casting='same_kind'))
assert_(np.can_cast('i8', 'm8', casting='safe'))
assert_(np.can_cast('i4', 'm8', casting='same_kind'))
assert_(np.can_cast('i4', 'm8', casting='safe'))
assert_(np.can_cast('u4', 'm8', casting='same_kind'))
assert_(np.can_cast('u4', 'm8', casting='safe'))
# Cannot cast safely from unsigned integer of the same size, which
# could overflow
assert_(np.can_cast('u8', 'm8', casting='same_kind'))
assert_(not np.can_cast('u8', 'm8', casting='safe'))
# Cannot cast safely/same_kind from float to timedelta
assert_(not np.can_cast('f4', 'm8', casting='same_kind'))
assert_(not np.can_cast('f4', 'm8', casting='safe'))
# Cannot cast safely/same_kind from integer to datetime
assert_(not np.can_cast('i8', 'M8', casting='same_kind'))
assert_(not np.can_cast('i8', 'M8', casting='safe'))
# Cannot cast safely/same_kind from bool to datetime
assert_(not np.can_cast('b1', 'M8', casting='same_kind'))
assert_(not np.can_cast('b1', 'M8', casting='safe'))
# Can cast safely/same_kind from bool to timedelta
assert_(np.can_cast('b1', 'm8', casting='same_kind'))
assert_(np.can_cast('b1', 'm8', casting='safe'))
# Can cast datetime safely from months/years to days
assert_(np.can_cast('M8[M]', 'M8[D]', casting='safe'))
assert_(np.can_cast('M8[Y]', 'M8[D]', casting='safe'))
# Cannot cast timedelta safely from months/years to days
assert_(not np.can_cast('m8[M]', 'm8[D]', casting='safe'))
assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='safe'))
# Can cast datetime same_kind from months/years to days
assert_(np.can_cast('M8[M]', 'M8[D]', casting='same_kind'))
assert_(np.can_cast('M8[Y]', 'M8[D]', casting='same_kind'))
# Can't cast timedelta same_kind from months/years to days
assert_(not np.can_cast('m8[M]', 'm8[D]', casting='same_kind'))
assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='same_kind'))
# Can cast datetime same_kind across the date/time boundary
assert_(np.can_cast('M8[D]', 'M8[h]', casting='same_kind'))
# Can cast timedelta same_kind across the date/time boundary
assert_(np.can_cast('m8[D]', 'm8[h]', casting='same_kind'))
assert_(np.can_cast('m8[h]', 'm8[D]', casting='same_kind'))
# Cannot cast safely if the integer multiplier doesn't divide
assert_(not np.can_cast('M8[7h]', 'M8[3h]', casting='safe'))
assert_(not np.can_cast('M8[3h]', 'M8[6h]', casting='safe'))
# But can cast same_kind
assert_(np.can_cast('M8[7h]', 'M8[3h]', casting='same_kind'))
# Can cast safely if the integer multiplier does divide
assert_(np.can_cast('M8[6h]', 'M8[3h]', casting='safe'))
# We can always cast types with generic units (corresponding to NaT) to
# more specific types
assert_(np.can_cast('m8', 'm8[h]', casting='same_kind'))
assert_(np.can_cast('m8', 'm8[h]', casting='safe'))
assert_(np.can_cast('M8', 'M8[h]', casting='same_kind'))
assert_(np.can_cast('M8', 'M8[h]', casting='safe'))
# but not the other way around
assert_(not np.can_cast('m8[h]', 'm8', casting='same_kind'))
assert_(not np.can_cast('m8[h]', 'm8', casting='safe'))
assert_(not np.can_cast('M8[h]', 'M8', casting='same_kind'))
assert_(not np.can_cast('M8[h]', 'M8', casting='safe'))
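    def _unit_multiple_cast_sketch(self):
        # Editor's sketch (hedged; mirrors the asserts above): a cast
        # between unit multiples is "safe" exactly when the source step is
        # an integer multiple of the destination step, so every tick stays
        # exactly representable.
        assert np.can_cast('M8[6h]', 'M8[3h]', casting='safe')  # 6 % 3 == 0
        assert not np.can_cast('M8[7h]', 'M8[3h]', casting='safe')  # 7 % 3 != 0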
def test_datetime_prefix_conversions(self):
# regression tests related to gh-19631;
# test metric prefixes from seconds down to
# attoseconds for bidirectional conversions
smaller_units = ['M8[7000ms]',
'M8[2000us]',
'M8[1000ns]',
'M8[5000ns]',
'M8[2000ps]',
'M8[9000fs]',
'M8[1000as]',
'M8[2000000ps]',
'M8[1000000as]',
'M8[2000000000ps]',
'M8[1000000000as]']
larger_units = ['M8[7s]',
'M8[2ms]',
'M8[us]',
'M8[5us]',
'M8[2ns]',
'M8[9ps]',
'M8[1fs]',
'M8[2us]',
'M8[1ps]',
'M8[2ms]',
'M8[1ns]']
for larger_unit, smaller_unit in zip(larger_units, smaller_units):
assert np.can_cast(larger_unit, smaller_unit, casting='safe')
assert np.can_cast(smaller_unit, larger_unit, casting='safe')
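    # Editor's note (hedged): each smaller/larger pair above denotes the
    # same physical period (e.g. 7000 ms == 7 s and 2000000000 ps == 2 ms),
    # so the round-trip cast is lossless and therefore "safe" in both
    # directions.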
@pytest.mark.parametrize("unit", [
"s", "ms", "us", "ns", "ps", "fs", "as"])
def test_prohibit_negative_datetime(self, unit):
with assert_raises(TypeError):
np.array([1], dtype=f"M8[-1{unit}]")
def test_compare_generic_nat(self):
# regression tests for gh-6452
assert_(np.datetime64('NaT') !=
np.datetime64('2000') + np.timedelta64('NaT'))
assert_(np.datetime64('NaT') != np.datetime64('NaT', 'us'))
assert_(np.datetime64('NaT', 'us') != np.datetime64('NaT'))
@pytest.mark.parametrize("size", [
3, 21, 217, 1000])
def test_datetime_nat_argsort_stability(self, size):
# NaT < NaT should be False internally for
# sort stability
expected = np.arange(size)
arr = np.tile(np.datetime64('NaT'), size)
assert_equal(np.argsort(arr, kind='mergesort'), expected)
@pytest.mark.parametrize("size", [
3, 21, 217, 1000])
def test_timedelta_nat_argsort_stability(self, size):
# NaT < NaT should be False internally for
# sort stability
expected = np.arange(size)
arr = np.tile(np.timedelta64('NaT'), size)
assert_equal(np.argsort(arr, kind='mergesort'), expected)
@pytest.mark.parametrize("arr, expected", [
# the example provided in gh-12629
(['NaT', 1, 2, 3],
[1, 2, 3, 'NaT']),
# multiple NaTs
(['NaT', 9, 'NaT', -707],
[-707, 9, 'NaT', 'NaT']),
# this sort explores another code path for NaT
([1, -2, 3, 'NaT'],
[-2, 1, 3, 'NaT']),
# 2-D array
([[51, -220, 'NaT'],
[-17, 'NaT', -90]],
[[-220, 51, 'NaT'],
[-90, -17, 'NaT']]),
])
@pytest.mark.parametrize("dtype", [
'M8[ns]', 'M8[us]',
'm8[ns]', 'm8[us]'])
def test_datetime_timedelta_sort_nat(self, arr, expected, dtype):
# fix for gh-12629 and gh-15063; NaT sorting to end of array
arr = np.array(arr, dtype=dtype)
expected = np.array(expected, dtype=dtype)
arr.sort()
assert_equal(arr, expected)
def test_datetime_scalar_construction(self):
# Construct with different units
assert_equal(np.datetime64('1950-03-12', 'D'),
np.datetime64('1950-03-12'))
assert_equal(np.datetime64('1950-03-12T13', 's'),
np.datetime64('1950-03-12T13', 'm'))
# Default construction means NaT
assert_equal(np.datetime64(), np.datetime64('NaT'))
# Some basic strings and repr
assert_equal(str(np.datetime64('NaT')), 'NaT')
assert_equal(repr(np.datetime64('NaT')),
"numpy.datetime64('NaT')")
assert_equal(str(np.datetime64('2011-02')), '2011-02')
assert_equal(repr(np.datetime64('2011-02')),
"numpy.datetime64('2011-02')")
# None gets constructed as NaT
assert_equal(np.datetime64(None), np.datetime64('NaT'))
# Default construction of NaT is in generic units
assert_equal(np.datetime64().dtype, np.dtype('M8'))
assert_equal(np.datetime64('NaT').dtype, np.dtype('M8'))
# Construction from integers requires a specified unit
assert_raises(ValueError, np.datetime64, 17)
# When constructing from a scalar or zero-dimensional array,
# it either keeps the units or you can override them.
a = np.datetime64('2000-03-18T16', 'h')
b = np.array('2000-03-18T16', dtype='M8[h]')
assert_equal(a.dtype, np.dtype('M8[h]'))
assert_equal(b.dtype, np.dtype('M8[h]'))
assert_equal(np.datetime64(a), a)
assert_equal(np.datetime64(a).dtype, np.dtype('M8[h]'))
assert_equal(np.datetime64(b), a)
assert_equal(np.datetime64(b).dtype, np.dtype('M8[h]'))
assert_equal(np.datetime64(a, 's'), a)
assert_equal(np.datetime64(a, 's').dtype, np.dtype('M8[s]'))
assert_equal(np.datetime64(b, 's'), a)
assert_equal(np.datetime64(b, 's').dtype, np.dtype('M8[s]'))
# Construction from datetime.date
assert_equal(np.datetime64('1945-03-25'),
np.datetime64(datetime.date(1945, 3, 25)))
assert_equal(np.datetime64('2045-03-25', 'D'),
np.datetime64(datetime.date(2045, 3, 25), 'D'))
# Construction from datetime.datetime
assert_equal(np.datetime64('1980-01-25T14:36:22.5'),
np.datetime64(datetime.datetime(1980, 1, 25,
14, 36, 22, 500000)))
# Construction with time units from a date is okay
assert_equal(np.datetime64('1920-03-13', 'h'),
np.datetime64('1920-03-13T00'))
assert_equal(np.datetime64('1920-03', 'm'),
np.datetime64('1920-03-01T00:00'))
assert_equal(np.datetime64('1920', 's'),
np.datetime64('1920-01-01T00:00:00'))
assert_equal(np.datetime64(datetime.date(2045, 3, 25), 'ms'),
np.datetime64('2045-03-25T00:00:00.000'))
# Construction with date units from a datetime is also okay
assert_equal(np.datetime64('1920-03-13T18', 'D'),
np.datetime64('1920-03-13'))
assert_equal(np.datetime64('1920-03-13T18:33:12', 'M'),
np.datetime64('1920-03'))
assert_equal(np.datetime64('1920-03-13T18:33:12.5', 'Y'),
np.datetime64('1920'))
def test_datetime_scalar_construction_timezone(self):
# verify that supplying an explicit timezone works, but is deprecated
with assert_warns(DeprecationWarning):
assert_equal(np.datetime64('2000-01-01T00Z'),
np.datetime64('2000-01-01T00'))
with assert_warns(DeprecationWarning):
assert_equal(np.datetime64('2000-01-01T00-08'),
np.datetime64('2000-01-01T08'))
def test_datetime_array_find_type(self):
dt = np.datetime64('1970-01-01', 'M')
arr = np.array([dt])
assert_equal(arr.dtype, np.dtype('M8[M]'))
# at the moment, we don't automatically convert these to datetime64
dt = datetime.date(1970, 1, 1)
arr = np.array([dt])
assert_equal(arr.dtype, np.dtype('O'))
dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
arr = np.array([dt])
assert_equal(arr.dtype, np.dtype('O'))
# find "supertype" for non-dates and dates
b = np.bool_(True)
dm = np.datetime64('1970-01-01', 'M')
d = datetime.date(1970, 1, 1)
dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
arr = np.array([b, dm])
assert_equal(arr.dtype, np.dtype('O'))
arr = np.array([b, d])
assert_equal(arr.dtype, np.dtype('O'))
arr = np.array([b, dt])
assert_equal(arr.dtype, np.dtype('O'))
arr = np.array([d, d]).astype('datetime64')
assert_equal(arr.dtype, np.dtype('M8[D]'))
arr = np.array([dt, dt]).astype('datetime64')
assert_equal(arr.dtype, np.dtype('M8[us]'))
@pytest.mark.parametrize("unit", [
# test all date / time units and use
# "generic" to select generic unit
("Y"), ("M"), ("W"), ("D"), ("h"), ("m"),
("s"), ("ms"), ("us"), ("ns"), ("ps"),
("fs"), ("as"), ("generic") ])
def test_timedelta_np_int_construction(self, unit):
# regression test for gh-7617
if unit != "generic":
assert_equal(np.timedelta64(np.int64(123), unit),
np.timedelta64(123, unit))
else:
assert_equal(np.timedelta64(np.int64(123)),
np.timedelta64(123))
def test_timedelta_scalar_construction(self):
# Construct with different units
assert_equal(np.timedelta64(7, 'D'),
np.timedelta64(1, 'W'))
assert_equal(np.timedelta64(120, 's'),
np.timedelta64(2, 'm'))
# Default construction means 0
assert_equal(np.timedelta64(), np.timedelta64(0))
# None gets constructed as NaT
assert_equal(np.timedelta64(None), np.timedelta64('NaT'))
# Some basic strings and repr
assert_equal(str(np.timedelta64('NaT')), 'NaT')
assert_equal(repr(np.timedelta64('NaT')),
"numpy.timedelta64('NaT')")
assert_equal(str(np.timedelta64(3, 's')), '3 seconds')
assert_equal(repr(np.timedelta64(-3, 's')),
"numpy.timedelta64(-3,'s')")
assert_equal(repr(np.timedelta64(12)),
"numpy.timedelta64(12)")
# Construction from an integer produces generic units
assert_equal(np.timedelta64(12).dtype, np.dtype('m8'))
# When constructing from a scalar or zero-dimensional array,
# it either keeps the units or you can override them.
a = np.timedelta64(2, 'h')
b = np.array(2, dtype='m8[h]')
assert_equal(a.dtype, np.dtype('m8[h]'))
assert_equal(b.dtype, np.dtype('m8[h]'))
assert_equal(np.timedelta64(a), a)
assert_equal(np.timedelta64(a).dtype, np.dtype('m8[h]'))
assert_equal(np.timedelta64(b), a)
assert_equal(np.timedelta64(b).dtype, np.dtype('m8[h]'))
assert_equal(np.timedelta64(a, 's'), a)
assert_equal(np.timedelta64(a, 's').dtype, np.dtype('m8[s]'))
assert_equal(np.timedelta64(b, 's'), a)
assert_equal(np.timedelta64(b, 's').dtype, np.dtype('m8[s]'))
# Construction from datetime.timedelta
assert_equal(np.timedelta64(5, 'D'),
np.timedelta64(datetime.timedelta(days=5)))
assert_equal(np.timedelta64(102347621, 's'),
np.timedelta64(datetime.timedelta(seconds=102347621)))
assert_equal(np.timedelta64(-10234760000, 'us'),
np.timedelta64(datetime.timedelta(
microseconds=-10234760000)))
assert_equal(np.timedelta64(10234760000, 'us'),
np.timedelta64(datetime.timedelta(
microseconds=10234760000)))
assert_equal(np.timedelta64(1023476, 'ms'),
np.timedelta64(datetime.timedelta(milliseconds=1023476)))
assert_equal(np.timedelta64(10, 'm'),
np.timedelta64(datetime.timedelta(minutes=10)))
assert_equal(np.timedelta64(281, 'h'),
np.timedelta64(datetime.timedelta(hours=281)))
assert_equal(np.timedelta64(28, 'W'),
np.timedelta64(datetime.timedelta(weeks=28)))
# Cannot construct across nonlinear time unit boundaries
a = np.timedelta64(3, 's')
assert_raises(TypeError, np.timedelta64, a, 'M')
assert_raises(TypeError, np.timedelta64, a, 'Y')
a = np.timedelta64(6, 'M')
assert_raises(TypeError, np.timedelta64, a, 'D')
assert_raises(TypeError, np.timedelta64, a, 'h')
a = np.timedelta64(1, 'Y')
assert_raises(TypeError, np.timedelta64, a, 'D')
assert_raises(TypeError, np.timedelta64, a, 'm')
a = datetime.timedelta(seconds=3)
assert_raises(TypeError, np.timedelta64, a, 'M')
assert_raises(TypeError, np.timedelta64, a, 'Y')
a = datetime.timedelta(weeks=3)
assert_raises(TypeError, np.timedelta64, a, 'M')
assert_raises(TypeError, np.timedelta64, a, 'Y')
a = datetime.timedelta()
assert_raises(TypeError, np.timedelta64, a, 'M')
assert_raises(TypeError, np.timedelta64, a, 'Y')
def test_timedelta_object_array_conversion(self):
# Regression test for gh-11096
inputs = [datetime.timedelta(28),
datetime.timedelta(30),
datetime.timedelta(31)]
expected = np.array([28, 30, 31], dtype='timedelta64[D]')
actual = np.array(inputs, dtype='timedelta64[D]')
assert_equal(expected, actual)
def test_timedelta_0_dim_object_array_conversion(self):
# Regression test for gh-11151
test = np.array(datetime.timedelta(seconds=20))
actual = test.astype(np.timedelta64)
# expected value from the array constructor workaround
        # described in the issue above
expected = np.array(datetime.timedelta(seconds=20),
np.timedelta64)
assert_equal(actual, expected)
def test_timedelta_nat_format(self):
# gh-17552
assert_equal('NaT', '{0}'.format(np.timedelta64('nat')))
def test_timedelta_scalar_construction_units(self):
# String construction detecting units
assert_equal(np.datetime64('2010').dtype,
np.dtype('M8[Y]'))
assert_equal(np.datetime64('2010-03').dtype,
np.dtype('M8[M]'))
assert_equal(np.datetime64('2010-03-12').dtype,
np.dtype('M8[D]'))
assert_equal(np.datetime64('2010-03-12T17').dtype,
np.dtype('M8[h]'))
assert_equal(np.datetime64('2010-03-12T17:15').dtype,
np.dtype('M8[m]'))
assert_equal(np.datetime64('2010-03-12T17:15:08').dtype,
np.dtype('M8[s]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.1').dtype,
np.dtype('M8[ms]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.12').dtype,
np.dtype('M8[ms]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.123').dtype,
np.dtype('M8[ms]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.1234').dtype,
np.dtype('M8[us]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.12345').dtype,
np.dtype('M8[us]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.123456').dtype,
np.dtype('M8[us]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.1234567').dtype,
np.dtype('M8[ns]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.12345678').dtype,
np.dtype('M8[ns]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.123456789').dtype,
np.dtype('M8[ns]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.1234567890').dtype,
np.dtype('M8[ps]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.12345678901').dtype,
np.dtype('M8[ps]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.123456789012').dtype,
np.dtype('M8[ps]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.1234567890123').dtype,
np.dtype('M8[fs]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.12345678901234').dtype,
np.dtype('M8[fs]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.123456789012345').dtype,
np.dtype('M8[fs]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.1234567890123456').dtype,
np.dtype('M8[as]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.12345678901234567').dtype,
np.dtype('M8[as]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.123456789012345678').dtype,
np.dtype('M8[as]'))
# Python date object
assert_equal(np.datetime64(datetime.date(2010, 4, 16)).dtype,
np.dtype('M8[D]'))
# Python datetime object
assert_equal(np.datetime64(
datetime.datetime(2010, 4, 16, 13, 45, 18)).dtype,
np.dtype('M8[us]'))
# 'today' special value
assert_equal(np.datetime64('today').dtype,
np.dtype('M8[D]'))
# 'now' special value
assert_equal(np.datetime64('now').dtype,
np.dtype('M8[s]'))
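    # Editor's summary of the pattern above (hedged): the unit inferred
    # from an ISO string follows its precision. Date parts give Y/M/D,
    # time parts give h/m/s, and 1-3, 4-6, 7-9, 10-12, 13-15 and 16-18
    # fractional digits give ms, us, ns, ps, fs and as respectively.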
def test_datetime_nat_casting(self):
a = np.array('NaT', dtype='M8[D]')
b = np.datetime64('NaT', '[D]')
# Arrays
assert_equal(a.astype('M8[s]'), np.array('NaT', dtype='M8[s]'))
assert_equal(a.astype('M8[ms]'), np.array('NaT', dtype='M8[ms]'))
assert_equal(a.astype('M8[M]'), np.array('NaT', dtype='M8[M]'))
assert_equal(a.astype('M8[Y]'), np.array('NaT', dtype='M8[Y]'))
assert_equal(a.astype('M8[W]'), np.array('NaT', dtype='M8[W]'))
# Scalars -> Scalars
assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]'))
assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]'))
assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]'))
assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]'))
assert_equal(np.datetime64(b, '[W]'), np.datetime64('NaT', '[W]'))
# Arrays -> Scalars
assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]'))
assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]'))
assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]'))
assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]'))
assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]'))
# NaN -> NaT
nan = np.array([np.nan] * 8)
fnan = nan.astype('f')
lnan = nan.astype('g')
cnan = nan.astype('D')
cfnan = nan.astype('F')
clnan = nan.astype('G')
nat = np.array([np.datetime64('NaT')] * 8)
assert_equal(nan.astype('M8[ns]'), nat)
assert_equal(fnan.astype('M8[ns]'), nat)
assert_equal(lnan.astype('M8[ns]'), nat)
assert_equal(cnan.astype('M8[ns]'), nat)
assert_equal(cfnan.astype('M8[ns]'), nat)
assert_equal(clnan.astype('M8[ns]'), nat)
nat = np.array([np.timedelta64('NaT')] * 8)
assert_equal(nan.astype('timedelta64[ns]'), nat)
assert_equal(fnan.astype('timedelta64[ns]'), nat)
assert_equal(lnan.astype('timedelta64[ns]'), nat)
assert_equal(cnan.astype('timedelta64[ns]'), nat)
assert_equal(cfnan.astype('timedelta64[ns]'), nat)
assert_equal(clnan.astype('timedelta64[ns]'), nat)
def test_days_creation(self):
        assert_equal(np.array('1599', dtype='M8[D]').astype('i8'),
                     (1600-1970)*365 - (1972-1600)//4 + 3 - 365)
        assert_equal(np.array('1600', dtype='M8[D]').astype('i8'),
                     (1600-1970)*365 - (1972-1600)//4 + 3)
        assert_equal(np.array('1601', dtype='M8[D]').astype('i8'),
                     (1600-1970)*365 - (1972-1600)//4 + 3 + 366)
assert_equal(np.array('1900', dtype='M8[D]').astype('i8'),
(1900-1970)*365 - (1970-1900)//4)
assert_equal(np.array('1901', dtype='M8[D]').astype('i8'),
(1900-1970)*365 - (1970-1900)//4 + 365)
assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3*365 - 1)
assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2*365 - 1)
assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1*365)
assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0*365)
assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1*365)
assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2*365)
assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3*365 + 1)
assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4*365 + 1)
assert_equal(np.array('2000', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4)
assert_equal(np.array('2001', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 366)
assert_equal(np.array('2400', dtype='M8[D]').astype('i8'),
(2400 - 1970)*365 + (2400 - 1972)//4 - 3)
assert_equal(np.array('2401', dtype='M8[D]').astype('i8'),
(2400 - 1970)*365 + (2400 - 1972)//4 - 3 + 366)
assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 28)
assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 29)
assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 28)
assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 29)
assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 366 + 31 + 28 + 21)
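    # Editor's worked example of the day arithmetic above (hedged): for
    # '1600', (1600-1970)*365 == -135050 counts whole years,
    # (1972-1600)//4 == 93 subtracts one leap day per 4-year cycle
    # anchored at the leap year 1972, and +3 adds back the skipped
    # century leap days 1700, 1800 and 1900, giving
    # -135050 - 93 + 3 == -135140 days before the 1970-01-01 epoch.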
def test_days_to_pydate(self):
assert_equal(np.array('1599', dtype='M8[D]').astype('O'),
datetime.date(1599, 1, 1))
assert_equal(np.array('1600', dtype='M8[D]').astype('O'),
datetime.date(1600, 1, 1))
assert_equal(np.array('1601', dtype='M8[D]').astype('O'),
datetime.date(1601, 1, 1))
assert_equal(np.array('1900', dtype='M8[D]').astype('O'),
datetime.date(1900, 1, 1))
assert_equal(np.array('1901', dtype='M8[D]').astype('O'),
datetime.date(1901, 1, 1))
assert_equal(np.array('2000', dtype='M8[D]').astype('O'),
datetime.date(2000, 1, 1))
assert_equal(np.array('2001', dtype='M8[D]').astype('O'),
datetime.date(2001, 1, 1))
assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'),
datetime.date(1600, 2, 29))
assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'),
datetime.date(1600, 3, 1))
assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'),
datetime.date(2001, 3, 22))
def test_dtype_comparison(self):
assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]')))
assert_(np.dtype('M8[us]') != np.dtype('M8[ms]'))
assert_(np.dtype('M8[2D]') != np.dtype('M8[D]'))
assert_(np.dtype('M8[D]') != np.dtype('M8[2D]'))
def test_pydatetime_creation(self):
a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]')
assert_equal(a[0], a[1])
a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]')
assert_equal(a[0], a[1])
a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]')
assert_equal(a[0], a[1])
        # Will fail if the date changes between the two evaluations
a = np.array(['today', datetime.date.today()], dtype='M8[D]')
assert_equal(a[0], a[1])
# datetime.datetime.now() returns local time, not UTC
#a = np.array(['now', datetime.datetime.now()], dtype='M8[s]')
#assert_equal(a[0], a[1])
# we can give a datetime.date time units
assert_equal(np.array(datetime.date(1960, 3, 12), dtype='M8[s]'),
np.array(np.datetime64('1960-03-12T00:00:00')))
def test_datetime_string_conversion(self):
a = ['2011-03-16', '1920-01-01', '2013-05-19']
str_a = np.array(a, dtype='S')
uni_a = np.array(a, dtype='U')
dt_a = np.array(a, dtype='M')
# String to datetime
assert_equal(dt_a, str_a.astype('M'))
assert_equal(dt_a.dtype, str_a.astype('M').dtype)
dt_b = np.empty_like(dt_a)
dt_b[...] = str_a
assert_equal(dt_a, dt_b)
# Datetime to string
assert_equal(str_a, dt_a.astype('S0'))
str_b = np.empty_like(str_a)
str_b[...] = dt_a
assert_equal(str_a, str_b)
# Unicode to datetime
assert_equal(dt_a, uni_a.astype('M'))
assert_equal(dt_a.dtype, uni_a.astype('M').dtype)
dt_b = np.empty_like(dt_a)
dt_b[...] = uni_a
assert_equal(dt_a, dt_b)
# Datetime to unicode
assert_equal(uni_a, dt_a.astype('U'))
uni_b = np.empty_like(uni_a)
uni_b[...] = dt_a
assert_equal(uni_a, uni_b)
# Datetime to long string - gh-9712
assert_equal(str_a, dt_a.astype((np.string_, 128)))
str_b = np.empty(str_a.shape, dtype=(np.string_, 128))
str_b[...] = dt_a
assert_equal(str_a, str_b)
@pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"])
def test_time_byteswapping(self, time_dtype):
times = np.array(["2017", "NaT"], dtype=time_dtype)
times_swapped = times.astype(times.dtype.newbyteorder())
assert_array_equal(times, times_swapped)
unswapped = times_swapped.view(np.int64).newbyteorder()
assert_array_equal(unswapped, times.view(np.int64))
@pytest.mark.parametrize(["time1", "time2"],
[("M8[s]", "M8[D]"), ("m8[s]", "m8[ns]")])
def test_time_byteswapped_cast(self, time1, time2):
dtype1 = np.dtype(time1)
dtype2 = np.dtype(time2)
times = np.array(["2017", "NaT"], dtype=dtype1)
expected = times.astype(dtype2)
# Test that every byte-swapping combination also returns the same
# results (previous tests check that this comparison works fine).
res = times.astype(dtype1.newbyteorder()).astype(dtype2)
assert_array_equal(res, expected)
res = times.astype(dtype2.newbyteorder())
assert_array_equal(res, expected)
res = times.astype(dtype1.newbyteorder()).astype(dtype2.newbyteorder())
assert_array_equal(res, expected)
@pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"])
@pytest.mark.parametrize("str_dtype", ["U", "S"])
def test_datetime_conversions_byteorders(self, str_dtype, time_dtype):
times = np.array(["2017", "NaT"], dtype=time_dtype)
        # Unfortunately, timedelta string conversion does not roundtrip,
        # so build the expected strings directly:
from_strings = np.array(["2017", "NaT"], dtype=str_dtype)
to_strings = times.astype(str_dtype) # assume this is correct
# Check that conversion from times to string works if src is swapped:
times_swapped = times.astype(times.dtype.newbyteorder())
res = times_swapped.astype(str_dtype)
assert_array_equal(res, to_strings)
# And also if both are swapped:
res = times_swapped.astype(to_strings.dtype.newbyteorder())
assert_array_equal(res, to_strings)
        # Only destination is swapped:
res = times.astype(to_strings.dtype.newbyteorder())
assert_array_equal(res, to_strings)
# Check that conversion from string to times works if src is swapped:
from_strings_swapped = from_strings.astype(
from_strings.dtype.newbyteorder())
res = from_strings_swapped.astype(time_dtype)
assert_array_equal(res, times)
# And if both are swapped:
res = from_strings_swapped.astype(times.dtype.newbyteorder())
assert_array_equal(res, times)
# Only destination is swapped:
res = from_strings.astype(times.dtype.newbyteorder())
assert_array_equal(res, times)
def test_datetime_array_str(self):
a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M')
assert_equal(str(a), "['2011-03-16' '1920-01-01' '2013-05-19']")
a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M')
assert_equal(np.array2string(a, separator=', ',
formatter={'datetime': lambda x:
"'%s'" % np.datetime_as_string(x, timezone='UTC')}),
"['2011-03-16T13:55Z', '1920-01-01T03:12Z']")
# Check that one NaT doesn't corrupt subsequent entries
a = np.array(['2010', 'NaT', '2030']).astype('M')
assert_equal(str(a), "['2010' 'NaT' '2030']")
def test_timedelta_array_str(self):
        a = np.array([-1, 0, 100], dtype='m')
        assert_equal(str(a), "[ -1   0 100]")
        a = np.array(['NaT', 'NaT'], dtype='m')
        assert_equal(str(a), "['NaT' 'NaT']")
        # Check right-alignment with NaTs
        a = np.array([-1, 'NaT', 0], dtype='m')
        assert_equal(str(a), "[   -1 'NaT'     0]")
        a = np.array([-1, 'NaT', 1234567], dtype='m')
        assert_equal(str(a), "[     -1   'NaT' 1234567]")
        # Test with other byteorder:
        a = np.array([-1, 'NaT', 1234567], dtype='>m')
        assert_equal(str(a), "[     -1   'NaT' 1234567]")
        a = np.array([-1, 'NaT', 1234567], dtype='<m')
        assert_equal(str(a), "[     -1   'NaT' 1234567]")
def test_pickle(self):
# Check that pickle roundtripping works
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
dt = np.dtype('M8[7D]')
assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)
dt = np.dtype('M8[W]')
assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)
scalar = np.datetime64('2016-01-01T00:00:00.000000000')
assert_equal(pickle.loads(pickle.dumps(scalar, protocol=proto)),
scalar)
delta = scalar - np.datetime64('2015-01-01T00:00:00.000000000')
assert_equal(pickle.loads(pickle.dumps(delta, protocol=proto)),
delta)
# Check that loading pickles from 1.6 works
pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n" + \
b"I7\nI1\nI1\ntp7\ntp8\ntp9\nb."
assert_equal(pickle.loads(pkl), np.dtype('<M8[7D]'))
pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'W'\np6\n" + \
b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
assert_equal(pickle.loads(pkl), np.dtype('<M8[W]'))
pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
b"(I4\nS'>'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" + \
b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
assert_equal(pickle.loads(pkl), np.dtype('>M8[us]'))
def test_setstate(self):
"Verify that datetime dtype __setstate__ can handle bad arguments"
dt = np.dtype('>M8[us]')
assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1))
assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx')))
assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
def test_dtype_promotion(self):
# datetime <op> datetime computes the metadata gcd
# timedelta <op> timedelta computes the metadata gcd
for mM in ['m', 'M']:
assert_equal(
np.promote_types(np.dtype(mM+'8[2Y]'), np.dtype(mM+'8[2Y]')),
np.dtype(mM+'8[2Y]'))
assert_equal(
np.promote_types(np.dtype(mM+'8[12Y]'), np.dtype(mM+'8[15Y]')),
np.dtype(mM+'8[3Y]'))
assert_equal(
np.promote_types(np.dtype(mM+'8[62M]'), np.dtype(mM+'8[24M]')),
np.dtype(mM+'8[2M]'))
assert_equal(
np.promote_types(np.dtype(mM+'8[1W]'), np.dtype(mM+'8[2D]')),
np.dtype(mM+'8[1D]'))
assert_equal(
np.promote_types(np.dtype(mM+'8[W]'), np.dtype(mM+'8[13s]')),
np.dtype(mM+'8[s]'))
assert_equal(
np.promote_types(np.dtype(mM+'8[13W]'), np.dtype(mM+'8[49s]')),
np.dtype(mM+'8[7s]'))
# timedelta <op> timedelta raises when there is no reasonable gcd
assert_raises(TypeError, np.promote_types,
np.dtype('m8[Y]'), np.dtype('m8[D]'))
assert_raises(TypeError, np.promote_types,
np.dtype('m8[M]'), np.dtype('m8[W]'))
        # timedelta cannot be safely cast to or from float or uint64
assert_raises(TypeError, np.promote_types, "float32", "m8")
assert_raises(TypeError, np.promote_types, "m8", "float32")
assert_raises(TypeError, np.promote_types, "uint64", "m8")
assert_raises(TypeError, np.promote_types, "m8", "uint64")
# timedelta <op> timedelta may overflow with big unit ranges
assert_raises(OverflowError, np.promote_types,
np.dtype('m8[W]'), np.dtype('m8[fs]'))
assert_raises(OverflowError, np.promote_types,
np.dtype('m8[s]'), np.dtype('m8[as]'))
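    def test_dtype_promotion_gcd_sketch(self):
        # Illustrative sketch (editor's addition, not part of the original
        # suite): the promoted unit above is literally the GCD of the
        # operand units, e.g. gcd(62, 24) == 2 for the 62M/24M case.
        import math
        assert_equal(math.gcd(62, 24), 2)
        assert_equal(np.promote_types(np.dtype('m8[62M]'), np.dtype('m8[24M]')),
                     np.dtype('m8[2M]'))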
def test_cast_overflow(self):
# gh-4486
def cast():
numpy.datetime64("1971-01-01 00:00:00.000000000000000").astype("<M8[D]")
assert_raises(OverflowError, cast)
def cast2():
numpy.datetime64("2014").astype("<M8[fs]")
assert_raises(OverflowError, cast2)
def test_pyobject_roundtrip(self):
# All datetime types should be able to roundtrip through object
a = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0,
-1020040340, -2942398, -1, 0, 1, 234523453, 1199164176],
dtype=np.int64)
# With date units
for unit in ['M8[D]', 'M8[W]', 'M8[M]', 'M8[Y]']:
b = a.copy().view(dtype=unit)
b[0] = '-0001-01-01'
b[1] = '-0001-12-31'
b[2] = '0000-01-01'
b[3] = '0001-01-01'
b[4] = '1969-12-31'
b[5] = '1970-01-01'
b[6] = '9999-12-31'
b[7] = '10000-01-01'
b[8] = 'NaT'
assert_equal(b.astype(object).astype(unit), b,
"Error roundtripping unit %s" % unit)
# With time units
for unit in ['M8[as]', 'M8[16fs]', 'M8[ps]', 'M8[us]',
'M8[300as]', 'M8[20us]']:
b = a.copy().view(dtype=unit)
b[0] = '-0001-01-01T00'
b[1] = '-0001-12-31T00'
b[2] = '0000-01-01T00'
b[3] = '0001-01-01T00'
b[4] = '1969-12-31T23:59:59.999999'
b[5] = '1970-01-01T00'
b[6] = '9999-12-31T23:59:59.999999'
b[7] = '10000-01-01T00'
b[8] = 'NaT'
assert_equal(b.astype(object).astype(unit), b,
"Error roundtripping unit %s" % unit)
def test_month_truncation(self):
# Make sure that months are truncating correctly
assert_equal(np.array('1945-03-01', dtype='M8[M]'),
np.array('1945-03-31', dtype='M8[M]'))
assert_equal(np.array('1969-11-01', dtype='M8[M]'),
np.array('1969-11-30T23:59:59.99999', dtype='M').astype('M8[M]'))
assert_equal(np.array('1969-12-01', dtype='M8[M]'),
np.array('1969-12-31T23:59:59.99999', dtype='M').astype('M8[M]'))
assert_equal(np.array('1970-01-01', dtype='M8[M]'),
np.array('1970-01-31T23:59:59.99999', dtype='M').astype('M8[M]'))
assert_equal(np.array('1980-02-01', dtype='M8[M]'),
np.array('1980-02-29T23:59:59.99999', dtype='M').astype('M8[M]'))
def test_different_unit_comparison(self):
# Check some years with date units
for unit1 in ['Y', 'M', 'D']:
dt1 = np.dtype('M8[%s]' % unit1)
for unit2 in ['Y', 'M', 'D']:
dt2 = np.dtype('M8[%s]' % unit2)
assert_equal(np.array('1945', dtype=dt1),
np.array('1945', dtype=dt2))
assert_equal(np.array('1970', dtype=dt1),
np.array('1970', dtype=dt2))
assert_equal(np.array('9999', dtype=dt1),
np.array('9999', dtype=dt2))
assert_equal(np.array('10000', dtype=dt1),
np.array('10000-01-01', dtype=dt2))
assert_equal(np.datetime64('1945', unit1),
np.datetime64('1945', unit2))
assert_equal(np.datetime64('1970', unit1),
np.datetime64('1970', unit2))
assert_equal(np.datetime64('9999', unit1),
np.datetime64('9999', unit2))
assert_equal(np.datetime64('10000', unit1),
np.datetime64('10000-01-01', unit2))
# Check some datetimes with time units
for unit1 in ['6h', 'h', 'm', 's', '10ms', 'ms', 'us']:
dt1 = np.dtype('M8[%s]' % unit1)
for unit2 in ['h', 'm', 's', 'ms', 'us']:
dt2 = np.dtype('M8[%s]' % unit2)
assert_equal(np.array('1945-03-12T18', dtype=dt1),
np.array('1945-03-12T18', dtype=dt2))
assert_equal(np.array('1970-03-12T18', dtype=dt1),
np.array('1970-03-12T18', dtype=dt2))
assert_equal(np.array('9999-03-12T18', dtype=dt1),
np.array('9999-03-12T18', dtype=dt2))
assert_equal(np.array('10000-01-01T00', dtype=dt1),
np.array('10000-01-01T00', dtype=dt2))
assert_equal(np.datetime64('1945-03-12T18', unit1),
np.datetime64('1945-03-12T18', unit2))
assert_equal(np.datetime64('1970-03-12T18', unit1),
np.datetime64('1970-03-12T18', unit2))
assert_equal(np.datetime64('9999-03-12T18', unit1),
np.datetime64('9999-03-12T18', unit2))
assert_equal(np.datetime64('10000-01-01T00', unit1),
np.datetime64('10000-01-01T00', unit2))
# Check some days with units that won't overflow
for unit1 in ['D', '12h', 'h', 'm', 's', '4s', 'ms', 'us']:
dt1 = np.dtype('M8[%s]' % unit1)
for unit2 in ['D', 'h', 'm', 's', 'ms', 'us']:
dt2 = np.dtype('M8[%s]' % unit2)
assert_(np.equal(np.array('1932-02-17', dtype='M').astype(dt1),
np.array('1932-02-17T00:00:00', dtype='M').astype(dt2),
casting='unsafe'))
assert_(np.equal(np.array('10000-04-27', dtype='M').astype(dt1),
np.array('10000-04-27T00:00:00', dtype='M').astype(dt2),
casting='unsafe'))
# Shouldn't be able to compare datetime and timedelta
# TODO: Changing to 'same_kind' or 'safe' casting in the ufuncs by
# default is needed to properly catch this kind of thing...
a = np.array('2012-12-21', dtype='M8[D]')
b = np.array(3, dtype='m8[D]')
#assert_raises(TypeError, np.less, a, b)
assert_raises(TypeError, np.less, a, b, casting='same_kind')
def test_datetime_like(self):
a = np.array([3], dtype='m8[4D]')
b = np.array(['2012-12-21'], dtype='M8[D]')
assert_equal(np.ones_like(a).dtype, a.dtype)
assert_equal(np.zeros_like(a).dtype, a.dtype)
assert_equal(np.empty_like(a).dtype, a.dtype)
assert_equal(np.ones_like(b).dtype, b.dtype)
assert_equal(np.zeros_like(b).dtype, b.dtype)
assert_equal(np.empty_like(b).dtype, b.dtype)
def test_datetime_unary(self):
for tda, tdb, tdzero, tdone, tdmone in \
[
# One-dimensional arrays
(np.array([3], dtype='m8[D]'),
np.array([-3], dtype='m8[D]'),
np.array([0], dtype='m8[D]'),
np.array([1], dtype='m8[D]'),
np.array([-1], dtype='m8[D]')),
# NumPy scalars
(np.timedelta64(3, '[D]'),
np.timedelta64(-3, '[D]'),
np.timedelta64(0, '[D]'),
np.timedelta64(1, '[D]'),
np.timedelta64(-1, '[D]'))]:
# negative ufunc
assert_equal(-tdb, tda)
assert_equal((-tdb).dtype, tda.dtype)
assert_equal(np.negative(tdb), tda)
assert_equal(np.negative(tdb).dtype, tda.dtype)
# positive ufunc
assert_equal(np.positive(tda), tda)
assert_equal(np.positive(tda).dtype, tda.dtype)
assert_equal(np.positive(tdb), tdb)
assert_equal(np.positive(tdb).dtype, tdb.dtype)
# absolute ufunc
assert_equal(np.absolute(tdb), tda)
assert_equal(np.absolute(tdb).dtype, tda.dtype)
# sign ufunc
assert_equal(np.sign(tda), tdone)
assert_equal(np.sign(tdb), tdmone)
assert_equal(np.sign(tdzero), tdzero)
assert_equal(np.sign(tda).dtype, tda.dtype)
            # The ufuncs always produce native-endian results
            assert_(np.negative(tdb).dtype.isnative)
def test_datetime_add(self):
for dta, dtb, dtc, dtnat, tda, tdb, tdc in \
[
# One-dimensional arrays
(np.array(['2012-12-21'], dtype='M8[D]'),
np.array(['2012-12-24'], dtype='M8[D]'),
np.array(['2012-12-21T11'], dtype='M8[h]'),
np.array(['NaT'], dtype='M8[D]'),
np.array([3], dtype='m8[D]'),
np.array([11], dtype='m8[h]'),
np.array([3*24 + 11], dtype='m8[h]')),
# NumPy scalars
(np.datetime64('2012-12-21', '[D]'),
np.datetime64('2012-12-24', '[D]'),
np.datetime64('2012-12-21T11', '[h]'),
np.datetime64('NaT', '[D]'),
np.timedelta64(3, '[D]'),
np.timedelta64(11, '[h]'),
np.timedelta64(3*24 + 11, '[h]'))]:
# m8 + m8
assert_equal(tda + tdb, tdc)
assert_equal((tda + tdb).dtype, np.dtype('m8[h]'))
# m8 + bool
assert_equal(tdb + True, tdb + 1)
assert_equal((tdb + True).dtype, np.dtype('m8[h]'))
# m8 + int
assert_equal(tdb + 3*24, tdc)
assert_equal((tdb + 3*24).dtype, np.dtype('m8[h]'))
# bool + m8
assert_equal(False + tdb, tdb)
assert_equal((False + tdb).dtype, np.dtype('m8[h]'))
# int + m8
assert_equal(3*24 + tdb, tdc)
assert_equal((3*24 + tdb).dtype, np.dtype('m8[h]'))
# M8 + bool
assert_equal(dta + True, dta + 1)
assert_equal(dtnat + True, dtnat)
assert_equal((dta + True).dtype, np.dtype('M8[D]'))
# M8 + int
assert_equal(dta + 3, dtb)
assert_equal(dtnat + 3, dtnat)
assert_equal((dta + 3).dtype, np.dtype('M8[D]'))
# bool + M8
assert_equal(False + dta, dta)
assert_equal(False + dtnat, dtnat)
assert_equal((False + dta).dtype, np.dtype('M8[D]'))
# int + M8
assert_equal(3 + dta, dtb)
assert_equal(3 + dtnat, dtnat)
assert_equal((3 + dta).dtype, np.dtype('M8[D]'))
# M8 + m8
assert_equal(dta + tda, dtb)
assert_equal(dtnat + tda, dtnat)
assert_equal((dta + tda).dtype, np.dtype('M8[D]'))
# m8 + M8
assert_equal(tda + dta, dtb)
assert_equal(tda + dtnat, dtnat)
assert_equal((tda + dta).dtype, np.dtype('M8[D]'))
# In M8 + m8, the result goes to higher precision
assert_equal(np.add(dta, tdb, casting='unsafe'), dtc)
assert_equal(np.add(dta, tdb, casting='unsafe').dtype,
np.dtype('M8[h]'))
assert_equal(np.add(tdb, dta, casting='unsafe'), dtc)
assert_equal(np.add(tdb, dta, casting='unsafe').dtype,
np.dtype('M8[h]'))
# M8 + M8
assert_raises(TypeError, np.add, dta, dtb)
def test_datetime_subtract(self):
for dta, dtb, dtc, dtd, dte, dtnat, tda, tdb, tdc in \
[
# One-dimensional arrays
(np.array(['2012-12-21'], dtype='M8[D]'),
np.array(['2012-12-24'], dtype='M8[D]'),
np.array(['1940-12-24'], dtype='M8[D]'),
np.array(['1940-12-24T00'], dtype='M8[h]'),
np.array(['1940-12-23T13'], dtype='M8[h]'),
np.array(['NaT'], dtype='M8[D]'),
np.array([3], dtype='m8[D]'),
np.array([11], dtype='m8[h]'),
np.array([3*24 - 11], dtype='m8[h]')),
# NumPy scalars
(np.datetime64('2012-12-21', '[D]'),
np.datetime64('2012-12-24', '[D]'),
np.datetime64('1940-12-24', '[D]'),
np.datetime64('1940-12-24T00', '[h]'),
np.datetime64('1940-12-23T13', '[h]'),
np.datetime64('NaT', '[D]'),
np.timedelta64(3, '[D]'),
np.timedelta64(11, '[h]'),
np.timedelta64(3*24 - 11, '[h]'))]:
# m8 - m8
assert_equal(tda - tdb, tdc)
assert_equal((tda - tdb).dtype, np.dtype('m8[h]'))
assert_equal(tdb - tda, -tdc)
assert_equal((tdb - tda).dtype, np.dtype('m8[h]'))
# m8 - bool
assert_equal(tdc - True, tdc - 1)
assert_equal((tdc - True).dtype, np.dtype('m8[h]'))
# m8 - int
assert_equal(tdc - 3*24, -tdb)
assert_equal((tdc - 3*24).dtype, np.dtype('m8[h]'))
            # bool - m8
assert_equal(False - tdb, -tdb)
assert_equal((False - tdb).dtype, np.dtype('m8[h]'))
# int - m8
assert_equal(3*24 - tdb, tdc)
assert_equal((3*24 - tdb).dtype, np.dtype('m8[h]'))
# M8 - bool
assert_equal(dtb - True, dtb - 1)
assert_equal(dtnat - True, dtnat)
assert_equal((dtb - True).dtype, np.dtype('M8[D]'))
# M8 - int
assert_equal(dtb - 3, dta)
assert_equal(dtnat - 3, dtnat)
assert_equal((dtb - 3).dtype, np.dtype('M8[D]'))
# M8 - m8
assert_equal(dtb - tda, dta)
assert_equal(dtnat - tda, dtnat)
assert_equal((dtb - tda).dtype, np.dtype('M8[D]'))
# In M8 - m8, the result goes to higher precision
assert_equal(np.subtract(dtc, tdb, casting='unsafe'), dte)
assert_equal(np.subtract(dtc, tdb, casting='unsafe').dtype,
np.dtype('M8[h]'))
            # M8 - M8 with different units goes to higher precision
assert_equal(np.subtract(dtc, dtd, casting='unsafe'),
np.timedelta64(0, 'h'))
assert_equal(np.subtract(dtc, dtd, casting='unsafe').dtype,
np.dtype('m8[h]'))
assert_equal(np.subtract(dtd, dtc, casting='unsafe'),
np.timedelta64(0, 'h'))
assert_equal(np.subtract(dtd, dtc, casting='unsafe').dtype,
np.dtype('m8[h]'))
# m8 - M8
assert_raises(TypeError, np.subtract, tda, dta)
# bool - M8
assert_raises(TypeError, np.subtract, False, dta)
# int - M8
assert_raises(TypeError, np.subtract, 3, dta)
def test_datetime_multiply(self):
for dta, tda, tdb, tdc in \
[
# One-dimensional arrays
(np.array(['2012-12-21'], dtype='M8[D]'),
np.array([6], dtype='m8[h]'),
np.array([9], dtype='m8[h]'),
np.array([12], dtype='m8[h]')),
# NumPy scalars
(np.datetime64('2012-12-21', '[D]'),
np.timedelta64(6, '[h]'),
np.timedelta64(9, '[h]'),
np.timedelta64(12, '[h]'))]:
# m8 * int
assert_equal(tda * 2, tdc)
assert_equal((tda * 2).dtype, np.dtype('m8[h]'))
# int * m8
assert_equal(2 * tda, tdc)
assert_equal((2 * tda).dtype, np.dtype('m8[h]'))
# m8 * float
assert_equal(tda * 1.5, tdb)
assert_equal((tda * 1.5).dtype, np.dtype('m8[h]'))
# float * m8
assert_equal(1.5 * tda, tdb)
assert_equal((1.5 * tda).dtype, np.dtype('m8[h]'))
# m8 * m8
assert_raises(TypeError, np.multiply, tda, tdb)
# m8 * M8
assert_raises(TypeError, np.multiply, dta, tda)
# M8 * m8
assert_raises(TypeError, np.multiply, tda, dta)
# M8 * int
assert_raises(TypeError, np.multiply, dta, 2)
# int * M8
assert_raises(TypeError, np.multiply, 2, dta)
# M8 * float
assert_raises(TypeError, np.multiply, dta, 1.5)
# float * M8
assert_raises(TypeError, np.multiply, 1.5, dta)
# NaTs
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in multiply")
nat = np.timedelta64('NaT')
def check(a, b, res):
assert_equal(a * b, res)
assert_equal(b * a, res)
for tp in (int, float):
check(nat, tp(2), nat)
check(nat, tp(0), nat)
for f in (float('inf'), float('nan')):
check(np.timedelta64(1), f, nat)
check(np.timedelta64(0), f, nat)
check(nat, f, nat)
@pytest.mark.parametrize("op1, op2, exp", [
# m8 same units round down
(np.timedelta64(7, 's'),
np.timedelta64(4, 's'),
1),
# m8 same units round down with negative
(np.timedelta64(7, 's'),
np.timedelta64(-4, 's'),
-2),
# m8 same units negative no round down
(np.timedelta64(8, 's'),
np.timedelta64(-4, 's'),
-2),
# m8 different units
(np.timedelta64(1, 'm'),
np.timedelta64(31, 's'),
1),
# m8 generic units
(np.timedelta64(1890),
np.timedelta64(31),
60),
# Y // M works
(np.timedelta64(2, 'Y'),
np.timedelta64('13', 'M'),
1),
# handle 1D arrays
(np.array([1, 2, 3], dtype='m8'),
np.array([2], dtype='m8'),
np.array([0, 1, 1], dtype=np.int64)),
])
def test_timedelta_floor_divide(self, op1, op2, exp):
assert_equal(op1 // op2, exp)
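    def test_timedelta_floor_divide_sign_sketch(self):
        # Illustrative sketch (editor's addition, not part of the original
        # suite): timedelta64 floor division matches Python's integer //,
        # rounding toward negative infinity when the signs differ.
        assert_equal(7 // -4, -2)
        assert_equal(np.timedelta64(7, 's') // np.timedelta64(-4, 's'), -2)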
@pytest.mark.parametrize("op1, op2", [
# div by 0
(np.timedelta64(10, 'us'),
np.timedelta64(0, 'us')),
# div with NaT
(np.timedelta64('NaT'),
np.timedelta64(50, 'us')),
# special case for int64 min
# in integer floor division
(np.timedelta64(np.iinfo(np.int64).min),
np.timedelta64(-1)),
])
def test_timedelta_floor_div_warnings(self, op1, op2):
with assert_warns(RuntimeWarning):
actual = op1 // op2
assert_equal(actual, 0)
assert_equal(actual.dtype, np.int64)
@pytest.mark.parametrize("val1, val2", [
# the smallest integer that can't be represented
# exactly in a double should be preserved if we avoid
# casting to double in floordiv operation
(9007199254740993, 1),
# stress the alternate floordiv code path where
# operand signs don't match and remainder isn't 0
(9007199254740999, -2),
])
def test_timedelta_floor_div_precision(self, val1, val2):
op1 = np.timedelta64(val1)
op2 = np.timedelta64(val2)
actual = op1 // op2
# Python reference integer floor
expected = val1 // val2
assert_equal(actual, expected)
@pytest.mark.parametrize("val1, val2", [
# years and months sometimes can't be unambiguously
# divided for floor division operation
(np.timedelta64(7, 'Y'),
np.timedelta64(3, 's')),
(np.timedelta64(7, 'M'),
np.timedelta64(1, 'D')),
])
def test_timedelta_floor_div_error(self, val1, val2):
with assert_raises_regex(TypeError, "common metadata divisor"):
val1 // val2
@pytest.mark.parametrize("op1, op2", [
# reuse the test cases from floordiv
(np.timedelta64(7, 's'),
np.timedelta64(4, 's')),
# m8 same units round down with negative
(np.timedelta64(7, 's'),
np.timedelta64(-4, 's')),
# m8 same units negative no round down
(np.timedelta64(8, 's'),
np.timedelta64(-4, 's')),
# m8 different units
(np.timedelta64(1, 'm'),
np.timedelta64(31, 's')),
# m8 generic units
(np.timedelta64(1890),
np.timedelta64(31)),
# Y // M works
(np.timedelta64(2, 'Y'),
np.timedelta64('13', 'M')),
# handle 1D arrays
(np.array([1, 2, 3], dtype='m8'),
np.array([2], dtype='m8')),
])
def test_timedelta_divmod(self, op1, op2):
expected = (op1 // op2, op1 % op2)
assert_equal(divmod(op1, op2), expected)
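    def test_timedelta_divmod_identity_sketch(self):
        # Illustrative sketch (editor's addition, not part of the original
        # suite): divmod preserves the usual identity
        # a == (a // b) * b + a % b for timedelta64 operands.
        a = np.timedelta64(7, 's')
        b = np.timedelta64(-4, 's')
        q, r = divmod(a, b)
        assert_equal(q * b + r, a)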
@pytest.mark.parametrize("op1, op2", [
# reuse cases from floordiv
# div by 0
(np.timedelta64(10, 'us'),
np.timedelta64(0, 'us')),
# div with NaT
(np.timedelta64('NaT'),
np.timedelta64(50, 'us')),
# special case for int64 min
# in integer floor division
(np.timedelta64(np.iinfo(np.int64).min),
np.timedelta64(-1)),
])
def test_timedelta_divmod_warnings(self, op1, op2):
with assert_warns(RuntimeWarning):
expected = (op1 // op2, op1 % op2)
with assert_warns(RuntimeWarning):
actual = divmod(op1, op2)
assert_equal(actual, expected)
def test_datetime_divide(self):
for dta, tda, tdb, tdc, tdd in \
[
# One-dimensional arrays
(np.array(['2012-12-21'], dtype='M8[D]'),
np.array([6], dtype='m8[h]'),
np.array([9], dtype='m8[h]'),
np.array([12], dtype='m8[h]'),
np.array([6], dtype='m8[m]')),
# NumPy scalars
(np.datetime64('2012-12-21', '[D]'),
np.timedelta64(6, '[h]'),
np.timedelta64(9, '[h]'),
np.timedelta64(12, '[h]'),
np.timedelta64(6, '[m]'))]:
# m8 / int
assert_equal(tdc / 2, tda)
assert_equal((tdc / 2).dtype, np.dtype('m8[h]'))
# m8 / float
assert_equal(tda / 0.5, tdc)
assert_equal((tda / 0.5).dtype, np.dtype('m8[h]'))
# m8 / m8
assert_equal(tda / tdb, 6 / 9)
assert_equal(np.divide(tda, tdb), 6 / 9)
assert_equal(np.true_divide(tda, tdb), 6 / 9)
assert_equal(tdb / tda, 9 / 6)
assert_equal((tda / tdb).dtype, np.dtype('f8'))
assert_equal(tda / tdd, 60)
assert_equal(tdd / tda, 1 / 60)
# int / m8
assert_raises(TypeError, np.divide, 2, tdb)
# float / m8
assert_raises(TypeError, np.divide, 0.5, tdb)
# m8 / M8
assert_raises(TypeError, np.divide, dta, tda)
# M8 / m8
assert_raises(TypeError, np.divide, tda, dta)
# M8 / int
assert_raises(TypeError, np.divide, dta, 2)
# int / M8
assert_raises(TypeError, np.divide, 2, dta)
# M8 / float
assert_raises(TypeError, np.divide, dta, 1.5)
# float / M8
assert_raises(TypeError, np.divide, 1.5, dta)
# NaTs
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, r".*encountered in divide")
nat = np.timedelta64('NaT')
for tp in (int, float):
assert_equal(np.timedelta64(1) / tp(0), nat)
assert_equal(np.timedelta64(0) / tp(0), nat)
assert_equal(nat / tp(0), nat)
assert_equal(nat / tp(2), nat)
# Division by inf
assert_equal(np.timedelta64(1) / float('inf'), np.timedelta64(0))
assert_equal(np.timedelta64(0) / float('inf'), np.timedelta64(0))
assert_equal(nat / float('inf'), nat)
# Division by nan
assert_equal(np.timedelta64(1) / float('nan'), nat)
assert_equal(np.timedelta64(0) / float('nan'), nat)
assert_equal(nat / float('nan'), nat)
def test_datetime_compare(self):
# Test all the comparison operators
a = np.datetime64('2000-03-12T18:00:00.000000')
b = np.array(['2000-03-12T18:00:00.000000',
'2000-03-12T17:59:59.999999',
'2000-03-12T18:00:00.000001',
'1970-01-11T12:00:00.909090',
'2016-01-11T12:00:00.909090'],
dtype='datetime64[us]')
assert_equal(np.equal(a, b), [1, 0, 0, 0, 0])
assert_equal(np.not_equal(a, b), [0, 1, 1, 1, 1])
assert_equal(np.less(a, b), [0, 0, 1, 0, 1])
assert_equal(np.less_equal(a, b), [1, 0, 1, 0, 1])
assert_equal(np.greater(a, b), [0, 1, 0, 1, 0])
assert_equal(np.greater_equal(a, b), [1, 1, 0, 1, 0])
def test_datetime_compare_nat(self):
dt_nat = np.datetime64('NaT', 'D')
dt_other = np.datetime64('2000-01-01')
td_nat = np.timedelta64('NaT', 'h')
td_other = np.timedelta64(1, 'h')
for op in [np.equal, np.less, np.less_equal,
np.greater, np.greater_equal]:
assert_(not op(dt_nat, dt_nat))
assert_(not op(dt_nat, dt_other))
assert_(not op(dt_other, dt_nat))
assert_(not op(td_nat, td_nat))
assert_(not op(td_nat, td_other))
assert_(not op(td_other, td_nat))
assert_(np.not_equal(dt_nat, dt_nat))
assert_(np.not_equal(dt_nat, dt_other))
assert_(np.not_equal(dt_other, dt_nat))
assert_(np.not_equal(td_nat, td_nat))
assert_(np.not_equal(td_nat, td_other))
assert_(np.not_equal(td_other, td_nat))
def test_datetime_minmax(self):
# The metadata of the result should become the GCD
# of the operand metadata
a = np.array('1999-03-12T13', dtype='M8[2m]')
b = np.array('1999-03-12T12', dtype='M8[s]')
assert_equal(np.minimum(a, b), b)
assert_equal(np.minimum(a, b).dtype, np.dtype('M8[s]'))
assert_equal(np.fmin(a, b), b)
assert_equal(np.fmin(a, b).dtype, np.dtype('M8[s]'))
assert_equal(np.maximum(a, b), a)
assert_equal(np.maximum(a, b).dtype, np.dtype('M8[s]'))
assert_equal(np.fmax(a, b), a)
assert_equal(np.fmax(a, b).dtype, np.dtype('M8[s]'))
# Viewed as integers, the comparison is opposite because
# of the units chosen
assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))
# Interaction with NaT
a = np.array('1999-03-12T13', dtype='M8[2m]')
dtnat = np.array('NaT', dtype='M8[h]')
assert_equal(np.minimum(a, dtnat), dtnat)
assert_equal(np.minimum(dtnat, a), dtnat)
assert_equal(np.maximum(a, dtnat), dtnat)
assert_equal(np.maximum(dtnat, a), dtnat)
assert_equal(np.fmin(dtnat, a), a)
assert_equal(np.fmin(a, dtnat), a)
assert_equal(np.fmax(dtnat, a), a)
assert_equal(np.fmax(a, dtnat), a)
# Also do timedelta
a = np.array(3, dtype='m8[h]')
b = np.array(3*3600 - 3, dtype='m8[s]')
assert_equal(np.minimum(a, b), b)
assert_equal(np.minimum(a, b).dtype, np.dtype('m8[s]'))
assert_equal(np.fmin(a, b), b)
assert_equal(np.fmin(a, b).dtype, np.dtype('m8[s]'))
assert_equal(np.maximum(a, b), a)
assert_equal(np.maximum(a, b).dtype, np.dtype('m8[s]'))
assert_equal(np.fmax(a, b), a)
assert_equal(np.fmax(a, b).dtype, np.dtype('m8[s]'))
# Viewed as integers, the comparison is opposite because
# of the units chosen
assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))
# should raise between datetime and timedelta
#
# TODO: Allowing unsafe casting by
# default in ufuncs strikes again... :(
a = np.array(3, dtype='m8[h]')
b = np.array('1999-03-12T12', dtype='M8[s]')
#assert_raises(TypeError, np.minimum, a, b)
#assert_raises(TypeError, np.maximum, a, b)
#assert_raises(TypeError, np.fmin, a, b)
#assert_raises(TypeError, np.fmax, a, b)
assert_raises(TypeError, np.minimum, a, b, casting='same_kind')
assert_raises(TypeError, np.maximum, a, b, casting='same_kind')
assert_raises(TypeError, np.fmin, a, b, casting='same_kind')
assert_raises(TypeError, np.fmax, a, b, casting='same_kind')
def test_hours(self):
t = np.ones(3, dtype='M8[s]')
t[0] = 60*60*24 + 60*60*10
assert_(t[0].item().hour == 10)
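    def test_hours_item_sketch(self):
        # Illustrative sketch (editor's addition, not part of the original
        # suite): .item() on an 'M8[s]' scalar yields a stdlib
        # datetime.datetime, so accessors like .hour apply directly.
        t = np.datetime64('1970-01-02T10:00:00', 's')
        assert_(isinstance(t.item(), datetime.datetime))
        assert_equal(t.item().hour, 10)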
def test_divisor_conversion_year(self):
assert_(np.dtype('M8[Y/4]') == np.dtype('M8[3M]'))
assert_(np.dtype('M8[Y/13]') == np.dtype('M8[4W]'))
assert_(np.dtype('M8[3Y/73]') == np.dtype('M8[15D]'))
def test_divisor_conversion_month(self):
assert_(np.dtype('M8[M/2]') == np.dtype('M8[2W]'))
assert_(np.dtype('M8[M/15]') == np.dtype('M8[2D]'))
assert_(np.dtype('M8[3M/40]') == np.dtype('M8[54h]'))
def test_divisor_conversion_week(self):
assert_(np.dtype('m8[W/7]') == np.dtype('m8[D]'))
assert_(np.dtype('m8[3W/14]') == np.dtype('m8[36h]'))
assert_(np.dtype('m8[5W/140]') == np.dtype('m8[360m]'))
def test_divisor_conversion_day(self):
assert_(np.dtype('M8[D/12]') == np.dtype('M8[2h]'))
assert_(np.dtype('M8[D/120]') == np.dtype('M8[12m]'))
assert_(np.dtype('M8[3D/960]') == np.dtype('M8[270s]'))
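    def test_divisor_conversion_arithmetic_sketch(self):
        # Illustrative sketch (editor's addition, not part of the original
        # suite): 'M8[D/12]' divides one day into 12 equal steps of
        # 24 h / 12 == 2 h each, which is why it equals 'M8[2h]' above.
        assert_equal(24 // 12, 2)
        assert_(np.dtype('M8[D/12]') == np.dtype('M8[2h]'))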
def test_divisor_conversion_hour(self):
assert_(np.dtype('m8[h/30]') == np.dtype('m8[2m]'))
assert_(np.dtype('m8[3h/300]') == np.dtype('m8[36s]'))
def test_divisor_conversion_minute(self):
assert_(np.dtype('m8[m/30]') == np.dtype('m8[2s]'))
assert_(np.dtype('m8[3m/300]') == np.dtype('m8[600ms]'))
def test_divisor_conversion_second(self):
assert_(np.dtype('m8[s/100]') == np.dtype('m8[10ms]'))
assert_(np.dtype('m8[3s/10000]') == np.dtype('m8[300us]'))
def test_divisor_conversion_fs(self):
assert_(np.dtype('M8[fs/100]') == np.dtype('M8[10as]'))
assert_raises(ValueError, lambda: np.dtype('M8[3fs/10000]'))
def test_divisor_conversion_as(self):
assert_raises(ValueError, lambda: np.dtype('M8[as/10]'))
def test_string_parser_variants(self):
# Allow space instead of 'T' between date and time
assert_equal(np.array(['1980-02-29T01:02:03'], np.dtype('M8[s]')),
np.array(['1980-02-29 01:02:03'], np.dtype('M8[s]')))
# Allow positive years
assert_equal(np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),
np.array(['+1980-02-29 01:02:03'], np.dtype('M8[s]')))
# Allow negative years
assert_equal(np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),
np.array(['-1980-02-29 01:02:03'], np.dtype('M8[s]')))
# UTC specifier
with assert_warns(DeprecationWarning):
assert_equal(
np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),
np.array(['+1980-02-29 01:02:03Z'], np.dtype('M8[s]')))
with assert_warns(DeprecationWarning):
assert_equal(
np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),
np.array(['-1980-02-29 01:02:03Z'], np.dtype('M8[s]')))
# Time zone offset
with assert_warns(DeprecationWarning):
assert_equal(
np.array(['1980-02-29T02:02:03'], np.dtype('M8[s]')),
np.array(['1980-02-29 00:32:03-0130'], np.dtype('M8[s]')))
with assert_warns(DeprecationWarning):
assert_equal(
np.array(['1980-02-28T22:32:03'], np.dtype('M8[s]')),
np.array(['1980-02-29 00:02:03+01:30'], np.dtype('M8[s]')))
with assert_warns(DeprecationWarning):
assert_equal(
np.array(['1980-02-29T02:32:03.506'], np.dtype('M8[s]')),
np.array(['1980-02-29 00:32:03.506-02'], np.dtype('M8[s]')))
with assert_warns(DeprecationWarning):
assert_equal(np.datetime64('1977-03-02T12:30-0230'),
np.datetime64('1977-03-02T15:00'))
def test_string_parser_error_check(self):
# Arbitrary bad string
assert_raises(ValueError, np.array, ['badvalue'], np.dtype('M8[us]'))
# Character after year must be '-'
assert_raises(ValueError, np.array, ['1980X'], np.dtype('M8[us]'))
# Cannot have trailing '-'
assert_raises(ValueError, np.array, ['1980-'], np.dtype('M8[us]'))
# Month must be in range [1,12]
assert_raises(ValueError, np.array, ['1980-00'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-13'], np.dtype('M8[us]'))
# Month must have two digits
assert_raises(ValueError, np.array, ['1980-1'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-1-02'], np.dtype('M8[us]'))
# 'Mor' is not a valid month
assert_raises(ValueError, np.array, ['1980-Mor'], np.dtype('M8[us]'))
# Cannot have trailing '-'
assert_raises(ValueError, np.array, ['1980-01-'], np.dtype('M8[us]'))
# Day must be in range [1,len(month)]
assert_raises(ValueError, np.array, ['1980-01-0'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-01-00'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-01-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1979-02-29'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-30'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-03-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-04-31'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-05-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-06-31'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-07-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-08-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-09-31'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-10-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-11-31'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-12-32'], np.dtype('M8[us]'))
# Cannot have trailing characters
assert_raises(ValueError, np.array, ['1980-02-03%'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03 q'],
np.dtype('M8[us]'))
# Hours must be in range [0, 23]
assert_raises(ValueError, np.array, ['1980-02-03 25'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03T25'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03 24:01'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03T24:01'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03 -1'],
np.dtype('M8[us]'))
# No trailing ':'
assert_raises(ValueError, np.array, ['1980-02-03 01:'],
np.dtype('M8[us]'))
# Minutes must be in range [0, 59]
assert_raises(ValueError, np.array, ['1980-02-03 01:-1'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03 01:60'],
np.dtype('M8[us]'))
# No trailing ':'
assert_raises(ValueError, np.array, ['1980-02-03 01:60:'],
np.dtype('M8[us]'))
# Seconds must be in range [0, 59]
assert_raises(ValueError, np.array, ['1980-02-03 01:10:-1'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03 01:01:60'],
np.dtype('M8[us]'))
        # Timezone offset must be within a reasonable range
with assert_warns(DeprecationWarning):
assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+0661'],
np.dtype('M8[us]'))
with assert_warns(DeprecationWarning):
assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+2500'],
np.dtype('M8[us]'))
with assert_warns(DeprecationWarning):
assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-0070'],
np.dtype('M8[us]'))
with assert_warns(DeprecationWarning):
assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-3000'],
np.dtype('M8[us]'))
with assert_warns(DeprecationWarning):
assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-25:00'],
np.dtype('M8[us]'))
def test_creation_overflow(self):
date = '1980-03-23 20:00:00'
timesteps = np.array([date], dtype='datetime64[s]')[0].astype(np.int64)
for unit in ['ms', 'us', 'ns']:
timesteps *= 1000
x = np.array([date], dtype='datetime64[%s]' % unit)
assert_equal(timesteps, x[0].astype(np.int64),
err_msg='Datetime conversion error for unit %s' % unit)
assert_equal(x[0].astype(np.int64), 322689600000000000)
# gh-13062
with pytest.raises(OverflowError):
np.datetime64(2**64, 'D')
with pytest.raises(OverflowError):
np.timedelta64(2**64, 'D')
def test_datetime_as_string(self):
# Check all the units with default string conversion
date = '1959-10-13'
datetime = '1959-10-13T12:34:56.789012345678901234'
assert_equal(np.datetime_as_string(np.datetime64(date, 'Y')),
'1959')
assert_equal(np.datetime_as_string(np.datetime64(date, 'M')),
'1959-10')
assert_equal(np.datetime_as_string(np.datetime64(date, 'D')),
'1959-10-13')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'h')),
'1959-10-13T12')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'm')),
'1959-10-13T12:34')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 's')),
'1959-10-13T12:34:56')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ms')),
'1959-10-13T12:34:56.789')
for us in ['us', 'μs', b'us']: # check non-ascii and bytes too
assert_equal(np.datetime_as_string(np.datetime64(datetime, us)),
'1959-10-13T12:34:56.789012')
datetime = '1969-12-31T23:34:56.789012345678901234'
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')),
'1969-12-31T23:34:56.789012345')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')),
'1969-12-31T23:34:56.789012345678')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')),
'1969-12-31T23:34:56.789012345678901')
datetime = '1969-12-31T23:59:57.789012345678901234'
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')),
datetime)
datetime = '1970-01-01T00:34:56.789012345678901234'
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')),
'1970-01-01T00:34:56.789012345')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')),
'1970-01-01T00:34:56.789012345678')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')),
'1970-01-01T00:34:56.789012345678901')
datetime = '1970-01-01T00:00:05.789012345678901234'
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')),
datetime)
# String conversion with the unit= parameter
a = np.datetime64('2032-07-18T12:23:34.123456', 'us')
assert_equal(np.datetime_as_string(a, unit='Y', casting='unsafe'),
'2032')
assert_equal(np.datetime_as_string(a, unit='M', casting='unsafe'),
'2032-07')
assert_equal(np.datetime_as_string(a, unit='W', casting='unsafe'),
'2032-07-18')
assert_equal(np.datetime_as_string(a, unit='D', casting='unsafe'),
'2032-07-18')
assert_equal(np.datetime_as_string(a, unit='h'), '2032-07-18T12')
assert_equal(np.datetime_as_string(a, unit='m'),
'2032-07-18T12:23')
assert_equal(np.datetime_as_string(a, unit='s'),
'2032-07-18T12:23:34')
assert_equal(np.datetime_as_string(a, unit='ms'),
'2032-07-18T12:23:34.123')
assert_equal(np.datetime_as_string(a, unit='us'),
'2032-07-18T12:23:34.123456')
assert_equal(np.datetime_as_string(a, unit='ns'),
'2032-07-18T12:23:34.123456000')
assert_equal(np.datetime_as_string(a, unit='ps'),
'2032-07-18T12:23:34.123456000000')
assert_equal(np.datetime_as_string(a, unit='fs'),
'2032-07-18T12:23:34.123456000000000')
assert_equal(np.datetime_as_string(a, unit='as'),
'2032-07-18T12:23:34.123456000000000000')
# unit='auto' parameter
assert_equal(np.datetime_as_string(
np.datetime64('2032-07-18T12:23:34.123456', 'us'), unit='auto'),
'2032-07-18T12:23:34.123456')
assert_equal(np.datetime_as_string(
np.datetime64('2032-07-18T12:23:34.12', 'us'), unit='auto'),
'2032-07-18T12:23:34.120')
assert_equal(np.datetime_as_string(
np.datetime64('2032-07-18T12:23:34', 'us'), unit='auto'),
'2032-07-18T12:23:34')
assert_equal(np.datetime_as_string(
np.datetime64('2032-07-18T12:23:00', 'us'), unit='auto'),
'2032-07-18T12:23')
# 'auto' doesn't split up hour and minute
assert_equal(np.datetime_as_string(
np.datetime64('2032-07-18T12:00:00', 'us'), unit='auto'),
'2032-07-18T12:00')
assert_equal(np.datetime_as_string(
np.datetime64('2032-07-18T00:00:00', 'us'), unit='auto'),
'2032-07-18')
# 'auto' doesn't split up the date
assert_equal(np.datetime_as_string(
np.datetime64('2032-07-01T00:00:00', 'us'), unit='auto'),
'2032-07-01')
assert_equal(np.datetime_as_string(
np.datetime64('2032-01-01T00:00:00', 'us'), unit='auto'),
'2032-01-01')
@pytest.mark.skipif(not _has_pytz, reason="The pytz module is not available.")
def test_datetime_as_string_timezone(self):
# timezone='local' vs 'UTC'
a = np.datetime64('2010-03-15T06:30', 'm')
assert_equal(np.datetime_as_string(a),
'2010-03-15T06:30')
assert_equal(np.datetime_as_string(a, timezone='naive'),
'2010-03-15T06:30')
assert_equal(np.datetime_as_string(a, timezone='UTC'),
'2010-03-15T06:30Z')
assert_(np.datetime_as_string(a, timezone='local') !=
'2010-03-15T06:30')
b = np.datetime64('2010-02-15T06:30', 'm')
assert_equal(np.datetime_as_string(a, timezone=tz('US/Central')),
'2010-03-15T01:30-0500')
assert_equal(np.datetime_as_string(a, timezone=tz('US/Eastern')),
'2010-03-15T02:30-0400')
assert_equal(np.datetime_as_string(a, timezone=tz('US/Pacific')),
'2010-03-14T23:30-0700')
assert_equal(np.datetime_as_string(b, timezone=tz('US/Central')),
'2010-02-15T00:30-0600')
assert_equal(np.datetime_as_string(b, timezone=tz('US/Eastern')),
'2010-02-15T01:30-0500')
assert_equal(np.datetime_as_string(b, timezone=tz('US/Pacific')),
'2010-02-14T22:30-0800')
        # Converting dates to strings with a timezone attached is disabled
        # by default
assert_raises(TypeError, np.datetime_as_string, a, unit='D',
timezone=tz('US/Pacific'))
# Check that we can print out the date in the specified time zone
assert_equal(np.datetime_as_string(a, unit='D',
timezone=tz('US/Pacific'), casting='unsafe'),
'2010-03-14')
assert_equal(np.datetime_as_string(b, unit='D',
timezone=tz('US/Central'), casting='unsafe'),
'2010-02-15')
def test_datetime_arange(self):
# With two datetimes provided as strings
a = np.arange('2010-01-05', '2010-01-10', dtype='M8[D]')
assert_equal(a.dtype, np.dtype('M8[D]'))
assert_equal(a,
np.array(['2010-01-05', '2010-01-06', '2010-01-07',
'2010-01-08', '2010-01-09'], dtype='M8[D]'))
a = np.arange('1950-02-10', '1950-02-06', -1, dtype='M8[D]')
assert_equal(a.dtype, np.dtype('M8[D]'))
assert_equal(a,
np.array(['1950-02-10', '1950-02-09', '1950-02-08',
'1950-02-07'], dtype='M8[D]'))
# Unit should be detected as months here
a = np.arange('1969-05', '1970-05', 2, dtype='M8')
assert_equal(a.dtype, np.dtype('M8[M]'))
assert_equal(a,
np.datetime64('1969-05') + np.arange(12, step=2))
        # (datetime, integer) and (datetime, timedelta) work as well;
        # they produce arange(start, start + stop) in this case
a = np.arange('1969', 18, 3, dtype='M8')
assert_equal(a.dtype, np.dtype('M8[Y]'))
assert_equal(a,
np.datetime64('1969') + np.arange(18, step=3))
a = np.arange('1969-12-19', 22, np.timedelta64(2), dtype='M8')
assert_equal(a.dtype, np.dtype('M8[D]'))
assert_equal(a,
np.datetime64('1969-12-19') + np.arange(22, step=2))
# Step of 0 is disallowed
assert_raises(ValueError, np.arange, np.datetime64('today'),
np.datetime64('today') + 3, 0)
# Promotion across nonlinear unit boundaries is disallowed
assert_raises(TypeError, np.arange, np.datetime64('2011-03-01', 'D'),
np.timedelta64(5, 'M'))
assert_raises(TypeError, np.arange,
np.datetime64('2012-02-03T14', 's'),
np.timedelta64(5, 'Y'))
def test_datetime_arange_no_dtype(self):
d = np.array('2010-01-04', dtype="M8[D]")
assert_equal(np.arange(d, d + 1), d)
assert_raises(ValueError, np.arange, d)
def test_timedelta_arange(self):
a = np.arange(3, 10, dtype='m8')
assert_equal(a.dtype, np.dtype('m8'))
assert_equal(a, np.timedelta64(0) + np.arange(3, 10))
a = np.arange(np.timedelta64(3, 's'), 10, 2, dtype='m8')
assert_equal(a.dtype, np.dtype('m8[s]'))
assert_equal(a, np.timedelta64(0, 's') + np.arange(3, 10, 2))
# Step of 0 is disallowed
assert_raises(ValueError, np.arange, np.timedelta64(0),
np.timedelta64(5), 0)
# Promotion across nonlinear unit boundaries is disallowed
assert_raises(TypeError, np.arange, np.timedelta64(0, 'D'),
np.timedelta64(5, 'M'))
assert_raises(TypeError, np.arange, np.timedelta64(0, 'Y'),
np.timedelta64(5, 'D'))
@pytest.mark.parametrize("val1, val2, expected", [
# case from gh-12092
(np.timedelta64(7, 's'),
np.timedelta64(3, 's'),
np.timedelta64(1, 's')),
# negative value cases
(np.timedelta64(3, 's'),
np.timedelta64(-2, 's'),
np.timedelta64(-1, 's')),
(np.timedelta64(-3, 's'),
np.timedelta64(2, 's'),
np.timedelta64(1, 's')),
# larger value cases
(np.timedelta64(17, 's'),
np.timedelta64(22, 's'),
np.timedelta64(17, 's')),
(np.timedelta64(22, 's'),
np.timedelta64(17, 's'),
np.timedelta64(5, 's')),
# different units
(np.timedelta64(1, 'm'),
np.timedelta64(57, 's'),
np.timedelta64(3, 's')),
(np.timedelta64(1, 'us'),
np.timedelta64(727, 'ns'),
np.timedelta64(273, 'ns')),
# NaT is propagated
(np.timedelta64('NaT'),
np.timedelta64(50, 'ns'),
np.timedelta64('NaT')),
# Y % M works
(np.timedelta64(2, 'Y'),
np.timedelta64(22, 'M'),
np.timedelta64(2, 'M')),
])
def test_timedelta_modulus(self, val1, val2, expected):
assert_equal(val1 % val2, expected)
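    def test_timedelta_modulus_sign_sketch(self):
        # Illustrative sketch (editor's addition, not part of the original
        # suite): like Python's %, the result takes the sign of the
        # divisor, consistent with the negative-value cases above.
        assert_equal(3 % -2, -1)
        assert_equal(np.timedelta64(3, 's') % np.timedelta64(-2, 's'),
                     np.timedelta64(-1, 's'))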
@pytest.mark.parametrize("val1, val2", [
# years and months sometimes can't be unambiguously
# divided for modulus operation
(np.timedelta64(7, 'Y'),
np.timedelta64(3, 's')),
(np.timedelta64(7, 'M'),
np.timedelta64(1, 'D')),
])
def test_timedelta_modulus_error(self, val1, val2):
with assert_raises_regex(TypeError, "common metadata divisor"):
val1 % val2
def test_timedelta_modulus_div_by_zero(self):
with assert_warns(RuntimeWarning):
actual = np.timedelta64(10, 's') % np.timedelta64(0, 's')
assert_equal(actual, np.timedelta64('NaT'))
@pytest.mark.parametrize("val1, val2", [
# cases where one operand is not
# timedelta64
(np.timedelta64(7, 'Y'),
15,),
(7.5,
np.timedelta64(1, 'D')),
])
def test_timedelta_modulus_type_resolution(self, val1, val2):
# NOTE: some of the operations may be supported
# in the future
with assert_raises_regex(TypeError,
"'remainder' cannot use operands with types"):
val1 % val2
def test_timedelta_arange_no_dtype(self):
d = np.array(5, dtype="m8[D]")
assert_equal(np.arange(d, d + 1), d)
assert_equal(np.arange(d), np.arange(0, d))
def test_datetime_maximum_reduce(self):
a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='M8[D]')
assert_equal(np.maximum.reduce(a).dtype, np.dtype('M8[D]'))
assert_equal(np.maximum.reduce(a),
np.datetime64('2010-01-02'))
a = np.array([1, 4, 0, 7, 2], dtype='m8[s]')
assert_equal(np.maximum.reduce(a).dtype, np.dtype('m8[s]'))
assert_equal(np.maximum.reduce(a),
np.timedelta64(7, 's'))
def test_timedelta_correct_mean(self):
        # Tested mainly because mean() used to work only via a bug that
        # allowed `timedelta.sum(dtype="f8")` to ignore the dtype request.
a = np.arange(1000, dtype="m8[s]")
assert_array_equal(a.mean(), a.sum() / len(a))
def test_datetime_no_subtract_reducelike(self):
# subtracting two datetime64 works, but we cannot reduce it, since
# the result of that subtraction will have a different dtype.
arr = np.array(["2021-12-02", "2019-05-12"], dtype="M8[ms]")
msg = r"the resolved dtypes are not compatible"
with pytest.raises(TypeError, match=msg):
np.subtract.reduce(arr)
with pytest.raises(TypeError, match=msg):
np.subtract.accumulate(arr)
with pytest.raises(TypeError, match=msg):
np.subtract.reduceat(arr, [0])
def test_datetime_busday_offset(self):
# First Monday in June
assert_equal(
np.busday_offset('2011-06', 0, roll='forward', weekmask='Mon'),
np.datetime64('2011-06-06'))
# Last Monday in June
assert_equal(
np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'),
np.datetime64('2011-06-27'))
assert_equal(
np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'),
np.datetime64('2011-06-27'))
# Default M-F business days, different roll modes
assert_equal(np.busday_offset('2010-08', 0, roll='backward'),
np.datetime64('2010-07-30'))
assert_equal(np.busday_offset('2010-08', 0, roll='preceding'),
np.datetime64('2010-07-30'))
assert_equal(np.busday_offset('2010-08', 0, roll='modifiedpreceding'),
np.datetime64('2010-08-02'))
assert_equal(np.busday_offset('2010-08', 0, roll='modifiedfollowing'),
np.datetime64('2010-08-02'))
assert_equal(np.busday_offset('2010-08', 0, roll='forward'),
np.datetime64('2010-08-02'))
assert_equal(np.busday_offset('2010-08', 0, roll='following'),
np.datetime64('2010-08-02'))
assert_equal(np.busday_offset('2010-10-30', 0, roll='following'),
np.datetime64('2010-11-01'))
assert_equal(
np.busday_offset('2010-10-30', 0, roll='modifiedfollowing'),
np.datetime64('2010-10-29'))
assert_equal(
np.busday_offset('2010-10-30', 0, roll='modifiedpreceding'),
np.datetime64('2010-10-29'))
assert_equal(
np.busday_offset('2010-10-16', 0, roll='modifiedfollowing'),
np.datetime64('2010-10-18'))
assert_equal(
np.busday_offset('2010-10-16', 0, roll='modifiedpreceding'),
np.datetime64('2010-10-15'))
# roll='raise' by default
assert_raises(ValueError, np.busday_offset, '2011-06-04', 0)
# Bigger offset values
assert_equal(np.busday_offset('2006-02-01', 25),
np.datetime64('2006-03-08'))
assert_equal(np.busday_offset('2006-03-08', -25),
np.datetime64('2006-02-01'))
assert_equal(np.busday_offset('2007-02-25', 11, weekmask='SatSun'),
np.datetime64('2007-04-07'))
assert_equal(np.busday_offset('2007-04-07', -11, weekmask='SatSun'),
np.datetime64('2007-02-25'))
# NaT values when roll is not raise
assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='nat'),
np.datetime64('NaT'))
assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='following'),
np.datetime64('NaT'))
assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='preceding'),
np.datetime64('NaT'))
def test_datetime_busdaycalendar(self):
# Check that it removes NaT, duplicates, and weekends
# and sorts the result.
bdd = np.busdaycalendar(
holidays=['NaT', '2011-01-17', '2011-03-06', 'NaT',
'2011-12-26', '2011-05-30', '2011-01-17'])
assert_equal(bdd.holidays,
np.array(['2011-01-17', '2011-05-30', '2011-12-26'], dtype='M8'))
# Default M-F weekmask
assert_equal(bdd.weekmask, np.array([1, 1, 1, 1, 1, 0, 0], dtype='?'))
# Check string weekmask with varying whitespace.
bdd = np.busdaycalendar(weekmask="Sun TueWed Thu\tFri")
assert_equal(bdd.weekmask, np.array([0, 1, 1, 1, 1, 0, 1], dtype='?'))
# Check length 7 0/1 string
bdd = np.busdaycalendar(weekmask="0011001")
assert_equal(bdd.weekmask, np.array([0, 0, 1, 1, 0, 0, 1], dtype='?'))
        # Check abbreviated weekday-name string weekmask.
bdd = np.busdaycalendar(weekmask="Mon Tue")
assert_equal(bdd.weekmask, np.array([1, 1, 0, 0, 0, 0, 0], dtype='?'))
# All-zeros weekmask should raise
assert_raises(ValueError, np.busdaycalendar, weekmask=[0, 0, 0, 0, 0, 0, 0])
# weekday names must be correct case
assert_raises(ValueError, np.busdaycalendar, weekmask="satsun")
        # An empty weekmask parses to all zeros and should also raise
assert_raises(ValueError, np.busdaycalendar, weekmask="")
# Invalid weekday name codes should raise
assert_raises(ValueError, np.busdaycalendar, weekmask="Mon Tue We")
assert_raises(ValueError, np.busdaycalendar, weekmask="Max")
assert_raises(ValueError, np.busdaycalendar, weekmask="Monday Tue")
def test_datetime_busday_holidays_offset(self):
# With exactly one holiday
assert_equal(
np.busday_offset('2011-11-10', 1, holidays=['2011-11-11']),
np.datetime64('2011-11-14'))
assert_equal(
np.busday_offset('2011-11-04', 5, holidays=['2011-11-11']),
np.datetime64('2011-11-14'))
assert_equal(
np.busday_offset('2011-11-10', 5, holidays=['2011-11-11']),
np.datetime64('2011-11-18'))
assert_equal(
np.busday_offset('2011-11-14', -1, holidays=['2011-11-11']),
np.datetime64('2011-11-10'))
assert_equal(
np.busday_offset('2011-11-18', -5, holidays=['2011-11-11']),
np.datetime64('2011-11-10'))
assert_equal(
np.busday_offset('2011-11-14', -5, holidays=['2011-11-11']),
np.datetime64('2011-11-04'))
# With the holiday appearing twice
assert_equal(
np.busday_offset('2011-11-10', 1,
holidays=['2011-11-11', '2011-11-11']),
np.datetime64('2011-11-14'))
assert_equal(
np.busday_offset('2011-11-14', -1,
holidays=['2011-11-11', '2011-11-11']),
np.datetime64('2011-11-10'))
# With a NaT holiday
assert_equal(
np.busday_offset('2011-11-10', 1,
holidays=['2011-11-11', 'NaT']),
np.datetime64('2011-11-14'))
assert_equal(
np.busday_offset('2011-11-14', -1,
holidays=['NaT', '2011-11-11']),
np.datetime64('2011-11-10'))
# With another holiday after
assert_equal(
np.busday_offset('2011-11-10', 1,
holidays=['2011-11-11', '2011-11-24']),
np.datetime64('2011-11-14'))
assert_equal(
np.busday_offset('2011-11-14', -1,
holidays=['2011-11-11', '2011-11-24']),
np.datetime64('2011-11-10'))
# With another holiday before
assert_equal(
np.busday_offset('2011-11-10', 1,
holidays=['2011-10-10', '2011-11-11']),
np.datetime64('2011-11-14'))
assert_equal(
np.busday_offset('2011-11-14', -1,
holidays=['2011-10-10', '2011-11-11']),
np.datetime64('2011-11-10'))
# With another holiday before and after
assert_equal(
np.busday_offset('2011-11-10', 1,
holidays=['2011-10-10', '2011-11-11', '2011-11-24']),
np.datetime64('2011-11-14'))
assert_equal(
np.busday_offset('2011-11-14', -1,
holidays=['2011-10-10', '2011-11-11', '2011-11-24']),
np.datetime64('2011-11-10'))
# A bigger forward jump across more than one week/holiday
holidays = ['2011-10-10', '2011-11-11', '2011-11-24',
'2011-12-25', '2011-05-30', '2011-02-21',
'2011-12-26', '2012-01-02']
bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
assert_equal(
np.busday_offset('2011-10-03', 4, holidays=holidays),
np.busday_offset('2011-10-03', 4))
assert_equal(
np.busday_offset('2011-10-03', 5, holidays=holidays),
np.busday_offset('2011-10-03', 5 + 1))
assert_equal(
np.busday_offset('2011-10-03', 27, holidays=holidays),
np.busday_offset('2011-10-03', 27 + 1))
assert_equal(
np.busday_offset('2011-10-03', 28, holidays=holidays),
np.busday_offset('2011-10-03', 28 + 2))
assert_equal(
np.busday_offset('2011-10-03', 35, holidays=holidays),
np.busday_offset('2011-10-03', 35 + 2))
assert_equal(
np.busday_offset('2011-10-03', 36, holidays=holidays),
np.busday_offset('2011-10-03', 36 + 3))
assert_equal(
np.busday_offset('2011-10-03', 56, holidays=holidays),
np.busday_offset('2011-10-03', 56 + 3))
assert_equal(
np.busday_offset('2011-10-03', 57, holidays=holidays),
np.busday_offset('2011-10-03', 57 + 4))
assert_equal(
np.busday_offset('2011-10-03', 60, holidays=holidays),
np.busday_offset('2011-10-03', 60 + 4))
assert_equal(
np.busday_offset('2011-10-03', 61, holidays=holidays),
np.busday_offset('2011-10-03', 61 + 5))
assert_equal(
np.busday_offset('2011-10-03', 61, busdaycal=bdd),
np.busday_offset('2011-10-03', 61 + 5))
# A bigger backward jump across more than one week/holiday
assert_equal(
np.busday_offset('2012-01-03', -1, holidays=holidays),
np.busday_offset('2012-01-03', -1 - 1))
assert_equal(
np.busday_offset('2012-01-03', -4, holidays=holidays),
np.busday_offset('2012-01-03', -4 - 1))
assert_equal(
np.busday_offset('2012-01-03', -5, holidays=holidays),
np.busday_offset('2012-01-03', -5 - 2))
assert_equal(
np.busday_offset('2012-01-03', -25, holidays=holidays),
np.busday_offset('2012-01-03', -25 - 2))
assert_equal(
np.busday_offset('2012-01-03', -26, holidays=holidays),
np.busday_offset('2012-01-03', -26 - 3))
assert_equal(
np.busday_offset('2012-01-03', -33, holidays=holidays),
np.busday_offset('2012-01-03', -33 - 3))
assert_equal(
np.busday_offset('2012-01-03', -34, holidays=holidays),
np.busday_offset('2012-01-03', -34 - 4))
assert_equal(
np.busday_offset('2012-01-03', -56, holidays=holidays),
np.busday_offset('2012-01-03', -56 - 4))
assert_equal(
np.busday_offset('2012-01-03', -57, holidays=holidays),
np.busday_offset('2012-01-03', -57 - 5))
assert_equal(
np.busday_offset('2012-01-03', -57, busdaycal=bdd),
np.busday_offset('2012-01-03', -57 - 5))
# Can't supply both a weekmask/holidays and busdaycal
assert_raises(ValueError, np.busday_offset, '2012-01-03', -15,
weekmask='1111100', busdaycal=bdd)
assert_raises(ValueError, np.busday_offset, '2012-01-03', -15,
holidays=holidays, busdaycal=bdd)
# Roll with the holidays
assert_equal(
np.busday_offset('2011-12-25', 0,
roll='forward', holidays=holidays),
np.datetime64('2011-12-27'))
assert_equal(
np.busday_offset('2011-12-26', 0,
roll='forward', holidays=holidays),
np.datetime64('2011-12-27'))
assert_equal(
np.busday_offset('2011-12-26', 0,
roll='backward', holidays=holidays),
np.datetime64('2011-12-23'))
assert_equal(
np.busday_offset('2012-02-27', 0,
roll='modifiedfollowing',
holidays=['2012-02-27', '2012-02-26', '2012-02-28',
'2012-03-01', '2012-02-29']),
np.datetime64('2012-02-24'))
assert_equal(
np.busday_offset('2012-03-06', 0,
roll='modifiedpreceding',
holidays=['2012-03-02', '2012-03-03', '2012-03-01',
'2012-03-05', '2012-03-07', '2012-03-06']),
np.datetime64('2012-03-08'))
def test_datetime_busday_holidays_count(self):
holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24',
'2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17',
'2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30',
'2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10']
bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
        # Validate by broadcasting busday_offset against a range of offsets
dates = np.busday_offset('2011-01-01', np.arange(366),
roll='forward', busdaycal=bdd)
assert_equal(np.busday_count('2011-01-01', dates, busdaycal=bdd),
np.arange(366))
# Returns negative value when reversed
assert_equal(np.busday_count(dates, '2011-01-01', busdaycal=bdd),
-np.arange(366))
dates = np.busday_offset('2011-12-31', -np.arange(366),
roll='forward', busdaycal=bdd)
assert_equal(np.busday_count(dates, '2011-12-31', busdaycal=bdd),
np.arange(366))
# Returns negative value when reversed
assert_equal(np.busday_count('2011-12-31', dates, busdaycal=bdd),
-np.arange(366))
# Can't supply both a weekmask/holidays and busdaycal
        assert_raises(ValueError, np.busday_count, '2012-01-03', '2012-02-03',
                      weekmask='1111100', busdaycal=bdd)
        assert_raises(ValueError, np.busday_count, '2012-01-03', '2012-02-03',
                      holidays=holidays, busdaycal=bdd)
        # Number of Mondays in March 2011 (the 7th, 14th, 21st and 28th)
assert_equal(np.busday_count('2011-03', '2011-04', weekmask='Mon'), 4)
# Returns negative value when reversed
assert_equal(np.busday_count('2011-04', '2011-03', weekmask='Mon'), -4)
def test_datetime_is_busday(self):
holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24',
'2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17',
'2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30',
'2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10',
'NaT']
bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
# Weekend/weekday tests
assert_equal(np.is_busday('2011-01-01'), False)
assert_equal(np.is_busday('2011-01-02'), False)
assert_equal(np.is_busday('2011-01-03'), True)
        # None of the holidays are business days
assert_equal(np.is_busday(holidays, busdaycal=bdd),
np.zeros(len(holidays), dtype='?'))
def test_datetime_y2038(self):
# Test parsing on either side of the Y2038 boundary
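        # (2**31 - 1 seconds past the epoch is 2038-01-19T03:14:07 UTC,
        # the classic 32-bit time_t limit)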
a = np.datetime64('2038-01-19T03:14:07')
assert_equal(a.view(np.int64), 2**31 - 1)
a = np.datetime64('2038-01-19T03:14:08')
assert_equal(a.view(np.int64), 2**31)
# Test parsing on either side of the Y2038 boundary with
# a manually specified timezone offset
with assert_warns(DeprecationWarning):
a = np.datetime64('2038-01-19T04:14:07+0100')
assert_equal(a.view(np.int64), 2**31 - 1)
with assert_warns(DeprecationWarning):
a = np.datetime64('2038-01-19T04:14:08+0100')
assert_equal(a.view(np.int64), 2**31)
# Test parsing a date after Y2038
a = np.datetime64('2038-01-20T13:21:14')
assert_equal(str(a), '2038-01-20T13:21:14')
def test_isnat(self):
assert_(np.isnat(np.datetime64('NaT', 'ms')))
assert_(np.isnat(np.datetime64('NaT', 'ns')))
assert_(not np.isnat(np.datetime64('2038-01-19T03:14:07')))
assert_(np.isnat(np.timedelta64('NaT', "ms")))
assert_(not np.isnat(np.timedelta64(34, "ms")))
res = np.array([False, False, True])
for unit in ['Y', 'M', 'W', 'D',
'h', 'm', 's', 'ms', 'us',
'ns', 'ps', 'fs', 'as']:
arr = np.array([123, -321, "NaT"], dtype='<datetime64[%s]' % unit)
assert_equal(np.isnat(arr), res)
arr = np.array([123, -321, "NaT"], dtype='>datetime64[%s]' % unit)
assert_equal(np.isnat(arr), res)
arr = np.array([123, -321, "NaT"], dtype='<timedelta64[%s]' % unit)
assert_equal(np.isnat(arr), res)
arr = np.array([123, -321, "NaT"], dtype='>timedelta64[%s]' % unit)
assert_equal(np.isnat(arr), res)
def test_isnat_error(self):
        # Test that only datetime/timedelta dtype arrays are accepted
for t in np.typecodes["All"]:
if t in np.typecodes["Datetime"]:
continue
assert_raises(TypeError, np.isnat, np.zeros(10, t))
def test_isfinite_scalar(self):
assert_(not np.isfinite(np.datetime64('NaT', 'ms')))
assert_(not np.isfinite(np.datetime64('NaT', 'ns')))
assert_(np.isfinite(np.datetime64('2038-01-19T03:14:07')))
assert_(not np.isfinite(np.timedelta64('NaT', "ms")))
assert_(np.isfinite(np.timedelta64(34, "ms")))
@pytest.mark.parametrize('unit', ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',
'us', 'ns', 'ps', 'fs', 'as'])
@pytest.mark.parametrize('dstr', ['<datetime64[%s]', '>datetime64[%s]',
'<timedelta64[%s]', '>timedelta64[%s]'])
def test_isfinite_isinf_isnan_units(self, unit, dstr):
        '''Check isfinite, isinf, isnan for all units of <M, >M, <m, >m dtypes
        '''
arr_val = [123, -321, "NaT"]
        arr = np.array(arr_val, dtype=dstr % unit)
pos = np.array([True, True, False])
neg = np.array([False, False, True])
false = np.array([False, False, False])
assert_equal(np.isfinite(arr), pos)
assert_equal(np.isinf(arr), false)
assert_equal(np.isnan(arr), neg)
def test_assert_equal(self):
assert_raises(AssertionError, assert_equal,
np.datetime64('nat'), np.timedelta64('nat'))
def test_corecursive_input(self):
# construct a co-recursive list
a, b = [], []
a.append(b)
b.append(a)
obj_arr = np.array([None])
obj_arr[0] = a
# At some point this caused a stack overflow (gh-11154). Now raises
# ValueError since the nested list cannot be converted to a datetime.
assert_raises(ValueError, obj_arr.astype, 'M8')
assert_raises(ValueError, obj_arr.astype, 'm8')
@pytest.mark.parametrize("shape", [(), (1,)])
def test_discovery_from_object_array(self, shape):
arr = np.array("2020-10-10", dtype=object).reshape(shape)
res = np.array("2020-10-10", dtype="M8").reshape(shape)
assert res.dtype == np.dtype("M8[D]")
assert_equal(arr.astype("M8"), res)
arr[...] = np.bytes_("2020-10-10") # try a numpy string type
assert_equal(arr.astype("M8"), res)
arr = arr.astype("S")
assert_equal(arr.astype("S").astype("M8"), res)
@pytest.mark.parametrize("time_unit", [
"Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as",
# compound units
"10D", "2M",
])
def test_limit_symmetry(self, time_unit):
"""
        Dates should have symmetric limits around the unix epoch at
        +/- np.iinfo(np.int64).max
"""
epoch = np.datetime64(0, time_unit)
latest = np.datetime64(np.iinfo(np.int64).max, time_unit)
earliest = np.datetime64(-np.iinfo(np.int64).max, time_unit)
# above should not have overflowed
assert earliest < epoch < latest
@pytest.mark.parametrize("time_unit", [
"Y", "M",
pytest.param("W", marks=pytest.mark.xfail(reason="gh-13197")),
"D", "h", "m",
"s", "ms", "us", "ns", "ps", "fs", "as",
pytest.param("10D", marks=pytest.mark.xfail(reason="similar to gh-13197")),
])
@pytest.mark.parametrize("sign", [-1, 1])
def test_limit_str_roundtrip(self, time_unit, sign):
"""
Limits should roundtrip when converted to strings.
This tests the conversion to and from npy_datetimestruct.
"""
# TODO: add absolute (gold standard) time span limit strings
limit = np.datetime64(np.iinfo(np.int64).max * sign, time_unit)
# Convert to string and back. Explicit unit needed since the day and
# week reprs are not distinguishable.
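        # (e.g. np.datetime64(1, 'W') and np.datetime64(7, 'D') both print
        # as '1970-01-08', so the unit cannot be recovered from the string)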
limit_via_str = np.datetime64(str(limit), time_unit)
assert limit_via_str == limit
class TestDateTimeData:
def test_basic(self):
a = np.array(['1980-03-23'], dtype=np.datetime64)
assert_equal(np.datetime_data(a.dtype), ('D', 1))
def test_bytes(self):
# byte units are converted to unicode
dt = np.datetime64('2000', (b'ms', 5))
assert np.datetime_data(dt.dtype) == ('ms', 5)
dt = np.datetime64('2000', b'5ms')
assert np.datetime_data(dt.dtype) == ('ms', 5)
def test_non_ascii(self):
        # 'μs' is normalized to 'us'
dt = np.datetime64('2000', ('μs', 5))
assert np.datetime_data(dt.dtype) == ('us', 5)
dt = np.datetime64('2000', '5μs')
assert np.datetime_data(dt.dtype) == ('us', 5)
| 114,951 | Python | 44.453539 | 101 | 0.526511 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_dlpack.py | import sys
import pytest
import numpy as np
from numpy.testing import assert_array_equal, IS_PYPY
class TestDLPack:
@pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.")
def test_dunder_dlpack_refcount(self):
x = np.arange(5)
y = x.__dlpack__()
assert sys.getrefcount(x) == 3
del y
assert sys.getrefcount(x) == 2
def test_dunder_dlpack_stream(self):
x = np.arange(5)
x.__dlpack__(stream=None)
with pytest.raises(RuntimeError):
x.__dlpack__(stream=1)
def test_strides_not_multiple_of_itemsize(self):
dt = np.dtype([('int', np.int32), ('char', np.int8)])
y = np.zeros((5,), dtype=dt)
z = y['int']
with pytest.raises(RuntimeError):
np.from_dlpack(z)
@pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.")
def test_from_dlpack_refcount(self):
x = np.arange(5)
y = np.from_dlpack(x)
assert sys.getrefcount(x) == 3
del y
assert sys.getrefcount(x) == 2
@pytest.mark.parametrize("dtype", [
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64,
np.float16, np.float32, np.float64,
np.complex64, np.complex128
])
def test_dtype_passthrough(self, dtype):
x = np.arange(5, dtype=dtype)
y = np.from_dlpack(x)
assert y.dtype == x.dtype
assert_array_equal(x, y)
def test_invalid_dtype(self):
x = np.asarray(np.datetime64('2021-05-27'))
with pytest.raises(TypeError):
np.from_dlpack(x)
def test_invalid_byte_swapping(self):
dt = np.dtype('=i8').newbyteorder()
x = np.arange(5, dtype=dt)
with pytest.raises(TypeError):
np.from_dlpack(x)
def test_non_contiguous(self):
x = np.arange(25).reshape((5, 5))
y1 = x[0]
assert_array_equal(y1, np.from_dlpack(y1))
y2 = x[:, 0]
assert_array_equal(y2, np.from_dlpack(y2))
y3 = x[1, :]
assert_array_equal(y3, np.from_dlpack(y3))
y4 = x[1]
assert_array_equal(y4, np.from_dlpack(y4))
y5 = np.diagonal(x).copy()
assert_array_equal(y5, np.from_dlpack(y5))
@pytest.mark.parametrize("ndim", range(33))
def test_higher_dims(self, ndim):
shape = (1,) * ndim
x = np.zeros(shape, dtype=np.float64)
assert shape == np.from_dlpack(x).shape
def test_dlpack_device(self):
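        # __dlpack_device__ returns (device_type, device_id); the DLPack
        # enum value for kDLCPU is 1, so a NumPy array reports (1, 0)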
x = np.arange(5)
assert x.__dlpack_device__() == (1, 0)
y = np.from_dlpack(x)
assert y.__dlpack_device__() == (1, 0)
z = y[::2]
assert z.__dlpack_device__() == (1, 0)
def dlpack_deleter_exception(self):
x = np.arange(5)
_ = x.__dlpack__()
raise RuntimeError
def test_dlpack_destructor_exception(self):
with pytest.raises(RuntimeError):
self.dlpack_deleter_exception()
def test_readonly(self):
x = np.arange(5)
x.flags.writeable = False
with pytest.raises(TypeError):
x.__dlpack__()
def test_ndim0(self):
x = np.array(1.0)
y = np.from_dlpack(x)
assert_array_equal(x, y)
def test_size1dims_arrays(self):
x = np.ndarray(dtype='f8', shape=(10, 5, 1), strides=(8, 80, 4),
buffer=np.ones(1000, dtype=np.uint8), order='F')
y = np.from_dlpack(x)
assert_array_equal(x, y)
| 3,502 | Python | 27.25 | 72 | 0.555682 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_machar.py | """
Test machar. Given recent changes to hardcode type data, we might want to get
rid of both MachAr and this test at some point.
"""
import pytest

from numpy.core._machar import MachAr
import numpy.core.numerictypes as ntypes
from numpy import errstate, array
class TestMachAr:
def _run_machar_highprec(self):
        # Instantiate MachAr with a precision high enough to cause underflow
try:
hiprec = ntypes.float96
MachAr(lambda v: array(v, hiprec))
        except AttributeError:
            pytest.skip("no ntypes.float96 available on this platform")
    def test_underflow(self):
# Regression test for #759:
# instantiating MachAr for dtype = np.float96 raises spurious warning.
with errstate(all='raise'):
try:
self._run_machar_highprec()
except FloatingPointError as e:
msg = "Caught %s exception, should not have been raised." % e
raise AssertionError(msg)
| 1,067 | Python | 33.451612 | 78 | 0.637301 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_cpu_features.py | import sys, platform, re, pytest
from numpy.core._multiarray_umath import __cpu_features__
def assert_features_equal(actual, desired, fname):
__tracebackhide__ = True # Hide traceback for py.test
actual, desired = str(actual), str(desired)
if actual == desired:
return
detected = str(__cpu_features__).replace("'", "")
try:
with open("/proc/cpuinfo", "r") as fd:
cpuinfo = fd.read(2048)
except Exception as err:
cpuinfo = str(err)
try:
import subprocess
auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
auxv = auxv.decode()
except Exception as err:
auxv = str(err)
import textwrap
error_report = textwrap.indent(
"""
###########################################
### Extra debugging information
###########################################
-------------------------------------------
--- NumPy Detections
-------------------------------------------
%s
-------------------------------------------
--- SYS / CPUINFO
-------------------------------------------
%s....
-------------------------------------------
--- SYS / AUXV
-------------------------------------------
%s
""" % (detected, cpuinfo, auxv), prefix='\r')
raise AssertionError((
"Failure Detection\n"
" NAME: '%s'\n"
" ACTUAL: %s\n"
" DESIRED: %s\n"
"%s"
) % (fname, actual, desired, error_report))
class AbstractTest:
features = []
features_groups = {}
features_map = {}
features_flags = set()
def load_flags(self):
# a hook
pass
def test_features(self):
self.load_flags()
for gname, features in self.features_groups.items():
test_features = [self.cpu_have(f) for f in features]
assert_features_equal(__cpu_features__.get(gname), all(test_features), gname)
for feature_name in self.features:
cpu_have = self.cpu_have(feature_name)
npy_have = __cpu_features__.get(feature_name)
assert_features_equal(npy_have, cpu_have, feature_name)
def cpu_have(self, feature_name):
map_names = self.features_map.get(feature_name, feature_name)
if isinstance(map_names, str):
return map_names in self.features_flags
for f in map_names:
if f in self.features_flags:
return True
return False
def load_flags_cpuinfo(self, magic_key):
self.features_flags = self.get_cpuinfo_item(magic_key)
def get_cpuinfo_item(self, magic_key):
values = set()
with open('/proc/cpuinfo') as fd:
for line in fd:
if not line.startswith(magic_key):
continue
flags_value = [s.strip() for s in line.split(':', 1)]
if len(flags_value) == 2:
values = values.union(flags_value[1].upper().split())
return values
def load_flags_auxv(self):
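        # With LD_SHOW_AUXV=1 the glibc loader prints the ELF auxiliary
        # vector; on ARM/POWER it typically emits a line such as
        #   AT_HWCAP:        fp asimd evtstrm aes pmull sha1 sha2 crc32
        # (exact flags vary per CPU), which is what is parsed below.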
import subprocess
auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
for at in auxv.split(b'\n'):
if not at.startswith(b"AT_HWCAP"):
continue
hwcap_value = [s.strip() for s in at.split(b':', 1)]
if len(hwcap_value) == 2:
self.features_flags = self.features_flags.union(
hwcap_value[1].upper().decode().split()
)
is_linux = sys.platform.startswith('linux')
is_cygwin = sys.platform.startswith('cygwin')
machine = platform.machine()
is_x86 = re.match("^(amd64|x86|i386|i686)", machine, re.IGNORECASE)
@pytest.mark.skipif(
not (is_linux or is_cygwin) or not is_x86, reason="Only for Linux and x86"
)
class Test_X86_Features(AbstractTest):
features = [
"MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "POPCNT", "SSE42",
"AVX", "F16C", "XOP", "FMA4", "FMA3", "AVX2", "AVX512F", "AVX512CD",
"AVX512ER", "AVX512PF", "AVX5124FMAPS", "AVX5124VNNIW", "AVX512VPOPCNTDQ",
"AVX512VL", "AVX512BW", "AVX512DQ", "AVX512VNNI", "AVX512IFMA",
"AVX512VBMI", "AVX512VBMI2", "AVX512BITALG",
]
features_groups = dict(
AVX512_KNL = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF"],
AVX512_KNM = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF", "AVX5124FMAPS",
"AVX5124VNNIW", "AVX512VPOPCNTDQ"],
AVX512_SKX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL"],
AVX512_CLX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512VNNI"],
AVX512_CNL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
"AVX512VBMI"],
AVX512_ICL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
"AVX512VBMI", "AVX512VNNI", "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ"],
)
features_map = dict(
SSE3="PNI", SSE41="SSE4_1", SSE42="SSE4_2", FMA3="FMA",
AVX512VNNI="AVX512_VNNI", AVX512BITALG="AVX512_BITALG", AVX512VBMI2="AVX512_VBMI2",
AVX5124FMAPS="AVX512_4FMAPS", AVX5124VNNIW="AVX512_4VNNIW", AVX512VPOPCNTDQ="AVX512_VPOPCNTDQ",
)
def load_flags(self):
self.load_flags_cpuinfo("flags")
is_power = re.match("^(powerpc|ppc)64", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_power, reason="Only for Linux and Power")
class Test_POWER_Features(AbstractTest):
features = ["VSX", "VSX2", "VSX3", "VSX4"]
features_map = dict(VSX2="ARCH_2_07", VSX3="ARCH_3_00", VSX4="ARCH_3_1")
def load_flags(self):
self.load_flags_auxv()
is_zarch = re.match("^(s390x)", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_zarch,
reason="Only for Linux and IBM Z")
class Test_ZARCH_Features(AbstractTest):
features = ["VX", "VXE", "VXE2"]
def load_flags(self):
self.load_flags_auxv()
is_arm = re.match("^(arm|aarch64)", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_arm, reason="Only for Linux and ARM")
class Test_ARM_Features(AbstractTest):
features = [
"NEON", "ASIMD", "FPHP", "ASIMDHP", "ASIMDDP", "ASIMDFHM"
]
features_groups = dict(
NEON_FP16 = ["NEON", "HALF"],
NEON_VFPV4 = ["NEON", "VFPV4"],
)
def load_flags(self):
self.load_flags_cpuinfo("Features")
arch = self.get_cpuinfo_item("CPU architecture")
        # in case the virtual filesystem of an aarch64 kernel is mounted
        # ("CPU architecture" then reports 8+ even for 32-bit userspace)
        is_rootfs_v8 = int('0' + next(iter(arch))) > 7 if arch else 0
if re.match("^(aarch64|AARCH64)", machine) or is_rootfs_v8:
self.features_map = dict(
NEON="ASIMD", HALF="ASIMD", VFPV4="ASIMD"
)
else:
self.features_map = dict(
# ELF auxiliary vector and /proc/cpuinfo on Linux kernel(armv8 aarch32)
# doesn't provide information about ASIMD, so we assume that ASIMD is supported
# if the kernel reports any one of the following ARM8 features.
ASIMD=("AES", "SHA1", "SHA2", "PMULL", "CRC32")
)
| 7,169 | Python | 37.548387 | 103 | 0.556563 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_ufunc.py | import warnings
import itertools
import sys
import pytest
import numpy as np
import numpy.core._umath_tests as umt
import numpy.linalg._umath_linalg as uml
import numpy.core._operand_flag_tests as opflag_tests
import numpy.core._rational_tests as _rational_tests
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_array_almost_equal, assert_no_warnings,
assert_allclose, HAS_REFCOUNT, suppress_warnings
)
from numpy.testing._private.utils import requires_memory
from numpy.compat import pickle
UNARY_UFUNCS = [obj for obj in np.core.umath.__dict__.values()
if isinstance(obj, np.ufunc)]
UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types]
class TestUfuncKwargs:
def test_kwarg_exact(self):
assert_raises(TypeError, np.add, 1, 2, castingx='safe')
assert_raises(TypeError, np.add, 1, 2, dtypex=int)
assert_raises(TypeError, np.add, 1, 2, extobjx=[4096])
assert_raises(TypeError, np.add, 1, 2, outx=None)
assert_raises(TypeError, np.add, 1, 2, sigx='ii->i')
assert_raises(TypeError, np.add, 1, 2, signaturex='ii->i')
assert_raises(TypeError, np.add, 1, 2, subokx=False)
assert_raises(TypeError, np.add, 1, 2, wherex=[True])
def test_sig_signature(self):
assert_raises(TypeError, np.add, 1, 2, sig='ii->i',
signature='ii->i')
def test_sig_dtype(self):
assert_raises(TypeError, np.add, 1, 2, sig='ii->i',
dtype=int)
assert_raises(TypeError, np.add, 1, 2, signature='ii->i',
dtype=int)
def test_extobj_refcount(self):
# Should not segfault with USE_DEBUG.
assert_raises(TypeError, np.add, 1, 2, extobj=[4096], parrot=True)
class TestUfuncGenericLoops:
"""Test generic loops.
The loops to be tested are:
PyUFunc_ff_f_As_dd_d
PyUFunc_ff_f
PyUFunc_dd_d
PyUFunc_gg_g
PyUFunc_FF_F_As_DD_D
PyUFunc_DD_D
PyUFunc_FF_F
PyUFunc_GG_G
PyUFunc_OO_O
PyUFunc_OO_O_method
PyUFunc_f_f_As_d_d
PyUFunc_d_d
PyUFunc_f_f
PyUFunc_g_g
PyUFunc_F_F_As_D_D
PyUFunc_F_F
PyUFunc_D_D
PyUFunc_G_G
PyUFunc_O_O
PyUFunc_O_O_method
PyUFunc_On_Om
Where:
f -- float
d -- double
g -- long double
F -- complex float
D -- complex double
G -- complex long double
O -- python object
It is difficult to assure that each of these loops is entered from the
Python level as the special cased loops are a moving target and the
corresponding types are architecture dependent. We probably need to
define C level testing ufuncs to get at them. For the time being, I've
just looked at the signatures registered in the build directory to find
relevant functions.
"""
np_dtypes = [
(np.single, np.single), (np.single, np.double),
(np.csingle, np.csingle), (np.csingle, np.cdouble),
(np.double, np.double), (np.longdouble, np.longdouble),
(np.cdouble, np.cdouble), (np.clongdouble, np.clongdouble)]
@pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes)
def test_unary_PyUFunc(self, input_dtype, output_dtype, f=np.exp, x=0, y=1):
xs = np.full(10, input_dtype(x), dtype=output_dtype)
ys = f(xs)[::2]
assert_allclose(ys, y)
assert_equal(ys.dtype, output_dtype)
def f2(x, y):
return x**y
@pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes)
def test_binary_PyUFunc(self, input_dtype, output_dtype, f=f2, x=0, y=1):
xs = np.full(10, input_dtype(x), dtype=output_dtype)
ys = f(xs, xs)[::2]
assert_allclose(ys, y)
assert_equal(ys.dtype, output_dtype)
# class to use in testing object method loops
class foo:
def conjugate(self):
return np.bool_(1)
def logical_xor(self, obj):
return np.bool_(1)
def test_unary_PyUFunc_O_O(self):
x = np.ones(10, dtype=object)
assert_(np.all(np.abs(x) == 1))
def test_unary_PyUFunc_O_O_method_simple(self, foo=foo):
x = np.full(10, foo(), dtype=object)
assert_(np.all(np.conjugate(x) == True))
def test_binary_PyUFunc_OO_O(self):
x = np.ones(10, dtype=object)
assert_(np.all(np.add(x, x) == 2))
def test_binary_PyUFunc_OO_O_method(self, foo=foo):
x = np.full(10, foo(), dtype=object)
assert_(np.all(np.logical_xor(x, x)))
def test_binary_PyUFunc_On_Om_method(self, foo=foo):
x = np.full((10, 2, 3), foo(), dtype=object)
assert_(np.all(np.logical_xor(x, x)))
def test_python_complex_conjugate(self):
# The conjugate ufunc should fall back to calling the method:
arr = np.array([1+2j, 3-4j], dtype="O")
assert isinstance(arr[0], complex)
res = np.conjugate(arr)
assert res.dtype == np.dtype("O")
assert_array_equal(res, np.array([1-2j, 3+4j], dtype="O"))
@pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS)
def test_unary_PyUFunc_O_O_method_full(self, ufunc):
"""Compare the result of the object loop with non-object one"""
val = np.float64(np.pi/4)
class MyFloat(np.float64):
def __getattr__(self, attr):
try:
return super().__getattr__(attr)
except AttributeError:
return lambda: getattr(np.core.umath, attr)(val)
        # Use 0-D arrays, to ensure that the same element is used for each call
num_arr = np.array(val, dtype=np.float64)
obj_arr = np.array(MyFloat(val), dtype="O")
with np.errstate(all="raise"):
try:
res_num = ufunc(num_arr)
except Exception as exc:
with assert_raises(type(exc)):
ufunc(obj_arr)
else:
res_obj = ufunc(obj_arr)
assert_array_almost_equal(res_num.astype("O"), res_obj)
def _pickleable_module_global():
pass
class TestUfunc:
def test_pickle(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
assert_(pickle.loads(pickle.dumps(np.sin,
protocol=proto)) is np.sin)
            # Check that a ufunc not defined in the top-level numpy
            # namespace, such as numpy.core._rational_tests.test_add,
            # can also be pickled
res = pickle.loads(pickle.dumps(_rational_tests.test_add,
protocol=proto))
assert_(res is _rational_tests.test_add)
def test_pickle_withstring(self):
astring = (b"cnumpy.core\n_ufunc_reconstruct\np0\n"
b"(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.")
assert_(pickle.loads(astring) is np.cos)
def test_pickle_name_is_qualname(self):
# This tests that a simplification of our ufunc pickle code will
# lead to allowing qualnames as names. Future ufuncs should
        # possibly add a specific qualname, or a hook into pickling instead
# (dask+numba may benefit).
_pickleable_module_global.ufunc = umt._pickleable_module_global_ufunc
obj = pickle.loads(pickle.dumps(_pickleable_module_global.ufunc))
assert obj is umt._pickleable_module_global_ufunc
def test_reduceat_shifting_sum(self):
L = 6
x = np.arange(L)
idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel()
assert_array_equal(np.add.reduceat(x, idx)[::2], [1, 3, 5, 7])
def test_all_ufunc(self):
"""Try to check presence and results of all ufuncs.
The list of ufuncs comes from generate_umath.py and is as follows:
===== ==== ============= =============== ========================
done args function types notes
===== ==== ============= =============== ========================
n 1 conjugate nums + O
n 1 absolute nums + O complex -> real
n 1 negative nums + O
n 1 sign nums + O -> int
n 1 invert bool + ints + O flts raise an error
n 1 degrees real + M cmplx raise an error
n 1 radians real + M cmplx raise an error
n 1 arccos flts + M
n 1 arccosh flts + M
n 1 arcsin flts + M
n 1 arcsinh flts + M
n 1 arctan flts + M
n 1 arctanh flts + M
n 1 cos flts + M
n 1 sin flts + M
n 1 tan flts + M
n 1 cosh flts + M
n 1 sinh flts + M
n 1 tanh flts + M
n 1 exp flts + M
n 1 expm1 flts + M
n 1 log flts + M
n 1 log10 flts + M
n 1 log1p flts + M
n 1 sqrt flts + M real x < 0 raises error
n 1 ceil real + M
n 1 trunc real + M
n 1 floor real + M
n 1 fabs real + M
n 1 rint flts + M
n 1 isnan flts -> bool
n 1 isinf flts -> bool
n 1 isfinite flts -> bool
n 1 signbit real -> bool
n 1 modf real -> (frac, int)
n 1 logical_not bool + nums + M -> bool
n 2 left_shift ints + O flts raise an error
n 2 right_shift ints + O flts raise an error
n 2 add bool + nums + O boolean + is ||
n 2 subtract bool + nums + O boolean - is ^
n 2 multiply bool + nums + O boolean * is &
n 2 divide nums + O
n 2 floor_divide nums + O
n 2 true_divide nums + O bBhH -> f, iIlLqQ -> d
n 2 fmod nums + M
n 2 power nums + O
n 2 greater bool + nums + O -> bool
n 2 greater_equal bool + nums + O -> bool
n 2 less bool + nums + O -> bool
n 2 less_equal bool + nums + O -> bool
n 2 equal bool + nums + O -> bool
n 2 not_equal bool + nums + O -> bool
n 2 logical_and bool + nums + M -> bool
n 2 logical_or bool + nums + M -> bool
n 2 logical_xor bool + nums + M -> bool
n 2 maximum bool + nums + O
n 2 minimum bool + nums + O
n 2 bitwise_and bool + ints + O flts raise an error
n 2 bitwise_or bool + ints + O flts raise an error
n 2 bitwise_xor bool + ints + O flts raise an error
n 2 arctan2 real + M
n 2 remainder ints + real + O
n 2 hypot real + M
===== ==== ============= =============== ========================
Types other than those listed will be accepted, but they are cast to
the smallest compatible type for which the function is defined. The
casting rules are:
bool -> int8 -> float32
ints -> double
"""
pass
# from include/numpy/ufuncobject.h
size_inferred = 2
can_ignore = 4
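    # (these match UFUNC_CORE_DIM_SIZE_INFERRED == 0x0002 and
    #  UFUNC_CORE_DIM_CAN_IGNORE == 0x0004 in the C header)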
def test_signature0(self):
# the arguments to test_signature are: nin, nout, core_signature
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, "(i),(i)->()")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 1, 0))
assert_equal(ixs, (0, 0))
assert_equal(flags, (self.size_inferred,))
assert_equal(sizes, (-1,))
def test_signature1(self):
# empty core signature; treat as plain ufunc (with trivial core)
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, "(),()->()")
assert_equal(enabled, 0)
assert_equal(num_dims, (0, 0, 0))
assert_equal(ixs, ())
assert_equal(flags, ())
assert_equal(sizes, ())
def test_signature2(self):
# more complicated names for variables
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, "(i1,i2),(J_1)->(_kAB)")
assert_equal(enabled, 1)
assert_equal(num_dims, (2, 1, 1))
assert_equal(ixs, (0, 1, 2, 3))
assert_equal(flags, (self.size_inferred,)*4)
assert_equal(sizes, (-1, -1, -1, -1))
def test_signature3(self):
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, u"(i1, i12), (J_1)->(i12, i2)")
assert_equal(enabled, 1)
assert_equal(num_dims, (2, 1, 2))
assert_equal(ixs, (0, 1, 2, 1, 3))
assert_equal(flags, (self.size_inferred,)*4)
assert_equal(sizes, (-1, -1, -1, -1))
def test_signature4(self):
# matrix_multiply signature from _umath_tests
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, "(n,k),(k,m)->(n,m)")
assert_equal(enabled, 1)
assert_equal(num_dims, (2, 2, 2))
assert_equal(ixs, (0, 1, 1, 2, 0, 2))
assert_equal(flags, (self.size_inferred,)*3)
assert_equal(sizes, (-1, -1, -1))
def test_signature5(self):
# matmul signature from _umath_tests
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
2, 1, "(n?,k),(k,m?)->(n?,m?)")
assert_equal(enabled, 1)
assert_equal(num_dims, (2, 2, 2))
assert_equal(ixs, (0, 1, 1, 2, 0, 2))
assert_equal(flags, (self.size_inferred | self.can_ignore,
self.size_inferred,
self.size_inferred | self.can_ignore))
assert_equal(sizes, (-1, -1, -1))
def test_signature6(self):
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
1, 1, "(3)->()")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 0))
assert_equal(ixs, (0,))
assert_equal(flags, (0,))
assert_equal(sizes, (3,))
def test_signature7(self):
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
3, 1, "(3),(03,3),(n)->(9)")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 2, 1, 1))
assert_equal(ixs, (0, 0, 0, 1, 2))
assert_equal(flags, (0, self.size_inferred, 0))
assert_equal(sizes, (3, -1, 9))
def test_signature8(self):
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
3, 1, "(3?),(3?,3?),(n)->(9)")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 2, 1, 1))
assert_equal(ixs, (0, 0, 0, 1, 2))
assert_equal(flags, (self.can_ignore, self.size_inferred, 0))
assert_equal(sizes, (3, -1, 9))
def test_signature9(self):
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
1, 1, "( 3) -> ( )")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 0))
assert_equal(ixs, (0,))
assert_equal(flags, (0,))
assert_equal(sizes, (3,))
def test_signature10(self):
enabled, num_dims, ixs, flags, sizes = umt.test_signature(
3, 1, "( 3? ) , (3? , 3?) ,(n )-> ( 9)")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 2, 1, 1))
assert_equal(ixs, (0, 0, 0, 1, 2))
assert_equal(flags, (self.can_ignore, self.size_inferred, 0))
assert_equal(sizes, (3, -1, 9))
def test_signature_failure_extra_parenthesis(self):
with assert_raises(ValueError):
umt.test_signature(2, 1, "((i)),(i)->()")
def test_signature_failure_mismatching_parenthesis(self):
with assert_raises(ValueError):
umt.test_signature(2, 1, "(i),)i(->()")
def test_signature_failure_signature_missing_input_arg(self):
with assert_raises(ValueError):
umt.test_signature(2, 1, "(i),->()")
def test_signature_failure_signature_missing_output_arg(self):
with assert_raises(ValueError):
umt.test_signature(2, 2, "(i),(i)->()")
def test_get_signature(self):
assert_equal(umt.inner1d.signature, "(i),(i)->()")
def test_forced_sig(self):
a = 0.5*np.arange(3, dtype='f8')
assert_equal(np.add(a, 0.5), [0.5, 1, 1.5])
with pytest.warns(DeprecationWarning):
assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1])
assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1])
with pytest.warns(DeprecationWarning):
assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'),
[0, 0, 1])
assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'),
casting='unsafe'), [0, 0, 1])
b = np.zeros((3,), dtype='f8')
np.add(a, 0.5, out=b)
assert_equal(b, [0.5, 1, 1.5])
b[:] = 0
with pytest.warns(DeprecationWarning):
np.add(a, 0.5, sig='i', out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
b[:] = 0
np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
b[:] = 0
with pytest.warns(DeprecationWarning):
np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
b[:] = 0
np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
def test_signature_all_None(self):
        # A signature of all Nones is an acceptable alternative (since 1.21)
        # to not providing a signature at all.
res1 = np.add([3], [4], sig=(None, None, None))
res2 = np.add([3], [4])
assert_array_equal(res1, res2)
res1 = np.maximum([3], [4], sig=(None, None, None))
res2 = np.maximum([3], [4])
assert_array_equal(res1, res2)
with pytest.raises(TypeError):
# special case, that would be deprecated anyway, so errors:
np.add(3, 4, signature=(None,))
def test_signature_dtype_type(self):
        # Since this will be the normal behaviour (past NumPy 1.21),
        # we already support passing dtype types:
float_dtype = type(np.dtype(np.float64))
np.add(3, 4, signature=(float_dtype, float_dtype, None))
@pytest.mark.parametrize("casting", ["unsafe", "same_kind", "safe"])
def test_partial_signature_mismatch(self, casting):
# If the second argument matches already, no need to specify it:
res = np.ldexp(np.float32(1.), np.int_(2), dtype="d")
assert res.dtype == "d"
res = np.ldexp(np.float32(1.), np.int_(2), signature=(None, None, "d"))
assert res.dtype == "d"
        # ldexp only has a loop for long input as second argument; overriding
        # the output cannot help with that (no matter the casting)
with pytest.raises(TypeError):
np.ldexp(1., np.uint64(3), dtype="d")
with pytest.raises(TypeError):
np.ldexp(1., np.uint64(3), signature=(None, None, "d"))
def test_partial_signature_mismatch_with_cache(self):
with pytest.raises(TypeError):
np.add(np.float16(1), np.uint64(2), sig=("e", "d", None))
# Ensure e,d->None is in the dispatching cache (double loop)
np.add(np.float16(1), np.float64(2))
# The error must still be raised:
with pytest.raises(TypeError):
np.add(np.float16(1), np.uint64(2), sig=("e", "d", None))
def test_use_output_signature_for_all_arguments(self):
# Test that providing only `dtype=` or `signature=(None, None, dtype)`
# is sufficient if falling back to a homogeneous signature works.
# In this case, the `intp, intp -> intp` loop is chosen.
res = np.power(1.5, 2.8, dtype=np.intp, casting="unsafe")
assert res == 1 # the cast happens first.
res = np.power(1.5, 2.8, signature=(None, None, np.intp),
casting="unsafe")
assert res == 1
with pytest.raises(TypeError):
# the unsafe casting would normally cause errors though:
np.power(1.5, 2.8, dtype=np.intp)
def test_signature_errors(self):
with pytest.raises(TypeError,
match="the signature object to ufunc must be a string or"):
np.add(3, 4, signature=123.) # neither a string nor a tuple
with pytest.raises(ValueError):
# bad symbols that do not translate to dtypes
np.add(3, 4, signature="%^->#")
with pytest.raises(ValueError):
np.add(3, 4, signature=b"ii-i") # incomplete and byte string
with pytest.raises(ValueError):
np.add(3, 4, signature="ii>i") # incomplete string
with pytest.raises(ValueError):
np.add(3, 4, signature=(None, "f8")) # bad length
with pytest.raises(UnicodeDecodeError):
np.add(3, 4, signature=b"\xff\xff->i")
def test_forced_dtype_times(self):
# Signatures only set the type numbers (not the actual loop dtypes)
# so using `M` in a signature/dtype should generally work:
a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='>M8[D]')
np.maximum(a, a, dtype="M")
np.maximum.reduce(a, dtype="M")
arr = np.arange(10, dtype="m8[s]")
np.add(arr, arr, dtype="m")
np.maximum(arr, arr, dtype="m")
@pytest.mark.parametrize("ufunc", [np.add, np.sqrt])
def test_cast_safety(self, ufunc):
"""Basic test for the safest casts, because ufuncs inner loops can
indicate a cast-safety as well (which is normally always "no").
"""
def call_ufunc(arr, **kwargs):
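            # repeat `arr` once per ufunc input (nin times), so this works
            # for unary (np.sqrt) and binary (np.add) ufuncs alike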
return ufunc(*(arr,) * ufunc.nin, **kwargs)
arr = np.array([1., 2., 3.], dtype=np.float32)
arr_bs = arr.astype(arr.dtype.newbyteorder())
expected = call_ufunc(arr)
# Normally, a "no" cast:
res = call_ufunc(arr, casting="no")
assert_array_equal(expected, res)
# Byte-swapping is not allowed with "no" though:
with pytest.raises(TypeError):
call_ufunc(arr_bs, casting="no")
# But is allowed with "equiv":
res = call_ufunc(arr_bs, casting="equiv")
assert_array_equal(expected, res)
# Casting to float64 is safe, but not equiv:
with pytest.raises(TypeError):
call_ufunc(arr_bs, dtype=np.float64, casting="equiv")
# but it is safe cast:
res = call_ufunc(arr_bs, dtype=np.float64, casting="safe")
expected = call_ufunc(arr.astype(np.float64)) # upcast
assert_array_equal(expected, res)
def test_true_divide(self):
a = np.array(10)
b = np.array(20)
tgt = np.array(0.5)
for tc in 'bhilqBHILQefdgFDG':
dt = np.dtype(tc)
aa = a.astype(dt)
bb = b.astype(dt)
# Check result value and dtype.
for x, y in itertools.product([aa, -aa], [bb, -bb]):
# Check with no output type specified
if tc in 'FDG':
tgt = complex(x)/complex(y)
else:
tgt = float(x)/float(y)
res = np.true_divide(x, y)
rtol = max(np.finfo(res).resolution, 1e-15)
assert_allclose(res, tgt, rtol=rtol)
if tc in 'bhilqBHILQ':
assert_(res.dtype.name == 'float64')
else:
assert_(res.dtype.name == dt.name )
# Check with output type specified. This also checks for the
# incorrect casts in issue gh-3484 because the unary '-' does
                # not change types, even for unsigned types. Hence casts in the
# ufunc from signed to unsigned and vice versa will lead to
# errors in the values.
for tcout in 'bhilqBHILQ':
dtout = np.dtype(tcout)
assert_raises(TypeError, np.true_divide, x, y, dtype=dtout)
for tcout in 'efdg':
dtout = np.dtype(tcout)
if tc in 'FDG':
# Casting complex to float is not allowed
assert_raises(TypeError, np.true_divide, x, y, dtype=dtout)
else:
tgt = float(x)/float(y)
rtol = max(np.finfo(dtout).resolution, 1e-15)
# The value of tiny for double double is NaN
with suppress_warnings() as sup:
sup.filter(UserWarning)
if not np.isnan(np.finfo(dtout).tiny):
atol = max(np.finfo(dtout).tiny, 3e-308)
else:
atol = 3e-308
# Some test values result in invalid for float16.
with np.errstate(invalid='ignore'):
res = np.true_divide(x, y, dtype=dtout)
if not np.isfinite(res) and tcout == 'e':
continue
assert_allclose(res, tgt, rtol=rtol, atol=atol)
assert_(res.dtype.name == dtout.name)
for tcout in 'FDG':
dtout = np.dtype(tcout)
tgt = complex(x)/complex(y)
rtol = max(np.finfo(dtout).resolution, 1e-15)
# The value of tiny for double double is NaN
with suppress_warnings() as sup:
sup.filter(UserWarning)
if not np.isnan(np.finfo(dtout).tiny):
atol = max(np.finfo(dtout).tiny, 3e-308)
else:
atol = 3e-308
res = np.true_divide(x, y, dtype=dtout)
if not np.isfinite(res):
continue
assert_allclose(res, tgt, rtol=rtol, atol=atol)
assert_(res.dtype.name == dtout.name)
# Check booleans
a = np.ones((), dtype=np.bool_)
res = np.true_divide(a, a)
assert_(res == 1.0)
assert_(res.dtype.name == 'float64')
res = np.true_divide(~a, a)
assert_(res == 0.0)
assert_(res.dtype.name == 'float64')
def test_sum_stability(self):
a = np.ones(500, dtype=np.float32)
assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 4)
a = np.ones(500, dtype=np.float64)
assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13)
def test_sum(self):
for dt in (int, np.float16, np.float32, np.float64, np.longdouble):
for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
128, 1024, 1235):
tgt = dt(v * (v + 1) / 2)
d = np.arange(1, v + 1, dtype=dt)
# warning if sum overflows, which it does in float16
overflow = not np.isfinite(tgt)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert_almost_equal(np.sum(d), tgt)
assert_equal(len(w), 1 * overflow)
assert_almost_equal(np.sum(d[::-1]), tgt)
assert_equal(len(w), 2 * overflow)
d = np.ones(500, dtype=dt)
assert_almost_equal(np.sum(d[::2]), 250.)
assert_almost_equal(np.sum(d[1::2]), 250.)
assert_almost_equal(np.sum(d[::3]), 167.)
assert_almost_equal(np.sum(d[1::3]), 167.)
assert_almost_equal(np.sum(d[::-2]), 250.)
assert_almost_equal(np.sum(d[-1::-2]), 250.)
assert_almost_equal(np.sum(d[::-3]), 167.)
assert_almost_equal(np.sum(d[-1::-3]), 167.)
# sum with first reduction entry != 0
d = np.ones((1,), dtype=dt)
d += d
assert_almost_equal(d, 2.)
def test_sum_complex(self):
for dt in (np.complex64, np.complex128, np.clongdouble):
for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
128, 1024, 1235):
tgt = dt(v * (v + 1) / 2) - dt((v * (v + 1) / 2) * 1j)
d = np.empty(v, dtype=dt)
d.real = np.arange(1, v + 1)
d.imag = -np.arange(1, v + 1)
assert_almost_equal(np.sum(d), tgt)
assert_almost_equal(np.sum(d[::-1]), tgt)
d = np.ones(500, dtype=dt) + 1j
assert_almost_equal(np.sum(d[::2]), 250. + 250j)
assert_almost_equal(np.sum(d[1::2]), 250. + 250j)
assert_almost_equal(np.sum(d[::3]), 167. + 167j)
assert_almost_equal(np.sum(d[1::3]), 167. + 167j)
assert_almost_equal(np.sum(d[::-2]), 250. + 250j)
assert_almost_equal(np.sum(d[-1::-2]), 250. + 250j)
assert_almost_equal(np.sum(d[::-3]), 167. + 167j)
assert_almost_equal(np.sum(d[-1::-3]), 167. + 167j)
# sum with first reduction entry != 0
d = np.ones((1,), dtype=dt) + 1j
d += d
assert_almost_equal(d, 2. + 2j)
def test_sum_initial(self):
# Integer, single axis
assert_equal(np.sum([3], initial=2), 5)
# Floating point
assert_almost_equal(np.sum([0.2], initial=0.1), 0.3)
# Multiple non-adjacent axes
assert_equal(np.sum(np.ones((2, 3, 5), dtype=np.int64), axis=(0, 2), initial=2),
[12, 12, 12])
def test_sum_where(self):
# More extensive tests done in test_reduction_with_where.
assert_equal(np.sum([[1., 2.], [3., 4.]], where=[True, False]), 4.)
assert_equal(np.sum([[1., 2.], [3., 4.]], axis=0, initial=5.,
where=[True, False]), [9., 5.])
def test_inner1d(self):
a = np.arange(6).reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1))
a = np.arange(6)
assert_array_equal(umt.inner1d(a, a), np.sum(a*a))
def test_broadcast(self):
msg = "broadcast"
a = np.arange(4).reshape((2, 1, 2))
b = np.arange(4).reshape((1, 2, 2))
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
msg = "extend & broadcast loop dimensions"
b = np.arange(4).reshape((2, 2))
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
# Broadcast in core dimensions should fail
a = np.arange(8).reshape((4, 2))
b = np.arange(4).reshape((4, 1))
assert_raises(ValueError, umt.inner1d, a, b)
# Extend core dimensions should fail
a = np.arange(8).reshape((4, 2))
b = np.array(7)
assert_raises(ValueError, umt.inner1d, a, b)
# Broadcast should fail
a = np.arange(2).reshape((2, 1, 1))
b = np.arange(3).reshape((3, 1, 1))
assert_raises(ValueError, umt.inner1d, a, b)
# Writing to a broadcasted array with overlap should warn, gh-2705
a = np.arange(2)
b = np.arange(4).reshape((2, 2))
u, v = np.broadcast_arrays(a, b)
assert_equal(u.strides[0], 0)
x = u + v
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
u += v
assert_equal(len(w), 1)
assert_(x[0, 0] != u[0, 0])
# Output reduction should not be allowed.
# See gh-15139
a = np.arange(6).reshape(3, 2)
b = np.ones(2)
out = np.empty(())
assert_raises(ValueError, umt.inner1d, a, b, out)
out2 = np.empty(3)
c = umt.inner1d(a, b, out2)
assert_(c is out2)
def test_out_broadcasts(self):
# For ufuncs and gufuncs (not for reductions), we currently allow
        # the output to cause broadcasting of the input arrays,
# both along dimensions with shape 1 and dimensions which do not
# exist at all in the inputs.
arr = np.arange(3).reshape(1, 3)
out = np.empty((5, 4, 3))
np.add(arr, arr, out=out)
assert (out == np.arange(3) * 2).all()
# The same holds for gufuncs (gh-16484)
umt.inner1d(arr, arr, out=out)
# the result would be just a scalar `5`, but is broadcast fully:
assert (out == 5).all()
@pytest.mark.parametrize(["arr", "out"], [
([2], np.empty(())),
([1, 2], np.empty(1)),
(np.ones((4, 3)), np.empty((4, 1)))],
ids=["(1,)->()", "(2,)->(1,)", "(4, 3)->(4, 1)"])
def test_out_broadcast_errors(self, arr, out):
# Output is (currently) allowed to broadcast inputs, but it cannot be
# smaller than the actual result.
with pytest.raises(ValueError, match="non-broadcastable"):
np.positive(arr, out=out)
with pytest.raises(ValueError, match="non-broadcastable"):
np.add(np.ones(()), arr, out=out)
def test_type_cast(self):
msg = "type cast"
a = np.arange(6, dtype='short').reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
err_msg=msg)
msg = "type cast on one argument"
a = np.arange(6).reshape((2, 3))
b = a + 0.1
assert_array_almost_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1),
err_msg=msg)
def test_endian(self):
msg = "big endian"
a = np.arange(6, dtype='>i4').reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
err_msg=msg)
msg = "little endian"
a = np.arange(6, dtype='<i4').reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
err_msg=msg)
# Output should always be native-endian
Ba = np.arange(1, dtype='>f8')
La = np.arange(1, dtype='<f8')
assert_equal((Ba+Ba).dtype, np.dtype('f8'))
assert_equal((Ba+La).dtype, np.dtype('f8'))
assert_equal((La+Ba).dtype, np.dtype('f8'))
assert_equal((La+La).dtype, np.dtype('f8'))
assert_equal(np.absolute(La).dtype, np.dtype('f8'))
assert_equal(np.absolute(Ba).dtype, np.dtype('f8'))
assert_equal(np.negative(La).dtype, np.dtype('f8'))
assert_equal(np.negative(Ba).dtype, np.dtype('f8'))
def test_incontiguous_array(self):
msg = "incontiguous memory layout of array"
x = np.arange(64).reshape((2, 2, 2, 2, 2, 2))
        a = x[:, 0, :, 0, :, 0]
        b = x[:, 1, :, 1, :, 1]
a[0, 0, 0] = -1
msg2 = "make sure it references to the original array"
assert_equal(x[0, 0, 0, 0, 0, 0], -1, err_msg=msg2)
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
x = np.arange(24).reshape(2, 3, 4)
a = x.T
b = x.T
a[0, 0, 0] = -1
assert_equal(x[0, 0, 0], -1, err_msg=msg2)
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
def test_output_argument(self):
msg = "output argument"
a = np.arange(12).reshape((2, 3, 2))
b = np.arange(4).reshape((2, 1, 2)) + 1
c = np.zeros((2, 3), dtype='int')
umt.inner1d(a, b, c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
c[:] = -1
umt.inner1d(a, b, out=c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
msg = "output argument with type cast"
c = np.zeros((2, 3), dtype='int16')
umt.inner1d(a, b, c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
c[:] = -1
umt.inner1d(a, b, out=c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
msg = "output argument with incontiguous layout"
c = np.zeros((2, 3, 4), dtype='int16')
umt.inner1d(a, b, c[..., 0])
assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg)
c[:] = -1
umt.inner1d(a, b, out=c[..., 0])
assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg)
def test_axes_argument(self):
# inner1d signature: '(i),(i)->()'
inner1d = umt.inner1d
a = np.arange(27.).reshape((3, 3, 3))
b = np.arange(10., 19.).reshape((3, 1, 3))
# basic tests on inputs (outputs tested below with matrix_multiply).
c = inner1d(a, b)
assert_array_equal(c, (a * b).sum(-1))
# default
c = inner1d(a, b, axes=[(-1,), (-1,), ()])
assert_array_equal(c, (a * b).sum(-1))
# integers ok for single axis.
c = inner1d(a, b, axes=[-1, -1, ()])
assert_array_equal(c, (a * b).sum(-1))
# mix fine
c = inner1d(a, b, axes=[(-1,), -1, ()])
assert_array_equal(c, (a * b).sum(-1))
# can omit last axis.
c = inner1d(a, b, axes=[-1, -1])
assert_array_equal(c, (a * b).sum(-1))
# can pass in other types of integer (with __index__ protocol)
c = inner1d(a, b, axes=[np.int8(-1), np.array(-1, dtype=np.int32)])
assert_array_equal(c, (a * b).sum(-1))
# swap some axes
c = inner1d(a, b, axes=[0, 0])
assert_array_equal(c, (a * b).sum(0))
c = inner1d(a, b, axes=[0, 2])
assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
# Check errors for improperly constructed axes arguments.
# should have list.
assert_raises(TypeError, inner1d, a, b, axes=-1)
# needs enough elements
assert_raises(ValueError, inner1d, a, b, axes=[-1])
# should pass in indices.
assert_raises(TypeError, inner1d, a, b, axes=[-1.0, -1.0])
assert_raises(TypeError, inner1d, a, b, axes=[(-1.0,), -1])
assert_raises(TypeError, inner1d, a, b, axes=[None, 1])
# cannot pass an index unless there is only one dimension
# (output is wrong in this case)
assert_raises(TypeError, inner1d, a, b, axes=[-1, -1, -1])
# or pass in generally the wrong number of axes
assert_raises(ValueError, inner1d, a, b, axes=[-1, -1, (-1,)])
assert_raises(ValueError, inner1d, a, b, axes=[-1, (-2, -1), ()])
# axes need to have same length.
assert_raises(ValueError, inner1d, a, b, axes=[0, 1])
# matrix_multiply signature: '(m,n),(n,p)->(m,p)'
mm = umt.matrix_multiply
a = np.arange(12).reshape((2, 3, 2))
b = np.arange(8).reshape((2, 2, 2, 1)) + 1
# Sanity check.
c = mm(a, b)
assert_array_equal(c, np.matmul(a, b))
# Default axes.
c = mm(a, b, axes=[(-2, -1), (-2, -1), (-2, -1)])
assert_array_equal(c, np.matmul(a, b))
# Default with explicit axes.
c = mm(a, b, axes=[(1, 2), (2, 3), (2, 3)])
assert_array_equal(c, np.matmul(a, b))
# swap some axes.
c = mm(a, b, axes=[(0, -1), (1, 2), (-2, -1)])
assert_array_equal(c, np.matmul(a.transpose(1, 0, 2),
b.transpose(0, 3, 1, 2)))
# Default with output array.
c = np.empty((2, 2, 3, 1))
d = mm(a, b, out=c, axes=[(1, 2), (2, 3), (2, 3)])
assert_(c is d)
assert_array_equal(c, np.matmul(a, b))
# Transposed output array
c = np.empty((1, 2, 2, 3))
d = mm(a, b, out=c, axes=[(-2, -1), (-2, -1), (3, 0)])
assert_(c is d)
assert_array_equal(c, np.matmul(a, b).transpose(3, 0, 1, 2))
# Check errors for improperly constructed axes arguments.
# wrong argument
assert_raises(TypeError, mm, a, b, axis=1)
# axes should be list
assert_raises(TypeError, mm, a, b, axes=1)
assert_raises(TypeError, mm, a, b, axes=((-2, -1), (-2, -1), (-2, -1)))
# list needs to have right length
assert_raises(ValueError, mm, a, b, axes=[])
assert_raises(ValueError, mm, a, b, axes=[(-2, -1)])
# list should contain tuples for multiple axes
assert_raises(TypeError, mm, a, b, axes=[-1, -1, -1])
assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), -1])
assert_raises(TypeError,
mm, a, b, axes=[[-2, -1], [-2, -1], [-2, -1]])
assert_raises(TypeError,
mm, a, b, axes=[(-2, -1), (-2, -1), [-2, -1]])
assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), None])
# tuples should not have duplicated values
assert_raises(ValueError, mm, a, b, axes=[(-2, -1), (-2, -1), (-2, -2)])
# arrays should have enough axes.
z = np.zeros((2, 2))
assert_raises(ValueError, mm, z, z[0])
assert_raises(ValueError, mm, z, z, out=z[:, 0])
assert_raises(ValueError, mm, z[1], z, axes=[0, 1])
assert_raises(ValueError, mm, z, z, out=z[0], axes=[0, 1])
# Regular ufuncs should not accept axes.
assert_raises(TypeError, np.add, 1., 1., axes=[0])
# should be able to deal with bad unrelated kwargs.
assert_raises(TypeError, mm, z, z, axes=[0, 1], parrot=True)
def test_axis_argument(self):
# inner1d signature: '(i),(i)->()'
inner1d = umt.inner1d
a = np.arange(27.).reshape((3, 3, 3))
b = np.arange(10., 19.).reshape((3, 1, 3))
c = inner1d(a, b)
assert_array_equal(c, (a * b).sum(-1))
c = inner1d(a, b, axis=-1)
assert_array_equal(c, (a * b).sum(-1))
out = np.zeros_like(c)
d = inner1d(a, b, axis=-1, out=out)
assert_(d is out)
assert_array_equal(d, c)
c = inner1d(a, b, axis=0)
assert_array_equal(c, (a * b).sum(0))
# Sanity checks on innerwt and cumsum.
a = np.arange(6).reshape((2, 3))
b = np.arange(10, 16).reshape((2, 3))
w = np.arange(20, 26).reshape((2, 3))
assert_array_equal(umt.innerwt(a, b, w, axis=0),
np.sum(a * b * w, axis=0))
assert_array_equal(umt.cumsum(a, axis=0), np.cumsum(a, axis=0))
assert_array_equal(umt.cumsum(a, axis=-1), np.cumsum(a, axis=-1))
out = np.empty_like(a)
b = umt.cumsum(a, out=out, axis=0)
assert_(out is b)
assert_array_equal(b, np.cumsum(a, axis=0))
b = umt.cumsum(a, out=out, axis=1)
assert_(out is b)
assert_array_equal(b, np.cumsum(a, axis=-1))
# Check errors.
# Cannot pass in both axis and axes.
assert_raises(TypeError, inner1d, a, b, axis=0, axes=[0, 0])
# Not an integer.
assert_raises(TypeError, inner1d, a, b, axis=[0])
# more than 1 core dimensions.
mm = umt.matrix_multiply
assert_raises(TypeError, mm, a, b, axis=1)
# Output wrong size in axis.
out = np.empty((1, 2, 3), dtype=a.dtype)
assert_raises(ValueError, umt.cumsum, a, out=out, axis=0)
# Regular ufuncs should not accept axis.
assert_raises(TypeError, np.add, 1., 1., axis=0)
def test_keepdims_argument(self):
# inner1d signature: '(i),(i)->()'
inner1d = umt.inner1d
a = np.arange(27.).reshape((3, 3, 3))
b = np.arange(10., 19.).reshape((3, 1, 3))
c = inner1d(a, b)
assert_array_equal(c, (a * b).sum(-1))
c = inner1d(a, b, keepdims=False)
assert_array_equal(c, (a * b).sum(-1))
c = inner1d(a, b, keepdims=True)
assert_array_equal(c, (a * b).sum(-1, keepdims=True))
out = np.zeros_like(c)
d = inner1d(a, b, keepdims=True, out=out)
assert_(d is out)
assert_array_equal(d, c)
# Now combined with axis and axes.
c = inner1d(a, b, axis=-1, keepdims=False)
assert_array_equal(c, (a * b).sum(-1, keepdims=False))
c = inner1d(a, b, axis=-1, keepdims=True)
assert_array_equal(c, (a * b).sum(-1, keepdims=True))
c = inner1d(a, b, axis=0, keepdims=False)
assert_array_equal(c, (a * b).sum(0, keepdims=False))
c = inner1d(a, b, axis=0, keepdims=True)
assert_array_equal(c, (a * b).sum(0, keepdims=True))
c = inner1d(a, b, axes=[(-1,), (-1,), ()], keepdims=False)
assert_array_equal(c, (a * b).sum(-1))
c = inner1d(a, b, axes=[(-1,), (-1,), (-1,)], keepdims=True)
assert_array_equal(c, (a * b).sum(-1, keepdims=True))
c = inner1d(a, b, axes=[0, 0], keepdims=False)
assert_array_equal(c, (a * b).sum(0))
c = inner1d(a, b, axes=[0, 0, 0], keepdims=True)
assert_array_equal(c, (a * b).sum(0, keepdims=True))
c = inner1d(a, b, axes=[0, 2], keepdims=False)
assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
c = inner1d(a, b, axes=[0, 2], keepdims=True)
assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1,
keepdims=True))
c = inner1d(a, b, axes=[0, 2, 2], keepdims=True)
assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1,
keepdims=True))
c = inner1d(a, b, axes=[0, 2, 0], keepdims=True)
assert_array_equal(c, (a * b.transpose(2, 0, 1)).sum(0, keepdims=True))
# Hardly useful, but should work.
c = inner1d(a, b, axes=[0, 2, 1], keepdims=True)
assert_array_equal(c, (a.transpose(1, 0, 2) * b.transpose(0, 2, 1))
.sum(1, keepdims=True))
# Check with two core dimensions.
a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis]
expected = uml.det(a)
c = uml.det(a, keepdims=False)
assert_array_equal(c, expected)
c = uml.det(a, keepdims=True)
assert_array_equal(c, expected[:, np.newaxis, np.newaxis])
a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis]
expected_s, expected_l = uml.slogdet(a)
cs, cl = uml.slogdet(a, keepdims=False)
assert_array_equal(cs, expected_s)
assert_array_equal(cl, expected_l)
cs, cl = uml.slogdet(a, keepdims=True)
assert_array_equal(cs, expected_s[:, np.newaxis, np.newaxis])
assert_array_equal(cl, expected_l[:, np.newaxis, np.newaxis])
# Sanity check on innerwt.
a = np.arange(6).reshape((2, 3))
b = np.arange(10, 16).reshape((2, 3))
w = np.arange(20, 26).reshape((2, 3))
assert_array_equal(umt.innerwt(a, b, w, keepdims=True),
np.sum(a * b * w, axis=-1, keepdims=True))
assert_array_equal(umt.innerwt(a, b, w, axis=0, keepdims=True),
np.sum(a * b * w, axis=0, keepdims=True))
# Check errors.
# Not a boolean
assert_raises(TypeError, inner1d, a, b, keepdims='true')
# More than 1 core dimension, and core output dimensions.
mm = umt.matrix_multiply
assert_raises(TypeError, mm, a, b, keepdims=True)
assert_raises(TypeError, mm, a, b, keepdims=False)
# Regular ufuncs should not accept keepdims.
assert_raises(TypeError, np.add, 1., 1., keepdims=False)
def test_innerwt(self):
a = np.arange(6).reshape((2, 3))
b = np.arange(10, 16).reshape((2, 3))
w = np.arange(20, 26).reshape((2, 3))
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
a = np.arange(100, 124).reshape((2, 3, 4))
b = np.arange(200, 224).reshape((2, 3, 4))
w = np.arange(300, 324).reshape((2, 3, 4))
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
def test_innerwt_empty(self):
"""Test generalized ufunc with zero-sized operands"""
a = np.array([], dtype='f8')
b = np.array([], dtype='f8')
w = np.array([], dtype='f8')
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
def test_cross1d(self):
"""Test with fixed-sized signature."""
a = np.eye(3)
assert_array_equal(umt.cross1d(a, a), np.zeros((3, 3)))
out = np.zeros((3, 3))
result = umt.cross1d(a[0], a, out)
assert_(result is out)
assert_array_equal(result, np.vstack((np.zeros(3), a[2], -a[1])))
assert_raises(ValueError, umt.cross1d, np.eye(4), np.eye(4))
assert_raises(ValueError, umt.cross1d, a, np.arange(4.))
# Wrong output core dimension.
assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros((3, 4)))
# Wrong output broadcast dimension (see gh-15139).
assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros(3))
def test_can_ignore_signature(self):
# Comparing the effects of ? in signature:
# matrix_multiply: (m,n),(n,p)->(m,p) # all must be there.
# matmul: (m?,n),(n,p?)->(m?,p?) # allow missing m, p.
mat = np.arange(12).reshape((2, 3, 2))
single_vec = np.arange(2)
col_vec = single_vec[:, np.newaxis]
col_vec_array = np.arange(8).reshape((2, 2, 2, 1)) + 1
# matrix @ single column vector with proper dimension
mm_col_vec = umt.matrix_multiply(mat, col_vec)
# matmul does the same thing
matmul_col_vec = umt.matmul(mat, col_vec)
assert_array_equal(matmul_col_vec, mm_col_vec)
# matrix @ vector without dimension making it a column vector.
# matrix multiply fails -> missing core dim.
assert_raises(ValueError, umt.matrix_multiply, mat, single_vec)
# matmul mimicker passes, and returns a vector.
matmul_col = umt.matmul(mat, single_vec)
assert_array_equal(matmul_col, mm_col_vec.squeeze())
# Now with a column array: same as for column vector,
# broadcasting sensibly.
mm_col_vec = umt.matrix_multiply(mat, col_vec_array)
matmul_col_vec = umt.matmul(mat, col_vec_array)
assert_array_equal(matmul_col_vec, mm_col_vec)
# As above, but for row vector
single_vec = np.arange(3)
row_vec = single_vec[np.newaxis, :]
row_vec_array = np.arange(24).reshape((4, 2, 1, 1, 3)) + 1
# row vector @ matrix
mm_row_vec = umt.matrix_multiply(row_vec, mat)
matmul_row_vec = umt.matmul(row_vec, mat)
assert_array_equal(matmul_row_vec, mm_row_vec)
# single row vector @ matrix
assert_raises(ValueError, umt.matrix_multiply, single_vec, mat)
matmul_row = umt.matmul(single_vec, mat)
assert_array_equal(matmul_row, mm_row_vec.squeeze())
# row vector array @ matrix
mm_row_vec = umt.matrix_multiply(row_vec_array, mat)
matmul_row_vec = umt.matmul(row_vec_array, mat)
assert_array_equal(matmul_row_vec, mm_row_vec)
# Now for vector combinations
# row vector @ column vector
col_vec = row_vec.T
col_vec_array = row_vec_array.swapaxes(-2, -1)
mm_row_col_vec = umt.matrix_multiply(row_vec, col_vec)
matmul_row_col_vec = umt.matmul(row_vec, col_vec)
assert_array_equal(matmul_row_col_vec, mm_row_col_vec)
# single row vector @ single col vector
assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec)
matmul_row_col = umt.matmul(single_vec, single_vec)
assert_array_equal(matmul_row_col, mm_row_col_vec.squeeze())
        # row vector array @ column vector array
mm_row_col_array = umt.matrix_multiply(row_vec_array, col_vec_array)
matmul_row_col_array = umt.matmul(row_vec_array, col_vec_array)
assert_array_equal(matmul_row_col_array, mm_row_col_array)
# Finally, check that things are *not* squeezed if one gives an
# output.
out = np.zeros_like(mm_row_col_array)
out = umt.matrix_multiply(row_vec_array, col_vec_array, out=out)
assert_array_equal(out, mm_row_col_array)
out[:] = 0
out = umt.matmul(row_vec_array, col_vec_array, out=out)
assert_array_equal(out, mm_row_col_array)
# And check one cannot put missing dimensions back.
out = np.zeros_like(mm_row_col_vec)
assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec,
out)
# But fine for matmul, since it is just a broadcast.
out = umt.matmul(single_vec, single_vec, out)
assert_array_equal(out, mm_row_col_vec.squeeze())
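    def test_can_ignore_signature_public_matmul_sketch(self):
        # Illustrative sketch (added for exposition, not part of the upstream
        # suite): the public np.matmul shows the same `?` behaviour, since its
        # gufunc signature is (n?,k),(k,m?)->(n?,m?); a missing core dimension
        # is squeezed from the output.
        mat = np.arange(12.).reshape(2, 3, 2)
        vec = np.arange(2.)
        assert_equal(np.matmul(mat, vec).shape, (2, 3))
        assert_equal(np.matmul(vec, mat.swapaxes(-1, -2)).shape, (2, 3))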
def test_matrix_multiply(self):
self.compare_matrix_multiply_results(np.int64)
self.compare_matrix_multiply_results(np.double)
def test_matrix_multiply_umath_empty(self):
res = umt.matrix_multiply(np.ones((0, 10)), np.ones((10, 0)))
assert_array_equal(res, np.zeros((0, 0)))
res = umt.matrix_multiply(np.ones((10, 0)), np.ones((0, 10)))
assert_array_equal(res, np.zeros((10, 10)))
def compare_matrix_multiply_results(self, tp):
d1 = np.array(np.random.rand(2, 3, 4), dtype=tp)
d2 = np.array(np.random.rand(2, 3, 4), dtype=tp)
msg = "matrix multiply on type %s" % d1.dtype.name
def permute_n(n):
if n == 1:
return ([0],)
ret = ()
base = permute_n(n-1)
for perm in base:
for i in range(n):
new = perm + [n-1]
new[n-1] = new[i]
new[i] = n-1
ret += (new,)
return ret
def slice_n(n):
if n == 0:
return ((),)
ret = ()
base = slice_n(n-1)
for sl in base:
ret += (sl+(slice(None),),)
ret += (sl+(slice(0, 1),),)
return ret
def broadcastable(s1, s2):
return s1 == s2 or s1 == 1 or s2 == 1
permute_3 = permute_n(3)
slice_3 = slice_n(3) + ((slice(None, None, -1),)*3,)
ref = True
for p1 in permute_3:
for p2 in permute_3:
for s1 in slice_3:
for s2 in slice_3:
a1 = d1.transpose(p1)[s1]
a2 = d2.transpose(p2)[s2]
ref = ref and a1.base is not None
ref = ref and a2.base is not None
if (a1.shape[-1] == a2.shape[-2] and
broadcastable(a1.shape[0], a2.shape[0])):
assert_array_almost_equal(
umt.matrix_multiply(a1, a2),
np.sum(a2[..., np.newaxis].swapaxes(-3, -1) *
a1[..., np.newaxis,:], axis=-1),
err_msg=msg + ' %s %s' % (str(a1.shape),
str(a2.shape)))
assert_equal(ref, True, err_msg="reference check")
def test_euclidean_pdist(self):
a = np.arange(12, dtype=float).reshape(4, 3)
out = np.empty((a.shape[0] * (a.shape[0] - 1) // 2,), dtype=a.dtype)
umt.euclidean_pdist(a, out)
b = np.sqrt(np.sum((a[:, None] - a)**2, axis=-1))
b = b[~np.tri(a.shape[0], dtype=bool)]
assert_almost_equal(out, b)
# An output array is required to determine p with signature (n,d)->(p)
assert_raises(ValueError, umt.euclidean_pdist, a)
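    def test_euclidean_pdist_out_size_sketch(self):
        # Illustrative sketch (added for exposition, not part of the upstream
        # suite): for n points the condensed distance vector above has
        # p = n * (n - 1) / 2 entries, here 4 * 3 / 2 == 6.
        a = np.zeros((4, 3))
        out = np.empty(6)
        umt.euclidean_pdist(a, out)
        assert_equal(out, np.zeros(6))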
def test_cumsum(self):
a = np.arange(10)
result = umt.cumsum(a)
assert_array_equal(result, a.cumsum())
def test_object_logical(self):
a = np.array([3, None, True, False, "test", ""], dtype=object)
assert_equal(np.logical_or(a, None),
np.array([x or None for x in a], dtype=object))
assert_equal(np.logical_or(a, True),
np.array([x or True for x in a], dtype=object))
assert_equal(np.logical_or(a, 12),
np.array([x or 12 for x in a], dtype=object))
assert_equal(np.logical_or(a, "blah"),
np.array([x or "blah" for x in a], dtype=object))
assert_equal(np.logical_and(a, None),
np.array([x and None for x in a], dtype=object))
assert_equal(np.logical_and(a, True),
np.array([x and True for x in a], dtype=object))
assert_equal(np.logical_and(a, 12),
np.array([x and 12 for x in a], dtype=object))
assert_equal(np.logical_and(a, "blah"),
np.array([x and "blah" for x in a], dtype=object))
assert_equal(np.logical_not(a),
np.array([not x for x in a], dtype=object))
assert_equal(np.logical_or.reduce(a), 3)
assert_equal(np.logical_and.reduce(a), None)
def test_object_comparison(self):
class HasComparisons:
def __eq__(self, other):
return '=='
arr0d = np.array(HasComparisons())
assert_equal(arr0d == arr0d, True)
assert_equal(np.equal(arr0d, arr0d), True) # normal behavior is a cast
arr1d = np.array([HasComparisons()])
assert_equal(arr1d == arr1d, np.array([True]))
assert_equal(np.equal(arr1d, arr1d), np.array([True])) # normal behavior is a cast
assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['==']))
def test_object_array_reduction(self):
# Reductions on object arrays
a = np.array(['a', 'b', 'c'], dtype=object)
assert_equal(np.sum(a), 'abc')
assert_equal(np.max(a), 'c')
assert_equal(np.min(a), 'a')
a = np.array([True, False, True], dtype=object)
assert_equal(np.sum(a), 2)
assert_equal(np.prod(a), 0)
assert_equal(np.any(a), True)
assert_equal(np.all(a), False)
assert_equal(np.max(a), True)
assert_equal(np.min(a), False)
assert_equal(np.array([[1]], dtype=object).sum(), 1)
assert_equal(np.array([[[1, 2]]], dtype=object).sum((0, 1)), [1, 2])
assert_equal(np.array([1], dtype=object).sum(initial=1), 2)
assert_equal(np.array([[1], [2, 3]], dtype=object)
.sum(initial=[0], where=[False, True]), [0, 2, 3])
def test_object_array_accumulate_inplace(self):
# Checks that in-place accumulates work, see also gh-7402
arr = np.ones(4, dtype=object)
arr[:] = [[1] for i in range(4)]
# Twice reproduced also for tuples:
np.add.accumulate(arr, out=arr)
np.add.accumulate(arr, out=arr)
assert_array_equal(arr,
np.array([[1]*i for i in [1, 3, 6, 10]], dtype=object),
)
# And the same if the axis argument is used
arr = np.ones((2, 4), dtype=object)
arr[0, :] = [[2] for i in range(4)]
np.add.accumulate(arr, out=arr, axis=-1)
np.add.accumulate(arr, out=arr, axis=-1)
assert_array_equal(arr[0, :],
np.array([[2]*i for i in [1, 3, 6, 10]], dtype=object),
)
def test_object_array_accumulate_failure(self):
# Typical accumulation on object works as expected:
res = np.add.accumulate(np.array([1, 0, 2], dtype=object))
assert_array_equal(res, np.array([1, 1, 3], dtype=object))
# But errors are propagated from the inner-loop if they occur:
with pytest.raises(TypeError):
np.add.accumulate([1, None, 2])
def test_object_array_reduceat_inplace(self):
# Checks that in-place reduceats work, see also gh-7465
arr = np.empty(4, dtype=object)
arr[:] = [[1] for i in range(4)]
out = np.empty(4, dtype=object)
out[:] = [[1] for i in range(4)]
np.add.reduceat(arr, np.arange(4), out=arr)
np.add.reduceat(arr, np.arange(4), out=arr)
assert_array_equal(arr, out)
# And the same if the axis argument is used
arr = np.ones((2, 4), dtype=object)
arr[0, :] = [[2] for i in range(4)]
out = np.ones((2, 4), dtype=object)
out[0, :] = [[2] for i in range(4)]
np.add.reduceat(arr, np.arange(4), out=arr, axis=-1)
np.add.reduceat(arr, np.arange(4), out=arr, axis=-1)
assert_array_equal(arr, out)
def test_object_array_reduceat_failure(self):
# Reduceat works as expected when no invalid operation occurs (None is
# not involved in an operation here)
res = np.add.reduceat(np.array([1, None, 2], dtype=object), [1, 2])
assert_array_equal(res, np.array([None, 2], dtype=object))
# But errors when None would be involved in an operation:
with pytest.raises(TypeError):
np.add.reduceat([1, None, 2], [0, 2])
def test_zerosize_reduction(self):
# Test with default dtype and object dtype
for a in [[], np.array([], dtype=object)]:
assert_equal(np.sum(a), 0)
assert_equal(np.prod(a), 1)
assert_equal(np.any(a), False)
assert_equal(np.all(a), True)
assert_raises(ValueError, np.max, a)
assert_raises(ValueError, np.min, a)
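    def test_zerosize_reduction_identity_sketch(self):
        # Illustrative sketch (added for exposition, not part of the upstream
        # suite): the empty-reduction results above are simply the ufuncs'
        # identities, while max/min have none and so must raise.
        assert_equal(np.add.identity, 0)
        assert_equal(np.multiply.identity, 1)
        assert_(np.maximum.identity is None)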
def test_axis_out_of_bounds(self):
a = np.array([False, False])
assert_raises(np.AxisError, a.all, axis=1)
a = np.array([False, False])
assert_raises(np.AxisError, a.all, axis=-2)
a = np.array([False, False])
assert_raises(np.AxisError, a.any, axis=1)
a = np.array([False, False])
assert_raises(np.AxisError, a.any, axis=-2)
def test_scalar_reduction(self):
# The functions 'sum', 'prod', etc allow specifying axis=0
# even for scalars
assert_equal(np.sum(3, axis=0), 3)
assert_equal(np.prod(3.5, axis=0), 3.5)
assert_equal(np.any(True, axis=0), True)
assert_equal(np.all(False, axis=0), False)
assert_equal(np.max(3, axis=0), 3)
assert_equal(np.min(2.5, axis=0), 2.5)
# Check scalar behaviour for ufuncs without an identity
assert_equal(np.power.reduce(3), 3)
# Make sure that scalars are coming out from this operation
assert_(type(np.prod(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.sum(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.max(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.min(np.float32(2.5), axis=0)) is np.float32)
# check if scalars/0-d arrays get cast
assert_(type(np.any(0, axis=0)) is np.bool_)
# assert that 0-d arrays get wrapped
class MyArray(np.ndarray):
pass
a = np.array(1).view(MyArray)
assert_(type(np.any(a)) is MyArray)
def test_casting_out_param(self):
# Test that it's possible to do casts on output
a = np.ones((200, 100), np.int64)
b = np.ones((200, 100), np.int64)
c = np.ones((200, 100), np.float64)
np.add(a, b, out=c)
assert_equal(c, 2)
a = np.zeros(65536)
b = np.zeros(65536, dtype=np.float32)
np.subtract(a, 0, out=b)
assert_equal(b, 0)
def test_where_param(self):
# Test that the where= ufunc parameter works with regular arrays
a = np.arange(7)
b = np.ones(7)
c = np.zeros(7)
np.add(a, b, out=c, where=(a % 2 == 1))
assert_equal(c, [0, 2, 0, 4, 0, 6, 0])
a = np.arange(4).reshape(2, 2) + 2
np.power(a, [2, 3], out=a, where=[[0, 1], [1, 0]])
assert_equal(a, [[2, 27], [16, 5]])
# Broadcasting the where= parameter
np.subtract(a, 2, out=a, where=[True, False])
assert_equal(a, [[0, 27], [14, 5]])
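    def test_where_param_untouched_out_sketch(self):
        # Illustrative sketch (added for exposition, not part of the upstream
        # suite): elements of `out` where the mask is False are simply left
        # untouched, which is why passing an explicit `out` matters here.
        out = np.full(3, -1.)
        np.add(np.ones(3), np.ones(3), out=out, where=[True, False, True])
        assert_equal(out, [2., -1., 2.])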
def test_where_param_buffer_output(self):
# This test is temporarily skipped because it requires
# adding masking features to the nditer to work properly
# With casting on output
a = np.ones(10, np.int64)
b = np.ones(10, np.int64)
c = 1.5 * np.ones(10, np.float64)
np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0])
assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5])
def test_where_param_alloc(self):
# With casting and allocated output
a = np.array([1], dtype=np.int64)
m = np.array([True], dtype=bool)
assert_equal(np.sqrt(a, where=m), [1])
# No casting and allocated output
a = np.array([1], dtype=np.float64)
m = np.array([True], dtype=bool)
assert_equal(np.sqrt(a, where=m), [1])
def test_where_with_broadcasting(self):
# See gh-17198
a = np.random.random((5000, 4))
b = np.random.random((5000, 1))
where = a > 0.3
out = np.full_like(a, 0)
np.less(a, b, where=where, out=out)
b_where = np.broadcast_to(b, a.shape)[where]
assert_array_equal((a[where] < b_where), out[where].astype(bool))
assert not out[~where].any() # outside mask, out remains all 0
def check_identityless_reduction(self, a):
# np.minimum.reduce is an identityless reduction
# Verify that it sees the zero at various positions
a[...] = 1
a[1, 0, 0] = 0
assert_equal(np.minimum.reduce(a, axis=None), 0)
assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(1, 2)), [1, 0])
assert_equal(np.minimum.reduce(a, axis=0),
[[0, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=1),
[[1, 1, 1, 1], [0, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=2),
[[1, 1, 1], [0, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=()), a)
a[...] = 1
a[0, 1, 0] = 0
assert_equal(np.minimum.reduce(a, axis=None), 0)
assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(0, 2)), [1, 0, 1])
assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1])
assert_equal(np.minimum.reduce(a, axis=0),
[[1, 1, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=1),
[[0, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=2),
[[1, 0, 1], [1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=()), a)
a[...] = 1
a[0, 0, 1] = 0
assert_equal(np.minimum.reduce(a, axis=None), 0)
assert_equal(np.minimum.reduce(a, axis=(0, 1)), [1, 0, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1])
assert_equal(np.minimum.reduce(a, axis=0),
[[1, 0, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=1),
[[1, 0, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=2),
[[0, 1, 1], [1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=()), a)
@requires_memory(6 * 1024**3)
def test_identityless_reduction_huge_array(self):
# Regression test for gh-20921 (copying identity incorrectly failed)
arr = np.zeros((2, 2**31), 'uint8')
arr[:, 0] = [1, 3]
arr[:, -1] = [4, 1]
res = np.maximum.reduce(arr, axis=0)
del arr
assert res[0] == 3
assert res[-1] == 4
def test_identityless_reduction_corder(self):
a = np.empty((2, 3, 4), order='C')
self.check_identityless_reduction(a)
def test_identityless_reduction_forder(self):
a = np.empty((2, 3, 4), order='F')
self.check_identityless_reduction(a)
def test_identityless_reduction_otherorder(self):
a = np.empty((2, 4, 3), order='C').swapaxes(1, 2)
self.check_identityless_reduction(a)
def test_identityless_reduction_noncontig(self):
a = np.empty((3, 5, 4), order='C').swapaxes(1, 2)
a = a[1:, 1:, 1:]
self.check_identityless_reduction(a)
def test_identityless_reduction_noncontig_unaligned(self):
a = np.empty((3*4*5*8 + 1,), dtype='i1')
a = a[1:].view(dtype='f8')
a.shape = (3, 4, 5)
a = a[1:, 1:, 1:]
self.check_identityless_reduction(a)
def test_initial_reduction(self):
# np.minimum.reduce is an identityless reduction
# For cases like np.maximum(np.abs(...), initial=0)
# More generally, a supremum over non-negative numbers.
assert_equal(np.maximum.reduce([], initial=0), 0)
# For cases like reduction of an empty array over the reals.
assert_equal(np.minimum.reduce([], initial=np.inf), np.inf)
assert_equal(np.maximum.reduce([], initial=-np.inf), -np.inf)
# Random tests
assert_equal(np.minimum.reduce([5], initial=4), 4)
assert_equal(np.maximum.reduce([4], initial=5), 5)
assert_equal(np.maximum.reduce([5], initial=4), 5)
assert_equal(np.minimum.reduce([4], initial=5), 4)
# Check initial=None raises ValueError for both types of ufunc reductions
assert_raises(ValueError, np.minimum.reduce, [], initial=None)
assert_raises(ValueError, np.add.reduce, [], initial=None)
# Check that np._NoValue gives default behavior.
assert_equal(np.add.reduce([], initial=np._NoValue), 0)
# Check that initial kwarg behaves as intended for dtype=object
a = np.array([10], dtype=object)
res = np.add.reduce(a, initial=5)
assert_equal(res, 15)
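    def test_initial_reduction_nonempty_sketch(self):
        # Illustrative sketch (added for exposition, not part of the upstream
        # suite): initial= also participates in non-empty reductions, acting
        # like an extra first element.
        assert_equal(np.add.reduce([1, 2, 3], initial=10), 16)
        assert_equal(np.maximum.reduce([1, 2, 3], initial=10), 10)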
@pytest.mark.parametrize('axis', (0, 1, None))
@pytest.mark.parametrize('where', (np.array([False, True, True]),
np.array([[True], [False], [True]]),
np.array([[True, False, False],
[False, True, False],
[False, True, True]])))
def test_reduction_with_where(self, axis, where):
a = np.arange(9.).reshape(3, 3)
a_copy = a.copy()
a_check = np.zeros_like(a)
np.positive(a, out=a_check, where=where)
res = np.add.reduce(a, axis=axis, where=where)
check = a_check.sum(axis)
assert_equal(res, check)
# Check we do not overwrite elements of a internally.
assert_array_equal(a, a_copy)
@pytest.mark.parametrize(('axis', 'where'),
((0, np.array([True, False, True])),
(1, [True, True, False]),
(None, True)))
@pytest.mark.parametrize('initial', (-np.inf, 5.))
def test_reduction_with_where_and_initial(self, axis, where, initial):
a = np.arange(9.).reshape(3, 3)
a_copy = a.copy()
a_check = np.full(a.shape, -np.inf)
np.positive(a, out=a_check, where=where)
res = np.maximum.reduce(a, axis=axis, where=where, initial=initial)
check = a_check.max(axis, initial=initial)
assert_equal(res, check)
def test_reduction_where_initial_needed(self):
a = np.arange(9.).reshape(3, 3)
m = [False, True, False]
assert_raises(ValueError, np.maximum.reduce, a, where=m)
def test_identityless_reduction_nonreorderable(self):
a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]])
res = np.divide.reduce(a, axis=0)
assert_equal(res, [8.0, 4.0, 8.0])
res = np.divide.reduce(a, axis=1)
assert_equal(res, [2.0, 8.0])
res = np.divide.reduce(a, axis=())
assert_equal(res, a)
assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1))
def test_reduce_zero_axis(self):
        # If we have an n x m array and do a reduction with axis=1, then we are
# doing n reductions, and each reduction takes an m-element array. For
# a reduction operation without an identity, then:
# n > 0, m > 0: fine
# n = 0, m > 0: fine, doing 0 reductions of m-element arrays
# n > 0, m = 0: can't reduce a 0-element array, ValueError
# n = 0, m = 0: can't reduce a 0-element array, ValueError (for
# consistency with the above case)
        # This test doesn't actually look at return values, it just checks
        # that we get an error in exactly those cases where we expect one,
        # and assumes the calculations themselves are done correctly.
def ok(f, *args, **kwargs):
f(*args, **kwargs)
def err(f, *args, **kwargs):
assert_raises(ValueError, f, *args, **kwargs)
def t(expect, func, n, m):
expect(func, np.zeros((n, m)), axis=1)
expect(func, np.zeros((m, n)), axis=0)
expect(func, np.zeros((n // 2, n // 2, m)), axis=2)
expect(func, np.zeros((n // 2, m, n // 2)), axis=1)
expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2))
expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2))
expect(func, np.zeros((m // 3, m // 3, m // 3,
n // 2, n // 2)),
axis=(0, 1, 2))
# Check what happens if the inner (resp. outer) dimensions are a
# mix of zero and non-zero:
expect(func, np.zeros((10, m, n)), axis=(0, 1))
expect(func, np.zeros((10, n, m)), axis=(0, 2))
expect(func, np.zeros((m, 10, n)), axis=0)
expect(func, np.zeros((10, m, n)), axis=1)
expect(func, np.zeros((10, n, m)), axis=2)
# np.maximum is just an arbitrary ufunc with no reduction identity
assert_equal(np.maximum.identity, None)
t(ok, np.maximum.reduce, 30, 30)
t(ok, np.maximum.reduce, 0, 30)
t(err, np.maximum.reduce, 30, 0)
t(err, np.maximum.reduce, 0, 0)
err(np.maximum.reduce, [])
np.maximum.reduce(np.zeros((0, 0)), axis=())
# all of the combinations are fine for a reduction that has an
# identity
t(ok, np.add.reduce, 30, 30)
t(ok, np.add.reduce, 0, 30)
t(ok, np.add.reduce, 30, 0)
t(ok, np.add.reduce, 0, 0)
np.add.reduce([])
np.add.reduce(np.zeros((0, 0)), axis=())
# OTOH, accumulate always makes sense for any combination of n and m,
# because it maps an m-element array to an m-element array. These
# tests are simpler because accumulate doesn't accept multiple axes.
for uf in (np.maximum, np.add):
uf.accumulate(np.zeros((30, 0)), axis=0)
uf.accumulate(np.zeros((0, 30)), axis=0)
uf.accumulate(np.zeros((30, 30)), axis=0)
uf.accumulate(np.zeros((0, 0)), axis=0)
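    def test_reduce_zero_axis_sketch(self):
        # Illustrative sketch (added for exposition, not part of the upstream
        # suite) of the n/m cases above: zero reductions of 3-element arrays
        # are fine, but reducing a 0-element array has no identity to fall
        # back on for np.maximum.
        assert_equal(np.maximum.reduce(np.zeros((0, 3)), axis=1).shape, (0,))
        assert_raises(ValueError, np.maximum.reduce, np.zeros((3, 0)), axis=1)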
def test_safe_casting(self):
        # In old versions of numpy, in-place operations used the 'unsafe'
        # casting rules. In versions >= 1.10, 'same_kind' is the default
        # and an exception is raised instead of a warning when 'same_kind'
        # is not satisfied.
a = np.array([1, 2, 3], dtype=int)
# Non-in-place addition is fine
assert_array_equal(assert_no_warnings(np.add, a, 1.1),
[2.1, 3.1, 4.1])
assert_raises(TypeError, np.add, a, 1.1, out=a)
def add_inplace(a, b):
a += b
assert_raises(TypeError, add_inplace, a, 1.1)
# Make sure that explicitly overriding the exception is allowed:
assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe")
assert_array_equal(a, [2, 3, 4])
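    def test_safe_casting_can_cast_sketch(self):
        # Illustrative sketch (added for exposition, not part of the upstream
        # suite): the 'same_kind' rule applied above can be queried directly
        # with np.can_cast.
        assert_(not np.can_cast(np.float64, np.int64, casting="same_kind"))
        assert_(np.can_cast(np.float64, np.float32, casting="same_kind"))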
def test_ufunc_custom_out(self):
# Test ufunc with built in input types and custom output type
a = np.array([0, 1, 2], dtype='i8')
b = np.array([0, 1, 2], dtype='i8')
c = np.empty(3, dtype=_rational_tests.rational)
# Output must be specified so numpy knows what
# ufunc signature to look for
result = _rational_tests.test_add(a, b, c)
target = np.array([0, 2, 4], dtype=_rational_tests.rational)
assert_equal(result, target)
# The new resolution means that we can (usually) find custom loops
# as long as they match exactly:
result = _rational_tests.test_add(a, b)
assert_equal(result, target)
        # This works even more generally, so long as the default common-dtype
        # promoter works out:
result = _rational_tests.test_add(a, b.astype(np.uint16), out=c)
assert_equal(result, target)
        # But it can be fooled, e.g. by using scalars, which forces legacy
        # type resolution to kick in, which then fails:
with assert_raises(TypeError):
_rational_tests.test_add(a, np.uint16(2))
def test_operand_flags(self):
a = np.arange(16, dtype='l').reshape(4, 4)
b = np.arange(9, dtype='l').reshape(3, 3)
opflag_tests.inplace_add(a[:-1, :-1], b)
assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7],
[14, 16, 18, 11], [12, 13, 14, 15]], dtype='l'))
a = np.array(0)
opflag_tests.inplace_add(a, 3)
assert_equal(a, 3)
opflag_tests.inplace_add(a, [3, 4])
assert_equal(a, 10)
def test_struct_ufunc(self):
import numpy.core._struct_ufunc_tests as struct_ufunc
a = np.array([(1, 2, 3)], dtype='u8,u8,u8')
b = np.array([(1, 2, 3)], dtype='u8,u8,u8')
result = struct_ufunc.add_triplet(a, b)
assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8'))
assert_raises(RuntimeError, struct_ufunc.register_fail)
def test_custom_ufunc(self):
a = np.array(
[_rational_tests.rational(1, 2),
_rational_tests.rational(1, 3),
_rational_tests.rational(1, 4)],
dtype=_rational_tests.rational)
b = np.array(
[_rational_tests.rational(1, 2),
_rational_tests.rational(1, 3),
_rational_tests.rational(1, 4)],
dtype=_rational_tests.rational)
result = _rational_tests.test_add_rationals(a, b)
expected = np.array(
[_rational_tests.rational(1),
_rational_tests.rational(2, 3),
_rational_tests.rational(1, 2)],
dtype=_rational_tests.rational)
assert_equal(result, expected)
def test_custom_ufunc_forced_sig(self):
# gh-9351 - looking for a non-first userloop would previously hang
with assert_raises(TypeError):
np.multiply(_rational_tests.rational(1), 1,
signature=(_rational_tests.rational, int, None))
def test_custom_array_like(self):
class MyThing:
__array_priority__ = 1000
rmul_count = 0
getitem_count = 0
def __init__(self, shape):
self.shape = shape
def __len__(self):
return self.shape[0]
def __getitem__(self, i):
MyThing.getitem_count += 1
if not isinstance(i, tuple):
i = (i,)
if len(i) > self.ndim:
raise IndexError("boo")
return MyThing(self.shape[len(i):])
def __rmul__(self, other):
MyThing.rmul_count += 1
return self
np.float64(5)*MyThing((3, 3))
assert_(MyThing.rmul_count == 1, MyThing.rmul_count)
assert_(MyThing.getitem_count <= 2, MyThing.getitem_count)
def test_inplace_fancy_indexing(self):
a = np.arange(10)
np.add.at(a, [2, 5, 2], 1)
assert_equal(a, [0, 1, 4, 3, 4, 6, 6, 7, 8, 9])
a = np.arange(10)
b = np.array([100, 100, 100])
np.add.at(a, [2, 5, 2], b)
assert_equal(a, [0, 1, 202, 3, 4, 105, 6, 7, 8, 9])
a = np.arange(9).reshape(3, 3)
b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]])
np.add.at(a, (slice(None), [1, 2, 1]), b)
assert_equal(a, [[0, 201, 102], [3, 404, 205], [6, 607, 308]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (slice(None), slice(None), [1, 2, 1]), b)
assert_equal(a,
[[[0, 401, 202],
[3, 404, 205],
[6, 407, 208]],
[[9, 410, 211],
[12, 413, 214],
[15, 416, 217]],
[[18, 419, 220],
[21, 422, 223],
[24, 425, 226]]])
a = np.arange(9).reshape(3, 3)
b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]])
np.add.at(a, ([1, 2, 1], slice(None)), b)
assert_equal(a, [[0, 1, 2], [403, 404, 405], [206, 207, 208]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (slice(None), [1, 2, 1], slice(None)), b)
assert_equal(a,
[[[0, 1, 2],
[203, 404, 605],
[106, 207, 308]],
[[9, 10, 11],
[212, 413, 614],
[115, 216, 317]],
[[18, 19, 20],
[221, 422, 623],
[124, 225, 326]]])
a = np.arange(9).reshape(3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (0, [1, 2, 1]), b)
assert_equal(a, [[0, 401, 202], [3, 4, 5], [6, 7, 8]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, ([1, 2, 1], 0, slice(None)), b)
assert_equal(a,
[[[0, 1, 2],
[3, 4, 5],
[6, 7, 8]],
[[209, 410, 611],
[12, 13, 14],
[15, 16, 17]],
[[118, 219, 320],
[21, 22, 23],
[24, 25, 26]]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (slice(None), slice(None), slice(None)), b)
assert_equal(a,
[[[100, 201, 302],
[103, 204, 305],
[106, 207, 308]],
[[109, 210, 311],
[112, 213, 314],
[115, 216, 317]],
[[118, 219, 320],
[121, 222, 323],
[124, 225, 326]]])
a = np.arange(10)
np.negative.at(a, [2, 5, 2])
assert_equal(a, [0, 1, 2, 3, 4, -5, 6, 7, 8, 9])
# Test 0-dim array
a = np.array(0)
np.add.at(a, (), 1)
assert_equal(a, 1)
assert_raises(IndexError, np.add.at, a, 0, 1)
assert_raises(IndexError, np.add.at, a, [], 1)
# Test mixed dtypes
a = np.arange(10)
np.power.at(a, [1, 2, 3, 2], 3.5)
assert_equal(a, np.array([0, 1, 4414, 46, 4, 5, 6, 7, 8, 9]))
# Test boolean indexing and boolean ufuncs
a = np.arange(10)
index = a % 2 == 0
np.equal.at(a, index, [0, 2, 4, 6, 8])
assert_equal(a, [1, 1, 1, 3, 1, 5, 1, 7, 1, 9])
# Test unary operator
a = np.arange(10, dtype='u4')
np.invert.at(a, [2, 5, 2])
assert_equal(a, [0, 1, 2, 3, 4, 5 ^ 0xffffffff, 6, 7, 8, 9])
# Test empty subspace
orig = np.arange(4)
a = orig[:, None][:, 0:0]
np.add.at(a, [0, 1], 3)
assert_array_equal(orig, np.arange(4))
# Test with swapped byte order
index = np.array([1, 2, 1], np.dtype('i').newbyteorder())
values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder())
np.add.at(values, index, 3)
assert_array_equal(values, [1, 8, 6, 4])
# Test exception thrown
values = np.array(['a', 1], dtype=object)
assert_raises(TypeError, np.add.at, values, [0, 1], 1)
assert_array_equal(values, np.array(['a', 1], dtype=object))
# Test multiple output ufuncs raise error, gh-5665
assert_raises(ValueError, np.modf.at, np.arange(10), [1])
# Test maximum
a = np.array([1, 2, 3])
np.maximum.at(a, [0], 0)
assert_equal(np.array([1, 2, 3]), a)
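    def test_inplace_fancy_indexing_buffered_sketch(self):
        # Illustrative sketch (added for exposition, not part of the upstream
        # suite): ufunc.at exists because fancy in-place operations buffer,
        # so repeated indices are applied only once there.
        a = np.zeros(3, dtype=int)
        a[[0, 0, 0]] += 1            # buffered: index 0 incremented once
        assert_equal(a, [1, 0, 0])
        np.add.at(a, [0, 0, 0], 1)   # unbuffered: incremented three times
        assert_equal(a, [4, 0, 0])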
def test_at_not_none_signature(self):
        # Test that ufuncs with a non-trivial signature raise a TypeError
a = np.ones((2, 2, 2))
b = np.ones((1, 2, 2))
assert_raises(TypeError, np.matmul.at, a, [0], b)
a = np.array([[[1, 2], [3, 4]]])
assert_raises(TypeError, np.linalg._umath_linalg.det.at, a, [0])
def test_reduce_arguments(self):
f = np.add.reduce
d = np.ones((5,2), dtype=int)
o = np.ones((2,), dtype=d.dtype)
r = o * 5
assert_equal(f(d), r)
# a, axis=0, dtype=None, out=None, keepdims=False
assert_equal(f(d, axis=0), r)
assert_equal(f(d, 0), r)
assert_equal(f(d, 0, dtype=None), r)
assert_equal(f(d, 0, dtype='i'), r)
assert_equal(f(d, 0, 'i'), r)
assert_equal(f(d, 0, None), r)
assert_equal(f(d, 0, None, out=None), r)
assert_equal(f(d, 0, None, out=o), r)
assert_equal(f(d, 0, None, o), r)
assert_equal(f(d, 0, None, None), r)
assert_equal(f(d, 0, None, None, keepdims=False), r)
assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape))
assert_equal(f(d, 0, None, None, False, 0), r)
assert_equal(f(d, 0, None, None, False, initial=0), r)
assert_equal(f(d, 0, None, None, False, 0, True), r)
assert_equal(f(d, 0, None, None, False, 0, where=True), r)
# multiple keywords
assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r)
assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r)
assert_equal(f(d, 0, None, out=None, keepdims=False), r)
assert_equal(f(d, 0, None, out=None, keepdims=False, initial=0,
where=True), r)
# too little
assert_raises(TypeError, f)
# too much
assert_raises(TypeError, f, d, 0, None, None, False, 0, True, 1)
# invalid axis
assert_raises(TypeError, f, d, "invalid")
assert_raises(TypeError, f, d, axis="invalid")
assert_raises(TypeError, f, d, axis="invalid", dtype=None,
keepdims=True)
# invalid dtype
assert_raises(TypeError, f, d, 0, "invalid")
assert_raises(TypeError, f, d, dtype="invalid")
assert_raises(TypeError, f, d, dtype="invalid", out=None)
# invalid out
assert_raises(TypeError, f, d, 0, None, "invalid")
assert_raises(TypeError, f, d, out="invalid")
assert_raises(TypeError, f, d, out="invalid", dtype=None)
# keepdims boolean, no invalid value
# assert_raises(TypeError, f, d, 0, None, None, "invalid")
# assert_raises(TypeError, f, d, keepdims="invalid", axis=0, dtype=None)
# invalid mix
assert_raises(TypeError, f, d, 0, keepdims="invalid", dtype="invalid",
out=None)
# invalid keyword
assert_raises(TypeError, f, d, axis=0, dtype=None, invalid=0)
assert_raises(TypeError, f, d, invalid=0)
assert_raises(TypeError, f, d, 0, keepdims=True, invalid="invalid",
out=None)
assert_raises(TypeError, f, d, axis=0, dtype=None, keepdims=True,
out=None, invalid=0)
assert_raises(TypeError, f, d, axis=0, dtype=None,
out=None, invalid=0)
def test_structured_equal(self):
# https://github.com/numpy/numpy/issues/4855
class MyA(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return getattr(ufunc, method)(*(input.view(np.ndarray)
for input in inputs), **kwargs)
a = np.arange(12.).reshape(4,3)
ra = a.view(dtype=('f8,f8,f8')).squeeze()
mra = ra.view(MyA)
target = np.array([ True, False, False, False], dtype=bool)
assert_equal(np.all(target == (mra == ra[0])), True)
def test_scalar_equal(self):
        # Scalar comparisons should always work, without deprecation warnings,
        # even when the ufunc fails.
a = np.array(0.)
b = np.array('a')
assert_(a != b)
assert_(b != a)
assert_(not (a == b))
assert_(not (b == a))
def test_NotImplemented_not_returned(self):
# See gh-5964 and gh-2091. Some of these functions are not operator
# related and were fixed for other reasons in the past.
binary_funcs = [
np.power, np.add, np.subtract, np.multiply, np.divide,
np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
np.maximum, np.minimum, np.mod,
np.greater, np.greater_equal, np.less, np.less_equal,
np.equal, np.not_equal]
a = np.array('1')
b = 1
c = np.array([1., 2.])
for f in binary_funcs:
assert_raises(TypeError, f, a, b)
assert_raises(TypeError, f, c, a)
@pytest.mark.parametrize("ufunc",
[np.logical_and, np.logical_or]) # logical_xor object loop is bad
@pytest.mark.parametrize("signature",
[(None, None, object), (object, None, None),
(None, object, None)])
def test_logical_ufuncs_object_signatures(self, ufunc, signature):
a = np.array([True, None, False], dtype=object)
res = ufunc(a, a, signature=signature)
assert res.dtype == object
@pytest.mark.parametrize("ufunc",
[np.logical_and, np.logical_or, np.logical_xor])
@pytest.mark.parametrize("signature",
[(bool, None, object), (object, None, bool),
(None, object, bool)])
def test_logical_ufuncs_mixed_object_signatures(self, ufunc, signature):
# Most mixed signatures fail (except those with bool out, e.g. `OO->?`)
a = np.array([True, None, False])
with pytest.raises(TypeError):
ufunc(a, a, signature=signature)
@pytest.mark.parametrize("ufunc",
[np.logical_and, np.logical_or, np.logical_xor])
def test_logical_ufuncs_support_anything(self, ufunc):
# The logical ufuncs support even input that can't be promoted:
a = np.array(b'1', dtype="V3")
c = np.array([1., 2.])
assert_array_equal(ufunc(a, c), ufunc([True, True], True))
assert ufunc.reduce(a) == True
# check that the output has no effect:
out = np.zeros(2, dtype=np.int32)
expected = ufunc([True, True], True).astype(out.dtype)
assert_array_equal(ufunc(a, c, out=out), expected)
out = np.zeros((), dtype=np.int32)
assert ufunc.reduce(a, out=out) == True
        # Last check, test reduction when out and a match (the complexity here
        # is that the "i,i->?" may seem right, but should not match).
a = np.array([3], dtype="i")
out = np.zeros((), dtype=a.dtype)
assert ufunc.reduce(a, out=out) == 1
@pytest.mark.parametrize("ufunc",
[np.logical_and, np.logical_or, np.logical_xor])
def test_logical_ufuncs_reject_string(self, ufunc):
"""
Logical ufuncs are normally well defined by working with the boolean
equivalent, i.e. casting all inputs to bools should work.
However, casting strings to bools is *currently* weird, because it
actually uses `bool(int(str))`. Thus we explicitly reject strings.
This test should succeed (and can probably just be removed) as soon as
string to bool casts are well defined in NumPy.
"""
with pytest.raises(TypeError, match="contain a loop with signature"):
ufunc(["1"], ["3"])
with pytest.raises(TypeError, match="contain a loop with signature"):
ufunc.reduce(["1", "2", "0"])
@pytest.mark.parametrize("ufunc",
[np.logical_and, np.logical_or, np.logical_xor])
def test_logical_ufuncs_out_cast_check(self, ufunc):
a = np.array('1')
c = np.array([1., 2.])
out = a.copy()
with pytest.raises(TypeError):
# It would be safe, but not equiv casting:
ufunc(a, c, out=out, casting="equiv")
def test_reducelike_byteorder_resolution(self):
# See gh-20699, byte-order changes need some extra care in the type
# resolution to make the following succeed:
arr_be = np.arange(10, dtype=">i8")
arr_le = np.arange(10, dtype="<i8")
assert np.add.reduce(arr_be) == np.add.reduce(arr_le)
assert_array_equal(np.add.accumulate(arr_be), np.add.accumulate(arr_le))
assert_array_equal(
np.add.reduceat(arr_be, [1]), np.add.reduceat(arr_le, [1]))
def test_reducelike_out_promotes(self):
# Check that the out argument to reductions is considered for
# promotion. See also gh-20455.
# Note that these paths could prefer `initial=` in the future and
# do not up-cast to the default integer for add and prod
arr = np.ones(1000, dtype=np.uint8)
out = np.zeros((), dtype=np.uint16)
assert np.add.reduce(arr, out=out) == 1000
arr[:10] = 2
assert np.multiply.reduce(arr, out=out) == 2**10
# For legacy dtypes, the signature currently has to be forced if `out=`
# is passed. The two paths below should differ, without `dtype=` the
# expected result should be: `np.prod(arr.astype("f8")).astype("f4")`!
arr = np.full(5, 2**25-1, dtype=np.int64)
# float32 and int64 promote to float64:
        res = np.multiply.reduce(arr, out=np.zeros((), dtype=np.float32))
# If `dtype=` is passed, the calculation is forced to float32:
single_res = np.zeros((), dtype=np.float32)
np.multiply.reduce(arr, out=single_res, dtype=np.float32)
assert single_res != res
def test_reducelike_output_needs_identical_cast(self):
        # Checks that the case where we have a simple byte-swap works; mainly
        # tests that this is not rejected directly.
# (interesting because we require descriptor identity in reducelikes).
arr = np.ones(20, dtype="f8")
out = np.empty((), dtype=arr.dtype.newbyteorder())
expected = np.add.reduce(arr)
np.add.reduce(arr, out=out)
assert_array_equal(expected, out)
# Check reduceat:
out = np.empty(2, dtype=arr.dtype.newbyteorder())
expected = np.add.reduceat(arr, [0, 1])
np.add.reduceat(arr, [0, 1], out=out)
assert_array_equal(expected, out)
# And accumulate:
out = np.empty(arr.shape, dtype=arr.dtype.newbyteorder())
expected = np.add.accumulate(arr)
np.add.accumulate(arr, out=out)
assert_array_equal(expected, out)
def test_reduce_noncontig_output(self):
# Check that reduction deals with non-contiguous output arrays
# appropriately.
#
# gh-8036
x = np.arange(7*13*8, dtype=np.int16).reshape(7, 13, 8)
x = x[4:6,1:11:6,1:5].transpose(1, 2, 0)
y_base = np.arange(4*4, dtype=np.int16).reshape(4, 4)
y = y_base[::2,:]
y_base_copy = y_base.copy()
r0 = np.add.reduce(x, out=y.copy(), axis=2)
r1 = np.add.reduce(x, out=y, axis=2)
# The results should match, and y_base shouldn't get clobbered
assert_equal(r0, r1)
assert_equal(y_base[1,:], y_base_copy[1,:])
assert_equal(y_base[3,:], y_base_copy[3,:])
@pytest.mark.parametrize("with_cast", [True, False])
def test_reduceat_and_accumulate_out_shape_mismatch(self, with_cast):
# Should raise an error mentioning "shape" or "size"
arr = np.arange(5)
out = np.arange(3) # definitely wrong shape
if with_cast:
# If a cast is necessary on the output, we can be sure to use
# the generic NpyIter (non-fast) path.
out = out.astype(np.float64)
with pytest.raises(ValueError, match="(shape|size)"):
np.add.reduceat(arr, [0, 3], out=out)
with pytest.raises(ValueError, match="(shape|size)"):
np.add.accumulate(arr, out=out)
@pytest.mark.parametrize('out_shape',
[(), (1,), (3,), (1, 1), (1, 3), (4, 3)])
@pytest.mark.parametrize('keepdims', [True, False])
@pytest.mark.parametrize('f_reduce', [np.add.reduce, np.minimum.reduce])
def test_reduce_wrong_dimension_output(self, f_reduce, keepdims, out_shape):
# Test that we're not incorrectly broadcasting dimensions.
# See gh-15144 (failed for np.add.reduce previously).
a = np.arange(12.).reshape(4, 3)
out = np.empty(out_shape, a.dtype)
correct_out = f_reduce(a, axis=0, keepdims=keepdims)
if out_shape != correct_out.shape:
with assert_raises(ValueError):
f_reduce(a, axis=0, out=out, keepdims=keepdims)
else:
check = f_reduce(a, axis=0, out=out, keepdims=keepdims)
assert_(check is out)
assert_array_equal(check, correct_out)
def test_reduce_output_does_not_broadcast_input(self):
# Test that the output shape cannot broadcast an input dimension
# (it never can add dimensions, but it might expand an existing one)
a = np.ones((1, 10))
        out_correct = np.empty((1, 1))
out_incorrect = np.empty((3, 1))
np.add.reduce(a, axis=-1, out=out_correct, keepdims=True)
np.add.reduce(a, axis=-1, out=out_correct[:, 0], keepdims=False)
with assert_raises(ValueError):
np.add.reduce(a, axis=-1, out=out_incorrect, keepdims=True)
with assert_raises(ValueError):
np.add.reduce(a, axis=-1, out=out_incorrect[:, 0], keepdims=False)
def test_reduce_output_subclass_ok(self):
class MyArr(np.ndarray):
pass
out = np.empty(())
np.add.reduce(np.ones(5), out=out) # no subclass, all fine
out = out.view(MyArr)
assert np.add.reduce(np.ones(5), out=out) is out
assert type(np.add.reduce(out)) is MyArr
def test_no_doc_string(self):
# gh-9337
assert_('\n' not in umt.inner1d_no_doc.__doc__)
def test_invalid_args(self):
# gh-7961
exc = pytest.raises(TypeError, np.sqrt, None)
# minimally check the exception text
assert exc.match('loop of ufunc does not support')
@pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')])
def test_nat_is_not_finite(self, nat):
try:
assert not np.isfinite(nat)
except TypeError:
pass # ok, just not implemented
@pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')])
def test_nat_is_nan(self, nat):
try:
assert np.isnan(nat)
except TypeError:
pass # ok, just not implemented
@pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')])
def test_nat_is_not_inf(self, nat):
try:
assert not np.isinf(nat)
except TypeError:
pass # ok, just not implemented
@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np)
if isinstance(getattr(np, x), np.ufunc)])
def test_ufunc_types(ufunc):
'''
    Check that all ufuncs return the correct type. Avoid
    object and boolean types since many operations are not defined
    for them.
    Choose the shape so even dot and matmul will succeed.
'''
for typ in ufunc.types:
# types is a list of strings like ii->i
if 'O' in typ or '?' in typ:
continue
inp, out = typ.split('->')
args = [np.ones((3, 3), t) for t in inp]
with warnings.catch_warnings(record=True):
warnings.filterwarnings("always")
res = ufunc(*args)
if isinstance(res, tuple):
outs = tuple(out)
assert len(res) == len(outs)
for r, t in zip(res, outs):
assert r.dtype == np.dtype(t)
else:
assert res.dtype == np.dtype(out)
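def test_ufunc_types_attribute_sketch():
    # Illustrative sketch (added for exposition, not part of the upstream
    # suite): ufunc.types spells out the available inner loops as
    # '<inputs>-><outputs>' type-character strings.
    assert 'dd->d' in np.add.types  # double + double -> double
    assert np.add.nin == 2 and np.add.nout == 1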
@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np)
if isinstance(getattr(np, x), np.ufunc)])
def test_ufunc_noncontiguous(ufunc):
'''
    Check that contiguous and non-contiguous calls to ufuncs
    have the same results for values in range(1, 7)
'''
for typ in ufunc.types:
# types is a list of strings like ii->i
if any(set('O?mM') & set(typ)):
# bool, object, datetime are too irregular for this simple test
continue
inp, out = typ.split('->')
args_c = [np.empty(6, t) for t in inp]
args_n = [np.empty(18, t)[::3] for t in inp]
for a in args_c:
a.flat = range(1,7)
for a in args_n:
a.flat = range(1,7)
with warnings.catch_warnings(record=True):
warnings.filterwarnings("always")
res_c = ufunc(*args_c)
res_n = ufunc(*args_n)
if len(out) == 1:
res_c = (res_c,)
res_n = (res_n,)
for c_ar, n_ar in zip(res_c, res_n):
dt = c_ar.dtype
if np.issubdtype(dt, np.floating):
            # for floating point results allow a small tolerance in comparisons
# since different algorithms (libm vs. intrinsics) can be used
# for different input strides
res_eps = np.finfo(dt).eps
tol = 2*res_eps
assert_allclose(res_c, res_n, atol=tol, rtol=tol)
else:
assert_equal(c_ar, n_ar)
@pytest.mark.parametrize('ufunc', [np.sign, np.equal])
def test_ufunc_warn_with_nan(ufunc):
# issue gh-15127
# test that calling certain ufuncs with a non-standard `nan` value does not
# emit a warning
# `b` holds a 64 bit signaling nan: the most significant bit of the
# significand is zero.
b = np.array([0x7ff0000000000001], 'i8').view('f8')
assert np.isnan(b)
if ufunc.nin == 1:
ufunc(b)
elif ufunc.nin == 2:
ufunc(b, b.copy())
else:
raise ValueError('ufunc with more than 2 inputs')
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_ufunc_casterrors():
# Tests that casting errors are correctly reported and buffers are
# cleared.
# The following array can be added to itself as an object array, but
# the result cannot be cast to an integer output:
value = 123 # relies on python cache (leak-check will still find it)
arr = np.array([value] * int(np.BUFSIZE * 1.5) +
["string"] +
[value] * int(1.5 * np.BUFSIZE), dtype=object)
out = np.ones(len(arr), dtype=np.intp)
count = sys.getrefcount(value)
with pytest.raises(ValueError):
# Output casting failure:
np.add(arr, arr, out=out, casting="unsafe")
assert count == sys.getrefcount(value)
    # output is unchanged after the error; this shows that the iteration
    # was aborted (this is not necessarily defined behaviour)
assert out[-1] == 1
with pytest.raises(ValueError):
# Input casting failure:
np.add(arr, arr, out=out, dtype=np.intp, casting="unsafe")
assert count == sys.getrefcount(value)
    # output is unchanged after the error; this shows that the iteration
    # was aborted (this is not necessarily defined behaviour)
assert out[-1] == 1
def test_trivial_loop_invalid_cast():
# This tests the fast-path "invalid cast", see gh-19904.
with pytest.raises(TypeError,
match="cast ufunc 'add' input 0"):
# the void dtype definitely cannot cast to double:
np.add(np.array(1, "i,i"), 3, signature="dd->d")
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
@pytest.mark.parametrize("offset",
[0, np.BUFSIZE//2, int(1.5*np.BUFSIZE)])
def test_reduce_casterrors(offset):
# Test reporting of casting errors in reductions, we test various
# offsets to where the casting error will occur, since these may occur
# at different places during the reduction procedure. For example
# the first item may be special.
value = 123 # relies on python cache (leak-check will still find it)
arr = np.array([value] * offset +
["string"] +
[value] * int(1.5 * np.BUFSIZE), dtype=object)
out = np.array(-1, dtype=np.intp)
count = sys.getrefcount(value)
with pytest.raises(ValueError, match="invalid literal"):
# This is an unsafe cast, but we currently always allow that.
# Note that the double loop is picked, but the cast fails.
np.add.reduce(arr, dtype=np.intp, out=out)
assert count == sys.getrefcount(value)
    # If an error occurred during casting, the operation is done at most until
    # the error occurs (in which case the result would be `value * offset`),
    # or -1 if the error happened immediately.
    # This is not defined behaviour; the output is invalid and thus undefined.
assert out[()] < value * offset
@pytest.mark.parametrize("method",
[np.add.accumulate, np.add.reduce,
pytest.param(lambda x: np.add.reduceat(x, [0]), id="reduceat"),
pytest.param(lambda x: np.log.at(x, [2]), id="at")])
def test_ufunc_methods_floaterrors(method):
    # adding inf and -inf (or log(-inf)) creates an invalid float and warns
arr = np.array([np.inf, 0, -np.inf])
with np.errstate(all="warn"):
with pytest.warns(RuntimeWarning, match="invalid value"):
method(arr)
arr = np.array([np.inf, 0, -np.inf])
with np.errstate(all="raise"):
with pytest.raises(FloatingPointError):
method(arr)
def _check_neg_zero(value):
if value != 0.0:
return False
if not np.signbit(value.real):
return False
if value.dtype.kind == "c":
return np.signbit(value.imag)
return True
@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
def test_addition_negative_zero(dtype):
dtype = np.dtype(dtype)
if dtype.kind == "c":
neg_zero = dtype.type(complex(-0.0, -0.0))
else:
neg_zero = dtype.type(-0.0)
arr = np.array(neg_zero)
arr2 = np.array(neg_zero)
assert _check_neg_zero(arr + arr2)
# In-place ops may end up on a different path (reduce path) see gh-21211
arr += arr2
assert _check_neg_zero(arr)
@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
@pytest.mark.parametrize("use_initial", [True, False])
def test_addition_reduce_negative_zero(dtype, use_initial):
dtype = np.dtype(dtype)
if dtype.kind == "c":
neg_zero = dtype.type(complex(-0.0, -0.0))
else:
neg_zero = dtype.type(-0.0)
kwargs = {}
if use_initial:
kwargs["initial"] = neg_zero
else:
pytest.xfail("-0. propagation in sum currently requires initial")
    # Test various lengths, in case SIMD paths or chunking play a role.
# 150 extends beyond the pairwise blocksize; probably not important.
for i in range(0, 150):
arr = np.array([neg_zero] * i, dtype=dtype)
res = np.sum(arr, **kwargs)
if i > 0 or use_initial:
assert _check_neg_zero(res)
else:
# `sum([])` should probably be 0.0 and not -0.0 like `sum([-0.0])`
assert not np.signbit(res.real)
assert not np.signbit(res.imag)
| 108,584 | Python | 40.747405 | 91 | 0.536202 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_array_interface.py | import sys
import pytest
import numpy as np
from numpy.testing import extbuild
@pytest.fixture
def get_module(tmp_path):
""" Some codes to generate data and manage temporary buffers use when
sharing with numpy via the array interface protocol.
"""
if not sys.platform.startswith('linux'):
pytest.skip('link fails on cygwin')
prologue = '''
#include <Python.h>
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include <numpy/arrayobject.h>
#include <stdio.h>
#include <math.h>
NPY_NO_EXPORT
void delete_array_struct(PyObject *cap) {
/* get the array interface structure */
PyArrayInterface *inter = (PyArrayInterface*)
PyCapsule_GetPointer(cap, NULL);
/* get the buffer by which data was shared */
double *ptr = (double*)PyCapsule_GetContext(cap);
/* for the purposes of the regression test set the elements
to nan */
for (npy_intp i = 0; i < inter->shape[0]; ++i)
ptr[i] = nan("");
/* free the shared buffer */
free(ptr);
/* free the array interface structure */
free(inter->shape);
free(inter);
fprintf(stderr, "delete_array_struct\\ncap = %ld inter = %ld"
" ptr = %ld\\n", (long)cap, (long)inter, (long)ptr);
}
'''
functions = [
("new_array_struct", "METH_VARARGS", """
long long n_elem = 0;
double value = 0.0;
if (!PyArg_ParseTuple(args, "Ld", &n_elem, &value)) {
Py_RETURN_NONE;
}
/* allocate and initialize the data to share with numpy */
long long n_bytes = n_elem*sizeof(double);
double *data = (double*)malloc(n_bytes);
if (!data) {
PyErr_Format(PyExc_MemoryError,
"Failed to malloc %lld bytes", n_bytes);
Py_RETURN_NONE;
}
for (long long i = 0; i < n_elem; ++i) {
data[i] = value;
}
/* calculate the shape and stride */
int nd = 1;
npy_intp *ss = (npy_intp*)malloc(2*nd*sizeof(npy_intp));
npy_intp *shape = ss;
npy_intp *stride = ss + nd;
shape[0] = n_elem;
stride[0] = sizeof(double);
/* construct the array interface */
PyArrayInterface *inter = (PyArrayInterface*)
malloc(sizeof(PyArrayInterface));
memset(inter, 0, sizeof(PyArrayInterface));
inter->two = 2;
inter->nd = nd;
inter->typekind = 'f';
inter->itemsize = sizeof(double);
inter->shape = shape;
inter->strides = stride;
inter->data = data;
inter->flags = NPY_ARRAY_WRITEABLE | NPY_ARRAY_NOTSWAPPED |
NPY_ARRAY_ALIGNED | NPY_ARRAY_C_CONTIGUOUS;
/* package into a capsule */
PyObject *cap = PyCapsule_New(inter, NULL, delete_array_struct);
/* save the pointer to the data */
PyCapsule_SetContext(cap, data);
fprintf(stderr, "new_array_struct\\ncap = %ld inter = %ld"
" ptr = %ld\\n", (long)cap, (long)inter, (long)data);
return cap;
""")
]
more_init = "import_array();"
try:
import array_interface_testing
return array_interface_testing
except ImportError:
pass
# if it does not exist, build and load it
return extbuild.build_and_import_extension('array_interface_testing',
functions,
prologue=prologue,
include_dirs=[np.get_include()],
build_dir=tmp_path,
more_init=more_init)
@pytest.mark.slow
def test_cstruct(get_module):
class data_source:
"""
        This class is for testing the timing of the PyCapsule destructor
        invoked when numpy releases its reference to the shared data as part
        of the numpy array interface protocol. If the PyCapsule destructor is
        called too early the shared data is freed and invalid memory accesses
        will occur.
"""
def __init__(self, size, value):
self.size = size
self.value = value
@property
def __array_struct__(self):
return get_module.new_array_struct(self.size, self.value)
# write to the same stream as the C code
stderr = sys.__stderr__
# used to validate the shared data.
expected_value = -3.1415
multiplier = -10000.0
# create some data to share with numpy via the array interface
# assign the data an expected value.
stderr.write(' ---- create an object to share data ---- \n')
buf = data_source(256, expected_value)
stderr.write(' ---- OK!\n\n')
# share the data
stderr.write(' ---- share data via the array interface protocol ---- \n')
arr = np.array(buf, copy=False)
stderr.write('arr.__array_interface___ = %s\n' % (
str(arr.__array_interface__)))
stderr.write('arr.base = %s\n' % (str(arr.base)))
stderr.write(' ---- OK!\n\n')
# release the source of the shared data. this will not release the data
# that was shared with numpy, that is done in the PyCapsule destructor.
stderr.write(' ---- destroy the object that shared data ---- \n')
buf = None
stderr.write(' ---- OK!\n\n')
# check that we got the expected data. If the PyCapsule destructor we
# defined was prematurely called then this test will fail because our
# destructor sets the elements of the array to NaN before free'ing the
# buffer. Reading the values here may also cause a SEGV
assert np.allclose(arr, expected_value)
# read the data. If the PyCapsule destructor we defined was prematurely
# called then reading the values here may cause a SEGV and will be reported
# as invalid reads by valgrind
stderr.write(' ---- read shared data ---- \n')
stderr.write('arr = %s\n' % (str(arr)))
stderr.write(' ---- OK!\n\n')
# write to the shared buffer. If the shared data was prematurely deleted
    # this may cause a SEGV and valgrind will report invalid writes
stderr.write(' ---- modify shared data ---- \n')
arr *= multiplier
expected_value *= multiplier
stderr.write('arr.__array_interface___ = %s\n' % (
str(arr.__array_interface__)))
stderr.write('arr.base = %s\n' % (str(arr.base)))
stderr.write(' ---- OK!\n\n')
# read the data. If the shared data was prematurely deleted this
    # may cause a SEGV and valgrind will report invalid reads
stderr.write(' ---- read modified shared data ---- \n')
stderr.write('arr = %s\n' % (str(arr)))
stderr.write(' ---- OK!\n\n')
# check that we got the expected data. If the PyCapsule destructor we
# defined was prematurely called then this test will fail because our
# destructor sets the elements of the array to NaN before free'ing the
# buffer. Reading the values here may also cause a SEGV
assert np.allclose(arr, expected_value)
# free the shared data, the PyCapsule destructor should run here
stderr.write(' ---- free shared data ---- \n')
arr = None
stderr.write(' ---- OK!\n\n')
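def test_dict_interface_sketch():
    # Illustrative sketch (added for exposition, not part of the upstream
    # suite): the same zero-copy sharing can be done from pure Python with
    # the dict-based __array_interface__ protocol, no C extension needed.
    src = np.arange(4.0)
    class Source:
        __array_interface__ = src.__array_interface__
    arr = np.asarray(Source())
    assert np.shares_memory(arr, src)
    arr[0] = -1.0
    assert src[0] == -1.0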
| 7,596 | Python | 34.009216 | 79 | 0.562138 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test__exceptions.py | """
Tests of the ._exceptions module. Primarily for exercising the __str__ methods.
"""
import pickle
import pytest
import numpy as np
_ArrayMemoryError = np.core._exceptions._ArrayMemoryError
_UFuncNoLoopError = np.core._exceptions._UFuncNoLoopError
class TestArrayMemoryError:
def test_pickling(self):
""" Test that _ArrayMemoryError can be pickled """
error = _ArrayMemoryError((1023,), np.dtype(np.uint8))
res = pickle.loads(pickle.dumps(error))
assert res._total_size == error._total_size
def test_str(self):
e = _ArrayMemoryError((1023,), np.dtype(np.uint8))
str(e) # not crashing is enough
# testing these properties is easier than testing the full string repr
def test__size_to_string(self):
""" Test e._size_to_string """
f = _ArrayMemoryError._size_to_string
Ki = 1024
assert f(0) == '0 bytes'
assert f(1) == '1 bytes'
assert f(1023) == '1023 bytes'
assert f(Ki) == '1.00 KiB'
assert f(Ki+1) == '1.00 KiB'
assert f(10*Ki) == '10.0 KiB'
assert f(int(999.4*Ki)) == '999. KiB'
assert f(int(1023.4*Ki)) == '1023. KiB'
assert f(int(1023.5*Ki)) == '1.00 MiB'
assert f(Ki*Ki) == '1.00 MiB'
        # 1023.9999 MiB should round to 1 GiB
assert f(int(Ki*Ki*Ki*0.9999)) == '1.00 GiB'
assert f(Ki*Ki*Ki*Ki*Ki*Ki) == '1.00 EiB'
# larger than sys.maxsize, adding larger prefixes isn't going to help
# anyway.
assert f(Ki*Ki*Ki*Ki*Ki*Ki*123456) == '123456. EiB'
def test__total_size(self):
""" Test e._total_size """
e = _ArrayMemoryError((1,), np.dtype(np.uint8))
assert e._total_size == 1
e = _ArrayMemoryError((2, 4), np.dtype((np.uint64, 16)))
assert e._total_size == 1024
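    def test_str_contents_sketch(self):
        # Illustrative sketch (added for exposition, not part of the upstream
        # suite): the size string tested above is embedded in the message.
        e = _ArrayMemoryError((1024, 1024), np.dtype(np.uint8))
        assert e._total_size == 1024 * 1024
        assert '1.00 MiB' in str(e)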
class TestUFuncNoLoopError:
def test_pickling(self):
""" Test that _UFuncNoLoopError can be pickled """
assert isinstance(pickle.dumps(_UFuncNoLoopError), bytes)
@pytest.mark.parametrize("args", [
(2, 1, None),
(2, 1, "test_prefix"),
("test message",),
])
class TestAxisError:
def test_attr(self, args):
"""Validate attribute types."""
exc = np.AxisError(*args)
if len(args) == 1:
assert exc.axis is None
assert exc.ndim is None
else:
axis, ndim, *_ = args
assert exc.axis == axis
assert exc.ndim == ndim
def test_pickling(self, args):
"""Test that `AxisError` can be pickled."""
exc = np.AxisError(*args)
exc2 = pickle.loads(pickle.dumps(exc))
assert type(exc) is type(exc2)
for name in ("axis", "ndim", "args"):
attr1 = getattr(exc, name)
attr2 = getattr(exc2, name)
assert attr1 == attr2, name
| 2,846 | Python | 30.988764 | 79 | 0.575193 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_item_selection.py | import sys
import numpy as np
from numpy.testing import (
assert_, assert_raises, assert_array_equal, HAS_REFCOUNT
)
class TestTake:
def test_simple(self):
a = [[1, 2], [3, 4]]
a_str = [[b'1', b'2'], [b'3', b'4']]
modes = ['raise', 'wrap', 'clip']
indices = [-1, 4]
index_arrays = [np.empty(0, dtype=np.intp),
np.empty(tuple(), dtype=np.intp),
np.empty((1, 1), dtype=np.intp)]
real_indices = {'raise': {-1: 1, 4: IndexError},
'wrap': {-1: 1, 4: 0},
'clip': {-1: 0, 4: 1}}
        # Currently all types but object use the same function generation,
        # so it should not be necessary to test all. However, also test a
        # non-refcounted struct in addition to object, which has a size that
        # hits the default (non-specialized) path.
types = int, object, np.dtype([('', 'i2', 3)])
for t in types:
# ta works, even if the array may be odd if buffer interface is used
ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t)
tresult = list(ta.T.copy())
for index_array in index_arrays:
if index_array.size != 0:
tresult[0].shape = (2,) + index_array.shape
tresult[1].shape = (2,) + index_array.shape
for mode in modes:
for index in indices:
real_index = real_indices[mode][index]
if real_index is IndexError and index_array.size != 0:
index_array.put(0, index)
assert_raises(IndexError, ta.take, index_array,
mode=mode, axis=1)
elif index_array.size != 0:
index_array.put(0, index)
res = ta.take(index_array, mode=mode, axis=1)
assert_array_equal(res, tresult[real_index])
else:
res = ta.take(index_array, mode=mode, axis=1)
assert_(res.shape == (2,) + index_array.shape)
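    def test_out_of_bounds_modes_sketch(self):
        # Illustrative sketch (added for exposition, not part of the upstream
        # suite): the three out-of-bounds modes exercised above, on a plain
        # 1-D take.
        a = np.array([10, 20, 30])
        assert_raises(IndexError, a.take, [3], mode='raise')
        assert_array_equal(a.take([3, -1], mode='wrap'), [10, 30])
        assert_array_equal(a.take([3, -1], mode='clip'), [30, 10])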
def test_refcounting(self):
objects = [object() for i in range(10)]
for mode in ('raise', 'clip', 'wrap'):
a = np.array(objects)
b = np.array([2, 2, 4, 5, 3, 5])
a.take(b, out=a[:6], mode=mode)
del a
if HAS_REFCOUNT:
assert_(all(sys.getrefcount(o) == 3 for o in objects))
# not contiguous, example:
a = np.array(objects * 2)[::2]
a.take(b, out=a[:6], mode=mode)
del a
if HAS_REFCOUNT:
assert_(all(sys.getrefcount(o) == 3 for o in objects))
def test_unicode_mode(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.take, 5, mode=k)
def test_empty_partition(self):
# In reference to github issue #6530
a_original = np.array([0, 2, 4, 6, 8, 10])
a = a_original.copy()
# An empty partition should be a successful no-op
a.partition(np.array([], dtype=np.int16))
assert_array_equal(a, a_original)
def test_empty_argpartition(self):
# In reference to github issue #6530
a = np.array([0, 2, 4, 6, 8, 10])
a = a.argpartition(np.array([], dtype=np.int16))
b = np.array([0, 1, 2, 3, 4, 5])
assert_array_equal(a, b)
| 3,579 | Python | 40.149425 | 80 | 0.488963 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_protocols.py | import pytest
import warnings
import numpy as np
@pytest.mark.filterwarnings("error")
def test_getattr_warning():
# issue gh-14735: make sure we clear only getattr errors, and let warnings
# through
class Wrapper:
def __init__(self, array):
self.array = array
def __len__(self):
return len(self.array)
def __getitem__(self, item):
return type(self)(self.array[item])
def __getattr__(self, name):
if name.startswith("__array_"):
warnings.warn("object got converted", UserWarning, stacklevel=1)
return getattr(self.array, name)
def __repr__(self):
return "<Wrapper({self.array})>".format(self=self)
array = Wrapper(np.arange(10))
with pytest.raises(UserWarning, match="object got converted"):
np.asarray(array)
def test_array_called():
class Wrapper:
val = '0' * 100
def __array__(self, result=None):
return np.array([self.val], dtype=object)
wrapped = Wrapper()
arr = np.array(wrapped, dtype=str)
assert arr.dtype == 'U100'
assert arr[0] == Wrapper.val
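# Illustrative sketch (not part of the original file): the minimal protocol
# np.asarray consults above -- an object exposing `__array__` is converted
# through it. Uses only numpy; not collected by pytest.
def _array_protocol_sketch():
    class Minimal:
        def __array__(self):
            return np.arange(3)
    assert np.asarray(Minimal()).tolist() == [0, 1, 2]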
| 1,168 | Python | 24.977777 | 80 | 0.589041 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_indexing.py | import sys
import warnings
import functools
import operator
import pytest
import numpy as np
from numpy.core._multiarray_tests import array_indexing
from itertools import product
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_warns, HAS_REFCOUNT,
)
class TestIndexing:
def test_index_no_floats(self):
a = np.array([[[5]]])
assert_raises(IndexError, lambda: a[0.0])
assert_raises(IndexError, lambda: a[0, 0.0])
assert_raises(IndexError, lambda: a[0.0, 0])
assert_raises(IndexError, lambda: a[0.0,:])
assert_raises(IndexError, lambda: a[:, 0.0])
assert_raises(IndexError, lambda: a[:, 0.0,:])
assert_raises(IndexError, lambda: a[0.0,:,:])
assert_raises(IndexError, lambda: a[0, 0, 0.0])
assert_raises(IndexError, lambda: a[0.0, 0, 0])
assert_raises(IndexError, lambda: a[0, 0.0, 0])
assert_raises(IndexError, lambda: a[-1.4])
assert_raises(IndexError, lambda: a[0, -1.4])
assert_raises(IndexError, lambda: a[-1.4, 0])
assert_raises(IndexError, lambda: a[-1.4,:])
assert_raises(IndexError, lambda: a[:, -1.4])
assert_raises(IndexError, lambda: a[:, -1.4,:])
assert_raises(IndexError, lambda: a[-1.4,:,:])
assert_raises(IndexError, lambda: a[0, 0, -1.4])
assert_raises(IndexError, lambda: a[-1.4, 0, 0])
assert_raises(IndexError, lambda: a[0, -1.4, 0])
assert_raises(IndexError, lambda: a[0.0:, 0.0])
assert_raises(IndexError, lambda: a[0.0:, 0.0,:])
def test_slicing_no_floats(self):
a = np.array([[5]])
# start as float.
assert_raises(TypeError, lambda: a[0.0:])
assert_raises(TypeError, lambda: a[0:, 0.0:2])
assert_raises(TypeError, lambda: a[0.0::2, :0])
assert_raises(TypeError, lambda: a[0.0:1:2,:])
assert_raises(TypeError, lambda: a[:, 0.0:])
# stop as float.
assert_raises(TypeError, lambda: a[:0.0])
assert_raises(TypeError, lambda: a[:0, 1:2.0])
assert_raises(TypeError, lambda: a[:0.0:2, :0])
assert_raises(TypeError, lambda: a[:0.0,:])
assert_raises(TypeError, lambda: a[:, 0:4.0:2])
# step as float.
assert_raises(TypeError, lambda: a[::1.0])
assert_raises(TypeError, lambda: a[0:, :2:2.0])
assert_raises(TypeError, lambda: a[1::4.0, :0])
assert_raises(TypeError, lambda: a[::5.0,:])
assert_raises(TypeError, lambda: a[:, 0:4:2.0])
# mixed.
assert_raises(TypeError, lambda: a[1.0:2:2.0])
assert_raises(TypeError, lambda: a[1.0::2.0])
assert_raises(TypeError, lambda: a[0:, :2.0:2.0])
assert_raises(TypeError, lambda: a[1.0:1:4.0, :0])
assert_raises(TypeError, lambda: a[1.0:5.0:5.0,:])
assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0])
# should still get the DeprecationWarning if step = 0.
assert_raises(TypeError, lambda: a[::0.0])
def test_index_no_array_to_index(self):
# No non-scalar arrays.
a = np.array([[[1]]])
assert_raises(TypeError, lambda: a[a:a:a])
def test_none_index(self):
# `None` index adds newaxis
a = np.array([1, 2, 3])
assert_equal(a[None], a[np.newaxis])
assert_equal(a[None].ndim, a.ndim + 1)
def test_empty_tuple_index(self):
# Empty tuple index creates a view
a = np.array([1, 2, 3])
assert_equal(a[()], a)
assert_(a[()].base is a)
a = np.array(0)
assert_(isinstance(a[()], np.int_))
def test_void_scalar_empty_tuple(self):
s = np.zeros((), dtype='V4')
assert_equal(s[()].dtype, s.dtype)
assert_equal(s[()], s)
assert_equal(type(s[...]), np.ndarray)
def test_same_kind_index_casting(self):
# Indexes should be cast with same-kind and not safe, even if that
# is somewhat unsafe. So test various different code paths.
index = np.arange(5)
u_index = index.astype(np.uintp)
arr = np.arange(10)
assert_array_equal(arr[index], arr[u_index])
arr[u_index] = np.arange(5)
assert_array_equal(arr, np.arange(10))
arr = np.arange(10).reshape(5, 2)
assert_array_equal(arr[index], arr[u_index])
arr[u_index] = np.arange(5)[:,None]
assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1))
arr = np.arange(25).reshape(5, 5)
assert_array_equal(arr[u_index, u_index], arr[index, index])
def test_empty_fancy_index(self):
# Empty list index creates an empty array
# with the same dtype (but with weird shape)
a = np.array([1, 2, 3])
assert_equal(a[[]], [])
assert_equal(a[[]].dtype, a.dtype)
b = np.array([], dtype=np.intp)
assert_equal(a[[]], [])
assert_equal(a[[]].dtype, a.dtype)
b = np.array([])
assert_raises(IndexError, a.__getitem__, b)
def test_ellipsis_index(self):
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
assert_(a[...] is not a)
assert_equal(a[...], a)
# `a[...]` was `a` in numpy <1.9.
assert_(a[...].base is a)
# Slicing with ellipsis can skip an
# arbitrary number of dimensions
assert_equal(a[0, ...], a[0])
assert_equal(a[0, ...], a[0,:])
assert_equal(a[..., 0], a[:, 0])
# Slicing with ellipsis always results
# in an array, not a scalar
assert_equal(a[0, ..., 1], np.array(2))
# Assignment with `(Ellipsis,)` on 0-d arrays
b = np.array(1)
b[(Ellipsis,)] = 2
assert_equal(b, 2)
def test_single_int_index(self):
# Single integer index selects one row
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
assert_equal(a[0], [1, 2, 3])
assert_equal(a[-1], [7, 8, 9])
# Index out of bounds produces IndexError
assert_raises(IndexError, a.__getitem__, 1 << 30)
# Index overflow produces IndexError
assert_raises(IndexError, a.__getitem__, 1 << 64)
def test_single_bool_index(self):
# Single boolean index
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
assert_equal(a[np.array(True)], a[None])
assert_equal(a[np.array(False)], a[None][0:0])
def test_boolean_shape_mismatch(self):
arr = np.ones((5, 4, 3))
index = np.array([True])
assert_raises(IndexError, arr.__getitem__, index)
index = np.array([False] * 6)
assert_raises(IndexError, arr.__getitem__, index)
index = np.zeros((4, 4), dtype=bool)
assert_raises(IndexError, arr.__getitem__, index)
assert_raises(IndexError, arr.__getitem__, (slice(None), index))
def test_boolean_indexing_onedim(self):
# Indexing a 2-dimensional array with
# boolean array of length one
a = np.array([[ 0., 0., 0.]])
b = np.array([ True], dtype=bool)
assert_equal(a[b], a)
# boolean assignment
a[b] = 1.
assert_equal(a, [[1., 1., 1.]])
def test_boolean_assignment_value_mismatch(self):
# A boolean assignment should fail when the shape of the values
# cannot be broadcast to the subscription. (see also gh-3458)
a = np.arange(4)
def f(a, v):
a[a > -1] = v
assert_raises(ValueError, f, a, [])
assert_raises(ValueError, f, a, [1, 2, 3])
assert_raises(ValueError, f, a[:1], [1, 2, 3])
def test_boolean_assignment_needs_api(self):
# See also gh-7666
# This caused a segfault on Python 2 due to the GIL not being
# held when the iterator does not need it, but the transfer function
# does
arr = np.zeros(1000)
indx = np.zeros(1000, dtype=bool)
indx[:100] = True
arr[indx] = np.ones(100, dtype=object)
expected = np.zeros(1000)
expected[:100] = 1
assert_array_equal(arr, expected)
def test_boolean_indexing_twodim(self):
# Indexing a 2-dimensional array with
# 2-dimensional boolean array
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
b = np.array([[ True, False, True],
[False, True, False],
[ True, False, True]])
assert_equal(a[b], [1, 3, 5, 7, 9])
assert_equal(a[b[1]], [[4, 5, 6]])
assert_equal(a[b[0]], a[b[2]])
# boolean assignment
a[b] = 0
assert_equal(a, [[0, 2, 0],
[4, 0, 6],
[0, 8, 0]])
def test_boolean_indexing_list(self):
# Regression test for #13715. It's a use-after-free bug which the
# test won't directly catch, but it will show up in valgrind.
a = np.array([1, 2, 3])
b = [True, False, True]
# Two variants of the test because the first takes a fast path
assert_equal(a[b], [1, 3])
assert_equal(a[None, b], [[1, 3]])
def test_reverse_strides_and_subspace_bufferinit(self):
# This tests that the strides are not reversed for simple and
# subspace fancy indexing.
a = np.ones(5)
b = np.zeros(5, dtype=np.intp)[::-1]
c = np.arange(5)[::-1]
a[b] = c
# If the strides are not reversed, the 0 in the arange comes last.
assert_equal(a[0], 0)
# This also tests that the subspace buffer is initialized:
a = np.ones((5, 2))
c = np.arange(10).reshape(5, 2)[::-1]
a[b, :] = c
assert_equal(a[0], [0, 1])
def test_reversed_strides_result_allocation(self):
# Test a bug when calculating the output strides for a result array
# when the subspace size was 1 (and test other cases as well)
a = np.arange(10)[:, None]
i = np.arange(10)[::-1]
assert_array_equal(a[i], a[i.copy('C')])
a = np.arange(20).reshape(-1, 2)
def test_uncontiguous_subspace_assignment(self):
# During development there was a bug activating a skip logic
# based on ndim instead of size.
a = np.full((3, 4, 2), -1)
b = np.full((3, 4, 2), -1)
a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T
b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy()
assert_equal(a, b)
def test_too_many_fancy_indices_special_case(self):
# Just documents behaviour, this is a small limitation.
a = np.ones((1,) * 32) # 32 is NPY_MAXDIMS
assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32)
def test_scalar_array_bool(self):
        # NumPy bools can be used as a boolean index (Python bools cannot yet)
a = np.array(1)
assert_equal(a[np.bool_(True)], a[np.array(True)])
assert_equal(a[np.bool_(False)], a[np.array(False)])
# After deprecating bools as integers:
#a = np.array([0,1,2])
#assert_equal(a[True, :], a[None, :])
#assert_equal(a[:, True], a[:, None])
#
#assert_(not np.may_share_memory(a, a[True, :]))
def test_everything_returns_views(self):
# Before `...` would return a itself.
a = np.arange(5)
assert_(a is not a[()])
assert_(a is not a[...])
assert_(a is not a[:])
def test_broaderrors_indexing(self):
a = np.zeros((5, 5))
assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2]))
assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0)
def test_trivial_fancy_out_of_bounds(self):
a = np.zeros(5)
ind = np.ones(20, dtype=np.intp)
ind[-1] = 10
assert_raises(IndexError, a.__getitem__, ind)
assert_raises(IndexError, a.__setitem__, ind, 0)
ind = np.ones(20, dtype=np.intp)
ind[0] = 11
assert_raises(IndexError, a.__getitem__, ind)
assert_raises(IndexError, a.__setitem__, ind, 0)
def test_trivial_fancy_not_possible(self):
# Test that the fast path for trivial assignment is not incorrectly
# used when the index is not contiguous or 1D, see also gh-11467.
a = np.arange(6)
idx = np.arange(6, dtype=np.intp).reshape(2, 1, 3)[:, :, 0]
assert_array_equal(a[idx], idx)
        # This case must not go into the fast path; note that idx is
        # a non-contiguous, non-1D array here.
a[idx] = -1
res = np.arange(6)
res[0] = -1
res[3] = -1
assert_array_equal(a, res)
def test_nonbaseclass_values(self):
class SubClass(np.ndarray):
def __array_finalize__(self, old):
# Have array finalize do funny things
self.fill(99)
a = np.zeros((5, 5))
s = a.copy().view(type=SubClass)
s.fill(1)
a[[0, 1, 2, 3, 4], :] = s
assert_((a == 1).all())
# Subspace is last, so transposing might want to finalize
a[:, [0, 1, 2, 3, 4]] = s
assert_((a == 1).all())
a.fill(0)
a[...] = s
assert_((a == 1).all())
def test_array_like_values(self):
# Similar to the above test, but use a memoryview instead
a = np.zeros((5, 5))
s = np.arange(25, dtype=np.float64).reshape(5, 5)
a[[0, 1, 2, 3, 4], :] = memoryview(s)
assert_array_equal(a, s)
a[:, [0, 1, 2, 3, 4]] = memoryview(s)
assert_array_equal(a, s)
a[...] = memoryview(s)
assert_array_equal(a, s)
def test_subclass_writeable(self):
d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)],
dtype=[('target', 'S20'), ('V_mag', '>f4')])
ind = np.array([False, True, True], dtype=bool)
assert_(d[ind].flags.writeable)
ind = np.array([0, 1])
assert_(d[ind].flags.writeable)
assert_(d[...].flags.writeable)
assert_(d[0].flags.writeable)
def test_memory_order(self):
# This is not necessary to preserve. Memory layouts for
# more complex indices are not as simple.
a = np.arange(10)
b = np.arange(10).reshape(5,2).T
assert_(a[b].flags.f_contiguous)
# Takes a different implementation branch:
a = a.reshape(-1, 1)
assert_(a[b, 0].flags.f_contiguous)
def test_scalar_return_type(self):
# Full scalar indices should return scalars and object
# arrays should not call PyArray_Return on their items
class Zero:
# The most basic valid indexing
def __index__(self):
return 0
z = Zero()
class ArrayLike:
# Simple array, should behave like the array
def __array__(self):
return np.array(0)
a = np.zeros(())
assert_(isinstance(a[()], np.float_))
a = np.zeros(1)
assert_(isinstance(a[z], np.float_))
a = np.zeros((1, 1))
assert_(isinstance(a[z, np.array(0)], np.float_))
assert_(isinstance(a[z, ArrayLike()], np.float_))
# And object arrays do not call it too often:
b = np.array(0)
a = np.array(0, dtype=object)
a[()] = b
assert_(isinstance(a[()], np.ndarray))
a = np.array([b, None])
assert_(isinstance(a[z], np.ndarray))
a = np.array([[b, None]])
assert_(isinstance(a[z, np.array(0)], np.ndarray))
assert_(isinstance(a[z, ArrayLike()], np.ndarray))
def test_small_regressions(self):
# Reference count of intp for index checks
a = np.array([0])
if HAS_REFCOUNT:
refcount = sys.getrefcount(np.dtype(np.intp))
# item setting always checks indices in separate function:
a[np.array([0], dtype=np.intp)] = 1
a[np.array([0], dtype=np.uint8)] = 1
assert_raises(IndexError, a.__setitem__,
np.array([1], dtype=np.intp), 1)
assert_raises(IndexError, a.__setitem__,
np.array([1], dtype=np.uint8), 1)
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount)
def test_unaligned(self):
v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7]
d = v.view(np.dtype("S8"))
# unaligned source
x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7]
x = x.view(np.dtype("S8"))
x[...] = np.array("b" * 8, dtype="S")
b = np.arange(d.size)
#trivial
assert_equal(d[b], d)
d[b] = x
# nontrivial
# unaligned index array
b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)]
b = b.view(np.intp)[:d.size]
b[...] = np.arange(d.size)
assert_equal(d[b.astype(np.int16)], d)
d[b.astype(np.int16)] = x
# boolean
d[b % 2 == 0]
d[b % 2 == 0] = x[::2]
def test_tuple_subclass(self):
arr = np.ones((5, 5))
# A tuple subclass should also be an nd-index
class TupleSubclass(tuple):
pass
index = ([1], [1])
index = TupleSubclass(index)
assert_(arr[index].shape == (1,))
# Unlike the non nd-index:
assert_(arr[index,].shape != (1,))
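    # Illustrative sketch (not part of the original suite): the tuple/list
    # distinction the test above relies on. Uses only numpy; not collected
    # by pytest (no `test_` prefix).
    @staticmethod
    def _tuple_vs_list_sketch():
        arr = np.arange(4).reshape(2, 2)
        assert arr[(1, 1)] == 3             # tuple: one index per axis
        assert arr[[1, 1]].shape == (2, 2)  # list: fancy index on axis 0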
def test_broken_sequence_not_nd_index(self):
# See gh-5063:
        # If we have an object which claims to be a sequence, but fails
        # on item getting, this should not be converted to an nd-index (tuple).
        # If this object happens to be a valid index otherwise, it should work.
        # This object here is very dubious and probably bad though:
class SequenceLike:
def __index__(self):
return 0
def __len__(self):
return 1
def __getitem__(self, item):
raise IndexError('Not possible')
arr = np.arange(10)
assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])
# also test that field indexing does not segfault
# for a similar reason, by indexing a structured array
arr = np.zeros((1,), dtype=[('f1', 'i8'), ('f2', 'i8')])
assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])
def test_indexing_array_weird_strides(self):
# See also gh-6221
        # The shapes used here come from the issue and produce the correct
        # iterator buffering size.
x = np.ones(10)
x2 = np.ones((10, 2))
ind = np.arange(10)[:, None, None, None]
ind = np.broadcast_to(ind, (10, 55, 4, 4))
# single advanced index case
assert_array_equal(x[ind], x[ind.copy()])
# higher dimensional advanced index
zind = np.zeros(4, dtype=np.intp)
assert_array_equal(x2[ind, zind], x2[ind.copy(), zind])
def test_indexing_array_negative_strides(self):
# From gh-8264,
# core dumps if negative strides are used in iteration
arro = np.zeros((4, 4))
arr = arro[::-1, ::-1]
slices = (slice(None), [0, 1, 2, 3])
arr[slices] = 10
assert_array_equal(arr, 10.)
def test_character_assignment(self):
        # This is an example of a function going through CopyObject, which
        # used to have an untested special path for scalars
        # (the character special dtype case; it should probably be deprecated)
arr = np.zeros((1, 5), dtype="c")
arr[0] = np.str_("asdfg") # must assign as a sequence
assert_array_equal(arr[0], np.array("asdfg", dtype="c"))
assert arr[0, 1] == b"s" # make sure not all were set to "a" for both
@pytest.mark.parametrize("index",
[True, False, np.array([0])])
@pytest.mark.parametrize("num", [32, 40])
@pytest.mark.parametrize("original_ndim", [1, 32])
def test_too_many_advanced_indices(self, index, num, original_ndim):
# These are limitations based on the number of arguments we can process.
        # For `num=32` (and all boolean cases), the result is actually defined,
# but the use of NpyIter (NPY_MAXARGS) limits it for technical reasons.
arr = np.ones((1,) * original_ndim)
with pytest.raises(IndexError):
arr[(index,) * num]
with pytest.raises(IndexError):
arr[(index,) * num] = 1.
def test_structured_advanced_indexing(self):
# Test that copyswap(n) used by integer array indexing is threadsafe
# for structured datatypes, see gh-15387. This test can behave randomly.
from concurrent.futures import ThreadPoolExecutor
# Create a deeply nested dtype to make a failure more likely:
dt = np.dtype([("", "f8")])
dt = np.dtype([("", dt)] * 2)
dt = np.dtype([("", dt)] * 2)
# The array should be large enough to likely run into threading issues
arr = np.random.uniform(size=(6000, 8)).view(dt)[:, 0]
rng = np.random.default_rng()
def func(arr):
indx = rng.integers(0, len(arr), size=6000, dtype=np.intp)
arr[indx]
tpe = ThreadPoolExecutor(max_workers=8)
futures = [tpe.submit(func, arr) for _ in range(10)]
for f in futures:
f.result()
assert arr.dtype is dt
def test_nontuple_ndindex(self):
a = np.arange(25).reshape((5, 5))
assert_equal(a[[0, 1]], np.array([a[0], a[1]]))
assert_equal(a[[0, 1], [0, 1]], np.array([0, 6]))
assert_raises(IndexError, a.__getitem__, [slice(None)])
class TestFieldIndexing:
def test_scalar_return_type(self):
# Field access on an array should return an array, even if it
# is 0-d.
a = np.zeros((), [('a','f8')])
assert_(isinstance(a['a'], np.ndarray))
assert_(isinstance(a[['a']], np.ndarray))
class TestBroadcastedAssignments:
def assign(self, a, ind, val):
a[ind] = val
return a
def test_prepending_ones(self):
a = np.zeros((3, 2))
a[...] = np.ones((1, 3, 2))
# Fancy with subspace with and without transpose
a[[0, 1, 2], :] = np.ones((1, 3, 2))
a[:, [0, 1]] = np.ones((1, 3, 2))
# Fancy without subspace (with broadcasting)
a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2))
def test_prepend_not_one(self):
assign = self.assign
s_ = np.s_
a = np.zeros(5)
# Too large and not only ones.
assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1)))
assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1)))
assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2,2,1)))
def test_simple_broadcasting_errors(self):
assign = self.assign
s_ = np.s_
a = np.zeros((5, 1))
assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2)))
assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0)))
assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2)))
assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0)))
assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1)))
@pytest.mark.parametrize("index", [
(..., [1, 2], slice(None)),
([0, 1], ..., 0),
(..., [1, 2], [1, 2])])
def test_broadcast_error_reports_correct_shape(self, index):
values = np.zeros((100, 100)) # will never broadcast below
arr = np.zeros((3, 4, 5, 6, 7))
# We currently report without any spaces (could be changed)
shape_str = str(arr[index].shape).replace(" ", "")
with pytest.raises(ValueError) as e:
arr[index] = values
assert str(e.value).endswith(shape_str)
def test_index_is_larger(self):
# Simple case of fancy index broadcasting of the index.
a = np.zeros((5, 5))
a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4]
assert_((a[:3, :3] == [2, 3, 4]).all())
def test_broadcast_subspace(self):
a = np.zeros((100, 100))
v = np.arange(100)[:,None]
b = np.arange(100)[::-1]
a[b] = v
assert_((a[::-1] == v).all())
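    # Illustrative sketch (not part of the original suite): the broadcasting
    # rule the assignments above exercise -- the value is broadcast against
    # the shape selected by the index. Not collected by pytest.
    @staticmethod
    def _broadcast_assign_sketch():
        a = np.zeros((3, 2))
        a[[0, 2]] = 7   # scalar broadcasts over the (2, 2) selection
        assert_((a[[0, 2]] == 7).all() and (a[1] == 0).all())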
class TestSubclasses:
def test_basic(self):
# Test that indexing in various ways produces SubClass instances,
# and that the base is set up correctly: the original subclass
# instance for views, and a new ndarray for advanced/boolean indexing
# where a copy was made (latter a regression test for gh-11983).
class SubClass(np.ndarray):
pass
a = np.arange(5)
s = a.view(SubClass)
s_slice = s[:3]
assert_(type(s_slice) is SubClass)
assert_(s_slice.base is s)
assert_array_equal(s_slice, a[:3])
s_fancy = s[[0, 1, 2]]
assert_(type(s_fancy) is SubClass)
assert_(s_fancy.base is not s)
assert_(type(s_fancy.base) is np.ndarray)
assert_array_equal(s_fancy, a[[0, 1, 2]])
assert_array_equal(s_fancy.base, a[[0, 1, 2]])
s_bool = s[s > 0]
assert_(type(s_bool) is SubClass)
assert_(s_bool.base is not s)
assert_(type(s_bool.base) is np.ndarray)
assert_array_equal(s_bool, a[a > 0])
assert_array_equal(s_bool.base, a[a > 0])
def test_fancy_on_read_only(self):
# Test that fancy indexing on read-only SubClass does not make a
# read-only copy (gh-14132)
class SubClass(np.ndarray):
pass
a = np.arange(5)
s = a.view(SubClass)
s.flags.writeable = False
s_fancy = s[[0, 1, 2]]
assert_(s_fancy.flags.writeable)
def test_finalize_gets_full_info(self):
# Array finalize should be called on the filled array.
class SubClass(np.ndarray):
def __array_finalize__(self, old):
self.finalize_status = np.array(self)
self.old = old
s = np.arange(10).view(SubClass)
new_s = s[:3]
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
new_s = s[[0,1,2,3]]
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
new_s = s[s > 0]
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
class TestFancyIndexingCast:
def test_boolean_index_cast_assign(self):
# Setup the boolean index and float arrays.
shape = (8, 63)
bool_index = np.zeros(shape).astype(bool)
bool_index[0, 1] = True
zero_array = np.zeros(shape)
# Assigning float is fine.
zero_array[bool_index] = np.array([1])
assert_equal(zero_array[0, 1], 1)
# Fancy indexing works, although we get a cast warning.
assert_warns(np.ComplexWarning,
zero_array.__setitem__, ([0], [1]), np.array([2 + 1j]))
assert_equal(zero_array[0, 1], 2) # No complex part
# Cast complex to float, throwing away the imaginary portion.
assert_warns(np.ComplexWarning,
zero_array.__setitem__, bool_index, np.array([1j]))
assert_equal(zero_array[0, 1], 0)
class TestFancyIndexingEquivalence:
def test_object_assign(self):
# Check that the field and object special case using copyto is active.
# The right hand side cannot be converted to an array here.
a = np.arange(5, dtype=object)
b = a.copy()
a[:3] = [1, (1,2), 3]
b[[0, 1, 2]] = [1, (1,2), 3]
assert_array_equal(a, b)
# test same for subspace fancy indexing
b = np.arange(5, dtype=object)[None, :]
b[[0], :3] = [[1, (1,2), 3]]
assert_array_equal(a, b[0])
# Check that swapping of axes works.
# There was a bug that made the later assignment throw a ValueError
        # due to an incorrectly transposed temporary right-hand side (gh-5714)
b = b.T
b[:3, [0]] = [[1], [(1,2)], [3]]
assert_array_equal(a, b[:, 0])
# Another test for the memory order of the subspace
arr = np.ones((3, 4, 5), dtype=object)
# Equivalent slicing assignment for comparison
cmp_arr = arr.copy()
cmp_arr[:1, ...] = [[[1], [2], [3], [4]]]
arr[[0], ...] = [[[1], [2], [3], [4]]]
assert_array_equal(arr, cmp_arr)
arr = arr.copy('F')
arr[[0], ...] = [[[1], [2], [3], [4]]]
assert_array_equal(arr, cmp_arr)
def test_cast_equivalence(self):
# Yes, normal slicing uses unsafe casting.
a = np.arange(5)
b = a.copy()
a[:3] = np.array(['2', '-3', '-1'])
b[[0, 2, 1]] = np.array(['2', '-1', '-3'])
assert_array_equal(a, b)
# test the same for subspace fancy indexing
b = np.arange(5)[None, :]
b[[0], :3] = np.array([['2', '-3', '-1']])
assert_array_equal(a, b[0])
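    # Illustrative sketch (not part of the original suite): both assignment
    # paths compared above go through the same unsafe string->integer cast.
    # Not collected by pytest.
    @staticmethod
    def _unsafe_cast_sketch():
        a = np.zeros(2, dtype=int)
        a[:1] = np.array(['7'])    # slice assignment casts unsafely
        a[[1]] = np.array(['8'])   # fancy assignment matches it
        assert_array_equal(a, [7, 8])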
class TestMultiIndexingAutomated:
"""
These tests use code to mimic the C-Code indexing for selection.
NOTE:
* This still lacks tests for complex item setting.
* If you change behavior of indexing, you might want to modify
these tests to try more combinations.
* Behavior was written to match numpy version 1.8. (though a
first version matched 1.7.)
* Only tuple indices are supported by the mimicking code.
(and tested as of writing this)
* Error types should match most of the time as long as there
is only one error. For multiple errors, what gets raised
will usually not be the same one. They are *not* tested.
Update 2016-11-30: It is probably not worth maintaining this test
indefinitely and it can be dropped if maintenance becomes a burden.
"""
def setup_method(self):
self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6)
self.b = np.empty((3, 0, 5, 6))
self.complex_indices = ['skip', Ellipsis,
0,
# Boolean indices, up to 3-d for some special cases of eating up
# dimensions, also need to test all False
np.array([True, False, False]),
np.array([[True, False], [False, True]]),
np.array([[[False, False], [False, False]]]),
# Some slices:
slice(-5, 5, 2),
slice(1, 1, 100),
slice(4, -1, -2),
slice(None, None, -3),
# Some Fancy indexes:
np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast
np.array([0, 1, -2]),
np.array([[2], [0], [1]]),
np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()),
np.array([2, -1], dtype=np.int8),
np.zeros([1]*31, dtype=int), # trigger too large array.
np.array([0., 1.])] # invalid datatype
# Some simpler indices that still cover a bit more
self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]),
'skip']
# Very simple ones to fill the rest:
self.fill_indices = [slice(None, None), 0]
def _get_multi_index(self, arr, indices):
"""Mimic multi dimensional indexing.
Parameters
----------
arr : ndarray
Array to be indexed.
indices : tuple of index objects
Returns
-------
out : ndarray
An array equivalent to the indexing operation (but always a copy).
`arr[indices]` should be identical.
no_copy : bool
Whether the indexing operation requires a copy. If this is `True`,
`np.may_share_memory(arr, arr[indices])` should be `True` (with
some exceptions for scalars and possibly 0-d arrays).
Notes
-----
        While the function mostly matches the errors of normal indexing, exact
        error agreement is not guaranteed.
"""
in_indices = list(indices)
indices = []
# if False, this is a fancy or boolean index
no_copy = True
# number of fancy/scalar indexes that are not consecutive
num_fancy = 0
# number of dimensions indexed by a "fancy" index
fancy_dim = 0
# NOTE: This is a funny twist (and probably OK to change).
# The boolean array has illegal indexes, but this is
# allowed if the broadcast fancy-indices are 0-sized.
# This variable is to catch that case.
error_unless_broadcast_to_empty = False
# We need to handle Ellipsis and make arrays from indices, also
# check if this is fancy indexing (set no_copy).
ndim = 0
        ellipsis_pos = None  # defined here mostly to replace all but the first.
for i, indx in enumerate(in_indices):
if indx is None:
continue
if isinstance(indx, np.ndarray) and indx.dtype == bool:
no_copy = False
if indx.ndim == 0:
raise IndexError
# boolean indices can have higher dimensions
ndim += indx.ndim
fancy_dim += indx.ndim
continue
if indx is Ellipsis:
if ellipsis_pos is None:
ellipsis_pos = i
continue # do not increment ndim counter
raise IndexError
if isinstance(indx, slice):
ndim += 1
continue
if not isinstance(indx, np.ndarray):
# This could be open for changes in numpy.
# numpy should maybe raise an error if casting to intp
# is not safe. It rejects np.array([1., 2.]) but not
                # [1., 2.] as index (same for, e.g., np.take).
# (Note the importance of empty lists if changing this here)
try:
indx = np.array(indx, dtype=np.intp)
except ValueError:
raise IndexError
in_indices[i] = indx
elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i':
raise IndexError('arrays used as indices must be of '
'integer (or boolean) type')
if indx.ndim != 0:
no_copy = False
ndim += 1
fancy_dim += 1
if arr.ndim - ndim < 0:
            # We can't take more dimensions than we have, not even for 0-d
            # arrays, since a[()] makes sense, but a[(),] does not. We will
# raise an error later on, unless a broadcasting error occurs
# first.
raise IndexError
if ndim == 0 and None not in in_indices:
# Well we have no indexes or one Ellipsis. This is legal.
return arr.copy(), no_copy
if ellipsis_pos is not None:
in_indices[ellipsis_pos:ellipsis_pos+1] = ([slice(None, None)] *
(arr.ndim - ndim))
for ax, indx in enumerate(in_indices):
if isinstance(indx, slice):
# convert to an index array
indx = np.arange(*indx.indices(arr.shape[ax]))
indices.append(['s', indx])
continue
elif indx is None:
# this is like taking a slice with one element from a new axis:
indices.append(['n', np.array([0], dtype=np.intp)])
arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:]))
continue
if isinstance(indx, np.ndarray) and indx.dtype == bool:
if indx.shape != arr.shape[ax:ax+indx.ndim]:
raise IndexError
try:
flat_indx = np.ravel_multi_index(np.nonzero(indx),
arr.shape[ax:ax+indx.ndim], mode='raise')
except Exception:
error_unless_broadcast_to_empty = True
# fill with 0s instead, and raise error later
flat_indx = np.array([0]*indx.sum(), dtype=np.intp)
# concatenate axis into a single one:
if indx.ndim != 0:
arr = arr.reshape((arr.shape[:ax]
+ (np.prod(arr.shape[ax:ax+indx.ndim]),)
+ arr.shape[ax+indx.ndim:]))
indx = flat_indx
else:
# This could be changed, a 0-d boolean index can
# make sense (even outside the 0-d indexed array case)
                    # Note that originally this could be interpreted as an
                    # integer in the full integer special case.
raise IndexError
else:
# If the index is a singleton, the bounds check is done
# before the broadcasting. This used to be different in <1.9
if indx.ndim == 0:
if indx >= arr.shape[ax] or indx < -arr.shape[ax]:
raise IndexError
if indx.ndim == 0:
                # The index is a scalar. This used to be twofold, but if
# fancy indexing was active, the check was done later,
# possibly after broadcasting it away (1.7. or earlier).
# Now it is always done.
if indx >= arr.shape[ax] or indx < - arr.shape[ax]:
raise IndexError
if (len(indices) > 0 and
indices[-1][0] == 'f' and
ax != ellipsis_pos):
# NOTE: There could still have been a 0-sized Ellipsis
# between them. Checked that with ellipsis_pos.
indices[-1].append(indx)
else:
# We have a fancy index that is not after an existing one.
# NOTE: A 0-d array triggers this as well, while one may
# expect it to not trigger it, since a scalar would not be
# considered fancy indexing.
num_fancy += 1
indices.append(['f', indx])
if num_fancy > 1 and not no_copy:
# We have to flush the fancy indexes left
new_indices = indices[:]
axes = list(range(arr.ndim))
fancy_axes = []
new_indices.insert(0, ['f'])
ni = 0
ai = 0
for indx in indices:
ni += 1
if indx[0] == 'f':
new_indices[0].extend(indx[1:])
del new_indices[ni]
ni -= 1
for ax in range(ai, ai + len(indx[1:])):
fancy_axes.append(ax)
axes.remove(ax)
ai += len(indx) - 1 # axis we are at
indices = new_indices
# and now we need to transpose arr:
arr = arr.transpose(*(fancy_axes + axes))
# We only have one 'f' index now and arr is transposed accordingly.
# Now handle newaxis by reshaping...
ax = 0
for indx in indices:
if indx[0] == 'f':
if len(indx) == 1:
continue
# First of all, reshape arr to combine fancy axes into one:
orig_shape = arr.shape
orig_slice = orig_shape[ax:ax + len(indx[1:])]
arr = arr.reshape((arr.shape[:ax]
+ (np.prod(orig_slice).astype(int),)
+ arr.shape[ax + len(indx[1:]):]))
# Check if broadcasting works
res = np.broadcast(*indx[1:])
# unfortunately the indices might be out of bounds. So check
# that first, and use mode='wrap' then. However only if
# there are any indices...
if res.size != 0:
if error_unless_broadcast_to_empty:
raise IndexError
for _indx, _size in zip(indx[1:], orig_slice):
if _indx.size == 0:
continue
if np.any(_indx >= _size) or np.any(_indx < -_size):
raise IndexError
if len(indx[1:]) == len(orig_slice):
                    if np.prod(orig_slice) == 0:
                        # Workaround for a crash or IndexError with 'wrap'
                        # in some 0-sized cases.
try:
mi = np.ravel_multi_index(indx[1:], orig_slice,
mode='raise')
except Exception:
                            # This happens with 0-sized orig_slice (sometimes?);
                            # here it is a ValueError, but real indexing raises:
raise IndexError('invalid index into 0-sized')
else:
mi = np.ravel_multi_index(indx[1:], orig_slice,
mode='wrap')
else:
# Maybe never happens...
raise ValueError
arr = arr.take(mi.ravel(), axis=ax)
try:
arr = arr.reshape((arr.shape[:ax]
+ mi.shape
+ arr.shape[ax+1:]))
except ValueError:
# too many dimensions, probably
raise IndexError
ax += mi.ndim
continue
# If we are here, we have a 1D array for take:
arr = arr.take(indx[1], axis=ax)
ax += 1
return arr, no_copy
def _check_multi_index(self, arr, index):
"""Check a multi index item getting and simple setting.
Parameters
----------
arr : ndarray
Array to be indexed, must be a reshaped arange.
index : tuple of indexing objects
Index being tested.
"""
# Test item getting
try:
mimic_get, no_copy = self._get_multi_index(arr, index)
except Exception as e:
if HAS_REFCOUNT:
prev_refcount = sys.getrefcount(arr)
assert_raises(type(e), arr.__getitem__, index)
assert_raises(type(e), arr.__setitem__, index, 0)
if HAS_REFCOUNT:
assert_equal(prev_refcount, sys.getrefcount(arr))
return
self._compare_index_result(arr, index, mimic_get, no_copy)
def _check_single_index(self, arr, index):
"""Check a single index item getting and simple setting.
Parameters
----------
arr : ndarray
Array to be indexed, must be an arange.
index : indexing object
Index being tested. Must be a single index and not a tuple
of indexing objects (see also `_check_multi_index`).
"""
try:
mimic_get, no_copy = self._get_multi_index(arr, (index,))
except Exception as e:
if HAS_REFCOUNT:
prev_refcount = sys.getrefcount(arr)
assert_raises(type(e), arr.__getitem__, index)
assert_raises(type(e), arr.__setitem__, index, 0)
if HAS_REFCOUNT:
assert_equal(prev_refcount, sys.getrefcount(arr))
return
self._compare_index_result(arr, index, mimic_get, no_copy)
def _compare_index_result(self, arr, index, mimic_get, no_copy):
"""Compare mimicked result to indexing result.
"""
arr = arr.copy()
indexed_arr = arr[index]
assert_array_equal(indexed_arr, mimic_get)
        # Check if we got a view, unless it's a 0-sized or 0-d array
        # (then it's not a view, and that does not matter).
if indexed_arr.size != 0 and indexed_arr.ndim != 0:
assert_(np.may_share_memory(indexed_arr, arr) == no_copy)
# Check reference count of the original array
if HAS_REFCOUNT:
if no_copy:
# refcount increases by one:
assert_equal(sys.getrefcount(arr), 3)
else:
assert_equal(sys.getrefcount(arr), 2)
# Test non-broadcast setitem:
b = arr.copy()
b[index] = mimic_get + 1000
if b.size == 0:
return # nothing to compare here...
if no_copy and indexed_arr.ndim != 0:
# change indexed_arr in-place to manipulate original:
indexed_arr += 1000
assert_array_equal(arr, b)
return
# Use the fact that the array is originally an arange:
arr.flat[indexed_arr.ravel()] += 1000
assert_array_equal(arr, b)
def test_boolean(self):
a = np.array(5)
assert_equal(a[np.array(True)], 5)
a[np.array(True)] = 1
assert_equal(a, 1)
        # NOTE: This is different from normal broadcasting, as
        # arr[boolean_array] works like a multi-index, which means
        # it is aligned to the left. This is probably correct for
        # consistency with arr[boolean_array,]; also, no broadcasting
        # is done at all.
self._check_multi_index(
self.a, (np.zeros_like(self.a, dtype=bool),))
self._check_multi_index(
self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],))
self._check_multi_index(
self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],))
def test_multidim(self):
# Automatically test combinations with complex indexes on 2nd (or 1st)
# spot and the simple ones in one other spot.
with warnings.catch_warnings():
# This is so that np.array(True) is not accepted in a full integer
# index, when running the file separately.
warnings.filterwarnings('error', '', DeprecationWarning)
warnings.filterwarnings('error', '', np.VisibleDeprecationWarning)
def isskip(idx):
return isinstance(idx, str) and idx == "skip"
for simple_pos in [0, 2, 3]:
tocheck = [self.fill_indices, self.complex_indices,
self.fill_indices, self.fill_indices]
tocheck[simple_pos] = self.simple_indices
for index in product(*tocheck):
index = tuple(i for i in index if not isskip(i))
self._check_multi_index(self.a, index)
self._check_multi_index(self.b, index)
# Check very simple item getting:
self._check_multi_index(self.a, (0, 0, 0, 0))
self._check_multi_index(self.b, (0, 0, 0, 0))
# Also check (simple cases of) too many indices:
assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0))
assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0)
assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0))
assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0)
def test_1d(self):
a = np.arange(10)
for index in self.complex_indices:
self._check_single_index(a, index)
class TestFloatNonIntegerArgument:
"""
    These test that ``TypeError`` is raised when you try to use
    non-integers as arguments for indexing and slicing, e.g. ``a[0.0:5]``
and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``.
"""
def test_valid_indexing(self):
# These should raise no errors.
a = np.array([[[5]]])
a[np.array([0])]
a[[0, 0]]
a[:, [0, 0]]
a[:, 0,:]
a[:,:,:]
def test_valid_slicing(self):
# These should raise no errors.
a = np.array([[[5]]])
a[::]
a[0:]
a[:2]
a[0:2]
a[::2]
a[1::2]
a[:2:2]
a[1:2:2]
def test_non_integer_argument_errors(self):
a = np.array([[5]])
assert_raises(TypeError, np.reshape, a, (1., 1., -1))
assert_raises(TypeError, np.reshape, a, (np.array(1.), -1))
assert_raises(TypeError, np.take, a, [0], 1.)
assert_raises(TypeError, np.take, a, [0], np.float64(1.))
def test_non_integer_sequence_multiplication(self):
# NumPy scalar sequence multiply should not work with non-integers
def mult(a, b):
return a * b
assert_raises(TypeError, mult, [1], np.float_(3))
# following should be OK
mult([1], np.int_(3))
def test_reduce_axis_float_index(self):
d = np.zeros((3,3,3))
assert_raises(TypeError, np.min, d, 0.5)
assert_raises(TypeError, np.min, d, (0.5, 1))
assert_raises(TypeError, np.min, d, (1, 2.2))
assert_raises(TypeError, np.min, d, (.2, 1.2))
class TestBooleanIndexing:
# Using a boolean as integer argument/indexing is an error.
def test_bool_as_int_argument_errors(self):
a = np.array([[[1]]])
assert_raises(TypeError, np.reshape, a, (True, -1))
assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1))
# Note that operator.index(np.array(True)) does not work, a boolean
# array is thus also deprecated, but not with the same message:
assert_raises(TypeError, operator.index, np.array(True))
assert_warns(DeprecationWarning, operator.index, np.True_)
assert_raises(TypeError, np.take, args=(a, [0], False))
def test_boolean_indexing_weirdness(self):
# Weird boolean indexing things
a = np.ones((2, 3, 4))
        assert a[False, True, ...].shape == (0, 2, 3, 4)
        assert a[True, [0, 1], True, True, [1], [[2]]].shape == (1, 2)
assert_raises(IndexError, lambda: a[False, [0, 1], ...])
def test_boolean_indexing_fast_path(self):
# These used to either give the wrong error, or incorrectly give no
# error.
a = np.ones((3, 3))
# This used to incorrectly work (and give an array of shape (0,))
idx1 = np.array([[False]*9])
assert_raises_regex(IndexError,
"boolean index did not match indexed array along dimension 0; "
"dimension is 3 but corresponding boolean dimension is 1",
lambda: a[idx1])
# This used to incorrectly give a ValueError: operands could not be broadcast together
idx2 = np.array([[False]*8 + [True]])
assert_raises_regex(IndexError,
"boolean index did not match indexed array along dimension 0; "
"dimension is 3 but corresponding boolean dimension is 1",
lambda: a[idx2])
# This is the same as it used to be. The above two should work like this.
idx3 = np.array([[False]*10])
assert_raises_regex(IndexError,
"boolean index did not match indexed array along dimension 0; "
"dimension is 3 but corresponding boolean dimension is 1",
lambda: a[idx3])
# This used to give ValueError: non-broadcastable operand
a = np.ones((1, 1, 2))
idx = np.array([[[True], [False]]])
assert_raises_regex(IndexError,
"boolean index did not match indexed array along dimension 1; "
"dimension is 1 but corresponding boolean dimension is 2",
lambda: a[idx])
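    # Illustrative sketch (not part of the original suite): the exact-match
    # rule behind the errors above -- boolean masks are never broadcast to
    # the indexed shape. Not collected by pytest.
    @staticmethod
    def _mask_shape_sketch():
        a = np.ones((3, 3))
        mask = np.zeros((3, 3), dtype=bool)
        mask[0, 0] = True
        assert_(a[mask].shape == (1,))  # full-shape mask selects elements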
class TestArrayToIndexDeprecation:
"""Creating an index from array not 0-D is an error.
"""
def test_array_to_index_error(self):
        # These raise immediately; the raising is effectively tested above.
a = np.array([[[1]]])
assert_raises(TypeError, operator.index, np.array([1]))
assert_raises(TypeError, np.reshape, a, (a, -1))
assert_raises(TypeError, np.take, a, [0], a)
class TestNonIntegerArrayLike:
"""Tests that array_likes only valid if can safely cast to integer.
For instance, lists give IndexError when they cannot be safely cast to
an integer.
"""
def test_basic(self):
a = np.arange(10)
assert_raises(IndexError, a.__getitem__, [0.5, 1.5])
assert_raises(IndexError, a.__getitem__, (['1', '2'],))
# The following is valid
a.__getitem__([])
class TestMultipleEllipsisError:
"""An index can only have a single ellipsis.
"""
def test_basic(self):
a = np.arange(10)
assert_raises(IndexError, lambda: a[..., ...])
assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,))
assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,))
class TestCApiAccess:
def test_getitem(self):
subscript = functools.partial(array_indexing, 0)
# 0-d arrays don't work:
assert_raises(IndexError, subscript, np.ones(()), 0)
# Out of bound values:
assert_raises(IndexError, subscript, np.ones(10), 11)
assert_raises(IndexError, subscript, np.ones(10), -11)
assert_raises(IndexError, subscript, np.ones((10, 10)), 11)
assert_raises(IndexError, subscript, np.ones((10, 10)), -11)
a = np.arange(10)
assert_array_equal(a[4], subscript(a, 4))
a = a.reshape(5, 2)
assert_array_equal(a[-4], subscript(a, -4))
def test_setitem(self):
assign = functools.partial(array_indexing, 1)
# Deletion is impossible:
assert_raises(ValueError, assign, np.ones(10), 0)
# 0-d arrays don't work:
assert_raises(IndexError, assign, np.ones(()), 0, 0)
# Out of bound values:
assert_raises(IndexError, assign, np.ones(10), 11, 0)
assert_raises(IndexError, assign, np.ones(10), -11, 0)
assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0)
assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0)
a = np.arange(10)
assign(a, 4, 10)
assert_(a[4] == 10)
a = a.reshape(5, 2)
assign(a, 4, 10)
assert_array_equal(a[-1], [10, 10])
| 54,234 | Python | 37.247532 | 94 | 0.534831 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/examples/limited_api/setup.py | """
Build an example package using the limited Python C API.
"""
import numpy as np
from setuptools import setup, Extension
import os
macros = [("NPY_NO_DEPRECATED_API", 0), ("Py_LIMITED_API", "0x03060000")]
limited_api = Extension(
"limited_api",
sources=[os.path.join('.', "limited_api.c")],
include_dirs=[np.get_include()],
define_macros=macros,
)
extensions = [limited_api]
setup(
ext_modules=extensions
)
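# Illustrative usage (sketch, not part of the original file): build in place
# with the interpreter under test, e.g.
#
#     python setup.py build_ext --inplace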
| 435 | Python | 17.956521 | 73 | 0.673563 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/examples/limited_api/limited_api.c | #define Py_LIMITED_API 0x03060000
#include <Python.h>
#include <numpy/arrayobject.h>
#include <numpy/ufuncobject.h>
static PyModuleDef moduledef = {
.m_base = PyModuleDef_HEAD_INIT,
.m_name = "limited_api"
};
PyMODINIT_FUNC PyInit_limited_api(void)
{
import_array();
import_umath();
return PyModule_Create(&moduledef);
}
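/*
 * Illustrative usage (sketch, not part of the original file): once built,
 * the module only needs to import cleanly under the limited API, e.g.
 *
 *     python -c "import limited_api"
 */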
| 344 | C | 18.166666 | 39 | 0.694767 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/examples/cython/setup.py | """
Provide python-space access to the functions exposed in numpy/__init__.pxd
for testing.
"""
import numpy as np
from distutils.core import setup
from Cython.Build import cythonize
from setuptools.extension import Extension
import os
macros = [("NPY_NO_DEPRECATED_API", 0)]
checks = Extension(
"checks",
sources=[os.path.join('.', "checks.pyx")],
include_dirs=[np.get_include()],
define_macros=macros,
)
extensions = [checks]
setup(
ext_modules=cythonize(extensions)
)
| 496 | Python | 18.115384 | 74 | 0.707661 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/data/generate_umath_validation_data.cpp | #include <algorithm>
#include <fstream>
#include <iostream>
#include <cmath>
#include <random>
#include <cstdio>
#include <ctime>
#include <vector>
struct ufunc {
std::string name;
double (*f32func)(double);
long double (*f64func)(long double);
float f32ulp;
float f64ulp;
};
template <typename T>
T
RandomFloat(T a, T b)
{
T random = ((T)rand()) / (T)RAND_MAX;
T diff = b - a;
T r = random * diff;
return a + r;
}
template <typename T>
void
append_random_array(std::vector<T> &arr, T min, T max, size_t N)
{
for (size_t ii = 0; ii < N; ++ii)
arr.emplace_back(RandomFloat<T>(min, max));
}
template <typename T1, typename T2>
std::vector<T1>
computeTrueVal(const std::vector<T1> &in, T2 (*mathfunc)(T2))
{
std::vector<T1> out;
for (T1 elem : in) {
T2 elem_d = (T2)elem;
T1 out_elem = (T1)mathfunc(elem_d);
out.emplace_back(out_elem);
}
return out;
}
/*
* FP range:
* [-inf, -maxflt, -1., -minflt, -minden, 0., minden, minflt, 1., maxflt, inf]
*/
#define MINDEN std::numeric_limits<T>::denorm_min()
#define MINFLT std::numeric_limits<T>::min()
#define MAXFLT std::numeric_limits<T>::max()
#define INF std::numeric_limits<T>::infinity()
#define qNAN std::numeric_limits<T>::quiet_NaN()
#define sNAN std::numeric_limits<T>::signaling_NaN()
template <typename T>
std::vector<T>
generate_input_vector(std::string func)
{
std::vector<T> input = {MINDEN, -MINDEN, MINFLT, -MINFLT, MAXFLT,
-MAXFLT, INF, -INF, qNAN, sNAN,
-1.0, 1.0, 0.0, -0.0};
// [-1.0, 1.0]
if ((func == "arcsin") || (func == "arccos") || (func == "arctanh")) {
append_random_array<T>(input, -1.0, 1.0, 700);
}
// (0.0, INF]
else if ((func == "log2") || (func == "log10")) {
append_random_array<T>(input, 0.0, 1.0, 200);
append_random_array<T>(input, MINDEN, MINFLT, 200);
append_random_array<T>(input, MINFLT, 1.0, 200);
append_random_array<T>(input, 1.0, MAXFLT, 200);
}
// (-1.0, INF]
else if (func == "log1p") {
append_random_array<T>(input, -1.0, 1.0, 200);
append_random_array<T>(input, -MINFLT, -MINDEN, 100);
append_random_array<T>(input, -1.0, -MINFLT, 100);
append_random_array<T>(input, MINDEN, MINFLT, 100);
append_random_array<T>(input, MINFLT, 1.0, 100);
append_random_array<T>(input, 1.0, MAXFLT, 100);
}
// [1.0, INF]
else if (func == "arccosh") {
append_random_array<T>(input, 1.0, 2.0, 400);
append_random_array<T>(input, 2.0, MAXFLT, 300);
}
// [-INF, INF]
else {
append_random_array<T>(input, -1.0, 1.0, 100);
append_random_array<T>(input, MINDEN, MINFLT, 100);
append_random_array<T>(input, -MINFLT, -MINDEN, 100);
append_random_array<T>(input, MINFLT, 1.0, 100);
append_random_array<T>(input, -1.0, -MINFLT, 100);
append_random_array<T>(input, 1.0, MAXFLT, 100);
append_random_array<T>(input, -MAXFLT, -100.0, 100);
}
    // std::random_shuffle was removed in C++17; use std::shuffle with a
    // seeded engine for a deterministic ordering.
    std::mt19937 shuffle_rng(42);
    std::shuffle(input.begin(), input.end(), shuffle_rng);
return input;
}
int
main()
{
srand(42);
std::vector<struct ufunc> umathfunc = {
{"sin", sin, sin, 2.37, 3.3},
{"cos", cos, cos, 2.36, 3.38},
{"tan", tan, tan, 3.91, 3.93},
{"arcsin", asin, asin, 3.12, 2.55},
{"arccos", acos, acos, 2.1, 1.67},
{"arctan", atan, atan, 2.3, 2.52},
{"sinh", sinh, sinh, 1.55, 1.89},
{"cosh", cosh, cosh, 2.48, 1.97},
{"tanh", tanh, tanh, 1.38, 1.19},
{"arcsinh", asinh, asinh, 1.01, 1.48},
{"arccosh", acosh, acosh, 1.16, 1.05},
{"arctanh", atanh, atanh, 1.45, 1.46},
{"cbrt", cbrt, cbrt, 1.94, 1.82},
//{"exp",exp,exp,3.76,1.53},
{"exp2", exp2, exp2, 1.01, 1.04},
{"expm1", expm1, expm1, 2.62, 2.1},
//{"log",log,log,1.84,1.67},
{"log10", log10, log10, 3.5, 1.92},
{"log1p", log1p, log1p, 1.96, 1.93},
{"log2", log2, log2, 2.12, 1.84},
};
for (int ii = 0; ii < umathfunc.size(); ++ii) {
// ignore sin/cos
if ((umathfunc[ii].name != "sin") && (umathfunc[ii].name != "cos")) {
std::string fileName =
"umath-validation-set-" + umathfunc[ii].name + ".csv";
std::ofstream txtOut;
txtOut.open(fileName, std::ofstream::trunc);
txtOut << "dtype,input,output,ulperrortol" << std::endl;
// Single Precision
auto f32in = generate_input_vector<float>(umathfunc[ii].name);
auto f32out = computeTrueVal<float, double>(f32in,
umathfunc[ii].f32func);
for (int jj = 0; jj < f32in.size(); ++jj) {
txtOut << "np.float32" << std::hex << ",0x"
<< *reinterpret_cast<uint32_t *>(&f32in[jj]) << ",0x"
<< *reinterpret_cast<uint32_t *>(&f32out[jj]) << ","
<< ceil(umathfunc[ii].f32ulp) << std::endl;
}
// Double Precision
auto f64in = generate_input_vector<double>(umathfunc[ii].name);
auto f64out = computeTrueVal<double, long double>(
f64in, umathfunc[ii].f64func);
for (int jj = 0; jj < f64in.size(); ++jj) {
txtOut << "np.float64" << std::hex << ",0x"
<< *reinterpret_cast<uint64_t *>(&f64in[jj]) << ",0x"
<< *reinterpret_cast<uint64_t *>(&f64out[jj]) << ","
<< ceil(umathfunc[ii].f64ulp) << std::endl;
}
txtOut.close();
}
}
return 0;
}
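/*
 * Illustrative output (sketch, not part of the original file): each CSV row
 * stores the dtype, the raw bit patterns of input and expected output as
 * hex, and the rounded-up ULP tolerance. E.g. (output bits hypothetical):
 *
 *     np.float32,0x3f800000,0x3f5e2d58,2
 */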
| 5,840 | C++ | 33.157895 | 79 | 0.509075 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/compat/__init__.py | """
Compatibility module.
This module contains duplicated code from Python itself or 3rd party
extensions, which may be included for the following reasons:
* compatibility
* we may only need a small subset of the copied library/module
"""
from . import _inspect
from . import _pep440
from . import py3k
from ._inspect import getargspec, formatargspec
from .py3k import *
__all__ = []
__all__.extend(_inspect.__all__)
__all__.extend(py3k.__all__)
| 454 | Python | 21.749999 | 68 | 0.722467 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/compat/_pep440.py | """Utility to compare pep440 compatible version strings.
The LooseVersion and StrictVersion classes that distutils provides don't
work; they don't recognize anything like alpha/beta/rc/dev versions.
"""
# Copyright (c) Donald Stufft and individual contributors.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import collections
import itertools
import re
__all__ = [
"parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN",
]
# BEGIN packaging/_structures.py
class Infinity:
def __repr__(self):
return "Infinity"
def __hash__(self):
return hash(repr(self))
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not isinstance(other, self.__class__)
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __neg__(self):
return NegativeInfinity
Infinity = Infinity()
class NegativeInfinity:
def __repr__(self):
return "-Infinity"
def __hash__(self):
return hash(repr(self))
def __lt__(self, other):
return True
def __le__(self, other):
return True
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not isinstance(other, self.__class__)
def __gt__(self, other):
return False
def __ge__(self, other):
return False
def __neg__(self):
return Infinity
# BEGIN packaging/version.py
NegativeInfinity = NegativeInfinity()
_Version = collections.namedtuple(
"_Version",
["epoch", "release", "dev", "pre", "post", "local"],
)
def parse(version):
"""
Parse the given version string and return either a :class:`Version` object
    or a :class:`LegacyVersion` object depending on whether the given version
    is a valid PEP 440 version or a legacy version.
"""
try:
return Version(version)
except InvalidVersion:
return LegacyVersion(version)
class InvalidVersion(ValueError):
"""
An invalid version was found, users should refer to PEP 440.
"""
class _BaseVersion:
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, _BaseVersion):
return NotImplemented
return method(self._key, other._key)
class LegacyVersion(_BaseVersion):
def __init__(self, version):
self._version = str(version)
self._key = _legacy_cmpkey(self._version)
def __str__(self):
return self._version
def __repr__(self):
return "<LegacyVersion({0})>".format(repr(str(self)))
@property
def public(self):
return self._version
@property
def base_version(self):
return self._version
@property
def local(self):
return None
@property
def is_prerelease(self):
return False
@property
def is_postrelease(self):
return False
_legacy_version_component_re = re.compile(
r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)
_legacy_version_replacement_map = {
"pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}
def _parse_version_parts(s):
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version):
# We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
# greater than or equal to 0. This will effectively put the LegacyVersion,
    # which uses the de facto standard originally implemented by setuptools,
    # before all PEP 440 versions.
epoch = -1
    # This scheme is taken from setuptools' pkg_resources.parse_version, prior
    # to its adoption of the packaging library.
parts = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
parts = tuple(parts)
return epoch, parts
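# Illustrative sketch (not part of the original file): how `parse` orders
# dev/pre/post segments, and its legacy fallback. Uses only this module;
# never called at import time.
def _parse_ordering_sketch():
    assert parse("1.0.dev1") < parse("1.0rc1") < parse("1.0") < parse("1.0.post1")
    assert isinstance(parse("not a pep 440 version"), LegacyVersion)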
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
class Version(_BaseVersion):
_regex = re.compile(
r"^\s*" + VERSION_PATTERN + r"\s*$",
re.VERBOSE | re.IGNORECASE,
)
def __init__(self, version):
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion("Invalid version: '{0}'".format(version))
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(
match.group("pre_l"),
match.group("pre_n"),
),
post=_parse_letter_version(
match.group("post_l"),
match.group("post_n1") or match.group("post_n2"),
),
dev=_parse_letter_version(
match.group("dev_l"),
match.group("dev_n"),
),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self):
return "<Version({0})>".format(repr(str(self)))
def __str__(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
# Pre-release
if self._version.pre is not None:
parts.append("".join(str(x) for x in self._version.pre))
# Post-release
if self._version.post is not None:
parts.append(".post{0}".format(self._version.post[1]))
# Development release
if self._version.dev is not None:
parts.append(".dev{0}".format(self._version.dev[1]))
# Local version segment
if self._version.local is not None:
parts.append(
"+{0}".format(".".join(str(x) for x in self._version.local))
)
return "".join(parts)
@property
def public(self):
return str(self).split("+", 1)[0]
@property
def base_version(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
return "".join(parts)
@property
def local(self):
version_string = str(self)
if "+" in version_string:
return version_string.split("+", 1)[1]
@property
def is_prerelease(self):
return bool(self._version.dev or self._version.pre)
@property
def is_postrelease(self):
return bool(self._version.post)
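# Hedged property sketch (the version string is an assumption chosen to
# exercise every segment of the grammar above).
def _example_version_properties():
    v = Version("1!2.0rc1.post3.dev4+ubuntu.1")
    assert v.public == "1!2.0rc1.post3.dev4"
    assert v.base_version == "1!2.0"
    assert v.local == "ubuntu.1"
    assert v.is_prerelease and v.is_postrelease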
def _parse_letter_version(letter, number):
if letter:
# We assume there is an implicit 0 in a pre-release if there is
# no numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower-case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume that if we are given a number but not given a letter,
# then this is using the implicit post release syntax (e.g., 1.0-1)
letter = "post"
return letter, int(number)
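# Normalization sketch (inputs are illustrative): alternate spellings collapse
# to canonical letters, a missing numeral becomes 0, and a bare number means
# an implicit post release.
def _example_parse_letter_version():
    assert _parse_letter_version("alpha", None) == ("a", 0)
    assert _parse_letter_version("preview", "2") == ("rc", 2)
    assert _parse_letter_version(None, "1") == ("post", 1)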
_local_version_separators = re.compile(r"[\._-]")
def _parse_local_version(local):
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
            for part in _local_version_separators.split(local)
)
def _cmpkey(epoch, release, pre, post, dev, local):
    # When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we reverse the list, drop the now-leading
    # zeros until we reach something non-zero, reverse it back into the
    # correct order, and use the resulting tuple as our sorting key.
release = tuple(
reversed(list(
itertools.dropwhile(
lambda x: x == 0,
reversed(release),
)
))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre-segment, but we _only_ want to do this
# if there is no pre- or a post-segment. If we have one of those, then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
pre = -Infinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
pre = Infinity
# Versions without a post-segment should sort before those with one.
if post is None:
post = -Infinity
# Versions without a development segment should sort after those with one.
if dev is None:
dev = Infinity
if local is None:
# Versions without a local segment should sort before those with one.
local = -Infinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alphanumeric segments sort before numeric segments
# - Alphanumeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
local = tuple(
(i, "") if isinstance(i, int) else (-Infinity, i)
for i in local
)
return epoch, release, pre, post, dev, local
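# Ordering sketch built on the key above (sample version strings are
# assumptions): a dev release sorts before a pre-release of the same version,
# which sorts before the final release, and a local segment sorts after its
# public counterpart.
def _example_cmpkey_ordering():
    assert Version("1.0.dev0") < Version("1.0a0") < Version("1.0")
    assert Version("1.0") < Version("1.0+local")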
| 14,069 | Python | 27.831967 | 79 | 0.574597 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/compat/_inspect.py | """Subset of the inspect module from upstream Python.
We use this instead of upstream because upstream inspect is slow to import, and
significantly contributes to numpy import times. Importing this copy has almost
no overhead.
"""
import types
__all__ = ['getargspec', 'formatargspec']
# ----------------------------------------------------------- type-checking
def ismethod(object):
"""Return true if the object is an instance method.
Instance method objects provide these attributes:
__doc__ documentation string
__name__ name with which this method was defined
im_class class object in which this method belongs
im_func function object containing implementation of method
im_self instance to which this method is bound, or None
"""
return isinstance(object, types.MethodType)
def isfunction(object):
"""Return true if the object is a user-defined function.
Function objects provide these attributes:
__doc__ documentation string
__name__ name with which this function was defined
func_code code object containing compiled function bytecode
func_defaults tuple of any default values for arguments
func_doc (same as __doc__)
func_globals global namespace in which this function was defined
func_name (same as __name__)
"""
return isinstance(object, types.FunctionType)
def iscode(object):
"""Return true if the object is a code object.
Code objects provide these attributes:
co_argcount number of arguments (not including * or ** args)
co_code string of raw compiled bytecode
co_consts tuple of constants used in the bytecode
co_filename name of file in which this code object was created
co_firstlineno number of first line in Python source code
co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
co_lnotab encoded mapping of line numbers to bytecode indices
co_name name with which this code object was defined
co_names tuple of names of local variables
co_nlocals number of local variables
co_stacksize virtual machine stack space required
co_varnames tuple of names of arguments and local variables
"""
return isinstance(object, types.CodeType)
# ------------------------------------------------ argument list extraction
# These constants are from Python's compile.h.
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8
def getargs(co):
"""Get information about the arguments accepted by a code object.
Three things are returned: (args, varargs, varkw), where 'args' is
a list of argument names (possibly containing nested lists), and
'varargs' and 'varkw' are the names of the * and ** arguments or None.
"""
if not iscode(co):
raise TypeError('arg is not a code object')
nargs = co.co_argcount
names = co.co_varnames
args = list(names[:nargs])
    # Upstream inspect performs acrobatics here to handle anonymous (tuple)
    # arguments. We do not need to support them, so the handling is removed
    # to avoid importing the dis module.
for i in range(nargs):
if args[i][:1] in ['', '.']:
raise TypeError("tuple function arguments are not supported")
varargs = None
if co.co_flags & CO_VARARGS:
varargs = co.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if co.co_flags & CO_VARKEYWORDS:
varkw = co.co_varnames[nargs]
return args, varargs, varkw
def getargspec(func):
"""Get the names and default values of a function's arguments.
A tuple of four things is returned: (args, varargs, varkw, defaults).
'args' is a list of the argument names (it may contain nested lists).
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'defaults' is an n-tuple of the default values of the last n arguments.
"""
if ismethod(func):
func = func.__func__
if not isfunction(func):
raise TypeError('arg is not a Python function')
args, varargs, varkw = getargs(func.__code__)
return args, varargs, varkw, func.__defaults__
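# Hedged sketch (the sample function is an assumption): getargspec mirrors the
# classic inspect API, returning names, star-args and defaults.
def _example_getargspec():
    def sample(a, b=1, *rest, **opts):
        pass
    assert getargspec(sample) == (['a', 'b'], 'rest', 'opts', (1,))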
def getargvalues(frame):
"""Get information about arguments passed into a particular frame.
A tuple of four things is returned: (args, varargs, varkw, locals).
'args' is a list of the argument names (it may contain nested lists).
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'locals' is the locals dictionary of the given frame.
"""
args, varargs, varkw = getargs(frame.f_code)
return args, varargs, varkw, frame.f_locals
def joinseq(seq):
if len(seq) == 1:
return '(' + seq[0] + ',)'
else:
return '(' + ', '.join(seq) + ')'
def strseq(object, convert, join=joinseq):
"""Recursively walk a sequence, stringifying each element.
"""
if type(object) in [list, tuple]:
return join([strseq(_o, convert, join) for _o in object])
else:
return convert(object)
def formatargspec(args, varargs=None, varkw=None, defaults=None,
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
join=joinseq):
"""Format an argument spec from the 4 values returned by getargspec.
The first four arguments are (args, varargs, varkw, defaults). The
other four arguments are the corresponding optional formatting functions
that are called to turn names and values into strings. The ninth
argument is an optional function to format the sequence of arguments.
"""
specs = []
if defaults:
firstdefault = len(args) - len(defaults)
for i in range(len(args)):
spec = strseq(args[i], formatarg, join)
if defaults and i >= firstdefault:
spec = spec + formatvalue(defaults[i - firstdefault])
specs.append(spec)
if varargs is not None:
specs.append(formatvarargs(varargs))
if varkw is not None:
specs.append(formatvarkw(varkw))
return '(' + ', '.join(specs) + ')'
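# Formatting sketch (same assumed sample function): feeding a spec from
# getargspec back through formatargspec reproduces a readable signature.
def _example_formatargspec():
    def sample(a, b=1, *rest, **opts):
        pass
    assert formatargspec(*getargspec(sample)) == '(a, b=1, *rest, **opts)'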
def formatargvalues(args, varargs, varkw, locals,
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
join=joinseq):
"""Format an argument spec from the 4 values returned by getargvalues.
The first four arguments are (args, varargs, varkw, locals). The
next four arguments are the corresponding optional formatting functions
that are called to turn names and values into strings. The ninth
argument is an optional function to format the sequence of arguments.
"""
def convert(name, locals=locals,
formatarg=formatarg, formatvalue=formatvalue):
return formatarg(name) + formatvalue(locals[name])
specs = [strseq(arg, convert, join) for arg in args]
if varargs:
specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
if varkw:
specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
return '(' + ', '.join(specs) + ')'
| 7,447 | Python | 37.791666 | 79 | 0.633544 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/compat/py3k.py | """
Python 3.X compatibility tools.
While this file was originally intended for the Python 2 -> 3 transition,
it is now used to create a compatibility layer between different
minor versions of Python 3.
While the active version of numpy may not support a given version of python, we
allow downstream libraries to continue to use these shims for forward
compatibility with numpy while they transition their code to newer versions of
Python.
"""
__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar',
'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested',
'asstr', 'open_latin1', 'long', 'basestring', 'sixu',
'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path',
'pickle', 'contextlib_nullcontext', 'os_fspath', 'os_PathLike']
import sys
import os
from pathlib import Path
import io
try:
import pickle5 as pickle
except ImportError:
import pickle
long = int
integer_types = (int,)
basestring = str
unicode = str
bytes = bytes
def asunicode(s):
if isinstance(s, bytes):
return s.decode('latin1')
return str(s)
def asbytes(s):
if isinstance(s, bytes):
return s
return str(s).encode('latin1')
def asstr(s):
if isinstance(s, bytes):
return s.decode('latin1')
return str(s)
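# Round-trip sketch (values are illustrative): latin1 maps all 256 byte
# values bijectively, so bytes survive a bytes -> str -> bytes round trip.
def _example_latin1_roundtrip():
    raw = bytes(range(256))
    assert asbytes(asunicode(raw)) == raw
    assert asbytes("abc") == b"abc"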
def isfileobj(f):
return isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter))
def open_latin1(filename, mode='r'):
return open(filename, mode=mode, encoding='iso-8859-1')
def sixu(s):
return s
strchar = 'U'
def getexception():
return sys.exc_info()[1]
def asbytes_nested(x):
if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)):
return [asbytes_nested(y) for y in x]
else:
return asbytes(x)
def asunicode_nested(x):
if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)):
return [asunicode_nested(y) for y in x]
else:
return asunicode(x)
def is_pathlib_path(obj):
"""
Check whether obj is a `pathlib.Path` object.
Prefer using ``isinstance(obj, os.PathLike)`` instead of this function.
"""
return isinstance(obj, Path)
# from Python 3.7
class contextlib_nullcontext:
"""Context manager that does no additional processing.
Used as a stand-in for a normal context manager, when a particular
block of code is only sometimes used with a normal context manager:
cm = optional_cm if condition else nullcontext()
with cm:
# Perform operation, using optional_cm if condition is True
.. note::
Prefer using `contextlib.nullcontext` instead of this context manager.
"""
def __init__(self, enter_result=None):
self.enter_result = enter_result
def __enter__(self):
return self.enter_result
def __exit__(self, *excinfo):
pass
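# Usage sketch for the stand-in above (the condition and fallback value are
# assumptions): swap in a real context manager only when needed.
def _example_nullcontext(condition=False):
    import io
    cm = io.StringIO() if condition else contextlib_nullcontext("fallback")
    with cm as target:
        return target  # "fallback" when condition is False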
def npy_load_module(name, fn, info=None):
"""
    Load a module. Uses ``load_module``, which will be deprecated in Python
    3.12. An alternative that uses ``exec_module`` is in
numpy.distutils.misc_util.exec_mod_from_location
.. versionadded:: 1.11.2
Parameters
----------
name : str
Full module name.
fn : str
Path to module file.
info : tuple, optional
Only here for backward compatibility with Python 2.*.
Returns
-------
mod : module
"""
# Explicitly lazy import this to avoid paying the cost
# of importing importlib at startup
from importlib.machinery import SourceFileLoader
return SourceFileLoader(name, fn).load_module()
os_fspath = os.fspath
os_PathLike = os.PathLike
| 3,607 | Python | 25.144927 | 79 | 0.659551 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/compat/tests/test_compat.py | from os.path import join
from numpy.compat import isfileobj
from numpy.testing import assert_
from numpy.testing import tempdir
def test_isfileobj():
with tempdir(prefix="numpy_test_compat_") as folder:
filename = join(folder, 'a.bin')
with open(filename, 'wb') as f:
assert_(isfileobj(f))
with open(filename, 'ab') as f:
assert_(isfileobj(f))
with open(filename, 'rb') as f:
assert_(isfileobj(f))
| 476 | Python | 22.849999 | 56 | 0.621849 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/fft/helper.pyi | from typing import Any, TypeVar, overload
from numpy import generic, integer, floating, complexfloating
from numpy._typing import (
NDArray,
ArrayLike,
_ShapeLike,
_ArrayLike,
_ArrayLikeFloat_co,
_ArrayLikeComplex_co,
)
_SCT = TypeVar("_SCT", bound=generic)
__all__: list[str]
@overload
def fftshift(x: _ArrayLike[_SCT], axes: None | _ShapeLike = ...) -> NDArray[_SCT]: ...
@overload
def fftshift(x: ArrayLike, axes: None | _ShapeLike = ...) -> NDArray[Any]: ...
@overload
def ifftshift(x: _ArrayLike[_SCT], axes: None | _ShapeLike = ...) -> NDArray[_SCT]: ...
@overload
def ifftshift(x: ArrayLike, axes: None | _ShapeLike = ...) -> NDArray[Any]: ...
@overload
def fftfreq(
n: int | integer[Any],
d: _ArrayLikeFloat_co,
) -> NDArray[floating[Any]]: ...
@overload
def fftfreq(
n: int | integer[Any],
d: _ArrayLikeComplex_co,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def rfftfreq(
n: int | integer[Any],
d: _ArrayLikeFloat_co,
) -> NDArray[floating[Any]]: ...
@overload
def rfftfreq(
n: int | integer[Any],
d: _ArrayLikeComplex_co,
) -> NDArray[complexfloating[Any, Any]]: ...
| 1,152 | unknown | 23.020833 | 87 | 0.638889 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/fft/__init__.py | """
Discrete Fourier Transform (:mod:`numpy.fft`)
=============================================
.. currentmodule:: numpy.fft
The SciPy module `scipy.fft` is a more comprehensive superset
of ``numpy.fft``, which includes only a basic set of routines.
Standard FFTs
-------------
.. autosummary::
:toctree: generated/
fft Discrete Fourier transform.
ifft Inverse discrete Fourier transform.
fft2 Discrete Fourier transform in two dimensions.
ifft2 Inverse discrete Fourier transform in two dimensions.
   fftn      Discrete Fourier transform in N dimensions.
ifftn Inverse discrete Fourier transform in N dimensions.
Real FFTs
---------
.. autosummary::
:toctree: generated/
rfft Real discrete Fourier transform.
irfft Inverse real discrete Fourier transform.
rfft2 Real discrete Fourier transform in two dimensions.
irfft2 Inverse real discrete Fourier transform in two dimensions.
rfftn Real discrete Fourier transform in N dimensions.
irfftn Inverse real discrete Fourier transform in N dimensions.
Hermitian FFTs
--------------
.. autosummary::
:toctree: generated/
hfft Hermitian discrete Fourier transform.
ihfft Inverse Hermitian discrete Fourier transform.
Helper routines
---------------
.. autosummary::
:toctree: generated/
fftfreq Discrete Fourier Transform sample frequencies.
rfftfreq DFT sample frequencies (for usage with rfft, irfft).
fftshift Shift zero-frequency component to center of spectrum.
ifftshift Inverse of fftshift.
Background information
----------------------
Fourier analysis is fundamentally a method for expressing a function as a
sum of periodic components, and for recovering the function from those
components. When both the function and its Fourier transform are
replaced with discretized counterparts, it is called the discrete Fourier
transform (DFT). The DFT has become a mainstay of numerical computing in
part because of a very fast algorithm for computing it, called the Fast
Fourier Transform (FFT), which was known to Gauss (1805) and was brought
to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_
provide an accessible introduction to Fourier analysis and its
applications.
Because the discrete Fourier transform separates its input into
components that contribute at discrete frequencies, it has a great number
of applications in digital signal processing, e.g., for filtering, and in
this context the discretized input to the transform is customarily
referred to as a *signal*, which exists in the *time domain*. The output
is called a *spectrum* or *transform* and exists in the *frequency
domain*.
Implementation details
----------------------
There are many ways to define the DFT, varying in the sign of the
exponent, normalization, etc. In this implementation, the DFT is defined
as
.. math::
A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\}
\\qquad k = 0,\\ldots,n-1.
The DFT is in general defined for complex inputs and outputs, and a
single-frequency component at linear frequency :math:`f` is
represented by a complex exponential
:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t`
is the sampling interval.
The values in the result follow so-called "standard" order: If ``A =
fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of
the signal), which is always purely real for real inputs. Then ``A[1:n/2]``
contains the positive-frequency terms, and ``A[n/2+1:]`` contains the
negative-frequency terms, in order of decreasingly negative frequency.
For an even number of input points, ``A[n/2]`` represents both positive and
negative Nyquist frequency, and is also purely real for real input. For
an odd number of input points, ``A[(n-1)/2]`` contains the largest positive
frequency, while ``A[(n+1)/2]`` contains the largest negative frequency.
The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies
of corresponding elements in the output. The routine
``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the
zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes
that shift.
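A small sketch of this ordering for ``n = 4`` (values shown are the exact
frequencies, in cycles per sample):
>>> np.fft.fftfreq(4)
array([ 0.  ,  0.25, -0.5 , -0.25])
>>> np.fft.fftshift(np.fft.fftfreq(4))
array([-0.5 , -0.25,  0.  ,  0.25])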
When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)``
is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum.
The phase spectrum is obtained by ``np.angle(A)``.
The inverse DFT is defined as
.. math::
a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\}
\\qquad m = 0,\\ldots,n-1.
It differs from the forward transform by the sign of the exponential
argument and the default normalization by :math:`1/n`.
Type Promotion
--------------
`numpy.fft` promotes ``float32`` and ``complex64`` arrays to ``float64`` and
``complex128`` arrays respectively. For an FFT implementation that does not
promote input arrays, see `scipy.fftpack`.
Normalization
-------------
The argument ``norm`` indicates which direction of the pair of direct/inverse
transforms is scaled and with what normalization factor.
The default normalization (``"backward"``) has the direct (forward) transforms
unscaled and the inverse (backward) transforms scaled by :math:`1/n`. It is
possible to obtain unitary transforms by setting the keyword argument ``norm``
to ``"ortho"`` so that both direct and inverse transforms are scaled by
:math:`1/\\sqrt{n}`. Finally, setting the keyword argument ``norm`` to
``"forward"`` has the direct transforms scaled by :math:`1/n` and the inverse
transforms unscaled (i.e. exactly opposite to the default ``"backward"``).
`None` is an alias of the default option ``"backward"`` for backward
compatibility.
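A minimal round-trip sketch under the unitary convention (the sample array is
illustrative):
>>> a = np.arange(4.0)
>>> A = np.fft.fft(a, norm="ortho")
>>> np.allclose(np.fft.ifft(A, norm="ortho"), a)
True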
Real and Hermitian transforms
-----------------------------
When the input is purely real, its transform is Hermitian, i.e., the
component at frequency :math:`f_k` is the complex conjugate of the
component at frequency :math:`-f_k`, which means that for real
inputs there is no information in the negative frequency components that
is not already available from the positive frequency components.
The family of `rfft` functions is
designed to operate on real inputs, and exploits this symmetry by
computing only the positive frequency components, up to and including the
Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex
output points. The inverses of this family assume the same symmetry of
their input, and for an output of ``n`` points use ``n/2+1`` input points.
Correspondingly, when the spectrum is purely real, the signal is
Hermitian. The `hfft` family of functions exploits this symmetry by
using ``n/2+1`` complex points in the input (time) domain for ``n`` real
points in the frequency domain.
In higher dimensions, FFTs are used, e.g., for image analysis and
filtering. The computational efficiency of the FFT means that it can
also be a faster way to compute large convolutions, using the property
that a convolution in the time domain is equivalent to a point-by-point
multiplication in the frequency domain.
Higher dimensions
-----------------
In two dimensions, the DFT is defined as
.. math::
A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1}
a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\}
\\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1,
which extends in the obvious way to higher dimensions, and the inverses
in higher dimensions also extend in the same way.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
.. [NR] Press, W., Teukolsky, S., Vetterline, W.T., and Flannery, B.P.,
2007, *Numerical Recipes: The Art of Scientific Computing*, ch.
12-13. Cambridge Univ. Press, Cambridge, UK.
Examples
--------
For examples, see the various functions.
"""
from . import _pocketfft, helper
from ._pocketfft import *
from .helper import *
__all__ = _pocketfft.__all__.copy()
__all__ += helper.__all__
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
| 8,175 | Python | 37.384976 | 84 | 0.713394 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/fft/setup.py | import sys
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('fft', parent_package, top_path)
config.add_subpackage('tests')
# AIX needs to be told to use large file support - at all times
defs = [('_LARGE_FILES', None)] if sys.platform[:3] == "aix" else []
# Configure pocketfft_internal
config.add_extension('_pocketfft_internal',
sources=['_pocketfft.c'],
define_macros=defs,
)
config.add_data_files('*.pyi')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
| 728 | Python | 30.695651 | 72 | 0.614011 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/fft/_pocketfft.py | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1, norm="backward")
ifft(a, n=None, axis=-1, norm="backward")
rfft(a, n=None, axis=-1, norm="backward")
irfft(a, n=None, axis=-1, norm="backward")
hfft(a, n=None, axis=-1, norm="backward")
ihfft(a, n=None, axis=-1, norm="backward")
fftn(a, s=None, axes=None, norm="backward")
ifftn(a, s=None, axes=None, norm="backward")
rfftn(a, s=None, axes=None, norm="backward")
irfftn(a, s=None, axes=None, norm="backward")
fft2(a, s=None, axes=(-2,-1), norm="backward")
ifft2(a, s=None, axes=(-2, -1), norm="backward")
rfft2(a, s=None, axes=(-2,-1), norm="backward")
irfft2(a, s=None, axes=(-2, -1), norm="backward")
i = inverse transform
r = transform of purely real data
h = Hermitian transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
"""
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
import functools
from numpy.core import asarray, zeros, swapaxes, conjugate, take, sqrt
from . import _pocketfft_internal as pfi
from numpy.core.multiarray import normalize_axis_index
from numpy.core import overrides
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy.fft')
# `inv_norm` is a float by which the result of the transform needs to be
# divided. This replaces the original, more intuitive 'fct` parameter to avoid
# divisions by zero (or alternatively additional checks) in the case of
# zero-length axes during its computation.
def _raw_fft(a, n, axis, is_real, is_forward, inv_norm):
axis = normalize_axis_index(axis, a.ndim)
if n is None:
n = a.shape[axis]
fct = 1/inv_norm
if a.shape[axis] != n:
s = list(a.shape)
index = [slice(None)]*len(s)
if s[axis] > n:
index[axis] = slice(0, n)
a = a[tuple(index)]
else:
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[tuple(index)] = a
a = z
if axis == a.ndim-1:
r = pfi.execute(a, is_real, is_forward, fct)
else:
a = swapaxes(a, axis, -1)
r = pfi.execute(a, is_real, is_forward, fct)
r = swapaxes(r, axis, -1)
return r
def _get_forward_norm(n, norm):
if n < 1:
raise ValueError(f"Invalid number of FFT data points ({n}) specified.")
if norm is None or norm == "backward":
return 1
elif norm == "ortho":
return sqrt(n)
elif norm == "forward":
return n
raise ValueError(f'Invalid norm value {norm}; should be "backward",'
'"ortho" or "forward".')
def _get_backward_norm(n, norm):
if n < 1:
raise ValueError(f"Invalid number of FFT data points ({n}) specified.")
if norm is None or norm == "backward":
return n
elif norm == "ortho":
return sqrt(n)
elif norm == "forward":
return 1
raise ValueError(f'Invalid norm value {norm}; should be "backward", '
'"ortho" or "forward".')
_SWAP_DIRECTION_MAP = {"backward": "forward", None: "forward",
"ortho": "ortho", "forward": "backward"}
def _swap_direction(norm):
try:
return _SWAP_DIRECTION_MAP[norm]
except KeyError:
raise ValueError(f'Invalid norm value {norm}; should be "backward", '
'"ortho" or "forward".') from None
def _fft_dispatcher(a, n=None, axis=None, norm=None):
return (a,)
@array_function_dispatch(_fft_dispatcher)
def fft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axis` is not a valid axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([-2.33486982e-16+1.14423775e-17j, 8.00000000e+00-1.25557246e-15j,
2.33486982e-16+2.33486982e-16j, 0.00000000e+00+1.22464680e-16j,
-1.14423775e-17+2.33486982e-16j, 0.00000000e+00+5.20784380e-16j,
1.14423775e-17+1.14423775e-17j, 0.00000000e+00+1.22464680e-16j])
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
"""
a = asarray(a)
if n is None:
n = a.shape[axis]
inv_norm = _get_forward_norm(n, norm)
output = _raw_fft(a, n, axis, False, True, inv_norm)
return output
@array_function_dispatch(_fft_dispatcher)
def ifft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e.,
* ``a[0]`` should contain the zero frequency term,
* ``a[1:n//2]`` should contain the positive-frequency terms,
* ``a[n//2 + 1:]`` should contain the negative-frequency terms, in
increasing order starting from the most negative frequency.
For an even number of input points, ``A[n//2]`` represents the sum of
the values at the positive and negative Nyquist frequencies, as the two
are aliased together. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axis` is not a valid axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) # may vary
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, label='real')
[<matplotlib.lines.Line2D object at ...>]
>>> plt.plot(t, s.imag, '--', label='imaginary')
[<matplotlib.lines.Line2D object at ...>]
>>> plt.legend()
<matplotlib.legend.Legend object at ...>
>>> plt.show()
"""
a = asarray(a)
if n is None:
n = a.shape[axis]
inv_norm = _get_backward_norm(n, norm)
output = _raw_fft(a, n, axis, False, False, inv_norm)
return output
@array_function_dispatch(_fft_dispatcher)
def rfft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is not a valid axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) # may vary
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j]) # may vary
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
a = asarray(a)
if n is None:
n = a.shape[axis]
inv_norm = _get_forward_norm(n, norm)
output = _raw_fft(a, n, axis, True, True, inv_norm)
return output
@array_function_dispatch(_fft_dispatcher)
def irfft(a, n=None, axis=-1, norm=None):
"""
Computes the inverse of `rfft`.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is taken to be
``2*(m-1)`` where ``m`` is the length of the input along the axis
specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is not a valid axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
    The correct interpretation of the Hermitian input depends on the length of
    the original data, as given by `n`. This is because each input shape could
    correspond to either an odd or even length signal. By default, `irfft`
    assumes an even output length, which puts the last entry at the Nyquist
    frequency, aliasing it with its symmetric counterpart. By Hermitian
    symmetry, the value is thus treated as purely real. To avoid losing
    information, the correct length of the real input **must** be given.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary
>>> np.fft.irfft([1, -1j, -1])
array([0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
a = asarray(a)
if n is None:
n = (a.shape[axis] - 1) * 2
inv_norm = _get_backward_norm(n, norm)
output = _raw_fft(a, n, axis, True, False, inv_norm)
return output
@array_function_dispatch(_fft_dispatcher)
def hfft(a, n=None, axis=-1, norm=None):
"""
Compute the FFT of a signal that has Hermitian symmetry, i.e., a real
spectrum.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output. For `n` output
points, ``n//2 + 1`` input points are necessary. If the input is
longer than this, it is cropped. If it is shorter than this, it is
padded with zeros. If `n` is not given, it is taken to be ``2*(m-1)``
where ``m`` is the length of the input along the axis specified by
`axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*m - 2`` where ``m`` is the length of the transformed axis of
the input. To get an odd number of output points, `n` must be
        specified, for instance as ``2*m - 1`` in the typical case.
Raises
------
IndexError
If `axis` is not a valid axis of `a`.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time
domain and is real in the frequency domain. So here it's `hfft` for
which you must supply the length of the result if it is to be odd.
* even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
* odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
    The correct interpretation of the Hermitian input depends on the length of
    the original data, as given by `n`. This is because each input shape could
    correspond to either an odd or even length signal. By default, `hfft`
    assumes an even output length, which puts the last entry at the Nyquist
    frequency, aliasing it with its symmetric counterpart. By Hermitian
    symmetry, the value is thus treated as purely real. To avoid losing
    information, the shape of the full signal **must** be given.
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
array([15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j]) # may vary
>>> np.fft.hfft(signal[:4]) # Input first half of signal
array([15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
array([15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, -0.+0.j], # may vary
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
a = asarray(a)
if n is None:
n = (a.shape[axis] - 1) * 2
new_norm = _swap_direction(norm)
output = irfft(conjugate(a), n, axis, norm=new_norm)
return output
@array_function_dispatch(_fft_dispatcher)
def ihfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse FFT of a signal that has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT, the number of points along
transformation axis in the input to use. If `n` is smaller than
the length of the input, the input is cropped. If it is larger,
the input is padded with zeros. If `n` is not given, the length of
the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is ``n//2 + 1``.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time
domain and is real in the frequency domain. So here it's `hfft` for
which you must supply the length of the result if it is to be odd:
* even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
* odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
Examples
--------
>>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
>>> np.fft.ifft(spectrum)
array([1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.+0.j]) # may vary
>>> np.fft.ihfft(spectrum)
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j]) # may vary
"""
a = asarray(a)
if n is None:
n = a.shape[axis]
new_norm = _swap_direction(norm)
output = conjugate(rfft(a, n, axis, norm=new_norm))
return output
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
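# Resolution sketch (the array shape is an assumption): with both s and axes
# omitted, every axis is transformed at its current length.
def _example_cook_nd_args():
    a = zeros((2, 3, 4))
    s, axes = _cook_nd_args(a)
    assert (list(s), axes) == ([2, 3, 4], [-3, -2, -1])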
def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = list(range(len(axes)))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii], norm=norm)
return a
def _fftn_dispatcher(a, s=None, axes=None, norm=None):
return (a,)
@array_function_dispatch(_fftn_dispatcher)
def fftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
    fftshift : Shifts zero-frequency terms to the center of the array.
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j], # may vary
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j], # may vary
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, fft, norm)
@array_function_dispatch(_fftn_dispatcher)
def ifftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary
[0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft, norm)
@array_function_dispatch(_fftn_dispatcher)
def fft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional discrete Fourier Transform.
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``fft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 50. +0.j , 0. +0.j , 0. +0.j , # may vary
0. +0.j , 0. +0.j ],
[-12.5+17.20477401j, 0. +0.j , 0. +0.j ,
0. +0.j , 0. +0.j ],
[-12.5 +4.0614962j , 0. +0.j , 0. +0.j ,
0. +0.j , 0. +0.j ],
[-12.5 -4.0614962j , 0. +0.j , 0. +0.j ,
0. +0.j , 0. +0.j ],
[-12.5-17.20477401j, 0. +0.j , 0. +0.j ,
0. +0.j , 0. +0.j ]])
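    `fft2` agrees with `fftn` over the last two axes:
    >>> np.allclose(np.fft.fft2(a), np.fft.fftn(a, axes=(-2, -1)))
    True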
"""
return _raw_fftnd(a, s, axes, fft, norm)
@array_function_dispatch(_fftn_dispatcher)
def ifft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
        If `s` and `axes` have different lengths, or `axes` is not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary
[0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
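    The round trip through `fft2` recovers the input to numerical accuracy:
    >>> b = np.arange(8.).reshape(2, 4)
    >>> np.allclose(np.fft.ifft2(np.fft.fft2(b)), b)
    True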
"""
return _raw_fftnd(a, s, axes, ifft, norm)
@array_function_dispatch(_fftn_dispatcher)
def rfftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
        If `s` and `axes` have different lengths.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[8.+0.j, 0.+0.j], # may vary
[0.+0.j, 0.+0.j]],
[[0.+0.j, 0.+0.j],
[0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[4.+0.j, 0.+0.j], # may vary
[4.+0.j, 0.+0.j]],
[[0.+0.j, 0.+0.j],
[0.+0.j, 0.+0.j]]])
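    The last transformed axis is halved to ``s[-1]//2 + 1`` points:
    >>> np.fft.rfftn(np.ones((4, 6))).shape
    (4, 4)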
"""
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1], norm)
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii], norm)
return a
@array_function_dispatch(_fftn_dispatcher)
def rfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
        Shape (length of each transformed axis) of the FFT.
axes : sequence of ints, optional
        Axes over which to compute the FFT. Default: the last two axes.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.rfft2(a)
array([[ 50. +0.j , 0. +0.j , 0. +0.j ],
[-12.5+17.20477401j, 0. +0.j , 0. +0.j ],
[-12.5 +4.0614962j , 0. +0.j , 0. +0.j ],
[-12.5 -4.0614962j , 0. +0.j , 0. +0.j ],
[-12.5-17.20477401j, 0. +0.j , 0. +0.j ]])
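    Only the first ``n//2 + 1`` columns of the full `fft2` result are kept:
    >>> np.fft.rfft2(a).shape
    (5, 3)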
"""
return rfftn(a, s, axes, norm)
@array_function_dispatch(_fftn_dispatcher)
def irfftn(a, s=None, axes=None, norm=None):
"""
Computes the inverse of `rfftn`.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
        with zeros. If `s` is not given, the shape of the input along the axes
        specified by `axes` is used, except for the last axis, which is taken
        to be ``2*(m-1)`` where ``m`` is the length of the input along that
        axis.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
        Repeated indices in `axes` mean that the inverse transform over that
axis is performed multiple times.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
        If `s` and `axes` have different lengths.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
        of which `irfftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
The correct interpretation of the hermitian input depends on the shape of
the original data, as given by `s`. This is because each input shape could
correspond to either an odd or even length signal. By default, `irfftn`
    assumes an even output length, which puts the last entry at the Nyquist
    frequency, aliased with its symmetric counterpart. When performing the
final complex to real transform, the last value is thus treated as purely
real. To avoid losing information, the correct shape of the real input
**must** be given.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[1., 1.],
[1., 1.]],
[[1., 1.],
[1., 1.]],
[[1., 1.],
[1., 1.]]])
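    Without `s`, an odd-length final axis cannot be recovered (see Notes):
    >>> np.fft.irfftn(np.fft.rfftn(np.ones(5))).shape
    (4,)
    >>> np.fft.irfftn(np.fft.rfftn(np.ones(5)), s=(5,)).shape
    (5,)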
"""
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii], norm)
a = irfft(a, s[-1], axes[-1], norm)
return a
@array_function_dispatch(_fftn_dispatcher)
def irfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Computes the inverse of `rfft2`.
Parameters
----------
a : array_like
        The input array.
s : sequence of ints, optional
Shape of the real output to the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
norm : {"backward", "ortho", "forward"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is "backward".
Indicates which direction of the forward/backward pair of transforms
is scaled and with what normalization factor.
.. versionadded:: 1.20.0
The "backward", "forward" values were added.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
rfft2 : The forward two-dimensional FFT of real input,
of which `irfft2` is the inverse.
rfft : The one-dimensional FFT for real input.
irfft : The inverse of the one-dimensional FFT of real input.
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> A = np.fft.rfft2(a)
>>> np.fft.irfft2(A, s=a.shape)
array([[0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1.],
[2., 2., 2., 2., 2.],
[3., 3., 3., 3., 3.],
[4., 4., 4., 4., 4.]])
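    Omitting `s` assumes an even final axis, so the shape would differ here:
    >>> np.fft.irfft2(A).shape
    (5, 4)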
"""
return irfftn(a, s, axes, norm)
| 52,897 | Python | 36.121403 | 90 | 0.61132 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/fft/_pocketfft.pyi | from collections.abc import Sequence
from typing import Literal as L
from numpy import complex128, float64
from numpy._typing import ArrayLike, NDArray, _ArrayLikeNumber_co
_NormKind = L[None, "backward", "ortho", "forward"]
__all__: list[str]
def fft(
a: ArrayLike,
n: None | int = ...,
axis: int = ...,
norm: _NormKind = ...,
) -> NDArray[complex128]: ...
def ifft(
a: ArrayLike,
n: None | int = ...,
axis: int = ...,
norm: _NormKind = ...,
) -> NDArray[complex128]: ...
def rfft(
a: ArrayLike,
n: None | int = ...,
axis: int = ...,
norm: _NormKind = ...,
) -> NDArray[complex128]: ...
def irfft(
a: ArrayLike,
n: None | int = ...,
axis: int = ...,
norm: _NormKind = ...,
) -> NDArray[float64]: ...
# Input array must be compatible with `np.conjugate`
def hfft(
a: _ArrayLikeNumber_co,
n: None | int = ...,
axis: int = ...,
norm: _NormKind = ...,
) -> NDArray[float64]: ...
def ihfft(
a: ArrayLike,
n: None | int = ...,
axis: int = ...,
norm: _NormKind = ...,
) -> NDArray[complex128]: ...
def fftn(
a: ArrayLike,
s: None | Sequence[int] = ...,
axes: None | Sequence[int] = ...,
norm: _NormKind = ...,
) -> NDArray[complex128]: ...
def ifftn(
a: ArrayLike,
s: None | Sequence[int] = ...,
axes: None | Sequence[int] = ...,
norm: _NormKind = ...,
) -> NDArray[complex128]: ...
def rfftn(
a: ArrayLike,
s: None | Sequence[int] = ...,
axes: None | Sequence[int] = ...,
norm: _NormKind = ...,
) -> NDArray[complex128]: ...
def irfftn(
a: ArrayLike,
s: None | Sequence[int] = ...,
axes: None | Sequence[int] = ...,
norm: _NormKind = ...,
) -> NDArray[float64]: ...
def fft2(
a: ArrayLike,
s: None | Sequence[int] = ...,
axes: None | Sequence[int] = ...,
norm: _NormKind = ...,
) -> NDArray[complex128]: ...
def ifft2(
a: ArrayLike,
s: None | Sequence[int] = ...,
axes: None | Sequence[int] = ...,
norm: _NormKind = ...,
) -> NDArray[complex128]: ...
def rfft2(
a: ArrayLike,
s: None | Sequence[int] = ...,
axes: None | Sequence[int] = ...,
norm: _NormKind = ...,
) -> NDArray[complex128]: ...
def irfft2(
a: ArrayLike,
s: None | Sequence[int] = ...,
axes: None | Sequence[int] = ...,
norm: _NormKind = ...,
) -> NDArray[float64]: ...
| 2,371 | unknown | 20.761468 | 65 | 0.525938 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/fft/__init__.pyi | from numpy._pytesttester import PytestTester
from numpy.fft._pocketfft import (
fft as fft,
ifft as ifft,
rfft as rfft,
irfft as irfft,
hfft as hfft,
ihfft as ihfft,
rfftn as rfftn,
irfftn as irfftn,
rfft2 as rfft2,
irfft2 as irfft2,
fft2 as fft2,
ifft2 as ifft2,
fftn as fftn,
ifftn as ifftn,
)
from numpy.fft.helper import (
fftshift as fftshift,
ifftshift as ifftshift,
fftfreq as fftfreq,
rfftfreq as rfftfreq,
)
__all__: list[str]
__path__: list[str]
test: PytestTester
| 550 | unknown | 17.366666 | 44 | 0.649091 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/fft/helper.py | """
Discrete Fourier Transforms - helper.py
"""
from numpy.core import integer, empty, arange, asarray, roll
from numpy.core.overrides import array_function_dispatch, set_module
# Created by Pearu Peterson, September 2002
__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq']
integer_types = (int, integer)
def _fftshift_dispatcher(x, axes=None):
return (x,)
@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft')
def fftshift(x, axes=None):
"""
Shift the zero-frequency component to the center of the spectrum.
This function swaps half-spaces for all axes listed (defaults to all).
Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even.
Parameters
----------
x : array_like
Input array.
axes : int or shape tuple, optional
Axes over which to shift. Default is None, which shifts all axes.
Returns
-------
y : ndarray
The shifted array.
See Also
--------
ifftshift : The inverse of `fftshift`.
Examples
--------
>>> freqs = np.fft.fftfreq(10, 0.1)
>>> freqs
array([ 0., 1., 2., ..., -3., -2., -1.])
>>> np.fft.fftshift(freqs)
array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.])
Shift the zero-frequency component only along the second axis:
>>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)
>>> freqs
array([[ 0., 1., 2.],
[ 3., 4., -4.],
[-3., -2., -1.]])
>>> np.fft.fftshift(freqs, axes=(1,))
array([[ 2., 0., 1.],
[-4., 3., 4.],
[-1., -3., -2.]])
"""
x = asarray(x)
if axes is None:
axes = tuple(range(x.ndim))
shift = [dim // 2 for dim in x.shape]
elif isinstance(axes, integer_types):
shift = x.shape[axes] // 2
else:
shift = [x.shape[ax] // 2 for ax in axes]
return roll(x, shift, axes)
@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft')
def ifftshift(x, axes=None):
"""
The inverse of `fftshift`. Although identical for even-length `x`, the
functions differ by one sample for odd-length `x`.
Parameters
----------
x : array_like
Input array.
axes : int or shape tuple, optional
Axes over which to calculate. Defaults to None, which shifts all axes.
Returns
-------
y : ndarray
The shifted array.
See Also
--------
fftshift : Shift zero-frequency component to the center of the spectrum.
Examples
--------
>>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)
>>> freqs
array([[ 0., 1., 2.],
[ 3., 4., -4.],
[-3., -2., -1.]])
>>> np.fft.ifftshift(np.fft.fftshift(freqs))
array([[ 0., 1., 2.],
[ 3., 4., -4.],
[-3., -2., -1.]])
"""
x = asarray(x)
if axes is None:
axes = tuple(range(x.ndim))
shift = [-(dim // 2) for dim in x.shape]
elif isinstance(axes, integer_types):
shift = -(x.shape[axes] // 2)
else:
shift = [-(x.shape[ax] // 2) for ax in axes]
return roll(x, shift, axes)
@set_module('numpy.fft')
def fftfreq(n, d=1.0):
"""
Return the Discrete Fourier Transform sample frequencies.
The returned float array `f` contains the frequency bin centers in cycles
per unit of the sample spacing (with zero at the start). For instance, if
the sample spacing is in seconds, then the frequency unit is cycles/second.
Given a window length `n` and a sample spacing `d`::
f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even
f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd
Parameters
----------
n : int
Window length.
d : scalar, optional
Sample spacing (inverse of the sampling rate). Defaults to 1.
Returns
-------
f : ndarray
Array of length `n` containing the sample frequencies.
Examples
--------
>>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float)
>>> fourier = np.fft.fft(signal)
>>> n = signal.size
>>> timestep = 0.1
>>> freq = np.fft.fftfreq(n, d=timestep)
>>> freq
array([ 0. , 1.25, 2.5 , ..., -3.75, -2.5 , -1.25])
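    With the default spacing ``d=1``, frequencies are in cycles per sample:
    >>> np.fft.fftfreq(4)
    array([ 0.  ,  0.25, -0.5 , -0.25])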
"""
if not isinstance(n, integer_types):
raise ValueError("n should be an integer")
val = 1.0 / (n * d)
results = empty(n, int)
N = (n-1)//2 + 1
p1 = arange(0, N, dtype=int)
results[:N] = p1
p2 = arange(-(n//2), 0, dtype=int)
results[N:] = p2
return results * val
@set_module('numpy.fft')
def rfftfreq(n, d=1.0):
"""
Return the Discrete Fourier Transform sample frequencies
(for usage with rfft, irfft).
The returned float array `f` contains the frequency bin centers in cycles
per unit of the sample spacing (with zero at the start). For instance, if
the sample spacing is in seconds, then the frequency unit is cycles/second.
Given a window length `n` and a sample spacing `d`::
f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even
f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd
Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`)
the Nyquist frequency component is considered to be positive.
Parameters
----------
n : int
Window length.
d : scalar, optional
Sample spacing (inverse of the sampling rate). Defaults to 1.
Returns
-------
f : ndarray
Array of length ``n//2 + 1`` containing the sample frequencies.
Examples
--------
>>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float)
>>> fourier = np.fft.rfft(signal)
>>> n = signal.size
>>> sample_rate = 100
>>> freq = np.fft.fftfreq(n, d=1./sample_rate)
>>> freq
array([ 0., 10., 20., ..., -30., -20., -10.])
>>> freq = np.fft.rfftfreq(n, d=1./sample_rate)
>>> freq
array([ 0., 10., 20., 30., 40., 50.])
"""
if not isinstance(n, integer_types):
raise ValueError("n should be an integer")
val = 1.0/(n*d)
N = n//2 + 1
results = arange(0, N, dtype=int)
return results * val
| 6,154 | Python | 26.725225 | 79 | 0.544849 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/fft/tests/test_pocketfft.py | import numpy as np
import pytest
from numpy.random import random
from numpy.testing import (
assert_array_equal, assert_raises, assert_allclose
)
import threading
import queue
def fft1(x):
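    # Naive O(L**2) reference DFT: build an (L, L) phase matrix and sum
    # each row against the input; used to cross-check np.fft.fft below.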
L = len(x)
phase = -2j * np.pi * (np.arange(L) / L)
phase = np.arange(L).reshape(-1, 1) * phase
return np.sum(x*np.exp(phase), axis=1)
class TestFFTShift:
def test_fft_n(self):
assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0)
class TestFFT1D:
def test_identity(self):
maxlen = 512
x = random(maxlen) + 1j*random(maxlen)
xr = random(maxlen)
for i in range(1, maxlen):
assert_allclose(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i],
atol=1e-12)
assert_allclose(np.fft.irfft(np.fft.rfft(xr[0:i]), i),
xr[0:i], atol=1e-12)
def test_fft(self):
x = random(30) + 1j*random(30)
assert_allclose(fft1(x), np.fft.fft(x), atol=1e-6)
assert_allclose(fft1(x), np.fft.fft(x, norm="backward"), atol=1e-6)
assert_allclose(fft1(x) / np.sqrt(30),
np.fft.fft(x, norm="ortho"), atol=1e-6)
assert_allclose(fft1(x) / 30.,
np.fft.fft(x, norm="forward"), atol=1e-6)
@pytest.mark.parametrize('norm', (None, 'backward', 'ortho', 'forward'))
def test_ifft(self, norm):
x = random(30) + 1j*random(30)
assert_allclose(
x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm),
atol=1e-6)
# Ensure we get the correct error message
with pytest.raises(ValueError,
match='Invalid number of FFT data points'):
np.fft.ifft([], norm=norm)
def test_fft2(self):
x = random((30, 20)) + 1j*random((30, 20))
assert_allclose(np.fft.fft(np.fft.fft(x, axis=1), axis=0),
np.fft.fft2(x), atol=1e-6)
assert_allclose(np.fft.fft2(x),
np.fft.fft2(x, norm="backward"), atol=1e-6)
assert_allclose(np.fft.fft2(x) / np.sqrt(30 * 20),
np.fft.fft2(x, norm="ortho"), atol=1e-6)
assert_allclose(np.fft.fft2(x) / (30. * 20.),
np.fft.fft2(x, norm="forward"), atol=1e-6)
def test_ifft2(self):
x = random((30, 20)) + 1j*random((30, 20))
assert_allclose(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0),
np.fft.ifft2(x), atol=1e-6)
assert_allclose(np.fft.ifft2(x),
np.fft.ifft2(x, norm="backward"), atol=1e-6)
assert_allclose(np.fft.ifft2(x) * np.sqrt(30 * 20),
np.fft.ifft2(x, norm="ortho"), atol=1e-6)
assert_allclose(np.fft.ifft2(x) * (30. * 20.),
np.fft.ifft2(x, norm="forward"), atol=1e-6)
def test_fftn(self):
x = random((30, 20, 10)) + 1j*random((30, 20, 10))
assert_allclose(
np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0),
np.fft.fftn(x), atol=1e-6)
assert_allclose(np.fft.fftn(x),
np.fft.fftn(x, norm="backward"), atol=1e-6)
assert_allclose(np.fft.fftn(x) / np.sqrt(30 * 20 * 10),
np.fft.fftn(x, norm="ortho"), atol=1e-6)
assert_allclose(np.fft.fftn(x) / (30. * 20. * 10.),
np.fft.fftn(x, norm="forward"), atol=1e-6)
def test_ifftn(self):
x = random((30, 20, 10)) + 1j*random((30, 20, 10))
assert_allclose(
np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0),
np.fft.ifftn(x), atol=1e-6)
assert_allclose(np.fft.ifftn(x),
np.fft.ifftn(x, norm="backward"), atol=1e-6)
assert_allclose(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10),
np.fft.ifftn(x, norm="ortho"), atol=1e-6)
assert_allclose(np.fft.ifftn(x) * (30. * 20. * 10.),
np.fft.ifftn(x, norm="forward"), atol=1e-6)
def test_rfft(self):
x = random(30)
for n in [x.size, 2*x.size]:
for norm in [None, 'backward', 'ortho', 'forward']:
assert_allclose(
np.fft.fft(x, n=n, norm=norm)[:(n//2 + 1)],
np.fft.rfft(x, n=n, norm=norm), atol=1e-6)
assert_allclose(
np.fft.rfft(x, n=n),
np.fft.rfft(x, n=n, norm="backward"), atol=1e-6)
assert_allclose(
np.fft.rfft(x, n=n) / np.sqrt(n),
np.fft.rfft(x, n=n, norm="ortho"), atol=1e-6)
assert_allclose(
np.fft.rfft(x, n=n) / n,
np.fft.rfft(x, n=n, norm="forward"), atol=1e-6)
def test_irfft(self):
x = random(30)
assert_allclose(x, np.fft.irfft(np.fft.rfft(x)), atol=1e-6)
assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="backward"),
norm="backward"), atol=1e-6)
assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="ortho"),
norm="ortho"), atol=1e-6)
assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="forward"),
norm="forward"), atol=1e-6)
def test_rfft2(self):
x = random((30, 20))
assert_allclose(np.fft.fft2(x)[:, :11], np.fft.rfft2(x), atol=1e-6)
assert_allclose(np.fft.rfft2(x),
np.fft.rfft2(x, norm="backward"), atol=1e-6)
assert_allclose(np.fft.rfft2(x) / np.sqrt(30 * 20),
np.fft.rfft2(x, norm="ortho"), atol=1e-6)
assert_allclose(np.fft.rfft2(x) / (30. * 20.),
np.fft.rfft2(x, norm="forward"), atol=1e-6)
def test_irfft2(self):
x = random((30, 20))
assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x)), atol=1e-6)
assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="backward"),
norm="backward"), atol=1e-6)
assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"),
norm="ortho"), atol=1e-6)
assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="forward"),
norm="forward"), atol=1e-6)
def test_rfftn(self):
x = random((30, 20, 10))
assert_allclose(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x), atol=1e-6)
assert_allclose(np.fft.rfftn(x),
np.fft.rfftn(x, norm="backward"), atol=1e-6)
assert_allclose(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10),
np.fft.rfftn(x, norm="ortho"), atol=1e-6)
assert_allclose(np.fft.rfftn(x) / (30. * 20. * 10.),
np.fft.rfftn(x, norm="forward"), atol=1e-6)
def test_irfftn(self):
x = random((30, 20, 10))
assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x)), atol=1e-6)
assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="backward"),
norm="backward"), atol=1e-6)
assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"),
norm="ortho"), atol=1e-6)
assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="forward"),
norm="forward"), atol=1e-6)
def test_hfft(self):
x = random(14) + 1j*random(14)
x_herm = np.concatenate((random(1), x, random(1)))
x = np.concatenate((x_herm, x[::-1].conj()))
assert_allclose(np.fft.fft(x), np.fft.hfft(x_herm), atol=1e-6)
assert_allclose(np.fft.hfft(x_herm),
np.fft.hfft(x_herm, norm="backward"), atol=1e-6)
assert_allclose(np.fft.hfft(x_herm) / np.sqrt(30),
np.fft.hfft(x_herm, norm="ortho"), atol=1e-6)
assert_allclose(np.fft.hfft(x_herm) / 30.,
np.fft.hfft(x_herm, norm="forward"), atol=1e-6)
def test_ihfft(self):
x = random(14) + 1j*random(14)
x_herm = np.concatenate((random(1), x, random(1)))
x = np.concatenate((x_herm, x[::-1].conj()))
assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)), atol=1e-6)
assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm,
norm="backward"), norm="backward"), atol=1e-6)
assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm,
norm="ortho"), norm="ortho"), atol=1e-6)
assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm,
norm="forward"), norm="forward"), atol=1e-6)
@pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn,
np.fft.rfftn, np.fft.irfftn])
def test_axes(self, op):
x = random((30, 20, 10))
axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)]
for a in axes:
op_tr = op(np.transpose(x, a))
tr_op = np.transpose(op(x, axes=a), a)
assert_allclose(op_tr, tr_op, atol=1e-6)
def test_all_1d_norm_preserving(self):
# verify that round-trip transforms are norm-preserving
x = random(30)
x_norm = np.linalg.norm(x)
n = x.size * 2
func_pairs = [(np.fft.fft, np.fft.ifft),
(np.fft.rfft, np.fft.irfft),
# hfft: order so the first function takes x.size samples
# (necessary for comparison to x_norm above)
(np.fft.ihfft, np.fft.hfft),
]
for forw, back in func_pairs:
for n in [x.size, 2*x.size]:
for norm in [None, 'backward', 'ortho', 'forward']:
tmp = forw(x, n=n, norm=norm)
tmp = back(tmp, n=n, norm=norm)
assert_allclose(x_norm,
np.linalg.norm(tmp), atol=1e-6)
@pytest.mark.parametrize("dtype", [np.half, np.single, np.double,
np.longdouble])
def test_dtypes(self, dtype):
# make sure that all input precisions are accepted and internally
# converted to 64bit
x = random(30).astype(dtype)
assert_allclose(np.fft.ifft(np.fft.fft(x)), x, atol=1e-6)
assert_allclose(np.fft.irfft(np.fft.rfft(x)), x, atol=1e-6)
@pytest.mark.parametrize(
"dtype",
[np.float32, np.float64, np.complex64, np.complex128])
@pytest.mark.parametrize("order", ["F", 'non-contiguous'])
@pytest.mark.parametrize(
"fft",
[np.fft.fft, np.fft.fft2, np.fft.fftn,
np.fft.ifft, np.fft.ifft2, np.fft.ifftn])
def test_fft_with_order(dtype, order, fft):
# Check that FFT/IFFT produces identical results for C, Fortran and
# non contiguous arrays
rng = np.random.RandomState(42)
X = rng.rand(8, 7, 13).astype(dtype, copy=False)
# See discussion in pull/14178
_tol = 8.0 * np.sqrt(np.log2(X.size)) * np.finfo(X.dtype).eps
if order == 'F':
Y = np.asfortranarray(X)
else:
# Make a non contiguous array
Y = X[::-1]
X = np.ascontiguousarray(X[::-1])
if fft.__name__.endswith('fft'):
for axis in range(3):
X_res = fft(X, axis=axis)
Y_res = fft(Y, axis=axis)
assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol)
elif fft.__name__.endswith(('fft2', 'fftn')):
axes = [(0, 1), (1, 2), (0, 2)]
if fft.__name__.endswith('fftn'):
axes.extend([(0,), (1,), (2,), None])
for ax in axes:
X_res = fft(X, axes=ax)
Y_res = fft(Y, axes=ax)
assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol)
else:
raise ValueError()
class TestFFTThreadSafe:
threads = 16
input_shape = (800, 200)
def _test_mtsame(self, func, *args):
def worker(args, q):
q.put(func(*args))
q = queue.Queue()
expected = func(*args)
# Spin off a bunch of threads to call the same function simultaneously
t = [threading.Thread(target=worker, args=(args, q))
for i in range(self.threads)]
[x.start() for x in t]
[x.join() for x in t]
# Make sure all threads returned the correct value
for i in range(self.threads):
assert_array_equal(q.get(timeout=5), expected,
'Function returned wrong value in multithreaded context')
def test_fft(self):
        a = np.ones(self.input_shape) * (1 + 0j)
self._test_mtsame(np.fft.fft, a)
def test_ifft(self):
        a = np.ones(self.input_shape) * (1 + 0j)
self._test_mtsame(np.fft.ifft, a)
def test_rfft(self):
a = np.ones(self.input_shape)
self._test_mtsame(np.fft.rfft, a)
def test_irfft(self):
        a = np.ones(self.input_shape) * (1 + 0j)
self._test_mtsame(np.fft.irfft, a)
| 12,827 | Python | 40.649351 | 81 | 0.518126 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/fft/tests/test_helper.py | """Test functions for fftpack.helper module
Copied from fftpack.helper by Pearu Peterson, October 2005
"""
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy import fft, pi
class TestFFTShift:
def test_definition(self):
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
assert_array_almost_equal(fft.fftshift(x), y)
assert_array_almost_equal(fft.ifftshift(y), x)
x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
assert_array_almost_equal(fft.fftshift(x), y)
assert_array_almost_equal(fft.ifftshift(y), x)
def test_inverse(self):
for n in [1, 4, 9, 100, 211]:
x = np.random.random((n,))
assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x)
def test_axes_keyword(self):
freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]
shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted)
assert_array_almost_equal(fft.fftshift(freqs, axes=0),
fft.fftshift(freqs, axes=(0,)))
assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs)
assert_array_almost_equal(fft.ifftshift(shifted, axes=0),
fft.ifftshift(shifted, axes=(0,)))
assert_array_almost_equal(fft.fftshift(freqs), shifted)
assert_array_almost_equal(fft.ifftshift(shifted), freqs)
def test_uneven_dims(self):
""" Test 2D input, which has uneven dimension sizes """
freqs = [
[0, 1],
[2, 3],
[4, 5]
]
# shift in dimension 0
shift_dim0 = [
[4, 5],
[0, 1],
[2, 3]
]
assert_array_almost_equal(fft.fftshift(freqs, axes=0), shift_dim0)
assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=0), freqs)
assert_array_almost_equal(fft.fftshift(freqs, axes=(0,)), shift_dim0)
assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=[0]), freqs)
# shift in dimension 1
shift_dim1 = [
[1, 0],
[3, 2],
[5, 4]
]
assert_array_almost_equal(fft.fftshift(freqs, axes=1), shift_dim1)
assert_array_almost_equal(fft.ifftshift(shift_dim1, axes=1), freqs)
# shift in both dimensions
shift_dim_both = [
[5, 4],
[1, 0],
[3, 2]
]
assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both)
assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs)
assert_array_almost_equal(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both)
assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs)
# axes=None (default) shift in all dimensions
assert_array_almost_equal(fft.fftshift(freqs, axes=None), shift_dim_both)
assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=None), freqs)
assert_array_almost_equal(fft.fftshift(freqs), shift_dim_both)
assert_array_almost_equal(fft.ifftshift(shift_dim_both), freqs)
def test_equal_to_original(self):
""" Test that the new (>=v1.15) implementation (see #10073) is equal to the original (<=v1.14) """
from numpy.core import asarray, concatenate, arange, take
def original_fftshift(x, axes=None):
""" How fftshift was implemented in v1.14"""
tmp = asarray(x)
ndim = tmp.ndim
if axes is None:
axes = list(range(ndim))
elif isinstance(axes, int):
axes = (axes,)
y = tmp
for k in axes:
n = tmp.shape[k]
p2 = (n + 1) // 2
mylist = concatenate((arange(p2, n), arange(p2)))
y = take(y, mylist, k)
return y
def original_ifftshift(x, axes=None):
""" How ifftshift was implemented in v1.14 """
tmp = asarray(x)
ndim = tmp.ndim
if axes is None:
axes = list(range(ndim))
elif isinstance(axes, int):
axes = (axes,)
y = tmp
for k in axes:
n = tmp.shape[k]
p2 = n - (n + 1) // 2
mylist = concatenate((arange(p2, n), arange(p2)))
y = take(y, mylist, k)
return y
# create possible 2d array combinations and try all possible keywords
# compare output to original functions
for i in range(16):
for j in range(16):
for axes_keyword in [0, 1, None, (0,), (0, 1)]:
inp = np.random.rand(i, j)
assert_array_almost_equal(fft.fftshift(inp, axes_keyword),
original_fftshift(inp, axes_keyword))
assert_array_almost_equal(fft.ifftshift(inp, axes_keyword),
original_ifftshift(inp, axes_keyword))
class TestFFTFreq:
def test_definition(self):
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
assert_array_almost_equal(9*fft.fftfreq(9), x)
assert_array_almost_equal(9*pi*fft.fftfreq(9, pi), x)
x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
assert_array_almost_equal(10*fft.fftfreq(10), x)
assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x)
class TestRFFTFreq:
def test_definition(self):
x = [0, 1, 2, 3, 4]
assert_array_almost_equal(9*fft.rfftfreq(9), x)
assert_array_almost_equal(9*pi*fft.rfftfreq(9, pi), x)
x = [0, 1, 2, 3, 4, 5]
assert_array_almost_equal(10*fft.rfftfreq(10), x)
assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x)
class TestIRFFTN:
def test_not_last_axis_success(self):
ar, ai = np.random.random((2, 16, 8, 32))
a = ar + 1j*ai
axes = (-2,)
# Should not raise error
fft.irfftn(a, axes=axes)
| 6,148 | Python | 35.60119 | 106 | 0.530416 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/linalg/linalg.py | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import functools
import operator
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot,
add, multiply, sqrt, fastCopyAndTranspose, sum, isfinite,
finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs,
atleast_2d, intp, asanyarray, object_, matmul,
swapaxes, divide, count_nonzero, isnan, sign, argsort, sort,
reciprocal
)
from numpy.core.multiarray import normalize_axis_index
from numpy.core.overrides import set_module
from numpy.core import overrides
from numpy.lib.twodim_base import triu, eye
from numpy.linalg import _umath_linalg
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy.linalg')
fortran_int = intc
@set_module('numpy.linalg')
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
def _determine_error_states():
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
return [bufsize, invalid_call_errmask, None]
# Dealing with errors in _umath_linalg
_linalg_error_extobj = _determine_error_states()
del _determine_error_states
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def _raise_linalgerror_lstsq(err, flag):
raise LinAlgError("SVD did not converge in Linear Least Squares")
def _raise_linalgerror_qr(err, flag):
raise LinAlgError("Incorrect argument found while performing "
"QR factorization")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj) # make a copy
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is not type:
a = a.astype(type)
cast_arrays = cast_arrays + (_fastCT(a),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assert_2d(*arrays):
for a in arrays:
if a.ndim != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % a.ndim)
def _assert_stacked_2d(*arrays):
for a in arrays:
if a.ndim < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % a.ndim)
def _assert_stacked_square(*arrays):
for a in arrays:
m, n = a.shape[-2:]
if m != n:
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assert_finite(*arrays):
for a in arrays:
if not isfinite(a).all():
raise LinAlgError("Array must not contain infs or NaNs")
def _is_empty_2d(arr):
# check size first for efficiency
return arr.size == 0 and product(arr.shape[-2:]) == 0
def transpose(a):
"""
Transpose each matrix in a stack of matrices.
Unlike np.transpose, this only swaps the last two axes, rather than all of
them
Parameters
----------
a : (...,M,N) array_like
Returns
-------
aT : (...,N,M) ndarray
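    Examples
    --------
    A shape-only sketch (calling this internal helper directly):
    >>> transpose(np.zeros((2, 3, 4))).shape
    (2, 4, 3)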
"""
return swapaxes(a, -1, -2)
# Linear equations
def _tensorsolve_dispatcher(a, b, axes=None):
return (a, b)
@array_function_dispatch(_tensorsolve_dispatcher)
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=b.ndim)``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorinv, numpy.einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
if a.size != prod ** 2:
raise LinAlgError(
"Input arrays must satisfy the requirement \
prod(a.shape[b.ndim:]) == prod(a.shape[:b.ndim])"
)
a = a.reshape(prod, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def _solve_dispatcher(a, b):
return (a, b)
@array_function_dispatch(_solve_dispatcher)
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
See Also
--------
scipy.linalg.solve : Similar function in SciPy.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine ``_gesv``.
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``x0 + 2 * x1 = 1`` and ``3 * x0 + 5 * x1 = 2``:
>>> a = np.array([[1, 2], [3, 5]])
>>> b = np.array([1, 2])
>>> x = np.linalg.solve(a, b)
>>> x
array([-1., 1.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
    # We use the b = (..., M,) logic only if the number of extra dimensions
    # matches exactly.
if b.ndim == a.ndim - 1:
gufunc = _umath_linalg.solve1
else:
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
def _tensorinv_dispatcher(a, ind=None):
return (a,)
@array_function_dispatch(_tensorinv_dispatcher)
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def _unary_dispatcher(a):
return (a,)
@array_function_dispatch(_unary_dispatcher)
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
See Also
--------
scipy.linalg.inv : Similar function in SciPy.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5 , -0.5 ]],
[[-1.25, 0.75],
[ 0.75, -0.25]]])
"""
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
def _matrix_power_dispatcher(a, n):
return (a,)
@array_function_dispatch(_matrix_power_dispatcher)
def matrix_power(a, n):
"""
Raise a square matrix to the (integer) power `n`.
For positive integers `n`, the power is computed by repeated matrix
squarings and matrix multiplications. If ``n == 0``, the identity matrix
    of the same shape as `a` is returned. If ``n < 0``, the inverse
    is computed and then raised to the power ``abs(n)``.
.. note:: Stacks of object matrices are not currently supported.
Parameters
----------
a : (..., M, M) array_like
Matrix to be "powered".
n : int
The exponent can be any integer or long integer, positive,
negative, or zero.
Returns
-------
a**n : (..., M, M) ndarray or matrix object
        The return value is the same shape and type as `a`;
        if the exponent is positive or zero then the type of the
        elements is the same as those of `a`. If the exponent is
negative the elements are floating-point.
Raises
------
LinAlgError
For matrices that are not square or that (for negative powers) cannot
be inverted numerically.
Examples
--------
>>> from numpy.linalg import matrix_power
>>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
>>> matrix_power(i, 3) # should = -i
array([[ 0, -1],
[ 1, 0]])
>>> matrix_power(i, 0)
array([[1, 0],
[0, 1]])
>>> matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
array([[ 0., 1.],
[-1., 0.]])
Somewhat more sophisticated example
>>> q = np.zeros((4, 4))
>>> q[0:2, 0:2] = -i
>>> q[2:4, 2:4] = i
>>> q # one of the three quaternion units not equal to 1
array([[ 0., -1., 0., 0.],
[ 1., 0., 0., 0.],
[ 0., 0., 0., 1.],
[ 0., 0., -1., 0.]])
>>> matrix_power(q, 2) # = -np.eye(4)
array([[-1., 0., 0., 0.],
[ 0., -1., 0., 0.],
[ 0., 0., -1., 0.],
[ 0., 0., 0., -1.]])
"""
a = asanyarray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
try:
n = operator.index(n)
except TypeError as e:
raise TypeError("exponent must be an integer") from e
# Fall back on dot for object arrays. Object arrays are not supported by
# the current implementation of matmul using einsum
if a.dtype != object:
fmatmul = matmul
elif a.ndim == 2:
fmatmul = dot
else:
raise NotImplementedError(
"matrix_power not supported for stacks of object arrays")
if n == 0:
a = empty_like(a)
a[...] = eye(a.shape[-2], dtype=a.dtype)
return a
elif n < 0:
a = inv(a)
n = abs(n)
# short-cuts.
if n == 1:
return a
elif n == 2:
return fmatmul(a, a)
elif n == 3:
return fmatmul(fmatmul(a, a), a)
# Use binary decomposition to reduce the number of matrix multiplications.
# Here, we iterate over the bits of n, from LSB to MSB, raise `a` to
# increasing powers of 2, and multiply into the result as needed.
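    # For example, n = 13 = 0b1101 multiplies together a**1, a**4 and a**8.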
z = result = None
while n > 0:
z = a if z is None else fmatmul(z, z)
n, bit = divmod(n, 2)
if bit:
result = z if result is None else fmatmul(result, z)
return result
# Cholesky decomposition
@array_function_dispatch(_unary_dispatcher)
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. No
checking is performed to verify whether `a` is Hermitian or not.
In addition, only the lower-triangular and diagonal elements of `a`
are used. Only `L` is actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Lower-triangular Cholesky factor of `a`. Returns a matrix object if
`a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
See Also
--------
scipy.linalg.cholesky : Similar function in SciPy.
scipy.linalg.cholesky_banded : Cholesky decompose a banded Hermitian
positive-definite matrix.
scipy.linalg.cho_factor : Cholesky decomposition of a matrix, to use in
`scipy.linalg.cho_solve`.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, -0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[1.+0.j, 0.+0.j],
[0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[1.+0.j, 0.-2.j],
[0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[1.+0.j, 0.+0.j],
[0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> np.linalg.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
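    A minimal sketch of the two triangular solves from the Notes, using the
    general solver for brevity (a dedicated triangular solve would be faster):
    >>> b = np.array([1+2j, 2-1j])
    >>> y = np.linalg.solve(L, b)
    >>> x = np.linalg.solve(L.conj().T, y)
    >>> np.allclose(np.dot(A, x), b)
    True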
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
# QR decomposition
def _qr_dispatcher(a, mode=None):
return (a,)
@array_function_dispatch(_qr_dispatcher)
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (..., M, N)
An array-like object with the dimensionality of at least 2.
mode : {'reduced', 'complete', 'r', 'raw'}, optional
If K = min(M, N), then
* 'reduced' : returns q, r with dimensions
(..., M, K), (..., K, N) (default)
* 'complete' : returns q, r with dimensions (..., M, M), (..., M, N)
* 'r' : returns r only with dimensions (..., K, N)
* 'raw' : returns h, tau with dimensions (..., N, M), (..., K,)
        The options 'reduced', 'complete', and 'raw' are new in numpy 1.8;
        see the notes for more information. The default is 'reduced', and to
        maintain backward compatibility with earlier versions of numpy both
        it and the old default 'full' can be omitted. Note that the array h
        returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case. In case the number of dimensions in the input array is
greater than 2 then a stack of the matrices with above properties
is returned.
r : ndarray of float or complex, optional
The upper-triangular matrix or a stack of upper-triangular
matrices if the number of dimensions in the input array is greater
than 2.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
See Also
--------
scipy.linalg.qr : Similar function in SciPy.
scipy.linalg.rq : Compute RQ decomposition of a matrix.
Notes
-----
This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``,
``dorgqr``, and ``zungqr``.
For more information on the qr factorization, see for example:
https://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
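As a quick sketch of the shapes each mode produces for a 9 x 6 input
(illustrative only):
>>> a = np.zeros((9, 6))
>>> q, r = np.linalg.qr(a, mode='reduced')
>>> q.shape, r.shape
((9, 6), (6, 6))
>>> q, r = np.linalg.qr(a, mode='complete')
>>> q.shape, r.shape
((9, 9), (9, 6))
>>> np.linalg.qr(a, mode='r').shape
(6, 6)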
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> np.allclose(r, r2) # mode='r' returns the same r as the default mode
True
>>> a = np.random.normal(size=(3, 2, 2)) # Stack of 2 x 2 matrices as input
>>> q, r = np.linalg.qr(a)
>>> q.shape
(3, 2, 2)
>>> r.shape
(3, 2, 2)
>>> np.allclose(a, np.matmul(q, r))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,2), (1,2), (2,3)}. (Graph the points
and you'll see that it should be y0 = 1, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[m], [y0]])
b = array([[1], [2], [2], [3]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 2, 2, 3])
>>> q, r = np.linalg.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(np.linalg.inv(r), p)
array([ 1., 1.])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning, stacklevel=3)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated."
warnings.warn(msg, DeprecationWarning, stacklevel=3)
mode = 'economic'
else:
raise ValueError(f"Unrecognized mode '{mode}'")
a, wrap = _makearray(a)
_assert_stacked_2d(a)
m, n = a.shape[-2:]
t, result_t = _commonType(a)
a = a.astype(t, copy=True)
a = _to_native_byte_order(a)
mn = min(m, n)
if m <= n:
gufunc = _umath_linalg.qr_r_raw_m
else:
gufunc = _umath_linalg.qr_r_raw_n
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_qr)
tau = gufunc(a, signature=signature, extobj=extobj)
# handle modes that don't return q
if mode == 'r':
r = triu(a[..., :mn, :])
r = r.astype(result_t, copy=False)
return wrap(r)
if mode == 'raw':
q = transpose(a)
q = q.astype(result_t, copy=False)
tau = tau.astype(result_t, copy=False)
return wrap(q), tau
if mode == 'economic':
a = a.astype(result_t, copy=False)
return wrap(a)
# mc is the number of columns in the resulting q
# matrix. If the mode is complete then it is
# same as number of rows, and if the mode is reduced,
# then it is the minimum of number of rows and columns.
if mode == 'complete' and m > n:
mc = m
gufunc = _umath_linalg.qr_complete
else:
mc = mn
gufunc = _umath_linalg.qr_reduced
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_qr)
q = gufunc(a, tau, signature=signature, extobj=extobj)
r = triu(a[..., :mc, :])
q = q.astype(result_t, copy=False)
r = r.astype(result_t, copy=False)
return wrap(q), wrap(r)
# Eigenvalues
@array_function_dispatch(_unary_dispatcher)
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of real symmetric or complex Hermitian
(conjugate symmetric) arrays.
eigh : eigenvalues and eigenvectors of real symmetric or complex
Hermitian (conjugate symmetric) arrays.
scipy.linalg.eigvals : Similar function in SciPy.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the ``_geev`` LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by ``Q`` on one side and by ``Q.T`` on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.]) # random
"""
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
_assert_finite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def _eigvalsh_dispatcher(a, UPLO=None):
return (a,)
@array_function_dispatch(_eigvalsh_dispatcher)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a complex Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of real symmetric or complex Hermitian
(conjugate symmetric) arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
scipy.linalg.eigvalsh : Similar function in SciPy.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines ``_syevd``, ``_heevd``.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712]) # may vary
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[5.+2.j, 9.-2.j],
[0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eigvals()
>>> # with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[5.+0.j, 0.-2.j],
[0.+2.j, 2.+0.j]])
>>> wa = LA.eigvalsh(a)
>>> wb = LA.eigvals(b)
>>> wa; wb
array([1., 6.])
array([6.+0.j, 1.+0.j])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
@array_function_dispatch(_unary_dispatcher)
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a real symmetric or complex
Hermitian (conjugate symmetric) array.
eigvalsh : eigenvalues of a real symmetric or complex Hermitian
(conjugate symmetric) array.
scipy.linalg.eig : Similar function in SciPy that also solves the
generalized eigenvalue problem.
scipy.linalg.schur : Best choice for unitary and other non-Hermitian
normal matrices.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the ``_geev`` LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``a @ v = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``a @ v[:,i] = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
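A quick numerical check of this identity (a sketch with illustrative
values):
>>> a = np.array([[2., 1.], [1., 2.]])
>>> w, v = np.linalg.eig(a)
>>> np.allclose(a @ v, v * w) # columns: a @ v[:, i] == w[i] * v[:, i]
True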
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent and `a` can be diagonalized by
a similarity transformation using `v`, i.e, ``inv(v) @ a @ v`` is diagonal.
For non-Hermitian normal matrices the SciPy function `scipy.linalg.schur`
is preferred because the matrix `v` is guaranteed to be unitary, which is
not the case when using `eig`. The Schur factorization produces an
upper triangular matrix rather than a diagonal matrix, but for normal
matrices only the diagonal of the upper triangular matrix is needed, the
rest is roundoff error.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``y.T @ a = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([1., 2., 3.])
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([1.+1.j, 1.-1.j])
array([[0.70710678+0.j , 0.70710678-0.j ],
[0. -0.70710678j, 0. +0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that ``a.conj().T == a``, i.e., `a` is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([2.+0.j, 0.+0.j])
array([[ 0. +0.70710678j, 0.70710678+0.j ], # may vary
[ 0.70710678+0.j , -0. +0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([1., 1.])
array([[1., 0.],
[0., 1.]])
"""
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
_assert_finite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
@array_function_dispatch(_eigvalsh_dispatcher)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a complex Hermitian
(conjugate symmetric) or a real symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian or real symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of real symmetric or complex Hermitian
(conjugate symmetric) arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
scipy.linalg.eigh : Similar function in SciPy (but also solves the
generalized eigenvalue problem).
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines ``_syevd``,
``_heevd``.
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, -0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ], # may vary
[ 0. +0.38268343j, 0. -0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([5.55111512e-17+0.0000000e+00j, 0.00000000e+00+1.2490009e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, -0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ], # may vary
[ 0. +0.38268343j, 0. -0.92387953j]])
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[5.+2.j, 9.-2.j],
[0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eig() with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[5.+0.j, 0.-2.j],
[0.+2.j, 2.+0.j]])
>>> wa, va = LA.eigh(a)
>>> wb, vb = LA.eig(b)
>>> wa; wb
array([1., 6.])
array([6.+0.j, 1.+0.j])
>>> va; vb
array([[-0.4472136 +0.j , -0.89442719+0.j ], # may vary
[ 0. +0.89442719j, 0. -0.4472136j ]])
array([[ 0.89442719+0.j , -0. +0.4472136j],
[-0. +0.4472136j, 0.89442719+0.j ]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
# Singular value decomposition
def _svd_dispatcher(a, full_matrices=None, compute_uv=None, hermitian=None):
return (a,)
@array_function_dispatch(_svd_dispatcher)
def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
"""
Singular Value Decomposition.
When `a` is a 2D array, and ``full_matrices=False``, then it is
factorized as ``u @ np.diag(s) @ vh = (u * s) @ vh``, where
`u` and the Hermitian transpose of `vh` are 2D arrays with
orthonormal columns and `s` is a 1D array of `a`'s singular
values. When `a` is higher-dimensional, SVD is applied in
stacked mode as explained below.
Parameters
----------
a : (..., M, N) array_like
A real or complex array with ``a.ndim >= 2``.
full_matrices : bool, optional
If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and
``(..., N, N)``, respectively. Otherwise, the shapes are
``(..., M, K)`` and ``(..., K, N)``, respectively, where
``K = min(M, N)``.
compute_uv : bool, optional
Whether or not to compute `u` and `vh` in addition to `s`. True
by default.
hermitian : bool, optional
If True, `a` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
Defaults to False.
.. versionadded:: 1.17.0
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`. The size of the last two dimensions
depends on the value of `full_matrices`. Only returned when
`compute_uv` is True.
s : (..., K) array
Vector(s) with the singular values, within each vector sorted in
descending order. The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`.
vh : { (..., N, N), (..., K, N) } array
Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`. The size of the last two dimensions
depends on the value of `full_matrices`. Only returned when
`compute_uv` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
See Also
--------
scipy.linalg.svd : Similar function in SciPy.
scipy.linalg.svdvals : Compute singular values of a matrix.
Notes
-----
.. versionchanged:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine ``_gesdd``.
SVD is usually described for the factorization of a 2D matrix :math:`A`.
The higher-dimensional case will be discussed below. In the 2D case, SVD is
written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`,
:math:`S= \\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s`
contains the singular values of `a` and `u` and `vh` are unitary. The rows
of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are
the eigenvectors of :math:`A A^H`. In both cases the corresponding
(possibly non-zero) eigenvalues are given by ``s**2``.
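A small sketch of this relationship between the singular values and the
eigenvalues of :math:`A^H A` (illustrative values only):
>>> a = np.array([[1., 2.], [3., 4.], [5., 6.]])
>>> u, s, vh = np.linalg.svd(a, full_matrices=False)
>>> np.allclose(np.sort(s**2), np.linalg.eigvalsh(a.T @ a))
True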
If `a` has more than two dimensions, then broadcasting rules apply, as
explained in :ref:`routines.linalg-broadcasting`. This means that SVD is
working in "stacked" mode: it iterates over all indices of the first
``a.ndim - 2`` dimensions and for each combination SVD is applied to the
last two indices. The matrix `a` can be reconstructed from the
decomposition with either ``(u * s[..., None, :]) @ vh`` or
``u @ (s[..., None] * vh)``. (The ``@`` operator can be replaced by the
function ``np.matmul`` for Python versions below 3.5.)
If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are
all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
>>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3)
Reconstruction based on full SVD, 2D case:
>>> u, s, vh = np.linalg.svd(a, full_matrices=True)
>>> u.shape, s.shape, vh.shape
((9, 9), (6,), (6, 6))
>>> np.allclose(a, np.dot(u[:, :6] * s, vh))
True
>>> smat = np.zeros((9, 6), dtype=complex)
>>> smat[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
True
Reconstruction based on reduced SVD, 2D case:
>>> u, s, vh = np.linalg.svd(a, full_matrices=False)
>>> u.shape, s.shape, vh.shape
((9, 6), (6,), (6, 6))
>>> np.allclose(a, np.dot(u * s, vh))
True
>>> smat = np.diag(s)
>>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
True
Reconstruction based on full SVD, 4D case:
>>> u, s, vh = np.linalg.svd(b, full_matrices=True)
>>> u.shape, s.shape, vh.shape
((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3))
>>> np.allclose(b, np.matmul(u[..., :3] * s[..., None, :], vh))
True
>>> np.allclose(b, np.matmul(u[..., :3], s[..., None] * vh))
True
Reconstruction based on reduced SVD, 4D case:
>>> u, s, vh = np.linalg.svd(b, full_matrices=False)
>>> u.shape, s.shape, vh.shape
((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3))
>>> np.allclose(b, np.matmul(u * s[..., None, :], vh))
True
>>> np.allclose(b, np.matmul(u, s[..., None] * vh))
True
"""
import numpy as _nx
a, wrap = _makearray(a)
if hermitian:
# note: lapack svd returns eigenvalues with s ** 2 sorted descending,
# but eig returns s sorted ascending, so we re-order the eigenvalues
# and related arrays to have the correct order
if compute_uv:
s, u = eigh(a)
sgn = sign(s)
s = abs(s)
sidx = argsort(s)[..., ::-1]
sgn = _nx.take_along_axis(sgn, sidx, axis=-1)
s = _nx.take_along_axis(s, sidx, axis=-1)
u = _nx.take_along_axis(u, sidx[..., None, :], axis=-1)
# singular values are unsigned, move the sign into v
vt = transpose(u * sgn[..., None, :]).conjugate()
return wrap(u), s, wrap(vt)
else:
s = eigvalsh(a)
s = s[..., ::-1]
s = abs(s)
return sort(s)[..., ::-1]
_assert_stacked_2d(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m, n = a.shape[-2:]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vh = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vh = vh.astype(result_t, copy=False)
return wrap(u), s, wrap(vh)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
def _cond_dispatcher(x, p=None):
return (x,)
@array_function_dispatch(_cond_dispatcher)
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm used in the condition number computation:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the `numpy.inf` object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
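For the default 2-norm this definition can be checked directly (a
sketch with illustrative values):
>>> x = np.array([[1., 2.], [3., 4.]])
>>> np.allclose(np.linalg.cond(x),
...             np.linalg.norm(x, 2) * np.linalg.norm(np.linalg.inv(x), 2))
True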
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746 # may vary
>>> min(LA.svd(a, compute_uv=False))*min(LA.svd(LA.inv(a), compute_uv=False))
0.70710678118654746 # may vary
"""
x = asarray(x) # in case we have a matrix
if _is_empty_2d(x):
raise LinAlgError("cond is not defined on empty arrays")
if p is None or p == 2 or p == -2:
s = svd(x, compute_uv=False)
with errstate(all='ignore'):
if p == -2:
r = s[..., -1] / s[..., 0]
else:
r = s[..., 0] / s[..., -1]
else:
# Call inv(x) ignoring errors. The result array will
# contain nans in the entries where inversion failed.
_assert_stacked_2d(x)
_assert_stacked_square(x)
t, result_t = _commonType(x)
signature = 'D->D' if isComplexType(t) else 'd->d'
with errstate(all='ignore'):
invx = _umath_linalg.inv(x, signature=signature)
r = norm(x, p, axis=(-2, -1)) * norm(invx, p, axis=(-2, -1))
r = r.astype(result_t, copy=False)
# Convert nans to infs unless the original array had nan entries
r = asarray(r)
nan_mask = isnan(r)
if nan_mask.any():
nan_mask &= ~isnan(x).any(axis=(-2, -1))
if r.ndim > 0:
r[nan_mask] = Inf
elif nan_mask:
r[()] = Inf
# Convention is to return scalars instead of 0d arrays
if r.ndim == 0:
r = r[()]
return r
def _matrix_rank_dispatcher(A, tol=None, hermitian=None):
return (A,)
@array_function_dispatch(_matrix_rank_dispatcher)
def matrix_rank(A, tol=None, hermitian=False):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of singular values of the array that are
greater than `tol`.
.. versionchanged:: 1.14
Can now operate on stacks of matrices
Parameters
----------
A : {(M,), (..., M, N)} array_like
Input vector or stack of matrices.
tol : (...) array_like, float, optional
Threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M, N) * eps``.
.. versionchanged:: 1.14
Broadcasted against the stack of matrices
hermitian : bool, optional
If True, `A` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
Defaults to False.
.. versionadded:: 1.14
Returns
-------
rank : (...) array_like
Rank of A.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `A`. By default, we identify singular values less
than ``S.max() * max(M, N) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]_. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2]_.
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `A` that is an exact (in floating point) linear combination of other
columns in `A`. Computing the SVD on `A` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `A` rank deficient even if
the linear combination of some columns of `A` is not exactly equal to
another column of `A` but only numerically very close to another column of
`A`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(A.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `A` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
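As a sketch of supplying such an absolute tolerance (illustrative
values only):
>>> A = np.eye(4); A[-1, -1] = 1e-10 # nearly rank-deficient
>>> np.linalg.matrix_rank(A) # default tol keeps the tiny value
4
>>> np.linalg.matrix_rank(A, tol=1e-8) # absolute tol discards it
3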
References
----------
.. [1] MATLAB reference documentation, "Rank"
https://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
A = asarray(A)
if A.ndim < 2:
return int(not all(A==0))
S = svd(A, compute_uv=False, hermitian=hermitian)
if tol is None:
tol = S.max(axis=-1, keepdims=True) * max(A.shape[-2:]) * finfo(S.dtype).eps
else:
tol = asarray(tol)[..., newaxis]
return count_nonzero(S > tol, axis=-1)
# Generalized inverse
def _pinv_dispatcher(a, rcond=None, hermitian=None):
return (a,)
@array_function_dispatch(_pinv_dispatcher)
def pinv(a, rcond=1e-15, hermitian=False):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
.. versionchanged:: 1.14
Can now operate on stacks of matrices
Parameters
----------
a : (..., M, N) array_like
Matrix or stack of matrices to be pseudo-inverted.
rcond : (...) array_like of float
Cutoff for small singular values.
Singular values less than or equal to
``rcond * largest_singular_value`` are set to zero.
Broadcasts against the stack of matrices.
hermitian : bool, optional
If True, `a` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
Defaults to False.
.. versionadded:: 1.17.0
Returns
-------
B : (..., N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
See Also
--------
scipy.linalg.pinv : Similar function in SciPy.
scipy.linalg.pinvh : Compute the (Moore-Penrose) pseudo-inverse of a
Hermitian matrix.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
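A small sketch of this construction built directly from
`numpy.linalg.svd` (illustrative values only):
>>> a = np.array([[1., 2.], [3., 4.], [5., 6.]])
>>> u, s, vt = np.linalg.svd(a, full_matrices=False)
>>> B = (vt.T / s) @ u.T # V Sigma^+ U^T, all singular values kept
>>> np.allclose(B, np.linalg.pinv(a))
True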
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
rcond = asarray(rcond)
if _is_empty_2d(a):
m, n = a.shape[-2:]
res = empty(a.shape[:-2] + (n, m), dtype=a.dtype)
return wrap(res)
a = a.conjugate()
u, s, vt = svd(a, full_matrices=False, hermitian=hermitian)
# discard small singular values
cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True)
large = s > cutoff
s = divide(1, s, where=large, out=s)
s[~large] = 0
res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))
return wrap(res)
# Determinant
@array_function_dispatch(_unary_dispatcher)
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0
The determinant is computed via LU factorization using the LAPACK
routine ``z/dgetrf``.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529) # may vary
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
sign = sign.astype(result_t, copy=False)
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
@array_function_dispatch(_unary_dispatcher)
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way to represent the determinant, more suitable
for large matrices where underflow/overflow may occur.
scipy.linalg.det : Similar function in SciPy.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine ``z/dgetrf``.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0 # may vary
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def _lstsq_dispatcher(a, b, rcond=None):
return (a, b)
@array_function_dispatch(_lstsq_dispatcher)
def lstsq(a, b, rcond="warn"):
r"""
Return the least-squares solution to a linear matrix equation.
Computes the vector `x` that approximately solves the equation
``a @ x = b``. The equation may be under-, well-, or over-determined
(i.e., the number of linearly independent rows of `a` can be less than,
equal to, or greater than its number of linearly independent columns).
If `a` is square and of full rank, then `x` (but for round-off error)
is the "exact" solution of the equation. Else, `x` minimizes the
Euclidean 2-norm :math:`||b - ax||`. If there are multiple minimizing
solutions, the one with the smallest 2-norm :math:`||x||` is returned.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
For the purposes of rank determination, singular values are treated
as zero if they are smaller than `rcond` times the largest singular
value of `a`.
.. versionchanged:: 1.14.0
If not set, a FutureWarning is given. The previous default
of ``-1`` will use the machine precision as `rcond` parameter,
the new default will use the machine precision times `max(M, N)`.
To silence the warning and use the new default, use ``rcond=None``,
to keep using the old behavior, use ``rcond=-1``.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(1,), (K,), (0,)} ndarray
Sums of squared residuals: Squared Euclidean 2-norm for each column in
``b - a @ x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
See Also
--------
scipy.linalg.lstsq : Similar function in SciPy.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
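As a sketch of the minimum-norm property mentioned above, for an
underdetermined system (illustrative values only):
>>> a = np.array([[1., 1.]]) # one equation, two unknowns
>>> x, res, rank, sv = np.linalg.lstsq(a, np.array([2.]), rcond=None)
>>> x # smallest-norm solution among all exact solutions of x0 + x1 = 2
array([1., 1.])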
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y, rcond=None)[0]
>>> m, c
(1.0, -0.95) # may vary
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> _ = plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> _ = plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> _ = plt.legend()
>>> plt.show()
"""
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = b.ndim == 1
if is_1d:
b = b[:, newaxis]
_assert_2d(a, b)
m, n = a.shape[-2:]
m2, n_rhs = b.shape[-2:]
if m != m2:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
# Determine default rcond value
if rcond == "warn":
# 2017-08-19, 1.14.0
warnings.warn("`rcond` parameter will change to the default of "
"machine precision times ``max(M, N)`` where M and N "
"are the input matrix dimensions.\n"
"To use the future default and silence this warning "
"we advise to pass `rcond=None`, to keep using the old, "
"explicitly pass `rcond=-1`.",
FutureWarning, stacklevel=3)
rcond = -1
if rcond is None:
rcond = finfo(t).eps * max(n, m)
if m <= n:
gufunc = _umath_linalg.lstsq_m
else:
gufunc = _umath_linalg.lstsq_n
signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid'
extobj = get_linalg_error_extobj(_raise_linalgerror_lstsq)
if n_rhs == 0:
# lapack can't handle n_rhs = 0 - so allocate the array one larger in that axis
b = zeros(b.shape[:-2] + (m, n_rhs + 1), dtype=b.dtype)
x, resids, rank, s = gufunc(a, b, rcond, signature=signature, extobj=extobj)
if m == 0:
x[...] = 0
if n_rhs == 0:
# remove the item we added
x = x[..., :n_rhs]
resids = resids[..., :n_rhs]
# remove the axis we added
if is_1d:
x = x.squeeze(axis=-1)
# we probably should squeeze resids too, but we can't
# without breaking compatibility.
# as documented
if rank != n or m <= n:
resids = array([], result_real_t)
# coerce output arrays
s = s.astype(result_real_t, copy=False)
resids = resids.astype(result_real_t, copy=False)
x = x.astype(result_t, copy=True) # Copying lets the memory in r_parts be freed
return wrap(x), wrap(resids), rank, s
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by `numpy.linalg.norm()`.
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either `numpy.amin`, `numpy.amax`, or `numpy.sum`.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return value is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
y = moveaxis(x, (row_axis, col_axis), (-2, -1))
result = op(svd(y, compute_uv=False), axis=-1)
return result
def _norm_dispatcher(x, ord=None, axis=None, keepdims=None):
return (x,)
@array_function_dispatch(_norm_dispatcher)
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D, unless `ord`
is None. If both `axis` and `ord` are None, the 2-norm of
``x.ravel`` will be returned.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object. The default is None.
axis : {None, int, 2-tuple of ints}, optional.
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default
is None.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
See Also
--------
scipy.linalg.norm : Similar function in SciPy.
Notes
-----
For values of ``ord < 1``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
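For instance, the triangle inequality can fail for ``ord = 0.5`` (a
sketch with illustrative values):
>>> u, v = np.array([1., 0.]), np.array([0., 1.])
>>> np.linalg.norm(u + v, 0.5) > np.linalg.norm(u, 0.5) + np.linalg.norm(v, 0.5)
True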
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
Both the Frobenius and nuclear norm orders are only defined for
matrices and raise a ValueError when ``x.ndim != 2``.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, ..., 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4.0
>>> LA.norm(b, np.inf)
9.0
>>> LA.norm(a, -np.inf)
0.0
>>> LA.norm(b, -np.inf)
2.0
>>> LA.norm(a, 1)
20.0
>>> LA.norm(b, 1)
7.0
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6.0
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
0.0
>>> LA.norm(b, -2)
1.8570331885190563e-016 # may vary
>>> LA.norm(a, 3)
5.8480354764257312 # may vary
>>> LA.norm(a, -3)
0.0
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([ 6., 6.])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
if not issubclass(x.dtype.type, (inexact, object_)):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
x_real = x.real
x_imag = x.imag
sqnorm = x_real.dot(x_real) + x_imag.dot(x_imag)
else:
sqnorm = x.dot(x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except Exception as e:
raise TypeError("'axis' must be None, an integer or a tuple of integers") from e
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
# None of the str-type keywords for ord ('fro', 'nuc')
# are valid for vectors
elif isinstance(ord, str):
raise ValueError(f"Invalid norm order '{ord}' for vectors")
else:
absx = abs(x)
absx **= ord
ret = add.reduce(absx, axis=axis, keepdims=keepdims)
ret **= reciprocal(ord, dtype=ret.dtype)
return ret
elif len(axis) == 2:
row_axis, col_axis = axis
row_axis = normalize_axis_index(row_axis, nd)
col_axis = normalize_axis_index(col_axis, nd)
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def _multidot_dispatcher(arrays, *, out=None):
yield from arrays
yield out
@array_function_dispatch(_multidot_dispatcher)
def multi_dot(arrays, *, out=None):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a, b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
.. versionadded:: 1.19.0
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
numpy.dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] https://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
>>> A = np.random.random((10000, 100))
>>> B = np.random.random((100, 1000))
>>> C = np.random.random((1000, 5))
>>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> _ = multi_dot([A, B, C, D])
instead of::
>>> _ = np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> _ = A.dot(B).dot(C).dot(D)
Notes
-----
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B):
return A.shape[0] * A.shape[1] * B.shape[1]
Assume we have three matrices
:math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
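A rough check of these numbers using the cost helper above
(illustrative shapes only):
>>> def cost(A, B):
...     return A.shape[0] * A.shape[1] * B.shape[1]
>>> A = np.ones((10, 100)); B = np.ones((100, 5)); C = np.ones((5, 50))
>>> cost(A, B) + cost(A @ B, C) # (AB)C
7500
>>> cost(B, C) + cost(A, B @ C) # A(BC)
75000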
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1], out=out)
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assert_2d(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2], out=out)
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1, out=out)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
def _multi_dot_three(A, B, C, out=None):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`.
"""
a0, a1b0 = A.shape
b1c0, c1 = C.shape
# cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
cost1 = a0 * b1c0 * (a1b0 + c1)
# cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
cost2 = a1b0 * c1 * (a0 + b1c0)
if cost1 < cost2:
return dot(dot(A, B), C, out=out)
else:
return dot(A, dot(B, C), out=out)
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
Return an np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`.
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
def _multi_dot(arrays, order, i, j, out=None):
"""Actually do the multiplication with the given order."""
if i == j:
# the initial call with non-None out should never get here
assert out is None
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j),
out=out)
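# --- Illustrative sketch, not part of the original module ---
# multi_dot should agree numerically with an explicitly chained dot; only the
# evaluation order (and hence the flop count) differs. The helper name is
# hypothetical.
def _multi_dot_roundtrip_example():
    import numpy as np
    rng = np.random.default_rng(0)
    A = rng.random((10, 100))
    B = rng.random((100, 5))
    C = rng.random((5, 50))
    expected = np.dot(np.dot(A, B), C)
    np.testing.assert_allclose(np.linalg.multi_dot([A, B, C]), expected)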
| 89,813 | Python | 30.950907 | 92 | 0.576086 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/linalg/linalg.pyi | from collections.abc import Iterable
from typing import (
Literal as L,
overload,
TypeVar,
Any,
SupportsIndex,
SupportsInt,
)
from numpy import (
generic,
floating,
complexfloating,
int32,
float64,
complex128,
)
from numpy.linalg import LinAlgError as LinAlgError
from numpy._typing import (
NDArray,
ArrayLike,
_ArrayLikeInt_co,
_ArrayLikeFloat_co,
_ArrayLikeComplex_co,
_ArrayLikeTD64_co,
_ArrayLikeObject_co,
)
_T = TypeVar("_T")
_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
_2Tuple = tuple[_T, _T]
_ModeKind = L["reduced", "complete", "r", "raw"]
__all__: list[str]
@overload
def tensorsolve(
a: _ArrayLikeInt_co,
b: _ArrayLikeInt_co,
    axes: None | Iterable[int] = ...,
) -> NDArray[float64]: ...
@overload
def tensorsolve(
a: _ArrayLikeFloat_co,
b: _ArrayLikeFloat_co,
    axes: None | Iterable[int] = ...,
) -> NDArray[floating[Any]]: ...
@overload
def tensorsolve(
a: _ArrayLikeComplex_co,
b: _ArrayLikeComplex_co,
    axes: None | Iterable[int] = ...,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def solve(
a: _ArrayLikeInt_co,
b: _ArrayLikeInt_co,
) -> NDArray[float64]: ...
@overload
def solve(
a: _ArrayLikeFloat_co,
b: _ArrayLikeFloat_co,
) -> NDArray[floating[Any]]: ...
@overload
def solve(
a: _ArrayLikeComplex_co,
b: _ArrayLikeComplex_co,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def tensorinv(
a: _ArrayLikeInt_co,
ind: int = ...,
) -> NDArray[float64]: ...
@overload
def tensorinv(
a: _ArrayLikeFloat_co,
ind: int = ...,
) -> NDArray[floating[Any]]: ...
@overload
def tensorinv(
a: _ArrayLikeComplex_co,
ind: int = ...,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def inv(a: _ArrayLikeInt_co) -> NDArray[float64]: ...
@overload
def inv(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...
@overload
def inv(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
# TODO: The supported input and output dtypes are dependent on the value of `n`.
# For example: `n < 0` always casts integer types to float64
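# Illustrative sketch (assumed behavior, not part of the stub): a negative
# exponent routes through `inv`, so integer input comes back as float64:
#   np.linalg.matrix_power(np.eye(2, dtype=np.int64), 2).dtype   # -> int64
#   np.linalg.matrix_power(np.eye(2, dtype=np.int64), -1).dtype  # -> float64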
def matrix_power(
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
n: SupportsIndex,
) -> NDArray[Any]: ...
@overload
def cholesky(a: _ArrayLikeInt_co) -> NDArray[float64]: ...
@overload
def cholesky(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...
@overload
def cholesky(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def qr(a: _ArrayLikeInt_co, mode: _ModeKind = ...) -> _2Tuple[NDArray[float64]]: ...
@overload
def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = ...) -> _2Tuple[NDArray[floating[Any]]]: ...
@overload
def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = ...) -> _2Tuple[NDArray[complexfloating[Any, Any]]]: ...
@overload
def eigvals(a: _ArrayLikeInt_co) -> NDArray[float64] | NDArray[complex128]: ...
@overload
def eigvals(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]] | NDArray[complexfloating[Any, Any]]: ...
@overload
def eigvals(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def eigvalsh(a: _ArrayLikeInt_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[float64]: ...
@overload
def eigvalsh(a: _ArrayLikeComplex_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[floating[Any]]: ...
@overload
def eig(a: _ArrayLikeInt_co) -> _2Tuple[NDArray[float64]] | _2Tuple[NDArray[complex128]]: ...
@overload
def eig(a: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]] | _2Tuple[NDArray[complexfloating[Any, Any]]]: ...
@overload
def eig(a: _ArrayLikeComplex_co) -> _2Tuple[NDArray[complexfloating[Any, Any]]]: ...
@overload
def eigh(
a: _ArrayLikeInt_co,
UPLO: L["L", "U", "l", "u"] = ...,
) -> tuple[NDArray[float64], NDArray[float64]]: ...
@overload
def eigh(
a: _ArrayLikeFloat_co,
UPLO: L["L", "U", "l", "u"] = ...,
) -> tuple[NDArray[floating[Any]], NDArray[floating[Any]]]: ...
@overload
def eigh(
a: _ArrayLikeComplex_co,
UPLO: L["L", "U", "l", "u"] = ...,
) -> tuple[NDArray[floating[Any]], NDArray[complexfloating[Any, Any]]]: ...
@overload
def svd(
a: _ArrayLikeInt_co,
full_matrices: bool = ...,
compute_uv: L[True] = ...,
hermitian: bool = ...,
) -> tuple[
NDArray[float64],
NDArray[float64],
NDArray[float64],
]: ...
@overload
def svd(
a: _ArrayLikeFloat_co,
full_matrices: bool = ...,
compute_uv: L[True] = ...,
hermitian: bool = ...,
) -> tuple[
NDArray[floating[Any]],
NDArray[floating[Any]],
NDArray[floating[Any]],
]: ...
@overload
def svd(
a: _ArrayLikeComplex_co,
full_matrices: bool = ...,
compute_uv: L[True] = ...,
hermitian: bool = ...,
) -> tuple[
NDArray[complexfloating[Any, Any]],
NDArray[floating[Any]],
NDArray[complexfloating[Any, Any]],
]: ...
@overload
def svd(
a: _ArrayLikeInt_co,
full_matrices: bool = ...,
compute_uv: L[False] = ...,
hermitian: bool = ...,
) -> NDArray[float64]: ...
@overload
def svd(
a: _ArrayLikeComplex_co,
full_matrices: bool = ...,
compute_uv: L[False] = ...,
hermitian: bool = ...,
) -> NDArray[floating[Any]]: ...
# TODO: Returns a scalar for 2D arrays and
# a `(x.ndim - 2)`-dimensional array otherwise
def cond(x: _ArrayLikeComplex_co, p: None | float | L["fro", "nuc"] = ...) -> Any: ...
# TODO: Returns `int` for <2D arrays and `intp` otherwise
def matrix_rank(
A: _ArrayLikeComplex_co,
tol: None | _ArrayLikeFloat_co = ...,
hermitian: bool = ...,
) -> Any: ...
@overload
def pinv(
a: _ArrayLikeInt_co,
rcond: _ArrayLikeFloat_co = ...,
hermitian: bool = ...,
) -> NDArray[float64]: ...
@overload
def pinv(
a: _ArrayLikeFloat_co,
rcond: _ArrayLikeFloat_co = ...,
hermitian: bool = ...,
) -> NDArray[floating[Any]]: ...
@overload
def pinv(
a: _ArrayLikeComplex_co,
rcond: _ArrayLikeFloat_co = ...,
hermitian: bool = ...,
) -> NDArray[complexfloating[Any, Any]]: ...
# TODO: Returns a 2-tuple of scalars for 2D arrays and
# a 2-tuple of `(a.ndim - 2)`-dimensional arrays otherwise
def slogdet(a: _ArrayLikeComplex_co) -> _2Tuple[Any]: ...
# TODO: Returns a 2-tuple of scalars for 2D arrays and
# a 2-tuple of `(a.ndim - 2)`-dimensional arrays otherwise
def det(a: _ArrayLikeComplex_co) -> Any: ...
@overload
def lstsq(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: None | float = ...) -> tuple[
NDArray[float64],
NDArray[float64],
int32,
NDArray[float64],
]: ...
@overload
def lstsq(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: None | float = ...) -> tuple[
NDArray[floating[Any]],
NDArray[floating[Any]],
int32,
NDArray[floating[Any]],
]: ...
@overload
def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: None | float = ...) -> tuple[
NDArray[complexfloating[Any, Any]],
NDArray[floating[Any]],
int32,
NDArray[floating[Any]],
]: ...
@overload
def norm(
x: ArrayLike,
ord: None | float | L["fro", "nuc"] = ...,
axis: None = ...,
keepdims: bool = ...,
) -> floating[Any]: ...
@overload
def norm(
x: ArrayLike,
ord: None | float | L["fro", "nuc"] = ...,
axis: SupportsInt | SupportsIndex | tuple[int, ...] = ...,
keepdims: bool = ...,
) -> Any: ...
# TODO: Returns a scalar or array
def multi_dot(
arrays: Iterable[_ArrayLikeComplex_co | _ArrayLikeObject_co | _ArrayLikeTD64_co],
*,
out: None | NDArray[Any] = ...,
) -> Any: ...
| 7,440 | unknown | 25.293286 | 116 | 0.617339 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/linalg/__init__.py | """
``numpy.linalg``
================
The NumPy linear algebra functions rely on BLAS and LAPACK to provide efficient
low level implementations of standard linear algebra algorithms. Those
libraries may be provided by NumPy itself using C versions of a subset of their
reference implementations but, when possible, highly optimized libraries that
take advantage of specialized processor functionality are preferred. Examples
of such libraries are OpenBLAS, MKL (TM), and ATLAS. Because those libraries
are multithreaded and processor dependent, environmental variables and external
packages such as threadpoolctl may be needed to control the number of threads
or specify the processor architecture.
- OpenBLAS: https://www.openblas.net/
- threadpoolctl: https://github.com/joblib/threadpoolctl
Please note that the most-used linear algebra functions in NumPy are present in
the main ``numpy`` namespace rather than in ``numpy.linalg``. They are:
``dot``, ``vdot``, ``inner``, ``outer``, ``matmul``, ``tensordot``, ``einsum``,
``einsum_path`` and ``kron``.
Functions present in numpy.linalg are listed below.
Matrix and vector products
--------------------------
multi_dot
matrix_power
Decompositions
--------------
cholesky
qr
svd
Matrix eigenvalues
------------------
eig
eigh
eigvals
eigvalsh
Norms and other numbers
-----------------------
norm
cond
det
matrix_rank
slogdet
Solving equations and inverting matrices
----------------------------------------
solve
tensorsolve
lstsq
inv
pinv
tensorinv
Exceptions
----------
LinAlgError
"""
# To get sub-modules
from . import linalg
from .linalg import *
__all__ = linalg.__all__.copy()
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
| 1,813 | Python | 21.395061 | 79 | 0.685604 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/linalg/setup.py | import os
import sys
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.ccompiler_opt import NPY_CXX_FLAGS
from numpy.distutils.system_info import get_info, system_info
config = Configuration('linalg', parent_package, top_path)
config.add_subpackage('tests')
# Configure lapack_lite
src_dir = 'lapack_lite'
lapack_lite_src = [
os.path.join(src_dir, 'python_xerbla.c'),
os.path.join(src_dir, 'f2c_z_lapack.c'),
os.path.join(src_dir, 'f2c_c_lapack.c'),
os.path.join(src_dir, 'f2c_d_lapack.c'),
os.path.join(src_dir, 'f2c_s_lapack.c'),
os.path.join(src_dir, 'f2c_lapack.c'),
os.path.join(src_dir, 'f2c_blas.c'),
os.path.join(src_dir, 'f2c_config.c'),
os.path.join(src_dir, 'f2c.c'),
]
all_sources = config.paths(lapack_lite_src)
if os.environ.get('NPY_USE_BLAS_ILP64', "0") != "0":
lapack_info = get_info('lapack_ilp64_opt', 2)
else:
lapack_info = get_info('lapack_opt', 0) # and {}
use_lapack_lite = not lapack_info
if use_lapack_lite:
# This makes numpy.distutils write the fact that lapack_lite
# is being used to numpy.__config__
class numpy_linalg_lapack_lite(system_info):
def calc_info(self):
info = {'language': 'c'}
if sys.maxsize > 2**32:
# Build lapack-lite in 64-bit integer mode.
# The suffix is arbitrary (lapack_lite symbols follow it),
# but use the "64_" convention here.
info['define_macros'] = [
('HAVE_BLAS_ILP64', None),
('BLAS_SYMBOL_SUFFIX', '64_')
]
self.set_info(**info)
lapack_info = numpy_linalg_lapack_lite().get_info(2)
def get_lapack_lite_sources(ext, build_dir):
if use_lapack_lite:
print("### Warning: Using unoptimized lapack ###")
return all_sources
else:
if sys.platform == 'win32':
print("### Warning: python_xerbla.c is disabled ###")
return []
return [all_sources[0]]
config.add_extension(
'lapack_lite',
sources=['lapack_litemodule.c', get_lapack_lite_sources],
depends=['lapack_lite/f2c.h'],
extra_info=lapack_info,
)
# umath_linalg module
config.add_extension(
'_umath_linalg',
sources=['umath_linalg.cpp', get_lapack_lite_sources],
depends=['lapack_lite/f2c.h'],
extra_info=lapack_info,
extra_cxx_compile_args=NPY_CXX_FLAGS,
libraries=['npymath'],
)
config.add_data_files('*.pyi')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
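# Illustrative note, not part of the original setup script: the ILP64 branch
# above is selected via an environment variable at build time, e.g.
#   NPY_USE_BLAS_ILP64=1 python setup.py build
# which requires a 64-bit-integer LAPACK (lapack_ilp64_opt) to be available.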
| 2,925 | Python | 33.423529 | 78 | 0.565128 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/linalg/__init__.pyi | from numpy.linalg.linalg import (
matrix_power as matrix_power,
solve as solve,
tensorsolve as tensorsolve,
tensorinv as tensorinv,
inv as inv,
cholesky as cholesky,
eigvals as eigvals,
eigvalsh as eigvalsh,
pinv as pinv,
slogdet as slogdet,
det as det,
svd as svd,
eig as eig,
eigh as eigh,
lstsq as lstsq,
norm as norm,
qr as qr,
cond as cond,
matrix_rank as matrix_rank,
multi_dot as multi_dot,
)
from numpy._pytesttester import PytestTester
__all__: list[str]
__path__: list[str]
test: PytestTester
class LinAlgError(Exception): ...
| 620 | unknown | 19.032257 | 44 | 0.654839 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/linalg/tests/test_deprecations.py | """Test deprecation and future warnings.
"""
import numpy as np
from numpy.testing import assert_warns
def test_qr_mode_full_future_warning():
"""Check mode='full' FutureWarning.
In numpy 1.8 the mode options 'full' and 'economic' in linalg.qr were
deprecated. The release date will probably be sometime in the summer
of 2013.
"""
a = np.eye(2)
assert_warns(DeprecationWarning, np.linalg.qr, a, mode='full')
assert_warns(DeprecationWarning, np.linalg.qr, a, mode='f')
assert_warns(DeprecationWarning, np.linalg.qr, a, mode='economic')
assert_warns(DeprecationWarning, np.linalg.qr, a, mode='e')
| 640 | Python | 29.523808 | 73 | 0.7 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/linalg/tests/test_regression.py | """ Test functions for linalg module
"""
import warnings
import numpy as np
from numpy import linalg, arange, float64, array, dot, transpose
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_array_equal,
assert_array_almost_equal, assert_array_less
)
class TestRegression:
def test_eig_build(self):
# Ticket #652
rva = array([1.03221168e+02 + 0.j,
-1.91843603e+01 + 0.j,
-6.04004526e-01 + 15.84422474j,
-6.04004526e-01 - 15.84422474j,
-1.13692929e+01 + 0.j,
-6.57612485e-01 + 10.41755503j,
-6.57612485e-01 - 10.41755503j,
1.82126812e+01 + 0.j,
1.06011014e+01 + 0.j,
7.80732773e+00 + 0.j,
-7.65390898e-01 + 0.j,
1.51971555e-15 + 0.j,
-1.51308713e-15 + 0.j])
a = arange(13 * 13, dtype=float64)
a.shape = (13, 13)
a = a % 17
va, ve = linalg.eig(a)
va.sort()
rva.sort()
assert_array_almost_equal(va, rva)
def test_eigh_build(self):
# Ticket 662.
rvals = [68.60568999, 89.57756725, 106.67185574]
cov = array([[77.70273908, 3.51489954, 15.64602427],
[3.51489954, 88.97013878, -1.07431931],
[15.64602427, -1.07431931, 98.18223512]])
vals, vecs = linalg.eigh(cov)
assert_array_almost_equal(vals, rvals)
def test_svd_build(self):
# Ticket 627.
a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]])
m, n = a.shape
u, s, vh = linalg.svd(a)
b = dot(transpose(u[:, n:]), a)
assert_array_almost_equal(b, np.zeros((2, 2)))
def test_norm_vector_badarg(self):
# Regression for #786: Frobenius norm for vectors raises
# ValueError.
assert_raises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro')
def test_lapack_endian(self):
# For bug #1482
a = array([[5.7998084, -2.1825367],
[-2.1825367, 9.85910595]], dtype='>f8')
b = array(a, dtype='<f8')
ap = linalg.cholesky(a)
bp = linalg.cholesky(b)
assert_array_equal(ap, bp)
def test_large_svd_32bit(self):
# See gh-4442, 64bit would require very large/slow matrices.
x = np.eye(1000, 66)
np.linalg.svd(x)
def test_svd_no_uv(self):
# gh-4733
for shape in (3, 4), (4, 4), (4, 3):
for t in float, complex:
a = np.ones(shape, dtype=t)
w = linalg.svd(a, compute_uv=False)
c = np.count_nonzero(np.absolute(w) > 0.5)
assert_equal(c, 1)
assert_equal(np.linalg.matrix_rank(a), 1)
assert_array_less(1, np.linalg.norm(a, ord=2))
def test_norm_object_array(self):
# gh-7575
testvector = np.array([np.array([0, 1]), 0, 0], dtype=object)
norm = linalg.norm(testvector)
assert_array_equal(norm, [0, 1])
assert_(norm.dtype == np.dtype('float64'))
norm = linalg.norm(testvector, ord=1)
assert_array_equal(norm, [0, 1])
assert_(norm.dtype != np.dtype('float64'))
norm = linalg.norm(testvector, ord=2)
assert_array_equal(norm, [0, 1])
assert_(norm.dtype == np.dtype('float64'))
assert_raises(ValueError, linalg.norm, testvector, ord='fro')
assert_raises(ValueError, linalg.norm, testvector, ord='nuc')
assert_raises(ValueError, linalg.norm, testvector, ord=np.inf)
assert_raises(ValueError, linalg.norm, testvector, ord=-np.inf)
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
assert_raises((AttributeError, DeprecationWarning),
linalg.norm, testvector, ord=0)
assert_raises(ValueError, linalg.norm, testvector, ord=-1)
assert_raises(ValueError, linalg.norm, testvector, ord=-2)
testmatrix = np.array([[np.array([0, 1]), 0, 0],
[0, 0, 0]], dtype=object)
norm = linalg.norm(testmatrix)
assert_array_equal(norm, [0, 1])
assert_(norm.dtype == np.dtype('float64'))
norm = linalg.norm(testmatrix, ord='fro')
assert_array_equal(norm, [0, 1])
assert_(norm.dtype == np.dtype('float64'))
assert_raises(TypeError, linalg.norm, testmatrix, ord='nuc')
assert_raises(ValueError, linalg.norm, testmatrix, ord=np.inf)
assert_raises(ValueError, linalg.norm, testmatrix, ord=-np.inf)
assert_raises(ValueError, linalg.norm, testmatrix, ord=0)
assert_raises(ValueError, linalg.norm, testmatrix, ord=1)
assert_raises(ValueError, linalg.norm, testmatrix, ord=-1)
assert_raises(TypeError, linalg.norm, testmatrix, ord=2)
assert_raises(TypeError, linalg.norm, testmatrix, ord=-2)
assert_raises(ValueError, linalg.norm, testmatrix, ord=3)
def test_lstsq_complex_larger_rhs(self):
# gh-9891
size = 20
n_rhs = 70
G = np.random.randn(size, size) + 1j * np.random.randn(size, size)
u = np.random.randn(size, n_rhs) + 1j * np.random.randn(size, n_rhs)
b = G.dot(u)
# This should work without segmentation fault.
u_lstsq, res, rank, sv = linalg.lstsq(G, b, rcond=None)
# check results just in case
assert_array_almost_equal(u_lstsq, u)
| 5,597 | Python | 36.57047 | 76 | 0.55226 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/linalg/tests/test_linalg.py | """ Test functions for linalg module
"""
import os
import sys
import itertools
import traceback
import textwrap
import subprocess
import pytest
import numpy as np
from numpy import array, single, double, csingle, cdouble, dot, identity, matmul
from numpy.core import swapaxes
from numpy import multiply, atleast_2d, inf, asarray
from numpy import linalg
from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError
from numpy.linalg.linalg import _multi_dot_matrix_chain_order
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_allclose, suppress_warnings,
assert_raises_regex, HAS_LAPACK64,
)
def consistent_subclass(out, in_):
# For ndarray subclass input, our output should have the same subclass
# (non-ndarray input gets converted to ndarray).
return type(out) is (type(in_) if isinstance(in_, np.ndarray)
else np.ndarray)
old_assert_almost_equal = assert_almost_equal
def assert_almost_equal(a, b, single_decimal=6, double_decimal=12, **kw):
if asarray(a).dtype.type in (single, csingle):
decimal = single_decimal
else:
decimal = double_decimal
old_assert_almost_equal(a, b, decimal=decimal, **kw)
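# Illustrative sketch, not part of the original tests: the wrapper above
# loosens the comparison automatically for single precision, e.g.
#   assert_almost_equal(np.float32(1.000001), np.float32(1.0))  # passes, 6 decimals
#   assert_almost_equal(np.float64(1.000001), np.float64(1.0))  # raises, 12 decimals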
def get_real_dtype(dtype):
return {single: single, double: double,
csingle: single, cdouble: double}[dtype]
def get_complex_dtype(dtype):
return {single: csingle, double: cdouble,
csingle: csingle, cdouble: cdouble}[dtype]
def get_rtol(dtype):
# Choose a safe rtol
if dtype in (single, csingle):
return 1e-5
else:
return 1e-11
# used to categorize tests
all_tags = {
'square', 'nonsquare', 'hermitian', # mutually exclusive
'generalized', 'size-0', 'strided' # optional additions
}
class LinalgCase:
def __init__(self, name, a, b, tags=set()):
"""
A bundle of arguments to be passed to a test case, with an identifying
        name, the operands a and b, and a set of tags to filter the tests.
"""
assert_(isinstance(name, str))
self.name = name
self.a = a
self.b = b
self.tags = frozenset(tags) # prevent shared tags
def check(self, do):
"""
        Run the function `do` on this test case, expanding arguments.
"""
do(self.a, self.b, tags=self.tags)
def __repr__(self):
return f'<LinalgCase: {self.name}>'
def apply_tag(tag, cases):
"""
Add the given tag (a string) to each of the cases (a list of LinalgCase
objects)
"""
assert tag in all_tags, "Invalid tag"
for case in cases:
case.tags = case.tags | {tag}
return cases
#
# Base test cases
#
np.random.seed(1234)
CASES = []
# square test cases
CASES += apply_tag('square', [
LinalgCase("single",
array([[1., 2.], [3., 4.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("double",
array([[1., 2.], [3., 4.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_2",
array([[1., 2.], [3., 4.]], dtype=double),
array([[2., 1., 4.], [3., 4., 6.]], dtype=double)),
LinalgCase("csingle",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=csingle),
array([2. + 1j, 1. + 2j], dtype=csingle)),
LinalgCase("cdouble",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
array([2. + 1j, 1. + 2j], dtype=cdouble)),
LinalgCase("cdouble_2",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble)),
LinalgCase("0x0",
np.empty((0, 0), dtype=double),
np.empty((0,), dtype=double),
tags={'size-0'}),
LinalgCase("8x8",
np.random.rand(8, 8),
np.random.rand(8)),
LinalgCase("1x1",
np.random.rand(1, 1),
np.random.rand(1)),
LinalgCase("nonarray",
[[1, 2], [3, 4]],
[2, 1]),
])
# non-square test-cases
CASES += apply_tag('nonsquare', [
LinalgCase("single_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("single_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=single),
array([2., 1., 3.], dtype=single)),
LinalgCase("double_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=double),
array([2., 1., 3.], dtype=double)),
LinalgCase("csingle_nsq_1",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle),
array([2. + 1j, 1. + 2j], dtype=csingle)),
LinalgCase("csingle_nsq_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle),
array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)),
LinalgCase("cdouble_nsq_1",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),
array([2. + 1j, 1. + 2j], dtype=cdouble)),
LinalgCase("cdouble_nsq_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),
array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)),
LinalgCase("cdouble_nsq_1_2",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),
LinalgCase("cdouble_nsq_2_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),
LinalgCase("8x11",
np.random.rand(8, 11),
np.random.rand(8)),
LinalgCase("1x5",
np.random.rand(1, 5),
np.random.rand(1)),
LinalgCase("5x1",
np.random.rand(5, 1),
np.random.rand(5)),
LinalgCase("0x4",
np.random.rand(0, 4),
np.random.rand(0),
tags={'size-0'}),
LinalgCase("4x0",
np.random.rand(4, 0),
np.random.rand(4),
tags={'size-0'}),
])
# hermitian test-cases
CASES += apply_tag('hermitian', [
LinalgCase("hsingle",
array([[1., 2.], [2., 1.]], dtype=single),
None),
LinalgCase("hdouble",
array([[1., 2.], [2., 1.]], dtype=double),
None),
LinalgCase("hcsingle",
array([[1., 2 + 3j], [2 - 3j, 1]], dtype=csingle),
None),
LinalgCase("hcdouble",
array([[1., 2 + 3j], [2 - 3j, 1]], dtype=cdouble),
None),
LinalgCase("hempty",
np.empty((0, 0), dtype=double),
None,
tags={'size-0'}),
LinalgCase("hnonarray",
[[1, 2], [2, 1]],
None),
LinalgCase("matrix_b_only",
array([[1., 2.], [2., 1.]]),
None),
LinalgCase("hmatrix_1x1",
np.random.rand(1, 1),
None),
])
#
# Gufunc test cases
#
def _make_generalized_cases():
new_cases = []
for case in CASES:
if not isinstance(case.a, np.ndarray):
continue
a = np.array([case.a, 2 * case.a, 3 * case.a])
if case.b is None:
b = None
else:
b = np.array([case.b, 7 * case.b, 6 * case.b])
new_case = LinalgCase(case.name + "_tile3", a, b,
tags=case.tags | {'generalized'})
new_cases.append(new_case)
a = np.array([case.a] * 2 * 3).reshape((3, 2) + case.a.shape)
if case.b is None:
b = None
else:
b = np.array([case.b] * 2 * 3).reshape((3, 2) + case.b.shape)
new_case = LinalgCase(case.name + "_tile213", a, b,
tags=case.tags | {'generalized'})
new_cases.append(new_case)
return new_cases
CASES += _make_generalized_cases()
#
# Generate stride combination variations of the above
#
def _stride_comb_iter(x):
"""
Generate cartesian product of strides for all axes
"""
if not isinstance(x, np.ndarray):
yield x, "nop"
return
stride_set = [(1,)] * x.ndim
stride_set[-1] = (1, 3, -4)
if x.ndim > 1:
stride_set[-2] = (1, 3, -4)
if x.ndim > 2:
stride_set[-3] = (1, -4)
for repeats in itertools.product(*tuple(stride_set)):
new_shape = [abs(a * b) for a, b in zip(x.shape, repeats)]
slices = tuple([slice(None, None, repeat) for repeat in repeats])
# new array with different strides, but same data
xi = np.empty(new_shape, dtype=x.dtype)
xi.view(np.uint32).fill(0xdeadbeef)
xi = xi[slices]
xi[...] = x
xi = xi.view(x.__class__)
assert_(np.all(xi == x))
yield xi, "stride_" + "_".join(["%+d" % j for j in repeats])
# generate also zero strides if possible
if x.ndim >= 1 and x.shape[-1] == 1:
s = list(x.strides)
s[-1] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0"
if x.ndim >= 2 and x.shape[-2] == 1:
s = list(x.strides)
s[-2] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_x"
if x.ndim >= 2 and x.shape[:-2] == (1, 1):
s = list(x.strides)
s[-1] = 0
s[-2] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_0"
def _make_strided_cases():
new_cases = []
for case in CASES:
for a, a_label in _stride_comb_iter(case.a):
for b, b_label in _stride_comb_iter(case.b):
new_case = LinalgCase(case.name + "_" + a_label + "_" + b_label, a, b,
tags=case.tags | {'strided'})
new_cases.append(new_case)
return new_cases
CASES += _make_strided_cases()
#
# Test different routines against the above cases
#
class LinalgTestCase:
TEST_CASES = CASES
def check_cases(self, require=set(), exclude=set()):
"""
        Run `self.do` on each of the cases with all of the tags in require,
        and none of the tags in exclude.
"""
for case in self.TEST_CASES:
# filter by require and exclude
if case.tags & require != require:
continue
if case.tags & exclude:
continue
try:
case.check(self.do)
except Exception as e:
msg = f'In test case: {case!r}\n\n'
msg += traceback.format_exc()
raise AssertionError(msg) from e
class LinalgSquareTestCase(LinalgTestCase):
def test_sq_cases(self):
self.check_cases(require={'square'},
exclude={'generalized', 'size-0'})
def test_empty_sq_cases(self):
self.check_cases(require={'square', 'size-0'},
exclude={'generalized'})
class LinalgNonsquareTestCase(LinalgTestCase):
def test_nonsq_cases(self):
self.check_cases(require={'nonsquare'},
exclude={'generalized', 'size-0'})
def test_empty_nonsq_cases(self):
self.check_cases(require={'nonsquare', 'size-0'},
exclude={'generalized'})
class HermitianTestCase(LinalgTestCase):
def test_herm_cases(self):
self.check_cases(require={'hermitian'},
exclude={'generalized', 'size-0'})
def test_empty_herm_cases(self):
self.check_cases(require={'hermitian', 'size-0'},
exclude={'generalized'})
class LinalgGeneralizedSquareTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_sq_cases(self):
self.check_cases(require={'generalized', 'square'},
exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_sq_cases(self):
self.check_cases(require={'generalized', 'square', 'size-0'})
class LinalgGeneralizedNonsquareTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_nonsq_cases(self):
self.check_cases(require={'generalized', 'nonsquare'},
exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_nonsq_cases(self):
self.check_cases(require={'generalized', 'nonsquare', 'size-0'})
class HermitianGeneralizedTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_herm_cases(self):
self.check_cases(require={'generalized', 'hermitian'},
exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_herm_cases(self):
self.check_cases(require={'generalized', 'hermitian', 'size-0'},
exclude={'none'})
def dot_generalized(a, b):
a = asarray(a)
if a.ndim >= 3:
if a.ndim == b.ndim:
# matrix x matrix
new_shape = a.shape[:-1] + b.shape[-1:]
elif a.ndim == b.ndim + 1:
# matrix x vector
new_shape = a.shape[:-1]
else:
raise ValueError("Not implemented...")
r = np.empty(new_shape, dtype=np.common_type(a, b))
for c in itertools.product(*map(range, a.shape[:-2])):
r[c] = dot(a[c], b[c])
return r
else:
return dot(a, b)
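# Illustrative sketch, not part of the original tests: dot_generalized loops
# the leading "stack" axes by hand, so for stacked square input it should
# agree with the gufunc-style np.matmul, e.g.
#   a = np.random.rand(3, 2, 4, 4)
#   b = np.random.rand(3, 2, 4, 4)
#   np.testing.assert_allclose(dot_generalized(a, b), np.matmul(a, b))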
def identity_like_generalized(a):
a = asarray(a)
if a.ndim >= 3:
r = np.empty(a.shape, dtype=a.dtype)
r[...] = identity(a.shape[-2])
return r
else:
return identity(a.shape[0])
class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
# kept apart from TestSolve for use for testing with matrices.
def do(self, a, b, tags):
x = linalg.solve(a, b)
assert_almost_equal(b, dot_generalized(a, x))
assert_(consistent_subclass(x, b))
class TestSolve(SolveCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.solve(x, x).dtype, dtype)
def test_0_size(self):
class ArraySubclass(np.ndarray):
pass
# Test system of 0x0 matrices
a = np.arange(8).reshape(2, 2, 2)
b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass)
expected = linalg.solve(a, b)[:, 0:0, :]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, :])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
# Test errors for non-square and only b's dimension being 0
assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b)
assert_raises(ValueError, linalg.solve, a, b[:, 0:0, :])
# Test broadcasting error
b = np.arange(6).reshape(1, 3, 2) # broadcasting error
assert_raises(ValueError, linalg.solve, a, b)
assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
# Test zero "single equations" with 0x0 matrices.
b = np.arange(2).reshape(1, 2).view(ArraySubclass)
expected = linalg.solve(a, b)[:, 0:0]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
b = np.arange(3).reshape(1, 3)
assert_raises(ValueError, linalg.solve, a, b)
assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b)
def test_0_size_k(self):
# test zero multiple equation (K=0) case.
class ArraySubclass(np.ndarray):
pass
a = np.arange(4).reshape(1, 2, 2)
b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass)
expected = linalg.solve(a, b)[:, :, 0:0]
result = linalg.solve(a, b[:, :, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
# test both zero.
expected = linalg.solve(a, b)[:, 0:0, 0:0]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
class InvCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
a_inv = linalg.inv(a)
assert_almost_equal(dot_generalized(a, a_inv),
identity_like_generalized(a))
assert_(consistent_subclass(a_inv, a))
class TestInv(InvCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.inv(x).dtype, dtype)
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.inv(a)
assert_(res.dtype.type is np.float64)
assert_equal(a.shape, res.shape)
assert_(isinstance(res, ArraySubclass))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.inv(a)
assert_(res.dtype.type is np.complex64)
assert_equal(a.shape, res.shape)
assert_(isinstance(res, ArraySubclass))
class EigvalsCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
ev = linalg.eigvals(a)
evalues, evectors = linalg.eig(a)
assert_almost_equal(ev, evalues)
class TestEigvals(EigvalsCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.eigvals(x).dtype, dtype)
x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype))
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.eigvals(a)
assert_(res.dtype.type is np.float64)
assert_equal((0, 1), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(res, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.eigvals(a)
assert_(res.dtype.type is np.complex64)
assert_equal((0,), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(res, np.ndarray))
class EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
evalues, evectors = linalg.eig(a)
assert_allclose(dot_generalized(a, evectors),
np.asarray(evectors) * np.asarray(evalues)[..., None, :],
rtol=get_rtol(evalues.dtype))
assert_(consistent_subclass(evectors, a))
class TestEig(EigCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w, v = np.linalg.eig(x)
assert_equal(w.dtype, dtype)
assert_equal(v.dtype, dtype)
x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
w, v = np.linalg.eig(x)
assert_equal(w.dtype, get_complex_dtype(dtype))
assert_equal(v.dtype, get_complex_dtype(dtype))
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res, res_v = linalg.eig(a)
assert_(res_v.dtype.type is np.float64)
assert_(res.dtype.type is np.float64)
assert_equal(a.shape, res_v.shape)
assert_equal((0, 1), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(a, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res, res_v = linalg.eig(a)
assert_(res_v.dtype.type is np.complex64)
assert_(res.dtype.type is np.complex64)
assert_equal(a.shape, res_v.shape)
assert_equal((0,), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(a, np.ndarray))
class SVDBaseTests:
hermitian = False
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
u, s, vh = linalg.svd(x)
assert_equal(u.dtype, dtype)
assert_equal(s.dtype, get_real_dtype(dtype))
assert_equal(vh.dtype, dtype)
s = linalg.svd(x, compute_uv=False, hermitian=self.hermitian)
assert_equal(s.dtype, get_real_dtype(dtype))
class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
u, s, vt = linalg.svd(a, False)
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
assert_(consistent_subclass(u, a))
assert_(consistent_subclass(vt, a))
class TestSVD(SVDCases, SVDBaseTests):
def test_empty_identity(self):
""" Empty input should put an identity matrix in u or vh """
x = np.empty((4, 0))
u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian)
assert_equal(u.shape, (4, 4))
assert_equal(vh.shape, (0, 0))
assert_equal(u, np.eye(4))
x = np.empty((0, 4))
u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian)
assert_equal(u.shape, (0, 0))
assert_equal(vh.shape, (4, 4))
assert_equal(vh, np.eye(4))
class SVDHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
u, s, vt = linalg.svd(a, False, hermitian=True)
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
def hermitian(mat):
axes = list(range(mat.ndim))
axes[-1], axes[-2] = axes[-2], axes[-1]
return np.conj(np.transpose(mat, axes=axes))
assert_almost_equal(np.matmul(u, hermitian(u)), np.broadcast_to(np.eye(u.shape[-1]), u.shape))
assert_almost_equal(np.matmul(vt, hermitian(vt)), np.broadcast_to(np.eye(vt.shape[-1]), vt.shape))
assert_equal(np.sort(s)[..., ::-1], s)
assert_(consistent_subclass(u, a))
assert_(consistent_subclass(vt, a))
class TestSVDHermitian(SVDHermitianCases, SVDBaseTests):
hermitian = True
class CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
# cond(x, p) for p in (None, 2, -2)
def do(self, a, b, tags):
c = asarray(a) # a might be a matrix
if 'size-0' in tags:
assert_raises(LinAlgError, linalg.cond, c)
return
# +-2 norms
s = linalg.svd(c, compute_uv=False)
assert_almost_equal(
linalg.cond(a), s[..., 0] / s[..., -1],
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, 2), s[..., 0] / s[..., -1],
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, -2), s[..., -1] / s[..., 0],
single_decimal=5, double_decimal=11)
# Other norms
cinv = np.linalg.inv(c)
assert_almost_equal(
linalg.cond(a, 1),
abs(c).sum(-2).max(-1) * abs(cinv).sum(-2).max(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, -1),
abs(c).sum(-2).min(-1) * abs(cinv).sum(-2).min(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, np.inf),
abs(c).sum(-1).max(-1) * abs(cinv).sum(-1).max(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, -np.inf),
abs(c).sum(-1).min(-1) * abs(cinv).sum(-1).min(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, 'fro'),
np.sqrt((abs(c)**2).sum(-1).sum(-1)
* (abs(cinv)**2).sum(-1).sum(-1)),
single_decimal=5, double_decimal=11)
class TestCond(CondCases):
def test_basic_nonsvd(self):
# Smoketest the non-svd norms
A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]])
assert_almost_equal(linalg.cond(A, inf), 4)
assert_almost_equal(linalg.cond(A, -inf), 2/3)
assert_almost_equal(linalg.cond(A, 1), 4)
assert_almost_equal(linalg.cond(A, -1), 0.5)
assert_almost_equal(linalg.cond(A, 'fro'), np.sqrt(265 / 12))
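    # Illustrative note, not part of the original tests: for the A above,
    # inv(A) = [[1, 0, -1/3], [0, -1/2, 0], [0, 0, 1/3]], so for example
    # cond(A, inf) = ||A||_inf * ||inv(A)||_inf = 3 * (4/3) = 4, matching
    # the first assert.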
def test_singular(self):
# Singular matrices have infinite condition number for
# positive norms, and negative norms shouldn't raise
# exceptions
As = [np.zeros((2, 2)), np.ones((2, 2))]
p_pos = [None, 1, 2, 'fro']
p_neg = [-1, -2]
for A, p in itertools.product(As, p_pos):
# Inversion may not hit exact infinity, so just check the
# number is large
assert_(linalg.cond(A, p) > 1e15)
for A, p in itertools.product(As, p_neg):
linalg.cond(A, p)
@pytest.mark.xfail(True, run=False,
reason="Platform/LAPACK-dependent failure, "
"see gh-18914")
def test_nan(self):
# nans should be passed through, not converted to infs
ps = [None, 1, -1, 2, -2, 'fro']
p_pos = [None, 1, 2, 'fro']
A = np.ones((2, 2))
A[0,1] = np.nan
for p in ps:
c = linalg.cond(A, p)
assert_(isinstance(c, np.float_))
assert_(np.isnan(c))
A = np.ones((3, 2, 2))
A[1,0,1] = np.nan
for p in ps:
c = linalg.cond(A, p)
assert_(np.isnan(c[1]))
if p in p_pos:
assert_(c[0] > 1e15)
assert_(c[2] > 1e15)
else:
assert_(not np.isnan(c[0]))
assert_(not np.isnan(c[2]))
def test_stacked_singular(self):
# Check behavior when only some of the stacked matrices are
# singular
np.random.seed(1234)
A = np.random.rand(2, 2, 2, 2)
A[0,0] = 0
A[1,1] = 0
for p in (None, 1, 2, 'fro', -1, -2):
c = linalg.cond(A, p)
assert_equal(c[0,0], np.inf)
assert_equal(c[1,1], np.inf)
assert_(np.isfinite(c[0,1]))
assert_(np.isfinite(c[1,0]))
class PinvCases(LinalgSquareTestCase,
LinalgNonsquareTestCase,
LinalgGeneralizedSquareTestCase,
LinalgGeneralizedNonsquareTestCase):
def do(self, a, b, tags):
a_ginv = linalg.pinv(a)
# `a @ a_ginv == I` does not hold if a is singular
dot = dot_generalized
assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)
assert_(consistent_subclass(a_ginv, a))
class TestPinv(PinvCases):
pass
class PinvHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
a_ginv = linalg.pinv(a, hermitian=True)
# `a @ a_ginv == I` does not hold if a is singular
dot = dot_generalized
assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)
assert_(consistent_subclass(a_ginv, a))
class TestPinvHermitian(PinvHermitianCases):
pass
class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
d = linalg.det(a)
(s, ld) = linalg.slogdet(a)
if asarray(a).dtype.type in (single, double):
ad = asarray(a).astype(double)
else:
ad = asarray(a).astype(cdouble)
ev = linalg.eigvals(ad)
assert_almost_equal(d, multiply.reduce(ev, axis=-1))
assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1))
s = np.atleast_1d(s)
ld = np.atleast_1d(ld)
m = (s != 0)
assert_almost_equal(np.abs(s[m]), 1)
assert_equal(ld[~m], -inf)
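# Illustrative sketch, not part of the original tests: slogdet splits the
# determinant into a sign and a log-magnitude, e.g.
#   s, ld = np.linalg.slogdet(np.diag([-2.0, 3.0]))
#   # s == -1.0, np.exp(ld) == 6.0, and s * np.exp(ld) == det == -6.0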
class TestDet(DetCases):
def test_zero(self):
assert_equal(linalg.det([[0.0]]), 0.0)
assert_equal(type(linalg.det([[0.0]])), double)
assert_equal(linalg.det([[0.0j]]), 0.0)
assert_equal(type(linalg.det([[0.0j]])), cdouble)
assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf))
assert_equal(type(linalg.slogdet([[0.0]])[0]), double)
assert_equal(type(linalg.slogdet([[0.0]])[1]), double)
assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf))
assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble)
assert_equal(type(linalg.slogdet([[0.0j]])[1]), double)
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(np.linalg.det(x).dtype, dtype)
ph, s = np.linalg.slogdet(x)
assert_equal(s.dtype, get_real_dtype(dtype))
assert_equal(ph.dtype, dtype)
def test_0_size(self):
a = np.zeros((0, 0), dtype=np.complex64)
res = linalg.det(a)
assert_equal(res, 1.)
assert_(res.dtype.type is np.complex64)
res = linalg.slogdet(a)
assert_equal(res, (1, 0))
assert_(res[0].dtype.type is np.complex64)
assert_(res[1].dtype.type is np.float32)
a = np.zeros((0, 0), dtype=np.float64)
res = linalg.det(a)
assert_equal(res, 1.)
assert_(res.dtype.type is np.float64)
res = linalg.slogdet(a)
assert_equal(res, (1, 0))
assert_(res[0].dtype.type is np.float64)
assert_(res[1].dtype.type is np.float64)
class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase):
def do(self, a, b, tags):
arr = np.asarray(a)
m, n = arr.shape
u, s, vt = linalg.svd(a, False)
x, residuals, rank, sv = linalg.lstsq(a, b, rcond=-1)
if m == 0:
assert_((x == 0).all())
if m <= n:
assert_almost_equal(b, dot(a, x))
assert_equal(rank, m)
else:
assert_equal(rank, n)
assert_almost_equal(sv, sv.__array_wrap__(s))
if rank == n and m > n:
expect_resids = (
np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0)
expect_resids = np.asarray(expect_resids)
if np.asarray(b).ndim == 1:
expect_resids.shape = (1,)
assert_equal(residuals.shape, expect_resids.shape)
else:
expect_resids = np.array([]).view(type(x))
assert_almost_equal(residuals, expect_resids)
assert_(np.issubdtype(residuals.dtype, np.floating))
assert_(consistent_subclass(x, b))
assert_(consistent_subclass(residuals, b))
class TestLstsq(LstsqCases):
def test_future_rcond(self):
a = np.array([[0., 1., 0., 1., 2., 0.],
[0., 2., 0., 0., 1., 0.],
[1., 0., 1., 0., 0., 4.],
[0., 0., 0., 2., 3., 0.]]).T
b = np.array([1, 0, 0, 0, 0, 0])
with suppress_warnings() as sup:
w = sup.record(FutureWarning, "`rcond` parameter will change")
x, residuals, rank, s = linalg.lstsq(a, b)
assert_(rank == 4)
x, residuals, rank, s = linalg.lstsq(a, b, rcond=-1)
assert_(rank == 4)
x, residuals, rank, s = linalg.lstsq(a, b, rcond=None)
assert_(rank == 3)
# Warning should be raised exactly once (first command)
assert_(len(w) == 1)
@pytest.mark.parametrize(["m", "n", "n_rhs"], [
(4, 2, 2),
(0, 4, 1),
(0, 4, 2),
(4, 0, 1),
(4, 0, 2),
(4, 2, 0),
(0, 0, 0)
])
def test_empty_a_b(self, m, n, n_rhs):
a = np.arange(m * n).reshape(m, n)
b = np.ones((m, n_rhs))
x, residuals, rank, s = linalg.lstsq(a, b, rcond=None)
if m == 0:
assert_((x == 0).all())
assert_equal(x.shape, (n, n_rhs))
assert_equal(residuals.shape, ((n_rhs,) if m > n else (0,)))
if m > n and n_rhs > 0:
# residuals are exactly the squared norms of b's columns
r = b - np.dot(a, x)
assert_almost_equal(residuals, (r * r).sum(axis=-2))
assert_equal(rank, min(m, n))
assert_equal(s.shape, (min(m, n),))
def test_incompatible_dims(self):
# use modified version of docstring example
x = np.array([0, 1, 2, 3])
y = np.array([-1, 0.2, 0.9, 2.1, 3.3])
A = np.vstack([x, np.ones(len(x))]).T
with assert_raises_regex(LinAlgError, "Incompatible dimensions"):
linalg.lstsq(A, y, rcond=None)
@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO'])
class TestMatrixPower:
rshft_0 = np.eye(4)
rshft_1 = rshft_0[[3, 0, 1, 2]]
rshft_2 = rshft_0[[2, 3, 0, 1]]
rshft_3 = rshft_0[[1, 2, 3, 0]]
rshft_all = [rshft_0, rshft_1, rshft_2, rshft_3]
noninv = array([[1, 0], [0, 0]])
stacked = np.block([[[rshft_0]]]*2)
#FIXME the 'e' dtype might work in future
dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')]
def test_large_power(self, dt):
rshft = self.rshft_1.astype(dt)
assert_equal(
matrix_power(rshft, 2**100 + 2**10 + 2**5 + 0), self.rshft_0)
assert_equal(
matrix_power(rshft, 2**100 + 2**10 + 2**5 + 1), self.rshft_1)
assert_equal(
matrix_power(rshft, 2**100 + 2**10 + 2**5 + 2), self.rshft_2)
assert_equal(
matrix_power(rshft, 2**100 + 2**10 + 2**5 + 3), self.rshft_3)
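    # Illustrative note, not part of the original tests: rshft_1 is a cyclic
    # permutation of period 4, so matrix_power(rshft_1, k) == rshft_(k % 4);
    # the huge exponents above all satisfy (2**100 + 2**10 + 2**5) % 4 == 0.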
def test_power_is_zero(self, dt):
def tz(M):
mz = matrix_power(M, 0)
assert_equal(mz, identity_like_generalized(M))
assert_equal(mz.dtype, M.dtype)
for mat in self.rshft_all:
tz(mat.astype(dt))
if dt != object:
tz(self.stacked.astype(dt))
def test_power_is_one(self, dt):
def tz(mat):
mz = matrix_power(mat, 1)
assert_equal(mz, mat)
assert_equal(mz.dtype, mat.dtype)
for mat in self.rshft_all:
tz(mat.astype(dt))
if dt != object:
tz(self.stacked.astype(dt))
def test_power_is_two(self, dt):
def tz(mat):
mz = matrix_power(mat, 2)
mmul = matmul if mat.dtype != object else dot
assert_equal(mz, mmul(mat, mat))
assert_equal(mz.dtype, mat.dtype)
for mat in self.rshft_all:
tz(mat.astype(dt))
if dt != object:
tz(self.stacked.astype(dt))
def test_power_is_minus_one(self, dt):
def tz(mat):
invmat = matrix_power(mat, -1)
mmul = matmul if mat.dtype != object else dot
assert_almost_equal(
mmul(invmat, mat), identity_like_generalized(mat))
for mat in self.rshft_all:
if dt not in self.dtnoinv:
tz(mat.astype(dt))
def test_exceptions_bad_power(self, dt):
mat = self.rshft_0.astype(dt)
assert_raises(TypeError, matrix_power, mat, 1.5)
assert_raises(TypeError, matrix_power, mat, [1])
def test_exceptions_non_square(self, dt):
assert_raises(LinAlgError, matrix_power, np.array([1], dt), 1)
assert_raises(LinAlgError, matrix_power, np.array([[1], [2]], dt), 1)
assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2), dt), 1)
def test_exceptions_not_invertible(self, dt):
if dt in self.dtnoinv:
return
mat = self.noninv.astype(dt)
assert_raises(LinAlgError, matrix_power, mat, -1)
class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
# note that eigenvalue arrays returned by eig must be sorted since
# their order isn't guaranteed.
ev = linalg.eigvalsh(a, 'L')
evalues, evectors = linalg.eig(a)
evalues.sort(axis=-1)
assert_allclose(ev, evalues, rtol=get_rtol(ev.dtype))
ev2 = linalg.eigvalsh(a, 'U')
assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype))
class TestEigvalsh:
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w = np.linalg.eigvalsh(x)
assert_equal(w.dtype, get_real_dtype(dtype))
def test_invalid(self):
x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
assert_raises(ValueError, np.linalg.eigvalsh, x, UPLO="lrong")
assert_raises(ValueError, np.linalg.eigvalsh, x, "lower")
assert_raises(ValueError, np.linalg.eigvalsh, x, "upper")
def test_UPLO(self):
Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
tgt = np.array([-1, 1], dtype=np.double)
rtol = get_rtol(np.double)
# Check default is 'L'
w = np.linalg.eigvalsh(Klo)
assert_allclose(w, tgt, rtol=rtol)
# Check 'L'
w = np.linalg.eigvalsh(Klo, UPLO='L')
assert_allclose(w, tgt, rtol=rtol)
# Check 'l'
w = np.linalg.eigvalsh(Klo, UPLO='l')
assert_allclose(w, tgt, rtol=rtol)
# Check 'U'
w = np.linalg.eigvalsh(Kup, UPLO='U')
assert_allclose(w, tgt, rtol=rtol)
# Check 'u'
w = np.linalg.eigvalsh(Kup, UPLO='u')
assert_allclose(w, tgt, rtol=rtol)
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.eigvalsh(a)
assert_(res.dtype.type is np.float64)
assert_equal((0, 1), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(res, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.eigvalsh(a)
assert_(res.dtype.type is np.float32)
assert_equal((0,), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(res, np.ndarray))
class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
# note that eigenvalue arrays returned by eig must be sorted since
# their order isn't guaranteed.
ev, evc = linalg.eigh(a)
evalues, evectors = linalg.eig(a)
evalues.sort(axis=-1)
assert_almost_equal(ev, evalues)
assert_allclose(dot_generalized(a, evc),
np.asarray(ev)[..., None, :] * np.asarray(evc),
rtol=get_rtol(ev.dtype))
ev2, evc2 = linalg.eigh(a, 'U')
assert_almost_equal(ev2, evalues)
assert_allclose(dot_generalized(a, evc2),
np.asarray(ev2)[..., None, :] * np.asarray(evc2),
rtol=get_rtol(ev.dtype), err_msg=repr(a))
class TestEigh:
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w, v = np.linalg.eigh(x)
assert_equal(w.dtype, get_real_dtype(dtype))
assert_equal(v.dtype, dtype)
def test_invalid(self):
x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
assert_raises(ValueError, np.linalg.eigh, x, UPLO="lrong")
assert_raises(ValueError, np.linalg.eigh, x, "lower")
assert_raises(ValueError, np.linalg.eigh, x, "upper")
def test_UPLO(self):
Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
tgt = np.array([-1, 1], dtype=np.double)
rtol = get_rtol(np.double)
# Check default is 'L'
w, v = np.linalg.eigh(Klo)
assert_allclose(w, tgt, rtol=rtol)
# Check 'L'
w, v = np.linalg.eigh(Klo, UPLO='L')
assert_allclose(w, tgt, rtol=rtol)
# Check 'l'
w, v = np.linalg.eigh(Klo, UPLO='l')
assert_allclose(w, tgt, rtol=rtol)
# Check 'U'
w, v = np.linalg.eigh(Kup, UPLO='U')
assert_allclose(w, tgt, rtol=rtol)
# Check 'u'
w, v = np.linalg.eigh(Kup, UPLO='u')
assert_allclose(w, tgt, rtol=rtol)
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res, res_v = linalg.eigh(a)
assert_(res_v.dtype.type is np.float64)
assert_(res.dtype.type is np.float64)
assert_equal(a.shape, res_v.shape)
assert_equal((0, 1), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(a, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res, res_v = linalg.eigh(a)
assert_(res_v.dtype.type is np.complex64)
assert_(res.dtype.type is np.float32)
assert_equal(a.shape, res_v.shape)
assert_equal((0,), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(a, np.ndarray))
class _TestNormBase:
dt = None
dec = None
@staticmethod
def check_dtype(x, res):
if issubclass(x.dtype.type, np.inexact):
assert_equal(res.dtype, x.real.dtype)
else:
# For integer input, don't have to test float precision of output.
assert_(issubclass(res.dtype.type, np.floating))
class _TestNormGeneral(_TestNormBase):
def test_empty(self):
assert_equal(norm([]), 0.0)
assert_equal(norm(array([], dtype=self.dt)), 0.0)
assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0)
def test_vector_return_type(self):
a = np.array([1, 0, 1])
exact_types = np.typecodes['AllInteger']
inexact_types = np.typecodes['AllFloat']
all_types = exact_types + inexact_types
for each_type in all_types:
at = a.astype(each_type)
an = norm(at, -np.inf)
self.check_dtype(at, an)
assert_almost_equal(an, 0.0)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "divide by zero encountered")
an = norm(at, -1)
self.check_dtype(at, an)
assert_almost_equal(an, 0.0)
an = norm(at, 0)
self.check_dtype(at, an)
assert_almost_equal(an, 2)
an = norm(at, 1)
self.check_dtype(at, an)
assert_almost_equal(an, 2.0)
an = norm(at, 2)
self.check_dtype(at, an)
assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/2.0))
an = norm(at, 4)
self.check_dtype(at, an)
assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/4.0))
an = norm(at, np.inf)
self.check_dtype(at, an)
assert_almost_equal(an, 1.0)
def test_vector(self):
a = [1, 2, 3, 4]
b = [-1, -2, -3, -4]
c = [-1, 2, -3, 4]
def _test(v):
np.testing.assert_almost_equal(norm(v), 30 ** 0.5,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, inf), 4.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -inf), 1.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 1), 10.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -1), 12.0 / 25,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 2), 30 ** 0.5,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -2), ((205. / 144) ** -0.5),
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 0), 4,
decimal=self.dec)
for v in (a, b, c,):
_test(v)
for v in (array(a, dtype=self.dt), array(b, dtype=self.dt),
array(c, dtype=self.dt)):
_test(v)
def test_axis(self):
# Vector norms.
# Compare the use of `axis` with computing the norm of each row
# or column separately.
A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])]
assert_almost_equal(norm(A, ord=order, axis=0), expected0)
expected1 = [norm(A[k, :], ord=order) for k in range(A.shape[0])]
assert_almost_equal(norm(A, ord=order, axis=1), expected1)
# Matrix norms.
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
nd = B.ndim
for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro']:
for axis in itertools.combinations(range(-nd, nd), 2):
row_axis, col_axis = axis
if row_axis < 0:
row_axis += nd
if col_axis < 0:
col_axis += nd
if row_axis == col_axis:
assert_raises(ValueError, norm, B, ord=order, axis=axis)
else:
n = norm(B, ord=order, axis=axis)
# The logic using k_index only works for nd = 3.
# This has to be changed if nd is increased.
k_index = nd - (row_axis + col_axis)
if row_axis < col_axis:
expected = [norm(B[:].take(k, axis=k_index), ord=order)
for k in range(B.shape[k_index])]
else:
expected = [norm(B[:].take(k, axis=k_index).T, ord=order)
for k in range(B.shape[k_index])]
assert_almost_equal(n, expected)
def test_keepdims(self):
A = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
allclose_err = 'order {0}, axis = {1}'
shape_err = 'Shape mismatch found {0}, expected {1}, order={2}, axis={3}'
# check the order=None, axis=None case
expected = norm(A, ord=None, axis=None)
found = norm(A, ord=None, axis=None, keepdims=True)
assert_allclose(np.squeeze(found), expected,
err_msg=allclose_err.format(None, None))
expected_shape = (1, 1, 1)
assert_(found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, None, None))
# Vector norms.
for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
for k in range(A.ndim):
expected = norm(A, ord=order, axis=k)
found = norm(A, ord=order, axis=k, keepdims=True)
assert_allclose(np.squeeze(found), expected,
err_msg=allclose_err.format(order, k))
expected_shape = list(A.shape)
expected_shape[k] = 1
expected_shape = tuple(expected_shape)
assert_(found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, order, k))
# Matrix norms.
for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro', 'nuc']:
for k in itertools.permutations(range(A.ndim), 2):
expected = norm(A, ord=order, axis=k)
found = norm(A, ord=order, axis=k, keepdims=True)
assert_allclose(np.squeeze(found), expected,
err_msg=allclose_err.format(order, k))
expected_shape = list(A.shape)
expected_shape[k[0]] = 1
expected_shape[k[1]] = 1
expected_shape = tuple(expected_shape)
assert_(found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, order, k))
class _TestNorm2D(_TestNormBase):
# Define the part for 2d arrays separately, so we can subclass this
# and run the tests using np.matrix in matrixlib.tests.test_matrix_linalg.
array = np.array
def test_matrix_empty(self):
assert_equal(norm(self.array([[]], dtype=self.dt)), 0.0)
def test_matrix_return_type(self):
a = self.array([[1, 0, 1], [0, 1, 1]])
exact_types = np.typecodes['AllInteger']
# float32, complex64, float64, complex128 types are the only types
# allowed by `linalg`, which performs the matrix operations used
# within `norm`.
inexact_types = 'fdFD'
all_types = exact_types + inexact_types
for each_type in all_types:
at = a.astype(each_type)
an = norm(at, -np.inf)
self.check_dtype(at, an)
assert_almost_equal(an, 2.0)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "divide by zero encountered")
an = norm(at, -1)
self.check_dtype(at, an)
assert_almost_equal(an, 1.0)
an = norm(at, 1)
self.check_dtype(at, an)
assert_almost_equal(an, 2.0)
an = norm(at, 2)
self.check_dtype(at, an)
assert_almost_equal(an, 3.0**(1.0/2.0))
an = norm(at, -2)
self.check_dtype(at, an)
assert_almost_equal(an, 1.0)
an = norm(at, np.inf)
self.check_dtype(at, an)
assert_almost_equal(an, 2.0)
an = norm(at, 'fro')
self.check_dtype(at, an)
assert_almost_equal(an, 2.0)
an = norm(at, 'nuc')
self.check_dtype(at, an)
# Lower bar needed to support low precision floats.
# They end up being off by 1 in the 7th place.
np.testing.assert_almost_equal(an, 2.7320508075688772, decimal=6)
def test_matrix_2x2(self):
A = self.array([[1, 3], [5, 7]], dtype=self.dt)
assert_almost_equal(norm(A), 84 ** 0.5)
assert_almost_equal(norm(A, 'fro'), 84 ** 0.5)
assert_almost_equal(norm(A, 'nuc'), 10.0)
assert_almost_equal(norm(A, inf), 12.0)
assert_almost_equal(norm(A, -inf), 4.0)
assert_almost_equal(norm(A, 1), 10.0)
assert_almost_equal(norm(A, -1), 6.0)
assert_almost_equal(norm(A, 2), 9.1231056256176615)
assert_almost_equal(norm(A, -2), 0.87689437438234041)
assert_raises(ValueError, norm, A, 'nofro')
assert_raises(ValueError, norm, A, -3)
assert_raises(ValueError, norm, A, 0)
def test_matrix_3x3(self):
# This test has been added because the 2x2 example
# happened to have equal nuclear norm and induced 1-norm.
# The 1/10 scaling factor accommodates the absolute tolerance
# used in assert_almost_equal.
A = (1 / 10) * \
self.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt)
assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5)
assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5)
assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836)
assert_almost_equal(norm(A, inf), 1.1)
assert_almost_equal(norm(A, -inf), 0.6)
assert_almost_equal(norm(A, 1), 1.0)
assert_almost_equal(norm(A, -1), 0.4)
assert_almost_equal(norm(A, 2), 0.88722940323461277)
assert_almost_equal(norm(A, -2), 0.19456584790481812)
def test_bad_args(self):
# Check that bad arguments raise the appropriate exceptions.
A = self.array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
# Using `axis=<integer>` or passing in a 1-D array implies vector
# norms are being computed, so also using `ord='fro'`
# or `ord='nuc'` or any other string raises a ValueError.
assert_raises(ValueError, norm, A, 'fro', 0)
assert_raises(ValueError, norm, A, 'nuc', 0)
assert_raises(ValueError, norm, [3, 4], 'fro', None)
assert_raises(ValueError, norm, [3, 4], 'nuc', None)
assert_raises(ValueError, norm, [3, 4], 'test', None)
# Similarly, norm should raise an exception when ord is any finite
# number other than 1, 2, -1 or -2 when computing matrix norms.
for order in [0, 3]:
assert_raises(ValueError, norm, A, order, None)
assert_raises(ValueError, norm, A, order, (0, 1))
assert_raises(ValueError, norm, B, order, (1, 2))
# Invalid axis
assert_raises(np.AxisError, norm, B, None, 3)
assert_raises(np.AxisError, norm, B, None, (2, 3))
assert_raises(ValueError, norm, B, None, (0, 1, 2))
class _TestNorm(_TestNorm2D, _TestNormGeneral):
pass
class TestNorm_NonSystematic:
def test_longdouble_norm(self):
# Non-regression test: p-norm of longdouble would previously raise
# UnboundLocalError.
x = np.arange(10, dtype=np.longdouble)
old_assert_almost_equal(norm(x, ord=3), 12.65, decimal=2)
def test_intmin(self):
# Non-regression test: p-norm of signed integer would previously do
# float cast and abs in the wrong order.
x = np.array([-2 ** 31], dtype=np.int32)
old_assert_almost_equal(norm(x, ord=3), 2 ** 31, decimal=5)
def test_complex_high_ord(self):
# gh-4156
d = np.empty((2,), dtype=np.clongdouble)
d[0] = 6 + 7j
d[1] = -6 + 7j
res = 11.615898132184
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=10)
d = d.astype(np.complex128)
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=9)
d = d.astype(np.complex64)
old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5)
# Separate definitions so we can use them for matrix tests.
class _TestNormDoubleBase(_TestNormBase):
dt = np.double
dec = 12
class _TestNormSingleBase(_TestNormBase):
dt = np.float32
dec = 6
class _TestNormInt64Base(_TestNormBase):
dt = np.int64
dec = 12
class TestNormDouble(_TestNorm, _TestNormDoubleBase):
pass
class TestNormSingle(_TestNorm, _TestNormSingleBase):
pass
class TestNormInt64(_TestNorm, _TestNormInt64Base):
pass
class TestMatrixRank:
def test_matrix_rank(self):
# Full rank matrix
assert_equal(4, matrix_rank(np.eye(4)))
# rank deficient matrix
I = np.eye(4)
I[-1, -1] = 0.
assert_equal(matrix_rank(I), 3)
# All zeros - zero rank
assert_equal(matrix_rank(np.zeros((4, 4))), 0)
# 1 dimension - rank 1 unless all 0
assert_equal(matrix_rank([1, 0, 0, 0]), 1)
assert_equal(matrix_rank(np.zeros((4,))), 0)
# accepts array-like
assert_equal(matrix_rank([1]), 1)
# greater than 2 dimensions treated as stacked matrices
ms = np.array([I, np.eye(4), np.zeros((4,4))])
assert_equal(matrix_rank(ms), np.array([3, 4, 0]))
# works on scalar
assert_equal(matrix_rank(1), 1)
def test_symmetric_rank(self):
assert_equal(4, matrix_rank(np.eye(4), hermitian=True))
assert_equal(1, matrix_rank(np.ones((4, 4)), hermitian=True))
assert_equal(0, matrix_rank(np.zeros((4, 4)), hermitian=True))
# rank deficient matrix
I = np.eye(4)
I[-1, -1] = 0.
assert_equal(3, matrix_rank(I, hermitian=True))
# manually supplied tolerance
I[-1, -1] = 1e-8
assert_equal(4, matrix_rank(I, hermitian=True, tol=0.99e-8))
assert_equal(3, matrix_rank(I, hermitian=True, tol=1.01e-8))
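# Illustrative sketch (not part of the original suite): the rank is the
# number of singular values greater than `tol`, so an explicit tolerance
# can discard nearly-zero directions:
# >>> np.linalg.matrix_rank(np.diag([1., 1e-10]), tol=1e-8)
# 1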
def test_reduced_rank():
# Test matrices with reduced rank
rng = np.random.RandomState(20120714)
for i in range(100):
# Make a rank deficient matrix
X = rng.normal(size=(40, 10))
X[:, 0] = X[:, 1] + X[:, 2]
# Assert that matrix_rank detected deficiency
assert_equal(matrix_rank(X), 9)
X[:, 3] = X[:, 4] + X[:, 5]
assert_equal(matrix_rank(X), 8)
class TestQR:
    # Define the array class here, so we can run these tests on matrices elsewhere.
array = np.array
def check_qr(self, a):
# This test expects the argument `a` to be an ndarray or
# a subclass of an ndarray of inexact type.
a_type = type(a)
a_dtype = a.dtype
m, n = a.shape
k = min(m, n)
# mode == 'complete'
q, r = linalg.qr(a, mode='complete')
assert_(q.dtype == a_dtype)
assert_(r.dtype == a_dtype)
assert_(isinstance(q, a_type))
assert_(isinstance(r, a_type))
assert_(q.shape == (m, m))
assert_(r.shape == (m, n))
assert_almost_equal(dot(q, r), a)
assert_almost_equal(dot(q.T.conj(), q), np.eye(m))
assert_almost_equal(np.triu(r), r)
# mode == 'reduced'
q1, r1 = linalg.qr(a, mode='reduced')
assert_(q1.dtype == a_dtype)
assert_(r1.dtype == a_dtype)
assert_(isinstance(q1, a_type))
assert_(isinstance(r1, a_type))
assert_(q1.shape == (m, k))
assert_(r1.shape == (k, n))
assert_almost_equal(dot(q1, r1), a)
assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k))
assert_almost_equal(np.triu(r1), r1)
# mode == 'r'
r2 = linalg.qr(a, mode='r')
assert_(r2.dtype == a_dtype)
assert_(isinstance(r2, a_type))
assert_almost_equal(r2, r1)
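    # Illustrative sketch (not part of the original suite): the modes
    # checked above differ only in the shapes returned for an m x n input:
    # >>> q, r = np.linalg.qr(np.ones((4, 2)), mode='complete')
    # >>> q.shape, r.shape
    # ((4, 4), (4, 2))
    # >>> q1, r1 = np.linalg.qr(np.ones((4, 2)), mode='reduced')
    # >>> q1.shape, r1.shape
    # ((4, 2), (2, 2))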
@pytest.mark.parametrize(["m", "n"], [
(3, 0),
(0, 3),
(0, 0)
])
def test_qr_empty(self, m, n):
k = min(m, n)
a = np.empty((m, n))
self.check_qr(a)
h, tau = np.linalg.qr(a, mode='raw')
assert_equal(h.dtype, np.double)
assert_equal(tau.dtype, np.double)
assert_equal(h.shape, (n, m))
assert_equal(tau.shape, (k,))
def test_mode_raw(self):
# The factorization is not unique and varies between libraries,
# so it is not possible to check against known values. Functional
# testing is a possibility, but awaits the exposure of more
# of the functions in lapack_lite. Consequently, this test is
# very limited in scope. Note that the results are in FORTRAN
# order, hence the h arrays are transposed.
a = self.array([[1, 2], [3, 4], [5, 6]], dtype=np.double)
# Test double
h, tau = linalg.qr(a, mode='raw')
assert_(h.dtype == np.double)
assert_(tau.dtype == np.double)
assert_(h.shape == (2, 3))
assert_(tau.shape == (2,))
h, tau = linalg.qr(a.T, mode='raw')
assert_(h.dtype == np.double)
assert_(tau.dtype == np.double)
assert_(h.shape == (3, 2))
assert_(tau.shape == (2,))
def test_mode_all_but_economic(self):
a = self.array([[1, 2], [3, 4]])
b = self.array([[1, 2], [3, 4], [5, 6]])
for dt in "fd":
m1 = a.astype(dt)
m2 = b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
for dt in "fd":
m1 = 1 + 1j * a.astype(dt)
m2 = 1 + 1j * b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
def check_qr_stacked(self, a):
# This test expects the argument `a` to be an ndarray or
# a subclass of an ndarray of inexact type.
a_type = type(a)
a_dtype = a.dtype
m, n = a.shape[-2:]
k = min(m, n)
# mode == 'complete'
q, r = linalg.qr(a, mode='complete')
assert_(q.dtype == a_dtype)
assert_(r.dtype == a_dtype)
assert_(isinstance(q, a_type))
assert_(isinstance(r, a_type))
assert_(q.shape[-2:] == (m, m))
assert_(r.shape[-2:] == (m, n))
assert_almost_equal(matmul(q, r), a)
I_mat = np.identity(q.shape[-1])
stack_I_mat = np.broadcast_to(I_mat,
q.shape[:-2] + (q.shape[-1],)*2)
assert_almost_equal(matmul(swapaxes(q, -1, -2).conj(), q), stack_I_mat)
assert_almost_equal(np.triu(r[..., :, :]), r)
# mode == 'reduced'
q1, r1 = linalg.qr(a, mode='reduced')
assert_(q1.dtype == a_dtype)
assert_(r1.dtype == a_dtype)
assert_(isinstance(q1, a_type))
assert_(isinstance(r1, a_type))
assert_(q1.shape[-2:] == (m, k))
assert_(r1.shape[-2:] == (k, n))
assert_almost_equal(matmul(q1, r1), a)
I_mat = np.identity(q1.shape[-1])
stack_I_mat = np.broadcast_to(I_mat,
q1.shape[:-2] + (q1.shape[-1],)*2)
assert_almost_equal(matmul(swapaxes(q1, -1, -2).conj(), q1),
stack_I_mat)
assert_almost_equal(np.triu(r1[..., :, :]), r1)
# mode == 'r'
r2 = linalg.qr(a, mode='r')
assert_(r2.dtype == a_dtype)
assert_(isinstance(r2, a_type))
assert_almost_equal(r2, r1)
@pytest.mark.parametrize("size", [
(3, 4), (4, 3), (4, 4),
(3, 0), (0, 3)])
@pytest.mark.parametrize("outer_size", [
(2, 2), (2,), (2, 3, 4)])
@pytest.mark.parametrize("dt", [
np.single, np.double,
np.csingle, np.cdouble])
def test_stacked_inputs(self, outer_size, size, dt):
A = np.random.normal(size=outer_size + size).astype(dt)
B = np.random.normal(size=outer_size + size).astype(dt)
self.check_qr_stacked(A)
self.check_qr_stacked(A + 1.j*B)
class TestCholesky:
# TODO: are there no other tests for cholesky?
@pytest.mark.parametrize(
'shape', [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)]
)
@pytest.mark.parametrize(
'dtype', (np.float32, np.float64, np.complex64, np.complex128)
)
def test_basic_property(self, shape, dtype):
# Check A = L L^H
np.random.seed(1)
a = np.random.randn(*shape)
if np.issubdtype(dtype, np.complexfloating):
a = a + 1j*np.random.randn(*shape)
t = list(range(len(shape)))
t[-2:] = -1, -2
a = np.matmul(a.transpose(t).conj(), a)
a = np.asarray(a, dtype=dtype)
c = np.linalg.cholesky(a)
b = np.matmul(c, c.transpose(t).conj())
assert_allclose(b, a,
err_msg=f'{shape} {dtype}\n{a}\n{c}',
atol=500 * a.shape[0] * np.finfo(dtype).eps)
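    # Illustrative sketch (not part of the original suite): the property
    # being checked, A == L @ L^H, on a small positive-definite example:
    # >>> L = np.linalg.cholesky(np.array([[4., 2.], [2., 3.]]))
    # >>> np.allclose(L @ L.conj().T, [[4., 2.], [2., 3.]])
    # True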
def test_0_size(self):
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.cholesky(a)
assert_equal(a.shape, res.shape)
assert_(res.dtype.type is np.float64)
# for documentation purpose:
assert_(isinstance(res, np.ndarray))
a = np.zeros((1, 0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.cholesky(a)
assert_equal(a.shape, res.shape)
assert_(res.dtype.type is np.complex64)
assert_(isinstance(res, np.ndarray))
def test_byteorder_check():
# Byte order check should pass for native order
if sys.byteorder == 'little':
native = '<'
else:
native = '>'
for dtt in (np.float32, np.float64):
arr = np.eye(4, dtype=dtt)
n_arr = arr.newbyteorder(native)
sw_arr = arr.newbyteorder('S').byteswap()
assert_equal(arr.dtype.byteorder, '=')
for routine in (linalg.inv, linalg.det, linalg.pinv):
# Normal call
res = routine(arr)
# Native but not '='
assert_array_equal(res, routine(n_arr))
# Swapped
assert_array_equal(res, routine(sw_arr))
def test_generalized_raise_multiloop():
# It should raise an error even if the error doesn't occur in the
# last iteration of the ufunc inner loop
invertible = np.array([[1, 2], [3, 4]])
non_invertible = np.array([[1, 1], [1, 1]])
x = np.zeros([4, 4, 2, 2])[1::2]
x[...] = invertible
x[0, 0] = non_invertible
assert_raises(np.linalg.LinAlgError, np.linalg.inv, x)
def test_xerbla_override():
# Check that our xerbla has been successfully linked in. If it is not,
# the default xerbla routine is called, which prints a message to stdout
# and may, or may not, abort the process depending on the LAPACK package.
XERBLA_OK = 255
try:
pid = os.fork()
except (OSError, AttributeError):
# fork failed, or not running on POSIX
pytest.skip("Not POSIX or fork failed.")
if pid == 0:
# child; close i/o file handles
os.close(1)
os.close(0)
# Avoid producing core files.
import resource
resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
# These calls may abort.
try:
np.linalg.lapack_lite.xerbla()
except ValueError:
pass
except Exception:
os._exit(os.EX_CONFIG)
try:
a = np.array([[1.]])
np.linalg.lapack_lite.dorgqr(
1, 1, 1, a,
0, # <- invalid value
a, a, 0, 0)
except ValueError as e:
if "DORGQR parameter number 5" in str(e):
                # success; reuse the error code to mark success, since
                # FORTRAN STOP returns as success.
os._exit(XERBLA_OK)
# Did not abort, but our xerbla was not linked in.
os._exit(os.EX_CONFIG)
else:
# parent
pid, status = os.wait()
if os.WEXITSTATUS(status) != XERBLA_OK:
pytest.skip('Numpy xerbla not linked in.')
@pytest.mark.slow
def test_sdot_bug_8577():
    # Regression test that loading certain other libraries does not
    # lead to wrong results in float32 linear algebra.
#
# There's a bug gh-8577 on OSX that can trigger this, and perhaps
# there are also other situations in which it occurs.
#
# Do the check in a separate process.
bad_libs = ['PyQt5.QtWidgets', 'IPython']
template = textwrap.dedent("""
import sys
{before}
try:
import {bad_lib}
except ImportError:
sys.exit(0)
{after}
x = np.ones(2, dtype=np.float32)
sys.exit(0 if np.allclose(x.dot(x), 2.0) else 1)
""")
for bad_lib in bad_libs:
code = template.format(before="import numpy as np", after="",
bad_lib=bad_lib)
subprocess.check_call([sys.executable, "-c", code])
# Swapped import order
code = template.format(after="import numpy as np", before="",
bad_lib=bad_lib)
subprocess.check_call([sys.executable, "-c", code])
class TestMultiDot:
def test_basic_function_with_three_arguments(self):
        # multi_dot with three arguments uses a fast hand-coded algorithm to
        # determine the optimal order. Therefore test it separately.
A = np.random.random((6, 2))
B = np.random.random((2, 6))
C = np.random.random((6, 2))
assert_almost_equal(multi_dot([A, B, C]), A.dot(B).dot(C))
assert_almost_equal(multi_dot([A, B, C]), np.dot(A, np.dot(B, C)))
def test_basic_function_with_two_arguments(self):
# separate code path with two arguments
A = np.random.random((6, 2))
B = np.random.random((2, 6))
assert_almost_equal(multi_dot([A, B]), A.dot(B))
assert_almost_equal(multi_dot([A, B]), np.dot(A, B))
def test_basic_function_with_dynamic_programming_optimization(self):
        # multi_dot with four or more arguments uses the dynamic programming
        # optimization and therefore deserves a separate test.
A = np.random.random((6, 2))
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D = np.random.random((2, 1))
assert_almost_equal(multi_dot([A, B, C, D]), A.dot(B).dot(C).dot(D))
def test_vector_as_first_argument(self):
# The first argument can be 1-D
A1d = np.random.random(2) # 1-D
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D = np.random.random((2, 2))
# the result should be 1-D
assert_equal(multi_dot([A1d, B, C, D]).shape, (2,))
def test_vector_as_last_argument(self):
# The last argument can be 1-D
A = np.random.random((6, 2))
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D1d = np.random.random(2) # 1-D
# the result should be 1-D
assert_equal(multi_dot([A, B, C, D1d]).shape, (6,))
def test_vector_as_first_and_last_argument(self):
# The first and last arguments can be 1-D
A1d = np.random.random(2) # 1-D
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D1d = np.random.random(2) # 1-D
# the result should be a scalar
assert_equal(multi_dot([A1d, B, C, D1d]).shape, ())
def test_three_arguments_and_out(self):
        # multi_dot with three arguments uses a fast hand-coded algorithm to
        # determine the optimal order. Therefore test it separately.
A = np.random.random((6, 2))
B = np.random.random((2, 6))
C = np.random.random((6, 2))
out = np.zeros((6, 2))
ret = multi_dot([A, B, C], out=out)
assert out is ret
assert_almost_equal(out, A.dot(B).dot(C))
assert_almost_equal(out, np.dot(A, np.dot(B, C)))
def test_two_arguments_and_out(self):
# separate code path with two arguments
A = np.random.random((6, 2))
B = np.random.random((2, 6))
out = np.zeros((6, 6))
ret = multi_dot([A, B], out=out)
assert out is ret
assert_almost_equal(out, A.dot(B))
assert_almost_equal(out, np.dot(A, B))
def test_dynamic_programming_optimization_and_out(self):
        # multi_dot with four or more arguments uses the dynamic programming
        # optimization and therefore deserves a separate test.
A = np.random.random((6, 2))
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D = np.random.random((2, 1))
out = np.zeros((6, 1))
ret = multi_dot([A, B, C, D], out=out)
assert out is ret
assert_almost_equal(out, A.dot(B).dot(C).dot(D))
def test_dynamic_programming_logic(self):
# Test for the dynamic programming part
# This test is directly taken from Cormen page 376.
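        # In Cormen's notation, m[i, j] is the minimal number of scalar
        # multiplications needed to compute the product of matrices i..j,
        # and s[i, j] records the split index k that achieves that minimum.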
arrays = [np.random.random((30, 35)),
np.random.random((35, 15)),
np.random.random((15, 5)),
np.random.random((5, 10)),
np.random.random((10, 20)),
np.random.random((20, 25))]
m_expected = np.array([[0., 15750., 7875., 9375., 11875., 15125.],
[0., 0., 2625., 4375., 7125., 10500.],
[0., 0., 0., 750., 2500., 5375.],
[0., 0., 0., 0., 1000., 3500.],
[0., 0., 0., 0., 0., 5000.],
[0., 0., 0., 0., 0., 0.]])
s_expected = np.array([[0, 1, 1, 3, 3, 3],
[0, 0, 2, 3, 3, 3],
[0, 0, 0, 3, 3, 3],
[0, 0, 0, 0, 4, 5],
[0, 0, 0, 0, 0, 5],
[0, 0, 0, 0, 0, 0]], dtype=int)
        s_expected -= 1  # Cormen uses 1-based indexing, Python does not.
s, m = _multi_dot_matrix_chain_order(arrays, return_costs=True)
# Only the upper triangular part (without the diagonal) is interesting.
assert_almost_equal(np.triu(s[:-1, 1:]),
np.triu(s_expected[:-1, 1:]))
assert_almost_equal(np.triu(m), np.triu(m_expected))
def test_too_few_input_arrays(self):
assert_raises(ValueError, multi_dot, [])
assert_raises(ValueError, multi_dot, [np.random.random((3, 3))])
class TestTensorinv:
@pytest.mark.parametrize("arr, ind", [
(np.ones((4, 6, 8, 2)), 2),
(np.ones((3, 3, 2)), 1),
])
def test_non_square_handling(self, arr, ind):
with assert_raises(LinAlgError):
linalg.tensorinv(arr, ind=ind)
@pytest.mark.parametrize("shape, ind", [
# examples from docstring
((4, 6, 8, 3), 2),
((24, 8, 3), 1),
])
def test_tensorinv_shape(self, shape, ind):
a = np.eye(24)
a.shape = shape
ainv = linalg.tensorinv(a=a, ind=ind)
expected = a.shape[ind:] + a.shape[:ind]
actual = ainv.shape
assert_equal(actual, expected)
@pytest.mark.parametrize("ind", [
0, -2,
])
def test_tensorinv_ind_limit(self, ind):
a = np.eye(24)
a.shape = (4, 6, 8, 3)
with assert_raises(ValueError):
linalg.tensorinv(a=a, ind=ind)
def test_tensorinv_result(self):
# mimic a docstring example
a = np.eye(24)
a.shape = (24, 8, 3)
ainv = linalg.tensorinv(a, ind=1)
b = np.ones(24)
assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
class TestTensorsolve:
@pytest.mark.parametrize("a, axes", [
(np.ones((4, 6, 8, 2)), None),
(np.ones((3, 3, 2)), (0, 2)),
])
def test_non_square_handling(self, a, axes):
with assert_raises(LinAlgError):
b = np.ones(a.shape[:2])
linalg.tensorsolve(a, b, axes=axes)
@pytest.mark.parametrize("shape",
[(2, 3, 6), (3, 4, 4, 3), (0, 3, 3, 0)],
)
def test_tensorsolve_result(self, shape):
a = np.random.randn(*shape)
b = np.ones(a.shape[:2])
x = np.linalg.tensorsolve(a, b)
assert_allclose(np.tensordot(a, x, axes=len(x.shape)), b)
def test_unsupported_commontype():
    # linalg gracefully handles unsupported types
arr = np.array([[1, -2], [2, 5]], dtype='float16')
with assert_raises_regex(TypeError, "unsupported in linalg"):
linalg.cholesky(arr)
#@pytest.mark.slow
#@pytest.mark.xfail(not HAS_LAPACK64, run=False,
# reason="Numpy not compiled with 64-bit BLAS/LAPACK")
#@requires_memory(free_bytes=16e9)
@pytest.mark.skip(reason="Bad memory reports lead to OOM in ci testing")
def test_blas64_dot():
n = 2**32
a = np.zeros([1, n], dtype=np.float32)
b = np.ones([1, 1], dtype=np.float32)
a[0,-1] = 1
c = np.dot(b, a)
assert_equal(c[0,-1], 1)
@pytest.mark.xfail(not HAS_LAPACK64,
reason="Numpy not compiled with 64-bit BLAS/LAPACK")
def test_blas64_geqrf_lwork_smoketest():
# Smoke test LAPACK geqrf lwork call with 64-bit integers
dtype = np.float64
lapack_routine = np.linalg.lapack_lite.dgeqrf
m = 2**32 + 1
n = 2**32 + 1
lda = m
    # Dummy arrays, not referenced by the lapack routine, so they don't
    # need to be of the right size
a = np.zeros([1, 1], dtype=dtype)
work = np.zeros([1], dtype=dtype)
tau = np.zeros([1], dtype=dtype)
# Size query
results = lapack_routine(m, n, a, lda, tau, work, -1, 0)
assert_equal(results['info'], 0)
assert_equal(results['m'], m)
    assert_equal(results['n'], n)
    # Should result in an integer of a reasonable size
lwork = int(work.item())
assert_(2**32 < lwork < 2**42)
| 77,352 | Python | 34.41804 | 106 | 0.539624 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/_add_docstring.py | """A module for creating docstrings for sphinx ``data`` domains."""
import re
import textwrap
from ._generic_alias import NDArray
_docstrings_list = []
def add_newdoc(name: str, value: str, doc: str) -> None:
"""Append ``_docstrings_list`` with a docstring for `name`.
Parameters
----------
name : str
The name of the object.
value : str
A string-representation of the object.
doc : str
The docstring of the object.
"""
_docstrings_list.append((name, value, doc))
def _parse_docstrings() -> str:
"""Convert all docstrings in ``_docstrings_list`` into a single
sphinx-legible text block.
"""
type_list_ret = []
for name, value, doc in _docstrings_list:
        s = textwrap.dedent(doc).replace("\n", "\n    ")
# Replace sections by rubrics
lines = s.split("\n")
new_lines = []
indent = ""
for line in lines:
m = re.match(r'^(\s+)[-=]+\s*$', line)
if m and new_lines:
prev = textwrap.dedent(new_lines.pop())
if prev == "Examples":
indent = ""
new_lines.append(f'{m.group(1)}.. rubric:: {prev}')
else:
indent = 4 * " "
new_lines.append(f'{m.group(1)}.. admonition:: {prev}')
new_lines.append("")
else:
new_lines.append(f"{indent}{line}")
s = "\n".join(new_lines)
s_block = f""".. data:: {name}\n :value: {value}\n {s}"""
type_list_ret.append(s_block)
return "\n".join(type_list_ret)
add_newdoc('ArrayLike', 'typing.Union[...]',
"""
A `~typing.Union` representing objects that can be coerced
into an `~numpy.ndarray`.
Among others this includes the likes of:
* Scalars.
* (Nested) sequences.
* Objects implementing the `~class.__array__` protocol.
.. versionadded:: 1.20
See Also
--------
:term:`array_like`:
Any scalar or sequence that can be interpreted as an ndarray.
Examples
--------
.. code-block:: python
>>> import numpy as np
>>> import numpy.typing as npt
>>> def as_array(a: npt.ArrayLike) -> np.ndarray:
... return np.array(a)
""")
add_newdoc('DTypeLike', 'typing.Union[...]',
"""
A `~typing.Union` representing objects that can be coerced
into a `~numpy.dtype`.
Among others this includes the likes of:
* :class:`type` objects.
* Character codes or the names of :class:`type` objects.
* Objects with the ``.dtype`` attribute.
.. versionadded:: 1.20
See Also
--------
:ref:`Specifying and constructing data types <arrays.dtypes.constructing>`
A comprehensive overview of all objects that can be coerced
into data types.
Examples
--------
.. code-block:: python
>>> import numpy as np
>>> import numpy.typing as npt
>>> def as_dtype(d: npt.DTypeLike) -> np.dtype:
... return np.dtype(d)
""")
add_newdoc('NDArray', repr(NDArray),
"""
A :term:`generic <generic type>` version of
`np.ndarray[Any, np.dtype[+ScalarType]] <numpy.ndarray>`.
Can be used during runtime for typing arrays with a given dtype
and unspecified shape.
.. versionadded:: 1.21
Examples
--------
.. code-block:: python
>>> import numpy as np
>>> import numpy.typing as npt
>>> print(npt.NDArray)
numpy.ndarray[typing.Any, numpy.dtype[+ScalarType]]
>>> print(npt.NDArray[np.float64])
numpy.ndarray[typing.Any, numpy.dtype[numpy.float64]]
>>> NDArrayInt = npt.NDArray[np.int_]
>>> a: NDArrayInt = np.arange(10)
>>> def func(a: npt.ArrayLike) -> npt.NDArray[Any]:
... return np.array(a)
""")
_docstrings = _parse_docstrings()
| 3,925 | Python | 24.660131 | 78 | 0.551338 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/_callable.pyi | """
A module with various ``typing.Protocol`` subclasses that implement
the ``__call__`` magic method.
See the `Mypy documentation`_ on protocols for more details.
.. _`Mypy documentation`: https://mypy.readthedocs.io/en/stable/protocols.html#callback-protocols
"""
from __future__ import annotations
from typing import (
TypeVar,
overload,
Any,
NoReturn,
Protocol,
)
from numpy import (
ndarray,
dtype,
generic,
bool_,
timedelta64,
number,
integer,
unsignedinteger,
signedinteger,
int8,
int_,
floating,
float64,
complexfloating,
complex128,
)
from ._nbit import _NBitInt, _NBitDouble
from ._scalars import (
_BoolLike_co,
_IntLike_co,
_FloatLike_co,
_NumberLike_co,
)
from . import NBitBase
from ._generic_alias import NDArray
from ._nested_sequence import _NestedSequence
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
_T1_contra = TypeVar("_T1_contra", contravariant=True)
_T2_contra = TypeVar("_T2_contra", contravariant=True)
_2Tuple = tuple[_T1, _T1]
_NBit1 = TypeVar("_NBit1", bound=NBitBase)
_NBit2 = TypeVar("_NBit2", bound=NBitBase)
_IntType = TypeVar("_IntType", bound=integer)
_FloatType = TypeVar("_FloatType", bound=floating)
_NumberType = TypeVar("_NumberType", bound=number)
_NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number)
_GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic)
class _BoolOp(Protocol[_GenericType_co]):
@overload
def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ...
@overload # platform dependent
def __call__(self, other: int, /) -> int_: ...
@overload
def __call__(self, other: float, /) -> float64: ...
@overload
def __call__(self, other: complex, /) -> complex128: ...
@overload
def __call__(self, other: _NumberType, /) -> _NumberType: ...
class _BoolBitOp(Protocol[_GenericType_co]):
@overload
def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ...
@overload # platform dependent
def __call__(self, other: int, /) -> int_: ...
@overload
def __call__(self, other: _IntType, /) -> _IntType: ...
class _BoolSub(Protocol):
# Note that `other: bool_` is absent here
@overload
def __call__(self, other: bool, /) -> NoReturn: ...
@overload # platform dependent
def __call__(self, other: int, /) -> int_: ...
@overload
def __call__(self, other: float, /) -> float64: ...
@overload
def __call__(self, other: complex, /) -> complex128: ...
@overload
def __call__(self, other: _NumberType, /) -> _NumberType: ...
class _BoolTrueDiv(Protocol):
@overload
def __call__(self, other: float | _IntLike_co, /) -> float64: ...
@overload
def __call__(self, other: complex, /) -> complex128: ...
@overload
def __call__(self, other: _NumberType, /) -> _NumberType: ...
class _BoolMod(Protocol):
@overload
def __call__(self, other: _BoolLike_co, /) -> int8: ...
@overload # platform dependent
def __call__(self, other: int, /) -> int_: ...
@overload
def __call__(self, other: float, /) -> float64: ...
@overload
def __call__(self, other: _IntType, /) -> _IntType: ...
@overload
def __call__(self, other: _FloatType, /) -> _FloatType: ...
class _BoolDivMod(Protocol):
@overload
def __call__(self, other: _BoolLike_co, /) -> _2Tuple[int8]: ...
@overload # platform dependent
def __call__(self, other: int, /) -> _2Tuple[int_]: ...
@overload
def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
@overload
def __call__(self, other: _IntType, /) -> _2Tuple[_IntType]: ...
@overload
def __call__(self, other: _FloatType, /) -> _2Tuple[_FloatType]: ...
class _TD64Div(Protocol[_NumberType_co]):
@overload
def __call__(self, other: timedelta64, /) -> _NumberType_co: ...
@overload
def __call__(self, other: _BoolLike_co, /) -> NoReturn: ...
@overload
def __call__(self, other: _FloatLike_co, /) -> timedelta64: ...
class _IntTrueDiv(Protocol[_NBit1]):
@overload
def __call__(self, other: bool, /) -> floating[_NBit1]: ...
@overload
def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ...
@overload
def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
@overload
def __call__(
self, other: complex, /,
) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
@overload
def __call__(self, other: integer[_NBit2], /) -> floating[_NBit1 | _NBit2]: ...
class _UnsignedIntOp(Protocol[_NBit1]):
# NOTE: `uint64 + signedinteger -> float64`
@overload
def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ...
@overload
def __call__(
self, other: int | signedinteger[Any], /
) -> Any: ...
@overload
def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
@overload
def __call__(
self, other: complex, /,
) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
@overload
def __call__(
self, other: unsignedinteger[_NBit2], /
) -> unsignedinteger[_NBit1 | _NBit2]: ...
class _UnsignedIntBitOp(Protocol[_NBit1]):
@overload
def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ...
@overload
def __call__(self, other: int, /) -> signedinteger[Any]: ...
@overload
def __call__(self, other: signedinteger[Any], /) -> signedinteger[Any]: ...
@overload
def __call__(
self, other: unsignedinteger[_NBit2], /
) -> unsignedinteger[_NBit1 | _NBit2]: ...
class _UnsignedIntMod(Protocol[_NBit1]):
@overload
def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ...
@overload
def __call__(
self, other: int | signedinteger[Any], /
) -> Any: ...
@overload
def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
@overload
def __call__(
self, other: unsignedinteger[_NBit2], /
) -> unsignedinteger[_NBit1 | _NBit2]: ...
class _UnsignedIntDivMod(Protocol[_NBit1]):
@overload
def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ...
@overload
def __call__(
self, other: int | signedinteger[Any], /
) -> _2Tuple[Any]: ...
@overload
def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
@overload
def __call__(
self, other: unsignedinteger[_NBit2], /
) -> _2Tuple[unsignedinteger[_NBit1 | _NBit2]]: ...
class _SignedIntOp(Protocol[_NBit1]):
@overload
def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ...
@overload
def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ...
@overload
def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
@overload
def __call__(
self, other: complex, /,
) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
@overload
def __call__(
self, other: signedinteger[_NBit2], /,
) -> signedinteger[_NBit1 | _NBit2]: ...
class _SignedIntBitOp(Protocol[_NBit1]):
@overload
def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ...
@overload
def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ...
@overload
def __call__(
self, other: signedinteger[_NBit2], /,
) -> signedinteger[_NBit1 | _NBit2]: ...
class _SignedIntMod(Protocol[_NBit1]):
@overload
def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ...
@overload
def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ...
@overload
def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
@overload
def __call__(
self, other: signedinteger[_NBit2], /,
) -> signedinteger[_NBit1 | _NBit2]: ...
class _SignedIntDivMod(Protocol[_NBit1]):
@overload
def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ...
@overload
def __call__(self, other: int, /) -> _2Tuple[signedinteger[_NBit1 | _NBitInt]]: ...
@overload
def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
@overload
def __call__(
self, other: signedinteger[_NBit2], /,
) -> _2Tuple[signedinteger[_NBit1 | _NBit2]]: ...
class _FloatOp(Protocol[_NBit1]):
@overload
def __call__(self, other: bool, /) -> floating[_NBit1]: ...
@overload
def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ...
@overload
def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
@overload
def __call__(
self, other: complex, /,
) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
@overload
def __call__(
self, other: integer[_NBit2] | floating[_NBit2], /
) -> floating[_NBit1 | _NBit2]: ...
class _FloatMod(Protocol[_NBit1]):
@overload
def __call__(self, other: bool, /) -> floating[_NBit1]: ...
@overload
def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ...
@overload
def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
@overload
def __call__(
self, other: integer[_NBit2] | floating[_NBit2], /
) -> floating[_NBit1 | _NBit2]: ...
class _FloatDivMod(Protocol[_NBit1]):
@overload
def __call__(self, other: bool, /) -> _2Tuple[floating[_NBit1]]: ...
@overload
def __call__(self, other: int, /) -> _2Tuple[floating[_NBit1 | _NBitInt]]: ...
@overload
def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
@overload
def __call__(
self, other: integer[_NBit2] | floating[_NBit2], /
) -> _2Tuple[floating[_NBit1 | _NBit2]]: ...
class _ComplexOp(Protocol[_NBit1]):
@overload
def __call__(self, other: bool, /) -> complexfloating[_NBit1, _NBit1]: ...
@overload
def __call__(self, other: int, /) -> complexfloating[_NBit1 | _NBitInt, _NBit1 | _NBitInt]: ...
@overload
def __call__(
self, other: complex, /,
) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
@overload
def __call__(
self,
other: (
integer[_NBit2]
| floating[_NBit2]
| complexfloating[_NBit2, _NBit2]
), /,
) -> complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]: ...
class _NumberOp(Protocol):
def __call__(self, other: _NumberLike_co, /) -> Any: ...
class _SupportsLT(Protocol):
def __lt__(self, other: Any, /) -> object: ...
class _SupportsGT(Protocol):
def __gt__(self, other: Any, /) -> object: ...
class _ComparisonOp(Protocol[_T1_contra, _T2_contra]):
@overload
def __call__(self, other: _T1_contra, /) -> bool_: ...
@overload
def __call__(self, other: _T2_contra, /) -> NDArray[bool_]: ...
@overload
def __call__(
self,
other: _SupportsLT | _SupportsGT | _NestedSequence[_SupportsLT | _SupportsGT],
/,
) -> Any: ...
| 11,124 | unknown | 31.817109 | 99 | 0.574703 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/_nbit.py | """A module with the precisions of platform-specific `~numpy.number`s."""
from typing import Any
# To-be replaced with a `npt.NBitBase` subclass by numpy's mypy plugin
_NBitByte = Any
_NBitShort = Any
_NBitIntC = Any
_NBitIntP = Any
_NBitInt = Any
_NBitLongLong = Any
_NBitHalf = Any
_NBitSingle = Any
_NBitDouble = Any
_NBitLongDouble = Any
| 345 | Python | 19.35294 | 73 | 0.727536 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/_dtype_like.py | from typing import (
Any,
List,
Sequence,
Tuple,
Union,
Type,
TypeVar,
Protocol,
TypedDict,
runtime_checkable,
)
import numpy as np
from ._shape import _ShapeLike
from ._generic_alias import _DType as DType
from ._char_codes import (
_BoolCodes,
_UInt8Codes,
_UInt16Codes,
_UInt32Codes,
_UInt64Codes,
_Int8Codes,
_Int16Codes,
_Int32Codes,
_Int64Codes,
_Float16Codes,
_Float32Codes,
_Float64Codes,
_Complex64Codes,
_Complex128Codes,
_ByteCodes,
_ShortCodes,
_IntCCodes,
_IntPCodes,
_IntCodes,
_LongLongCodes,
_UByteCodes,
_UShortCodes,
_UIntCCodes,
_UIntPCodes,
_UIntCodes,
_ULongLongCodes,
_HalfCodes,
_SingleCodes,
_DoubleCodes,
_LongDoubleCodes,
_CSingleCodes,
_CDoubleCodes,
_CLongDoubleCodes,
_DT64Codes,
_TD64Codes,
_StrCodes,
_BytesCodes,
_VoidCodes,
_ObjectCodes,
)
_SCT = TypeVar("_SCT", bound=np.generic)
_DType_co = TypeVar("_DType_co", covariant=True, bound=DType[Any])
_DTypeLikeNested = Any # TODO: wait for support for recursive types
# Mandatory keys
class _DTypeDictBase(TypedDict):
names: Sequence[str]
formats: Sequence[_DTypeLikeNested]
# Mandatory + optional keys
class _DTypeDict(_DTypeDictBase, total=False):
# Only `str` elements are usable as indexing aliases,
# but `titles` can in principle accept any object
offsets: Sequence[int]
titles: Sequence[Any]
itemsize: int
aligned: bool
# A protocol for anything with the dtype attribute
@runtime_checkable
class _SupportsDType(Protocol[_DType_co]):
@property
def dtype(self) -> _DType_co: ...
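# Illustrative sketch (not part of the original file): because the protocol
# is runtime-checkable, ``isinstance`` merely verifies that a ``dtype``
# attribute exists:
# >>> isinstance(np.zeros(3), _SupportsDType)
# True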
# A subset of `npt.DTypeLike` that can be parametrized w.r.t. `np.generic`
_DTypeLike = Union[
"np.dtype[_SCT]",
Type[_SCT],
_SupportsDType["np.dtype[_SCT]"],
]
# Would create a dtype[np.void]
_VoidDTypeLike = Union[
# (flexible_dtype, itemsize)
Tuple[_DTypeLikeNested, int],
# (fixed_dtype, shape)
Tuple[_DTypeLikeNested, _ShapeLike],
# [(field_name, field_dtype, field_shape), ...]
#
# The type here is quite broad because NumPy accepts quite a wide
# range of inputs inside the list; see the tests for some
# examples.
List[Any],
# {'names': ..., 'formats': ..., 'offsets': ..., 'titles': ...,
# 'itemsize': ...}
_DTypeDict,
# (base_dtype, new_dtype)
Tuple[_DTypeLikeNested, _DTypeLikeNested],
]
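# Illustrative examples (not part of the original file) of the forms above;
# the reprs assume a little-endian platform:
# >>> np.dtype((np.void, 10))          # (flexible_dtype, itemsize)
# dtype('V10')
# >>> np.dtype(('i4', (2, 2)))         # (fixed_dtype, shape)
# dtype(('<i4', (2, 2)))
# >>> np.dtype([('x', 'i4'), ('y', 'f8')])  # list of fields
# dtype([('x', '<i4'), ('y', '<f8')])
# >>> np.dtype({'names': ['x'], 'formats': ['i4']})  # dict form
# dtype([('x', '<i4')])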
# Anything that can be coerced into numpy.dtype.
# Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html
DTypeLike = Union[
DType[Any],
# default data type (float64)
None,
# array-scalar types and generic types
Type[Any], # NOTE: We're stuck with `Type[Any]` due to object dtypes
# anything with a dtype attribute
_SupportsDType[DType[Any]],
# character codes, type strings or comma-separated fields, e.g., 'float64'
str,
_VoidDTypeLike,
]
# NOTE: while it is possible to provide the dtype as a dict of
# dtype-like objects (e.g. `{'field1': ..., 'field2': ..., ...}`),
# this syntax is officially discouraged and
# therefore not included in the Union defining `DTypeLike`.
#
# See https://github.com/numpy/numpy/issues/16891 for more details.
# Aliases for commonly used dtype-like objects.
# Note that the precision of `np.number` subclasses is ignored herein.
_DTypeLikeBool = Union[
Type[bool],
Type[np.bool_],
DType[np.bool_],
_SupportsDType[DType[np.bool_]],
_BoolCodes,
]
_DTypeLikeUInt = Union[
Type[np.unsignedinteger],
DType[np.unsignedinteger],
_SupportsDType[DType[np.unsignedinteger]],
_UInt8Codes,
_UInt16Codes,
_UInt32Codes,
_UInt64Codes,
_UByteCodes,
_UShortCodes,
_UIntCCodes,
_UIntPCodes,
_UIntCodes,
_ULongLongCodes,
]
_DTypeLikeInt = Union[
Type[int],
Type[np.signedinteger],
DType[np.signedinteger],
_SupportsDType[DType[np.signedinteger]],
_Int8Codes,
_Int16Codes,
_Int32Codes,
_Int64Codes,
_ByteCodes,
_ShortCodes,
_IntCCodes,
_IntPCodes,
_IntCodes,
_LongLongCodes,
]
_DTypeLikeFloat = Union[
Type[float],
Type[np.floating],
DType[np.floating],
_SupportsDType[DType[np.floating]],
_Float16Codes,
_Float32Codes,
_Float64Codes,
_HalfCodes,
_SingleCodes,
_DoubleCodes,
_LongDoubleCodes,
]
_DTypeLikeComplex = Union[
Type[complex],
Type[np.complexfloating],
DType[np.complexfloating],
_SupportsDType[DType[np.complexfloating]],
_Complex64Codes,
_Complex128Codes,
_CSingleCodes,
_CDoubleCodes,
_CLongDoubleCodes,
]
_DTypeLikeDT64 = Union[
    Type[np.datetime64],
    DType[np.datetime64],
    _SupportsDType[DType[np.datetime64]],
    _DT64Codes,
]
_DTypeLikeTD64 = Union[
    Type[np.timedelta64],
    DType[np.timedelta64],
    _SupportsDType[DType[np.timedelta64]],
    _TD64Codes,
]
_DTypeLikeStr = Union[
Type[str],
Type[np.str_],
DType[np.str_],
_SupportsDType[DType[np.str_]],
_StrCodes,
]
_DTypeLikeBytes = Union[
Type[bytes],
Type[np.bytes_],
DType[np.bytes_],
_SupportsDType[DType[np.bytes_]],
_BytesCodes,
]
_DTypeLikeVoid = Union[
Type[np.void],
DType[np.void],
_SupportsDType[DType[np.void]],
_VoidCodes,
_VoidDTypeLike,
]
_DTypeLikeObject = Union[
type,
DType[np.object_],
_SupportsDType[DType[np.object_]],
_ObjectCodes,
]
_DTypeLikeComplex_co = Union[
_DTypeLikeBool,
_DTypeLikeUInt,
_DTypeLikeInt,
_DTypeLikeFloat,
_DTypeLikeComplex,
]
| 5,628 | Python | 21.516 | 78 | 0.649431 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/_char_codes.py | from typing import Literal
_BoolCodes = Literal["?", "=?", "<?", ">?", "bool", "bool_", "bool8"]
_UInt8Codes = Literal["uint8", "u1", "=u1", "<u1", ">u1"]
_UInt16Codes = Literal["uint16", "u2", "=u2", "<u2", ">u2"]
_UInt32Codes = Literal["uint32", "u4", "=u4", "<u4", ">u4"]
_UInt64Codes = Literal["uint64", "u8", "=u8", "<u8", ">u8"]
_Int8Codes = Literal["int8", "i1", "=i1", "<i1", ">i1"]
_Int16Codes = Literal["int16", "i2", "=i2", "<i2", ">i2"]
_Int32Codes = Literal["int32", "i4", "=i4", "<i4", ">i4"]
_Int64Codes = Literal["int64", "i8", "=i8", "<i8", ">i8"]
_Float16Codes = Literal["float16", "f2", "=f2", "<f2", ">f2"]
_Float32Codes = Literal["float32", "f4", "=f4", "<f4", ">f4"]
_Float64Codes = Literal["float64", "f8", "=f8", "<f8", ">f8"]
_Complex64Codes = Literal["complex64", "c8", "=c8", "<c8", ">c8"]
_Complex128Codes = Literal["complex128", "c16", "=c16", "<c16", ">c16"]
_ByteCodes = Literal["byte", "b", "=b", "<b", ">b"]
_ShortCodes = Literal["short", "h", "=h", "<h", ">h"]
_IntCCodes = Literal["intc", "i", "=i", "<i", ">i"]
_IntPCodes = Literal["intp", "int0", "p", "=p", "<p", ">p"]
_IntCodes = Literal["long", "int", "int_", "l", "=l", "<l", ">l"]
_LongLongCodes = Literal["longlong", "q", "=q", "<q", ">q"]
_UByteCodes = Literal["ubyte", "B", "=B", "<B", ">B"]
_UShortCodes = Literal["ushort", "H", "=H", "<H", ">H"]
_UIntCCodes = Literal["uintc", "I", "=I", "<I", ">I"]
_UIntPCodes = Literal["uintp", "uint0", "P", "=P", "<P", ">P"]
_UIntCodes = Literal["ulong", "uint", "L", "=L", "<L", ">L"]
_ULongLongCodes = Literal["ulonglong", "Q", "=Q", "<Q", ">Q"]
_HalfCodes = Literal["half", "e", "=e", "<e", ">e"]
_SingleCodes = Literal["single", "f", "=f", "<f", ">f"]
_DoubleCodes = Literal["double", "float", "float_", "d", "=d", "<d", ">d"]
_LongDoubleCodes = Literal["longdouble", "longfloat", "g", "=g", "<g", ">g"]
_CSingleCodes = Literal["csingle", "singlecomplex", "F", "=F", "<F", ">F"]
_CDoubleCodes = Literal["cdouble", "complex", "complex_", "cfloat", "D", "=D", "<D", ">D"]
_CLongDoubleCodes = Literal["clongdouble", "clongfloat", "longcomplex", "G", "=G", "<G", ">G"]
_StrCodes = Literal["str", "str_", "str0", "unicode", "unicode_", "U", "=U", "<U", ">U"]
_BytesCodes = Literal["bytes", "bytes_", "bytes0", "S", "=S", "<S", ">S"]
_VoidCodes = Literal["void", "void0", "V", "=V", "<V", ">V"]
_ObjectCodes = Literal["object", "object_", "O", "=O", "<O", ">O"]
_DT64Codes = Literal[
"datetime64", "=datetime64", "<datetime64", ">datetime64",
"datetime64[Y]", "=datetime64[Y]", "<datetime64[Y]", ">datetime64[Y]",
"datetime64[M]", "=datetime64[M]", "<datetime64[M]", ">datetime64[M]",
"datetime64[W]", "=datetime64[W]", "<datetime64[W]", ">datetime64[W]",
"datetime64[D]", "=datetime64[D]", "<datetime64[D]", ">datetime64[D]",
"datetime64[h]", "=datetime64[h]", "<datetime64[h]", ">datetime64[h]",
"datetime64[m]", "=datetime64[m]", "<datetime64[m]", ">datetime64[m]",
"datetime64[s]", "=datetime64[s]", "<datetime64[s]", ">datetime64[s]",
"datetime64[ms]", "=datetime64[ms]", "<datetime64[ms]", ">datetime64[ms]",
"datetime64[us]", "=datetime64[us]", "<datetime64[us]", ">datetime64[us]",
"datetime64[ns]", "=datetime64[ns]", "<datetime64[ns]", ">datetime64[ns]",
"datetime64[ps]", "=datetime64[ps]", "<datetime64[ps]", ">datetime64[ps]",
"datetime64[fs]", "=datetime64[fs]", "<datetime64[fs]", ">datetime64[fs]",
"datetime64[as]", "=datetime64[as]", "<datetime64[as]", ">datetime64[as]",
"M", "=M", "<M", ">M",
"M8", "=M8", "<M8", ">M8",
"M8[Y]", "=M8[Y]", "<M8[Y]", ">M8[Y]",
"M8[M]", "=M8[M]", "<M8[M]", ">M8[M]",
"M8[W]", "=M8[W]", "<M8[W]", ">M8[W]",
"M8[D]", "=M8[D]", "<M8[D]", ">M8[D]",
"M8[h]", "=M8[h]", "<M8[h]", ">M8[h]",
"M8[m]", "=M8[m]", "<M8[m]", ">M8[m]",
"M8[s]", "=M8[s]", "<M8[s]", ">M8[s]",
"M8[ms]", "=M8[ms]", "<M8[ms]", ">M8[ms]",
"M8[us]", "=M8[us]", "<M8[us]", ">M8[us]",
"M8[ns]", "=M8[ns]", "<M8[ns]", ">M8[ns]",
"M8[ps]", "=M8[ps]", "<M8[ps]", ">M8[ps]",
"M8[fs]", "=M8[fs]", "<M8[fs]", ">M8[fs]",
"M8[as]", "=M8[as]", "<M8[as]", ">M8[as]",
]
_TD64Codes = Literal[
"timedelta64", "=timedelta64", "<timedelta64", ">timedelta64",
"timedelta64[Y]", "=timedelta64[Y]", "<timedelta64[Y]", ">timedelta64[Y]",
"timedelta64[M]", "=timedelta64[M]", "<timedelta64[M]", ">timedelta64[M]",
"timedelta64[W]", "=timedelta64[W]", "<timedelta64[W]", ">timedelta64[W]",
"timedelta64[D]", "=timedelta64[D]", "<timedelta64[D]", ">timedelta64[D]",
"timedelta64[h]", "=timedelta64[h]", "<timedelta64[h]", ">timedelta64[h]",
"timedelta64[m]", "=timedelta64[m]", "<timedelta64[m]", ">timedelta64[m]",
"timedelta64[s]", "=timedelta64[s]", "<timedelta64[s]", ">timedelta64[s]",
"timedelta64[ms]", "=timedelta64[ms]", "<timedelta64[ms]", ">timedelta64[ms]",
"timedelta64[us]", "=timedelta64[us]", "<timedelta64[us]", ">timedelta64[us]",
"timedelta64[ns]", "=timedelta64[ns]", "<timedelta64[ns]", ">timedelta64[ns]",
"timedelta64[ps]", "=timedelta64[ps]", "<timedelta64[ps]", ">timedelta64[ps]",
"timedelta64[fs]", "=timedelta64[fs]", "<timedelta64[fs]", ">timedelta64[fs]",
"timedelta64[as]", "=timedelta64[as]", "<timedelta64[as]", ">timedelta64[as]",
"m", "=m", "<m", ">m",
"m8", "=m8", "<m8", ">m8",
"m8[Y]", "=m8[Y]", "<m8[Y]", ">m8[Y]",
"m8[M]", "=m8[M]", "<m8[M]", ">m8[M]",
"m8[W]", "=m8[W]", "<m8[W]", ">m8[W]",
"m8[D]", "=m8[D]", "<m8[D]", ">m8[D]",
"m8[h]", "=m8[h]", "<m8[h]", ">m8[h]",
"m8[m]", "=m8[m]", "<m8[m]", ">m8[m]",
"m8[s]", "=m8[s]", "<m8[s]", ">m8[s]",
"m8[ms]", "=m8[ms]", "<m8[ms]", ">m8[ms]",
"m8[us]", "=m8[us]", "<m8[us]", ">m8[us]",
"m8[ns]", "=m8[ns]", "<m8[ns]", ">m8[ns]",
"m8[ps]", "=m8[ps]", "<m8[ps]", ">m8[ps]",
"m8[fs]", "=m8[fs]", "<m8[fs]", ">m8[fs]",
"m8[as]", "=m8[as]", "<m8[as]", ">m8[as]",
]
| 5,916 | Python | 51.830357 | 94 | 0.505578 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/_generic_alias.py | from __future__ import annotations
import sys
import types
from collections.abc import Generator, Iterable, Iterator
from typing import (
Any,
ClassVar,
NoReturn,
TypeVar,
TYPE_CHECKING,
)
import numpy as np
__all__ = ["_GenericAlias", "NDArray"]
_T = TypeVar("_T", bound="_GenericAlias")
def _to_str(obj: object) -> str:
"""Helper function for `_GenericAlias.__repr__`."""
if obj is Ellipsis:
return '...'
elif isinstance(obj, type) and not isinstance(obj, _GENERIC_ALIAS_TYPE):
if obj.__module__ == 'builtins':
return obj.__qualname__
else:
return f'{obj.__module__}.{obj.__qualname__}'
else:
return repr(obj)
def _parse_parameters(args: Iterable[Any]) -> Generator[TypeVar, None, None]:
"""Search for all typevars and typevar-containing objects in `args`.
Helper function for `_GenericAlias.__init__`.
"""
for i in args:
if hasattr(i, "__parameters__"):
yield from i.__parameters__
elif isinstance(i, TypeVar):
yield i
def _reconstruct_alias(alias: _T, parameters: Iterator[TypeVar]) -> _T:
"""Recursively replace all typevars with those from `parameters`.
Helper function for `_GenericAlias.__getitem__`.
"""
args = []
for i in alias.__args__:
if isinstance(i, TypeVar):
value: Any = next(parameters)
elif isinstance(i, _GenericAlias):
value = _reconstruct_alias(i, parameters)
elif hasattr(i, "__parameters__"):
prm_tup = tuple(next(parameters) for _ in i.__parameters__)
value = i[prm_tup]
else:
value = i
args.append(value)
cls = type(alias)
return cls(alias.__origin__, tuple(args), alias.__unpacked__)
class _GenericAlias:
"""A python-based backport of the `types.GenericAlias` class.
E.g. for ``t = list[int]``, ``t.__origin__`` is ``list`` and
``t.__args__`` is ``(int,)``.
See Also
--------
:pep:`585`
The PEP responsible for introducing `types.GenericAlias`.
"""
__slots__ = (
"__weakref__",
"_origin",
"_args",
"_parameters",
"_hash",
"_starred",
)
@property
def __origin__(self) -> type:
return super().__getattribute__("_origin")
@property
def __args__(self) -> tuple[object, ...]:
return super().__getattribute__("_args")
@property
def __parameters__(self) -> tuple[TypeVar, ...]:
"""Type variables in the ``GenericAlias``."""
return super().__getattribute__("_parameters")
@property
def __unpacked__(self) -> bool:
return super().__getattribute__("_starred")
@property
def __typing_unpacked_tuple_args__(self) -> tuple[object, ...] | None:
# NOTE: This should return `__args__` if `__origin__` is a tuple,
# which should never be the case with how `_GenericAlias` is used
# within numpy
return None
def __init__(
self,
origin: type,
args: object | tuple[object, ...],
starred: bool = False,
) -> None:
self._origin = origin
self._args = args if isinstance(args, tuple) else (args,)
self._parameters = tuple(_parse_parameters(self.__args__))
self._starred = starred
@property
def __call__(self) -> type[Any]:
return self.__origin__
def __reduce__(self: _T) -> tuple[
type[_T],
tuple[type[Any], tuple[object, ...], bool],
]:
cls = type(self)
return cls, (self.__origin__, self.__args__, self.__unpacked__)
def __mro_entries__(self, bases: Iterable[object]) -> tuple[type[Any]]:
return (self.__origin__,)
def __dir__(self) -> list[str]:
"""Implement ``dir(self)``."""
cls = type(self)
dir_origin = set(dir(self.__origin__))
return sorted(cls._ATTR_EXCEPTIONS | dir_origin)
def __hash__(self) -> int:
"""Return ``hash(self)``."""
# Attempt to use the cached hash
try:
return super().__getattribute__("_hash")
except AttributeError:
self._hash: int = (
hash(self.__origin__) ^
hash(self.__args__) ^
hash(self.__unpacked__)
)
return super().__getattribute__("_hash")
def __instancecheck__(self, obj: object) -> NoReturn:
"""Check if an `obj` is an instance."""
raise TypeError("isinstance() argument 2 cannot be a "
"parameterized generic")
def __subclasscheck__(self, cls: type) -> NoReturn:
"""Check if a `cls` is a subclass."""
raise TypeError("issubclass() argument 2 cannot be a "
"parameterized generic")
def __repr__(self) -> str:
"""Return ``repr(self)``."""
args = ", ".join(_to_str(i) for i in self.__args__)
origin = _to_str(self.__origin__)
prefix = "*" if self.__unpacked__ else ""
return f"{prefix}{origin}[{args}]"
def __getitem__(self: _T, key: object | tuple[object, ...]) -> _T:
"""Return ``self[key]``."""
key_tup = key if isinstance(key, tuple) else (key,)
if len(self.__parameters__) == 0:
raise TypeError(f"There are no type variables left in {self}")
elif len(key_tup) > len(self.__parameters__):
raise TypeError(f"Too many arguments for {self}")
elif len(key_tup) < len(self.__parameters__):
raise TypeError(f"Too few arguments for {self}")
key_iter = iter(key_tup)
return _reconstruct_alias(self, key_iter)
def __eq__(self, value: object) -> bool:
"""Return ``self == value``."""
if not isinstance(value, _GENERIC_ALIAS_TYPE):
return NotImplemented
return (
self.__origin__ == value.__origin__ and
self.__args__ == value.__args__ and
self.__unpacked__ == getattr(
value, "__unpacked__", self.__unpacked__
)
)
def __iter__(self: _T) -> Generator[_T, None, None]:
"""Return ``iter(self)``."""
cls = type(self)
yield cls(self.__origin__, self.__args__, True)
_ATTR_EXCEPTIONS: ClassVar[frozenset[str]] = frozenset({
"__origin__",
"__args__",
"__parameters__",
"__mro_entries__",
"__reduce__",
"__reduce_ex__",
"__copy__",
"__deepcopy__",
"__unpacked__",
"__typing_unpacked_tuple_args__",
"__class__",
})
def __getattribute__(self, name: str) -> Any:
"""Return ``getattr(self, name)``."""
# Pull the attribute from `__origin__` unless its
# name is in `_ATTR_EXCEPTIONS`
cls = type(self)
if name in cls._ATTR_EXCEPTIONS:
return super().__getattribute__(name)
return getattr(self.__origin__, name)
# See `_GenericAlias.__eq__`
if sys.version_info >= (3, 9):
_GENERIC_ALIAS_TYPE = (_GenericAlias, types.GenericAlias)
else:
_GENERIC_ALIAS_TYPE = (_GenericAlias,)
ScalarType = TypeVar("ScalarType", bound=np.generic, covariant=True)
if TYPE_CHECKING or sys.version_info >= (3, 9):
_DType = np.dtype[ScalarType]
NDArray = np.ndarray[Any, np.dtype[ScalarType]]
else:
_DType = _GenericAlias(np.dtype, (ScalarType,))
NDArray = _GenericAlias(np.ndarray, (Any, _DType))
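# Illustrative sketch (not part of the original file; output shown for the
# pure-Python fallback): subscripting substitutes the type variable, just
# like `types.GenericAlias` would:
# >>> NDArray[np.float64]
# numpy.ndarray[typing.Any, numpy.dtype[numpy.float64]]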
| 7,479 | Python | 29.406504 | 77 | 0.53697 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/_scalars.py | from typing import Union, Tuple, Any
import numpy as np
# NOTE: `_StrLike_co` and `_BytesLike_co` are pointless, as `np.str_` and
# `np.bytes_` are already subclasses of their builtin counterpart
_CharLike_co = Union[str, bytes]
# The 6 `<X>Like_co` type-aliases below represent all scalars that can be
# coerced into `<X>` (with the casting rule `same_kind`)
_BoolLike_co = Union[bool, np.bool_]
_UIntLike_co = Union[_BoolLike_co, np.unsignedinteger]
_IntLike_co = Union[_BoolLike_co, int, np.integer]
_FloatLike_co = Union[_IntLike_co, float, np.floating]
_ComplexLike_co = Union[_FloatLike_co, complex, np.complexfloating]
_TD64Like_co = Union[_IntLike_co, np.timedelta64]
_NumberLike_co = Union[int, float, complex, np.number, np.bool_]
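# For example, `True`, `1` and `np.int64(1)` all satisfy `_IntLike_co`,
# while `1.0` only satisfies `_FloatLike_co` and the wider aliases below.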
_ScalarLike_co = Union[
int,
float,
complex,
str,
bytes,
np.generic,
]
# `_VoidLike_co` is technically not a scalar, but it's close enough
_VoidLike_co = Union[Tuple[Any, ...], np.void]
| 957 | Python | 29.903225 | 73 | 0.698015 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/_ufunc.pyi | """A module with private type-check-only `numpy.ufunc` subclasses.
The signatures of the ufuncs are too varied to reasonably type
with a single class. So instead, `ufunc` has been expanded into
four private subclasses, one for each combination of
`~ufunc.nin` and `~ufunc.nout`.
"""
from typing import (
Any,
Generic,
overload,
TypeVar,
Literal,
SupportsIndex,
Protocol,
)
from numpy import ufunc, _CastingKind, _OrderKACF
from numpy.typing import NDArray
from ._shape import _ShapeLike
from ._scalars import _ScalarLike_co
from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co
from ._dtype_like import DTypeLike
_T = TypeVar("_T")
_2Tuple = tuple[_T, _T]
_3Tuple = tuple[_T, _T, _T]
_4Tuple = tuple[_T, _T, _T, _T]
_NTypes = TypeVar("_NTypes", bound=int)
_IDType = TypeVar("_IDType", bound=Any)
_NameType = TypeVar("_NameType", bound=str)
class _SupportsArrayUFunc(Protocol):
def __array_ufunc__(
self,
ufunc: ufunc,
method: Literal["__call__", "reduce", "reduceat", "accumulate", "outer", "inner"],
*inputs: Any,
**kwargs: Any,
) -> Any: ...
# NOTE: In reality `extobj` should be a list of length 3 containing an
# int, an int, and a callable, but there's no way to properly express
# non-homogeneous lists.
# Use `Any` over `Union` to avoid issues related to lists invariance.
# NOTE: `reduce`, `accumulate`, `reduceat` and `outer` raise a ValueError for
# ufuncs that don't accept two input arguments and return one output argument.
# In such cases the respective methods are simply typed as `None`.
# NOTE: Similarly, `at` won't be defined for ufuncs that return
# multiple outputs; in such cases `at` is typed as `None`
# NOTE: If 2 output types are returned then `out` must be a
# 2-tuple of arrays. Otherwise `None` or a plain array are also acceptable
class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
@property
def __name__(self) -> _NameType: ...
@property
def ntypes(self) -> _NTypes: ...
@property
def identity(self) -> _IDType: ...
@property
def nin(self) -> Literal[1]: ...
@property
def nout(self) -> Literal[1]: ...
@property
def nargs(self) -> Literal[2]: ...
@property
def signature(self) -> None: ...
@property
def reduce(self) -> None: ...
@property
def accumulate(self) -> None: ...
@property
def reduceat(self) -> None: ...
@property
def outer(self) -> None: ...
@overload
def __call__(
self,
__x1: _ScalarLike_co,
out: None = ...,
*,
where: None | _ArrayLikeBool_co = ...,
casting: _CastingKind = ...,
order: _OrderKACF = ...,
dtype: DTypeLike = ...,
subok: bool = ...,
signature: str | _2Tuple[None | str] = ...,
extobj: list[Any] = ...,
) -> Any: ...
@overload
def __call__(
self,
__x1: ArrayLike,
out: None | NDArray[Any] | tuple[NDArray[Any]] = ...,
*,
where: None | _ArrayLikeBool_co = ...,
casting: _CastingKind = ...,
order: _OrderKACF = ...,
dtype: DTypeLike = ...,
subok: bool = ...,
signature: str | _2Tuple[None | str] = ...,
extobj: list[Any] = ...,
) -> NDArray[Any]: ...
@overload
def __call__(
self,
__x1: _SupportsArrayUFunc,
out: None | NDArray[Any] | tuple[NDArray[Any]] = ...,
*,
where: None | _ArrayLikeBool_co = ...,
casting: _CastingKind = ...,
order: _OrderKACF = ...,
dtype: DTypeLike = ...,
subok: bool = ...,
signature: str | _2Tuple[None | str] = ...,
extobj: list[Any] = ...,
) -> Any: ...
def at(
self,
a: _SupportsArrayUFunc,
indices: _ArrayLikeInt_co,
/,
) -> None: ...
class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
@property
def __name__(self) -> _NameType: ...
@property
def ntypes(self) -> _NTypes: ...
@property
def identity(self) -> _IDType: ...
@property
def nin(self) -> Literal[2]: ...
@property
def nout(self) -> Literal[1]: ...
@property
def nargs(self) -> Literal[3]: ...
@property
def signature(self) -> None: ...
@overload
def __call__(
self,
__x1: _ScalarLike_co,
__x2: _ScalarLike_co,
out: None = ...,
*,
where: None | _ArrayLikeBool_co = ...,
casting: _CastingKind = ...,
order: _OrderKACF = ...,
dtype: DTypeLike = ...,
subok: bool = ...,
signature: str | _3Tuple[None | str] = ...,
extobj: list[Any] = ...,
) -> Any: ...
@overload
def __call__(
self,
__x1: ArrayLike,
__x2: ArrayLike,
out: None | NDArray[Any] | tuple[NDArray[Any]] = ...,
*,
where: None | _ArrayLikeBool_co = ...,
casting: _CastingKind = ...,
order: _OrderKACF = ...,
dtype: DTypeLike = ...,
subok: bool = ...,
signature: str | _3Tuple[None | str] = ...,
extobj: list[Any] = ...,
) -> NDArray[Any]: ...
def at(
self,
a: NDArray[Any],
indices: _ArrayLikeInt_co,
b: ArrayLike,
/,
) -> None: ...
def reduce(
self,
array: ArrayLike,
axis: None | _ShapeLike = ...,
dtype: DTypeLike = ...,
out: None | NDArray[Any] = ...,
keepdims: bool = ...,
initial: Any = ...,
where: _ArrayLikeBool_co = ...,
) -> Any: ...
def accumulate(
self,
array: ArrayLike,
axis: SupportsIndex = ...,
dtype: DTypeLike = ...,
out: None | NDArray[Any] = ...,
) -> NDArray[Any]: ...
def reduceat(
self,
array: ArrayLike,
indices: _ArrayLikeInt_co,
axis: SupportsIndex = ...,
dtype: DTypeLike = ...,
out: None | NDArray[Any] = ...,
) -> NDArray[Any]: ...
# Expand `**kwargs` into explicit keyword-only arguments
@overload
def outer(
self,
A: _ScalarLike_co,
B: _ScalarLike_co,
/, *,
out: None = ...,
where: None | _ArrayLikeBool_co = ...,
casting: _CastingKind = ...,
order: _OrderKACF = ...,
dtype: DTypeLike = ...,
subok: bool = ...,
signature: str | _3Tuple[None | str] = ...,
extobj: list[Any] = ...,
) -> Any: ...
@overload
def outer( # type: ignore[misc]
self,
A: ArrayLike,
B: ArrayLike,
/, *,
out: None | NDArray[Any] | tuple[NDArray[Any]] = ...,
where: None | _ArrayLikeBool_co = ...,
casting: _CastingKind = ...,
order: _OrderKACF = ...,
dtype: DTypeLike = ...,
subok: bool = ...,
signature: str | _3Tuple[None | str] = ...,
extobj: list[Any] = ...,
) -> NDArray[Any]: ...
class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
@property
def __name__(self) -> _NameType: ...
@property
def ntypes(self) -> _NTypes: ...
@property
def identity(self) -> _IDType: ...
@property
def nin(self) -> Literal[1]: ...
@property
def nout(self) -> Literal[2]: ...
@property
def nargs(self) -> Literal[3]: ...
@property
def signature(self) -> None: ...
@property
def at(self) -> None: ...
@property
def reduce(self) -> None: ...
@property
def accumulate(self) -> None: ...
@property
def reduceat(self) -> None: ...
@property
def outer(self) -> None: ...
@overload
def __call__(
self,
__x1: _ScalarLike_co,
__out1: None = ...,
__out2: None = ...,
*,
where: None | _ArrayLikeBool_co = ...,
casting: _CastingKind = ...,
order: _OrderKACF = ...,
dtype: DTypeLike = ...,
subok: bool = ...,
signature: str | _3Tuple[None | str] = ...,
extobj: list[Any] = ...,
) -> _2Tuple[Any]: ...
@overload
def __call__(
self,
__x1: ArrayLike,
__out1: None | NDArray[Any] = ...,
__out2: None | NDArray[Any] = ...,
*,
out: _2Tuple[NDArray[Any]] = ...,
where: None | _ArrayLikeBool_co = ...,
casting: _CastingKind = ...,
order: _OrderKACF = ...,
dtype: DTypeLike = ...,
subok: bool = ...,
signature: str | _3Tuple[None | str] = ...,
extobj: list[Any] = ...,
) -> _2Tuple[NDArray[Any]]: ...
@overload
def __call__(
self,
__x1: _SupportsArrayUFunc,
__out1: None | NDArray[Any] = ...,
__out2: None | NDArray[Any] = ...,
*,
out: _2Tuple[NDArray[Any]] = ...,
where: None | _ArrayLikeBool_co = ...,
casting: _CastingKind = ...,
order: _OrderKACF = ...,
dtype: DTypeLike = ...,
subok: bool = ...,
signature: str | _3Tuple[None | str] = ...,
extobj: list[Any] = ...,
) -> _2Tuple[Any]: ...
class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
@property
def __name__(self) -> _NameType: ...
@property
def ntypes(self) -> _NTypes: ...
@property
def identity(self) -> _IDType: ...
@property
def nin(self) -> Literal[2]: ...
@property
def nout(self) -> Literal[2]: ...
@property
def nargs(self) -> Literal[4]: ...
@property
def signature(self) -> None: ...
@property
def at(self) -> None: ...
@property
def reduce(self) -> None: ...
@property
def accumulate(self) -> None: ...
@property
def reduceat(self) -> None: ...
@property
def outer(self) -> None: ...
@overload
def __call__(
self,
__x1: _ScalarLike_co,
__x2: _ScalarLike_co,
__out1: None = ...,
__out2: None = ...,
*,
where: None | _ArrayLikeBool_co = ...,
casting: _CastingKind = ...,
order: _OrderKACF = ...,
dtype: DTypeLike = ...,
subok: bool = ...,
signature: str | _4Tuple[None | str] = ...,
extobj: list[Any] = ...,
) -> _2Tuple[Any]: ...
@overload
def __call__(
self,
__x1: ArrayLike,
__x2: ArrayLike,
__out1: None | NDArray[Any] = ...,
__out2: None | NDArray[Any] = ...,
*,
out: _2Tuple[NDArray[Any]] = ...,
where: None | _ArrayLikeBool_co = ...,
casting: _CastingKind = ...,
order: _OrderKACF = ...,
dtype: DTypeLike = ...,
subok: bool = ...,
signature: str | _4Tuple[None | str] = ...,
extobj: list[Any] = ...,
) -> _2Tuple[NDArray[Any]]: ...
class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
@property
def __name__(self) -> _NameType: ...
@property
def ntypes(self) -> _NTypes: ...
@property
def identity(self) -> _IDType: ...
@property
def nin(self) -> Literal[2]: ...
@property
def nout(self) -> Literal[1]: ...
@property
def nargs(self) -> Literal[3]: ...
# NOTE: In practice the only gufunc in the main namespace is `matmul`,
# so we can use its signature here
@property
def signature(self) -> Literal["(n?,k),(k,m?)->(n?,m?)"]: ...
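    # An illustrative note (not part of the original stub): in the signature
    # above, `?` marks dimensions that may be absent, which is how `matmul`
    # handles 1-D operands, e.g. (hypothetical shapes):
    #     (3,) @ (3, 4)   -> (4,)     # `n` is missing
    #     (2, 3) @ (3,)   -> (2,)     # `m` is missing
    #     (2, 3) @ (3, 4) -> (2, 4)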
@property
def reduce(self) -> None: ...
@property
def accumulate(self) -> None: ...
@property
def reduceat(self) -> None: ...
@property
def outer(self) -> None: ...
@property
def at(self) -> None: ...
# Scalar for 1D array-likes; ndarray otherwise
@overload
def __call__(
self,
__x1: ArrayLike,
__x2: ArrayLike,
out: None = ...,
*,
casting: _CastingKind = ...,
order: _OrderKACF = ...,
dtype: DTypeLike = ...,
subok: bool = ...,
signature: str | _3Tuple[None | str] = ...,
extobj: list[Any] = ...,
axes: list[_2Tuple[SupportsIndex]] = ...,
) -> Any: ...
@overload
def __call__(
self,
__x1: ArrayLike,
__x2: ArrayLike,
out: NDArray[Any] | tuple[NDArray[Any]],
*,
casting: _CastingKind = ...,
order: _OrderKACF = ...,
dtype: DTypeLike = ...,
subok: bool = ...,
signature: str | _3Tuple[None | str] = ...,
extobj: list[Any] = ...,
axes: list[_2Tuple[SupportsIndex]] = ...,
) -> NDArray[Any]: ...
| 12,638 | unknown | 27.338565 | 92 | 0.502611 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/__init__.py | """Private counterpart of ``numpy.typing``."""
from __future__ import annotations
from numpy import ufunc
from numpy.core.overrides import set_module
from typing import TYPE_CHECKING, final
@final # Disallow the creation of arbitrary `NBitBase` subclasses
@set_module("numpy.typing")
class NBitBase:
"""
A type representing `numpy.number` precision during static type checking.
    Used exclusively for the purpose of static type checking, `NBitBase`
represents the base of a hierarchical set of subclasses.
Each subsequent subclass is herein used for representing a lower level
of precision, *e.g.* ``64Bit > 32Bit > 16Bit``.
.. versionadded:: 1.20
Examples
--------
Below is a typical usage example: `NBitBase` is herein used for annotating
a function that takes a float and integer of arbitrary precision
as arguments and returns a new float of whichever precision is largest
(*e.g.* ``np.float16 + np.int64 -> np.float64``).
.. code-block:: python
>>> from __future__ import annotations
>>> from typing import TypeVar, TYPE_CHECKING
>>> import numpy as np
>>> import numpy.typing as npt
>>> T1 = TypeVar("T1", bound=npt.NBitBase)
>>> T2 = TypeVar("T2", bound=npt.NBitBase)
>>> def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]:
... return a + b
>>> a = np.float16()
>>> b = np.int64()
>>> out = add(a, b)
>>> if TYPE_CHECKING:
... reveal_locals()
... # note: Revealed local types are:
... # note: a: numpy.floating[numpy.typing._16Bit*]
... # note: b: numpy.signedinteger[numpy.typing._64Bit*]
... # note: out: numpy.floating[numpy.typing._64Bit*]
"""
def __init_subclass__(cls) -> None:
allowed_names = {
"NBitBase", "_256Bit", "_128Bit", "_96Bit", "_80Bit",
"_64Bit", "_32Bit", "_16Bit", "_8Bit",
}
if cls.__name__ not in allowed_names:
raise TypeError('cannot inherit from final class "NBitBase"')
super().__init_subclass__()
# Silence errors about subclassing a `@final`-decorated class
class _256Bit(NBitBase): # type: ignore[misc]
pass
class _128Bit(_256Bit): # type: ignore[misc]
pass
class _96Bit(_128Bit): # type: ignore[misc]
pass
class _80Bit(_96Bit): # type: ignore[misc]
pass
class _64Bit(_80Bit): # type: ignore[misc]
pass
class _32Bit(_64Bit): # type: ignore[misc]
pass
class _16Bit(_32Bit): # type: ignore[misc]
pass
class _8Bit(_16Bit): # type: ignore[misc]
pass
from ._nested_sequence import (
_NestedSequence as _NestedSequence,
)
from ._nbit import (
_NBitByte as _NBitByte,
_NBitShort as _NBitShort,
_NBitIntC as _NBitIntC,
_NBitIntP as _NBitIntP,
_NBitInt as _NBitInt,
_NBitLongLong as _NBitLongLong,
_NBitHalf as _NBitHalf,
_NBitSingle as _NBitSingle,
_NBitDouble as _NBitDouble,
_NBitLongDouble as _NBitLongDouble,
)
from ._char_codes import (
_BoolCodes as _BoolCodes,
_UInt8Codes as _UInt8Codes,
_UInt16Codes as _UInt16Codes,
_UInt32Codes as _UInt32Codes,
_UInt64Codes as _UInt64Codes,
_Int8Codes as _Int8Codes,
_Int16Codes as _Int16Codes,
_Int32Codes as _Int32Codes,
_Int64Codes as _Int64Codes,
_Float16Codes as _Float16Codes,
_Float32Codes as _Float32Codes,
_Float64Codes as _Float64Codes,
_Complex64Codes as _Complex64Codes,
_Complex128Codes as _Complex128Codes,
_ByteCodes as _ByteCodes,
_ShortCodes as _ShortCodes,
_IntCCodes as _IntCCodes,
_IntPCodes as _IntPCodes,
_IntCodes as _IntCodes,
_LongLongCodes as _LongLongCodes,
_UByteCodes as _UByteCodes,
_UShortCodes as _UShortCodes,
_UIntCCodes as _UIntCCodes,
_UIntPCodes as _UIntPCodes,
_UIntCodes as _UIntCodes,
_ULongLongCodes as _ULongLongCodes,
_HalfCodes as _HalfCodes,
_SingleCodes as _SingleCodes,
_DoubleCodes as _DoubleCodes,
_LongDoubleCodes as _LongDoubleCodes,
_CSingleCodes as _CSingleCodes,
_CDoubleCodes as _CDoubleCodes,
_CLongDoubleCodes as _CLongDoubleCodes,
_DT64Codes as _DT64Codes,
_TD64Codes as _TD64Codes,
_StrCodes as _StrCodes,
_BytesCodes as _BytesCodes,
_VoidCodes as _VoidCodes,
_ObjectCodes as _ObjectCodes,
)
from ._scalars import (
_CharLike_co as _CharLike_co,
_BoolLike_co as _BoolLike_co,
_UIntLike_co as _UIntLike_co,
_IntLike_co as _IntLike_co,
_FloatLike_co as _FloatLike_co,
_ComplexLike_co as _ComplexLike_co,
_TD64Like_co as _TD64Like_co,
_NumberLike_co as _NumberLike_co,
_ScalarLike_co as _ScalarLike_co,
_VoidLike_co as _VoidLike_co,
)
from ._shape import (
_Shape as _Shape,
_ShapeLike as _ShapeLike,
)
from ._dtype_like import (
DTypeLike as DTypeLike,
_DTypeLike as _DTypeLike,
_SupportsDType as _SupportsDType,
_VoidDTypeLike as _VoidDTypeLike,
_DTypeLikeBool as _DTypeLikeBool,
_DTypeLikeUInt as _DTypeLikeUInt,
_DTypeLikeInt as _DTypeLikeInt,
_DTypeLikeFloat as _DTypeLikeFloat,
_DTypeLikeComplex as _DTypeLikeComplex,
_DTypeLikeTD64 as _DTypeLikeTD64,
_DTypeLikeDT64 as _DTypeLikeDT64,
_DTypeLikeObject as _DTypeLikeObject,
_DTypeLikeVoid as _DTypeLikeVoid,
_DTypeLikeStr as _DTypeLikeStr,
_DTypeLikeBytes as _DTypeLikeBytes,
_DTypeLikeComplex_co as _DTypeLikeComplex_co,
)
from ._array_like import (
ArrayLike as ArrayLike,
_ArrayLike as _ArrayLike,
_FiniteNestedSequence as _FiniteNestedSequence,
_SupportsArray as _SupportsArray,
_SupportsArrayFunc as _SupportsArrayFunc,
_ArrayLikeInt as _ArrayLikeInt,
_ArrayLikeBool_co as _ArrayLikeBool_co,
_ArrayLikeUInt_co as _ArrayLikeUInt_co,
_ArrayLikeInt_co as _ArrayLikeInt_co,
_ArrayLikeFloat_co as _ArrayLikeFloat_co,
_ArrayLikeComplex_co as _ArrayLikeComplex_co,
_ArrayLikeNumber_co as _ArrayLikeNumber_co,
_ArrayLikeTD64_co as _ArrayLikeTD64_co,
_ArrayLikeDT64_co as _ArrayLikeDT64_co,
_ArrayLikeObject_co as _ArrayLikeObject_co,
_ArrayLikeVoid_co as _ArrayLikeVoid_co,
_ArrayLikeStr_co as _ArrayLikeStr_co,
_ArrayLikeBytes_co as _ArrayLikeBytes_co,
_ArrayLikeUnknown as _ArrayLikeUnknown,
_UnknownType as _UnknownType,
)
from ._generic_alias import (
NDArray as NDArray,
_DType as _DType,
_GenericAlias as _GenericAlias,
)
if TYPE_CHECKING:
from ._ufunc import (
_UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1,
_UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1,
_UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2,
_UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2,
_GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1,
)
else:
# Declare the (type-check-only) ufunc subclasses as ufunc aliases during
# runtime; this helps autocompletion tools such as Jedi (numpy/numpy#19834)
_UFunc_Nin1_Nout1 = ufunc
_UFunc_Nin2_Nout1 = ufunc
_UFunc_Nin1_Nout2 = ufunc
_UFunc_Nin2_Nout2 = ufunc
_GUFunc_Nin2_Nout1 = ufunc
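# An illustrative sketch (not part of the original module): the effect of the
# aliasing above is that the concrete ufuncs stay plain `ufunc` instances at
# runtime, while static checkers see the richer per-ufunc subclasses:
#
#     >>> import numpy as np
#     >>> type(np.add) is np.ufunc  # runtime view
#     True
#     # a type checker, by contrast, resolves np.add to a
#     # _UFunc_Nin2_Nout1[...] parametrization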
| 7,108 | Python | 30.455752 | 83 | 0.662915 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/_nested_sequence.py | """A module containing the `_NestedSequence` protocol."""
from __future__ import annotations
from typing import (
Any,
Iterator,
overload,
TypeVar,
Protocol,
runtime_checkable,
)
__all__ = ["_NestedSequence"]
_T_co = TypeVar("_T_co", covariant=True)
@runtime_checkable
class _NestedSequence(Protocol[_T_co]):
"""A protocol for representing nested sequences.
Warning
-------
`_NestedSequence` currently does not work in combination with typevars,
    *e.g.* ``def func(a: _NestedSequence[T]) -> T: ...``.
See Also
--------
collections.abc.Sequence
ABCs for read-only and mutable :term:`sequences`.
Examples
--------
.. code-block:: python
>>> from __future__ import annotations
>>> from typing import TYPE_CHECKING
>>> import numpy as np
    >>> from numpy._typing import _NestedSequence
    >>> def get_dtype(seq: _NestedSequence[float]) -> np.dtype[np.float64]:
... return np.asarray(seq).dtype
>>> a = get_dtype([1.0])
>>> b = get_dtype([[1.0]])
>>> c = get_dtype([[[1.0]]])
>>> d = get_dtype([[[[1.0]]]])
>>> if TYPE_CHECKING:
... reveal_locals()
... # note: Revealed local types are:
... # note: a: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
... # note: b: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
... # note: c: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
... # note: d: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
"""
def __len__(self, /) -> int:
"""Implement ``len(self)``."""
raise NotImplementedError
@overload
def __getitem__(self, index: int, /) -> _T_co | _NestedSequence[_T_co]: ...
@overload
def __getitem__(self, index: slice, /) -> _NestedSequence[_T_co]: ...
def __getitem__(self, index, /):
"""Implement ``self[x]``."""
raise NotImplementedError
def __contains__(self, x: object, /) -> bool:
"""Implement ``x in self``."""
raise NotImplementedError
def __iter__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]:
"""Implement ``iter(self)``."""
raise NotImplementedError
def __reversed__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]:
"""Implement ``reversed(self)``."""
raise NotImplementedError
def count(self, value: Any, /) -> int:
"""Return the number of occurrences of `value`."""
raise NotImplementedError
def index(self, value: Any, /) -> int:
"""Return the first index of `value`."""
raise NotImplementedError
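# An illustrative sketch (not part of the original module): because the
# protocol is `runtime_checkable`, ordinary nested lists already satisfy it:
#
#     >>> isinstance([1.0, 2.0], _NestedSequence)
#     True
#     >>> isinstance([[1.0], [2.0]], _NestedSequence)
#     True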
| 2,699 | Python | 28.032258 | 80 | 0.551315 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/_array_like.py | from __future__ import annotations
# NOTE: Import `Sequence` from `typing` as it is needed for a type-alias,
# not an annotation
from collections.abc import Collection, Callable
from typing import Any, Sequence, Protocol, Union, TypeVar, runtime_checkable
from numpy import (
ndarray,
dtype,
generic,
bool_,
unsignedinteger,
integer,
floating,
complexfloating,
number,
timedelta64,
datetime64,
object_,
void,
str_,
bytes_,
)
from ._nested_sequence import _NestedSequence
_T = TypeVar("_T")
_ScalarType = TypeVar("_ScalarType", bound=generic)
_DType = TypeVar("_DType", bound="dtype[Any]")
_DType_co = TypeVar("_DType_co", covariant=True, bound="dtype[Any]")
# The `_SupportsArray` protocol only cares about the default dtype
# (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned
# array.
# Concrete implementations of the protocol are responsible for adding
# any and all remaining overloads
@runtime_checkable
class _SupportsArray(Protocol[_DType_co]):
def __array__(self) -> ndarray[Any, _DType_co]: ...
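# An illustrative sketch (not part of the original module): a minimal class
# satisfying `_SupportsArray` only needs a suitable `__array__` method.
# `_Wrapper` below is hypothetical, purely for demonstration:
#
#     import numpy as np
#     class _Wrapper:
#         def __init__(self, value: float) -> None:
#             self._arr = np.array(value, dtype=np.float64)
#         def __array__(self) -> np.ndarray:
#             return self._arr
#
#     isinstance(_Wrapper(1.0), _SupportsArray)  # True (runtime_checkable)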
@runtime_checkable
class _SupportsArrayFunc(Protocol):
"""A protocol class representing `~class.__array_function__`."""
def __array_function__(
self,
func: Callable[..., Any],
types: Collection[type[Any]],
args: tuple[Any, ...],
kwargs: dict[str, Any],
) -> object: ...
# TODO: Wait until mypy supports recursive objects in combination with typevars
_FiniteNestedSequence = Union[
_T,
Sequence[_T],
Sequence[Sequence[_T]],
Sequence[Sequence[Sequence[_T]]],
Sequence[Sequence[Sequence[Sequence[_T]]]],
]
# A subset of `npt.ArrayLike` that can be parametrized w.r.t. `np.generic`
_ArrayLike = Union[
_SupportsArray["dtype[_ScalarType]"],
_NestedSequence[_SupportsArray["dtype[_ScalarType]"]],
]
# A union representing array-like objects; consists of two typevars:
# One representing types that can be parametrized w.r.t. `np.dtype`
# and another one for the rest
_DualArrayLike = Union[
_SupportsArray[_DType],
_NestedSequence[_SupportsArray[_DType]],
_T,
_NestedSequence[_T],
]
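# An illustrative sketch (not part of the original module): for instance,
#
#     _ExampleFloatLike = _DualArrayLike["dtype[floating[Any]]", float]
#
# would accept float arrays, nested sequences of such arrays, plain floats,
# and nested sequences of floats; the concrete aliases below are built on
# the same pattern.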
# TODO: support buffer protocols once
#
# https://bugs.python.org/issue27501
#
# is resolved. See also the mypy issue:
#
# https://github.com/python/typing/issues/593
ArrayLike = _DualArrayLike[
dtype,
Union[bool, int, float, complex, str, bytes],
]
# `ArrayLike<X>_co`: array-like objects that can be coerced into `X`
# given the casting rules `same_kind`
_ArrayLikeBool_co = _DualArrayLike[
"dtype[bool_]",
bool,
]
_ArrayLikeUInt_co = _DualArrayLike[
"dtype[Union[bool_, unsignedinteger[Any]]]",
bool,
]
_ArrayLikeInt_co = _DualArrayLike[
"dtype[Union[bool_, integer[Any]]]",
Union[bool, int],
]
_ArrayLikeFloat_co = _DualArrayLike[
"dtype[Union[bool_, integer[Any], floating[Any]]]",
Union[bool, int, float],
]
_ArrayLikeComplex_co = _DualArrayLike[
"dtype[Union[bool_, integer[Any], floating[Any], complexfloating[Any, Any]]]",
Union[bool, int, float, complex],
]
_ArrayLikeNumber_co = _DualArrayLike[
"dtype[Union[bool_, number[Any]]]",
Union[bool, int, float, complex],
]
_ArrayLikeTD64_co = _DualArrayLike[
"dtype[Union[bool_, integer[Any], timedelta64]]",
Union[bool, int],
]
_ArrayLikeDT64_co = Union[
_SupportsArray["dtype[datetime64]"],
_NestedSequence[_SupportsArray["dtype[datetime64]"]],
]
_ArrayLikeObject_co = Union[
_SupportsArray["dtype[object_]"],
_NestedSequence[_SupportsArray["dtype[object_]"]],
]
_ArrayLikeVoid_co = Union[
_SupportsArray["dtype[void]"],
_NestedSequence[_SupportsArray["dtype[void]"]],
]
_ArrayLikeStr_co = _DualArrayLike[
"dtype[str_]",
str,
]
_ArrayLikeBytes_co = _DualArrayLike[
"dtype[bytes_]",
bytes,
]
_ArrayLikeInt = _DualArrayLike[
"dtype[integer[Any]]",
int,
]
# Extra ArrayLike type so that pyright can deal with NDArray[Any]
# Used as the first overload, should only match NDArray[Any],
# not any actual types.
# https://github.com/numpy/numpy/pull/22193
class _UnknownType:
...
_ArrayLikeUnknown = _DualArrayLike[
"dtype[_UnknownType]",
_UnknownType,
]
| 4,212 | Python | 25.496855 | 82 | 0.674739 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/_shape.py | from typing import Sequence, Tuple, Union, SupportsIndex
_Shape = Tuple[int, ...]
# Anything that can be coerced to a shape tuple
_ShapeLike = Union[SupportsIndex, Sequence[SupportsIndex]]
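# An illustrative note (not part of the original module): both a single
# integer and a sequence of integers qualify as `_ShapeLike`:
#
#     >>> import numpy as np
#     >>> np.zeros(3).shape        # SupportsIndex
#     (3,)
#     >>> np.zeros((2, 3)).shape   # Sequence[SupportsIndex]
#     (2, 3)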
| 191 | Python | 26.428568 | 58 | 0.759162 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_typing/_extended_precision.py | """A module with platform-specific extended precision
`numpy.number` subclasses.
The subclasses are defined here (instead of ``__init__.pyi``) such
that they can be imported conditionally via numpy's mypy plugin.
"""
from typing import TYPE_CHECKING, Any
import numpy as np
from . import (
_80Bit,
_96Bit,
_128Bit,
_256Bit,
)
if TYPE_CHECKING:
uint128 = np.unsignedinteger[_128Bit]
uint256 = np.unsignedinteger[_256Bit]
int128 = np.signedinteger[_128Bit]
int256 = np.signedinteger[_256Bit]
float80 = np.floating[_80Bit]
float96 = np.floating[_96Bit]
float128 = np.floating[_128Bit]
float256 = np.floating[_256Bit]
complex160 = np.complexfloating[_80Bit, _80Bit]
complex192 = np.complexfloating[_96Bit, _96Bit]
complex256 = np.complexfloating[_128Bit, _128Bit]
complex512 = np.complexfloating[_256Bit, _256Bit]
else:
uint128 = Any
uint256 = Any
int128 = Any
int256 = Any
float80 = Any
float96 = Any
float128 = Any
float256 = Any
complex160 = Any
complex192 = Any
complex256 = Any
complex512 = Any
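# An illustrative note (not part of the original module): with numpy's mypy
# plugin enabled, only the subset of these names available on the platform
# being checked is re-exported, so e.g. (hypothetical usage):
#
#     x: np.float128  # accepted where float128 exists, flagged elsewhere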
| 1,111 | Python | 24.272727 | 68 | 0.675968 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/tests/test_scripts.py | """ Test scripts
Test that we can run executable scripts that have been installed with numpy.
"""
import sys
import os
import pytest
from os.path import join as pathjoin, isfile, dirname
import subprocess
import numpy as np
from numpy.testing import assert_equal
is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py'))
def find_f2py_commands():
if sys.platform == 'win32':
exe_dir = dirname(sys.executable)
if exe_dir.endswith('Scripts'): # virtualenv
return [os.path.join(exe_dir, 'f2py')]
else:
return [os.path.join(exe_dir, "Scripts", 'f2py')]
else:
# Three scripts are installed in Unix-like systems:
# 'f2py', 'f2py{major}', and 'f2py{major.minor}'. For example,
# if installed with python3.9 the scripts would be named
# 'f2py', 'f2py3', and 'f2py3.9'.
version = sys.version_info
major = str(version.major)
minor = str(version.minor)
return ['f2py', 'f2py' + major, 'f2py' + major + '.' + minor]
@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace")
@pytest.mark.xfail(reason="Test is unreliable")
@pytest.mark.parametrize('f2py_cmd', find_f2py_commands())
def test_f2py(f2py_cmd):
# test that we can run f2py script
stdout = subprocess.check_output([f2py_cmd, '-v'])
assert_equal(stdout.strip(), np.__version__.encode('ascii'))
def test_pep338():
stdout = subprocess.check_output([sys.executable, '-mnumpy.f2py', '-v'])
assert_equal(stdout.strip(), np.__version__.encode('ascii'))
| 1,573 | Python | 32.489361 | 76 | 0.649078 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/tests/test_matlib.py | import numpy as np
import numpy.matlib
from numpy.testing import assert_array_equal, assert_equal, assert_
def test_empty():
x = numpy.matlib.empty((2,))
assert_(isinstance(x, np.matrix))
    assert_equal(x.shape, (1, 2))
def test_ones():
assert_array_equal(numpy.matlib.ones((2, 3)),
np.matrix([[ 1., 1., 1.],
[ 1., 1., 1.]]))
assert_array_equal(numpy.matlib.ones(2), np.matrix([[ 1., 1.]]))
def test_zeros():
assert_array_equal(numpy.matlib.zeros((2, 3)),
np.matrix([[ 0., 0., 0.],
[ 0., 0., 0.]]))
assert_array_equal(numpy.matlib.zeros(2), np.matrix([[ 0., 0.]]))
def test_identity():
x = numpy.matlib.identity(2, dtype=int)
assert_array_equal(x, np.matrix([[1, 0], [0, 1]]))
def test_eye():
xc = numpy.matlib.eye(3, k=1, dtype=int)
assert_array_equal(xc, np.matrix([[ 0, 1, 0],
[ 0, 0, 1],
[ 0, 0, 0]]))
assert xc.flags.c_contiguous
assert not xc.flags.f_contiguous
xf = numpy.matlib.eye(3, 4, dtype=int, order='F')
assert_array_equal(xf, np.matrix([[ 1, 0, 0, 0],
[ 0, 1, 0, 0],
[ 0, 0, 1, 0]]))
assert not xf.flags.c_contiguous
assert xf.flags.f_contiguous
def test_rand():
x = numpy.matlib.rand(3)
# check matrix type, array would have shape (3,)
assert_(x.ndim == 2)
def test_randn():
x = np.matlib.randn(3)
# check matrix type, array would have shape (3,)
assert_(x.ndim == 2)
def test_repmat():
a1 = np.arange(4)
x = numpy.matlib.repmat(a1, 2, 2)
y = np.array([[0, 1, 2, 3, 0, 1, 2, 3],
[0, 1, 2, 3, 0, 1, 2, 3]])
assert_array_equal(x, y)
| 1,852 | Python | 30.406779 | 70 | 0.487041 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/tests/test_reloading.py | from numpy.testing import assert_raises, assert_warns, assert_, assert_equal
from numpy.compat import pickle
import sys
import subprocess
import textwrap
from importlib import reload
def test_numpy_reloading():
# gh-7844. Also check that relevant globals retain their identity.
import numpy as np
import numpy._globals
_NoValue = np._NoValue
VisibleDeprecationWarning = np.VisibleDeprecationWarning
ModuleDeprecationWarning = np.ModuleDeprecationWarning
with assert_warns(UserWarning):
reload(np)
assert_(_NoValue is np._NoValue)
assert_(ModuleDeprecationWarning is np.ModuleDeprecationWarning)
assert_(VisibleDeprecationWarning is np.VisibleDeprecationWarning)
assert_raises(RuntimeError, reload, numpy._globals)
with assert_warns(UserWarning):
reload(np)
assert_(_NoValue is np._NoValue)
assert_(ModuleDeprecationWarning is np.ModuleDeprecationWarning)
assert_(VisibleDeprecationWarning is np.VisibleDeprecationWarning)
def test_novalue():
import numpy as np
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
assert_equal(repr(np._NoValue), '<no value>')
assert_(pickle.loads(pickle.dumps(np._NoValue,
protocol=proto)) is np._NoValue)
def test_full_reimport():
"""At the time of writing this, it is *not* truly supported, but
apparently enough users rely on it, for it to be an annoying change
when it started failing previously.
"""
# Test within a new process, to ensure that we do not mess with the
# global state during the test run (could lead to cryptic test failures).
    # This is generally unsafe, especially since we also reload the C-modules.
code = textwrap.dedent(r"""
import sys
from pytest import warns
import numpy as np
for k in list(sys.modules.keys()):
if "numpy" in k:
del sys.modules[k]
with warns(UserWarning):
import numpy as np
""")
p = subprocess.run([sys.executable, '-c', code], capture_output=True)
if p.returncode:
raise AssertionError(
f"Non-zero return code: {p.returncode!r}\n\n{p.stderr.decode()}"
)
| 2,244 | Python | 33.538461 | 79 | 0.677362 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/tests/test_public_api.py | import sys
import sysconfig
import subprocess
import pkgutil
import types
import importlib
import importlib.metadata
import warnings
import numpy as np
import numpy
import pytest
try:
import ctypes
except ImportError:
ctypes = None
def check_dir(module, module_name=None):
"""Returns a mapping of all objects with the wrong __module__ attribute."""
if module_name is None:
module_name = module.__name__
results = {}
for name in dir(module):
item = getattr(module, name)
if (hasattr(item, '__module__') and hasattr(item, '__name__')
and item.__module__ != module_name):
results[name] = item.__module__ + '.' + item.__name__
return results
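# An illustrative note (not part of the original test module): for ``np`` the
# returned mapping pairs a public name with where the object actually lives,
# e.g. ``{'add_docstring': 'numpy.core._multiarray_umath.add_docstring'}``,
# which is exactly the shape of the ``undocumented`` allowlist below.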
def test_numpy_namespace():
# None of these objects are publicly documented to be part of the main
# NumPy namespace (some are useful though, others need to be cleaned up)
undocumented = {
'Tester': 'numpy.testing._private.nosetester.NoseTester',
'_add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc',
'add_docstring': 'numpy.core._multiarray_umath.add_docstring',
'add_newdoc': 'numpy.core.function_base.add_newdoc',
'add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc',
'byte_bounds': 'numpy.lib.utils.byte_bounds',
'compare_chararrays': 'numpy.core._multiarray_umath.compare_chararrays',
'deprecate': 'numpy.lib.utils.deprecate',
'deprecate_with_doc': 'numpy.lib.utils.deprecate_with_doc',
'disp': 'numpy.lib.function_base.disp',
'fastCopyAndTranspose': 'numpy.core._multiarray_umath._fastCopyAndTranspose',
'get_array_wrap': 'numpy.lib.shape_base.get_array_wrap',
'get_include': 'numpy.lib.utils.get_include',
'recfromcsv': 'numpy.lib.npyio.recfromcsv',
'recfromtxt': 'numpy.lib.npyio.recfromtxt',
'safe_eval': 'numpy.lib.utils.safe_eval',
'set_string_function': 'numpy.core.arrayprint.set_string_function',
'show_config': 'numpy.__config__.show',
'who': 'numpy.lib.utils.who',
}
# We override dir to not show these members
allowlist = undocumented
bad_results = check_dir(np)
# pytest gives better error messages with the builtin assert than with
# assert_equal
assert bad_results == allowlist
@pytest.mark.parametrize('name', ['testing', 'Tester'])
def test_import_lazy_import(name):
"""Make sure we can actually use the modules we lazy load.
    While not exported as part of the public API, it was accessible. With the
    use of ``__getattr__`` and ``__dir__``, this isn't always true: an
    infinite recursion can occur.
    Running the import in a subprocess is the only way found to force that
    failure to appear when the lazy loading is badly implemented.
    We also test for the presence of the lazily imported modules in ``dir(np)``.
"""
exe = (sys.executable, '-c', "import numpy; numpy." + name)
result = subprocess.check_output(exe)
assert not result
# Make sure they are still in the __dir__
assert name in dir(np)
def test_dir_testing():
"""Assert that output of dir has only one "testing/tester"
attribute without duplicate"""
assert len(dir(np)) == len(set(dir(np)))
def test_numpy_linalg():
bad_results = check_dir(np.linalg)
assert bad_results == {}
def test_numpy_fft():
bad_results = check_dir(np.fft)
assert bad_results == {}
@pytest.mark.skipif(ctypes is None,
reason="ctypes not available in this python")
def test_NPY_NO_EXPORT():
cdll = ctypes.CDLL(np.core._multiarray_tests.__file__)
# Make sure an arbitrary NPY_NO_EXPORT function is actually hidden
f = getattr(cdll, 'test_not_exported', None)
assert f is None, ("'test_not_exported' is mistakenly exported, "
"NPY_NO_EXPORT does not work")
# Historically NumPy has not used leading underscores for private submodules
# much. This has resulted in lots of things that look like public modules
# (i.e. things that can be imported as `import numpy.somesubmodule.somefile`),
# but were never intended to be public. The PUBLIC_MODULES list contains
# modules that are either public because they were meant to be, or because they
# contain public functions/objects that aren't present in any other namespace
# for whatever reason and therefore should be treated as public.
#
# The PRIVATE_BUT_PRESENT_MODULES list contains modules that look public (lack
# of underscores) but should not be used. For many of those modules the
# current status is fine. For others it may make sense to work on making them
# private, to clean up our public API and avoid confusion.
PUBLIC_MODULES = ['numpy.' + s for s in [
"array_api",
"array_api.linalg",
"ctypeslib",
"distutils",
"distutils.cpuinfo",
"distutils.exec_command",
"distutils.misc_util",
"distutils.log",
"distutils.system_info",
"doc",
"doc.constants",
"doc.ufuncs",
"f2py",
"fft",
"lib",
"lib.format", # was this meant to be public?
"lib.mixins",
"lib.recfunctions",
"lib.scimath",
"lib.stride_tricks",
"linalg",
"ma",
"ma.extras",
"ma.mrecords",
"matlib",
"polynomial",
"polynomial.chebyshev",
"polynomial.hermite",
"polynomial.hermite_e",
"polynomial.laguerre",
"polynomial.legendre",
"polynomial.polynomial",
"random",
"testing",
"typing",
"typing.mypy_plugin",
"version",
]]
PUBLIC_ALIASED_MODULES = [
"numpy.char",
"numpy.emath",
"numpy.rec",
]
PRIVATE_BUT_PRESENT_MODULES = ['numpy.' + s for s in [
"compat",
"compat.py3k",
"conftest",
"core",
"core.arrayprint",
"core.defchararray",
"core.einsumfunc",
"core.fromnumeric",
"core.function_base",
"core.getlimits",
"core.memmap",
"core.multiarray",
"core.numeric",
"core.numerictypes",
"core.overrides",
"core.records",
"core.shape_base",
"core.umath",
"core.umath_tests",
"distutils.armccompiler",
"distutils.ccompiler",
    "distutils.ccompiler_opt",
"distutils.command",
"distutils.command.autodist",
"distutils.command.bdist_rpm",
"distutils.command.build",
"distutils.command.build_clib",
"distutils.command.build_ext",
"distutils.command.build_py",
"distutils.command.build_scripts",
"distutils.command.build_src",
"distutils.command.config",
"distutils.command.config_compiler",
"distutils.command.develop",
"distutils.command.egg_info",
"distutils.command.install",
"distutils.command.install_clib",
"distutils.command.install_data",
"distutils.command.install_headers",
"distutils.command.sdist",
"distutils.conv_template",
"distutils.core",
"distutils.extension",
"distutils.fcompiler",
"distutils.fcompiler.absoft",
"distutils.fcompiler.arm",
"distutils.fcompiler.compaq",
"distutils.fcompiler.environment",
"distutils.fcompiler.g95",
"distutils.fcompiler.gnu",
"distutils.fcompiler.hpux",
"distutils.fcompiler.ibm",
"distutils.fcompiler.intel",
"distutils.fcompiler.lahey",
"distutils.fcompiler.mips",
"distutils.fcompiler.nag",
"distutils.fcompiler.none",
"distutils.fcompiler.pathf95",
"distutils.fcompiler.pg",
"distutils.fcompiler.nv",
"distutils.fcompiler.sun",
"distutils.fcompiler.vast",
"distutils.fcompiler.fujitsu",
"distutils.from_template",
"distutils.intelccompiler",
"distutils.lib2def",
"distutils.line_endings",
"distutils.mingw32ccompiler",
"distutils.msvccompiler",
"distutils.npy_pkg_config",
"distutils.numpy_distribution",
"distutils.pathccompiler",
"distutils.unixccompiler",
"dual",
"f2py.auxfuncs",
"f2py.capi_maps",
"f2py.cb_rules",
"f2py.cfuncs",
"f2py.common_rules",
"f2py.crackfortran",
"f2py.diagnose",
"f2py.f2py2e",
"f2py.f90mod_rules",
"f2py.func2subr",
"f2py.rules",
"f2py.symbolic",
"f2py.use_rules",
"fft.helper",
"lib.arraypad",
"lib.arraysetops",
"lib.arrayterator",
"lib.function_base",
"lib.histograms",
"lib.index_tricks",
"lib.nanfunctions",
"lib.npyio",
"lib.polynomial",
"lib.shape_base",
"lib.twodim_base",
"lib.type_check",
"lib.ufunclike",
"lib.user_array", # note: not in np.lib, but probably should just be deleted
"lib.utils",
"linalg.lapack_lite",
"linalg.linalg",
"ma.bench",
"ma.core",
"ma.testutils",
"ma.timer_comparison",
"matrixlib",
"matrixlib.defmatrix",
"polynomial.polyutils",
"random.mtrand",
"random.bit_generator",
"testing.print_coercion_tables",
"testing.utils",
]]
def is_unexpected(name):
"""Check if this needs to be considered."""
if '._' in name or '.tests' in name or '.setup' in name:
return False
if name in PUBLIC_MODULES:
return False
if name in PUBLIC_ALIASED_MODULES:
return False
if name in PRIVATE_BUT_PRESENT_MODULES:
return False
return True
# These are present in a directory with an __init__.py but cannot be imported
# code_generators/ isn't installed, but present for an inplace build
SKIP_LIST = [
"numpy.core.code_generators",
"numpy.core.code_generators.genapi",
"numpy.core.code_generators.generate_umath",
"numpy.core.code_generators.ufunc_docstrings",
"numpy.core.code_generators.generate_numpy_api",
"numpy.core.code_generators.generate_ufunc_api",
"numpy.core.code_generators.numpy_api",
"numpy.core.code_generators.generate_umath_doc",
"numpy.core.cversions",
"numpy.core.generate_numpy_api",
"numpy.distutils.msvc9compiler",
]
def test_all_modules_are_expected():
"""
Test that we don't add anything that looks like a new public module by
    accident. The check is based on filenames.
"""
modnames = []
for _, modname, ispkg in pkgutil.walk_packages(path=np.__path__,
prefix=np.__name__ + '.',
onerror=None):
if is_unexpected(modname) and modname not in SKIP_LIST:
# We have a name that is new. If that's on purpose, add it to
# PUBLIC_MODULES. We don't expect to have to add anything to
# PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name!
modnames.append(modname)
if modnames:
raise AssertionError(f'Found unexpected modules: {modnames}')
# Stuff that clearly shouldn't be in the API and is detected by the next test
# below
SKIP_LIST_2 = [
'numpy.math',
'numpy.distutils.log.sys',
'numpy.doc.constants.re',
'numpy.doc.constants.textwrap',
'numpy.lib.emath',
'numpy.lib.math',
'numpy.matlib.char',
'numpy.matlib.rec',
'numpy.matlib.emath',
'numpy.matlib.math',
'numpy.matlib.linalg',
'numpy.matlib.fft',
'numpy.matlib.random',
'numpy.matlib.ctypeslib',
'numpy.matlib.ma',
]
def test_all_modules_are_expected_2():
"""
Method checking all objects. The pkgutil-based method in
`test_all_modules_are_expected` does not catch imports into a namespace,
    only filenames. So this test is more thorough and checks imports like:
import .lib.scimath as emath
To check if something in a module is (effectively) public, one can check if
there's anything in that namespace that's a public function/object but is
not exposed in a higher-level namespace. For example for a `numpy.lib`
submodule::
mod = np.lib.mixins
for obj in mod.__all__:
if obj in np.__all__:
continue
elif obj in np.lib.__all__:
continue
else:
print(obj)
"""
def find_unexpected_members(mod_name):
members = []
module = importlib.import_module(mod_name)
if hasattr(module, '__all__'):
objnames = module.__all__
else:
objnames = dir(module)
for objname in objnames:
if not objname.startswith('_'):
fullobjname = mod_name + '.' + objname
if isinstance(getattr(module, objname), types.ModuleType):
if is_unexpected(fullobjname):
if fullobjname not in SKIP_LIST_2:
members.append(fullobjname)
return members
unexpected_members = find_unexpected_members("numpy")
for modname in PUBLIC_MODULES:
unexpected_members.extend(find_unexpected_members(modname))
if unexpected_members:
raise AssertionError("Found unexpected object(s) that look like "
"modules: {}".format(unexpected_members))
def test_api_importable():
"""
Check that all submodules listed higher up in this file can be imported
Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may
simply need to be removed from the list (deprecation may or may not be
needed - apply common sense).
"""
def check_importable(module_name):
try:
importlib.import_module(module_name)
except (ImportError, AttributeError):
return False
return True
module_names = []
for module_name in PUBLIC_MODULES:
if not check_importable(module_name):
module_names.append(module_name)
if module_names:
raise AssertionError("Modules in the public API that cannot be "
"imported: {}".format(module_names))
for module_name in PUBLIC_ALIASED_MODULES:
try:
eval(module_name)
except AttributeError:
module_names.append(module_name)
if module_names:
raise AssertionError("Modules in the public API that were not "
"found: {}".format(module_names))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', category=DeprecationWarning)
warnings.filterwarnings('always', category=ImportWarning)
for module_name in PRIVATE_BUT_PRESENT_MODULES:
if not check_importable(module_name):
module_names.append(module_name)
if module_names:
raise AssertionError("Modules that are not really public but looked "
"public and can not be imported: "
"{}".format(module_names))
@pytest.mark.xfail(
sysconfig.get_config_var("Py_DEBUG") is not None,
reason=(
"NumPy possibly built with `USE_DEBUG=True ./tools/travis-test.sh`, "
"which does not expose the `array_api` entry point. "
"See https://github.com/numpy/numpy/pull/19800"
),
)
def test_array_api_entry_point():
"""
    The entry point for the Array API implementation can be found with
    importlib and returns the ``numpy.array_api`` namespace.
"""
eps = importlib.metadata.entry_points()
try:
xp_eps = eps.select(group="array_api")
except AttributeError:
# The select interface for entry_points was introduced in py3.10,
# deprecating its dict interface. We fallback to dict keys for finding
# Array API entry points so that running this test in <=3.9 will
# still work - see https://github.com/numpy/numpy/pull/19800.
xp_eps = eps.get("array_api", [])
assert len(xp_eps) > 0, "No entry points for 'array_api' found"
try:
ep = next(ep for ep in xp_eps if ep.name == "numpy")
except StopIteration:
raise AssertionError("'numpy' not in array_api entry points") from None
xp = ep.load()
msg = (
f"numpy entry point value '{ep.value}' "
"does not point to our Array API implementation"
)
assert xp is numpy.array_api, msg
| 15,916 | Python | 30.707171 | 85 | 0.635964 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/tests/test_ctypeslib.py | import sys
import pytest
import weakref
from pathlib import Path
import numpy as np
from numpy.ctypeslib import ndpointer, load_library, as_array
from numpy.distutils.misc_util import get_shared_lib_extension
from numpy.testing import assert_, assert_array_equal, assert_raises, assert_equal
try:
import ctypes
except ImportError:
ctypes = None
else:
cdll = None
test_cdll = None
if hasattr(sys, 'gettotalrefcount'):
try:
cdll = load_library('_multiarray_umath_d', np.core._multiarray_umath.__file__)
except OSError:
pass
try:
test_cdll = load_library('_multiarray_tests', np.core._multiarray_tests.__file__)
except OSError:
pass
if cdll is None:
cdll = load_library('_multiarray_umath', np.core._multiarray_umath.__file__)
if test_cdll is None:
test_cdll = load_library('_multiarray_tests', np.core._multiarray_tests.__file__)
c_forward_pointer = test_cdll.forward_pointer
@pytest.mark.skipif(ctypes is None,
reason="ctypes not available in this python")
@pytest.mark.skipif(sys.platform == 'cygwin',
reason="Known to fail on cygwin")
class TestLoadLibrary:
def test_basic(self):
loader_path = np.core._multiarray_umath.__file__
out1 = load_library('_multiarray_umath', loader_path)
out2 = load_library(Path('_multiarray_umath'), loader_path)
out3 = load_library('_multiarray_umath', Path(loader_path))
out4 = load_library(b'_multiarray_umath', loader_path)
assert isinstance(out1, ctypes.CDLL)
assert out1 is out2 is out3 is out4
def test_basic2(self):
# Regression for #801: load_library with a full library name
# (including extension) does not work.
try:
try:
so = get_shared_lib_extension(is_python_ext=True)
# Should succeed
load_library('_multiarray_umath%s' % so, np.core._multiarray_umath.__file__)
except ImportError:
print("No distutils available, skipping test.")
except ImportError as e:
msg = ("ctypes is not available on this python: skipping the test"
" (import error was: %s)" % str(e))
print(msg)
class TestNdpointer:
def test_dtype(self):
dt = np.intc
p = ndpointer(dtype=dt)
assert_(p.from_param(np.array([1], dt)))
dt = '<i4'
p = ndpointer(dtype=dt)
assert_(p.from_param(np.array([1], dt)))
dt = np.dtype('>i4')
p = ndpointer(dtype=dt)
p.from_param(np.array([1], dt))
assert_raises(TypeError, p.from_param,
np.array([1], dt.newbyteorder('swap')))
dtnames = ['x', 'y']
dtformats = [np.intc, np.float64]
dtdescr = {'names': dtnames, 'formats': dtformats}
dt = np.dtype(dtdescr)
p = ndpointer(dtype=dt)
assert_(p.from_param(np.zeros((10,), dt)))
samedt = np.dtype(dtdescr)
p = ndpointer(dtype=samedt)
assert_(p.from_param(np.zeros((10,), dt)))
dt2 = np.dtype(dtdescr, align=True)
if dt.itemsize != dt2.itemsize:
assert_raises(TypeError, p.from_param, np.zeros((10,), dt2))
else:
assert_(p.from_param(np.zeros((10,), dt2)))
def test_ndim(self):
p = ndpointer(ndim=0)
assert_(p.from_param(np.array(1)))
assert_raises(TypeError, p.from_param, np.array([1]))
p = ndpointer(ndim=1)
assert_raises(TypeError, p.from_param, np.array(1))
assert_(p.from_param(np.array([1])))
p = ndpointer(ndim=2)
assert_(p.from_param(np.array([[1]])))
def test_shape(self):
p = ndpointer(shape=(1, 2))
assert_(p.from_param(np.array([[1, 2]])))
assert_raises(TypeError, p.from_param, np.array([[1], [2]]))
p = ndpointer(shape=())
assert_(p.from_param(np.array(1)))
def test_flags(self):
x = np.array([[1, 2], [3, 4]], order='F')
p = ndpointer(flags='FORTRAN')
assert_(p.from_param(x))
p = ndpointer(flags='CONTIGUOUS')
assert_raises(TypeError, p.from_param, x)
p = ndpointer(flags=x.flags.num)
assert_(p.from_param(x))
assert_raises(TypeError, p.from_param, np.array([[1, 2], [3, 4]]))
def test_cache(self):
assert_(ndpointer(dtype=np.float64) is ndpointer(dtype=np.float64))
# shapes are normalized
assert_(ndpointer(shape=2) is ndpointer(shape=(2,)))
# 1.12 <= v < 1.16 had a bug that made these fail
assert_(ndpointer(shape=2) is not ndpointer(ndim=2))
assert_(ndpointer(ndim=2) is not ndpointer(shape=2))
@pytest.mark.skipif(ctypes is None,
reason="ctypes not available on this python installation")
class TestNdpointerCFunc:
def test_arguments(self):
""" Test that arguments are coerced from arrays """
c_forward_pointer.restype = ctypes.c_void_p
c_forward_pointer.argtypes = (ndpointer(ndim=2),)
c_forward_pointer(np.zeros((2, 3)))
# too many dimensions
assert_raises(
ctypes.ArgumentError, c_forward_pointer, np.zeros((2, 3, 4)))
@pytest.mark.parametrize(
'dt', [
float,
np.dtype(dict(
formats=['<i4', '<i4'],
names=['a', 'b'],
offsets=[0, 2],
itemsize=6
))
], ids=[
'float',
'overlapping-fields'
]
)
def test_return(self, dt):
""" Test that return values are coerced to arrays """
arr = np.zeros((2, 3), dt)
ptr_type = ndpointer(shape=arr.shape, dtype=arr.dtype)
c_forward_pointer.restype = ptr_type
c_forward_pointer.argtypes = (ptr_type,)
# check that the arrays are equivalent views on the same data
arr2 = c_forward_pointer(arr)
assert_equal(arr2.dtype, arr.dtype)
assert_equal(arr2.shape, arr.shape)
assert_equal(
arr2.__array_interface__['data'],
arr.__array_interface__['data']
)
def test_vague_return_value(self):
""" Test that vague ndpointer return values do not promote to arrays """
arr = np.zeros((2, 3))
ptr_type = ndpointer(dtype=arr.dtype)
c_forward_pointer.restype = ptr_type
c_forward_pointer.argtypes = (ptr_type,)
ret = c_forward_pointer(arr)
assert_(isinstance(ret, ptr_type))
@pytest.mark.skipif(ctypes is None,
reason="ctypes not available on this python installation")
class TestAsArray:
def test_array(self):
from ctypes import c_int
pair_t = c_int * 2
a = as_array(pair_t(1, 2))
assert_equal(a.shape, (2,))
assert_array_equal(a, np.array([1, 2]))
a = as_array((pair_t * 3)(pair_t(1, 2), pair_t(3, 4), pair_t(5, 6)))
assert_equal(a.shape, (3, 2))
assert_array_equal(a, np.array([[1, 2], [3, 4], [5, 6]]))
def test_pointer(self):
from ctypes import c_int, cast, POINTER
p = cast((c_int * 10)(*range(10)), POINTER(c_int))
a = as_array(p, shape=(10,))
assert_equal(a.shape, (10,))
assert_array_equal(a, np.arange(10))
a = as_array(p, shape=(2, 5))
assert_equal(a.shape, (2, 5))
assert_array_equal(a, np.arange(10).reshape((2, 5)))
# shape argument is required
assert_raises(TypeError, as_array, p)
def test_struct_array_pointer(self):
from ctypes import c_int16, Structure, pointer
class Struct(Structure):
_fields_ = [('a', c_int16)]
Struct3 = 3 * Struct
c_array = (2 * Struct3)(
Struct3(Struct(a=1), Struct(a=2), Struct(a=3)),
Struct3(Struct(a=4), Struct(a=5), Struct(a=6))
)
expected = np.array([
[(1,), (2,), (3,)],
[(4,), (5,), (6,)],
], dtype=[('a', np.int16)])
def check(x):
assert_equal(x.dtype, expected.dtype)
assert_equal(x, expected)
# all of these should be equivalent
check(as_array(c_array))
check(as_array(pointer(c_array), shape=()))
check(as_array(pointer(c_array[0]), shape=(2,)))
check(as_array(pointer(c_array[0][0]), shape=(2, 3)))
def test_reference_cycles(self):
# related to gh-6511
import ctypes
# create array to work with
# don't use int/long to avoid running into bpo-10746
N = 100
a = np.arange(N, dtype=np.short)
# get pointer to array
pnt = np.ctypeslib.as_ctypes(a)
with np.testing.assert_no_gc_cycles():
# decay the array above to a pointer to its first element
newpnt = ctypes.cast(pnt, ctypes.POINTER(ctypes.c_short))
# and construct an array using this data
b = np.ctypeslib.as_array(newpnt, (N,))
# now delete both, which should cleanup both objects
del newpnt, b
def test_segmentation_fault(self):
arr = np.zeros((224, 224, 3))
c_arr = np.ctypeslib.as_ctypes(arr)
arr_ref = weakref.ref(arr)
del arr
# check the reference wasn't cleaned up
assert_(arr_ref() is not None)
# check we avoid the segfault
c_arr[0][0][0]
@pytest.mark.skipif(ctypes is None,
reason="ctypes not available on this python installation")
class TestAsCtypesType:
""" Test conversion from dtypes to ctypes types """
def test_scalar(self):
dt = np.dtype('<u2')
ct = np.ctypeslib.as_ctypes_type(dt)
assert_equal(ct, ctypes.c_uint16.__ctype_le__)
dt = np.dtype('>u2')
ct = np.ctypeslib.as_ctypes_type(dt)
assert_equal(ct, ctypes.c_uint16.__ctype_be__)
dt = np.dtype('u2')
ct = np.ctypeslib.as_ctypes_type(dt)
assert_equal(ct, ctypes.c_uint16)
def test_subarray(self):
dt = np.dtype((np.int32, (2, 3)))
ct = np.ctypeslib.as_ctypes_type(dt)
assert_equal(ct, 2 * (3 * ctypes.c_int32))
def test_structure(self):
dt = np.dtype([
('a', np.uint16),
('b', np.uint32),
])
ct = np.ctypeslib.as_ctypes_type(dt)
assert_(issubclass(ct, ctypes.Structure))
assert_equal(ctypes.sizeof(ct), dt.itemsize)
assert_equal(ct._fields_, [
('a', ctypes.c_uint16),
('b', ctypes.c_uint32),
])
def test_structure_aligned(self):
dt = np.dtype([
('a', np.uint16),
('b', np.uint32),
], align=True)
ct = np.ctypeslib.as_ctypes_type(dt)
assert_(issubclass(ct, ctypes.Structure))
assert_equal(ctypes.sizeof(ct), dt.itemsize)
assert_equal(ct._fields_, [
('a', ctypes.c_uint16),
('', ctypes.c_char * 2), # padding
('b', ctypes.c_uint32),
])
def test_union(self):
dt = np.dtype(dict(
names=['a', 'b'],
offsets=[0, 0],
formats=[np.uint16, np.uint32]
))
ct = np.ctypeslib.as_ctypes_type(dt)
assert_(issubclass(ct, ctypes.Union))
assert_equal(ctypes.sizeof(ct), dt.itemsize)
assert_equal(ct._fields_, [
('a', ctypes.c_uint16),
('b', ctypes.c_uint32),
])
def test_padded_union(self):
dt = np.dtype(dict(
names=['a', 'b'],
offsets=[0, 0],
formats=[np.uint16, np.uint32],
itemsize=5,
))
ct = np.ctypeslib.as_ctypes_type(dt)
assert_(issubclass(ct, ctypes.Union))
assert_equal(ctypes.sizeof(ct), dt.itemsize)
assert_equal(ct._fields_, [
('a', ctypes.c_uint16),
('b', ctypes.c_uint32),
('', ctypes.c_char * 5), # padding
])
def test_overlapping(self):
dt = np.dtype(dict(
names=['a', 'b'],
offsets=[0, 2],
formats=[np.uint32, np.uint32]
))
assert_raises(NotImplementedError, np.ctypeslib.as_ctypes_type, dt)
| 12,290 | Python | 32.308943 | 93 | 0.55297 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.