# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/numerictypes.py
import numpy as np
np.maximum_sctype("S8")
np.maximum_sctype(object)
np.issctype(object)
np.issctype("S8")
np.obj2sctype(list)
np.obj2sctype(list, default=None)
np.obj2sctype(list, default=np.string_)
np.issubclass_(np.int32, int)
np.issubclass_(np.float64, float)
np.issubclass_(np.float64, (int, float))
np.issubsctype("int64", int)
np.issubsctype(np.array([1]), np.array([1]))
np.issubdtype("S1", np.string_)
np.issubdtype(np.float64, np.float32)
np.sctype2char("S1")
np.sctype2char(list)
np.find_common_type([], [np.int64, np.float32, complex])
np.find_common_type((), (np.int64, np.float32, complex))
np.find_common_type([np.int64, np.float32], [])
np.find_common_type([np.float32], [np.int64, np.float64])
np.cast[int]
np.cast["i8"]
np.cast[np.int64]
np.nbytes[int]
np.nbytes["i8"]
np.nbytes[np.int64]
np.ScalarType
np.ScalarType[0]
np.ScalarType[3]
np.ScalarType[8]
np.ScalarType[10]
np.typecodes["Character"]
np.typecodes["Complex"]
np.typecodes["All"]

# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/ndarray_misc.py
"""
Tests for miscellaneous (non-magic) ``np.ndarray``/``np.generic`` methods.
More extensive tests are performed for the methods'
function-based counterpart in `../from_numeric.py`.
"""
from __future__ import annotations
import operator
from typing import cast, Any
import numpy as np
class SubClass(np.ndarray): ...
i4 = np.int32(1)
A: np.ndarray[Any, np.dtype[np.int32]] = np.array([[1]], dtype=np.int32)
B0 = np.empty((), dtype=np.int32).view(SubClass)
B1 = np.empty((1,), dtype=np.int32).view(SubClass)
B2 = np.empty((1, 1), dtype=np.int32).view(SubClass)
C: np.ndarray[Any, np.dtype[np.int32]] = np.array([0, 1, 2], dtype=np.int32)
D = np.empty(3).view(SubClass)
i4.all()
A.all()
A.all(axis=0)
A.all(keepdims=True)
A.all(out=B0)
i4.any()
A.any()
A.any(axis=0)
A.any(keepdims=True)
A.any(out=B0)
i4.argmax()
A.argmax()
A.argmax(axis=0)
A.argmax(out=B0)
i4.argmin()
A.argmin()
A.argmin(axis=0)
A.argmin(out=B0)
i4.argsort()
A.argsort()
i4.choose([()])
_choices = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype=np.int32)
C.choose(_choices)
C.choose(_choices, out=D)
i4.clip(1)
A.clip(1)
A.clip(None, 1)
A.clip(1, out=B2)
A.clip(None, 1, out=B2)
i4.compress([1])
A.compress([1])
A.compress([1], out=B1)
i4.conj()
A.conj()
B0.conj()
i4.conjugate()
A.conjugate()
B0.conjugate()
i4.cumprod()
A.cumprod()
A.cumprod(out=B1)
i4.cumsum()
A.cumsum()
A.cumsum(out=B1)
i4.max()
A.max()
A.max(axis=0)
A.max(keepdims=True)
A.max(out=B0)
i4.mean()
A.mean()
A.mean(axis=0)
A.mean(keepdims=True)
A.mean(out=B0)
i4.min()
A.min()
A.min(axis=0)
A.min(keepdims=True)
A.min(out=B0)
i4.newbyteorder()
A.newbyteorder()
B0.newbyteorder('|')
i4.prod()
A.prod()
A.prod(axis=0)
A.prod(keepdims=True)
A.prod(out=B0)
i4.ptp()
A.ptp()
A.ptp(axis=0)
A.ptp(keepdims=True)
A.astype(int).ptp(out=B0)
i4.round()
A.round()
A.round(out=B2)
i4.repeat(1)
A.repeat(1)
B0.repeat(1)
i4.std()
A.std()
A.std(axis=0)
A.std(keepdims=True)
A.std(out=B0.astype(np.float64))
i4.sum()
A.sum()
A.sum(axis=0)
A.sum(keepdims=True)
A.sum(out=B0)
i4.take(0)
A.take(0)
A.take([0])
A.take(0, out=B0)
A.take([0], out=B1)
i4.var()
A.var()
A.var(axis=0)
A.var(keepdims=True)
A.var(out=B0)
A.argpartition([0])
A.diagonal()
A.dot(1)
A.dot(1, out=B0)
A.nonzero()
C.searchsorted(1)
A.trace()
A.trace(out=B0)
void = cast(np.void, np.array(1, dtype=[("f", np.float64)]).take(0))
void.setfield(10, np.float64)
A.item(0)
C.item(0)
A.ravel()
C.ravel()
A.flatten()
C.flatten()
A.reshape(1)
C.reshape(3)
int(np.array(1.0, dtype=np.float64))
int(np.array("1", dtype=np.str_))
float(np.array(1.0, dtype=np.float64))
float(np.array("1", dtype=np.str_))
complex(np.array(1.0, dtype=np.float64))
operator.index(np.array(1, dtype=np.int64))

# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/typing/tests/data/pass/simple_py3.py
import numpy as np
array = np.array([1, 2])
# The @ (matmul) operator is not available in Python 2
array @ array

# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/ma/timer_comparison.py
import timeit
from functools import reduce
import numpy as np
from numpy import float_
import numpy.core.fromnumeric as fromnumeric
from numpy.testing import build_err_msg
pi = np.pi
class ModuleTester:
def __init__(self, module):
self.module = module
self.allequal = module.allequal
self.arange = module.arange
self.array = module.array
self.concatenate = module.concatenate
self.count = module.count
self.equal = module.equal
self.filled = module.filled
self.getmask = module.getmask
self.getmaskarray = module.getmaskarray
self.id = id
self.inner = module.inner
self.make_mask = module.make_mask
self.masked = module.masked
self.masked_array = module.masked_array
self.masked_values = module.masked_values
self.mask_or = module.mask_or
self.nomask = module.nomask
self.ones = module.ones
self.outer = module.outer
self.repeat = module.repeat
self.resize = module.resize
self.sort = module.sort
self.take = module.take
self.transpose = module.transpose
self.zeros = module.zeros
self.MaskType = module.MaskType
try:
self.umath = module.umath
except AttributeError:
self.umath = module.core.umath
self.testnames = []
def assert_array_compare(self, comparison, x, y, err_msg='', header='',
fill_value=True):
"""
Assert that a comparison of two masked arrays is satisfied elementwise.
"""
xf = self.filled(x)
yf = self.filled(y)
m = self.mask_or(self.getmask(x), self.getmask(y))
x = self.filled(self.masked_array(xf, mask=m), fill_value)
y = self.filled(self.masked_array(yf, mask=m), fill_value)
if (x.dtype.char != "O"):
x = x.astype(float_)
if isinstance(x, np.ndarray) and x.size > 1:
x[np.isnan(x)] = 0
elif np.isnan(x):
x = 0
if (y.dtype.char != "O"):
y = y.astype(float_)
if isinstance(y, np.ndarray) and y.size > 1:
y[np.isnan(y)] = 0
elif np.isnan(y):
y = 0
try:
cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
if not cond:
msg = build_err_msg([x, y],
err_msg
+ f'\n(shapes {x.shape}, {y.shape} mismatch)',
header=header,
names=('x', 'y'))
assert cond, msg
val = comparison(x, y)
if m is not self.nomask and fill_value:
val = self.masked_array(val, mask=m)
if isinstance(val, bool):
cond = val
reduced = [0]
else:
reduced = val.ravel()
cond = reduced.all()
reduced = reduced.tolist()
if not cond:
match = 100-100.0*reduced.count(1)/len(reduced)
msg = build_err_msg([x, y],
err_msg
+ '\n(mismatch %s%%)' % (match,),
header=header,
names=('x', 'y'))
assert cond, msg
except ValueError as e:
msg = build_err_msg([x, y], err_msg, header=header, names=('x', 'y'))
raise ValueError(msg) from e
def assert_array_equal(self, x, y, err_msg=''):
"""
Checks the elementwise equality of two masked arrays.
"""
self.assert_array_compare(self.equal, x, y, err_msg=err_msg,
header='Arrays are not equal')
@np.errstate(all='ignore')
def test_0(self):
"""
Tests creation
"""
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
xm = self.masked_array(x, mask=m)
xm[0]
@np.errstate(all='ignore')
def test_1(self):
"""
Tests creation
"""
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = self.masked_array(x, mask=m1)
ym = self.masked_array(y, mask=m2)
xf = np.where(m1, 1.e+20, x)
xm.set_fill_value(1.e+20)
assert((xm-ym).filled(0).any())
s = x.shape
assert(xm.size == reduce(lambda x, y:x*y, s))
assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1))
for s in [(4, 3), (6, 2)]:
x.shape = s
y.shape = s
xm.shape = s
ym.shape = s
xf.shape = s
assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1))
@np.errstate(all='ignore')
def test_2(self):
"""
Tests conversions and indexing.
"""
x1 = np.array([1, 2, 4, 3])
x2 = self.array(x1, mask=[1, 0, 0, 0])
x3 = self.array(x1, mask=[0, 1, 0, 1])
x4 = self.array(x1)
# test conversion to strings, no errors
str(x2)
repr(x2)
# tests of indexing
assert type(x2[1]) is type(x1[1])
assert x1[1] == x2[1]
x1[2] = 9
x2[2] = 9
self.assert_array_equal(x1, x2)
x1[1:3] = 99
x2[1:3] = 99
x2[1] = self.masked
x2[1:3] = self.masked
x2[:] = x1
x2[1] = self.masked
x3[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0])
x4[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0])
x1 = np.arange(5)*1.0
x2 = self.masked_values(x1, 3.0)
x1 = self.array([1, 'hello', 2, 3], object)
x2 = np.array([1, 'hello', 2, 3], object)
# check that no error occurs.
x1[1]
x2[1]
assert x1[1:1].shape == (0,)
# Tests copy-size
n = [0, 0, 1, 0, 0]
m = self.make_mask(n)
m2 = self.make_mask(m)
assert(m is m2)
m3 = self.make_mask(m, copy=1)
assert(m is not m3)
@np.errstate(all='ignore')
def test_3(self):
"""
Tests resize/repeat
"""
x4 = self.arange(4)
x4[2] = self.masked
y4 = self.resize(x4, (8,))
assert self.allequal(self.concatenate([x4, x4]), y4)
assert self.allequal(self.getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])
y5 = self.repeat(x4, (2, 2, 2, 2), axis=0)
self.assert_array_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3])
y6 = self.repeat(x4, 2, axis=0)
assert self.allequal(y5, y6)
y7 = x4.repeat((2, 2, 2, 2), axis=0)
assert self.allequal(y5, y7)
y8 = x4.repeat(2, 0)
assert self.allequal(y5, y8)
@np.errstate(all='ignore')
def test_4(self):
"""
Test of take, transpose, inner, outer products.
"""
x = self.arange(24)
y = np.arange(24)
x[5:6] = self.masked
x = x.reshape(2, 3, 4)
y = y.reshape(2, 3, 4)
assert self.allequal(np.transpose(y, (2, 0, 1)), self.transpose(x, (2, 0, 1)))
assert self.allequal(np.take(y, (2, 0, 1), 1), self.take(x, (2, 0, 1), 1))
assert self.allequal(np.inner(self.filled(x, 0), self.filled(y, 0)),
self.inner(x, y))
assert self.allequal(np.outer(self.filled(x, 0), self.filled(y, 0)),
self.outer(x, y))
y = self.array(['abc', 1, 'def', 2, 3], object)
y[2] = self.masked
t = self.take(y, [0, 3, 4])
assert t[0] == 'abc'
assert t[1] == 2
assert t[2] == 3
@np.errstate(all='ignore')
def test_5(self):
"""
Tests inplace w/ scalar
"""
x = self.arange(10)
y = self.arange(10)
xm = self.arange(10)
xm[2] = self.masked
x += 1
assert self.allequal(x, y+1)
xm += 1
assert self.allequal(xm, y+1)
x = self.arange(10)
xm = self.arange(10)
xm[2] = self.masked
x -= 1
assert self.allequal(x, y-1)
xm -= 1
assert self.allequal(xm, y-1)
x = self.arange(10)*1.0
xm = self.arange(10)*1.0
xm[2] = self.masked
x *= 2.0
assert self.allequal(x, y*2)
xm *= 2.0
assert self.allequal(xm, y*2)
x = self.arange(10)*2
xm = self.arange(10)*2
xm[2] = self.masked
x /= 2
assert self.allequal(x, y)
xm /= 2
assert self.allequal(xm, y)
x = self.arange(10)*1.0
xm = self.arange(10)*1.0
xm[2] = self.masked
x /= 2.0
assert self.allequal(x, y/2.0)
xm /= self.arange(10)
self.assert_array_equal(xm, self.ones((10,)))
x = self.arange(10).astype(float_)
xm = self.arange(10)
xm[2] = self.masked
x += 1.
assert self.allequal(x, y + 1.)
@np.errstate(all='ignore')
def test_6(self):
"""
Tests inplace w/ array
"""
x = self.arange(10, dtype=float_)
y = self.arange(10)
xm = self.arange(10, dtype=float_)
xm[2] = self.masked
m = xm.mask
a = self.arange(10, dtype=float_)
a[-1] = self.masked
x += a
xm += a
assert self.allequal(x, y+a)
assert self.allequal(xm, y+a)
assert self.allequal(xm.mask, self.mask_or(m, a.mask))
x = self.arange(10, dtype=float_)
xm = self.arange(10, dtype=float_)
xm[2] = self.masked
m = xm.mask
a = self.arange(10, dtype=float_)
a[-1] = self.masked
x -= a
xm -= a
assert self.allequal(x, y-a)
assert self.allequal(xm, y-a)
assert self.allequal(xm.mask, self.mask_or(m, a.mask))
x = self.arange(10, dtype=float_)
xm = self.arange(10, dtype=float_)
xm[2] = self.masked
m = xm.mask
a = self.arange(10, dtype=float_)
a[-1] = self.masked
x *= a
xm *= a
assert self.allequal(x, y*a)
assert self.allequal(xm, y*a)
assert self.allequal(xm.mask, self.mask_or(m, a.mask))
x = self.arange(10, dtype=float_)
xm = self.arange(10, dtype=float_)
xm[2] = self.masked
m = xm.mask
a = self.arange(10, dtype=float_)
a[-1] = self.masked
x /= a
xm /= a
@np.errstate(all='ignore')
def test_7(self):
"Tests ufunc"
d = (self.array([1.0, 0, -1, pi/2]*2, mask=[0, 1]+[0]*6),
self.array([1.0, 0, -1, pi/2]*2, mask=[1, 0]+[0]*6),)
for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
# 'sin', 'cos', 'tan',
# 'arcsin', 'arccos', 'arctan',
# 'sinh', 'cosh', 'tanh',
# 'arcsinh',
# 'arccosh',
# 'arctanh',
# 'absolute', 'fabs', 'negative',
# # 'nonzero', 'around',
# 'floor', 'ceil',
# # 'sometrue', 'alltrue',
# 'logical_not',
# 'add', 'subtract', 'multiply',
# 'divide', 'true_divide', 'floor_divide',
# 'remainder', 'fmod', 'hypot', 'arctan2',
# 'equal', 'not_equal', 'less_equal', 'greater_equal',
# 'less', 'greater',
# 'logical_and', 'logical_or', 'logical_xor',
]:
try:
uf = getattr(self.umath, f)
except AttributeError:
uf = getattr(fromnumeric, f)
mf = getattr(self.module, f)
args = d[:uf.nin]
ur = uf(*args)
mr = mf(*args)
self.assert_array_equal(ur.filled(0), mr.filled(0), f)
self.assert_array_equal(ur._mask, mr._mask)
@np.errstate(all='ignore')
def test_99(self):
# test average; note: this relies on a ``self.average`` attribute that
# __init__ never sets, so test_99 is not exercised by the __main__ loop below
ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
self.assert_array_equal(2.0, self.average(ott, axis=0))
self.assert_array_equal(2.0, self.average(ott, weights=[1., 1., 2., 1.]))
result, wts = self.average(ott, weights=[1., 1., 2., 1.], returned=1)
self.assert_array_equal(2.0, result)
assert(wts == 4.0)
ott[:] = self.masked
assert(self.average(ott, axis=0) is self.masked)
ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
ott = ott.reshape(2, 2)
ott[:, 1] = self.masked
self.assert_array_equal(self.average(ott, axis=0), [2.0, 0.0])
assert(self.average(ott, axis=1)[0] is self.masked)
self.assert_array_equal([2., 0.], self.average(ott, axis=0))
result, wts = self.average(ott, axis=0, returned=1)
self.assert_array_equal(wts, [1., 0.])
w1 = [0, 1, 1, 1, 1, 0]
w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
x = self.arange(6)
self.assert_array_equal(self.average(x, axis=0), 2.5)
self.assert_array_equal(self.average(x, axis=0, weights=w1), 2.5)
y = self.array([self.arange(6), 2.0*self.arange(6)])
self.assert_array_equal(self.average(y, None), np.add.reduce(np.arange(6))*3./12.)
self.assert_array_equal(self.average(y, axis=0), np.arange(6) * 3./2.)
self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0])
self.assert_array_equal(self.average(y, None, weights=w2), 20./6.)
self.assert_array_equal(self.average(y, axis=0, weights=w2), [0., 1., 2., 3., 4., 10.])
self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0])
m1 = self.zeros(6)
m2 = [0, 0, 1, 1, 0, 0]
m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]
m4 = self.ones(6)
m5 = [0, 1, 1, 1, 1, 1]
self.assert_array_equal(self.average(self.masked_array(x, m1), axis=0), 2.5)
self.assert_array_equal(self.average(self.masked_array(x, m2), axis=0), 2.5)
self.assert_array_equal(self.average(self.masked_array(x, m5), axis=0), 0.0)
self.assert_array_equal(self.count(self.average(self.masked_array(x, m4), axis=0)), 0)
z = self.masked_array(y, m3)
self.assert_array_equal(self.average(z, None), 20./6.)
self.assert_array_equal(self.average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5])
self.assert_array_equal(self.average(z, axis=1), [2.5, 5.0])
self.assert_array_equal(self.average(z, axis=0, weights=w2), [0., 1., 99., 99., 4.0, 10.0])
@np.errstate(all='ignore')
def test_A(self):
x = self.arange(24)
x[5:6] = self.masked
x = x.reshape(2, 3, 4)
if __name__ == '__main__':
setup_base = ("from __main__ import ModuleTester \n"
"import numpy\n"
"tester = ModuleTester(module)\n")
setup_cur = "import numpy.ma.core as module\n" + setup_base
(nrepeat, nloop) = (10, 10)
for i in range(1, 8):
func = 'tester.test_%i()' % i
cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10)
cur = np.sort(cur)
print("#%i" % i + 50*'.')
print(eval("ModuleTester.test_%i.__doc__" % i))
print(f'core_current : {cur[0]:.3f} - {cur[1]:.3f}')

# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/ma/__init__.py
"""
=============
Masked Arrays
=============
Arrays sometimes contain invalid or missing data. When doing operations
on such arrays, we wish to suppress invalid values, which is the purpose masked
arrays fulfill (an example of typical use is given below).
For example, examine the following array:
>>> x = np.array([2, 1, 3, np.nan, 5, 2, 3, np.nan])
When we try to calculate the mean of the data, the result is undetermined:
>>> np.mean(x)
nan
The mean is calculated using roughly ``np.sum(x)/len(x)``, but since
any number added to ``NaN`` [1]_ produces ``NaN``, this doesn't work. Enter
masked arrays:
>>> m = np.ma.masked_array(x, np.isnan(x))
>>> m
masked_array(data = [2.0 1.0 3.0 -- 5.0 2.0 3.0 --],
mask = [False False False True False False False True],
fill_value=1e+20)
Here, we construct a masked array that suppresses all ``NaN`` values. We
may now proceed to calculate the mean of the other values:
>>> np.mean(m)
2.6666666666666665
.. [1] Not-a-Number, a floating point value that is the result of an
invalid operation.
.. moduleauthor:: Pierre Gerard-Marchant
.. moduleauthor:: Jarrod Millman
"""
from . import core
from .core import *
from . import extras
from .extras import *
__all__ = ['core', 'extras']
__all__ += core.__all__
__all__ += extras.__all__
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester

# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/ma/bench.py
#!/usr/bin/env python3
import timeit
import numpy
from numpy import ma  # needed: timer()'s setup string imports ``ma`` from __main__
###############################################################################
# Global variables #
###############################################################################
# Small arrays
xs = numpy.random.uniform(-1, 1, 6).reshape(2, 3)
ys = numpy.random.uniform(-1, 1, 6).reshape(2, 3)
zs = xs + 1j * ys
m1 = [[True, False, False], [False, False, True]]
m2 = [[True, False, True], [False, False, True]]
nmxs = numpy.ma.array(xs, mask=m1)
nmys = numpy.ma.array(ys, mask=m2)
nmzs = numpy.ma.array(zs, mask=m1)
# Big arrays
xl = numpy.random.uniform(-1, 1, 100*100).reshape(100, 100)
yl = numpy.random.uniform(-1, 1, 100*100).reshape(100, 100)
zl = xl + 1j * yl
maskx = xl > 0.8
masky = yl < -0.8
nmxl = numpy.ma.array(xl, mask=maskx)
nmyl = numpy.ma.array(yl, mask=masky)
nmzl = numpy.ma.array(zl, mask=maskx)
###############################################################################
# Functions #
###############################################################################
def timer(s, v='', nloop=500, nrep=3):
units = ["s", "ms", "µs", "ns"]
scaling = [1, 1e3, 1e6, 1e9]
print("%s : %-50s : " % (v, s), end=' ')
varnames = ["%ss,nm%ss,%sl,nm%sl" % tuple(x*4) for x in 'xyz']
setup = 'from __main__ import numpy, ma, %s' % ','.join(varnames)
Timer = timeit.Timer(stmt=s, setup=setup)
best = min(Timer.repeat(nrep, nloop)) / nloop
if best > 0.0:
order = min(-int(numpy.floor(numpy.log10(best)) // 3), 3)
else:
order = 3
print("%d loops, best of %d: %.*g %s per loop" % (nloop, nrep,
3,
best * scaling[order],
units[order]))
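# Worked example of the unit scaling above (illustrative): a best time of
# 2.5e-4 s gives floor(log10(2.5e-4)) == -4, so order = min(-(-4 // 3), 3) == 2,
# and the result is printed as "250 µs per loop".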
def compare_functions_1v(func, nloop=500,
xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl):
funcname = func.__name__
print("-"*50)
print(f'{funcname} on small arrays')
module, data = "numpy.ma", "nmxs"
timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
print("%s on large arrays" % funcname)
module, data = "numpy.ma", "nmxl"
timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
return
def compare_methods(methodname, args, vars='x', nloop=500, test=True,
xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl):
print("-"*50)
print(f'{methodname} on small arrays')
data, ver = f'nm{vars}s', 'numpy.ma'
timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop)
print("%s on large arrays" % methodname)
data, ver = "nm%sl" % vars, 'numpy.ma'
timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop)
return
def compare_functions_2v(func, nloop=500, test=True,
xs=xs, nmxs=nmxs,
ys=ys, nmys=nmys,
xl=xl, nmxl=nmxl,
yl=yl, nmyl=nmyl):
funcname = func.__name__
print("-"*50)
print(f'{funcname} on small arrays')
module, data = "numpy.ma", "nmxs,nmys"
timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
print(f'{funcname} on large arrays')
module, data = "numpy.ma", "nmxl,nmyl"
timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
return
if __name__ == '__main__':
compare_functions_1v(numpy.sin)
compare_functions_1v(numpy.log)
compare_functions_1v(numpy.sqrt)
compare_functions_2v(numpy.multiply)
compare_functions_2v(numpy.divide)
compare_functions_2v(numpy.power)
compare_methods('ravel', '', nloop=1000)
compare_methods('conjugate', '', 'z', nloop=1000)
compare_methods('transpose', '', nloop=1000)
compare_methods('compressed', '', nloop=1000)
compare_methods('__getitem__', '0', nloop=1000)
compare_methods('__getitem__', '(0,0)', nloop=1000)
compare_methods('__getitem__', '[0,-1]', nloop=1000)
compare_methods('__setitem__', '0, 17', nloop=1000, test=False)
compare_methods('__setitem__', '(0,0), 17', nloop=1000, test=False)
print("-"*50)
print("__setitem__ on small arrays")
timer('nmxs.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ', nloop=10000)
print("-"*50)
print("__setitem__ on large arrays")
timer('nmxl.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ', nloop=10000)
print("-"*50)
print("where on small arrays")
timer('numpy.ma.where(nmxs>2,nmxs,nmys)', 'numpy.ma ', nloop=1000)
print("-"*50)
print("where on large arrays")
timer('numpy.ma.where(nmxl>2,nmxl,nmyl)', 'numpy.ma ', nloop=100)

# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/ma/core.py
"""
numpy.ma : a package to handle missing or invalid values.
This package was initially written for numarray by Paul F. Dubois
at Lawrence Livermore National Laboratory.
In 2006, the package was completely rewritten by Pierre Gerard-Marchant
(University of Georgia) to make the MaskedArray class a subclass of ndarray,
and to improve support of structured arrays.
Copyright 1999, 2000, 2001 Regents of the University of California.
Released for unlimited redistribution.
* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois.
* Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant
(pgmdevlist_AT_gmail_DOT_com)
* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com)
.. moduleauthor:: Pierre Gerard-Marchant
"""
# pylint: disable-msg=E1002
import builtins
import inspect
import operator
import warnings
import textwrap
import re
from functools import reduce
import numpy as np
import numpy.core.umath as umath
import numpy.core.numerictypes as ntypes
from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue
from numpy import array as narray
from numpy.lib.function_base import angle
from numpy.compat import (
getargspec, formatargspec, long, unicode, bytes
)
from numpy import expand_dims
from numpy.core.numeric import normalize_axis_tuple
__all__ = [
'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute',
'add', 'all', 'allclose', 'allequal', 'alltrue', 'amax', 'amin',
'angle', 'anom', 'anomalies', 'any', 'append', 'arange', 'arccos',
'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh',
'argmax', 'argmin', 'argsort', 'around', 'array', 'asanyarray',
'asarray', 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'bool_', 'ceil',
'choose', 'clip', 'common_fill_value', 'compress', 'compressed',
'concatenate', 'conjugate', 'convolve', 'copy', 'correlate', 'cos', 'cosh',
'count', 'cumprod', 'cumsum', 'default_fill_value', 'diag', 'diagonal',
'diff', 'divide', 'empty', 'empty_like', 'equal', 'exp',
'expand_dims', 'fabs', 'filled', 'fix_invalid', 'flatten_mask',
'flatten_structured_array', 'floor', 'floor_divide', 'fmod',
'frombuffer', 'fromflex', 'fromfunction', 'getdata', 'getmask',
'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot',
'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA',
'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift',
'less', 'less_equal', 'log', 'log10', 'log2',
'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'make_mask',
'make_mask_descr', 'make_mask_none', 'mask_or', 'masked',
'masked_array', 'masked_equal', 'masked_greater',
'masked_greater_equal', 'masked_inside', 'masked_invalid',
'masked_less', 'masked_less_equal', 'masked_not_equal',
'masked_object', 'masked_outside', 'masked_print_option',
'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum',
'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value',
'mod', 'multiply', 'mvoid', 'ndim', 'negative', 'nomask', 'nonzero',
'not_equal', 'ones', 'ones_like', 'outer', 'outerproduct', 'power', 'prod',
'product', 'ptp', 'put', 'putmask', 'ravel', 'remainder',
'repeat', 'reshape', 'resize', 'right_shift', 'round', 'round_',
'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'soften_mask',
'sometrue', 'sort', 'sqrt', 'squeeze', 'std', 'subtract', 'sum',
'swapaxes', 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide',
'var', 'where', 'zeros', 'zeros_like',
]
MaskType = np.bool_
nomask = MaskType(0)
class MaskedArrayFutureWarning(FutureWarning):
pass
def _deprecate_argsort_axis(arr):
"""
Adjust the axis passed to argsort, warning if necessary
Parameters
----------
arr
The array which argsort was called on
np.ma.argsort has a long-term bug where the default of the axis argument
is wrong (gh-8701), which now must be kept for backwards compatibility.
Thankfully, this only makes a difference when arrays are 2- or more-
dimensional, so we only need a warning then.
"""
if arr.ndim <= 1:
# no warning needed - but switch to -1 anyway, to avoid surprising
# subclasses, which are more likely to implement scalar axes.
return -1
else:
# 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default
warnings.warn(
"In the future the default for argsort will be axis=-1, not the "
"current None, to match its documentation and np.argsort. "
"Explicitly pass -1 or None to silence this warning.",
MaskedArrayFutureWarning, stacklevel=3)
return None
def doc_note(initialdoc, note):
"""
Adds a Notes section to an existing docstring.
"""
if initialdoc is None:
return
if note is None:
return initialdoc
notesplit = re.split(r'\n\s*?Notes\n\s*?-----', inspect.cleandoc(initialdoc))
notedoc = "\n\nNotes\n-----\n%s\n" % inspect.cleandoc(note)
return ''.join(notesplit[:1] + [notedoc] + notesplit[1:])
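# Illustrative behaviour (a sketch, not part of the original source): for a
# docstring without a Notes section the note is simply appended, e.g.
# doc_note("Summary.", "Extra.") -> "Summary.\n\nNotes\n-----\nExtra.\n"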
def get_object_signature(obj):
"""
Get the signature from obj
"""
try:
sig = formatargspec(*getargspec(obj))
except TypeError:
sig = ''
return sig
###############################################################################
# Exceptions #
###############################################################################
class MAError(Exception):
"""
Class for masked array related errors.
"""
pass
class MaskError(MAError):
"""
Class for mask related errors.
"""
pass
###############################################################################
# Filling options #
###############################################################################
# b: boolean - c: complex - f: floats - i: integer - O: object - S: string
default_filler = {'b': True,
'c': 1.e20 + 0.0j,
'f': 1.e20,
'i': 999999,
'O': '?',
'S': b'N/A',
'u': 999999,
'V': b'???',
'U': u'N/A'
}
# Add datetime64 and timedelta64 types
for v in ["Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps",
"fs", "as"]:
default_filler["M8[" + v + "]"] = np.datetime64("NaT", v)
default_filler["m8[" + v + "]"] = np.timedelta64("NaT", v)
float_types_list = [np.half, np.single, np.double, np.longdouble,
np.csingle, np.cdouble, np.clongdouble]
max_filler = ntypes._minvals
max_filler.update([(k, -np.inf) for k in float_types_list[:4]])
max_filler.update([(k, complex(-np.inf, -np.inf)) for k in float_types_list[-3:]])
min_filler = ntypes._maxvals
min_filler.update([(k, +np.inf) for k in float_types_list[:4]])
min_filler.update([(k, complex(+np.inf, +np.inf)) for k in float_types_list[-3:]])
del float_types_list
def _recursive_fill_value(dtype, f):
"""
Recursively produce a fill value for `dtype`, calling f on scalar dtypes
"""
if dtype.names is not None:
vals = tuple(_recursive_fill_value(dtype[name], f) for name in dtype.names)
return np.array(vals, dtype=dtype)[()] # decay to void scalar from 0d
elif dtype.subdtype:
subtype, shape = dtype.subdtype
subval = _recursive_fill_value(subtype, f)
return np.full(shape, subval)
else:
return f(dtype)
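# Illustrative behaviour (hedged sketch): a structured dtype yields a void
# scalar with one fill per field, a subarray dtype yields a filled array, e.g.
# _recursive_fill_value(np.dtype([('a', float), ('b', (np.int64, 2))]),
#                       lambda dt: default_filler.get(dt.kind, '?'))
# -> (1e+20, [999999, 999999])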
def _get_dtype_of(obj):
""" Convert the argument for *_fill_value into a dtype """
if isinstance(obj, np.dtype):
return obj
elif hasattr(obj, 'dtype'):
return obj.dtype
else:
return np.asanyarray(obj).dtype
def default_fill_value(obj):
"""
Return the default fill value for the argument object.
The default filling value depends on the datatype of the input
array or the type of the input scalar:
======== ========
datatype default
======== ========
bool True
int 999999
float 1.e20
complex 1.e20+0j
object '?'
string 'N/A'
======== ========
For structured types, a structured scalar is returned, with each field the
default fill value for its type.
For subarray types, the fill value is an array of the same size containing
the default scalar fill value.
Parameters
----------
obj : ndarray, dtype or scalar
The array data-type or scalar for which the default fill value
is returned.
Returns
-------
fill_value : scalar
The default fill value.
Examples
--------
>>> np.ma.default_fill_value(1)
999999
>>> np.ma.default_fill_value(np.array([1.1, 2., np.pi]))
1e+20
>>> np.ma.default_fill_value(np.dtype(complex))
(1e+20+0j)
"""
def _scalar_fill_value(dtype):
if dtype.kind in 'Mm':
return default_filler.get(dtype.str[1:], '?')
else:
return default_filler.get(dtype.kind, '?')
dtype = _get_dtype_of(obj)
return _recursive_fill_value(dtype, _scalar_fill_value)
def _extremum_fill_value(obj, extremum, extremum_name):
def _scalar_fill_value(dtype):
try:
return extremum[dtype]
except KeyError as e:
raise TypeError(
f"Unsuitable type {dtype} for calculating {extremum_name}."
) from None
dtype = _get_dtype_of(obj)
return _recursive_fill_value(dtype, _scalar_fill_value)
def minimum_fill_value(obj):
"""
Return the maximum value that can be represented by the dtype of an object.
This function is useful for calculating a fill value suitable for
taking the minimum of an array with a given dtype.
Parameters
----------
obj : ndarray, dtype or scalar
An object that can be queried for its numeric type.
Returns
-------
val : scalar
The maximum representable value.
Raises
------
TypeError
If `obj` isn't a suitable numeric type.
See Also
--------
maximum_fill_value : The inverse function.
set_fill_value : Set the filling value of a masked array.
MaskedArray.fill_value : Return current fill value.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.int8()
>>> ma.minimum_fill_value(a)
127
>>> a = np.int32()
>>> ma.minimum_fill_value(a)
2147483647
An array of numeric data can also be passed.
>>> a = np.array([1, 2, 3], dtype=np.int8)
>>> ma.minimum_fill_value(a)
127
>>> a = np.array([1, 2, 3], dtype=np.float32)
>>> ma.minimum_fill_value(a)
inf
"""
return _extremum_fill_value(obj, min_filler, "minimum")
def maximum_fill_value(obj):
"""
Return the minimum value that can be represented by the dtype of an object.
This function is useful for calculating a fill value suitable for
taking the maximum of an array with a given dtype.
Parameters
----------
obj : ndarray, dtype or scalar
An object that can be queried for its numeric type.
Returns
-------
val : scalar
The minimum representable value.
Raises
------
TypeError
If `obj` isn't a suitable numeric type.
See Also
--------
minimum_fill_value : The inverse function.
set_fill_value : Set the filling value of a masked array.
MaskedArray.fill_value : Return current fill value.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.int8()
>>> ma.maximum_fill_value(a)
-128
>>> a = np.int32()
>>> ma.maximum_fill_value(a)
-2147483648
An array of numeric data can also be passed.
>>> a = np.array([1, 2, 3], dtype=np.int8)
>>> ma.maximum_fill_value(a)
-128
>>> a = np.array([1, 2, 3], dtype=np.float32)
>>> ma.maximum_fill_value(a)
-inf
"""
return _extremum_fill_value(obj, max_filler, "maximum")
def _recursive_set_fill_value(fillvalue, dt):
"""
Create a fill value for a structured dtype.
Parameters
----------
fillvalue : scalar or array_like
Scalar or array representing the fill value. If it is of shorter
length than the number of fields in dt, it will be resized.
dt : dtype
The structured dtype for which to create the fill value.
Returns
-------
val : tuple
A tuple of values corresponding to the structured fill value.
"""
fillvalue = np.resize(fillvalue, len(dt.names))
output_value = []
for (fval, name) in zip(fillvalue, dt.names):
cdtype = dt[name]
if cdtype.subdtype:
cdtype = cdtype.subdtype[0]
if cdtype.names is not None:
output_value.append(tuple(_recursive_set_fill_value(fval, cdtype)))
else:
output_value.append(np.array(fval, dtype=cdtype).item())
return tuple(output_value)
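# Illustrative use (assumed from the logic above): a scalar fill value is
# resized to one entry per field and coerced field by field, e.g.
# _recursive_set_fill_value(999999, np.dtype([('a', int), ('b', float)]))
# -> (999999, 999999.0)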
def _check_fill_value(fill_value, ndtype):
"""
Private function validating the given `fill_value` for the given dtype.
If fill_value is None, it is set to the default corresponding to the dtype.
If fill_value is not None, its value is forced to the given dtype.
The result is always a 0d array.
"""
ndtype = np.dtype(ndtype)
if fill_value is None:
fill_value = default_fill_value(ndtype)
elif ndtype.names is not None:
if isinstance(fill_value, (ndarray, np.void)):
try:
fill_value = np.array(fill_value, copy=False, dtype=ndtype)
except ValueError as e:
err_msg = "Unable to transform %s to dtype %s"
raise ValueError(err_msg % (fill_value, ndtype)) from e
else:
fill_value = np.asarray(fill_value, dtype=object)
fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype),
dtype=ndtype)
else:
if isinstance(fill_value, str) and (ndtype.char not in 'OSVU'):
# Note this check doesn't work if fill_value is not a scalar
err_msg = "Cannot set fill value of string with array of dtype %s"
raise TypeError(err_msg % ndtype)
else:
# In case we want to convert 1e20 to int.
# Also in case of converting string arrays.
try:
fill_value = np.array(fill_value, copy=False, dtype=ndtype)
except (OverflowError, ValueError) as e:
# Raise TypeError instead of OverflowError or ValueError.
# OverflowError is seldom used, and the real problem here is
# that the passed fill_value is not compatible with the ndtype.
err_msg = "Cannot convert fill_value %s to dtype %s"
raise TypeError(err_msg % (fill_value, ndtype)) from e
return np.array(fill_value)
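# Illustrative behaviour (hedged): _check_fill_value(None, np.int64) returns
# array(999999), the default integer fill, while a string fill value for a
# float dtype, e.g. _check_fill_value("oops", np.float64), raises TypeError.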
def set_fill_value(a, fill_value):
"""
Set the filling value of a, if a is a masked array.
This function changes the fill value of the masked array `a` in place.
If `a` is not a masked array, the function returns silently, without
doing anything.
Parameters
----------
a : array_like
Input array.
fill_value : dtype
Filling value. A consistency test is performed to make sure
the value is compatible with the dtype of `a`.
Returns
-------
None
Nothing returned by this function.
See Also
--------
maximum_fill_value : Return the default fill value for a dtype.
MaskedArray.fill_value : Return current fill value.
MaskedArray.set_fill_value : Equivalent method.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(5)
>>> a
array([0, 1, 2, 3, 4])
>>> a = ma.masked_where(a < 3, a)
>>> a
masked_array(data=[--, --, --, 3, 4],
mask=[ True, True, True, False, False],
fill_value=999999)
>>> ma.set_fill_value(a, -999)
>>> a
masked_array(data=[--, --, --, 3, 4],
mask=[ True, True, True, False, False],
fill_value=-999)
Nothing happens if `a` is not a masked array.
>>> a = list(range(5))
>>> a
[0, 1, 2, 3, 4]
>>> ma.set_fill_value(a, 100)
>>> a
[0, 1, 2, 3, 4]
>>> a = np.arange(5)
>>> a
array([0, 1, 2, 3, 4])
>>> ma.set_fill_value(a, 100)
>>> a
array([0, 1, 2, 3, 4])
"""
if isinstance(a, MaskedArray):
a.set_fill_value(fill_value)
return
def get_fill_value(a):
"""
Return the filling value of a, if any. Otherwise, returns the
default filling value for that type.
"""
if isinstance(a, MaskedArray):
result = a.fill_value
else:
result = default_fill_value(a)
return result
def common_fill_value(a, b):
"""
Return the common filling value of two masked arrays, if any.
If ``a.fill_value == b.fill_value``, return the fill value,
otherwise return None.
Parameters
----------
a, b : MaskedArray
The masked arrays for which to compare fill values.
Returns
-------
fill_value : scalar or None
The common fill value, or None.
Examples
--------
>>> x = np.ma.array([0, 1.], fill_value=3)
>>> y = np.ma.array([0, 1.], fill_value=3)
>>> np.ma.common_fill_value(x, y)
3.0
"""
t1 = get_fill_value(a)
t2 = get_fill_value(b)
if t1 == t2:
return t1
return None
def filled(a, fill_value=None):
"""
Return input as an array with masked data replaced by a fill value.
If `a` is not a `MaskedArray`, `a` itself is returned.
If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to
``a.fill_value``.
Parameters
----------
a : MaskedArray or array_like
An input object.
fill_value : array_like, optional.
Can be scalar or non-scalar. If non-scalar, the
resulting filled array should be broadcastable
over input array. Default is None.
Returns
-------
a : ndarray
The filled array.
See Also
--------
compressed
Examples
--------
>>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> x.filled()
array([[999999, 1, 2],
[999999, 4, 5],
[ 6, 7, 8]])
>>> x.filled(fill_value=333)
array([[333, 1, 2],
[333, 4, 5],
[ 6, 7, 8]])
>>> x.filled(fill_value=np.arange(3))
array([[0, 1, 2],
[0, 4, 5],
[6, 7, 8]])
"""
if hasattr(a, 'filled'):
return a.filled(fill_value)
elif isinstance(a, ndarray):
# Should we check for contiguity ? and a.flags['CONTIGUOUS']:
return a
elif isinstance(a, dict):
return np.array(a, 'O')
else:
return np.array(a)
def get_masked_subclass(*arrays):
"""
Return the youngest subclass of MaskedArray from a list of (masked) arrays.
In case of siblings, the first listed takes over.
"""
if len(arrays) == 1:
arr = arrays[0]
if isinstance(arr, MaskedArray):
rcls = type(arr)
else:
rcls = MaskedArray
else:
arrcls = [type(a) for a in arrays]
rcls = arrcls[0]
if not issubclass(rcls, MaskedArray):
rcls = MaskedArray
for cls in arrcls[1:]:
if issubclass(cls, rcls):
rcls = cls
# Don't return MaskedConstant as result: revert to MaskedArray
if rcls.__name__ == 'MaskedConstant':
return MaskedArray
return rcls
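# Illustrative behaviour (sketch; ``MASub`` is a hypothetical subclass):
# class MASub(MaskedArray): ...
# get_masked_subclass(np.arange(3), MASub(np.arange(3))) is MASub   # True
# get_masked_subclass(np.arange(3)) is MaskedArray                  # True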
def getdata(a, subok=True):
"""
Return the data of a masked array as an ndarray.
Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``,
else return `a` as a ndarray or subclass (depending on `subok`) if not.
Parameters
----------
a : array_like
Input ``MaskedArray``, alternatively a ndarray or a subclass thereof.
subok : bool
Whether to force the output to be a `pure` ndarray (False) or to
return a subclass of ndarray if appropriate (True, default).
See Also
--------
getmask : Return the mask of a masked array, or nomask.
getmaskarray : Return the mask of a masked array, or full array of False.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(
data=[[1, --],
[3, 4]],
mask=[[False, True],
[False, False]],
fill_value=2)
>>> ma.getdata(a)
array([[1, 2],
[3, 4]])
Equivalently use the ``MaskedArray`` `data` attribute.
>>> a.data
array([[1, 2],
[3, 4]])
"""
try:
data = a._data
except AttributeError:
data = np.array(a, copy=False, subok=subok)
if not subok:
return data.view(ndarray)
return data
get_data = getdata
def fix_invalid(a, mask=nomask, copy=True, fill_value=None):
"""
Return input with invalid data masked and replaced by a fill value.
Invalid data means values of `nan`, `inf`, etc.
Parameters
----------
a : array_like
Input array, a (subclass of) ndarray.
mask : sequence, optional
Mask. Must be convertible to an array of booleans with the same
shape as `data`. True indicates a masked (i.e. invalid) data.
copy : bool, optional
Whether to use a copy of `a` (True) or to fix `a` in place (False).
Default is True.
fill_value : scalar, optional
Value used for fixing invalid data. Default is None, in which case
the ``a.fill_value`` is used.
Returns
-------
b : MaskedArray
The input array with invalid entries fixed.
Notes
-----
A copy is performed by default.
Examples
--------
>>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3)
>>> x
masked_array(data=[--, -1.0, nan, inf],
mask=[ True, False, False, False],
fill_value=1e+20)
>>> np.ma.fix_invalid(x)
masked_array(data=[--, -1.0, --, --],
mask=[ True, False, True, True],
fill_value=1e+20)
>>> fixed = np.ma.fix_invalid(x)
>>> fixed.data
array([ 1.e+00, -1.e+00, 1.e+20, 1.e+20])
>>> x.data
array([ 1., -1., nan, inf])
"""
a = masked_array(a, copy=copy, mask=mask, subok=True)
invalid = np.logical_not(np.isfinite(a._data))
if not invalid.any():
return a
a._mask |= invalid
if fill_value is None:
fill_value = a.fill_value
a._data[invalid] = fill_value
return a
def is_string_or_list_of_strings(val):
return (isinstance(val, str) or
(isinstance(val, list) and val and
builtins.all(isinstance(s, str) for s in val)))
###############################################################################
# Ufuncs #
###############################################################################
ufunc_domain = {}
ufunc_fills = {}
class _DomainCheckInterval:
"""
Define a valid interval, so that :
``domain_check_interval(a,b)(x) == True`` where
``x < a`` or ``x > b``.
"""
def __init__(self, a, b):
"domain_check_interval(a,b)(x) = true where x < a or y > b"
if a > b:
(a, b) = (b, a)
self.a = a
self.b = b
def __call__(self, x):
"Execute the call behavior."
# nans at masked positions cause RuntimeWarnings, even though
# they are masked. To avoid this we suppress warnings.
with np.errstate(invalid='ignore'):
return umath.logical_or(umath.greater(x, self.b),
umath.less(x, self.a))
class _DomainTan:
"""
Define a valid interval for the `tan` function, so that:
``domain_tan(eps)(x) == True`` where ``abs(cos(x)) < eps``
"""
def __init__(self, eps):
"domain_tan(eps) = true where abs(cos(x)) < eps)"
self.eps = eps
def __call__(self, x):
"Executes the call behavior."
with np.errstate(invalid='ignore'):
return umath.less(umath.absolute(umath.cos(x)), self.eps)
class _DomainSafeDivide:
"""
Define a domain for safe division.
"""
def __init__(self, tolerance=None):
self.tolerance = tolerance
def __call__(self, a, b):
# Delay the selection of the tolerance to here in order to reduce numpy
# import times. The calculation of these parameters is a substantial
# component of numpy's import time.
if self.tolerance is None:
self.tolerance = np.finfo(float).tiny
# don't call ma ufuncs from __array_wrap__ which would fail for scalars
a, b = np.asarray(a), np.asarray(b)
with np.errstate(invalid='ignore'):
return umath.absolute(a) * self.tolerance >= umath.absolute(b)
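# Illustrative check (hedged): with the default tolerance (np.finfo(float).tiny)
# a zero denominator is flagged as outside the domain, e.g.
# _DomainSafeDivide()(1.0, 0.0) -> True, _DomainSafeDivide()(1.0, 2.0) -> False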
class _DomainGreater:
"""
DomainGreater(v)(x) is True where x <= v.
"""
def __init__(self, critical_value):
"DomainGreater(v)(x) = true where x <= v"
self.critical_value = critical_value
def __call__(self, x):
"Executes the call behavior."
with np.errstate(invalid='ignore'):
return umath.less_equal(x, self.critical_value)
class _DomainGreaterEqual:
"""
DomainGreaterEqual(v)(x) is True where x < v.
"""
def __init__(self, critical_value):
"DomainGreaterEqual(v)(x) = true where x < v"
self.critical_value = critical_value
def __call__(self, x):
"Executes the call behavior."
with np.errstate(invalid='ignore'):
return umath.less(x, self.critical_value)
class _MaskedUFunc:
def __init__(self, ufunc):
self.f = ufunc
self.__doc__ = ufunc.__doc__
self.__name__ = ufunc.__name__
def __str__(self):
return f"Masked version of {self.f}"
class _MaskedUnaryOperation(_MaskedUFunc):
"""
Defines masked version of unary operations, where invalid values are
pre-masked.
Parameters
----------
mufunc : callable
The function for which to define a masked version. Made available
as ``_MaskedUnaryOperation.f``.
fill : scalar, optional
Filling value, default is 0.
domain : class instance
Domain for the function. Should be one of the ``_Domain*``
classes. Default is None.
"""
def __init__(self, mufunc, fill=0, domain=None):
super().__init__(mufunc)
self.fill = fill
self.domain = domain
ufunc_domain[mufunc] = domain
ufunc_fills[mufunc] = fill
def __call__(self, a, *args, **kwargs):
"""
Execute the call behavior.
"""
d = getdata(a)
# Deal with domain
if self.domain is not None:
# Case 1.1. : Domained function
# nans at masked positions cause RuntimeWarnings, even though
# they are masked. To avoid this we suppress warnings.
with np.errstate(divide='ignore', invalid='ignore'):
result = self.f(d, *args, **kwargs)
# Make a mask
m = ~umath.isfinite(result)
m |= self.domain(d)
m |= getmask(a)
else:
# Case 1.2. : Function without a domain
# Get the result and the mask
with np.errstate(divide='ignore', invalid='ignore'):
result = self.f(d, *args, **kwargs)
m = getmask(a)
if not result.ndim:
# Case 2.1. : The result is scalar
if m:
return masked
return result
if m is not nomask:
# Case 2.2. The result is an array
# We need to fill the invalid data back with the input. Now,
# that's plain silly: in C, we would just skip the element and
# keep the original, but we have to do it that way in Python.
# In case result has a lower dtype than the inputs (as in
# equal)
try:
np.copyto(result, d, where=m)
except TypeError:
pass
# Transform to a (subclass of) MaskedArray
masked_result = result.view(get_masked_subclass(a))
masked_result._mask = m
masked_result._update_from(a)
return masked_result
class _MaskedBinaryOperation(_MaskedUFunc):
"""
Define masked version of binary operations, where invalid
values are pre-masked.
Parameters
----------
mbfunc : function
The function for which to define a masked version. Made available
as ``_MaskedBinaryOperation.f``.
fillx : scalar, optional
Filling value for the first argument, default is 0.
filly : scalar, optional
Filling value for the second argument, default is 0.
"""
def __init__(self, mbfunc, fillx=0, filly=0):
"""
mbfunc(fillx, filly) must be defined.
mbfunc(x, filly) = x for all x to enable reduce.
"""
super().__init__(mbfunc)
self.fillx = fillx
self.filly = filly
ufunc_domain[mbfunc] = None
ufunc_fills[mbfunc] = (fillx, filly)
def __call__(self, a, b, *args, **kwargs):
"""
Execute the call behavior.
"""
# Get the data, as ndarray
(da, db) = (getdata(a), getdata(b))
# Get the result
with np.errstate(divide='ignore', invalid='ignore'):
result = self.f(da, db, *args, **kwargs)
# Get the mask for the result
(ma, mb) = (getmask(a), getmask(b))
if ma is nomask:
if mb is nomask:
m = nomask
else:
m = umath.logical_or(getmaskarray(a), mb)
elif mb is nomask:
m = umath.logical_or(ma, getmaskarray(b))
else:
m = umath.logical_or(ma, mb)
# Case 1. : scalar
if not result.ndim:
if m:
return masked
return result
# Case 2. : array
# Revert result to da where masked
if m is not nomask and m.any():
# any errors, just abort; impossible to guarantee masked values
try:
np.copyto(result, da, casting='unsafe', where=m)
except Exception:
pass
# Transforms to a (subclass of) MaskedArray
masked_result = result.view(get_masked_subclass(a, b))
masked_result._mask = m
if isinstance(a, MaskedArray):
masked_result._update_from(a)
elif isinstance(b, MaskedArray):
masked_result._update_from(b)
return masked_result
def reduce(self, target, axis=0, dtype=None):
"""
Reduce `target` along the given `axis`.
"""
tclass = get_masked_subclass(target)
m = getmask(target)
t = filled(target, self.filly)
if t.shape == ():
t = t.reshape(1)
if m is not nomask:
m = make_mask(m, copy=True)
m.shape = (1,)
if m is nomask:
tr = self.f.reduce(t, axis)
mr = nomask
else:
tr = self.f.reduce(t, axis, dtype=dtype)
mr = umath.logical_and.reduce(m, axis)
if not tr.shape:
if mr:
return masked
else:
return tr
masked_tr = tr.view(tclass)
masked_tr._mask = mr
return masked_tr
def outer(self, a, b):
"""
Return the function applied to the outer product of a and b.
"""
(da, db) = (getdata(a), getdata(b))
d = self.f.outer(da, db)
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
m = nomask
else:
ma = getmaskarray(a)
mb = getmaskarray(b)
m = umath.logical_or.outer(ma, mb)
if (not m.ndim) and m:
return masked
if m is not nomask:
np.copyto(d, da, where=m)
if not d.shape:
return d
masked_d = d.view(get_masked_subclass(a, b))
masked_d._mask = m
return masked_d
def accumulate(self, target, axis=0):
"""Accumulate `target` along `axis` after filling with y fill
value.
"""
tclass = get_masked_subclass(target)
t = filled(target, self.filly)
result = self.f.accumulate(t, axis)
masked_result = result.view(tclass)
return masked_result
class _DomainedBinaryOperation(_MaskedUFunc):
"""
Define binary operations that have a domain, like divide.
They have no reduce, outer or accumulate.
Parameters
----------
dbfunc : function
The function for which to define a masked version. Made available
as ``_DomainedBinaryOperation.f``.
domain : class instance
Default domain for the function. Should be one of the ``_Domain*``
classes.
fillx : scalar, optional
Filling value for the first argument, default is 0.
filly : scalar, optional
Filling value for the second argument, default is 0.
"""
def __init__(self, dbfunc, domain, fillx=0, filly=0):
"""abfunc(fillx, filly) must be defined.
abfunc(x, filly) = x for all x to enable reduce.
"""
super().__init__(dbfunc)
self.domain = domain
self.fillx = fillx
self.filly = filly
ufunc_domain[dbfunc] = domain
ufunc_fills[dbfunc] = (fillx, filly)
def __call__(self, a, b, *args, **kwargs):
"Execute the call behavior."
# Get the data
(da, db) = (getdata(a), getdata(b))
# Get the result
with np.errstate(divide='ignore', invalid='ignore'):
result = self.f(da, db, *args, **kwargs)
# Get the mask as a combination of the source masks and invalid
m = ~umath.isfinite(result)
m |= getmask(a)
m |= getmask(b)
# Apply the domain
domain = ufunc_domain.get(self.f, None)
if domain is not None:
m |= domain(da, db)
# Take care of the scalar case first
if not m.ndim:
if m:
return masked
else:
return result
# When the mask is True, put back da if possible
# any errors, just abort; impossible to guarantee masked values
try:
np.copyto(result, 0, casting='unsafe', where=m)
# avoid using "*" since this may be overlaid
masked_da = umath.multiply(m, da)
# only add back if it can be cast safely
if np.can_cast(masked_da.dtype, result.dtype, casting='safe'):
result += masked_da
except Exception:
pass
# Transforms to a (subclass of) MaskedArray
masked_result = result.view(get_masked_subclass(a, b))
masked_result._mask = m
if isinstance(a, MaskedArray):
masked_result._update_from(a)
elif isinstance(b, MaskedArray):
masked_result._update_from(b)
return masked_result
# Unary ufuncs
exp = _MaskedUnaryOperation(umath.exp)
conjugate = _MaskedUnaryOperation(umath.conjugate)
sin = _MaskedUnaryOperation(umath.sin)
cos = _MaskedUnaryOperation(umath.cos)
arctan = _MaskedUnaryOperation(umath.arctan)
arcsinh = _MaskedUnaryOperation(umath.arcsinh)
sinh = _MaskedUnaryOperation(umath.sinh)
cosh = _MaskedUnaryOperation(umath.cosh)
tanh = _MaskedUnaryOperation(umath.tanh)
abs = absolute = _MaskedUnaryOperation(umath.absolute)
angle = _MaskedUnaryOperation(angle) # from numpy.lib.function_base
fabs = _MaskedUnaryOperation(umath.fabs)
negative = _MaskedUnaryOperation(umath.negative)
floor = _MaskedUnaryOperation(umath.floor)
ceil = _MaskedUnaryOperation(umath.ceil)
around = _MaskedUnaryOperation(np.round_)
logical_not = _MaskedUnaryOperation(umath.logical_not)
# Domained unary ufuncs
sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0,
_DomainGreaterEqual(0.0))
log = _MaskedUnaryOperation(umath.log, 1.0,
_DomainGreater(0.0))
log2 = _MaskedUnaryOperation(umath.log2, 1.0,
_DomainGreater(0.0))
log10 = _MaskedUnaryOperation(umath.log10, 1.0,
_DomainGreater(0.0))
tan = _MaskedUnaryOperation(umath.tan, 0.0,
_DomainTan(1e-35))
arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0,
_DomainCheckInterval(-1.0, 1.0))
arccos = _MaskedUnaryOperation(umath.arccos, 0.0,
_DomainCheckInterval(-1.0, 1.0))
arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0,
_DomainGreaterEqual(1.0))
arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0,
_DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15))
# Binary ufuncs
add = _MaskedBinaryOperation(umath.add)
subtract = _MaskedBinaryOperation(umath.subtract)
multiply = _MaskedBinaryOperation(umath.multiply, 1, 1)
arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0)
equal = _MaskedBinaryOperation(umath.equal)
equal.reduce = None
not_equal = _MaskedBinaryOperation(umath.not_equal)
not_equal.reduce = None
less_equal = _MaskedBinaryOperation(umath.less_equal)
less_equal.reduce = None
greater_equal = _MaskedBinaryOperation(umath.greater_equal)
greater_equal.reduce = None
less = _MaskedBinaryOperation(umath.less)
less.reduce = None
greater = _MaskedBinaryOperation(umath.greater)
greater.reduce = None
logical_and = _MaskedBinaryOperation(umath.logical_and)
alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce
logical_or = _MaskedBinaryOperation(umath.logical_or)
sometrue = logical_or.reduce
logical_xor = _MaskedBinaryOperation(umath.logical_xor)
bitwise_and = _MaskedBinaryOperation(umath.bitwise_and)
bitwise_or = _MaskedBinaryOperation(umath.bitwise_or)
bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor)
hypot = _MaskedBinaryOperation(umath.hypot)
# Domained binary ufuncs
divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1)
true_divide = _DomainedBinaryOperation(umath.true_divide,
_DomainSafeDivide(), 0, 1)
floor_divide = _DomainedBinaryOperation(umath.floor_divide,
_DomainSafeDivide(), 0, 1)
remainder = _DomainedBinaryOperation(umath.remainder,
_DomainSafeDivide(), 0, 1)
fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1)
mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1)
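# Illustrative behaviour (hedged): division by zero is masked instead of
# raising or returning inf, e.g.
# np.ma.divide(np.ma.array([1.0, 2.0]), np.ma.array([2.0, 0.0]))
# -> masked_array(data=[0.5, --], mask=[False, True], ...)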
###############################################################################
# Mask creation functions #
###############################################################################
def _replace_dtype_fields_recursive(dtype, primitive_dtype):
"Private function allowing recursion in _replace_dtype_fields."
_recurse = _replace_dtype_fields_recursive
# Do we have some name fields ?
if dtype.names is not None:
descr = []
for name in dtype.names:
field = dtype.fields[name]
if len(field) == 3:
# Prepend the title to the name
name = (field[-1], name)
descr.append((name, _recurse(field[0], primitive_dtype)))
new_dtype = np.dtype(descr)
# Is this some kind of composite a la (float,2)
elif dtype.subdtype:
descr = list(dtype.subdtype)
descr[0] = _recurse(dtype.subdtype[0], primitive_dtype)
new_dtype = np.dtype(tuple(descr))
# this is a primitive type, so do a direct replacement
else:
new_dtype = primitive_dtype
# preserve identity of dtypes
if new_dtype == dtype:
new_dtype = dtype
return new_dtype
def _replace_dtype_fields(dtype, primitive_dtype):
"""
Construct a dtype description list from a given dtype.
Returns a new dtype object, with all fields and subtypes in the given type
recursively replaced with `primitive_dtype`.
Arguments are coerced to dtypes first.
"""
dtype = np.dtype(dtype)
primitive_dtype = np.dtype(primitive_dtype)
return _replace_dtype_fields_recursive(dtype, primitive_dtype)
def make_mask_descr(ndtype):
"""
Construct a dtype description list from a given dtype.
Returns a new dtype object, with the type of all fields in `ndtype` to a
boolean type. Field names are not altered.
Parameters
----------
ndtype : dtype
The dtype to convert.
Returns
-------
result : dtype
A dtype that looks like `ndtype`, the type of all fields is boolean.
Examples
--------
>>> import numpy.ma as ma
>>> dtype = np.dtype({'names':['foo', 'bar'],
... 'formats':[np.float32, np.int64]})
>>> dtype
dtype([('foo', '<f4'), ('bar', '<i8')])
>>> ma.make_mask_descr(dtype)
dtype([('foo', '|b1'), ('bar', '|b1')])
>>> ma.make_mask_descr(np.float32)
dtype('bool')
"""
return _replace_dtype_fields(ndtype, MaskType)
def getmask(a):
"""
Return the mask of a masked array, or nomask.
Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the
mask is not `nomask`, else return `nomask`. To guarantee a full array
of booleans of the same shape as a, use `getmaskarray`.
Parameters
----------
a : array_like
Input `MaskedArray` for which the mask is required.
See Also
--------
getdata : Return the data of a masked array as an ndarray.
getmaskarray : Return the mask of a masked array, or full array of False.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(
data=[[1, --],
[3, 4]],
mask=[[False, True],
[False, False]],
fill_value=2)
>>> ma.getmask(a)
array([[False, True],
[False, False]])
Equivalently use the `MaskedArray` `mask` attribute.
>>> a.mask
array([[False, True],
[False, False]])
    Result when mask == ``nomask``
>>> b = ma.masked_array([[1,2],[3,4]])
>>> b
masked_array(
data=[[1, 2],
[3, 4]],
mask=False,
fill_value=999999)
>>> ma.nomask
False
>>> ma.getmask(b) == ma.nomask
True
>>> b.mask == ma.nomask
True
"""
return getattr(a, '_mask', nomask)
get_mask = getmask
def getmaskarray(arr):
"""
Return the mask of a masked array, or full boolean array of False.
Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and
the mask is not `nomask`, else return a full boolean array of False of
the same shape as `arr`.
Parameters
----------
arr : array_like
Input `MaskedArray` for which the mask is required.
See Also
--------
getmask : Return the mask of a masked array, or nomask.
getdata : Return the data of a masked array as an ndarray.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(
data=[[1, --],
[3, 4]],
mask=[[False, True],
[False, False]],
fill_value=2)
>>> ma.getmaskarray(a)
array([[False, True],
[False, False]])
Result when mask == ``nomask``
>>> b = ma.masked_array([[1,2],[3,4]])
>>> b
masked_array(
data=[[1, 2],
[3, 4]],
mask=False,
fill_value=999999)
>>> ma.getmaskarray(b)
array([[False, False],
[False, False]])
"""
mask = getmask(arr)
if mask is nomask:
mask = make_mask_none(np.shape(arr), getattr(arr, 'dtype', None))
return mask
def is_mask(m):
"""
Return True if m is a valid, standard mask.
This function does not check the contents of the input, only that the
type is MaskType. In particular, this function returns False if the
mask has a flexible dtype.
Parameters
----------
m : array_like
Array to test.
Returns
-------
result : bool
True if `m.dtype.type` is MaskType, False otherwise.
See Also
--------
ma.isMaskedArray : Test whether input is an instance of MaskedArray.
Examples
--------
>>> import numpy.ma as ma
>>> m = ma.masked_equal([0, 1, 0, 2, 3], 0)
>>> m
masked_array(data=[--, 1, --, 2, 3],
mask=[ True, False, True, False, False],
fill_value=0)
>>> ma.is_mask(m)
False
>>> ma.is_mask(m.mask)
True
Input must be an ndarray (or have similar attributes)
for it to be considered a valid mask.
>>> m = [False, True, False]
>>> ma.is_mask(m)
False
>>> m = np.array([False, True, False])
>>> m
array([False, True, False])
>>> ma.is_mask(m)
True
    Arrays with structured dtypes don't return True.
>>> dtype = np.dtype({'names':['monty', 'pithon'],
... 'formats':[bool, bool]})
>>> dtype
    dtype([('monty', '?'), ('pithon', '?')])
>>> m = np.array([(True, False), (False, True), (True, False)],
... dtype=dtype)
>>> m
array([( True, False), (False, True), ( True, False)],
dtype=[('monty', '?'), ('pithon', '?')])
>>> ma.is_mask(m)
False
"""
try:
return m.dtype.type is MaskType
except AttributeError:
return False
def _shrink_mask(m):
"""
Shrink a mask to nomask if possible
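    For example, an all-False boolean mask collapses to `nomask`, while
    structured masks are returned unchanged:
    >>> _shrink_mask(np.zeros(3, dtype=MaskType)) is nomask
    True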
"""
if m.dtype.names is None and not m.any():
return nomask
else:
return m
def make_mask(m, copy=False, shrink=True, dtype=MaskType):
"""
Create a boolean mask from an array.
Return `m` as a boolean mask, creating a copy if necessary or requested.
The function can accept any sequence that is convertible to integers,
    or ``nomask``. The contents are not required to be 0s and 1s: values
    of 0 are interpreted as False, everything else as True.
Parameters
----------
m : array_like
Potential mask.
copy : bool, optional
Whether to return a copy of `m` (True) or `m` itself (False).
shrink : bool, optional
Whether to shrink `m` to ``nomask`` if all its values are False.
dtype : dtype, optional
Data-type of the output mask. By default, the output mask has a
dtype of MaskType (bool). If the dtype is flexible, each field has
a boolean dtype. This is ignored when `m` is ``nomask``, in which
case ``nomask`` is always returned.
Returns
-------
result : ndarray
A boolean mask derived from `m`.
Examples
--------
>>> import numpy.ma as ma
>>> m = [True, False, True, True]
>>> ma.make_mask(m)
array([ True, False, True, True])
>>> m = [1, 0, 1, 1]
>>> ma.make_mask(m)
array([ True, False, True, True])
>>> m = [1, 0, 2, -3]
>>> ma.make_mask(m)
array([ True, False, True, True])
Effect of the `shrink` parameter.
>>> m = np.zeros(4)
>>> m
array([0., 0., 0., 0.])
>>> ma.make_mask(m)
False
>>> ma.make_mask(m, shrink=False)
array([False, False, False, False])
Using a flexible `dtype`.
>>> m = [1, 0, 1, 1]
>>> n = [0, 1, 0, 0]
>>> arr = []
>>> for man, mouse in zip(m, n):
... arr.append((man, mouse))
>>> arr
[(1, 0), (0, 1), (1, 0), (1, 0)]
>>> dtype = np.dtype({'names':['man', 'mouse'],
... 'formats':[np.int64, np.int64]})
>>> arr = np.array(arr, dtype=dtype)
>>> arr
array([(1, 0), (0, 1), (1, 0), (1, 0)],
dtype=[('man', '<i8'), ('mouse', '<i8')])
>>> ma.make_mask(arr, dtype=dtype)
    array([( True, False), (False,  True), ( True, False), ( True, False)],
          dtype=[('man', '?'), ('mouse', '?')])
"""
if m is nomask:
return nomask
# Make sure the input dtype is valid.
dtype = make_mask_descr(dtype)
# legacy boolean special case: "existence of fields implies true"
if isinstance(m, ndarray) and m.dtype.fields and dtype == np.bool_:
return np.ones(m.shape, dtype=dtype)
# Fill the mask in case there are missing data; turn it into an ndarray.
result = np.array(filled(m, True), copy=copy, dtype=dtype, subok=True)
# Bas les masques !
if shrink:
result = _shrink_mask(result)
return result
def make_mask_none(newshape, dtype=None):
"""
Return a boolean mask of the given shape, filled with False.
This function returns a boolean ndarray with all entries False, that can
    be used in common mask manipulations. If a structured dtype is specified, the
type of each field is converted to a boolean type.
Parameters
----------
newshape : tuple
A tuple indicating the shape of the mask.
dtype : {None, dtype}, optional
If None, use a MaskType instance. Otherwise, use a new datatype with
the same fields as `dtype`, converted to boolean types.
Returns
-------
result : ndarray
An ndarray of appropriate shape and dtype, filled with False.
See Also
--------
make_mask : Create a boolean mask from an array.
make_mask_descr : Construct a dtype description list from a given dtype.
Examples
--------
>>> import numpy.ma as ma
>>> ma.make_mask_none((3,))
array([False, False, False])
Defining a more complex dtype.
>>> dtype = np.dtype({'names':['foo', 'bar'],
... 'formats':[np.float32, np.int64]})
>>> dtype
dtype([('foo', '<f4'), ('bar', '<i8')])
>>> ma.make_mask_none((3,), dtype=dtype)
array([(False, False), (False, False), (False, False)],
          dtype=[('foo', '?'), ('bar', '?')])
"""
if dtype is None:
result = np.zeros(newshape, dtype=MaskType)
else:
result = np.zeros(newshape, dtype=make_mask_descr(dtype))
return result
def _recursive_mask_or(m1, m2, newmask):
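    """
    Recursively combine the fields of the structured masks `m1` and `m2`
    with logical_or, writing the result into `newmask` in place.
    """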
names = m1.dtype.names
for name in names:
current1 = m1[name]
if current1.dtype.names is not None:
_recursive_mask_or(current1, m2[name], newmask[name])
else:
umath.logical_or(current1, m2[name], newmask[name])
def mask_or(m1, m2, copy=False, shrink=True):
"""
Combine two masks with the ``logical_or`` operator.
The result may be a view on `m1` or `m2` if the other is `nomask`
(i.e. False).
Parameters
----------
m1, m2 : array_like
Input masks.
copy : bool, optional
If copy is False and one of the inputs is `nomask`, return a view
of the other input mask. Defaults to False.
shrink : bool, optional
Whether to shrink the output to `nomask` if all its values are
False. Defaults to True.
Returns
-------
mask : output mask
The result masks values that are masked in either `m1` or `m2`.
Raises
------
ValueError
If `m1` and `m2` have different flexible dtypes.
Examples
--------
>>> m1 = np.ma.make_mask([0, 1, 1, 0])
>>> m2 = np.ma.make_mask([1, 0, 0, 0])
>>> np.ma.mask_or(m1, m2)
array([ True, True, True, False])
"""
if (m1 is nomask) or (m1 is False):
dtype = getattr(m2, 'dtype', MaskType)
return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype)
if (m2 is nomask) or (m2 is False):
dtype = getattr(m1, 'dtype', MaskType)
return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype)
if m1 is m2 and is_mask(m1):
return m1
(dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None))
if dtype1 != dtype2:
raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2))
if dtype1.names is not None:
# Allocate an output mask array with the properly broadcast shape.
newmask = np.empty(np.broadcast(m1, m2).shape, dtype1)
_recursive_mask_or(m1, m2, newmask)
return newmask
return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink)
def flatten_mask(mask):
"""
Returns a completely flattened version of the mask, where nested fields
are collapsed.
Parameters
----------
mask : array_like
Input array, which will be interpreted as booleans.
Returns
-------
flattened_mask : ndarray of bools
The flattened input.
Examples
--------
>>> mask = np.array([0, 0, 1])
>>> np.ma.flatten_mask(mask)
array([False, False, True])
>>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
>>> np.ma.flatten_mask(mask)
array([False, False, False, True])
>>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
>>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype)
>>> np.ma.flatten_mask(mask)
array([False, False, False, False, False, True])
"""
def _flatmask(mask):
"Flatten the mask and returns a (maybe nested) sequence of booleans."
mnames = mask.dtype.names
if mnames is not None:
return [flatten_mask(mask[name]) for name in mnames]
else:
return mask
def _flatsequence(sequence):
"Generates a flattened version of the sequence."
try:
for element in sequence:
if hasattr(element, '__iter__'):
yield from _flatsequence(element)
else:
yield element
except TypeError:
yield sequence
mask = np.asarray(mask)
flattened = _flatsequence(_flatmask(mask))
    return np.array(list(flattened), dtype=bool)
def _check_mask_axis(mask, axis, keepdims=np._NoValue):
"Check whether there are masked values along the given axis"
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
if mask is not nomask:
return mask.all(axis=axis, **kwargs)
return nomask
###############################################################################
# Masking functions #
###############################################################################
def masked_where(condition, a, copy=True):
"""
Mask an array where a condition is met.
Return `a` as an array masked where `condition` is True.
Any masked values of `a` or `condition` are also masked in the output.
Parameters
----------
condition : array_like
Masking condition. When `condition` tests floating point values for
equality, consider using ``masked_values`` instead.
a : array_like
Array to mask.
copy : bool
If True (default) make a copy of `a` in the result. If False modify
`a` in place and return a view.
Returns
-------
result : MaskedArray
The result of masking `a` where `condition` is True.
See Also
--------
masked_values : Mask using floating point equality.
masked_equal : Mask where equal to a given value.
masked_not_equal : Mask where `not` equal to a given value.
masked_less_equal : Mask where less than or equal to a given value.
masked_greater_equal : Mask where greater than or equal to a given value.
masked_less : Mask where less than a given value.
masked_greater : Mask where greater than a given value.
masked_inside : Mask inside a given interval.
masked_outside : Mask outside a given interval.
masked_invalid : Mask invalid values (NaNs or infs).
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_where(a <= 2, a)
masked_array(data=[--, --, --, 3],
mask=[ True, True, True, False],
fill_value=999999)
Mask array `b` conditional on `a`.
>>> b = ['a', 'b', 'c', 'd']
>>> ma.masked_where(a == 2, b)
masked_array(data=['a', 'b', --, 'd'],
mask=[False, False, True, False],
fill_value='N/A',
dtype='<U1')
Effect of the `copy` argument.
>>> c = ma.masked_where(a <= 2, a)
>>> c
masked_array(data=[--, --, --, 3],
mask=[ True, True, True, False],
fill_value=999999)
>>> c[0] = 99
>>> c
masked_array(data=[99, --, --, 3],
mask=[False, True, True, False],
fill_value=999999)
>>> a
array([0, 1, 2, 3])
>>> c = ma.masked_where(a <= 2, a, copy=False)
>>> c[0] = 99
>>> c
masked_array(data=[99, --, --, 3],
mask=[False, True, True, False],
fill_value=999999)
>>> a
array([99, 1, 2, 3])
When `condition` or `a` contain masked values.
>>> a = np.arange(4)
>>> a = ma.masked_where(a == 2, a)
>>> a
masked_array(data=[0, 1, --, 3],
mask=[False, False, True, False],
fill_value=999999)
>>> b = np.arange(4)
>>> b = ma.masked_where(b == 0, b)
>>> b
masked_array(data=[--, 1, 2, 3],
mask=[ True, False, False, False],
fill_value=999999)
>>> ma.masked_where(a == 3, b)
masked_array(data=[--, 1, --, --],
mask=[ True, False, True, True],
fill_value=999999)
"""
# Make sure that condition is a valid standard-type mask.
cond = make_mask(condition, shrink=False)
a = np.array(a, copy=copy, subok=True)
(cshape, ashape) = (cond.shape, a.shape)
if cshape and cshape != ashape:
raise IndexError("Inconsistent shape between the condition and the input"
" (got %s and %s)" % (cshape, ashape))
if hasattr(a, '_mask'):
cond = mask_or(cond, a._mask)
cls = type(a)
else:
cls = MaskedArray
result = a.view(cls)
# Assign to *.mask so that structured masks are handled correctly.
result.mask = _shrink_mask(cond)
    # There is no view of a boolean, so when 'a' is a MaskedArray with nomask
    # the update to the result's mask above has no effect on 'a'; re-link the
    # masks here so that modifying one also modifies the other.
if not copy and hasattr(a, '_mask') and getmask(a) is nomask:
a._mask = result._mask.view()
return result
def masked_greater(x, value, copy=True):
"""
Mask an array where greater than a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x > value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_greater(a, 2)
masked_array(data=[0, 1, 2, --],
mask=[False, False, False, True],
fill_value=999999)
"""
return masked_where(greater(x, value), x, copy=copy)
def masked_greater_equal(x, value, copy=True):
"""
Mask an array where greater than or equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x >= value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_greater_equal(a, 2)
masked_array(data=[0, 1, --, --],
mask=[False, False, True, True],
fill_value=999999)
"""
return masked_where(greater_equal(x, value), x, copy=copy)
def masked_less(x, value, copy=True):
"""
Mask an array where less than a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x < value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_less(a, 2)
masked_array(data=[--, --, 2, 3],
mask=[ True, True, False, False],
fill_value=999999)
"""
return masked_where(less(x, value), x, copy=copy)
def masked_less_equal(x, value, copy=True):
"""
Mask an array where less than or equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x <= value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_less_equal(a, 2)
masked_array(data=[--, --, --, 3],
mask=[ True, True, True, False],
fill_value=999999)
"""
return masked_where(less_equal(x, value), x, copy=copy)
def masked_not_equal(x, value, copy=True):
"""
Mask an array where `not` equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x != value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_not_equal(a, 2)
masked_array(data=[--, --, 2, --],
mask=[ True, True, False, True],
fill_value=999999)
"""
return masked_where(not_equal(x, value), x, copy=copy)
def masked_equal(x, value, copy=True):
"""
Mask an array where equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x == value). For floating point arrays,
consider using ``masked_values(x, value)``.
See Also
--------
masked_where : Mask where a condition is met.
masked_values : Mask using floating point equality.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_equal(a, 2)
masked_array(data=[0, 1, --, 3],
mask=[False, False, True, False],
fill_value=2)
"""
output = masked_where(equal(x, value), x, copy=copy)
output.fill_value = value
return output
def masked_inside(x, v1, v2, copy=True):
"""
Mask an array inside a given interval.
Shortcut to ``masked_where``, where `condition` is True for `x` inside
the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2`
can be given in either order.
See Also
--------
masked_where : Mask where a condition is met.
Notes
-----
The array `x` is prefilled with its filling value.
Examples
--------
>>> import numpy.ma as ma
>>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
>>> ma.masked_inside(x, -0.3, 0.3)
masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1],
mask=[False, False, True, True, False, False],
fill_value=1e+20)
The order of `v1` and `v2` doesn't matter.
>>> ma.masked_inside(x, 0.3, -0.3)
masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1],
mask=[False, False, True, True, False, False],
fill_value=1e+20)
"""
if v2 < v1:
(v1, v2) = (v2, v1)
xf = filled(x)
condition = (xf >= v1) & (xf <= v2)
return masked_where(condition, x, copy=copy)
def masked_outside(x, v1, v2, copy=True):
"""
Mask an array outside a given interval.
Shortcut to ``masked_where``, where `condition` is True for `x` outside
the interval [v1,v2] (x < v1)|(x > v2).
The boundaries `v1` and `v2` can be given in either order.
See Also
--------
masked_where : Mask where a condition is met.
Notes
-----
The array `x` is prefilled with its filling value.
Examples
--------
>>> import numpy.ma as ma
>>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
>>> ma.masked_outside(x, -0.3, 0.3)
masked_array(data=[--, --, 0.01, 0.2, --, --],
mask=[ True, True, False, False, True, True],
fill_value=1e+20)
The order of `v1` and `v2` doesn't matter.
>>> ma.masked_outside(x, 0.3, -0.3)
masked_array(data=[--, --, 0.01, 0.2, --, --],
mask=[ True, True, False, False, True, True],
fill_value=1e+20)
"""
if v2 < v1:
(v1, v2) = (v2, v1)
xf = filled(x)
condition = (xf < v1) | (xf > v2)
return masked_where(condition, x, copy=copy)
def masked_object(x, value, copy=True, shrink=True):
"""
Mask the array `x` where the data are exactly equal to value.
This function is similar to `masked_values`, but only suitable
for object arrays: for floating point, use `masked_values` instead.
Parameters
----------
x : array_like
Array to mask
value : object
Comparison value
copy : {True, False}, optional
Whether to return a copy of `x`.
shrink : {True, False}, optional
Whether to collapse a mask full of False to nomask
Returns
-------
result : MaskedArray
The result of masking `x` where equal to `value`.
See Also
--------
masked_where : Mask where a condition is met.
masked_equal : Mask where equal to a given value (integers).
masked_values : Mask using floating point equality.
Examples
--------
>>> import numpy.ma as ma
>>> food = np.array(['green_eggs', 'ham'], dtype=object)
>>> # don't eat spoiled food
>>> eat = ma.masked_object(food, 'green_eggs')
>>> eat
masked_array(data=[--, 'ham'],
mask=[ True, False],
fill_value='green_eggs',
dtype=object)
    >>> # plain ol' ham is boring
>>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object)
>>> eat = ma.masked_object(fresh_food, 'green_eggs')
>>> eat
masked_array(data=['cheese', 'ham', 'pineapple'],
mask=False,
fill_value='green_eggs',
dtype=object)
Note that `mask` is set to ``nomask`` if possible.
"""
if isMaskedArray(x):
condition = umath.equal(x._data, value)
mask = x._mask
else:
condition = umath.equal(np.asarray(x), value)
mask = nomask
mask = mask_or(mask, make_mask(condition, shrink=shrink))
return masked_array(x, mask=mask, copy=copy, fill_value=value)
def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True):
"""
Mask using floating point equality.
Return a MaskedArray, masked where the data in array `x` are approximately
equal to `value`, determined using `isclose`. The default tolerances for
`masked_values` are the same as those for `isclose`.
For integer types, exact equality is used, in the same way as
`masked_equal`.
The fill_value is set to `value` and the mask is set to ``nomask`` if
possible.
Parameters
----------
x : array_like
Array to mask.
value : float
Masking value.
rtol, atol : float, optional
Tolerance parameters passed on to `isclose`
copy : bool, optional
Whether to return a copy of `x`.
shrink : bool, optional
Whether to collapse a mask full of False to ``nomask``.
Returns
-------
result : MaskedArray
The result of masking `x` where approximately equal to `value`.
See Also
--------
masked_where : Mask where a condition is met.
masked_equal : Mask where equal to a given value (integers).
Examples
--------
>>> import numpy.ma as ma
>>> x = np.array([1, 1.1, 2, 1.1, 3])
>>> ma.masked_values(x, 1.1)
masked_array(data=[1.0, --, 2.0, --, 3.0],
mask=[False, True, False, True, False],
fill_value=1.1)
Note that `mask` is set to ``nomask`` if possible.
>>> ma.masked_values(x, 1.5)
masked_array(data=[1. , 1.1, 2. , 1.1, 3. ],
mask=False,
fill_value=1.5)
    For integers, the fill value will in general differ from that of
    ``masked_equal``.
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> ma.masked_values(x, 2)
masked_array(data=[0, 1, --, 3, 4],
mask=[False, False, True, False, False],
fill_value=2)
>>> ma.masked_equal(x, 2)
masked_array(data=[0, 1, --, 3, 4],
mask=[False, False, True, False, False],
fill_value=2)
"""
xnew = filled(x, value)
if np.issubdtype(xnew.dtype, np.floating):
mask = np.isclose(xnew, value, atol=atol, rtol=rtol)
else:
mask = umath.equal(xnew, value)
ret = masked_array(xnew, mask=mask, copy=copy, fill_value=value)
if shrink:
ret.shrink_mask()
return ret
def masked_invalid(a, copy=True):
"""
Mask an array where invalid values occur (NaNs or infs).
This function is a shortcut to ``masked_where``, with
`condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved.
Only applies to arrays with a dtype where NaNs or infs make sense
(i.e. floating point types), but accepts any array_like object.
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(5, dtype=float)
>>> a[2] = np.NaN
>>> a[3] = np.PINF
>>> a
array([ 0., 1., nan, inf, 4.])
>>> ma.masked_invalid(a)
masked_array(data=[0.0, 1.0, --, --, 4.0],
mask=[False, False, True, True, False],
fill_value=1e+20)
"""
a = np.array(a, copy=copy, subok=True)
mask = getattr(a, '_mask', None)
if mask is not None:
condition = ~(np.isfinite(getdata(a)))
if mask is not nomask:
condition |= mask
cls = type(a)
else:
condition = ~(np.isfinite(a))
cls = MaskedArray
result = a.view(cls)
result._mask = condition
return result
###############################################################################
# Printing options #
###############################################################################
class _MaskedPrintOption:
"""
Handle the string used to represent missing data in a masked array.
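    The module-level instance `masked_print_option` holds the current
    setting. A brief sketch, assuming the default display string is in
    effect:
    >>> str(np.ma.core.masked_print_option)
    '--'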
"""
def __init__(self, display):
"""
Create the masked_print_option object.
"""
self._display = display
self._enabled = True
def display(self):
"""
Display the string to print for masked values.
"""
return self._display
def set_display(self, s):
"""
Set the string to print for masked values.
"""
self._display = s
def enabled(self):
"""
Is the use of the display value enabled?
"""
return self._enabled
def enable(self, shrink=1):
"""
        Enable or disable the display of masked values, according to `shrink`.
"""
self._enabled = shrink
def __str__(self):
return str(self._display)
__repr__ = __str__
# If you single-index into a masked location, you get this object.
masked_print_option = _MaskedPrintOption('--')
def _recursive_printoption(result, mask, printopt):
"""
    Put `printopt` into `result` wherever `mask` is True.
Private function allowing for recursion
"""
names = result.dtype.names
if names is not None:
for name in names:
curdata = result[name]
curmask = mask[name]
_recursive_printoption(curdata, curmask, printopt)
else:
np.copyto(result, printopt, where=mask)
return
# For better or worse, these end in a newline
_legacy_print_templates = dict(
long_std=textwrap.dedent("""\
masked_%(name)s(data =
%(data)s,
%(nlen)s mask =
%(mask)s,
%(nlen)s fill_value = %(fill)s)
"""),
long_flx=textwrap.dedent("""\
masked_%(name)s(data =
%(data)s,
%(nlen)s mask =
%(mask)s,
%(nlen)s fill_value = %(fill)s,
%(nlen)s dtype = %(dtype)s)
"""),
short_std=textwrap.dedent("""\
masked_%(name)s(data = %(data)s,
%(nlen)s mask = %(mask)s,
%(nlen)s fill_value = %(fill)s)
"""),
short_flx=textwrap.dedent("""\
masked_%(name)s(data = %(data)s,
%(nlen)s mask = %(mask)s,
%(nlen)s fill_value = %(fill)s,
%(nlen)s dtype = %(dtype)s)
""")
)
###############################################################################
# MaskedArray class #
###############################################################################
def _recursive_filled(a, mask, fill_value):
"""
Recursively fill `a` with `fill_value`.
"""
names = a.dtype.names
for name in names:
current = a[name]
if current.dtype.names is not None:
_recursive_filled(current, mask[name], fill_value[name])
else:
np.copyto(current, fill_value[name], where=mask[name])
def flatten_structured_array(a):
"""
Flatten a structured array.
The data type of the output is chosen such that it can represent all of the
(nested) fields.
Parameters
----------
a : structured array
Returns
-------
output : masked array or ndarray
A flattened masked array if the input is a masked array, otherwise a
standard ndarray.
Examples
--------
>>> ndtype = [('a', int), ('b', float)]
>>> a = np.array([(1, 1), (2, 2)], dtype=ndtype)
>>> np.ma.flatten_structured_array(a)
array([[1., 1.],
[2., 2.]])
"""
def flatten_sequence(iterable):
"""
Flattens a compound of nested iterables.
"""
for elm in iter(iterable):
if hasattr(elm, '__iter__'):
yield from flatten_sequence(elm)
else:
yield elm
a = np.asanyarray(a)
inishape = a.shape
a = a.ravel()
if isinstance(a, MaskedArray):
out = np.array([tuple(flatten_sequence(d.item())) for d in a._data])
out = out.view(MaskedArray)
out._mask = np.array([tuple(flatten_sequence(d.item()))
for d in getmaskarray(a)])
else:
out = np.array([tuple(flatten_sequence(d.item())) for d in a])
if len(inishape) > 1:
newshape = list(out.shape)
newshape[0] = inishape
out.shape = tuple(flatten_sequence(newshape))
return out
def _arraymethod(funcname, onmask=True):
"""
Return a class method wrapper around a basic array method.
Creates a class method which returns a masked array, where the new
``_data`` array is the output of the corresponding basic method called
on the original ``_data``.
If `onmask` is True, the new mask is the output of the method called
on the initial mask. Otherwise, the new mask is just a reference
to the initial mask.
Parameters
----------
funcname : str
Name of the function to apply on data.
onmask : bool
        Whether the mask must also be processed (True) or left
        alone (False). Default is True.
Returns
-------
method : instancemethod
Class method wrapper of the specified basic array method.
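    Notes
    -----
    Typically used inside the `MaskedArray` class body, e.g.
    ``copy = _arraymethod('copy')``, so that the data and (when `onmask`
    is True) the mask are processed by the same underlying method.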
"""
def wrapped_method(self, *args, **params):
result = getattr(self._data, funcname)(*args, **params)
result = result.view(type(self))
result._update_from(self)
mask = self._mask
if not onmask:
result.__setmask__(mask)
elif mask is not nomask:
# __setmask__ makes a copy, which we don't want
result._mask = getattr(mask, funcname)(*args, **params)
return result
methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None)
if methdoc is not None:
wrapped_method.__doc__ = methdoc.__doc__
wrapped_method.__name__ = funcname
return wrapped_method
class MaskedIterator:
"""
Flat iterator object to iterate over masked arrays.
A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array
`x`. It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
MaskedArray.flat : Return a flat iterator over an array.
MaskedArray.flatten : Returns a flattened copy of an array.
Notes
-----
`MaskedIterator` is not exported by the `ma` module. Instead of
instantiating a `MaskedIterator` directly, use `MaskedArray.flat`.
Examples
--------
    >>> x = np.ma.array(np.arange(6).reshape(2, 3))
>>> fl = x.flat
>>> type(fl)
<class 'numpy.ma.core.MaskedIterator'>
>>> for item in fl:
... print(item)
...
0
1
2
3
4
5
    Extracting more than a single element by indexing the `MaskedIterator`
    returns a masked array:
    >>> fl[2:4]
    masked_array(data=[2, 3],
                 mask=False,
           fill_value=999999)
"""
def __init__(self, ma):
self.ma = ma
self.dataiter = ma._data.flat
if ma._mask is nomask:
self.maskiter = None
else:
self.maskiter = ma._mask.flat
def __iter__(self):
return self
def __getitem__(self, indx):
result = self.dataiter.__getitem__(indx).view(type(self.ma))
if self.maskiter is not None:
_mask = self.maskiter.__getitem__(indx)
if isinstance(_mask, ndarray):
# set shape to match that of data; this is needed for matrices
_mask.shape = result.shape
result._mask = _mask
elif isinstance(_mask, np.void):
return mvoid(result, mask=_mask, hardmask=self.ma._hardmask)
elif _mask: # Just a scalar, masked
return masked
return result
# This won't work if ravel makes a copy
def __setitem__(self, index, value):
self.dataiter[index] = getdata(value)
if self.maskiter is not None:
self.maskiter[index] = getmaskarray(value)
def __next__(self):
"""
Return the next value, or raise StopIteration.
Examples
--------
>>> x = np.ma.array([3, 2], mask=[0, 1])
>>> fl = x.flat
>>> next(fl)
3
>>> next(fl)
masked
>>> next(fl)
Traceback (most recent call last):
...
StopIteration
"""
d = next(self.dataiter)
if self.maskiter is not None:
m = next(self.maskiter)
if isinstance(m, np.void):
return mvoid(d, mask=m, hardmask=self.ma._hardmask)
elif m: # Just a scalar, masked
return masked
return d
class MaskedArray(ndarray):
"""
An array class with possibly masked values.
Masked values of True exclude the corresponding element from any
computation.
Construction::
x = MaskedArray(data, mask=nomask, dtype=None, copy=False, subok=True,
ndmin=0, fill_value=None, keep_mask=True, hard_mask=None,
shrink=True, order=None)
Parameters
----------
data : array_like
Input data.
mask : sequence, optional
Mask. Must be convertible to an array of booleans with the same
shape as `data`. True indicates a masked (i.e. invalid) data.
dtype : dtype, optional
Data type of the output.
If `dtype` is None, the type of the data argument (``data.dtype``)
is used. If `dtype` is not None and different from ``data.dtype``,
a copy is performed.
copy : bool, optional
Whether to copy the input data (True), or to use a reference instead.
Default is False.
subok : bool, optional
Whether to return a subclass of `MaskedArray` if possible (True) or a
plain `MaskedArray`. Default is True.
ndmin : int, optional
Minimum number of dimensions. Default is 0.
fill_value : scalar, optional
Value used to fill in the masked values when necessary.
If None, a default based on the data-type is used.
keep_mask : bool, optional
Whether to combine `mask` with the mask of the input data, if any
(True), or to use only `mask` for the output (False). Default is True.
hard_mask : bool, optional
Whether to use a hard mask or not. With a hard mask, masked values
cannot be unmasked. Default is False.
shrink : bool, optional
Whether to force compression of an empty mask. Default is True.
order : {'C', 'F', 'A'}, optional
Specify the order of the array. If order is 'C', then the array
will be in C-contiguous order (last-index varies the fastest).
If order is 'F', then the returned array will be in
Fortran-contiguous order (first-index varies the fastest).
If order is 'A' (default), then the returned array may be
in any order (either C-, Fortran-contiguous, or even discontiguous),
unless a copy is required, in which case it will be C-contiguous.
Examples
--------
The ``mask`` can be initialized with an array of boolean values
with the same shape as ``data``.
>>> data = np.arange(6).reshape((2, 3))
>>> np.ma.MaskedArray(data, mask=[[False, True, False],
... [False, False, True]])
masked_array(
data=[[0, --, 2],
[3, 4, --]],
mask=[[False, True, False],
[False, False, True]],
fill_value=999999)
Alternatively, the ``mask`` can be initialized to homogeneous boolean
array with the same shape as ``data`` by passing in a scalar
boolean value:
>>> np.ma.MaskedArray(data, mask=False)
masked_array(
data=[[0, 1, 2],
[3, 4, 5]],
mask=[[False, False, False],
[False, False, False]],
fill_value=999999)
>>> np.ma.MaskedArray(data, mask=True)
masked_array(
data=[[--, --, --],
[--, --, --]],
mask=[[ True, True, True],
[ True, True, True]],
fill_value=999999,
dtype=int64)
.. note::
The recommended practice for initializing ``mask`` with a scalar
boolean value is to use ``True``/``False`` rather than
``np.True_``/``np.False_``. The reason is :attr:`nomask`
is represented internally as ``np.False_``.
>>> np.False_ is np.ma.nomask
True
"""
__array_priority__ = 15
_defaultmask = nomask
_defaulthardmask = False
_baseclass = ndarray
# Maximum number of elements per axis used when printing an array. The
# 1d case is handled separately because we need more values in this case.
_print_width = 100
_print_width_1d = 1500
def __new__(cls, data=None, mask=nomask, dtype=None, copy=False,
subok=True, ndmin=0, fill_value=None, keep_mask=True,
hard_mask=None, shrink=True, order=None):
"""
Create a new masked array from scratch.
Notes
-----
A masked array can also be created by taking a .view(MaskedArray).
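        For example:
        >>> np.arange(3).view(np.ma.MaskedArray)
        masked_array(data=[0, 1, 2],
                     mask=False,
               fill_value=999999)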
"""
# Process data.
_data = np.array(data, dtype=dtype, copy=copy,
order=order, subok=True, ndmin=ndmin)
_baseclass = getattr(data, '_baseclass', type(_data))
# Check that we're not erasing the mask.
if isinstance(data, MaskedArray) and (data.shape != _data.shape):
copy = True
# Here, we copy the _view_, so that we can attach new properties to it
# we must never do .view(MaskedConstant), as that would create a new
# instance of np.ma.masked, which make identity comparison fail
if isinstance(data, cls) and subok and not isinstance(data, MaskedConstant):
_data = ndarray.view(_data, type(data))
else:
_data = ndarray.view(_data, cls)
# Handle the case where data is not a subclass of ndarray, but
# still has the _mask attribute like MaskedArrays
if hasattr(data, '_mask') and not isinstance(data, ndarray):
_data._mask = data._mask
# FIXME: should we set `_data._sharedmask = True`?
# Process mask.
# Type of the mask
mdtype = make_mask_descr(_data.dtype)
if mask is nomask:
# Case 1. : no mask in input.
# Erase the current mask ?
if not keep_mask:
# With a reduced version
if shrink:
_data._mask = nomask
# With full version
else:
_data._mask = np.zeros(_data.shape, dtype=mdtype)
# Check whether we missed something
elif isinstance(data, (tuple, list)):
try:
# If data is a sequence of masked array
mask = np.array(
[getmaskarray(np.asanyarray(m, dtype=_data.dtype))
for m in data], dtype=mdtype)
except ValueError:
# If data is nested
mask = nomask
# Force shrinking of the mask if needed (and possible)
if (mdtype == MaskType) and mask.any():
_data._mask = mask
_data._sharedmask = False
else:
_data._sharedmask = not copy
if copy:
_data._mask = _data._mask.copy()
# Reset the shape of the original mask
if getmask(data) is not nomask:
data._mask.shape = data.shape
else:
# Case 2. : With a mask in input.
# If mask is boolean, create an array of True or False
if mask is True and mdtype == MaskType:
mask = np.ones(_data.shape, dtype=mdtype)
elif mask is False and mdtype == MaskType:
mask = np.zeros(_data.shape, dtype=mdtype)
else:
# Read the mask with the current mdtype
try:
mask = np.array(mask, copy=copy, dtype=mdtype)
# Or assume it's a sequence of bool/int
except TypeError:
mask = np.array([tuple([m] * len(mdtype)) for m in mask],
dtype=mdtype)
# Make sure the mask and the data have the same shape
if mask.shape != _data.shape:
(nd, nm) = (_data.size, mask.size)
if nm == 1:
mask = np.resize(mask, _data.shape)
elif nm == nd:
mask = np.reshape(mask, _data.shape)
else:
msg = "Mask and data not compatible: data size is %i, " + \
"mask size is %i."
raise MaskError(msg % (nd, nm))
copy = True
# Set the mask to the new value
if _data._mask is nomask:
_data._mask = mask
_data._sharedmask = not copy
else:
if not keep_mask:
_data._mask = mask
_data._sharedmask = not copy
else:
if _data.dtype.names is not None:
def _recursive_or(a, b):
"do a|=b on each field of a, recursively"
for name in a.dtype.names:
(af, bf) = (a[name], b[name])
if af.dtype.names is not None:
_recursive_or(af, bf)
else:
af |= bf
_recursive_or(_data._mask, mask)
else:
_data._mask = np.logical_or(mask, _data._mask)
_data._sharedmask = False
# Update fill_value.
if fill_value is None:
fill_value = getattr(data, '_fill_value', None)
# But don't run the check unless we have something to check.
if fill_value is not None:
_data._fill_value = _check_fill_value(fill_value, _data.dtype)
# Process extra options ..
if hard_mask is None:
_data._hardmask = getattr(data, '_hardmask', False)
else:
_data._hardmask = hard_mask
_data._baseclass = _baseclass
return _data
def _update_from(self, obj):
"""
Copies some attributes of obj to self.
"""
if isinstance(obj, ndarray):
_baseclass = type(obj)
else:
_baseclass = ndarray
# We need to copy the _basedict to avoid backward propagation
_optinfo = {}
_optinfo.update(getattr(obj, '_optinfo', {}))
_optinfo.update(getattr(obj, '_basedict', {}))
if not isinstance(obj, MaskedArray):
_optinfo.update(getattr(obj, '__dict__', {}))
_dict = dict(_fill_value=getattr(obj, '_fill_value', None),
_hardmask=getattr(obj, '_hardmask', False),
_sharedmask=getattr(obj, '_sharedmask', False),
_isfield=getattr(obj, '_isfield', False),
_baseclass=getattr(obj, '_baseclass', _baseclass),
_optinfo=_optinfo,
_basedict=_optinfo)
self.__dict__.update(_dict)
self.__dict__.update(_optinfo)
return
def __array_finalize__(self, obj):
"""
Finalizes the masked array.
"""
# Get main attributes.
self._update_from(obj)
# We have to decide how to initialize self.mask, based on
# obj.mask. This is very difficult. There might be some
# correspondence between the elements in the array we are being
# created from (= obj) and us. Or there might not. This method can
# be called in all kinds of places for all kinds of reasons -- could
# be empty_like, could be slicing, could be a ufunc, could be a view.
# The numpy subclassing interface simply doesn't give us any way
# to know, which means that at best this method will be based on
# guesswork and heuristics. To make things worse, there isn't even any
# clear consensus about what the desired behavior is. For instance,
# most users think that np.empty_like(marr) -- which goes via this
# method -- should return a masked array with an empty mask (see
# gh-3404 and linked discussions), but others disagree, and they have
# existing code which depends on empty_like returning an array that
# matches the input mask.
#
# Historically our algorithm was: if the template object mask had the
        # same *number of elements* as us, then we used *its mask object
# itself* as our mask, so that writes to us would also write to the
# original array. This is horribly broken in multiple ways.
#
# Now what we do instead is, if the template object mask has the same
# number of elements as us, and we do not have the same base pointer
# as the template object (b/c views like arr[...] should keep the same
# mask), then we make a copy of the template object mask and use
# that. This is also horribly broken but somewhat less so. Maybe.
if isinstance(obj, ndarray):
# XX: This looks like a bug -- shouldn't it check self.dtype
# instead?
if obj.dtype.names is not None:
_mask = getmaskarray(obj)
else:
_mask = getmask(obj)
# If self and obj point to exactly the same data, then probably
# self is a simple view of obj (e.g., self = obj[...]), so they
# should share the same mask. (This isn't 100% reliable, e.g. self
# could be the first row of obj, or have strange strides, but as a
# heuristic it's not bad.) In all other cases, we make a copy of
# the mask, so that future modifications to 'self' do not end up
# side-effecting 'obj' as well.
if (_mask is not nomask and obj.__array_interface__["data"][0]
!= self.__array_interface__["data"][0]):
# We should make a copy. But we could get here via astype,
# in which case the mask might need a new dtype as well
# (e.g., changing to or from a structured dtype), and the
# order could have changed. So, change the mask type if
# needed and use astype instead of copy.
if self.dtype == obj.dtype:
_mask_dtype = _mask.dtype
else:
_mask_dtype = make_mask_descr(self.dtype)
if self.flags.c_contiguous:
order = "C"
elif self.flags.f_contiguous:
order = "F"
else:
order = "K"
_mask = _mask.astype(_mask_dtype, order)
else:
# Take a view so shape changes, etc., do not propagate back.
_mask = _mask.view()
else:
_mask = nomask
self._mask = _mask
# Finalize the mask
if self._mask is not nomask:
try:
self._mask.shape = self.shape
except ValueError:
self._mask = nomask
except (TypeError, AttributeError):
# When _mask.shape is not writable (because it's a void)
pass
# Finalize the fill_value
if self._fill_value is not None:
self._fill_value = _check_fill_value(self._fill_value, self.dtype)
elif self.dtype.names is not None:
# Finalize the default fill_value for structured arrays
self._fill_value = _check_fill_value(None, self.dtype)
def __array_wrap__(self, obj, context=None):
"""
Special hook for ufuncs.
Wraps the numpy array and sets the mask according to context.
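        For example, a ufunc with a restricted domain routes through this
        hook, masking out-of-domain results (a sketch):
        >>> np.log(np.ma.array([1., 0.]))
        masked_array(data=[0.0, --],
                     mask=[False,  True],
               fill_value=1e+20)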
"""
if obj is self: # for in-place operations
result = obj
else:
result = obj.view(type(self))
result._update_from(self)
if context is not None:
result._mask = result._mask.copy()
func, args, out_i = context
# args sometimes contains outputs (gh-10459), which we don't want
input_args = args[:func.nin]
m = reduce(mask_or, [getmaskarray(arg) for arg in input_args])
# Get the domain mask
domain = ufunc_domain.get(func, None)
if domain is not None:
# Take the domain, and make sure it's a ndarray
with np.errstate(divide='ignore', invalid='ignore'):
d = filled(domain(*input_args), True)
if d.any():
# Fill the result where the domain is wrong
try:
# Binary domain: take the last value
fill_value = ufunc_fills[func][-1]
except TypeError:
# Unary domain: just use this one
fill_value = ufunc_fills[func]
except KeyError:
# Domain not recognized, use fill_value instead
fill_value = self.fill_value
np.copyto(result, fill_value, where=d)
# Update the mask
if m is nomask:
m = d
else:
# Don't modify inplace, we risk back-propagation
m = (m | d)
# Make sure the mask has the proper size
if result is not self and result.shape == () and m:
return masked
else:
result._mask = m
result._sharedmask = False
return result
def view(self, dtype=None, type=None, fill_value=None):
"""
Return a view of the MaskedArray data.
Parameters
----------
dtype : data-type or ndarray sub-class, optional
Data-type descriptor of the returned view, e.g., float32 or int16.
The default, None, results in the view having the same data-type
as `a`. As with ``ndarray.view``, dtype can also be specified as
an ndarray sub-class, which then specifies the type of the
returned object (this is equivalent to setting the ``type``
parameter).
type : Python type, optional
Type of the returned view, either ndarray or a subclass. The
default None results in type preservation.
fill_value : scalar, optional
The value to use for invalid entries (None by default).
If None, then this argument is inferred from the passed `dtype`, or
in its absence the original array, as discussed in the notes below.
See Also
--------
numpy.ndarray.view : Equivalent method on ndarray object.
Notes
-----
``a.view()`` is used two different ways:
``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
of the array's memory with a different data-type. This can cause a
reinterpretation of the bytes of memory.
``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
returns an instance of `ndarray_subclass` that looks at the same array
(same shape, dtype, etc.) This does not cause a reinterpretation of the
memory.
If `fill_value` is not specified, but `dtype` is specified (and is not
an ndarray sub-class), the `fill_value` of the MaskedArray will be
reset. If neither `fill_value` nor `dtype` are specified (or if
`dtype` is an ndarray sub-class), then the fill value is preserved.
Finally, if `fill_value` is specified, but `dtype` is not, the fill
value is set to the specified value.
For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
bytes per entry than the previous dtype (for example, converting a
regular array to a structured array), then the behavior of the view
cannot be predicted just from the superficial appearance of ``a`` (shown
by ``print(a)``). It also depends on exactly how ``a`` is stored in
memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
defined as a slice or transpose, etc., the view may give different
results.
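        Examples
        --------
        A minimal sketch: viewing a masked array as a plain ndarray exposes
        the underlying data, masked entries included.
        >>> x = np.ma.array([1., 2., 3.], mask=[False, True, False])
        >>> x.view(np.ndarray)
        array([1., 2., 3.])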
"""
if dtype is None:
if type is None:
output = ndarray.view(self)
else:
output = ndarray.view(self, type)
elif type is None:
try:
if issubclass(dtype, ndarray):
output = ndarray.view(self, dtype)
dtype = None
else:
output = ndarray.view(self, dtype)
except TypeError:
output = ndarray.view(self, dtype)
else:
output = ndarray.view(self, dtype, type)
# also make the mask be a view (so attr changes to the view's
        # mask do not affect the original object's mask)
# (especially important to avoid affecting np.masked singleton)
if getmask(output) is not nomask:
output._mask = output._mask.view()
# Make sure to reset the _fill_value if needed
if getattr(output, '_fill_value', None) is not None:
if fill_value is None:
if dtype is None:
pass # leave _fill_value as is
else:
output._fill_value = None
else:
output.fill_value = fill_value
return output
def __getitem__(self, indx):
"""
x.__getitem__(y) <==> x[y]
Return the item described by i, as a masked array.
"""
# We could directly use ndarray.__getitem__ on self.
# But then we would have to modify __array_finalize__ to prevent the
        # mask from being reshaped if it hasn't been set up properly yet
# So it's easier to stick to the current version
dout = self.data[indx]
_mask = self._mask
def _is_scalar(m):
return not isinstance(m, np.ndarray)
def _scalar_heuristic(arr, elem):
"""
Return whether `elem` is a scalar result of indexing `arr`, or None
if undecidable without promoting nomask to a full mask
"""
# obviously a scalar
if not isinstance(elem, np.ndarray):
return True
# object array scalar indexing can return anything
elif arr.dtype.type is np.object_:
if arr.dtype is not elem.dtype:
# elem is an array, but dtypes do not match, so must be
# an element
return True
# well-behaved subclass that only returns 0d arrays when
# expected - this is not a scalar
elif type(arr).__getitem__ == ndarray.__getitem__:
return False
return None
if _mask is not nomask:
# _mask cannot be a subclass, so it tells us whether we should
# expect a scalar. It also cannot be of dtype object.
mout = _mask[indx]
scalar_expected = _is_scalar(mout)
else:
# attempt to apply the heuristic to avoid constructing a full mask
mout = nomask
scalar_expected = _scalar_heuristic(self.data, dout)
if scalar_expected is None:
# heuristics have failed
# construct a full array, so we can be certain. This is costly.
# we could also fall back on ndarray.__getitem__(self.data, indx)
scalar_expected = _is_scalar(getmaskarray(self)[indx])
# Did we extract a single item?
if scalar_expected:
# A record
if isinstance(dout, np.void):
# We should always re-cast to mvoid, otherwise users can
# change masks on rows that already have masked values, but not
# on rows that have no masked values, which is inconsistent.
return mvoid(dout, mask=mout, hardmask=self._hardmask)
# special case introduced in gh-5962
elif (self.dtype.type is np.object_ and
isinstance(dout, np.ndarray) and
dout is not masked):
# If masked, turn into a MaskedArray, with everything masked.
if mout:
return MaskedArray(dout, mask=True)
else:
return dout
# Just a scalar
else:
if mout:
return masked
else:
return dout
else:
# Force dout to MA
dout = dout.view(type(self))
# Inherit attributes from self
dout._update_from(self)
# Check the fill_value
if is_string_or_list_of_strings(indx):
if self._fill_value is not None:
dout._fill_value = self._fill_value[indx]
# Something like gh-15895 has happened if this check fails.
# _fill_value should always be an ndarray.
if not isinstance(dout._fill_value, np.ndarray):
raise RuntimeError('Internal NumPy error.')
# If we're indexing a multidimensional field in a
# structured array (such as dtype("(2,)i2,(2,)i1")),
# dimensionality goes up (M[field].ndim == M.ndim +
# M.dtype[field].ndim). That's fine for
# M[field] but problematic for M[field].fill_value
# which should have shape () to avoid breaking several
# methods. There is no great way out, so set to
# first element. See issue #6723.
if dout._fill_value.ndim > 0:
if not (dout._fill_value ==
dout._fill_value.flat[0]).all():
warnings.warn(
"Upon accessing multidimensional field "
f"{indx!s}, need to keep dimensionality "
"of fill_value at 0. Discarding "
"heterogeneous fill_value and setting "
f"all to {dout._fill_value[0]!s}.",
stacklevel=2)
# Need to use `.flat[0:1].squeeze(...)` instead of just
# `.flat[0]` to ensure the result is a 0d array and not
# a scalar.
dout._fill_value = dout._fill_value.flat[0:1].squeeze(axis=0)
dout._isfield = True
# Update the mask if needed
if mout is not nomask:
# set shape to match that of data; this is needed for matrices
dout._mask = reshape(mout, dout.shape)
dout._sharedmask = True
# Note: Don't try to check for m.any(), that'll take too long
return dout
def __setitem__(self, indx, value):
"""
x.__setitem__(i, y) <==> x[i]=y
Set item described by index. If value is masked, masks those
locations.
"""
if self is masked:
raise MaskError('Cannot alter the masked element.')
_data = self._data
_mask = self._mask
if isinstance(indx, str):
_data[indx] = value
if _mask is nomask:
self._mask = _mask = make_mask_none(self.shape, self.dtype)
_mask[indx] = getmask(value)
return
_dtype = _data.dtype
if value is masked:
# The mask wasn't set: create a full version.
if _mask is nomask:
_mask = self._mask = make_mask_none(self.shape, _dtype)
# Now, set the mask to its value.
if _dtype.names is not None:
_mask[indx] = tuple([True] * len(_dtype.names))
else:
_mask[indx] = True
return
# Get the _data part of the new value
dval = getattr(value, '_data', value)
# Get the _mask part of the new value
mval = getmask(value)
if _dtype.names is not None and mval is nomask:
mval = tuple([False] * len(_dtype.names))
if _mask is nomask:
# Set the data, then the mask
_data[indx] = dval
if mval is not nomask:
_mask = self._mask = make_mask_none(self.shape, _dtype)
_mask[indx] = mval
elif not self._hardmask:
# Set the data, then the mask
if (isinstance(indx, masked_array) and
not isinstance(value, masked_array)):
_data[indx.data] = dval
else:
_data[indx] = dval
_mask[indx] = mval
elif hasattr(indx, 'dtype') and (indx.dtype == MaskType):
indx = indx * umath.logical_not(_mask)
_data[indx] = dval
else:
if _dtype.names is not None:
err_msg = "Flexible 'hard' masks are not yet supported."
raise NotImplementedError(err_msg)
mindx = mask_or(_mask[indx], mval, copy=True)
dindx = self._data[indx]
if dindx.size > 1:
np.copyto(dindx, dval, where=~mindx)
elif mindx is nomask:
dindx = dval
_data[indx] = dindx
_mask[indx] = mindx
return
# Define so that we can overwrite the setter.
@property
def dtype(self):
return super().dtype
@dtype.setter
def dtype(self, dtype):
super(MaskedArray, type(self)).dtype.__set__(self, dtype)
if self._mask is not nomask:
self._mask = self._mask.view(make_mask_descr(dtype), ndarray)
# Try to reset the shape of the mask (if we don't have a void).
# This raises a ValueError if the dtype change won't work.
try:
self._mask.shape = self.shape
except (AttributeError, TypeError):
pass
@property
def shape(self):
return super().shape
@shape.setter
def shape(self, shape):
super(MaskedArray, type(self)).shape.__set__(self, shape)
# Cannot use self._mask, since it may not (yet) exist when a
# masked matrix sets the shape.
if getmask(self) is not nomask:
self._mask.shape = self.shape
def __setmask__(self, mask, copy=False):
"""
Set the mask.
"""
idtype = self.dtype
current_mask = self._mask
if mask is masked:
mask = True
if current_mask is nomask:
# Make sure the mask is set
# Just don't do anything if there's nothing to do.
if mask is nomask:
return
current_mask = self._mask = make_mask_none(self.shape, idtype)
if idtype.names is None:
# No named fields.
# Hardmask: don't unmask the data
if self._hardmask:
current_mask |= mask
# Softmask: set everything to False
# If it's obviously a compatible scalar, use a quick update
# method.
elif isinstance(mask, (int, float, np.bool_, np.number)):
current_mask[...] = mask
# Otherwise fall back to the slower, general purpose way.
else:
current_mask.flat = mask
else:
            # Named fields.
mdtype = current_mask.dtype
mask = np.array(mask, copy=False)
# Mask is a singleton
if not mask.ndim:
# It's a boolean : make a record
if mask.dtype.kind == 'b':
mask = np.array(tuple([mask.item()] * len(mdtype)),
dtype=mdtype)
# It's a record: make sure the dtype is correct
else:
mask = mask.astype(mdtype)
# Mask is a sequence
else:
# Make sure the new mask is a ndarray with the proper dtype
try:
mask = np.array(mask, copy=copy, dtype=mdtype)
# Or assume it's a sequence of bool/int
except TypeError:
mask = np.array([tuple([m] * len(mdtype)) for m in mask],
dtype=mdtype)
# Hardmask: don't unmask the data
if self._hardmask:
for n in idtype.names:
current_mask[n] |= mask[n]
# Softmask: set everything to False
# If it's obviously a compatible scalar, use a quick update
# method.
elif isinstance(mask, (int, float, np.bool_, np.number)):
current_mask[...] = mask
# Otherwise fall back to the slower, general purpose way.
else:
current_mask.flat = mask
# Reshape if needed
if current_mask.shape:
current_mask.shape = self.shape
return
_set_mask = __setmask__
@property
def mask(self):
""" Current mask. """
# We could try to force a reshape, but that wouldn't work in some
# cases.
# Return a view so that the dtype and shape cannot be changed in place
# This still preserves nomask by identity
return self._mask.view()
@mask.setter
def mask(self, value):
self.__setmask__(value)
@property
def recordmask(self):
"""
Get or set the mask of the array if it has no named fields. For
        structured arrays, returns an ndarray of booleans where entries are
``True`` if **all** the fields are masked, ``False`` otherwise:
>>> x = np.ma.array([(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)],
... mask=[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)],
... dtype=[('a', int), ('b', int)])
>>> x.recordmask
array([False, False, True, False, False])
"""
_mask = self._mask.view(ndarray)
if _mask.dtype.names is None:
return _mask
return np.all(flatten_structured_array(_mask), axis=-1)
@recordmask.setter
def recordmask(self, mask):
raise NotImplementedError("Coming soon: setting the mask per records!")
def harden_mask(self):
"""
Force the mask to hard, preventing unmasking by assignment.
Whether the mask of a masked array is hard or soft is determined by
its `~ma.MaskedArray.hardmask` property. `harden_mask` sets
`~ma.MaskedArray.hardmask` to ``True`` (and returns the modified
self).
See Also
--------
ma.MaskedArray.hardmask
ma.MaskedArray.soften_mask
"""
self._hardmask = True
return self
def soften_mask(self):
"""
Force the mask to soft (default), allowing unmasking by assignment.
Whether the mask of a masked array is hard or soft is determined by
its `~ma.MaskedArray.hardmask` property. `soften_mask` sets
`~ma.MaskedArray.hardmask` to ``False`` (and returns the modified
self).
See Also
--------
ma.MaskedArray.hardmask
ma.MaskedArray.harden_mask
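        Examples
        --------
        A minimal sketch: once softened, assignment unmasks the entry.
        >>> a = np.ma.array([1, 2], mask=[False, True], hard_mask=True)
        >>> a.soften_mask() is a
        True
        >>> a[1] = 10
        >>> a
        masked_array(data=[1, 10],
                     mask=[False, False],
               fill_value=999999)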
"""
self._hardmask = False
return self
@property
def hardmask(self):
"""
Specifies whether values can be unmasked through assignments.
By default, assigning definite values to masked array entries will
unmask them. When `hardmask` is ``True``, the mask will not change
through assignments.
See Also
--------
ma.MaskedArray.harden_mask
ma.MaskedArray.soften_mask
Examples
--------
>>> x = np.arange(10)
>>> m = np.ma.masked_array(x, x>5)
>>> assert not m.hardmask
Since `m` has a soft mask, assigning an element value unmasks that
element:
>>> m[8] = 42
>>> m
masked_array(data=[0, 1, 2, 3, 4, 5, --, --, 42, --],
mask=[False, False, False, False, False, False,
True, True, False, True],
fill_value=999999)
After hardening, the mask is not affected by assignments:
>>> hardened = np.ma.harden_mask(m)
>>> assert m.hardmask and hardened is m
>>> m[:] = 23
>>> m
masked_array(data=[23, 23, 23, 23, 23, 23, --, --, 23, --],
mask=[False, False, False, False, False, False,
True, True, False, True],
fill_value=999999)
"""
return self._hardmask
def unshare_mask(self):
"""
Copy the mask and set the `sharedmask` flag to ``False``.
Whether the mask is shared between masked arrays can be seen from
the `sharedmask` property. `unshare_mask` ensures the mask is not
shared. A copy of the mask is only made if it was shared.
See Also
--------
sharedmask
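        Examples
        --------
        A minimal sketch; after the call, the mask is guaranteed not to
        be shared.
        >>> x = np.ma.array([1, 2, 3], mask=[0, 0, 1])
        >>> x.unshare_mask() is x
        True
        >>> x.sharedmask
        False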
"""
if self._sharedmask:
self._mask = self._mask.copy()
self._sharedmask = False
return self
@property
def sharedmask(self):
""" Share status of the mask (read-only). """
return self._sharedmask
def shrink_mask(self):
"""
Reduce a mask to nomask when possible.
Parameters
----------
None
Returns
-------
None
Examples
--------
        >>> x = np.ma.array([[1, 2], [3, 4]], mask=[0]*4)
>>> x.mask
array([[False, False],
[False, False]])
>>> x.shrink_mask()
masked_array(
data=[[1, 2],
[3, 4]],
mask=False,
fill_value=999999)
>>> x.mask
False
"""
self._mask = _shrink_mask(self._mask)
return self
@property
def baseclass(self):
""" Class of the underlying data (read-only). """
return self._baseclass
def _get_data(self):
"""
Returns the underlying data, as a view of the masked array.
If the underlying data is a subclass of :class:`numpy.ndarray`, it is
returned as such.
>>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
>>> x.data
matrix([[1, 2],
[3, 4]])
The type of the data can be accessed through the :attr:`baseclass`
attribute.
"""
return ndarray.view(self, self._baseclass)
_data = property(fget=_get_data)
data = property(fget=_get_data)
@property
def flat(self):
""" Return a flat iterator, or set a flattened version of self to value. """
return MaskedIterator(self)
@flat.setter
def flat(self, value):
y = self.ravel()
y[:] = value
@property
def fill_value(self):
"""
        The filling value of the masked array is a scalar. When setting, None
        will set the fill value to a default based on the data type.
Examples
--------
>>> for dt in [np.int32, np.int64, np.float64, np.complex128]:
... np.ma.array([0, 1], dtype=dt).get_fill_value()
...
999999
999999
1e+20
(1e+20+0j)
>>> x = np.ma.array([0, 1.], fill_value=-np.inf)
>>> x.fill_value
-inf
>>> x.fill_value = np.pi
>>> x.fill_value
3.1415926535897931 # may vary
Reset to default:
>>> x.fill_value = None
>>> x.fill_value
1e+20
"""
if self._fill_value is None:
self._fill_value = _check_fill_value(None, self.dtype)
# Temporary workaround to account for the fact that str and bytes
# scalars cannot be indexed with (), whereas all other numpy
# scalars can. See issues #7259 and #7267.
# The if-block can be removed after #7267 has been fixed.
if isinstance(self._fill_value, ndarray):
return self._fill_value[()]
return self._fill_value
@fill_value.setter
def fill_value(self, value=None):
target = _check_fill_value(value, self.dtype)
        if target.ndim != 0:
# 2019-11-12, 1.18.0
warnings.warn(
"Non-scalar arrays for the fill value are deprecated. Use "
"arrays with scalar values instead. The filled function "
"still supports any array as `fill_value`.",
DeprecationWarning, stacklevel=2)
_fill_value = self._fill_value
if _fill_value is None:
# Create the attribute if it was undefined
self._fill_value = target
else:
# Don't overwrite the attribute, just fill it (for propagation)
_fill_value[()] = target
# kept for compatibility
get_fill_value = fill_value.fget
set_fill_value = fill_value.fset
def filled(self, fill_value=None):
"""
Return a copy of self, with masked values filled with a given value.
**However**, if there are no masked values to fill, self will be
returned instead as an ndarray.
Parameters
----------
fill_value : array_like, optional
The value to use for invalid entries. Can be scalar or non-scalar.
If non-scalar, the resulting ndarray must be broadcastable over
input array. Default is None, in which case, the `fill_value`
attribute of the array is used instead.
Returns
-------
filled_array : ndarray
A copy of ``self`` with invalid entries replaced by *fill_value*
(be it the function argument or the attribute of ``self``), or
``self`` itself as an ndarray if there are no invalid entries to
be replaced.
Notes
-----
The result is **not** a MaskedArray!
Examples
--------
>>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
>>> x.filled()
array([ 1, 2, -999, 4, -999])
>>> x.filled(fill_value=1000)
array([ 1, 2, 1000, 4, 1000])
>>> type(x.filled())
<class 'numpy.ndarray'>
Subclassing is preserved. This means that if, e.g., the data part of
the masked array is a recarray, `filled` returns a recarray:
>>> x = np.array([(-1, 2), (-3, 4)], dtype='i8,i8').view(np.recarray)
>>> m = np.ma.array(x, mask=[(True, False), (False, True)])
>>> m.filled()
rec.array([(999999, 2), ( -3, 999999)],
dtype=[('f0', '<i8'), ('f1', '<i8')])
"""
m = self._mask
if m is nomask:
return self._data
if fill_value is None:
fill_value = self.fill_value
else:
fill_value = _check_fill_value(fill_value, self.dtype)
if self is masked_singleton:
return np.asanyarray(fill_value)
if m.dtype.names is not None:
result = self._data.copy('K')
_recursive_filled(result, self._mask, fill_value)
elif not m.any():
return self._data
else:
result = self._data.copy('K')
try:
np.copyto(result, fill_value, where=m)
except (TypeError, AttributeError):
fill_value = narray(fill_value, dtype=object)
d = result.astype(object)
result = np.choose(m, (d, fill_value))
except IndexError:
# ok, if scalar
if self._data.shape:
raise
elif m:
result = np.array(fill_value, dtype=self.dtype)
else:
result = self._data
return result
def compressed(self):
"""
Return all the non-masked data as a 1-D array.
Returns
-------
data : ndarray
A new `ndarray` holding the non-masked data is returned.
Notes
-----
The result is **not** a MaskedArray!
Examples
--------
>>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3)
>>> x.compressed()
array([0, 1])
>>> type(x.compressed())
<class 'numpy.ndarray'>
"""
data = ndarray.ravel(self._data)
if self._mask is not nomask:
data = data.compress(np.logical_not(ndarray.ravel(self._mask)))
return data
def compress(self, condition, axis=None, out=None):
"""
Return `a` where condition is ``True``.
If condition is a `~ma.MaskedArray`, missing values are considered
as ``False``.
Parameters
----------
        condition : array_like
            Boolean 1-D array selecting which entries to return. If
            ``len(condition)`` is less than the size of `a` along the axis,
            then the output is truncated to the length of the condition array.
axis : {None, int}, optional
Axis along which the operation must be performed.
out : {None, ndarray}, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
Returns
-------
result : MaskedArray
A :class:`~ma.MaskedArray` object.
Notes
-----
        Please note the difference with :meth:`compressed`!
The output of :meth:`compress` has a mask, the output of
:meth:`compressed` does not.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> x
masked_array(
data=[[1, --, 3],
[--, 5, --],
[7, --, 9]],
mask=[[False, True, False],
[ True, False, True],
[False, True, False]],
fill_value=999999)
>>> x.compress([1, 0, 1])
masked_array(data=[1, 3],
mask=[False, False],
fill_value=999999)
>>> x.compress([1, 0, 1], axis=1)
masked_array(
data=[[1, 3],
[--, --],
[7, 9]],
mask=[[False, False],
[ True, True],
[False, False]],
fill_value=999999)
"""
# Get the basic components
(_data, _mask) = (self._data, self._mask)
# Force the condition to a regular ndarray and forget the missing
# values.
condition = np.asarray(condition)
_new = _data.compress(condition, axis=axis, out=out).view(type(self))
_new._update_from(self)
if _mask is not nomask:
_new._mask = _mask.compress(condition, axis=axis)
return _new
def _insert_masked_print(self):
"""
Replace masked values with masked_print_option, casting all innermost
dtypes to object.
"""
if masked_print_option.enabled():
mask = self._mask
if mask is nomask:
res = self._data
else:
# convert to object array to make filled work
data = self._data
# For big arrays, to avoid a costly conversion to the
# object dtype, extract the corners before the conversion.
print_width = (self._print_width if self.ndim > 1
else self._print_width_1d)
for axis in range(self.ndim):
if data.shape[axis] > print_width:
ind = print_width // 2
arr = np.split(data, (ind, -ind), axis=axis)
data = np.concatenate((arr[0], arr[2]), axis=axis)
arr = np.split(mask, (ind, -ind), axis=axis)
mask = np.concatenate((arr[0], arr[2]), axis=axis)
rdtype = _replace_dtype_fields(self.dtype, "O")
res = data.astype(rdtype)
_recursive_printoption(res, mask, masked_print_option)
else:
res = self.filled(self.fill_value)
return res
def __str__(self):
return str(self._insert_masked_print())
def __repr__(self):
"""
Literal string representation.
"""
if self._baseclass is np.ndarray:
name = 'array'
else:
name = self._baseclass.__name__
# 2016-11-19: Demoted to legacy format
if np.core.arrayprint._get_legacy_print_mode() <= 113:
is_long = self.ndim > 1
parameters = dict(
name=name,
nlen=" " * len(name),
data=str(self),
mask=str(self._mask),
fill=str(self.fill_value),
dtype=str(self.dtype)
)
is_structured = bool(self.dtype.names)
key = '{}_{}'.format(
'long' if is_long else 'short',
'flx' if is_structured else 'std'
)
return _legacy_print_templates[key] % parameters
prefix = f"masked_{name}("
dtype_needed = (
not np.core.arrayprint.dtype_is_implied(self.dtype) or
np.all(self.mask) or
self.size == 0
)
# determine which keyword args need to be shown
keys = ['data', 'mask', 'fill_value']
if dtype_needed:
keys.append('dtype')
# array has only one row (non-column)
is_one_row = builtins.all(dim == 1 for dim in self.shape[:-1])
# choose what to indent each keyword with
min_indent = 2
if is_one_row:
# first key on the same line as the type, remaining keys
# aligned by equals
indents = {}
indents[keys[0]] = prefix
for k in keys[1:]:
n = builtins.max(min_indent, len(prefix + keys[0]) - len(k))
indents[k] = ' ' * n
prefix = '' # absorbed into the first indent
else:
# each key on its own line, indented by two spaces
indents = {k: ' ' * min_indent for k in keys}
prefix = prefix + '\n' # first key on the next line
# format the field values
reprs = {}
reprs['data'] = np.array2string(
self._insert_masked_print(),
separator=", ",
prefix=indents['data'] + 'data=',
suffix=',')
reprs['mask'] = np.array2string(
self._mask,
separator=", ",
prefix=indents['mask'] + 'mask=',
suffix=',')
reprs['fill_value'] = repr(self.fill_value)
if dtype_needed:
reprs['dtype'] = np.core.arrayprint.dtype_short_repr(self.dtype)
# join keys with values and indentations
result = ',\n'.join(
'{}{}={}'.format(indents[k], k, reprs[k])
for k in keys
)
return prefix + result + ')'
def _delegate_binop(self, other):
# This emulates the logic in
# private/binop_override.h:forward_binop_should_defer
if isinstance(other, type(self)):
return False
array_ufunc = getattr(other, "__array_ufunc__", False)
if array_ufunc is False:
other_priority = getattr(other, "__array_priority__", -1000000)
return self.__array_priority__ < other_priority
else:
# If array_ufunc is not None, it will be called inside the ufunc;
# None explicitly tells us to not call the ufunc, i.e., defer.
return array_ufunc is None
def _comparison(self, other, compare):
"""Compare self with other using operator.eq or operator.ne.
When either of the elements is masked, the result is masked as well,
but the underlying boolean data are still set, with self and other
considered equal if both are masked, and unequal otherwise.
For structured arrays, all fields are combined, with masked values
ignored. The result is masked if all fields were masked, with self
and other considered equal only if both were fully masked.
"""
omask = getmask(other)
smask = self.mask
mask = mask_or(smask, omask, copy=True)
odata = getdata(other)
if mask.dtype.names is not None:
# For possibly masked structured arrays we need to be careful,
# since the standard structured array comparison will use all
# fields, masked or not. To avoid masked fields influencing the
# outcome, we set all masked fields in self to other, so they'll
# count as equal. To prepare, we ensure we have the right shape.
broadcast_shape = np.broadcast(self, odata).shape
sbroadcast = np.broadcast_to(self, broadcast_shape, subok=True)
sbroadcast._mask = mask
sdata = sbroadcast.filled(odata)
# Now take care of the mask; the merged mask should have an item
# masked if all fields were masked (in one and/or other).
mask = (mask == np.ones((), mask.dtype))
else:
# For regular arrays, just use the data as they come.
sdata = self.data
check = compare(sdata, odata)
if isinstance(check, (np.bool_, bool)):
return masked if mask else check
if mask is not nomask:
# Adjust elements that were masked, which should be treated
# as equal if masked in both, unequal if masked in one.
# Note that this works automatically for structured arrays too.
check = np.where(mask, compare(smask, omask), check)
if mask.shape != check.shape:
                # Guarantee consistency of the shape, making a copy since the
                # mask may need to get written to later.
mask = np.broadcast_to(mask, check.shape).copy()
check = check.view(type(self))
check._update_from(self)
check._mask = mask
# Cast fill value to bool_ if needed. If it cannot be cast, the
# default boolean fill value is used.
if check._fill_value is not None:
try:
fill = _check_fill_value(check._fill_value, np.bool_)
except (TypeError, ValueError):
fill = _check_fill_value(None, np.bool_)
check._fill_value = fill
return check
def __eq__(self, other):
"""Check whether other equals self elementwise.
When either of the elements is masked, the result is masked as well,
but the underlying boolean data are still set, with self and other
considered equal if both are masked, and unequal otherwise.
For structured arrays, all fields are combined, with masked values
ignored. The result is masked if all fields were masked, with self
and other considered equal only if both were fully masked.
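        Examples
        --------
        An illustrative sketch; pairs with a masked element compare as
        masked, while the remaining entries are compared normally.
        >>> a = np.ma.array([1, 2, 3], mask=[0, 1, 0])
        >>> a == np.ma.array([1, 9, 0], mask=[0, 1, 0])
        masked_array(data=[True, --, False],
                     mask=[False, True, False],
               fill_value=True)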
"""
return self._comparison(other, operator.eq)
def __ne__(self, other):
"""Check whether other does not equal self elementwise.
When either of the elements is masked, the result is masked as well,
but the underlying boolean data are still set, with self and other
considered equal if both are masked, and unequal otherwise.
For structured arrays, all fields are combined, with masked values
ignored. The result is masked if all fields were masked, with self
and other considered equal only if both were fully masked.
"""
return self._comparison(other, operator.ne)
def __add__(self, other):
"""
Add self to other, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return add(self, other)
def __radd__(self, other):
"""
Add other to self, and return a new masked array.
"""
# In analogy with __rsub__ and __rdiv__, use original order:
# we get here from `other + self`.
return add(other, self)
def __sub__(self, other):
"""
Subtract other from self, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return subtract(self, other)
def __rsub__(self, other):
"""
Subtract self from other, and return a new masked array.
"""
return subtract(other, self)
def __mul__(self, other):
"Multiply self by other, and return a new masked array."
if self._delegate_binop(other):
return NotImplemented
return multiply(self, other)
def __rmul__(self, other):
"""
Multiply other by self, and return a new masked array.
"""
# In analogy with __rsub__ and __rdiv__, use original order:
# we get here from `other * self`.
return multiply(other, self)
def __div__(self, other):
"""
Divide other into self, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return divide(self, other)
def __truediv__(self, other):
"""
Divide other into self, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return true_divide(self, other)
def __rtruediv__(self, other):
"""
Divide self into other, and return a new masked array.
"""
return true_divide(other, self)
def __floordiv__(self, other):
"""
Divide other into self, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return floor_divide(self, other)
def __rfloordiv__(self, other):
"""
Divide self into other, and return a new masked array.
"""
return floor_divide(other, self)
def __pow__(self, other):
"""
Raise self to the power other, masking the potential NaNs/Infs
"""
if self._delegate_binop(other):
return NotImplemented
return power(self, other)
def __rpow__(self, other):
"""
Raise other to the power self, masking the potential NaNs/Infs
"""
return power(other, self)
def __iadd__(self, other):
"""
Add other to self in-place.
"""
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
else:
if m is not nomask:
self._mask += m
self._data.__iadd__(np.where(self._mask, self.dtype.type(0),
getdata(other)))
return self
def __isub__(self, other):
"""
Subtract other from self in-place.
"""
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
elif m is not nomask:
self._mask += m
self._data.__isub__(np.where(self._mask, self.dtype.type(0),
getdata(other)))
return self
def __imul__(self, other):
"""
Multiply self by other in-place.
"""
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
elif m is not nomask:
self._mask += m
self._data.__imul__(np.where(self._mask, self.dtype.type(1),
getdata(other)))
return self
def __idiv__(self, other):
"""
Divide self by other in-place.
"""
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.divide]
other_data = np.where(dom_mask, fval, other_data)
self._mask |= new_mask
self._data.__idiv__(np.where(self._mask, self.dtype.type(1),
other_data))
return self
def __ifloordiv__(self, other):
"""
Floor divide self by other in-place.
"""
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.floor_divide]
other_data = np.where(dom_mask, fval, other_data)
self._mask |= new_mask
self._data.__ifloordiv__(np.where(self._mask, self.dtype.type(1),
other_data))
return self
def __itruediv__(self, other):
"""
True divide self by other in-place.
"""
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.true_divide]
other_data = np.where(dom_mask, fval, other_data)
self._mask |= new_mask
self._data.__itruediv__(np.where(self._mask, self.dtype.type(1),
other_data))
return self
def __ipow__(self, other):
"""
Raise self to the power other, in place.
"""
other_data = getdata(other)
other_mask = getmask(other)
with np.errstate(divide='ignore', invalid='ignore'):
self._data.__ipow__(np.where(self._mask, self.dtype.type(1),
other_data))
invalid = np.logical_not(np.isfinite(self._data))
if invalid.any():
if self._mask is not nomask:
self._mask |= invalid
else:
self._mask = invalid
np.copyto(self._data, self.fill_value, where=invalid)
new_mask = mask_or(other_mask, invalid)
self._mask = mask_or(self._mask, new_mask)
return self
def __float__(self):
"""
Convert to float.
"""
if self.size > 1:
raise TypeError("Only length-1 arrays can be converted "
"to Python scalars")
elif self._mask:
warnings.warn("Warning: converting a masked element to nan.", stacklevel=2)
return np.nan
return float(self.item())
def __int__(self):
"""
Convert to int.
"""
if self.size > 1:
raise TypeError("Only length-1 arrays can be converted "
"to Python scalars")
elif self._mask:
raise MaskError('Cannot convert masked element to a Python int.')
return int(self.item())
@property
def imag(self):
"""
The imaginary part of the masked array.
This property is a view on the imaginary part of this `MaskedArray`.
See Also
--------
real
Examples
--------
>>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
>>> x.imag
masked_array(data=[1.0, --, 1.6],
mask=[False, True, False],
fill_value=1e+20)
"""
result = self._data.imag.view(type(self))
result.__setmask__(self._mask)
return result
# kept for compatibility
get_imag = imag.fget
@property
def real(self):
"""
The real part of the masked array.
This property is a view on the real part of this `MaskedArray`.
See Also
--------
imag
Examples
--------
>>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
>>> x.real
masked_array(data=[1.0, --, 3.45],
mask=[False, True, False],
fill_value=1e+20)
"""
result = self._data.real.view(type(self))
result.__setmask__(self._mask)
return result
# kept for compatibility
get_real = real.fget
def count(self, axis=None, keepdims=np._NoValue):
"""
Count the non-masked elements of the array along the given axis.
Parameters
----------
axis : None or int or tuple of ints, optional
Axis or axes along which the count is performed.
The default, None, performs the count over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.10.0
If this is a tuple of ints, the count is performed on multiple
axes, instead of a single axis or all the axes as before.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the array.
Returns
-------
result : ndarray or scalar
An array with the same shape as the input array, with the specified
axis removed. If the array is a 0-d array, or if `axis` is None, a
scalar is returned.
See Also
--------
ma.count_masked : Count masked elements in array or along a given axis.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.arange(6).reshape((2, 3))
>>> a[1, :] = ma.masked
>>> a
masked_array(
data=[[0, 1, 2],
[--, --, --]],
mask=[[False, False, False],
[ True, True, True]],
fill_value=999999)
>>> a.count()
3
When the `axis` keyword is specified an array of appropriate size is
returned.
>>> a.count(axis=0)
array([1, 1, 1])
>>> a.count(axis=1)
array([3, 0])
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
m = self._mask
# special case for matrices (we assume no other subclasses modify
# their dimensions)
if isinstance(self.data, np.matrix):
if m is nomask:
m = np.zeros(self.shape, dtype=np.bool_)
m = m.view(type(self.data))
if m is nomask:
# compare to _count_reduce_items in _methods.py
if self.shape == ():
if axis not in (None, 0):
raise np.AxisError(axis=axis, ndim=self.ndim)
return 1
elif axis is None:
if kwargs.get('keepdims', False):
return np.array(self.size, dtype=np.intp, ndmin=self.ndim)
return self.size
axes = normalize_axis_tuple(axis, self.ndim)
items = 1
for ax in axes:
items *= self.shape[ax]
if kwargs.get('keepdims', False):
out_dims = list(self.shape)
for a in axes:
out_dims[a] = 1
else:
out_dims = [d for n, d in enumerate(self.shape)
if n not in axes]
# make sure to return a 0-d array if axis is supplied
return np.full(out_dims, items, dtype=np.intp)
# take care of the masked singleton
if self is masked:
return 0
return (~m).sum(axis=axis, dtype=np.intp, **kwargs)
def ravel(self, order='C'):
"""
Returns a 1D version of self, as a view.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
The elements of `a` are read using this index order. 'C' means to
index the elements in C-like order, with the last axis index
changing fastest, back to the first axis index changing slowest.
'F' means to index the elements in Fortran-like index order, with
the first index changing fastest, and the last index changing
slowest. Note that the 'C' and 'F' options take no account of the
memory layout of the underlying array, and only refer to the order
of axis indexing. 'A' means to read the elements in Fortran-like
            index order if `a` is Fortran *contiguous* in memory, C-like order
otherwise. 'K' means to read the elements in the order they occur
in memory, except for reversing the data when strides are negative.
By default, 'C' index order is used.
Returns
-------
MaskedArray
Output view is of shape ``(self.size,)`` (or
``(np.ma.product(self.shape),)``).
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> x
masked_array(
data=[[1, --, 3],
[--, 5, --],
[7, --, 9]],
mask=[[False, True, False],
[ True, False, True],
[False, True, False]],
fill_value=999999)
>>> x.ravel()
masked_array(data=[1, --, 3, --, 5, --, 7, --, 9],
mask=[False, True, False, True, False, True, False, True,
False],
fill_value=999999)
"""
r = ndarray.ravel(self._data, order=order).view(type(self))
r._update_from(self)
if self._mask is not nomask:
r._mask = ndarray.ravel(self._mask, order=order).reshape(r.shape)
else:
r._mask = nomask
return r
def reshape(self, *s, **kwargs):
"""
Give a new shape to the array without changing its data.
Returns a masked array containing the same data, but with a new shape.
The result is a view on the original array; if this is not possible, a
ValueError is raised.
Parameters
----------
shape : int or tuple of ints
The new shape should be compatible with the original shape. If an
integer is supplied, then the result will be a 1-D array of that
length.
order : {'C', 'F'}, optional
Determines whether the array data should be viewed as in C
(row-major) or FORTRAN (column-major) order.
Returns
-------
reshaped_array : array
A new view on the array.
See Also
--------
reshape : Equivalent function in the masked array module.
numpy.ndarray.reshape : Equivalent method on ndarray object.
numpy.reshape : Equivalent function in the NumPy module.
Notes
-----
        The reshaping operation cannot guarantee that a copy will not be made;
        to modify the shape in place, use ``a.shape = s``.
Examples
--------
>>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1])
>>> x
masked_array(
data=[[--, 2],
[3, --]],
mask=[[ True, False],
[False, True]],
fill_value=999999)
>>> x = x.reshape((4,1))
>>> x
masked_array(
data=[[--],
[2],
[3],
[--]],
mask=[[ True],
[False],
[False],
[ True]],
fill_value=999999)
"""
kwargs.update(order=kwargs.get('order', 'C'))
result = self._data.reshape(*s, **kwargs).view(type(self))
result._update_from(self)
mask = self._mask
if mask is not nomask:
result._mask = mask.reshape(*s, **kwargs)
return result
def resize(self, newshape, refcheck=True, order=False):
"""
.. warning::
This method does nothing, except raise a ValueError exception. A
masked array does not own its data and therefore cannot safely be
resized in place. Use the `numpy.ma.resize` function instead.
This method is difficult to implement safely and may be deprecated in
future releases of NumPy.
"""
# Note : the 'order' keyword looks broken, let's just drop it
errmsg = "A masked array does not own its data "\
"and therefore cannot be resized.\n" \
"Use the numpy.ma.resize function instead."
raise ValueError(errmsg)
def put(self, indices, values, mode='raise'):
"""
Set storage-indexed locations to corresponding values.
Sets self._data.flat[n] = values[n] for each n in indices.
If `values` is shorter than `indices` then it will repeat.
        If `values` has some masked values, the initial mask is updated
        accordingly, else the corresponding values are unmasked.
Parameters
----------
indices : 1-D array_like
Target indices, interpreted as integers.
values : array_like
Values to place in self._data copy at target indices.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
'raise' : raise an error.
'wrap' : wrap around.
'clip' : clip to the range.
Notes
-----
`values` can be a scalar or length 1 array.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> x
masked_array(
data=[[1, --, 3],
[--, 5, --],
[7, --, 9]],
mask=[[False, True, False],
[ True, False, True],
[False, True, False]],
fill_value=999999)
>>> x.put([0,4,8],[10,20,30])
>>> x
masked_array(
data=[[10, --, 3],
[--, 20, --],
[7, --, 30]],
mask=[[False, True, False],
[ True, False, True],
[False, True, False]],
fill_value=999999)
>>> x.put(4,999)
>>> x
masked_array(
data=[[10, --, 3],
[--, 999, --],
[7, --, 30]],
mask=[[False, True, False],
[ True, False, True],
[False, True, False]],
fill_value=999999)
"""
# Hard mask: Get rid of the values/indices that fall on masked data
if self._hardmask and self._mask is not nomask:
mask = self._mask[indices]
indices = narray(indices, copy=False)
values = narray(values, copy=False, subok=True)
values.resize(indices.shape)
indices = indices[~mask]
values = values[~mask]
self._data.put(indices, values, mode=mode)
# short circuit if neither self nor values are masked
if self._mask is nomask and getmask(values) is nomask:
return
m = getmaskarray(self)
if getmask(values) is nomask:
m.put(indices, False, mode=mode)
else:
m.put(indices, values._mask, mode=mode)
m = make_mask(m, copy=False, shrink=True)
self._mask = m
return
def ids(self):
"""
Return the addresses of the data and mask areas.
Parameters
----------
None
Examples
--------
>>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1])
>>> x.ids()
(166670640, 166659832) # may vary
If the array has no mask, the address of `nomask` is returned. This address
is typically not close to the data in memory:
>>> x = np.ma.array([1, 2, 3])
>>> x.ids()
(166691080, 3083169284) # may vary
"""
if self._mask is nomask:
return (self.ctypes.data, id(nomask))
return (self.ctypes.data, self._mask.ctypes.data)
def iscontiguous(self):
"""
Return a boolean indicating whether the data is contiguous.
Parameters
----------
None
Examples
--------
>>> x = np.ma.array([1, 2, 3])
>>> x.iscontiguous()
True
`iscontiguous` returns one of the flags of the masked array:
>>> x.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : True
OWNDATA : False
WRITEABLE : True
ALIGNED : True
WRITEBACKIFCOPY : False
"""
return self.flags['CONTIGUOUS']
def all(self, axis=None, out=None, keepdims=np._NoValue):
"""
Returns True if all elements evaluate to True.
The output array is masked where all the values along the given axis
are masked: if the output would have been a scalar and that all the
values are masked, then the output is `masked`.
Refer to `numpy.all` for full documentation.
See Also
--------
numpy.ndarray.all : corresponding function for ndarrays
numpy.all : equivalent function
Examples
--------
>>> np.ma.array([1,2,3]).all()
True
>>> a = np.ma.array([1,2,3], mask=True)
>>> (a.all() is np.ma.masked)
True
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
mask = _check_mask_axis(self._mask, axis, **kwargs)
if out is None:
d = self.filled(True).all(axis=axis, **kwargs).view(type(self))
if d.ndim:
d.__setmask__(mask)
elif mask:
return masked
return d
self.filled(True).all(axis=axis, out=out, **kwargs)
if isinstance(out, MaskedArray):
if out.ndim or mask:
out.__setmask__(mask)
return out
def any(self, axis=None, out=None, keepdims=np._NoValue):
"""
Returns True if any of the elements of `a` evaluate to True.
Masked values are considered as False during computation.
Refer to `numpy.any` for full documentation.
See Also
--------
numpy.ndarray.any : corresponding function for ndarrays
numpy.any : equivalent function
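        Examples
        --------
        Masked values count as ``False``, and a fully masked array reduces
        to `masked`:
        >>> np.ma.array([0, 1, 0], mask=[0, 1, 0]).any()
        False
        >>> a = np.ma.array([1, 2, 3], mask=True)
        >>> (a.any() is np.ma.masked)
        True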
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
mask = _check_mask_axis(self._mask, axis, **kwargs)
if out is None:
d = self.filled(False).any(axis=axis, **kwargs).view(type(self))
if d.ndim:
d.__setmask__(mask)
elif mask:
d = masked
return d
self.filled(False).any(axis=axis, out=out, **kwargs)
if isinstance(out, MaskedArray):
if out.ndim or mask:
out.__setmask__(mask)
return out
def nonzero(self):
"""
Return the indices of unmasked elements that are not zero.
Returns a tuple of arrays, one for each dimension, containing the
indices of the non-zero elements in that dimension. The corresponding
non-zero values can be obtained with::
a[a.nonzero()]
To group the indices by element, rather than dimension, use
instead::
np.transpose(a.nonzero())
The result of this is always a 2d array, with a row for each non-zero
element.
Parameters
----------
None
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
numpy.nonzero :
Function operating on ndarrays.
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
numpy.ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.array(np.eye(3))
>>> x
masked_array(
data=[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]],
mask=False,
fill_value=1e+20)
>>> x.nonzero()
(array([0, 1, 2]), array([0, 1, 2]))
Masked elements are ignored.
>>> x[1, 1] = ma.masked
>>> x
masked_array(
data=[[1.0, 0.0, 0.0],
[0.0, --, 0.0],
[0.0, 0.0, 1.0]],
mask=[[False, False, False],
[False, True, False],
[False, False, False]],
fill_value=1e+20)
>>> x.nonzero()
(array([0, 2]), array([0, 2]))
Indices can also be grouped by element.
>>> np.transpose(x.nonzero())
array([[0, 0],
[2, 2]])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, ma.nonzero(a > 3)
yields the indices of the `a` where the condition is true.
>>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]])
>>> a > 3
masked_array(
data=[[False, False, False],
[ True, True, True],
[ True, True, True]],
mask=False,
fill_value=True)
>>> ma.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
The ``nonzero`` method of the condition array can also be called.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
return narray(self.filled(0), copy=False).nonzero()
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
        (this docstring is overwritten below by ``ndarray.trace.__doc__``)
"""
#!!!: implement out + test!
m = self._mask
if m is nomask:
result = super().trace(offset=offset, axis1=axis1, axis2=axis2,
out=out)
return result.astype(dtype)
else:
D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)
return D.astype(dtype).filled(0).sum(axis=-1, out=out)
trace.__doc__ = ndarray.trace.__doc__
def dot(self, b, out=None, strict=False):
"""
a.dot(b, out=None)
Masked dot product of two arrays. Note that `out` and `strict` are
located in different positions than in `ma.dot`. In order to
maintain compatibility with the functional version, it is
recommended that the optional arguments be treated as keyword only.
At some point that may be mandatory.
.. versionadded:: 1.10.0
Parameters
----------
b : masked_array_like
            Input array.
out : masked_array, optional
Output argument. This must have the exact kind that would be
returned if it was not used. In particular, it must have the
right type, must be C-contiguous, and its dtype must be the
dtype that would be returned for `ma.dot(a,b)`. This is a
performance feature. Therefore, if these conditions are not
met, an exception is raised, instead of attempting to be
flexible.
strict : bool, optional
Whether masked data are propagated (True) or set to 0 (False)
for the computation. Default is False. Propagating the mask
means that if a masked value appears in a row or column, the
whole row or column is considered masked.
.. versionadded:: 1.10.2
See Also
--------
numpy.ma.dot : equivalent function
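        Examples
        --------
        An illustrative sketch with the default ``strict=False``, where
        masked entries contribute as 0 to the products:
        >>> a = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]])
        >>> a.dot(np.ma.array([[1, 0], [0, 1]]))
        masked_array(
          data=[[1, 0],
                [3, 4]],
          mask=[[False, False],
                [False, False]],
          fill_value=999999)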
"""
return dot(self, b, out=out, strict=strict)
def sum(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the sum of the array elements over the given axis.
Masked elements are set to 0 internally.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.ndarray.sum : corresponding function for ndarrays
numpy.sum : equivalent function
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> x
masked_array(
data=[[1, --, 3],
[--, 5, --],
[7, --, 9]],
mask=[[False, True, False],
[ True, False, True],
[False, True, False]],
fill_value=999999)
>>> x.sum()
25
>>> x.sum(axis=1)
masked_array(data=[4, 5, 16],
mask=[False, False, False],
fill_value=999999)
>>> x.sum(axis=0)
masked_array(data=[8, 5, 12],
mask=[False, False, False],
fill_value=999999)
>>> print(type(x.sum(axis=0, dtype=np.int64)[0]))
<class 'numpy.int64'>
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
_mask = self._mask
newmask = _check_mask_axis(_mask, axis, **kwargs)
# No explicit output
if out is None:
result = self.filled(0).sum(axis, dtype=dtype, **kwargs)
rndim = getattr(result, 'ndim', 0)
if rndim:
result = result.view(type(self))
result.__setmask__(newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(0).sum(axis, dtype=dtype, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getmask(out)
if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
def cumsum(self, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the array elements over the given axis.
Masked values are set to 0 internally during the computation.
However, their position is saved, and the result will be masked at
the same locations.
Refer to `numpy.cumsum` for full documentation.
Notes
-----
        The mask is lost if `out` is not a valid :class:`ma.MaskedArray`!
Arithmetic is modular when using integer types, and no error is
raised on overflow.
See Also
--------
numpy.ndarray.cumsum : corresponding function for ndarrays
numpy.cumsum : equivalent function
Examples
--------
>>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0])
>>> marr.cumsum()
masked_array(data=[0, 1, 3, --, --, --, 9, 16, 24, 33],
mask=[False, False, False, True, True, True, False, False,
False, False],
fill_value=999999)
"""
result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(self.mask)
return out
result = result.view(type(self))
result.__setmask__(self._mask)
return result
def prod(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the product of the array elements over the given axis.
Masked elements are set to 1 internally for computation.
Refer to `numpy.prod` for full documentation.
Notes
-----
Arithmetic is modular when using integer types, and no error is raised
on overflow.
See Also
--------
numpy.ndarray.prod : corresponding function for ndarrays
numpy.prod : equivalent function
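        Examples
        --------
        A small sketch; the masked entry is replaced by 1 internally.
        >>> x = np.ma.array([1, 2, 3], mask=[0, 0, 1])
        >>> x.prod()
        2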
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
_mask = self._mask
newmask = _check_mask_axis(_mask, axis, **kwargs)
# No explicit output
if out is None:
result = self.filled(1).prod(axis, dtype=dtype, **kwargs)
rndim = getattr(result, 'ndim', 0)
if rndim:
result = result.view(type(self))
result.__setmask__(newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(1).prod(axis, dtype=dtype, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getmask(out)
if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
product = prod
def cumprod(self, axis=None, dtype=None, out=None):
"""
Return the cumulative product of the array elements over the given axis.
Masked values are set to 1 internally during the computation.
However, their position is saved, and the result will be masked at
the same locations.
Refer to `numpy.cumprod` for full documentation.
Notes
-----
        The mask is lost if `out` is not a valid MaskedArray!
Arithmetic is modular when using integer types, and no error is
raised on overflow.
See Also
--------
numpy.ndarray.cumprod : corresponding function for ndarrays
numpy.cumprod : equivalent function
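        Examples
        --------
        A small sketch; masked positions stay masked in the result.
        >>> marr = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 0])
        >>> marr.cumprod()
        masked_array(data=[1, --, 3, 12],
                     mask=[False, True, False, False],
               fill_value=999999)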
"""
result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(self._mask)
return out
result = result.view(type(self))
result.__setmask__(self._mask)
return result
def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Returns the average of the array elements along given axis.
Masked entries are ignored, and result elements which are not
finite will be masked.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.ndarray.mean : corresponding function for ndarrays
numpy.mean : Equivalent function
numpy.ma.average : Weighted average.
Examples
--------
>>> a = np.ma.array([1,2,3], mask=[False, False, True])
>>> a
masked_array(data=[1, 2, --],
mask=[False, False, True],
fill_value=999999)
>>> a.mean()
1.5
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
if self._mask is nomask:
result = super().mean(axis=axis, dtype=dtype, **kwargs)[()]
else:
dsum = self.sum(axis=axis, dtype=dtype, **kwargs)
cnt = self.count(axis=axis, **kwargs)
if cnt.shape == () and (cnt == 0):
result = masked
else:
result = dsum * 1. / cnt
if out is not None:
out.flat = result
if isinstance(out, MaskedArray):
outmask = getmask(out)
if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = getmask(result)
return out
return result
def anom(self, axis=None, dtype=None):
"""
Compute the anomalies (deviations from the arithmetic mean)
along the given axis.
Returns an array of anomalies, with the same shape as the input and
where the arithmetic mean is computed along the given axis.
Parameters
----------
axis : int, optional
Axis over which the anomalies are taken.
The default is to use the mean of the flattened array as reference.
dtype : dtype, optional
Type to use in computing the variance. For arrays of integer type
the default is float32; for arrays of float types it is the same as
the array type.
See Also
--------
mean : Compute the mean of the array.
Examples
--------
>>> a = np.ma.array([1,2,3])
>>> a.anom()
masked_array(data=[-1., 0., 1.],
mask=False,
fill_value=1e+20)
"""
        m = self.mean(axis, dtype)
        if not axis:
            # axis is None or 0: the mean broadcasts against self directly
            return self - m
        else:
            return self - expand_dims(m, axis)
def var(self, axis=None, dtype=None, out=None, ddof=0,
keepdims=np._NoValue):
"""
Returns the variance of the array elements along given axis.
Masked entries are ignored, and result elements which are not
finite will be masked.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.ndarray.var : corresponding function for ndarrays
numpy.var : Equivalent function
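        Examples
        --------
        A minimal sketch; the masked entry is excluded from the
        computation (here reducing the count from 4 to 3).
        >>> a = np.ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
        >>> float(a.var())
        0.6666666666666666
        >>> float(a.var(ddof=1))
        1.0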
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
# Easy case: nomask, business as usual
if self._mask is nomask:
ret = super().var(axis=axis, dtype=dtype, out=out, ddof=ddof,
**kwargs)[()]
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(nomask)
return out
return ret
# Some data are masked, yay!
cnt = self.count(axis=axis, **kwargs) - ddof
danom = self - self.mean(axis, dtype, keepdims=True)
if iscomplexobj(self):
danom = umath.absolute(danom) ** 2
else:
danom *= danom
dvar = divide(danom.sum(axis, **kwargs), cnt).view(type(self))
# Apply the mask if it's not a scalar
if dvar.ndim:
dvar._mask = mask_or(self._mask.all(axis, **kwargs), (cnt <= 0))
dvar._update_from(self)
elif getmask(dvar):
# Make sure that masked is returned when the scalar is masked.
dvar = masked
if out is not None:
if isinstance(out, MaskedArray):
out.flat = 0
out.__setmask__(True)
elif out.dtype.kind in 'biu':
errmsg = "Masked data information would be lost in one or "\
"more location."
raise MaskError(errmsg)
else:
out.flat = np.nan
return out
        # In case we have an explicit output
if out is not None:
# Set the data
out.flat = dvar
# Set the mask if needed
if isinstance(out, MaskedArray):
out.__setmask__(dvar.mask)
return out
return dvar
var.__doc__ = np.var.__doc__
def std(self, axis=None, dtype=None, out=None, ddof=0,
keepdims=np._NoValue):
"""
Returns the standard deviation of the array elements along given axis.
Masked entries are ignored.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.ndarray.std : corresponding function for ndarrays
numpy.std : Equivalent function
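        Examples
        --------
        A minimal sketch; `std` is the square root of `var`, ignoring
        masked entries.
        >>> a = np.ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
        >>> float(a.std(ddof=1))
        1.0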
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
dvar = self.var(axis, dtype, out, ddof, **kwargs)
if dvar is not masked:
if out is not None:
np.power(out, 0.5, out=out, casting='unsafe')
return out
dvar = sqrt(dvar)
return dvar
def round(self, decimals=0, out=None):
"""
Return each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.ndarray.round : corresponding function for ndarrays
numpy.around : equivalent function
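        Examples
        --------
        An illustrative sketch; the mask is carried over to the result.
        >>> x = np.ma.array([1.11, 2.55, 3.99], mask=[0, 1, 0])
        >>> x.round(1)
        masked_array(data=[1.1, --, 4.0],
                     mask=[False, True, False],
               fill_value=1e+20)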
"""
result = self._data.round(decimals=decimals, out=out).view(type(self))
if result.ndim > 0:
result._mask = self._mask
result._update_from(self)
elif self._mask:
# Return masked when the scalar is masked
result = masked
# No explicit output: we're done
if out is None:
return result
if isinstance(out, MaskedArray):
out.__setmask__(self._mask)
return out
def argsort(self, axis=np._NoValue, kind=None, order=None,
endwith=True, fill_value=None):
"""
Return an ndarray of indices that sort the array along the
specified axis. Masked values are filled beforehand to
`fill_value`.
Parameters
----------
axis : int, optional
Axis along which to sort. If None, the default, the flattened array
is used.
.. versionchanged:: 1.13.0
Previously, the default was documented to be -1, but that was
in error. At some future date, the default will change to -1, as
originally intended.
Until then, the axis should be given explicitly when
``arr.ndim > 1``, to avoid a FutureWarning.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
The sorting algorithm used.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
endwith : {True, False}, optional
Whether missing values (if any) should be treated as the largest values
            (True) or the smallest values (False).
When the array contains unmasked values at the same extremes of the
datatype, the ordering of these values and the masked values is
undefined.
fill_value : scalar or None, optional
Value used internally for the masked values.
If ``fill_value`` is not None, it supersedes ``endwith``.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
In other words, ``a[index_array]`` yields a sorted `a`.
See Also
--------
ma.MaskedArray.sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
numpy.ndarray.sort : Inplace sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.ma.array([3,2,1], mask=[False, False, True])
>>> a
masked_array(data=[3, 2, --],
mask=[False, False, True],
fill_value=999999)
>>> a.argsort()
array([1, 0, 2])
"""
# 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default
if axis is np._NoValue:
axis = _deprecate_argsort_axis(self)
if fill_value is None:
if endwith:
# nan > inf
if np.issubdtype(self.dtype, np.floating):
fill_value = np.nan
else:
fill_value = minimum_fill_value(self)
else:
fill_value = maximum_fill_value(self)
filled = self.filled(fill_value)
return filled.argsort(axis=axis, kind=kind, order=order)
def argmin(self, axis=None, fill_value=None, out=None, *,
keepdims=np._NoValue):
"""
Return array of indices to the minimum values along the given axis.
Parameters
----------
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis
fill_value : scalar or None, optional
Value used to fill in the masked values. If None, the output of
minimum_fill_value(self._data) is used instead.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
Returns
-------
        ndarray or scalar
            If the input is multi-dimensional, returns a new ndarray of
            indices to the minimum values along the given axis. Otherwise,
            returns a scalar index of the minimum value.
Examples
--------
>>> x = np.ma.array(np.arange(4), mask=[1,1,0,0])
>>> x.shape = (2,2)
>>> x
masked_array(
data=[[--, --],
[2, 3]],
mask=[[ True, True],
[False, False]],
fill_value=999999)
>>> x.argmin(axis=0, fill_value=-1)
array([0, 0])
>>> x.argmin(axis=0, fill_value=9)
array([1, 1])
"""
if fill_value is None:
fill_value = minimum_fill_value(self)
d = self.filled(fill_value).view(ndarray)
keepdims = False if keepdims is np._NoValue else bool(keepdims)
return d.argmin(axis, out=out, keepdims=keepdims)
def argmax(self, axis=None, fill_value=None, out=None, *,
keepdims=np._NoValue):
"""
Returns array of indices of the maximum values along the given axis.
Masked values are treated as if they had the value fill_value.
Parameters
----------
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis
fill_value : scalar or None, optional
Value used to fill in the masked values. If None, the output of
maximum_fill_value(self._data) is used instead.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
Returns
-------
index_array : {integer_array}
Examples
--------
>>> a = np.arange(6).reshape(2,3)
>>> a.argmax()
5
>>> a.argmax(0)
array([1, 1, 1])
>>> a.argmax(1)
array([2, 2])
"""
if fill_value is None:
fill_value = maximum_fill_value(self._data)
d = self.filled(fill_value).view(ndarray)
keepdims = False if keepdims is np._NoValue else bool(keepdims)
return d.argmax(axis, out=out, keepdims=keepdims)
def sort(self, axis=-1, kind=None, order=None,
endwith=True, fill_value=None):
"""
Sort the array, in-place
Parameters
----------
a : array_like
Array to be sorted.
axis : int, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
The sorting algorithm used.
order : list, optional
When `a` is a structured array, this argument specifies which fields
to compare first, second, and so on. This list does not need to
include all of the fields.
endwith : {True, False}, optional
Whether missing values (if any) should be treated as the largest values
            (True) or the smallest values (False).
            When the array contains unmasked values at the same extremes of the
datatype, the ordering of these values and the masked values is
undefined.
fill_value : scalar or None, optional
Value used internally for the masked values.
If ``fill_value`` is not None, it supersedes ``endwith``.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
numpy.ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
Notes
-----
See ``sort`` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # Default
>>> a.sort()
>>> a
masked_array(data=[1, 3, 5, --, --],
mask=[False, False, False, True, True],
fill_value=999999)
>>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # Put missing values in the front
>>> a.sort(endwith=False)
>>> a
masked_array(data=[--, --, 1, 3, 5],
mask=[ True, True, False, False, False],
fill_value=999999)
>>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # fill_value takes over endwith
>>> a.sort(endwith=False, fill_value=3)
>>> a
masked_array(data=[1, --, --, 3, 5],
mask=[False, True, True, False, False],
fill_value=999999)
"""
if self._mask is nomask:
ndarray.sort(self, axis=axis, kind=kind, order=order)
return
if self is masked:
return
sidx = self.argsort(axis=axis, kind=kind, order=order,
fill_value=fill_value, endwith=endwith)
self[...] = np.take_along_axis(self, sidx, axis=axis)
def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
"""
Return the minimum along a given axis.
Parameters
----------
axis : None or int or tuple of ints, optional
Axis along which to operate. By default, ``axis`` is None and the
flattened input is used.
.. versionadded:: 1.7.0
If this is a tuple of ints, the minimum is selected over multiple
axes, instead of a single axis or all the axes as before.
out : array_like, optional
Alternative output array in which to place the result. Must be of
the same shape and buffer length as the expected output.
fill_value : scalar or None, optional
Value used to fill in the masked values.
If None, use the output of `minimum_fill_value`.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the array.
Returns
-------
amin : array_like
New array holding the result.
If ``out`` was specified, ``out`` is returned.
See Also
--------
ma.minimum_fill_value
Returns the minimum filling value for a given datatype.
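        Examples
        --------
        An illustrative sketch; masked entries are ignored when taking
        the minimum.
        >>> x = np.ma.array([[1, 2], [3, 4]], mask=[[1, 0], [0, 1]])
        >>> x.min()
        2
        >>> x.min(axis=0)
        masked_array(data=[3, 2],
                     mask=[False, False],
               fill_value=999999)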
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
_mask = self._mask
newmask = _check_mask_axis(_mask, axis, **kwargs)
if fill_value is None:
fill_value = minimum_fill_value(self)
# No explicit output
if out is None:
result = self.filled(fill_value).min(
axis=axis, out=out, **kwargs).view(type(self))
if result.ndim:
# Set the mask
result.__setmask__(newmask)
# Get rid of Infs
if newmask.ndim:
np.copyto(result, result.fill_value, where=newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(fill_value).min(axis=axis, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getmask(out)
if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
else:
if out.dtype.kind in 'biu':
errmsg = "Masked data information would be lost in one or more"\
" location."
raise MaskError(errmsg)
np.copyto(out, np.nan, where=newmask)
return out
# unique to masked arrays
def mini(self, axis=None):
"""
Return the array minimum along the specified axis.
.. deprecated:: 1.13.0
This function is identical to both:
* ``self.min(keepdims=True, axis=axis).squeeze(axis=axis)``
* ``np.ma.minimum.reduce(self, axis=axis)``
Typically though, ``self.min(axis=axis)`` is sufficient.
Parameters
----------
axis : int, optional
The axis along which to find the minima. Default is None, in which case
the minimum value in the whole array is returned.
Returns
-------
min : scalar or MaskedArray
If `axis` is None, the result is a scalar. Otherwise, if `axis` is
given and the array is at least 2-D, the result is a masked array with
dimension one smaller than the array on which `mini` is called.
Examples
--------
>>> x = np.ma.array(np.arange(6), mask=[0 ,1, 0, 0, 0 ,1]).reshape(3, 2)
>>> x
masked_array(
data=[[0, --],
[2, 3],
[4, --]],
mask=[[False, True],
[False, False],
[False, True]],
fill_value=999999)
>>> x.mini()
masked_array(data=0,
mask=False,
fill_value=999999)
>>> x.mini(axis=0)
masked_array(data=[0, 3],
mask=[False, False],
fill_value=999999)
>>> x.mini(axis=1)
masked_array(data=[0, 2, 4],
mask=[False, False, False],
fill_value=999999)
There is a small difference between `mini` and `min`:
>>> x[:,1].mini(axis=0)
masked_array(data=3,
mask=False,
fill_value=999999)
>>> x[:,1].min(axis=0)
3
"""
# 2016-04-13, 1.13.0, gh-8764
warnings.warn(
"`mini` is deprecated; use the `min` method or "
"`np.ma.minimum.reduce instead.",
DeprecationWarning, stacklevel=2)
return minimum.reduce(self, axis)
def max(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
"""
Return the maximum along a given axis.
Parameters
----------
axis : None or int or tuple of ints, optional
Axis along which to operate. By default, ``axis`` is None and the
flattened input is used.
.. versionadded:: 1.7.0
If this is a tuple of ints, the maximum is selected over multiple
axes, instead of a single axis or all the axes as before.
out : array_like, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
fill_value : scalar or None, optional
Value used to fill in the masked values.
If None, use the output of maximum_fill_value().
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the array.
Returns
-------
amax : array_like
New array holding the result.
If ``out`` was specified, ``out`` is returned.
See Also
--------
ma.maximum_fill_value
Returns the maximum filling value for a given datatype.
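Examples
--------
A small sketch of the expected behavior; the masked entry is
excluded from the search for the maximum:
>>> x = np.ma.array([5, 9, 3], mask=[0, 1, 0])
>>> x.max()
5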
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
_mask = self._mask
newmask = _check_mask_axis(_mask, axis, **kwargs)
if fill_value is None:
fill_value = maximum_fill_value(self)
# No explicit output
if out is None:
result = self.filled(fill_value).max(
axis=axis, out=out, **kwargs).view(type(self))
if result.ndim:
# Set the mask
result.__setmask__(newmask)
# Get rid of Infs
if newmask.ndim:
np.copyto(result, result.fill_value, where=newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(fill_value).max(axis=axis, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getmask(out)
if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
else:
if out.dtype.kind in 'biu':
errmsg = "Masked data information would be lost in one or more"\
" location."
raise MaskError(errmsg)
np.copyto(out, np.nan, where=newmask)
return out
def ptp(self, axis=None, out=None, fill_value=None, keepdims=False):
"""
Return (maximum - minimum) along the given dimension
(i.e. peak-to-peak value).
.. warning::
`ptp` preserves the data type of the array. This means the
return value for an input of signed integers with n bits
(e.g. `np.int8`, `np.int16`, etc) is also a signed integer
with n bits. In that case, peak-to-peak values greater than
``2**(n-1)-1`` will be returned as negative values. An example
with a work-around is shown below.
Parameters
----------
axis : {None, int}, optional
Axis along which to find the peaks. If None (default) the
flattened array is used.
out : {None, array_like}, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
fill_value : scalar or None, optional
Value used to fill in the masked values.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the array.
Returns
-------
ptp : ndarray
A new array holding the result, unless ``out`` was
specified, in which case a reference to ``out`` is returned.
Examples
--------
>>> x = np.ma.MaskedArray([[4, 9, 2, 10],
... [6, 9, 7, 12]])
>>> x.ptp(axis=1)
masked_array(data=[8, 6],
mask=False,
fill_value=999999)
>>> x.ptp(axis=0)
masked_array(data=[2, 0, 5, 2],
mask=False,
fill_value=999999)
>>> x.ptp()
10
This example shows that a negative value can be returned when
the input is an array of signed integers.
>>> y = np.ma.MaskedArray([[1, 127],
... [0, 127],
... [-1, 127],
... [-2, 127]], dtype=np.int8)
>>> y.ptp(axis=1)
masked_array(data=[ 126, 127, -128, -127],
mask=False,
fill_value=999999,
dtype=int8)
A work-around is to use the `view()` method to view the result as
unsigned integers with the same bit width:
>>> y.ptp(axis=1).view(np.uint8)
masked_array(data=[126, 127, 128, 129],
mask=False,
fill_value=999999,
dtype=uint8)
"""
if out is None:
result = self.max(axis=axis, fill_value=fill_value,
keepdims=keepdims)
result -= self.min(axis=axis, fill_value=fill_value,
keepdims=keepdims)
return result
out.flat = self.max(axis=axis, out=out, fill_value=fill_value,
keepdims=keepdims)
min_value = self.min(axis=axis, fill_value=fill_value,
keepdims=keepdims)
np.subtract(out, min_value, out=out, casting='unsafe')
return out
def partition(self, *args, **kwargs):
warnings.warn("Warning: 'partition' will ignore the 'mask' "
f"of the {self.__class__.__name__}.",
stacklevel=2)
return super().partition(*args, **kwargs)
def argpartition(self, *args, **kwargs):
warnings.warn("Warning: 'argpartition' will ignore the 'mask' "
f"of the {self.__class__.__name__}.",
stacklevel=2)
return super().argpartition(*args, **kwargs)
def take(self, indices, axis=None, out=None, mode='raise'):
"""
"""
(_data, _mask) = (self._data, self._mask)
cls = type(self)
# Make sure the indices are not masked
maskindices = getmask(indices)
if maskindices is not nomask:
indices = indices.filled(0)
# Get the data, promoting scalars to 0d arrays with [...] so that
# .view works correctly
if out is None:
out = _data.take(indices, axis=axis, mode=mode)[...].view(cls)
else:
np.take(_data, indices, axis=axis, mode=mode, out=out)
# Get the mask
if isinstance(out, MaskedArray):
if _mask is nomask:
outmask = maskindices
else:
outmask = _mask.take(indices, axis=axis, mode=mode)
outmask |= maskindices
out.__setmask__(outmask)
# demote 0d arrays back to scalars, for consistency with ndarray.take
return out[()]
# Array methods
copy = _arraymethod('copy')
diagonal = _arraymethod('diagonal')
flatten = _arraymethod('flatten')
repeat = _arraymethod('repeat')
squeeze = _arraymethod('squeeze')
swapaxes = _arraymethod('swapaxes')
T = property(fget=lambda self: self.transpose())
transpose = _arraymethod('transpose')
def tolist(self, fill_value=None):
"""
Return the data portion of the masked array as a hierarchical Python list.
Data items are converted to the nearest compatible Python type.
Masked values are converted to `fill_value`. If `fill_value` is None,
the corresponding entries in the output list will be ``None``.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries. Default is None.
Returns
-------
result : list
The Python list representation of the masked array.
Examples
--------
>>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4)
>>> x.tolist()
[[1, None, 3], [None, 5, None], [7, None, 9]]
>>> x.tolist(-999)
[[1, -999, 3], [-999, 5, -999], [7, -999, 9]]
"""
_mask = self._mask
# No mask? Just return self._data.tolist().
if _mask is nomask:
return self._data.tolist()
# Explicit fill_value: fill the array and get the list
if fill_value is not None:
return self.filled(fill_value).tolist()
# Structured array.
names = self.dtype.names
if names:
result = self._data.astype([(_, object) for _ in names])
for n in names:
result[n][_mask[n]] = None
return result.tolist()
# Standard arrays.
if _mask is nomask:
return [None]
# Set temporaries to save time when dealing with masked arrays.
inishape = self.shape
result = np.array(self._data.ravel(), dtype=object)
result[_mask.ravel()] = None
result.shape = inishape
return result.tolist()
def tostring(self, fill_value=None, order='C'):
r"""
A compatibility alias for `tobytes`, with exactly the same behavior.
Despite its name, it returns `bytes` not `str`\ s.
.. deprecated:: 1.19.0
"""
# 2020-03-30, Numpy 1.19.0
warnings.warn(
"tostring() is deprecated. Use tobytes() instead.",
DeprecationWarning, stacklevel=2)
return self.tobytes(fill_value, order=order)
def tobytes(self, fill_value=None, order='C'):
"""
Return the array data as a string containing the raw bytes in the array.
The array is filled with a fill value before the string conversion.
.. versionadded:: 1.9.0
Parameters
----------
fill_value : scalar, optional
Value used to fill in the masked values. Default is None, in which
case `MaskedArray.fill_value` is used.
order : {'C','F','A'}, optional
Order of the data item in the copy. Default is 'C'.
- 'C' -- C order (row major).
- 'F' -- Fortran order (column major).
- 'A' -- Any, current order of array.
- None -- Same as 'A'.
See Also
--------
numpy.ndarray.tobytes
tolist, tofile
Notes
-----
As for `ndarray.tobytes`, information about the shape, dtype, etc.,
but also about `fill_value`, will be lost.
Examples
--------
>>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
>>> x.tobytes()
b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00'
"""
return self.filled(fill_value).tobytes(order=order)
def tofile(self, fid, sep="", format="%s"):
"""
Save a masked array to a file in binary format.
.. warning::
This function is not implemented yet.
Raises
------
NotImplementedError
When `tofile` is called.
"""
raise NotImplementedError("MaskedArray.tofile() not implemented yet.")
def toflex(self):
"""
Transforms a masked array into a flexible-type array.
The flexible type array that is returned will have two fields:
* the ``_data`` field stores the ``_data`` part of the array.
* the ``_mask`` field stores the ``_mask`` part of the array.
Parameters
----------
None
Returns
-------
record : ndarray
A new flexible-type `ndarray` with two fields: the first element
containing a value, the second element containing the corresponding
mask boolean. The returned record shape matches self.shape.
Notes
-----
A side-effect of transforming a masked array into a flexible `ndarray` is
that meta information (``fill_value``, ...) will be lost.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> x
masked_array(
data=[[1, --, 3],
[--, 5, --],
[7, --, 9]],
mask=[[False, True, False],
[ True, False, True],
[False, True, False]],
fill_value=999999)
>>> x.toflex()
array([[(1, False), (2, True), (3, False)],
[(4, True), (5, False), (6, True)],
[(7, False), (8, True), (9, False)]],
dtype=[('_data', '<i8'), ('_mask', '?')])
"""
# Get the basic dtype.
ddtype = self.dtype
# Make sure we have a mask
_mask = self._mask
if _mask is None:
_mask = make_mask_none(self.shape, ddtype)
# And get its dtype
mdtype = self._mask.dtype
record = np.ndarray(shape=self.shape,
dtype=[('_data', ddtype), ('_mask', mdtype)])
record['_data'] = self._data
record['_mask'] = self._mask
return record
torecords = toflex
# Pickling
def __getstate__(self):
"""Return the internal state of the masked array, for pickling
purposes.
"""
cf = 'CF'[self.flags.fnc]
data_state = super().__reduce__()[2]
return data_state + (getmaskarray(self).tobytes(cf), self._fill_value)
def __setstate__(self, state):
"""Restore the internal state of the masked array, for
pickling purposes. ``state`` is typically the output of the
``__getstate__`` output, and is a 5-tuple:
- class name
- a tuple giving the shape of the data
- a typecode for the data
- a binary string for the data
- a binary string for the mask.
"""
(_, shp, typ, isf, raw, msk, flv) = state
super().__setstate__((shp, typ, isf, raw))
self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk))
self.fill_value = flv
def __reduce__(self):
"""Return a 3-tuple for pickling a MaskedArray.
"""
return (_mareconstruct,
(self.__class__, self._baseclass, (0,), 'b',),
self.__getstate__())
def __deepcopy__(self, memo=None):
from copy import deepcopy
copied = MaskedArray.__new__(type(self), self, copy=True)
if memo is None:
memo = {}
memo[id(self)] = copied
for (k, v) in self.__dict__.items():
copied.__dict__[k] = deepcopy(v, memo)
return copied
def _mareconstruct(subtype, baseclass, baseshape, basetype,):
"""Internal function that builds a new MaskedArray from the
information stored in a pickle.
"""
_data = ndarray.__new__(baseclass, baseshape, basetype)
_mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype))
return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
class mvoid(MaskedArray):
"""
Fake a 'void' object to use for masked arrays with structured dtypes.
"""
def __new__(self, data, mask=nomask, dtype=None, fill_value=None,
hardmask=False, copy=False, subok=True):
_data = np.array(data, copy=copy, subok=subok, dtype=dtype)
_data = _data.view(self)
_data._hardmask = hardmask
if mask is not nomask:
if isinstance(mask, np.void):
_data._mask = mask
else:
try:
# Mask is already a 0D array
_data._mask = np.void(mask)
except TypeError:
# Transform the mask to a void
mdtype = make_mask_descr(dtype)
_data._mask = np.array(mask, dtype=mdtype)[()]
if fill_value is not None:
_data.fill_value = fill_value
return _data
@property
def _data(self):
# Make sure that the _data part is a np.void
return super()._data[()]
def __getitem__(self, indx):
"""
Return the item at the given index, with `masked` returned for masked entries.
"""
m = self._mask
if isinstance(m[indx], ndarray):
# Can happen when indx is a multi-dimensional field:
# A = ma.masked_array(data=[([0,1],)], mask=[([True,
# False],)], dtype=[("A", ">i2", (2,))])
# x = A[0]; y = x["A"]; then y.mask["A"].size==2
# and we can not say masked/unmasked.
# The result is no longer mvoid!
# See also issue #6724.
return masked_array(
data=self._data[indx], mask=m[indx],
fill_value=self._fill_value[indx],
hard_mask=self._hardmask)
if m is not nomask and m[indx]:
return masked
return self._data[indx]
def __setitem__(self, indx, value):
self._data[indx] = value
if self._hardmask:
self._mask[indx] |= getattr(value, "_mask", False)
else:
self._mask[indx] = getattr(value, "_mask", False)
def __str__(self):
m = self._mask
if m is nomask:
return str(self._data)
rdtype = _replace_dtype_fields(self._data.dtype, "O")
data_arr = super()._data
res = data_arr.astype(rdtype)
_recursive_printoption(res, self._mask, masked_print_option)
return str(res)
__repr__ = __str__
def __iter__(self):
"Defines an iterator for mvoid"
(_data, _mask) = (self._data, self._mask)
if _mask is nomask:
yield from _data
else:
for (d, m) in zip(_data, _mask):
if m:
yield masked
else:
yield d
def __len__(self):
return self._data.__len__()
def filled(self, fill_value=None):
"""
Return a copy with masked fields filled with a given value.
Parameters
----------
fill_value : array_like, optional
The value to use for invalid entries. Can be scalar or
non-scalar. If the latter, the filled array should be
broadcastable over the input array. Default is None, in
which case the `fill_value` attribute is used instead.
Returns
-------
filled_void
A `np.void` object
See Also
--------
MaskedArray.filled
"""
return asarray(self).filled(fill_value)[()]
def tolist(self):
"""
Transforms the mvoid object into a tuple.
Masked fields are replaced by None.
Returns
-------
returned_tuple
Tuple of fields
"""
_mask = self._mask
if _mask is nomask:
return self._data.tolist()
result = []
for (d, m) in zip(self._data, self._mask):
if m:
result.append(None)
else:
# .item() makes sure we return a standard Python object
result.append(d.item())
return tuple(result)
##############################################################################
# Shortcuts #
##############################################################################
def isMaskedArray(x):
"""
Test whether input is an instance of MaskedArray.
This function returns True if `x` is an instance of MaskedArray
and returns False otherwise. Any object is accepted as input.
Parameters
----------
x : object
Object to test.
Returns
-------
result : bool
True if `x` is a MaskedArray.
See Also
--------
isMA : Alias to isMaskedArray.
isarray : Alias to isMaskedArray.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.eye(3, 3)
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> m = ma.masked_values(a, 0)
>>> m
masked_array(
data=[[1.0, --, --],
[--, 1.0, --],
[--, --, 1.0]],
mask=[[False, True, True],
[ True, False, True],
[ True, True, False]],
fill_value=0.0)
>>> ma.isMaskedArray(a)
False
>>> ma.isMaskedArray(m)
True
>>> ma.isMaskedArray([0, 1, 2])
False
"""
return isinstance(x, MaskedArray)
isarray = isMaskedArray
isMA = isMaskedArray # backward compatibility
class MaskedConstant(MaskedArray):
# the lone np.ma.masked instance
__singleton = None
@classmethod
def __has_singleton(cls):
# second case ensures `cls.__singleton` is not just a view on the
# superclass singleton
return cls.__singleton is not None and type(cls.__singleton) is cls
def __new__(cls):
if not cls.__has_singleton():
# We define the masked singleton as a float for higher precedence.
# Note that it can be tricky sometimes w/ type comparison
data = np.array(0.)
mask = np.array(True)
# prevent any modifications
data.flags.writeable = False
mask.flags.writeable = False
# don't fall back on MaskedArray.__new__(MaskedConstant), since
# that might confuse it - this way, the construction is entirely
# within our control
cls.__singleton = MaskedArray(data, mask=mask).view(cls)
return cls.__singleton
def __array_finalize__(self, obj):
if not self.__has_singleton():
# this handles the `.view` in __new__, which we want to copy across
# properties normally
return super().__array_finalize__(obj)
elif self is self.__singleton:
# not clear how this can happen, play it safe
pass
else:
# everywhere else, we want to downcast to MaskedArray, to prevent a
# duplicate maskedconstant.
self.__class__ = MaskedArray
MaskedArray.__array_finalize__(self, obj)
def __array_prepare__(self, obj, context=None):
return self.view(MaskedArray).__array_prepare__(obj, context)
def __array_wrap__(self, obj, context=None):
return self.view(MaskedArray).__array_wrap__(obj, context)
def __str__(self):
return str(masked_print_option._display)
def __repr__(self):
if self is MaskedConstant.__singleton:
return 'masked'
else:
# it's a subclass, or something is wrong, make it obvious
return object.__repr__(self)
def __format__(self, format_spec):
# Replace ndarray.__format__ with the default, which supports no format characters.
# Supporting format characters is unwise here, because we do not know what type
# the user was expecting - better to not guess.
try:
return object.__format__(self, format_spec)
except TypeError:
# 2020-03-23, NumPy 1.19.0
warnings.warn(
"Format strings passed to MaskedConstant are ignored, but in future may "
"error or produce different behavior",
FutureWarning, stacklevel=2
)
return object.__format__(self, "")
def __reduce__(self):
"""Override of MaskedArray's __reduce__.
"""
return (self.__class__, ())
# inplace operations have no effect. We have to override them to avoid
# trying to modify the readonly data and mask arrays
def __iop__(self, other):
return self
__iadd__ = \
__isub__ = \
__imul__ = \
__ifloordiv__ = \
__itruediv__ = \
__ipow__ = \
__iop__
del __iop__ # don't leave this around
def copy(self, *args, **kwargs):
""" Copy is a no-op on the maskedconstant, as it is a scalar """
# maskedconstant is a scalar, so copy doesn't need to copy. There's
# precedent for this with `np.bool_` scalars.
return self
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
def __setattr__(self, attr, value):
if not self.__has_singleton():
# allow the singleton to be initialized
return super().__setattr__(attr, value)
elif self is self.__singleton:
raise AttributeError(
f"attributes of {self!r} are not writeable")
else:
# duplicate instance - we can end up here from __array_finalize__,
# where we set the __class__ attribute
return super().__setattr__(attr, value)
masked = masked_singleton = MaskedConstant()
masked_array = MaskedArray
def array(data, dtype=None, copy=False, order=None,
mask=nomask, fill_value=None, keep_mask=True,
hard_mask=False, shrink=True, subok=True, ndmin=0):
"""
Shortcut to MaskedArray.
The options are in a different order for convenience and backwards
compatibility.
"""
return MaskedArray(data, mask=mask, dtype=dtype, copy=copy,
subok=subok, keep_mask=keep_mask,
hard_mask=hard_mask, fill_value=fill_value,
ndmin=ndmin, shrink=shrink, order=order)
array.__doc__ = masked_array.__doc__
def is_masked(x):
"""
Determine whether input has masked values.
Accepts any object as input, but always returns False unless the
input is a MaskedArray containing masked values.
Parameters
----------
x : array_like
Array to check for masked values.
Returns
-------
result : bool
True if `x` is a MaskedArray with masked values, False otherwise.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.masked_equal([0, 1, 0, 2, 3], 0)
>>> x
masked_array(data=[--, 1, --, 2, 3],
mask=[ True, False, True, False, False],
fill_value=0)
>>> ma.is_masked(x)
True
>>> x = ma.masked_equal([0, 1, 0, 2, 3], 42)
>>> x
masked_array(data=[0, 1, 0, 2, 3],
mask=False,
fill_value=42)
>>> ma.is_masked(x)
False
Always returns False if `x` isn't a MaskedArray.
>>> x = [False, True, False]
>>> ma.is_masked(x)
False
>>> x = 'a string'
>>> ma.is_masked(x)
False
"""
m = getmask(x)
if m is nomask:
return False
elif m.any():
return True
return False
##############################################################################
# Extrema functions #
##############################################################################
class _extrema_operation(_MaskedUFunc):
"""
Generic class for maximum/minimum functions.
.. note::
This is the base class for `_maximum_operation` and
`_minimum_operation`.
"""
def __init__(self, ufunc, compare, fill_value):
super().__init__(ufunc)
self.compare = compare
self.fill_value_func = fill_value
def __call__(self, a, b=None):
"Executes the call behavior."
if b is None:
# 2016-04-13, 1.13.0
warnings.warn(
f"Single-argument form of np.ma.{self.__name__} is deprecated. Use "
f"np.ma.{self.__name__}.reduce instead.",
DeprecationWarning, stacklevel=2)
return self.reduce(a)
return where(self.compare(a, b), a, b)
def reduce(self, target, axis=np._NoValue):
"Reduce target along the given axis."
target = narray(target, copy=False, subok=True)
m = getmask(target)
if axis is np._NoValue and target.ndim > 1:
# 2017-05-06, Numpy 1.13.0: warn on axis default
warnings.warn(
f"In the future the default for ma.{self.__name__}.reduce will be axis=0, "
f"not the current None, to match np.{self.__name__}.reduce. "
"Explicitly pass 0 or None to silence this warning.",
MaskedArrayFutureWarning, stacklevel=2)
axis = None
if axis is not np._NoValue:
kwargs = dict(axis=axis)
else:
kwargs = dict()
if m is nomask:
t = self.f.reduce(target, **kwargs)
else:
target = target.filled(
self.fill_value_func(target)).view(type(target))
t = self.f.reduce(target, **kwargs)
m = umath.logical_and.reduce(m, **kwargs)
if hasattr(t, '_mask'):
t._mask = m
elif m:
t = masked
return t
def outer(self, a, b):
"Return the function applied to the outer product of a and b."
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
m = nomask
else:
ma = getmaskarray(a)
mb = getmaskarray(b)
m = logical_or.outer(ma, mb)
result = self.f.outer(filled(a), filled(b))
if not isinstance(result, MaskedArray):
result = result.view(MaskedArray)
result._mask = m
return result
def min(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
try:
return obj.min(axis=axis, fill_value=fill_value, out=out, **kwargs)
except (AttributeError, TypeError):
# If obj doesn't have a min method, or if the method doesn't accept a
# fill_value argument
return asanyarray(obj).min(axis=axis, fill_value=fill_value,
out=out, **kwargs)
min.__doc__ = MaskedArray.min.__doc__
def max(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
try:
return obj.max(axis=axis, fill_value=fill_value, out=out, **kwargs)
except (AttributeError, TypeError):
# If obj doesn't have a max method, or if the method doesn't accept a
# fill_value argument
return asanyarray(obj).max(axis=axis, fill_value=fill_value,
out=out, **kwargs)
max.__doc__ = MaskedArray.max.__doc__
def ptp(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
try:
return obj.ptp(axis, out=out, fill_value=fill_value, **kwargs)
except (AttributeError, TypeError):
# If obj doesn't have a ptp method or if the method doesn't accept
# a fill_value argument
return asanyarray(obj).ptp(axis=axis, fill_value=fill_value,
out=out, **kwargs)
ptp.__doc__ = MaskedArray.ptp.__doc__
##############################################################################
# Definition of functions from the corresponding methods #
##############################################################################
class _frommethod:
"""
Define functions from existing MaskedArray methods.
Parameters
----------
methodname : str
Name of the method to transform.
"""
def __init__(self, methodname, reversed=False):
self.__name__ = methodname
self.__doc__ = self.getdoc()
self.reversed = reversed
def getdoc(self):
"Return the doc of the function (from the doc of the method)."
meth = getattr(MaskedArray, self.__name__, None) or\
getattr(np, self.__name__, None)
signature = self.__name__ + get_object_signature(meth)
if meth is not None:
doc = """ %s\n%s""" % (
signature, getattr(meth, '__doc__', None))
return doc
def __call__(self, a, *args, **params):
if self.reversed:
args = list(args)
a, args[0] = args[0], a
marr = asanyarray(a)
method_name = self.__name__
method = getattr(type(marr), method_name, None)
if method is None:
# use the corresponding np function
method = getattr(np, method_name)
return method(marr, *args, **params)
all = _frommethod('all')
anomalies = anom = _frommethod('anom')
any = _frommethod('any')
compress = _frommethod('compress', reversed=True)
cumprod = _frommethod('cumprod')
cumsum = _frommethod('cumsum')
copy = _frommethod('copy')
diagonal = _frommethod('diagonal')
harden_mask = _frommethod('harden_mask')
ids = _frommethod('ids')
maximum = _extrema_operation(umath.maximum, greater, maximum_fill_value)
mean = _frommethod('mean')
minimum = _extrema_operation(umath.minimum, less, minimum_fill_value)
nonzero = _frommethod('nonzero')
prod = _frommethod('prod')
product = _frommethod('prod')
ravel = _frommethod('ravel')
repeat = _frommethod('repeat')
shrink_mask = _frommethod('shrink_mask')
soften_mask = _frommethod('soften_mask')
std = _frommethod('std')
sum = _frommethod('sum')
swapaxes = _frommethod('swapaxes')
#take = _frommethod('take')
trace = _frommethod('trace')
var = _frommethod('var')
count = _frommethod('count')
def take(a, indices, axis=None, out=None, mode='raise'):
"""
"""
a = masked_array(a)
return a.take(indices, axis=axis, out=out, mode=mode)
def power(a, b, third=None):
"""
Returns element-wise base array raised to power from second array.
This is the masked array version of `numpy.power`. For details see
`numpy.power`.
See Also
--------
numpy.power
Notes
-----
The *out* argument to `numpy.power` is not supported, `third` has to be
None.
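Examples
--------
A small sketch of the expected behavior (``tolist`` renders masked
entries as ``None``):
>>> x = np.ma.array([2, 3, 4], mask=[0, 0, 1])
>>> np.ma.power(x, 2).tolist()
[4, 9, None]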
"""
if third is not None:
raise MaskError("3-argument power not supported.")
# Get the masks
ma = getmask(a)
mb = getmask(b)
m = mask_or(ma, mb)
# Get the rawdata
fa = getdata(a)
fb = getdata(b)
# Get the type of the result (so that we preserve subclasses)
if isinstance(a, MaskedArray):
basetype = type(a)
else:
basetype = MaskedArray
# Get the result and view it as a (subclass of) MaskedArray
with np.errstate(divide='ignore', invalid='ignore'):
result = np.where(m, fa, umath.power(fa, fb)).view(basetype)
result._update_from(a)
# Find where we're in trouble w/ NaNs and Infs
invalid = np.logical_not(np.isfinite(result.view(ndarray)))
# Add the initial mask
if m is not nomask:
if not result.ndim:
return masked
result._mask = np.logical_or(m, invalid)
# Fix the invalid parts
if invalid.any():
if not result.ndim:
return masked
elif result._mask is nomask:
result._mask = invalid
result._data[invalid] = result.fill_value
return result
argmin = _frommethod('argmin')
argmax = _frommethod('argmax')
def argsort(a, axis=np._NoValue, kind=None, order=None, endwith=True, fill_value=None):
"Function version of the eponymous method."
a = np.asanyarray(a)
# 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default
if axis is np._NoValue:
axis = _deprecate_argsort_axis(a)
if isinstance(a, MaskedArray):
return a.argsort(axis=axis, kind=kind, order=order,
endwith=endwith, fill_value=fill_value)
else:
return a.argsort(axis=axis, kind=kind, order=order)
argsort.__doc__ = MaskedArray.argsort.__doc__
def sort(a, axis=-1, kind=None, order=None, endwith=True, fill_value=None):
"""
Return a sorted copy of the masked array.
Equivalent to creating a copy of the array
and applying the MaskedArray ``sort()`` method.
Refer to ``MaskedArray.sort`` for the full documentation
See Also
--------
MaskedArray.sort : equivalent method
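Examples
--------
A small sketch of the expected behavior; with the default
``endwith=True``, masked entries are sorted to the end:
>>> x = np.ma.array([3, 1, 2], mask=[0, 1, 0])
>>> np.ma.sort(x).tolist()
[2, 3, None]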
"""
a = np.array(a, copy=True, subok=True)
if axis is None:
a = a.flatten()
axis = 0
if isinstance(a, MaskedArray):
a.sort(axis=axis, kind=kind, order=order,
endwith=endwith, fill_value=fill_value)
else:
a.sort(axis=axis, kind=kind, order=order)
return a
def compressed(x):
"""
Return all the non-masked data as a 1-D array.
This function is equivalent to calling the "compressed" method of a
`ma.MaskedArray`, see `ma.MaskedArray.compressed` for details.
See Also
--------
ma.MaskedArray.compressed : Equivalent method.
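Examples
--------
A small sketch of the expected behavior; the result is a plain
ndarray holding only the unmasked values:
>>> x = np.ma.array([1, 2, 3], mask=[0, 1, 0])
>>> np.ma.compressed(x)
array([1, 3])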
"""
return asanyarray(x).compressed()
def concatenate(arrays, axis=0):
"""
Concatenate a sequence of arrays along the given axis.
Parameters
----------
arrays : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. Default is 0.
Returns
-------
result : MaskedArray
The concatenated array with any masked entries preserved.
See Also
--------
numpy.concatenate : Equivalent function in the top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.arange(3)
>>> a[1] = ma.masked
>>> b = ma.arange(2, 5)
>>> a
masked_array(data=[0, --, 2],
mask=[False, True, False],
fill_value=999999)
>>> b
masked_array(data=[2, 3, 4],
mask=False,
fill_value=999999)
>>> ma.concatenate([a, b])
masked_array(data=[0, --, 2, 2, 3, 4],
mask=[False, True, False, False, False, False],
fill_value=999999)
"""
d = np.concatenate([getdata(a) for a in arrays], axis)
rcls = get_masked_subclass(*arrays)
data = d.view(rcls)
# Check whether one of the arrays has a non-empty mask.
for x in arrays:
if getmask(x) is not nomask:
break
else:
return data
# OK, so we have to concatenate the masks
dm = np.concatenate([getmaskarray(a) for a in arrays], axis)
dm = dm.reshape(d.shape)
# If we decide to keep a '_shrinkmask' option, we want to check that
# all of them are True, and then check for dm.any()
data._mask = _shrink_mask(dm)
return data
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
This function is the equivalent of `numpy.diag` that takes masked
values into account, see `numpy.diag` for details.
See Also
--------
numpy.diag : Equivalent function for ndarrays.
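Examples
--------
A small sketch of the expected behavior (``tolist`` renders masked
entries as ``None``):
>>> x = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [0, 1]])
>>> np.ma.diag(x).tolist()
[1, None]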
"""
output = np.diag(v, k).view(MaskedArray)
if getmask(v) is not nomask:
output._mask = np.diag(v._mask, k)
return output
def left_shift(a, n):
"""
Shift the bits of an integer to the left.
This is the masked array version of `numpy.left_shift`, for details
see that function.
See Also
--------
numpy.left_shift
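Examples
--------
A small sketch of the expected behavior; the mask is preserved:
>>> x = np.ma.array([1, 2, 3], mask=[0, 1, 0])
>>> np.ma.left_shift(x, 1).tolist()
[2, None, 6]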
"""
m = getmask(a)
if m is nomask:
d = umath.left_shift(filled(a), n)
return masked_array(d)
else:
d = umath.left_shift(filled(a, 0), n)
return masked_array(d, mask=m)
def right_shift(a, n):
"""
Shift the bits of an integer to the right.
This is the masked array version of `numpy.right_shift`, for details
see that function.
See Also
--------
numpy.right_shift
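Examples
--------
A small sketch of the expected behavior; the mask is preserved:
>>> x = np.ma.array([4, 8, 12], mask=[0, 1, 0])
>>> np.ma.right_shift(x, 1).tolist()
[2, None, 6]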
"""
m = getmask(a)
if m is nomask:
d = umath.right_shift(filled(a), n)
return masked_array(d)
else:
d = umath.right_shift(filled(a, 0), n)
return masked_array(d, mask=m)
def put(a, indices, values, mode='raise'):
"""
Set storage-indexed locations to corresponding values.
This function is equivalent to `MaskedArray.put`, see that method
for details.
See Also
--------
MaskedArray.put
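Examples
--------
A small sketch of the expected behavior; putting an unmasked value
into a masked slot unmasks it (with the default soft mask):
>>> x = np.ma.array([1, 2, 3], mask=[0, 0, 1])
>>> np.ma.put(x, 2, 30)
>>> x.tolist()
[1, 2, 30]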
"""
# We can't use 'frommethod', the order of arguments is different
try:
return a.put(indices, values, mode=mode)
except AttributeError:
return narray(a, copy=False).put(indices, values, mode=mode)
def putmask(a, mask, values): # , mode='raise'):
"""
Changes elements of an array based on conditional and input values.
This is the masked array version of `numpy.putmask`, for details see
`numpy.putmask`.
See Also
--------
numpy.putmask
Notes
-----
Using a masked array as `values` will **not** transform a `ndarray` into
a `MaskedArray`.
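Examples
--------
A small sketch of the expected behavior:
>>> x = np.ma.array([1, 2, 3])
>>> np.ma.putmask(x, [True, False, True], [10, 20, 30])
>>> x.tolist()
[10, 2, 30]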
"""
# We can't use 'frommethod', the order of arguments is different
if not isinstance(a, MaskedArray):
a = a.view(MaskedArray)
(valdata, valmask) = (getdata(values), getmask(values))
if getmask(a) is nomask:
if valmask is not nomask:
a._sharedmask = True
a._mask = make_mask_none(a.shape, a.dtype)
np.copyto(a._mask, valmask, where=mask)
elif a._hardmask:
if valmask is not nomask:
m = a._mask.copy()
np.copyto(m, valmask, where=mask)
a.mask |= m
else:
if valmask is nomask:
valmask = getmaskarray(values)
np.copyto(a._mask, valmask, where=mask)
np.copyto(a._data, valdata, where=mask)
return
def transpose(a, axes=None):
"""
Permute the dimensions of an array.
This function is exactly equivalent to `numpy.transpose`.
See Also
--------
numpy.transpose : Equivalent function in top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.arange(4).reshape((2,2))
>>> x[1, 1] = ma.masked
>>> x
masked_array(
data=[[0, 1],
[2, --]],
mask=[[False, False],
[False, True]],
fill_value=999999)
>>> ma.transpose(x)
masked_array(
data=[[0, 2],
[1, --]],
mask=[[False, False],
[False, True]],
fill_value=999999)
"""
# We can't use 'frommethod', as 'transpose' doesn't take keywords
try:
return a.transpose(axes)
except AttributeError:
return narray(a, copy=False).transpose(axes).view(MaskedArray)
def reshape(a, new_shape, order='C'):
"""
Returns an array containing the same data with a new shape.
Refer to `MaskedArray.reshape` for full documentation.
See Also
--------
MaskedArray.reshape : equivalent function
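Examples
--------
A small sketch of the expected behavior; the mask is reshaped along
with the data (``tolist`` renders masked entries as ``None``):
>>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 0])
>>> np.ma.reshape(x, (2, 2)).tolist()
[[1, None], [3, 4]]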
"""
# We can't use 'frommethod', as it whines about some parameters.
try:
return a.reshape(new_shape, order=order)
except AttributeError:
_tmp = narray(a, copy=False).reshape(new_shape, order=order)
return _tmp.view(MaskedArray)
def resize(x, new_shape):
"""
Return a new masked array with the specified size and shape.
This is the masked equivalent of the `numpy.resize` function. The new
array is filled with repeated copies of `x` (in the order that the
data are stored in memory). If `x` is masked, the new array will be
masked, and the new mask will be a repetition of the old one.
See Also
--------
numpy.resize : Equivalent function in the top level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.array([[1, 2] ,[3, 4]])
>>> a[0, 1] = ma.masked
>>> a
masked_array(
data=[[1, --],
[3, 4]],
mask=[[False, True],
[False, False]],
fill_value=999999)
>>> np.resize(a, (3, 3))
masked_array(
data=[[1, 2, 3],
[4, 1, 2],
[3, 4, 1]],
mask=False,
fill_value=999999)
>>> ma.resize(a, (3, 3))
masked_array(
data=[[1, --, 3],
[4, 1, --],
[3, 4, 1]],
mask=[[False, True, False],
[False, False, True],
[False, False, False]],
fill_value=999999)
A MaskedArray is always returned, regardless of the input type.
>>> a = np.array([[1, 2] ,[3, 4]])
>>> ma.resize(a, (3, 3))
masked_array(
data=[[1, 2, 3],
[4, 1, 2],
[3, 4, 1]],
mask=False,
fill_value=999999)
"""
# We can't use _frommethod here, as np.resize is notoriously whiny.
m = getmask(x)
if m is not nomask:
m = np.resize(m, new_shape)
result = np.resize(x, new_shape).view(get_masked_subclass(x))
if result.ndim:
result._mask = m
return result
def ndim(obj):
"""
maskedarray version of the numpy function.
"""
return np.ndim(getdata(obj))
ndim.__doc__ = np.ndim.__doc__
def shape(obj):
"maskedarray version of the numpy function."
return np.shape(getdata(obj))
shape.__doc__ = np.shape.__doc__
def size(obj, axis=None):
"maskedarray version of the numpy function."
return np.size(getdata(obj), axis)
size.__doc__ = np.size.__doc__
##############################################################################
# Extra functions #
##############################################################################
def where(condition, x=_NoValue, y=_NoValue):
"""
Return a masked array with elements from `x` or `y`, depending on condition.
.. note::
When only `condition` is provided, this function is identical to
`nonzero`. The rest of this documentation covers only the case where
all three arguments are provided.
Parameters
----------
condition : array_like, bool
Where True, yield `x`, otherwise yield `y`.
x, y : array_like, optional
Values from which to choose. `x`, `y` and `condition` need to be
broadcastable to some shape.
Returns
-------
out : MaskedArray
A masked array with `masked` elements where the condition is masked,
elements from `x` where `condition` is True, and elements from `y`
elsewhere.
See Also
--------
numpy.where : Equivalent function in the top-level NumPy module.
nonzero : The function that is called when x and y are omitted
Examples
--------
>>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0],
... [1, 0, 1],
... [0, 1, 0]])
>>> x
masked_array(
data=[[0.0, --, 2.0],
[--, 4.0, --],
[6.0, --, 8.0]],
mask=[[False, True, False],
[ True, False, True],
[False, True, False]],
fill_value=1e+20)
>>> np.ma.where(x > 5, x, -3.1416)
masked_array(
data=[[-3.1416, --, -3.1416],
[--, -3.1416, --],
[6.0, --, 8.0]],
mask=[[False, True, False],
[ True, False, True],
[False, True, False]],
fill_value=1e+20)
"""
# handle the single-argument case
missing = (x is _NoValue, y is _NoValue).count(True)
if missing == 1:
raise ValueError("Must provide both 'x' and 'y' or neither.")
if missing == 2:
return nonzero(condition)
# we only care whether the condition is true; false or masked picks y
cf = filled(condition, False)
xd = getdata(x)
yd = getdata(y)
# we need the full arrays here for correct final dimensions
cm = getmaskarray(condition)
xm = getmaskarray(x)
ym = getmaskarray(y)
# deal with the fact that masked.dtype == float64, but we don't actually
# want to treat it as that.
if x is masked and y is not masked:
xd = np.zeros((), dtype=yd.dtype)
xm = np.ones((), dtype=ym.dtype)
elif y is masked and x is not masked:
yd = np.zeros((), dtype=xd.dtype)
ym = np.ones((), dtype=xm.dtype)
data = np.where(cf, xd, yd)
mask = np.where(cf, xm, ym)
mask = np.where(cm, np.ones((), dtype=mask.dtype), mask)
# collapse the mask, for backwards compatibility
mask = _shrink_mask(mask)
return masked_array(data, mask=mask)
def choose(indices, choices, out=None, mode='raise'):
"""
Use an index array to construct a new array from a list of choices.
Given an array of integers and a list of n choice arrays, this method
will create a new array that merges each of the choice arrays. Where a
value in `index` is i, the new array will have the value that choices[i]
contains in the same place.
Parameters
----------
indices : ndarray of ints
This array must contain integers in ``[0, n-1]``, where n is the
number of choices.
choices : sequence of arrays
Choice arrays. The index array and all of the choices should be
broadcastable to the same shape.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and `dtype`.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' : raise an error
* 'wrap' : wrap around
* 'clip' : clip to the range
Returns
-------
merged_array : array
See Also
--------
choose : equivalent function
Examples
--------
>>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]])
>>> a = np.array([2, 1, 0])
>>> np.ma.choose(a, choice)
masked_array(data=[3, 2, 1],
mask=False,
fill_value=999999)
"""
def fmask(x):
"Returns the filled array, or True if masked."
if x is masked:
return True
return filled(x)
def nmask(x):
"Returns the mask, True if ``masked``, False if ``nomask``."
if x is masked:
return True
return getmask(x)
# Get the indices.
c = filled(indices, 0)
# Get the masks.
masks = [nmask(x) for x in choices]
data = [fmask(x) for x in choices]
# Construct the mask
outputmask = np.choose(c, masks, mode=mode)
outputmask = make_mask(mask_or(outputmask, getmask(indices)),
copy=False, shrink=True)
# Get the choices.
d = np.choose(c, data, mode=mode, out=out).view(MaskedArray)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(outputmask)
return out
d.__setmask__(outputmask)
return d
def round_(a, decimals=0, out=None):
"""
Return a copy of a, rounded to 'decimals' places.
When 'decimals' is negative, it specifies the number of positions
to the left of the decimal point. The real and imaginary parts of
complex numbers are rounded separately. Nothing is done if the
array is not of float type and 'decimals' is greater than or equal
to 0.
Parameters
----------
a : array_like
Input masked array.
decimals : int
Number of decimals to round to. May be negative.
out : array_like
Existing array to use for output.
If not given, returns a default copy of a.
Notes
-----
If out is given and does not have a mask attribute, the mask of a
is lost!
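Examples
--------
A small sketch of the expected behavior; the mask is preserved:
>>> x = np.ma.array([1.14, 2.56, 3.78], mask=[0, 0, 1])
>>> np.ma.round_(x, 1).tolist()
[1.1, 2.6, None]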
"""
if out is None:
return np.round_(a, decimals, out)
else:
np.round_(getdata(a), decimals, out)
if hasattr(out, '_mask'):
out._mask = getmask(a)
return out
round = round_
# Needed by dot, so move here from extras.py. It will still be exported
# from extras.py for compatibility.
def mask_rowcols(a, axis=None):
"""
Mask rows and/or columns of a 2D array that contain masked values.
Mask whole rows and/or columns of a 2D array that contain
masked values. The masking behavior is selected using the
`axis` parameter.
- If `axis` is None, rows *and* columns are masked.
- If `axis` is 0, only rows are masked.
- If `axis` is 1 or -1, only columns are masked.
Parameters
----------
a : array_like, MaskedArray
The array to mask. If not a MaskedArray instance (or if no array
elements are masked). The result is a MaskedArray with `mask` set
to `nomask` (False). Must be a 2D array.
axis : int, optional
Axis along which to perform the operation. If None, applies to a
flattened version of the array.
Returns
-------
a : MaskedArray
A modified version of the input array, masked depending on the value
of the `axis` parameter.
Raises
------
NotImplementedError
If input array `a` is not 2D.
See Also
--------
mask_rows : Mask rows of a 2D array that contain masked values.
mask_cols : Mask cols of a 2D array that contain masked values.
masked_where : Mask where a condition is met.
Notes
-----
The input array's mask is modified by this function.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.zeros((3, 3), dtype=int)
>>> a[1, 1] = 1
>>> a
array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
>>> a = ma.masked_equal(a, 1)
>>> a
masked_array(
data=[[0, 0, 0],
[0, --, 0],
[0, 0, 0]],
mask=[[False, False, False],
[False, True, False],
[False, False, False]],
fill_value=1)
>>> ma.mask_rowcols(a)
masked_array(
data=[[0, --, 0],
[--, --, --],
[0, --, 0]],
mask=[[False, True, False],
[ True, True, True],
[False, True, False]],
fill_value=1)
"""
a = array(a, subok=False)
if a.ndim != 2:
raise NotImplementedError("mask_rowcols works for 2D arrays only.")
m = getmask(a)
# Nothing is masked: return a
if m is nomask or not m.any():
return a
maskedval = m.nonzero()
a._mask = a._mask.copy()
if not axis:
a[np.unique(maskedval[0])] = masked
if axis in [None, 1, -1]:
a[:, np.unique(maskedval[1])] = masked
return a
# Include masked dot here to avoid import problems in getting it from
# extras.py. Note that it is not included in __all__, but rather exported
# from extras in order to avoid backward compatibility problems.
def dot(a, b, strict=False, out=None):
"""
Return the dot product of two arrays.
This function is the equivalent of `numpy.dot` that takes masked values
into account. Note that `strict` and `out` appear in different positions
than in the method version. In order to maintain compatibility with the
corresponding method, it is recommended that the optional arguments be
treated as keyword only. At some point that may be mandatory.
.. note::
Works only with 2-D arrays at the moment.
Parameters
----------
a, b : masked_array_like
Input arrays.
strict : bool, optional
Whether masked data are propagated (True) or set to 0 (False) for
the computation. Default is False. Propagating the mask means that
if a masked value appears in a row or column, the whole row or
column is considered masked.
out : masked_array, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a,b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
.. versionadded:: 1.10.2
See Also
--------
numpy.dot : Equivalent function for ndarrays.
Examples
--------
>>> a = np.ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]])
>>> b = np.ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]])
>>> np.ma.dot(a, b)
masked_array(
data=[[21, 26],
[45, 64]],
mask=[[False, False],
[False, False]],
fill_value=999999)
>>> np.ma.dot(a, b, strict=True)
masked_array(
data=[[--, --],
[--, 64]],
mask=[[ True, True],
[ True, False]],
fill_value=999999)
"""
# !!!: Works only with 2D arrays. There should be a way to get it to run
# with higher dimensions.
if strict and (a.ndim == 2) and (b.ndim == 2):
a = mask_rowcols(a, 0)
b = mask_rowcols(b, 1)
am = ~getmaskarray(a)
bm = ~getmaskarray(b)
if out is None:
d = np.dot(filled(a, 0), filled(b, 0))
m = ~np.dot(am, bm)
if d.ndim == 0:
d = np.asarray(d)
r = d.view(get_masked_subclass(a, b))
r.__setmask__(m)
return r
else:
d = np.dot(filled(a, 0), filled(b, 0), out._data)
if out.mask.shape != d.shape:
out._mask = np.empty(d.shape, MaskType)
np.dot(am, bm, out._mask)
np.logical_not(out._mask, out._mask)
return out
def inner(a, b):
"""
Returns the inner product of a and b for arrays of floating point types.
Like the generic NumPy equivalent the product sum is over the last dimension
of a and b. The first argument is not conjugated.
"""
fa = filled(a, 0)
fb = filled(b, 0)
if fa.ndim == 0:
fa.shape = (1,)
if fb.ndim == 0:
fb.shape = (1,)
return np.inner(fa, fb).view(MaskedArray)
inner.__doc__ = doc_note(np.inner.__doc__,
"Masked values are replaced by 0.")
innerproduct = inner
def outer(a, b):
"maskedarray version of the numpy function."
fa = filled(a, 0).ravel()
fb = filled(b, 0).ravel()
d = np.outer(fa, fb)
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
return masked_array(d)
ma = getmaskarray(a)
mb = getmaskarray(b)
m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=False)
return masked_array(d, mask=m)
outer.__doc__ = doc_note(np.outer.__doc__,
"Masked values are replaced by 0.")
outerproduct = outer
def _convolve_or_correlate(f, a, v, mode, propagate_mask):
"""
Helper function for ma.correlate and ma.convolve
"""
if propagate_mask:
# results which are contributed to by either item in any pair being invalid
mask = (
f(getmaskarray(a), np.ones(np.shape(v), dtype=bool), mode=mode)
| f(np.ones(np.shape(a), dtype=bool), getmaskarray(v), mode=mode)
)
data = f(getdata(a), getdata(v), mode=mode)
else:
# results which are not contributed to by any pair of valid elements
mask = ~f(~getmaskarray(a), ~getmaskarray(v))
data = f(filled(a, 0), filled(v, 0), mode=mode)
return masked_array(data, mask=mask)
def correlate(a, v, mode='valid', propagate_mask=True):
"""
Cross-correlation of two 1-dimensional sequences.
Parameters
----------
a, v : array_like
Input sequences.
mode : {'valid', 'same', 'full'}, optional
Refer to the `np.convolve` docstring. Note that the default
is 'valid', unlike `convolve`, which uses 'full'.
propagate_mask : bool, optional
If True, then a result element is masked if any masked element
contributes towards it. If False, then a result element is only
masked if no non-masked element contributes towards it.
Returns
-------
out : MaskedArray
Discrete cross-correlation of `a` and `v`.
See Also
--------
numpy.correlate : Equivalent function in the top-level NumPy module.
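Examples
--------
A small sketch of the expected behavior with the default
``propagate_mask=True`` (``tolist`` renders masked entries as ``None``):
>>> a = np.ma.array([1, 2, 3], mask=[0, 0, 1])
>>> np.ma.correlate(a, [1, 1]).tolist()
[3, None]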
"""
return _convolve_or_correlate(np.correlate, a, v, mode, propagate_mask)
def convolve(a, v, mode='full', propagate_mask=True):
"""
Returns the discrete, linear convolution of two one-dimensional sequences.
Parameters
----------
a, v : array_like
Input sequences.
mode : {'valid', 'same', 'full'}, optional
Refer to the `np.convolve` docstring.
propagate_mask : bool, optional
If True, then if any masked element is included in the sum for a result
element, then the result is masked.
If False, then the result element is only masked if no non-masked cells
contribute towards it.
Returns
-------
out : MaskedArray
Discrete, linear convolution of `a` and `v`.
See Also
--------
numpy.convolve : Equivalent function in the top-level NumPy module.
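Examples
--------
A small sketch of the expected behavior with the default
``propagate_mask=True`` (``tolist`` renders masked entries as ``None``):
>>> a = np.ma.array([1, 1, 1], mask=[0, 1, 0])
>>> np.ma.convolve(a, [1, 1]).tolist()
[1, None, None, 1]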
"""
return _convolve_or_correlate(np.convolve, a, v, mode, propagate_mask)
def allequal(a, b, fill_value=True):
"""
Return True if all entries of a and b are equal, using
fill_value as a truth value where either or both are masked.
Parameters
----------
a, b : array_like
Input arrays to compare.
fill_value : bool, optional
Whether masked values in a or b are considered equal (True) or not
(False).
Returns
-------
y : bool
Returns True if all entries of `a` and `b` are equal, using
`fill_value` as a truth value where either or both are masked,
False otherwise.
See Also
--------
all, any
numpy.ma.allclose
Examples
--------
>>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
>>> a
masked_array(data=[10000000000.0, 1e-07, --],
mask=[False, False, True],
fill_value=1e+20)
>>> b = np.array([1e10, 1e-7, -42.0])
>>> b
array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01])
>>> np.ma.allequal(a, b, fill_value=False)
False
>>> np.ma.allequal(a, b)
True
"""
m = mask_or(getmask(a), getmask(b))
if m is nomask:
x = getdata(a)
y = getdata(b)
d = umath.equal(x, y)
return d.all()
elif fill_value:
x = getdata(a)
y = getdata(b)
d = umath.equal(x, y)
dm = array(d, mask=m, copy=False)
return dm.filled(True).all(None)
else:
return False
def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8):
"""
Returns True if two arrays are element-wise equal within a tolerance.
This function is equivalent to `allclose` except that masked values
are treated as equal (default) or unequal, depending on the `masked_equal`
argument.
Parameters
----------
a, b : array_like
Input arrays to compare.
masked_equal : bool, optional
Whether masked values in `a` and `b` are considered equal (True) or not
(False). They are considered equal by default.
rtol : float, optional
Relative tolerance. The relative difference is equal to ``rtol * b``.
Default is 1e-5.
atol : float, optional
Absolute tolerance. The absolute difference is equal to `atol`.
Default is 1e-8.
Returns
-------
y : bool
Returns True if the two arrays are equal within the given
tolerance, False otherwise. If either array contains NaN, then
False is returned.
See Also
--------
all, any
numpy.allclose : the non-masked `allclose`.
Notes
-----
If the following equation is element-wise True, then `allclose` returns
True::
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
Return True if all elements of `a` and `b` are equal subject to
given tolerances.
Examples
--------
>>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
>>> a
masked_array(data=[10000000000.0, 1e-07, --],
mask=[False, False, True],
fill_value=1e+20)
>>> b = np.ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1])
>>> np.ma.allclose(a, b)
False
>>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
>>> b = np.ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1])
>>> np.ma.allclose(a, b)
True
>>> np.ma.allclose(a, b, masked_equal=False)
False
Masked values are not compared directly.
>>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
>>> b = np.ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1])
>>> np.ma.allclose(a, b)
True
>>> np.ma.allclose(a, b, masked_equal=False)
False
"""
x = masked_array(a, copy=False)
y = masked_array(b, copy=False)
# make sure y is an inexact type to avoid abs(MIN_INT); will cause
# casting of x later.
# NOTE: We explicitly allow timedelta, which used to work. This could
# possibly be deprecated. See also gh-18286.
# timedelta works if `atol` is an integer or also a timedelta.
# Although, the default tolerances are unlikely to be useful
if y.dtype.kind != "m":
dtype = np.result_type(y, 1.)
if y.dtype != dtype:
y = masked_array(y, dtype=dtype, copy=False)
m = mask_or(getmask(x), getmask(y))
xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False)
# If we have some infs, they should fall at the same place.
if not np.all(xinf == filled(np.isinf(y), False)):
return False
# No infs at all
if not np.any(xinf):
d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),
masked_equal)
return np.all(d)
if not np.all(filled(x[xinf] == y[xinf], masked_equal)):
return False
x = x[~xinf]
y = y[~xinf]
d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),
masked_equal)
return np.all(d)
def asarray(a, dtype=None, order=None):
"""
Convert the input to a masked array of the given data-type.
No copy is performed if the input is already an `ndarray`. If `a` is
a subclass of `MaskedArray`, a base class `MaskedArray` is returned.
Parameters
----------
a : array_like
Input data, in any form that can be converted to a masked array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists, ndarrays and masked arrays.
dtype : dtype, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Default is 'C'.
Returns
-------
out : MaskedArray
Masked array interpretation of `a`.
See Also
--------
asanyarray : Similar to `asarray`, but conserves subclasses.
Examples
--------
>>> x = np.arange(10.).reshape(2, 5)
>>> x
array([[0., 1., 2., 3., 4.],
[5., 6., 7., 8., 9.]])
>>> np.ma.asarray(x)
masked_array(
data=[[0., 1., 2., 3., 4.],
[5., 6., 7., 8., 9.]],
mask=False,
fill_value=1e+20)
>>> type(np.ma.asarray(x))
<class 'numpy.ma.core.MaskedArray'>
"""
order = order or 'C'
return masked_array(a, dtype=dtype, copy=False, keep_mask=True,
subok=False, order=order)
def asanyarray(a, dtype=None):
"""
Convert the input to a masked array, conserving subclasses.
If `a` is a subclass of `MaskedArray`, its class is conserved.
No copy is performed if the input is already an `ndarray`.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array.
dtype : dtype, optional
By default, the data-type is inferred from the input data.
Returns
-------
out : MaskedArray
MaskedArray interpretation of `a`.
See Also
--------
asarray : Similar to `asanyarray`, but does not conserve subclass.
Examples
--------
>>> x = np.arange(10.).reshape(2, 5)
>>> x
array([[0., 1., 2., 3., 4.],
[5., 6., 7., 8., 9.]])
>>> np.ma.asanyarray(x)
masked_array(
data=[[0., 1., 2., 3., 4.],
[5., 6., 7., 8., 9.]],
mask=False,
fill_value=1e+20)
>>> type(np.ma.asanyarray(x))
<class 'numpy.ma.core.MaskedArray'>
"""
# workaround for #8666, to preserve identity. Ideally the bottom line
# would handle this for us.
if isinstance(a, MaskedArray) and (dtype is None or dtype == a.dtype):
return a
return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True)
##############################################################################
# Pickling #
##############################################################################
def _pickle_warn(method):
# NumPy 1.15.0, 2017-12-10
warnings.warn(
f"np.ma.{method} is deprecated, use pickle.{method} instead",
DeprecationWarning, stacklevel=3)
def fromfile(file, dtype=float, count=-1, sep=''):
raise NotImplementedError(
"fromfile() not yet implemented for a MaskedArray.")
def fromflex(fxarray):
"""
Build a masked array from a suitable flexible-type array.
The input array has to have a data-type with ``_data`` and ``_mask``
fields. This type of array is output by `MaskedArray.toflex`.
Parameters
----------
fxarray : ndarray
The structured input array, containing ``_data`` and ``_mask``
fields. If present, other fields are discarded.
Returns
-------
result : MaskedArray
The constructed masked array.
See Also
--------
MaskedArray.toflex : Build a flexible-type array from a masked array.
Examples
--------
>>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4)
>>> rec = x.toflex()
>>> rec
array([[(0, False), (1, True), (2, False)],
[(3, True), (4, False), (5, True)],
[(6, False), (7, True), (8, False)]],
dtype=[('_data', '<i8'), ('_mask', '?')])
>>> x2 = np.ma.fromflex(rec)
>>> x2
masked_array(
data=[[0, --, 2],
[--, 4, --],
[6, --, 8]],
mask=[[False, True, False],
[ True, False, True],
[False, True, False]],
fill_value=999999)
Extra fields can be present in the structured array but are discarded:
>>> dt = [('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')]
>>> rec2 = np.zeros((2, 2), dtype=dt)
>>> rec2
array([[(0, False, 0.), (0, False, 0.)],
[(0, False, 0.), (0, False, 0.)]],
dtype=[('_data', '<i4'), ('_mask', '?'), ('field3', '<f4')])
>>> y = np.ma.fromflex(rec2)
>>> y
masked_array(
data=[[0, 0],
[0, 0]],
mask=[[False, False],
[False, False]],
fill_value=999999,
dtype=int32)
"""
return masked_array(fxarray['_data'], mask=fxarray['_mask'])
class _convert2ma:
"""
Convert functions from numpy to numpy.ma.
Parameters
----------
    funcname : str
        Name of the NumPy function to wrap (looked up in the ``np``
        namespace).
    np_ret : str
        Return-type line to replace in the ``np`` docstring
        (e.g. ``'out : ndarray'``).
    np_ma_ret : str
        Replacement return-type line for the ``np.ma`` version
        (e.g. ``'out : MaskedArray'``).
    params : dict, optional
        Extra masked-array-only keywords (e.g. ``fill_value``, ``hardmask``)
        accepted by the wrapper's ``__call__``.
"""
__doc__ = None
def __init__(self, funcname, np_ret, np_ma_ret, params=None):
self._func = getattr(np, funcname)
self.__doc__ = self.getdoc(np_ret, np_ma_ret)
self._extras = params or {}
def getdoc(self, np_ret, np_ma_ret):
"Return the doc of the function (from the doc of the method)."
doc = getattr(self._func, '__doc__', None)
sig = get_object_signature(self._func)
if doc:
doc = self._replace_return_type(doc, np_ret, np_ma_ret)
# Add the signature of the function at the beginning of the doc
if sig:
sig = "%s%s\n" % (self._func.__name__, sig)
doc = sig + doc
return doc
def _replace_return_type(self, doc, np_ret, np_ma_ret):
"""
Replace documentation of ``np`` function's return type.
Replaces it with the proper type for the ``np.ma`` function.
Parameters
----------
doc : str
The documentation of the ``np`` method.
np_ret : str
The return type string of the ``np`` method that we want to
replace. (e.g. "out : ndarray")
np_ma_ret : str
The return type string of the ``np.ma`` method.
(e.g. "out : MaskedArray")
"""
if np_ret not in doc:
raise RuntimeError(
f"Failed to replace `{np_ret}` with `{np_ma_ret}`. "
f"The documentation string for return type, {np_ret}, is not "
f"found in the docstring for `np.{self._func.__name__}`. "
f"Fix the docstring for `np.{self._func.__name__}` or "
"update the expected string for return type."
)
return doc.replace(np_ret, np_ma_ret)
def __call__(self, *args, **params):
# Find the common parameters to the call and the definition
_extras = self._extras
common_params = set(params).intersection(_extras)
# Drop the common parameters from the call
for p in common_params:
_extras[p] = params.pop(p)
# Get the result
result = self._func.__call__(*args, **params).view(MaskedArray)
if "fill_value" in common_params:
result.fill_value = _extras.get("fill_value", None)
if "hardmask" in common_params:
result._hardmask = bool(_extras.get("hard_mask", False))
return result
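# Illustrative sketch (not part of the original module): each wrapper below
# calls the like-named numpy function, views the result as a MaskedArray and
# consumes the masked-array-only keywords declared in ``params``. Assuming
# the wrapped ``arange`` defined just below:
#
#     a = arange(3, fill_value=-1)
#     a.fill_value    # -1 -- stored on the masked result, not passed to np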
arange = _convert2ma(
'arange',
params=dict(fill_value=None, hardmask=False),
np_ret='arange : ndarray',
np_ma_ret='arange : MaskedArray',
)
clip = _convert2ma(
'clip',
params=dict(fill_value=None, hardmask=False),
np_ret='clipped_array : ndarray',
np_ma_ret='clipped_array : MaskedArray',
)
diff = _convert2ma(
'diff',
params=dict(fill_value=None, hardmask=False),
np_ret='diff : ndarray',
np_ma_ret='diff : MaskedArray',
)
empty = _convert2ma(
'empty',
params=dict(fill_value=None, hardmask=False),
np_ret='out : ndarray',
np_ma_ret='out : MaskedArray',
)
empty_like = _convert2ma(
'empty_like',
np_ret='out : ndarray',
np_ma_ret='out : MaskedArray',
)
frombuffer = _convert2ma(
'frombuffer',
np_ret='out : ndarray',
    np_ma_ret='out : MaskedArray',
)
fromfunction = _convert2ma(
'fromfunction',
np_ret='fromfunction : any',
    np_ma_ret='fromfunction : MaskedArray',
)
identity = _convert2ma(
'identity',
params=dict(fill_value=None, hardmask=False),
np_ret='out : ndarray',
np_ma_ret='out : MaskedArray',
)
indices = _convert2ma(
'indices',
params=dict(fill_value=None, hardmask=False),
np_ret='grid : one ndarray or tuple of ndarrays',
np_ma_ret='grid : one MaskedArray or tuple of MaskedArrays',
)
ones = _convert2ma(
'ones',
params=dict(fill_value=None, hardmask=False),
np_ret='out : ndarray',
np_ma_ret='out : MaskedArray',
)
ones_like = _convert2ma(
'ones_like',
np_ret='out : ndarray',
np_ma_ret='out : MaskedArray',
)
squeeze = _convert2ma(
'squeeze',
params=dict(fill_value=None, hardmask=False),
np_ret='squeezed : ndarray',
np_ma_ret='squeezed : MaskedArray',
)
zeros = _convert2ma(
'zeros',
params=dict(fill_value=None, hardmask=False),
np_ret='out : ndarray',
np_ma_ret='out : MaskedArray',
)
zeros_like = _convert2ma(
'zeros_like',
np_ret='out : ndarray',
np_ma_ret='out : MaskedArray',
)
def append(a, b, axis=None):
"""Append values to the end of an array.
.. versionadded:: 1.9.0
Parameters
----------
a : array_like
Values are appended to a copy of this array.
b : array_like
These values are appended to a copy of `a`. It must be of the
correct shape (the same shape as `a`, excluding `axis`). If `axis`
is not specified, `b` can be any shape and will be flattened
before use.
axis : int, optional
        The axis along which `b` is appended. If `axis` is not given,
both `a` and `b` are flattened before use.
Returns
-------
append : MaskedArray
A copy of `a` with `b` appended to `axis`. Note that `append`
does not occur in-place: a new array is allocated and filled. If
`axis` is None, the result is a flattened array.
See Also
--------
numpy.append : Equivalent function in the top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.masked_values([1, 2, 3], 2)
>>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
>>> ma.append(a, b)
masked_array(data=[1, --, 3, 4, 5, 6, --, 8, 9],
mask=[False, True, False, False, False, False, True, False,
False],
fill_value=999999)
"""
return concatenate([a, b], axis)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/ma/setup.py | #!/usr/bin/env python3
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('ma', parent_package, top_path)
config.add_subpackage('tests')
config.add_data_files('*.pyi')
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
config = configuration(top_path='').todict()
setup(**config)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/ma/testutils.py | """Miscellaneous functions for testing masked arrays and subclasses
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
:version: $Id: testutils.py 3529 2007-11-13 08:01:14Z jarrod.millman $
"""
import operator
import numpy as np
from numpy import ndarray, float_
import numpy.core.umath as umath
import numpy.testing
from numpy.testing import (
assert_, assert_allclose, assert_array_almost_equal_nulp,
assert_raises, build_err_msg
)
from .core import mask_or, getmask, masked_array, nomask, masked, filled
__all__masked = [
'almost', 'approx', 'assert_almost_equal', 'assert_array_almost_equal',
'assert_array_approx_equal', 'assert_array_compare',
'assert_array_equal', 'assert_array_less', 'assert_close',
'assert_equal', 'assert_equal_records', 'assert_mask_equal',
'assert_not_equal', 'fail_if_array_equal',
]
# Include some normal test functions to avoid breaking other projects who
# have mistakenly included them from this file. SciPy is one. That is
# unfortunate, as some of these functions are not intended to work with
# masked arrays. But there was no way to tell before.
from unittest import TestCase
__some__from_testing = [
'TestCase', 'assert_', 'assert_allclose', 'assert_array_almost_equal_nulp',
'assert_raises'
]
__all__ = __all__masked + __some__from_testing
def approx(a, b, fill_value=True, rtol=1e-5, atol=1e-8):
"""
Returns true if all components of a and b are equal to given tolerances.
    If fill_value is True, masked values are considered equal. Otherwise,
    masked values are considered unequal. The relative error rtol should
    be positive and << 1.0. The absolute error atol comes into play for
those elements of b that are very small or zero; it says how small a
must be also.
"""
m = mask_or(getmask(a), getmask(b))
d1 = filled(a)
d2 = filled(b)
if d1.dtype.char == "O" or d2.dtype.char == "O":
return np.equal(d1, d2).ravel()
x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_)
y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_)
d = np.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y))
return d.ravel()
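# Illustrative sketch (not part of the original module): with the default
# fill_value=True, masked slots are filled identically on both sides and so
# compare equal, while unmasked values are compared within rtol/atol:
#
#     a = masked_array([1.0, 2.0, 3.0], mask=[0, 0, 1])
#     b = masked_array([1.0, 2.0 + 1e-7, 99.0], mask=[0, 0, 1])
#     approx(a, b).all()    # True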
def almost(a, b, decimal=6, fill_value=True):
"""
Returns True if a and b are equal up to decimal places.
    If fill_value is True, masked values are considered equal. Otherwise,
masked values are considered unequal.
"""
m = mask_or(getmask(a), getmask(b))
d1 = filled(a)
d2 = filled(b)
if d1.dtype.char == "O" or d2.dtype.char == "O":
return np.equal(d1, d2).ravel()
x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_)
y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_)
d = np.around(np.abs(x - y), decimal) <= 10.0 ** (-decimal)
return d.ravel()
def _assert_equal_on_sequences(actual, desired, err_msg=''):
"""
Asserts the equality of two non-array sequences.
"""
assert_equal(len(actual), len(desired), err_msg)
for k in range(len(desired)):
assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}')
return
def assert_equal_records(a, b):
"""
Asserts that two records are equal.
Pretty crude for now.
"""
assert_equal(a.dtype, b.dtype)
for f in a.dtype.names:
(af, bf) = (operator.getitem(a, f), operator.getitem(b, f))
if not (af is masked) and not (bf is masked):
assert_equal(operator.getitem(a, f), operator.getitem(b, f))
return
def assert_equal(actual, desired, err_msg=''):
"""
Asserts that two items are equal.
"""
# Case #1: dictionary .....
if isinstance(desired, dict):
if not isinstance(actual, dict):
raise AssertionError(repr(type(actual)))
assert_equal(len(actual), len(desired), err_msg)
for k, i in desired.items():
if k not in actual:
raise AssertionError(f"{k} not in {actual}")
assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}')
return
# Case #2: lists .....
if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
        return _assert_equal_on_sequences(actual, desired, err_msg=err_msg)
    # Case #3: generic objects (neither actual nor desired is an ndarray)
    if not (isinstance(actual, ndarray) or isinstance(desired, ndarray)):
msg = build_err_msg([actual, desired], err_msg,)
if not desired == actual:
raise AssertionError(msg)
return
# Case #4. arrays or equivalent
if ((actual is masked) and not (desired is masked)) or \
((desired is masked) and not (actual is masked)):
msg = build_err_msg([actual, desired],
err_msg, header='', names=('x', 'y'))
raise ValueError(msg)
actual = np.asanyarray(actual)
desired = np.asanyarray(desired)
(actual_dtype, desired_dtype) = (actual.dtype, desired.dtype)
if actual_dtype.char == "S" and desired_dtype.char == "S":
return _assert_equal_on_sequences(actual.tolist(),
desired.tolist(),
                                          err_msg=err_msg)
return assert_array_equal(actual, desired, err_msg)
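# Illustrative sketch (not part of the original module): assert_equal
# dispatches on the type of ``desired`` -- dicts and sequences are compared
# recursively, while a masked/unmasked mismatch fails immediately:
#
#     assert_equal({'a': 1}, {'a': 1})    # passes
#     assert_equal([1, 2], (1, 2))        # passes (sequences, item by item)
#     assert_equal(masked, 1.0)           # raises ValueError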
def fail_if_equal(actual, desired, err_msg='',):
"""
Raises an assertion error if two items are equal.
"""
if isinstance(desired, dict):
if not isinstance(actual, dict):
raise AssertionError(repr(type(actual)))
fail_if_equal(len(actual), len(desired), err_msg)
for k, i in desired.items():
if k not in actual:
raise AssertionError(repr(k))
fail_if_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}')
return
if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
fail_if_equal(len(actual), len(desired), err_msg)
for k in range(len(desired)):
fail_if_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}')
return
if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray):
return fail_if_array_equal(actual, desired, err_msg)
msg = build_err_msg([actual, desired], err_msg)
if not desired != actual:
raise AssertionError(msg)
assert_not_equal = fail_if_equal
def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True):
"""
Asserts that two items are almost equal.
The test is equivalent to abs(desired-actual) < 0.5 * 10**(-decimal).
"""
if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray):
return assert_array_almost_equal(actual, desired, decimal=decimal,
err_msg=err_msg, verbose=verbose)
msg = build_err_msg([actual, desired],
err_msg=err_msg, verbose=verbose)
if not round(abs(desired - actual), decimal) == 0:
raise AssertionError(msg)
assert_close = assert_almost_equal
def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
fill_value=True):
"""
Asserts that comparison between two masked arrays is satisfied.
The comparison is elementwise.
"""
# Allocate a common mask and refill
m = mask_or(getmask(x), getmask(y))
x = masked_array(x, copy=False, mask=m, keep_mask=False, subok=False)
y = masked_array(y, copy=False, mask=m, keep_mask=False, subok=False)
if ((x is masked) and not (y is masked)) or \
((y is masked) and not (x is masked)):
msg = build_err_msg([x, y], err_msg=err_msg, verbose=verbose,
header=header, names=('x', 'y'))
raise ValueError(msg)
# OK, now run the basic tests on filled versions
return np.testing.assert_array_compare(comparison,
x.filled(fill_value),
y.filled(fill_value),
err_msg=err_msg,
verbose=verbose, header=header)
def assert_array_equal(x, y, err_msg='', verbose=True):
"""
Checks the elementwise equality of two masked arrays.
"""
assert_array_compare(operator.__eq__, x, y,
err_msg=err_msg, verbose=verbose,
header='Arrays are not equal')
def fail_if_array_equal(x, y, err_msg='', verbose=True):
"""
Raises an assertion error if two masked arrays are not equal elementwise.
"""
def compare(x, y):
return (not np.alltrue(approx(x, y)))
assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
header='Arrays are not equal')
def assert_array_approx_equal(x, y, decimal=6, err_msg='', verbose=True):
"""
    Checks the equality of two masked arrays, up to a given number of decimals.
The equality is checked elementwise.
"""
def compare(x, y):
"Returns the result of the loose comparison between x and y)."
return approx(x, y, rtol=10. ** -decimal)
assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
header='Arrays are not almost equal')
def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
"""
    Checks the equality of two masked arrays, up to a given number of decimals.
The equality is checked elementwise.
"""
def compare(x, y):
"Returns the result of the loose comparison between x and y)."
return almost(x, y, decimal)
assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
header='Arrays are not almost equal')
def assert_array_less(x, y, err_msg='', verbose=True):
"""
Checks that x is smaller than y elementwise.
"""
assert_array_compare(operator.__lt__, x, y,
err_msg=err_msg, verbose=verbose,
header='Arrays are not less-ordered')
def assert_mask_equal(m1, m2, err_msg=''):
"""
Asserts the equality of two masks.
"""
if m1 is nomask:
assert_(m2 is nomask)
if m2 is nomask:
assert_(m1 is nomask)
assert_array_equal(m1, m2, err_msg=err_msg)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/ma/mrecords.py | """:mod:`numpy.ma.mrecords`
Defines the equivalent of :class:`numpy.recarray` for masked arrays,
where fields can be accessed as attributes.
Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes
and the masking of individual fields.
.. moduleauthor:: Pierre Gerard-Marchant
"""
# We should make sure that no field is called '_mask','mask','_fieldmask',
# or whatever restricted keywords. An idea would be to not bother in the
# first place, and then rename the invalid fields with a trailing
# underscore. Maybe we could just overload the parser function?
from numpy.ma import (
MAError, MaskedArray, masked, nomask, masked_array, getdata,
getmaskarray, filled
)
import numpy.ma as ma
import warnings
import numpy as np
from numpy import (
bool_, dtype, ndarray, recarray, array as narray
)
from numpy.core.records import (
fromarrays as recfromarrays, fromrecords as recfromrecords
)
_byteorderconv = np.core.records._byteorderconv
_check_fill_value = ma.core._check_fill_value
__all__ = [
'MaskedRecords', 'mrecarray', 'fromarrays', 'fromrecords',
'fromtextfile', 'addfield',
]
reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype']
def _checknames(descr, names=None):
"""
Checks that field names ``descr`` are not reserved keywords.
If this is the case, a default 'f%i' is substituted. If the argument
`names` is not None, updates the field names to valid names.
"""
ndescr = len(descr)
default_names = ['f%i' % i for i in range(ndescr)]
if names is None:
new_names = default_names
else:
if isinstance(names, (tuple, list)):
new_names = names
elif isinstance(names, str):
new_names = names.split(',')
else:
raise NameError(f'illegal input names {names!r}')
nnames = len(new_names)
if nnames < ndescr:
new_names += default_names[nnames:]
ndescr = []
for (n, d, t) in zip(new_names, default_names, descr.descr):
if n in reserved_fields:
if t[0] in reserved_fields:
ndescr.append((d, t[1]))
else:
ndescr.append(t)
else:
ndescr.append((n, t[1]))
return np.dtype(ndescr)
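# Illustrative sketch (not part of the original module): requesting a
# reserved field name falls back to the field's existing name (or the
# positional default 'f<i>' when that is reserved too):
#
#     descr = np.dtype([('a', int), ('b', float)])
#     _checknames(descr, names=['_data', 'b'])
#     # -> fields named ('a', 'b'): the reserved '_data' request is ignored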
def _get_fieldmask(self):
mdescr = [(n, '|b1') for n in self.dtype.names]
fdmask = np.empty(self.shape, dtype=mdescr)
fdmask.flat = tuple([False] * len(mdescr))
return fdmask
class MaskedRecords(MaskedArray):
"""
Attributes
----------
_data : recarray
Underlying data, as a record array.
_mask : boolean array
Mask of the records. A record is masked when all its fields are
masked.
_fieldmask : boolean recarray
Record array of booleans, setting the mask of each individual field
of each record.
_fill_value : record
Filling values for each field.
"""
def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None,
formats=None, names=None, titles=None,
byteorder=None, aligned=False,
mask=nomask, hard_mask=False, fill_value=None, keep_mask=True,
copy=False,
**options):
self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset,
strides=strides, formats=formats, names=names,
titles=titles, byteorder=byteorder,
aligned=aligned,)
mdtype = ma.make_mask_descr(self.dtype)
if mask is nomask or not np.size(mask):
if not keep_mask:
self._mask = tuple([False] * len(mdtype))
else:
mask = np.array(mask, copy=copy)
if mask.shape != self.shape:
(nd, nm) = (self.size, mask.size)
if nm == 1:
mask = np.resize(mask, self.shape)
elif nm == nd:
mask = np.reshape(mask, self.shape)
else:
msg = "Mask and data not compatible: data size is %i, " + \
"mask size is %i."
raise MAError(msg % (nd, nm))
if not keep_mask:
self.__setmask__(mask)
self._sharedmask = True
else:
if mask.dtype == mdtype:
_mask = mask
else:
_mask = np.array([tuple([m] * len(mdtype)) for m in mask],
dtype=mdtype)
self._mask = _mask
return self
def __array_finalize__(self, obj):
# Make sure we have a _fieldmask by default
_mask = getattr(obj, '_mask', None)
if _mask is None:
objmask = getattr(obj, '_mask', nomask)
_dtype = ndarray.__getattribute__(self, 'dtype')
if objmask is nomask:
_mask = ma.make_mask_none(self.shape, dtype=_dtype)
else:
mdescr = ma.make_mask_descr(_dtype)
_mask = narray([tuple([m] * len(mdescr)) for m in objmask],
dtype=mdescr).view(recarray)
# Update some of the attributes
_dict = self.__dict__
_dict.update(_mask=_mask)
self._update_from(obj)
if _dict['_baseclass'] == ndarray:
_dict['_baseclass'] = recarray
return
@property
def _data(self):
"""
Returns the data as a recarray.
"""
return ndarray.view(self, recarray)
@property
def _fieldmask(self):
"""
Alias to mask.
"""
return self._mask
def __len__(self):
"""
Returns the length
"""
# We have more than one record
if self.ndim:
return len(self._data)
        # We have only one record: return the number of fields
return len(self.dtype)
def __getattribute__(self, attr):
try:
return object.__getattribute__(self, attr)
except AttributeError:
# attr must be a fieldname
pass
fielddict = ndarray.__getattribute__(self, 'dtype').fields
try:
res = fielddict[attr][:2]
except (TypeError, KeyError) as e:
raise AttributeError(
f'record array has no attribute {attr}') from e
# So far, so good
_localdict = ndarray.__getattribute__(self, '__dict__')
_data = ndarray.view(self, _localdict['_baseclass'])
obj = _data.getfield(*res)
if obj.dtype.names is not None:
raise NotImplementedError("MaskedRecords is currently limited to"
"simple records.")
# Get some special attributes
# Reset the object's mask
hasmasked = False
_mask = _localdict.get('_mask', None)
if _mask is not None:
try:
_mask = _mask[attr]
except IndexError:
# Couldn't find a mask: use the default (nomask)
pass
tp_len = len(_mask.dtype)
hasmasked = _mask.view((bool, ((tp_len,) if tp_len else ()))).any()
if (obj.shape or hasmasked):
obj = obj.view(MaskedArray)
obj._baseclass = ndarray
obj._isfield = True
obj._mask = _mask
# Reset the field values
_fill_value = _localdict.get('_fill_value', None)
if _fill_value is not None:
try:
obj._fill_value = _fill_value[attr]
except ValueError:
obj._fill_value = None
else:
obj = obj.item()
return obj
def __setattr__(self, attr, val):
"""
Sets the attribute attr to the value val.
"""
# Should we call __setmask__ first ?
if attr in ['mask', 'fieldmask']:
self.__setmask__(val)
return
# Create a shortcut (so that we don't have to call getattr all the time)
_localdict = object.__getattribute__(self, '__dict__')
# Check whether we're creating a new field
newattr = attr not in _localdict
try:
# Is attr a generic attribute ?
ret = object.__setattr__(self, attr, val)
except Exception:
# Not a generic attribute: exit if it's not a valid field
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
optinfo = ndarray.__getattribute__(self, '_optinfo') or {}
if not (attr in fielddict or attr in optinfo):
raise
else:
# Get the list of names
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
# Check the attribute
if attr not in fielddict:
return ret
if newattr:
# We just added this one or this setattr worked on an
# internal attribute.
try:
object.__delattr__(self, attr)
except Exception:
return ret
# Let's try to set the field
try:
res = fielddict[attr][:2]
except (TypeError, KeyError) as e:
raise AttributeError(
f'record array has no attribute {attr}') from e
if val is masked:
_fill_value = _localdict['_fill_value']
if _fill_value is not None:
dval = _localdict['_fill_value'][attr]
else:
dval = val
mval = True
else:
dval = filled(val)
mval = getmaskarray(val)
obj = ndarray.__getattribute__(self, '_data').setfield(dval, *res)
_localdict['_mask'].__setitem__(attr, mval)
return obj
def __getitem__(self, indx):
"""
Returns all the fields sharing the same fieldname base.
The fieldname base is either `_data` or `_mask`.
"""
_localdict = self.__dict__
_mask = ndarray.__getattribute__(self, '_mask')
_data = ndarray.view(self, _localdict['_baseclass'])
# We want a field
if isinstance(indx, str):
# Make sure _sharedmask is True to propagate back to _fieldmask
            # Don't use _set_mask, there are some copies being made that
            # break propagation. Don't force the mask to nomask, as that
            # wrecks easy masking.
obj = _data[indx].view(MaskedArray)
obj._mask = _mask[indx]
obj._sharedmask = True
fval = _localdict['_fill_value']
if fval is not None:
obj._fill_value = fval[indx]
# Force to masked if the mask is True
if not obj.ndim and obj._mask:
return masked
return obj
# We want some elements.
# First, the data.
obj = np.array(_data[indx], copy=False).view(mrecarray)
obj._mask = np.array(_mask[indx], copy=False).view(recarray)
return obj
def __setitem__(self, indx, value):
"""
Sets the given record to value.
"""
MaskedArray.__setitem__(self, indx, value)
if isinstance(indx, str):
self._mask[indx] = ma.getmaskarray(value)
def __str__(self):
"""
Calculates the string representation.
"""
if self.size > 1:
mstr = [f"({','.join([str(i) for i in s])})"
for s in zip(*[getattr(self, f) for f in self.dtype.names])]
return f"[{', '.join(mstr)}]"
else:
mstr = [f"{','.join([str(i) for i in s])}"
for s in zip([getattr(self, f) for f in self.dtype.names])]
return f"({', '.join(mstr)})"
def __repr__(self):
"""
Calculates the repr representation.
"""
_names = self.dtype.names
fmt = "%%%is : %%s" % (max([len(n) for n in _names]) + 4,)
reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names]
reprstr.insert(0, 'masked_records(')
reprstr.extend([fmt % (' fill_value', self.fill_value),
' )'])
return str("\n".join(reprstr))
def view(self, dtype=None, type=None):
"""
Returns a view of the mrecarray.
"""
# OK, basic copy-paste from MaskedArray.view.
if dtype is None:
if type is None:
output = ndarray.view(self)
else:
output = ndarray.view(self, type)
# Here again.
elif type is None:
try:
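                # NB: ``issubclass`` raises TypeError when ``dtype`` is not a
                # class (e.g. a dtype string); that deliberately routes
                # execution to the ``except TypeError`` handler below.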
if issubclass(dtype, ndarray):
output = ndarray.view(self, dtype)
else:
output = ndarray.view(self, dtype)
except TypeError:
dtype = np.dtype(dtype)
# we need to revert to MaskedArray, but keeping the possibility
# of subclasses (eg, TimeSeriesRecords), so we'll force a type
# set to the first parent
if dtype.fields is None:
basetype = self.__class__.__bases__[0]
output = self.__array__().view(dtype, basetype)
output._update_from(self)
else:
output = ndarray.view(self, dtype)
output._fill_value = None
else:
output = ndarray.view(self, dtype, type)
# Update the mask, just like in MaskedArray.view
if (getattr(output, '_mask', nomask) is not nomask):
mdtype = ma.make_mask_descr(output.dtype)
output._mask = self._mask.view(mdtype, ndarray)
output._mask.shape = output.shape
return output
def harden_mask(self):
"""
Forces the mask to hard.
"""
self._hardmask = True
def soften_mask(self):
"""
Forces the mask to soft
"""
self._hardmask = False
def copy(self):
"""
Returns a copy of the masked record.
"""
copied = self._data.copy().view(type(self))
copied._mask = self._mask.copy()
return copied
def tolist(self, fill_value=None):
"""
Return the data portion of the array as a list.
Data items are converted to the nearest compatible Python type.
Masked values are converted to fill_value. If fill_value is None,
the corresponding entries in the output list will be ``None``.
"""
if fill_value is not None:
return self.filled(fill_value).tolist()
result = narray(self.filled().tolist(), dtype=object)
mask = narray(self._mask.tolist())
result[mask] = None
return result.tolist()
def __getstate__(self):
"""Return the internal state of the masked array.
This is for pickling.
"""
state = (1,
self.shape,
self.dtype,
self.flags.fnc,
self._data.tobytes(),
self._mask.tobytes(),
self._fill_value,
)
return state
def __setstate__(self, state):
"""
Restore the internal state of the masked array.
        This is for pickling. ``state`` is typically the output of
        ``__getstate__``, and is a 7-tuple:
        - version number
        - a tuple giving the shape of the data
        - a typecode for the data
        - a boolean for the Fortran-contiguity flag
        - a binary string for the data
        - a binary string for the mask
        - the fill_value
"""
(ver, shp, typ, isf, raw, msk, flv) = state
ndarray.__setstate__(self, (shp, typ, isf, raw))
mdtype = dtype([(k, bool_) for (k, _) in self.dtype.descr])
self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk))
self.fill_value = flv
def __reduce__(self):
"""
Return a 3-tuple for pickling a MaskedArray.
"""
return (_mrreconstruct,
(self.__class__, self._baseclass, (0,), 'b',),
self.__getstate__())
def _mrreconstruct(subtype, baseclass, baseshape, basetype,):
"""
Build a new MaskedArray from the information stored in a pickle.
"""
_data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype)
_mask = ndarray.__new__(ndarray, baseshape, 'b1')
return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
mrecarray = MaskedRecords
###############################################################################
# Constructors #
###############################################################################
def fromarrays(arraylist, dtype=None, shape=None, formats=None,
names=None, titles=None, aligned=False, byteorder=None,
fill_value=None):
"""
Creates a mrecarray from a (flat) list of masked arrays.
Parameters
----------
arraylist : sequence
A list of (masked) arrays. Each element of the sequence is first converted
to a masked array if needed. If a 2D array is passed as argument, it is
processed line by line
dtype : {None, dtype}, optional
Data type descriptor.
shape : {None, integer}, optional
Number of records. If None, shape is defined from the shape of the
first array in the list.
formats : {None, sequence}, optional
Sequence of formats for each individual field. If None, the formats will
be autodetected by inspecting the fields and selecting the highest dtype
possible.
names : {None, sequence}, optional
Sequence of the names of each field.
fill_value : {None, sequence}, optional
Sequence of data to be used as filling values.
Notes
-----
Lists of tuples should be preferred over lists of lists for faster processing.
"""
datalist = [getdata(x) for x in arraylist]
masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist]
_array = recfromarrays(datalist,
dtype=dtype, shape=shape, formats=formats,
names=names, titles=titles, aligned=aligned,
byteorder=byteorder).view(mrecarray)
_array._mask.flat = list(zip(*masklist))
if fill_value is not None:
_array.fill_value = fill_value
return _array
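# Illustrative sketch (not part of the original module): combining two masked
# arrays into a single mrecarray whose per-field mask mirrors the inputs:
#
#     x = ma.array([1, 2, 3], mask=[0, 1, 0])
#     y = ma.array([1.5, 2.5, 3.5], mask=[0, 0, 1])
#     rec = fromarrays([x, y], names='a,b')
#     rec.a    # masked_array(data=[1, --, 3], ...)
#     rec.b    # masked_array(data=[1.5, 2.5, --], ...)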
def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None,
titles=None, aligned=False, byteorder=None,
fill_value=None, mask=nomask):
"""
Creates a MaskedRecords from a list of records.
Parameters
----------
reclist : sequence
A list of records. Each element of the sequence is first converted
to a masked array if needed. If a 2D array is passed as argument, it is
processed line by line
dtype : {None, dtype}, optional
Data type descriptor.
shape : {None,int}, optional
Number of records. If None, ``shape`` is defined from the shape of the
first array in the list.
formats : {None, sequence}, optional
Sequence of formats for each individual field. If None, the formats will
be autodetected by inspecting the fields and selecting the highest dtype
possible.
names : {None, sequence}, optional
Sequence of the names of each field.
fill_value : {None, sequence}, optional
Sequence of data to be used as filling values.
mask : {nomask, sequence}, optional.
External mask to apply on the data.
Notes
-----
Lists of tuples should be preferred over lists of lists for faster processing.
"""
# Grab the initial _fieldmask, if needed:
_mask = getattr(reclist, '_mask', None)
# Get the list of records.
if isinstance(reclist, ndarray):
# Make sure we don't have some hidden mask
if isinstance(reclist, MaskedArray):
reclist = reclist.filled().view(ndarray)
# Grab the initial dtype, just in case
if dtype is None:
dtype = reclist.dtype
reclist = reclist.tolist()
mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats,
names=names, titles=titles,
aligned=aligned, byteorder=byteorder).view(mrecarray)
# Set the fill_value if needed
if fill_value is not None:
mrec.fill_value = fill_value
# Now, let's deal w/ the mask
if mask is not nomask:
mask = np.array(mask, copy=False)
maskrecordlength = len(mask.dtype)
if maskrecordlength:
mrec._mask.flat = mask
elif mask.ndim == 2:
mrec._mask.flat = [tuple(m) for m in mask]
else:
mrec.__setmask__(mask)
if _mask is not None:
mrec._mask[:] = _mask
return mrec
def _guessvartypes(arr):
"""
Tries to guess the dtypes of the str_ ndarray `arr`.
Guesses by testing element-wise conversion. Returns a list of dtypes.
    The array is first converted to ndarray. If the array is 2D, the test
    is performed on the first row. An exception is raised if the array is
    3D or more.
"""
vartypes = []
arr = np.asarray(arr)
if arr.ndim == 2:
arr = arr[0]
elif arr.ndim > 2:
raise ValueError("The array should be 2D at most!")
# Start the conversion loop.
for f in arr:
try:
int(f)
except (ValueError, TypeError):
try:
float(f)
except (ValueError, TypeError):
try:
complex(f)
except (ValueError, TypeError):
vartypes.append(arr.dtype)
else:
vartypes.append(np.dtype(complex))
else:
vartypes.append(np.dtype(float))
else:
vartypes.append(np.dtype(int))
return vartypes
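# Illustrative sketch (not part of the original module): each entry is probed
# with int(), then float(), then complex(); anything else keeps the original
# string dtype of the array:
#
#     _guessvartypes(np.array(['3', '4.5', '1+2j', 'spam']))
#     # -> [dtype(int), dtype(float), dtype(complex), dtype('<U4')]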
def openfile(fname):
"""
Opens the file handle of file `fname`.
"""
# A file handle
if hasattr(fname, 'readline'):
return fname
# Try to open the file and guess its type
try:
f = open(fname)
except FileNotFoundError as e:
raise FileNotFoundError(f"No such file: '{fname}'") from e
if f.readline()[:2] != "\\x":
f.seek(0, 0)
return f
f.close()
raise NotImplementedError("Wow, binary file")
def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='',
varnames=None, vartypes=None,
*, delimitor=np._NoValue): # backwards compatibility
"""
Creates a mrecarray from data stored in the file `filename`.
Parameters
----------
fname : {file name/handle}
Handle of an opened file.
delimiter : {None, string}, optional
Alphanumeric character used to separate columns in the file.
        If None, any (group of) whitespace string(s) will be used.
commentchar : {'#', string}, optional
Alphanumeric character used to mark the start of a comment.
missingchar : {'', string}, optional
String indicating missing data, and used to create the masks.
varnames : {None, sequence}, optional
Sequence of the variable names. If None, a list will be created from
the first non empty line of the file.
vartypes : {None, sequence}, optional
Sequence of the variables dtypes. If None, it will be estimated from
the first non-commented line.
    Ultra simple: the varnames are in the header, one line.
    """
if delimitor is not np._NoValue:
if delimiter is not None:
raise TypeError("fromtextfile() got multiple values for argument "
"'delimiter'")
# NumPy 1.22.0, 2021-09-23
warnings.warn("The 'delimitor' keyword argument of "
"numpy.ma.mrecords.fromtextfile() is deprecated "
"since NumPy 1.22.0, use 'delimiter' instead.",
DeprecationWarning, stacklevel=2)
delimiter = delimitor
# Try to open the file.
ftext = openfile(fname)
# Get the first non-empty line as the varnames
while True:
line = ftext.readline()
firstline = line[:line.find(commentchar)].strip()
_varnames = firstline.split(delimiter)
if len(_varnames) > 1:
break
if varnames is None:
varnames = _varnames
# Get the data.
_variables = masked_array([line.strip().split(delimiter) for line in ftext
if line[0] != commentchar and len(line) > 1])
(_, nfields) = _variables.shape
ftext.close()
# Try to guess the dtype.
if vartypes is None:
vartypes = _guessvartypes(_variables[0])
else:
vartypes = [np.dtype(v) for v in vartypes]
if len(vartypes) != nfields:
msg = "Attempting to %i dtypes for %i fields!"
msg += " Reverting to default."
warnings.warn(msg % (len(vartypes), nfields), stacklevel=2)
vartypes = _guessvartypes(_variables[0])
# Construct the descriptor.
mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)]
mfillv = [ma.default_fill_value(f) for f in vartypes]
# Get the data and the mask.
# We just need a list of masked_arrays. It's easier to create it like that:
_mask = (_variables.T == missingchar)
_datalist = [masked_array(a, mask=m, dtype=t, fill_value=f)
for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)]
return fromarrays(_datalist, dtype=mdescr)
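# Illustrative sketch (not part of the original module): parsing a small
# in-memory table. Any object with a ``readline`` method satisfies
# ``openfile``, so ``io.StringIO`` works; cells equal to ``missingchar``
# (the empty string by default) would come back masked:
#
#     import io
#     rec = fromtextfile(io.StringIO("a b c\n1 2 3\n4 5 6\n"), delimiter=' ')
#     rec.a    # masked_array(data=[1, 4], ...)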
def addfield(mrecord, newfield, newfieldname=None):
"""Adds a new field to the masked record array
Uses `newfield` as data and `newfieldname` as name. If `newfieldname`
is None, the new field name is set to 'fi', where `i` is the number of
existing fields.
"""
_data = mrecord._data
_mask = mrecord._mask
if newfieldname is None or newfieldname in reserved_fields:
newfieldname = 'f%i' % len(_data.dtype)
newfield = ma.array(newfield)
# Get the new data.
# Create a new empty recarray
newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)])
newdata = recarray(_data.shape, newdtype)
# Add the existing field
[newdata.setfield(_data.getfield(*f), *f)
for f in _data.dtype.fields.values()]
# Add the new field
newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname])
newdata = newdata.view(MaskedRecords)
# Get the new mask
# Create a new empty recarray
newmdtype = np.dtype([(n, bool_) for n in newdtype.names])
newmask = recarray(_data.shape, newmdtype)
# Add the old masks
[newmask.setfield(_mask.getfield(*f), *f)
for f in _mask.dtype.fields.values()]
# Add the mask of the new field
newmask.setfield(getmaskarray(newfield),
*newmask.dtype.fields[newfieldname])
newdata._mask = newmask
return newdata
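# Illustrative sketch (not part of the original module): extending a masked
# record array with an extra, partially masked column:
#
#     base = fromarrays([ma.array([1, 2], mask=[0, 1])], names='a')
#     out = addfield(base, ma.array([10., 20.], mask=[1, 0]), 'b')
#     out.b    # masked_array(data=[--, 20.0], ...)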
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/ma/extras.py | """
Masked arrays add-ons.
A collection of utilities for `numpy.ma`.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
:version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $
"""
__all__ = [
'apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d',
'atleast_3d', 'average', 'clump_masked', 'clump_unmasked', 'column_stack',
'compress_cols', 'compress_nd', 'compress_rowcols', 'compress_rows',
'count_masked', 'corrcoef', 'cov', 'diagflat', 'dot', 'dstack', 'ediff1d',
'flatnotmasked_contiguous', 'flatnotmasked_edges', 'hsplit', 'hstack',
'isin', 'in1d', 'intersect1d', 'mask_cols', 'mask_rowcols', 'mask_rows',
'masked_all', 'masked_all_like', 'median', 'mr_', 'ndenumerate',
'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack',
'setdiff1d', 'setxor1d', 'stack', 'unique', 'union1d', 'vander', 'vstack',
]
import itertools
import warnings
from . import core as ma
from .core import (
MaskedArray, MAError, add, array, asarray, concatenate, filled, count,
getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or,
nomask, ones, sort, zeros, getdata, get_masked_subclass, dot,
mask_rowcols
)
import numpy as np
from numpy import ndarray, array as nxarray
from numpy.core.multiarray import normalize_axis_index
from numpy.core.numeric import normalize_axis_tuple
from numpy.lib.function_base import _ureduce
from numpy.lib.index_tricks import AxisConcatenator
def issequence(seq):
"""
Is seq a sequence (ndarray, list or tuple)?
"""
return isinstance(seq, (ndarray, tuple, list))
def count_masked(arr, axis=None):
"""
Count the number of masked elements along the given axis.
Parameters
----------
arr : array_like
An array with (possibly) masked elements.
axis : int, optional
Axis along which to count. If None (default), a flattened
version of the array is used.
Returns
-------
count : int, ndarray
The total number of masked elements (axis=None) or the number
of masked elements along each slice of the given axis.
See Also
--------
MaskedArray.count : Count non-masked elements.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(9).reshape((3,3))
>>> a = ma.array(a)
>>> a[1, 0] = ma.masked
>>> a[1, 2] = ma.masked
>>> a[2, 1] = ma.masked
>>> a
masked_array(
data=[[0, 1, 2],
[--, 4, --],
[6, --, 8]],
mask=[[False, False, False],
[ True, False, True],
[False, True, False]],
fill_value=999999)
>>> ma.count_masked(a)
3
When the `axis` keyword is used an array is returned.
>>> ma.count_masked(a, axis=0)
array([1, 1, 1])
>>> ma.count_masked(a, axis=1)
array([0, 2, 1])
"""
m = getmaskarray(arr)
return m.sum(axis)
def masked_all(shape, dtype=float):
"""
Empty masked array with all elements masked.
Return an empty masked array of the given shape and dtype, where all the
data are masked.
Parameters
----------
shape : int or tuple of ints
Shape of the required MaskedArray, e.g., ``(2, 3)`` or ``2``.
dtype : dtype, optional
Data type of the output.
Returns
-------
a : MaskedArray
A masked array with all data masked.
See Also
--------
masked_all_like : Empty masked array modelled on an existing array.
Examples
--------
>>> import numpy.ma as ma
>>> ma.masked_all((3, 3))
masked_array(
data=[[--, --, --],
[--, --, --],
[--, --, --]],
mask=[[ True, True, True],
[ True, True, True],
[ True, True, True]],
fill_value=1e+20,
dtype=float64)
The `dtype` parameter defines the underlying data type.
>>> a = ma.masked_all((3, 3))
>>> a.dtype
dtype('float64')
>>> a = ma.masked_all((3, 3), dtype=np.int32)
>>> a.dtype
dtype('int32')
"""
a = masked_array(np.empty(shape, dtype),
mask=np.ones(shape, make_mask_descr(dtype)))
return a
def masked_all_like(arr):
"""
Empty masked array with the properties of an existing array.
Return an empty masked array of the same shape and dtype as
the array `arr`, where all the data are masked.
Parameters
----------
arr : ndarray
An array describing the shape and dtype of the required MaskedArray.
Returns
-------
a : MaskedArray
A masked array with all data masked.
Raises
------
AttributeError
If `arr` doesn't have a shape attribute (i.e. not an ndarray)
See Also
--------
masked_all : Empty masked array with all elements masked.
Examples
--------
>>> import numpy.ma as ma
>>> arr = np.zeros((2, 3), dtype=np.float32)
>>> arr
array([[0., 0., 0.],
[0., 0., 0.]], dtype=float32)
>>> ma.masked_all_like(arr)
masked_array(
data=[[--, --, --],
[--, --, --]],
mask=[[ True, True, True],
[ True, True, True]],
fill_value=1e+20,
dtype=float32)
The dtype of the masked array matches the dtype of `arr`.
>>> arr.dtype
dtype('float32')
>>> ma.masked_all_like(arr).dtype
dtype('float32')
"""
a = np.empty_like(arr).view(MaskedArray)
a._mask = np.ones(a.shape, dtype=make_mask_descr(a.dtype))
return a
#####--------------------------------------------------------------------------
#---- --- Standard functions ---
#####--------------------------------------------------------------------------
class _fromnxfunction:
"""
Defines a wrapper to adapt NumPy functions to masked arrays.
An instance of `_fromnxfunction` can be called with the same parameters
as the wrapped NumPy function. The docstring of `newfunc` is adapted from
the wrapped function as well, see `getdoc`.
This class should not be used directly. Instead, one of its extensions that
provides support for a specific type of input should be used.
Parameters
----------
funcname : str
The name of the function to be adapted. The function should be
in the NumPy namespace (i.e. ``np.funcname``).
"""
def __init__(self, funcname):
self.__name__ = funcname
self.__doc__ = self.getdoc()
def getdoc(self):
"""
Retrieve the docstring and signature from the function.
The ``__doc__`` attribute of the function is used as the docstring for
the new masked array version of the function. A note on application
of the function to the mask is appended.
Parameters
----------
None
"""
npfunc = getattr(np, self.__name__, None)
doc = getattr(npfunc, '__doc__', None)
if doc:
sig = self.__name__ + ma.get_object_signature(npfunc)
doc = ma.doc_note(doc, "The function is applied to both the _data "
"and the _mask, if any.")
return '\n\n'.join((sig, doc))
return
def __call__(self, *args, **params):
pass
class _fromnxfunction_single(_fromnxfunction):
"""
A version of `_fromnxfunction` that is called with a single array
argument followed by auxiliary args that are passed verbatim for
both the data and mask calls.
"""
def __call__(self, x, *args, **params):
func = getattr(np, self.__name__)
if isinstance(x, ndarray):
_d = func(x.__array__(), *args, **params)
_m = func(getmaskarray(x), *args, **params)
return masked_array(_d, mask=_m)
else:
_d = func(np.asarray(x), *args, **params)
_m = func(getmaskarray(x), *args, **params)
return masked_array(_d, mask=_m)
class _fromnxfunction_seq(_fromnxfunction):
"""
A version of `_fromnxfunction` that is called with a single sequence
of arrays followed by auxiliary args that are passed verbatim for
both the data and mask calls.
"""
def __call__(self, x, *args, **params):
func = getattr(np, self.__name__)
_d = func(tuple([np.asarray(a) for a in x]), *args, **params)
_m = func(tuple([getmaskarray(a) for a in x]), *args, **params)
return masked_array(_d, mask=_m)
class _fromnxfunction_args(_fromnxfunction):
"""
A version of `_fromnxfunction` that is called with multiple array
arguments. The first non-array-like input marks the beginning of the
arguments that are passed verbatim for both the data and mask calls.
Array arguments are processed independently and the results are
returned in a list. If only one array is found, the return value is
just the processed array instead of a list.
"""
def __call__(self, *args, **params):
func = getattr(np, self.__name__)
arrays = []
args = list(args)
while len(args) > 0 and issequence(args[0]):
arrays.append(args.pop(0))
res = []
for x in arrays:
_d = func(np.asarray(x), *args, **params)
_m = func(getmaskarray(x), *args, **params)
res.append(masked_array(_d, mask=_m))
if len(arrays) == 1:
return res[0]
return res
class _fromnxfunction_allargs(_fromnxfunction):
"""
A version of `_fromnxfunction` that is called with multiple array
arguments. Similar to `_fromnxfunction_args` except that all args
are converted to arrays even if they are not so already. This makes
it possible to process scalars as 1-D arrays. Only keyword arguments
    are passed through verbatim for the data and mask calls. Array
    arguments are processed independently and the results are returned
in a list. If only one arg is present, the return value is just the
processed array instead of a list.
"""
def __call__(self, *args, **params):
func = getattr(np, self.__name__)
res = []
for x in args:
_d = func(np.asarray(x), **params)
_m = func(getmaskarray(x), **params)
res.append(masked_array(_d, mask=_m))
if len(args) == 1:
return res[0]
return res
atleast_1d = _fromnxfunction_allargs('atleast_1d')
atleast_2d = _fromnxfunction_allargs('atleast_2d')
atleast_3d = _fromnxfunction_allargs('atleast_3d')
vstack = row_stack = _fromnxfunction_seq('vstack')
hstack = _fromnxfunction_seq('hstack')
column_stack = _fromnxfunction_seq('column_stack')
dstack = _fromnxfunction_seq('dstack')
stack = _fromnxfunction_seq('stack')
hsplit = _fromnxfunction_single('hsplit')
diagflat = _fromnxfunction_single('diagflat')
#####--------------------------------------------------------------------------
#----
#####--------------------------------------------------------------------------
def flatten_inplace(seq):
"""Flatten a sequence in place."""
k = 0
while (k != len(seq)):
while hasattr(seq[k], '__iter__'):
seq[k:(k + 1)] = seq[k]
k += 1
return seq
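# Illustrative sketch (not part of the original module): nested lists are
# expanded in place, left to right, until no element is iterable (note that
# strings, being iterable, would expand forever):
#
#     flatten_inplace([1, [2, [3, 4]], 5])    # -> [1, 2, 3, 4, 5]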
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
"""
(This docstring should be overwritten)
"""
arr = array(arr, copy=False, subok=True)
nd = arr.ndim
axis = normalize_axis_index(axis, nd)
ind = [0] * (nd - 1)
i = np.zeros(nd, 'O')
indlist = list(range(nd))
indlist.remove(axis)
i[axis] = slice(None, None)
outshape = np.asarray(arr.shape).take(indlist)
i.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
# if res is a number, then we have a smaller output array
asscalar = np.isscalar(res)
if not asscalar:
try:
len(res)
except TypeError:
asscalar = True
# Note: we shouldn't set the dtype of the output from the first result
# so we force the type to object, and build a list of dtypes. We'll
# just take the largest, to avoid some downcasting
dtypes = []
if asscalar:
dtypes.append(np.asarray(res).dtype)
outarr = zeros(outshape, object)
outarr[tuple(ind)] = res
Ntot = np.product(outshape)
k = 1
while k < Ntot:
# increment the index
ind[-1] += 1
n = -1
while (ind[n] >= outshape[n]) and (n > (1 - nd)):
ind[n - 1] += 1
ind[n] = 0
n -= 1
i.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
outarr[tuple(ind)] = res
dtypes.append(asarray(res).dtype)
k += 1
else:
res = array(res, copy=False, subok=True)
j = i.copy()
j[axis] = ([slice(None, None)] * res.ndim)
j.put(indlist, ind)
Ntot = np.product(outshape)
holdshape = outshape
outshape = list(arr.shape)
outshape[axis] = res.shape
dtypes.append(asarray(res).dtype)
outshape = flatten_inplace(outshape)
outarr = zeros(outshape, object)
outarr[tuple(flatten_inplace(j.tolist()))] = res
k = 1
while k < Ntot:
# increment the index
ind[-1] += 1
n = -1
while (ind[n] >= holdshape[n]) and (n > (1 - nd)):
ind[n - 1] += 1
ind[n] = 0
n -= 1
i.put(indlist, ind)
j.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
outarr[tuple(flatten_inplace(j.tolist()))] = res
dtypes.append(asarray(res).dtype)
k += 1
max_dtypes = np.dtype(np.asarray(dtypes).max())
if not hasattr(arr, '_mask'):
result = np.asarray(outarr, dtype=max_dtypes)
else:
result = asarray(outarr, dtype=max_dtypes)
result.fill_value = ma.default_fill_value(result)
return result
apply_along_axis.__doc__ = np.apply_along_axis.__doc__
def apply_over_axes(func, a, axes):
"""
(This docstring will be overwritten)
"""
val = asarray(a)
N = a.ndim
if array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = ma.expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError("function is not returning "
"an array of the correct shape")
return val
if apply_over_axes.__doc__ is not None:
apply_over_axes.__doc__ = np.apply_over_axes.__doc__[
:np.apply_over_axes.__doc__.find('Notes')].rstrip() + \
"""
Examples
--------
>>> a = np.ma.arange(24).reshape(2,3,4)
>>> a[:,0,1] = np.ma.masked
>>> a[:,1,:] = np.ma.masked
>>> a
masked_array(
data=[[[0, --, 2, 3],
[--, --, --, --],
[8, 9, 10, 11]],
[[12, --, 14, 15],
[--, --, --, --],
[20, 21, 22, 23]]],
mask=[[[False, True, False, False],
[ True, True, True, True],
[False, False, False, False]],
[[False, True, False, False],
[ True, True, True, True],
[False, False, False, False]]],
fill_value=999999)
>>> np.ma.apply_over_axes(np.ma.sum, a, [0,2])
masked_array(
data=[[[46],
[--],
[124]]],
mask=[[[False],
[ True],
[False]]],
fill_value=999999)
Tuple axis arguments to ufuncs are equivalent:
>>> np.ma.sum(a, axis=(0,2)).reshape((1,-1,1))
masked_array(
data=[[[46],
[--],
[124]]],
mask=[[[False],
[ True],
[False]]],
fill_value=999999)
"""
def average(a, axis=None, weights=None, returned=False, *,
keepdims=np._NoValue):
"""
Return the weighted average of array over the given axis.
Parameters
----------
a : array_like
Data to be averaged.
Masked entries are not taken into account in the computation.
axis : int, optional
Axis along which to average `a`. If None, averaging is done over
the flattened array.
weights : array_like, optional
The importance that each element has in the computation of the average.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If ``weights=None``, then all data in `a` are assumed to have a
weight equal to one. The 1-D calculation is::
avg = sum(a * weights) / sum(weights)
The only constraint on `weights` is that `sum(weights)` must not be 0.
returned : bool, optional
Flag indicating whether a tuple ``(result, sum of weights)``
should be returned as output (True), or just the result (False).
Default is False.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
*Note:* `keepdims` will not work with instances of `numpy.matrix`
or other classes whose methods do not support `keepdims`.
.. versionadded:: 1.23.0
Returns
-------
average, [sum_of_weights] : (tuple of) scalar or MaskedArray
The average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `np.float64`
if `a` is of integer type and floats smaller than `float64`, or the
input data-type, otherwise. If returned, `sum_of_weights` is always
`float64`.
Examples
--------
>>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True])
>>> np.ma.average(a, weights=[3, 1, 0, 0])
1.25
>>> x = np.ma.arange(6.).reshape(3, 2)
>>> x
masked_array(
data=[[0., 1.],
[2., 3.],
[4., 5.]],
mask=False,
fill_value=1e+20)
>>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3],
... returned=True)
>>> avg
masked_array(data=[2.6666666666666665, 3.6666666666666665],
mask=[False, False],
fill_value=1e+20)
With ``keepdims=True``, the following result has shape (3, 1).
>>> np.ma.average(x, axis=1, keepdims=True)
masked_array(
data=[[0.5],
[2.5],
[4.5]],
mask=False,
fill_value=1e+20)
"""
a = asarray(a)
m = getmask(a)
# inspired by 'average' in numpy/lib/function_base.py
if keepdims is np._NoValue:
# Don't pass on the keepdims argument if one wasn't given.
keepdims_kw = {}
else:
keepdims_kw = {'keepdims': keepdims}
if weights is None:
avg = a.mean(axis, **keepdims_kw)
scl = avg.dtype.type(a.count(axis))
else:
wgt = asarray(weights)
if issubclass(a.dtype.type, (np.integer, np.bool_)):
result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')
else:
result_dtype = np.result_type(a.dtype, wgt.dtype)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape, subok=True)
wgt = wgt.swapaxes(-1, axis)
if m is not nomask:
wgt = wgt*(~a.mask)
wgt.mask |= a.mask
scl = wgt.sum(axis=axis, dtype=result_dtype, **keepdims_kw)
avg = np.multiply(a, wgt,
dtype=result_dtype).sum(axis, **keepdims_kw) / scl
if returned:
if scl.shape != avg.shape:
scl = np.broadcast_to(scl, avg.shape).copy()
return avg, scl
else:
return avg
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int, optional
Axis along which the medians are computed. The default (None) is
to compute the median along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
False. Note that, if `overwrite_input` is True, and the input
is not already an `ndarray`, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
.. versionadded:: 1.10.0
Returns
-------
median : ndarray
A new array holding the result is returned unless out is
specified, in which case a reference to out is returned.
Return data-type is `float64` for integers and floats smaller than
`float64`, or the input data-type, otherwise.
See Also
--------
mean
Notes
-----
Given a vector ``V`` with ``N`` non masked values, the median of ``V``
is the middle value of a sorted copy of ``V`` (``Vs``) - i.e.
``Vs[(N-1)/2]``, when ``N`` is odd, or ``{Vs[N/2 - 1] + Vs[N/2]}/2``
when ``N`` is even.
Examples
--------
>>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4)
>>> np.ma.median(x)
1.5
>>> x = np.ma.array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4)
>>> np.ma.median(x)
2.5
>>> np.ma.median(x, axis=-1, overwrite_input=True)
masked_array(data=[2.0, 5.0],
mask=[False, False],
fill_value=1e+20)
"""
if not hasattr(a, 'mask'):
m = np.median(getdata(a, subok=True), axis=axis,
out=out, overwrite_input=overwrite_input,
keepdims=keepdims)
if isinstance(m, np.ndarray) and 1 <= m.ndim:
return masked_array(m, copy=False)
else:
return m
r, k = _ureduce(a, func=_median, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims:
return r.reshape(k)
else:
return r
def _median(a, axis=None, out=None, overwrite_input=False):
# when an unmasked NaN is present return it, so we need to sort the NaN
# values behind the mask
if np.issubdtype(a.dtype, np.inexact):
fill_value = np.inf
else:
fill_value = None
if overwrite_input:
if axis is None:
asorted = a.ravel()
asorted.sort(fill_value=fill_value)
else:
a.sort(axis=axis, fill_value=fill_value)
asorted = a
else:
asorted = sort(a, axis=axis, fill_value=fill_value)
if axis is None:
axis = 0
else:
axis = normalize_axis_index(axis, asorted.ndim)
if asorted.shape[axis] == 0:
# for empty axis integer indices fail so use slicing to get same result
# as median (which is mean of empty slice = nan)
indexer = [slice(None)] * asorted.ndim
indexer[axis] = slice(0, 0)
indexer = tuple(indexer)
return np.ma.mean(asorted[indexer], axis=axis, out=out)
if asorted.ndim == 1:
idx, odd = divmod(count(asorted), 2)
mid = asorted[idx + odd - 1:idx + 1]
if np.issubdtype(asorted.dtype, np.inexact) and asorted.size > 0:
# avoid inf / x = masked
s = mid.sum(out=out)
if not odd:
s = np.true_divide(s, 2., casting='safe', out=out)
s = np.lib.utils._median_nancheck(asorted, s, axis)
else:
s = mid.mean(out=out)
# if result is masked either the input contained enough
# minimum_fill_value so that it would be the median or all values
# masked
if np.ma.is_masked(s) and not np.all(asorted.mask):
return np.ma.minimum_fill_value(asorted)
return s
counts = count(asorted, axis=axis, keepdims=True)
h = counts // 2
    # if the number of elements is odd, duplicate the high index so that
    # taking the mean changes nothing
odd = counts % 2 == 1
l = np.where(odd, h, h-1)
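    # e.g. counts == 5 -> (l, h) == (2, 2): the single middle element;
    #      counts == 4 -> (l, h) == (1, 2): the two middle elements get averaged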
    lh = np.concatenate([l, h], axis=axis)
# get low and high median
low_high = np.take_along_axis(asorted, lh, axis=axis)
def replace_masked(s):
        # Replace masked entries with minimum_fill_value unless all values
        # are masked. This is required as the sort order of values equal or
        # larger than the fill value is undefined and a valid value could
        # otherwise end up elsewhere, e.g. [4, --, inf].
if np.ma.is_masked(s):
rep = (~np.all(asorted.mask, axis=axis, keepdims=True)) & s.mask
s.data[rep] = np.ma.minimum_fill_value(asorted)
s.mask[rep] = False
replace_masked(low_high)
if np.issubdtype(asorted.dtype, np.inexact):
# avoid inf / x = masked
s = np.ma.sum(low_high, axis=axis, out=out)
np.true_divide(s.data, 2., casting='unsafe', out=s.data)
s = np.lib.utils._median_nancheck(asorted, s, axis)
else:
s = np.ma.mean(low_high, axis=axis, out=out)
return s
def compress_nd(x, axis=None):
"""Suppress slices from multiple dimensions which contain masked values.
Parameters
----------
x : array_like, MaskedArray
The array to operate on. If not a MaskedArray instance (or if no array
elements are masked), `x` is interpreted as a MaskedArray with `mask`
set to `nomask`.
axis : tuple of ints or int, optional
        The dimensions from which to suppress slices can be configured with
        this parameter.
        - If axis is a tuple of ints, those are the axes to suppress slices from.
        - If axis is an int, then that is the only axis to suppress slices from.
        - If axis is None, all axes are selected.
Returns
-------
compress_array : ndarray
The compressed array.
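    Examples
    --------
    With the default ``axis=None``, every row and column containing a
    masked value is suppressed:
    >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
    ...                                                   [0, 0, 0],
    ...                                                   [0, 0, 0]])
    >>> np.ma.compress_nd(x)
    array([[4, 5],
           [7, 8]])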
"""
x = asarray(x)
m = getmask(x)
# Set axis to tuple of ints
if axis is None:
axis = tuple(range(x.ndim))
else:
axis = normalize_axis_tuple(axis, x.ndim)
# Nothing is masked: return x
if m is nomask or not m.any():
return x._data
# All is masked: return empty
if m.all():
return nxarray([])
# Filter elements through boolean indexing
data = x._data
for ax in axis:
axes = tuple(list(range(ax)) + list(range(ax + 1, x.ndim)))
data = data[(slice(None),)*ax + (~m.any(axis=axes),)]
return data
def compress_rowcols(x, axis=None):
"""
Suppress the rows and/or columns of a 2-D array that contain
masked values.
The suppression behavior is selected with the `axis` parameter.
- If axis is None, both rows and columns are suppressed.
- If axis is 0, only rows are suppressed.
- If axis is 1 or -1, only columns are suppressed.
Parameters
----------
x : array_like, MaskedArray
The array to operate on. If not a MaskedArray instance (or if no array
elements are masked), `x` is interpreted as a MaskedArray with
`mask` set to `nomask`. Must be a 2D array.
axis : int, optional
Axis along which to perform the operation. Default is None.
Returns
-------
compressed_array : ndarray
The compressed array.
Examples
--------
>>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> x
masked_array(
data=[[--, 1, 2],
[--, 4, 5],
[6, 7, 8]],
mask=[[ True, False, False],
[ True, False, False],
[False, False, False]],
fill_value=999999)
>>> np.ma.compress_rowcols(x)
array([[7, 8]])
>>> np.ma.compress_rowcols(x, 0)
array([[6, 7, 8]])
>>> np.ma.compress_rowcols(x, 1)
array([[1, 2],
[4, 5],
[7, 8]])
"""
if asarray(x).ndim != 2:
raise NotImplementedError("compress_rowcols works for 2D arrays only.")
return compress_nd(x, axis=axis)
def compress_rows(a):
"""
Suppress whole rows of a 2-D array that contain masked values.
This is equivalent to ``np.ma.compress_rowcols(a, 0)``, see
`compress_rowcols` for details.
See Also
--------
compress_rowcols
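    Examples
    --------
    >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
    ...                                                   [0, 0, 0],
    ...                                                   [0, 0, 0]])
    >>> np.ma.compress_rows(x)
    array([[3, 4, 5],
           [6, 7, 8]])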
"""
a = asarray(a)
if a.ndim != 2:
raise NotImplementedError("compress_rows works for 2D arrays only.")
return compress_rowcols(a, 0)
def compress_cols(a):
"""
Suppress whole columns of a 2-D array that contain masked values.
This is equivalent to ``np.ma.compress_rowcols(a, 1)``, see
`compress_rowcols` for details.
See Also
--------
compress_rowcols
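    Examples
    --------
    >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
    ...                                                   [0, 0, 0],
    ...                                                   [0, 0, 0]])
    >>> np.ma.compress_cols(x)
    array([[1, 2],
           [4, 5],
           [7, 8]])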
"""
a = asarray(a)
if a.ndim != 2:
raise NotImplementedError("compress_cols works for 2D arrays only.")
return compress_rowcols(a, 1)
def mask_rows(a, axis=np._NoValue):
"""
Mask rows of a 2D array that contain masked values.
This function is a shortcut to ``mask_rowcols`` with `axis` equal to 0.
See Also
--------
mask_rowcols : Mask rows and/or columns of a 2D array.
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.zeros((3, 3), dtype=int)
>>> a[1, 1] = 1
>>> a
array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
>>> a = ma.masked_equal(a, 1)
>>> a
masked_array(
data=[[0, 0, 0],
[0, --, 0],
[0, 0, 0]],
mask=[[False, False, False],
[False, True, False],
[False, False, False]],
fill_value=1)
>>> ma.mask_rows(a)
masked_array(
data=[[0, 0, 0],
[--, --, --],
[0, 0, 0]],
mask=[[False, False, False],
[ True, True, True],
[False, False, False]],
fill_value=1)
"""
if axis is not np._NoValue:
# remove the axis argument when this deprecation expires
# NumPy 1.18.0, 2019-11-28
warnings.warn(
"The axis argument has always been ignored, in future passing it "
"will raise TypeError", DeprecationWarning, stacklevel=2)
return mask_rowcols(a, 0)
def mask_cols(a, axis=np._NoValue):
"""
Mask columns of a 2D array that contain masked values.
This function is a shortcut to ``mask_rowcols`` with `axis` equal to 1.
See Also
--------
mask_rowcols : Mask rows and/or columns of a 2D array.
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.zeros((3, 3), dtype=int)
>>> a[1, 1] = 1
>>> a
array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
>>> a = ma.masked_equal(a, 1)
>>> a
masked_array(
data=[[0, 0, 0],
[0, --, 0],
[0, 0, 0]],
mask=[[False, False, False],
[False, True, False],
[False, False, False]],
fill_value=1)
>>> ma.mask_cols(a)
masked_array(
data=[[0, --, 0],
[0, --, 0],
[0, --, 0]],
mask=[[False, True, False],
[False, True, False],
[False, True, False]],
fill_value=1)
"""
if axis is not np._NoValue:
# remove the axis argument when this deprecation expires
# NumPy 1.18.0, 2019-11-28
warnings.warn(
"The axis argument has always been ignored, in future passing it "
"will raise TypeError", DeprecationWarning, stacklevel=2)
return mask_rowcols(a, 1)
#####--------------------------------------------------------------------------
#---- --- arraysetops ---
#####--------------------------------------------------------------------------
def ediff1d(arr, to_end=None, to_begin=None):
"""
Compute the differences between consecutive elements of an array.
This function is the equivalent of `numpy.ediff1d` that takes masked
values into account, see `numpy.ediff1d` for details.
See Also
--------
numpy.ediff1d : Equivalent function for ndarrays.
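    Examples
    --------
    Differences involving a masked value are themselves masked:
    >>> arr = np.ma.array([1, 2, 4, 7, 0], mask=[0, 0, 1, 0, 0])
    >>> np.ma.ediff1d(arr)
    masked_array(data=[1, --, --, -7],
                 mask=[False, True, True, False],
                 fill_value=999999)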
"""
arr = ma.asanyarray(arr).flat
ed = arr[1:] - arr[:-1]
arrays = [ed]
#
if to_begin is not None:
arrays.insert(0, to_begin)
if to_end is not None:
arrays.append(to_end)
#
if len(arrays) != 1:
        # We'll save ourselves a copy of a potentially large array in the common
        # case where neither to_begin nor to_end was given.
ed = hstack(arrays)
#
return ed
def unique(ar1, return_index=False, return_inverse=False):
"""
Finds the unique elements of an array.
Masked values are considered the same element (masked). The output array
is always a masked array. See `numpy.unique` for more details.
See Also
--------
numpy.unique : Equivalent function for ndarrays.
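    Examples
    --------
    >>> a = np.ma.array([1, 2, 1000, 2, 3], mask=[0, 0, 1, 0, 0])
    >>> np.ma.unique(a)
    masked_array(data=[1, 2, 3, --],
                 mask=[False, False, False, True],
                 fill_value=999999)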
"""
output = np.unique(ar1,
return_index=return_index,
return_inverse=return_inverse)
if isinstance(output, tuple):
output = list(output)
output[0] = output[0].view(MaskedArray)
output = tuple(output)
else:
output = output.view(MaskedArray)
return output
def intersect1d(ar1, ar2, assume_unique=False):
"""
Returns the unique elements common to both arrays.
    Masked values are considered equal to one another.
The output is always a masked array.
See `numpy.intersect1d` for more details.
See Also
--------
numpy.intersect1d : Equivalent function for ndarrays.
Examples
--------
>>> x = np.ma.array([1, 3, 3, 3], mask=[0, 0, 0, 1])
>>> y = np.ma.array([3, 1, 1, 1], mask=[0, 0, 0, 1])
>>> np.ma.intersect1d(x, y)
masked_array(data=[1, 3, --],
mask=[False, False, True],
fill_value=999999)
"""
if assume_unique:
aux = ma.concatenate((ar1, ar2))
else:
# Might be faster than unique( intersect1d( ar1, ar2 ) )?
aux = ma.concatenate((unique(ar1), unique(ar2)))
aux.sort()
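    # values common to both arrays now appear twice in a row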
return aux[:-1][aux[1:] == aux[:-1]]
def setxor1d(ar1, ar2, assume_unique=False):
"""
Set exclusive-or of 1-D arrays with unique elements.
The output is always a masked array. See `numpy.setxor1d` for more details.
See Also
--------
numpy.setxor1d : Equivalent function for ndarrays.
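    Examples
    --------
    >>> ar1 = np.ma.array([1, 2, 3, 2, 4])
    >>> ar2 = np.ma.array([2, 3, 5, 7, 5])
    >>> np.ma.setxor1d(ar1, ar2)
    masked_array(data=[1, 4, 5, 7],
                 mask=False,
                 fill_value=999999)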
"""
if not assume_unique:
ar1 = unique(ar1)
ar2 = unique(ar2)
aux = ma.concatenate((ar1, ar2))
if aux.size == 0:
return aux
aux.sort()
auxf = aux.filled()
# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0
flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True]))
# flag2 = ediff1d( flag ) == 0
flag2 = (flag[1:] == flag[:-1])
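    # flag2 is True exactly for the values that occur only once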
return aux[flag2]
def in1d(ar1, ar2, assume_unique=False, invert=False):
"""
Test whether each element of an array is also present in a second
array.
The output is always a masked array. See `numpy.in1d` for more details.
We recommend using :func:`isin` instead of `in1d` for new code.
See Also
--------
isin : Version of this function that preserves the shape of ar1.
numpy.in1d : Equivalent function for ndarrays.
Notes
-----
.. versionadded:: 1.4.0
"""
if not assume_unique:
ar1, rev_idx = unique(ar1, return_inverse=True)
ar2 = unique(ar2)
ar = ma.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = ma.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
def isin(element, test_elements, assume_unique=False, invert=False):
"""
Calculates `element in test_elements`, broadcasting over
`element` only.
The output is always a masked array of the same shape as `element`.
See `numpy.isin` for more details.
See Also
--------
in1d : Flattened version of this function.
numpy.isin : Equivalent function for ndarrays.
Notes
-----
.. versionadded:: 1.13.0
"""
element = ma.asarray(element)
return in1d(element, test_elements, assume_unique=assume_unique,
invert=invert).reshape(element.shape)
def union1d(ar1, ar2):
"""
Union of two arrays.
The output is always a masked array. See `numpy.union1d` for more details.
See Also
--------
numpy.union1d : Equivalent function for ndarrays.
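    Examples
    --------
    >>> ar1 = np.ma.array([1, 2, 3, 4])
    >>> ar2 = np.ma.array([3, 4, 5, 6])
    >>> np.ma.union1d(ar1, ar2)
    masked_array(data=[1, 2, 3, 4, 5, 6],
                 mask=False,
                 fill_value=999999)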
"""
return unique(ma.concatenate((ar1, ar2), axis=None))
def setdiff1d(ar1, ar2, assume_unique=False):
"""
Set difference of 1D arrays with unique elements.
The output is always a masked array. See `numpy.setdiff1d` for more
details.
See Also
--------
numpy.setdiff1d : Equivalent function for ndarrays.
Examples
--------
>>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1])
>>> np.ma.setdiff1d(x, [1, 2])
masked_array(data=[3, --],
mask=[False, True],
fill_value=999999)
"""
if assume_unique:
ar1 = ma.asarray(ar1).ravel()
else:
ar1 = unique(ar1)
ar2 = unique(ar2)
return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
###############################################################################
# Covariance #
###############################################################################
def _covhelper(x, y=None, rowvar=True, allow_masked=True):
"""
Private function for the computation of covariance and correlation
coefficients.
"""
x = ma.array(x, ndmin=2, copy=True, dtype=float)
xmask = ma.getmaskarray(x)
# Quick exit if we can't process masked data
if not allow_masked and xmask.any():
raise ValueError("Cannot process masked data.")
#
if x.shape[0] == 1:
rowvar = True
# Make sure that rowvar is either 0 or 1
rowvar = int(bool(rowvar))
axis = 1 - rowvar
if rowvar:
tup = (slice(None), None)
else:
tup = (None, slice(None))
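    # tup reshapes the per-variable mean so it broadcasts against x below,
    # e.g. with rowvar=1 the subtraction uses x.mean(axis=1)[:, None]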
#
if y is None:
xnotmask = np.logical_not(xmask).astype(int)
else:
y = array(y, copy=False, ndmin=2, dtype=float)
ymask = ma.getmaskarray(y)
if not allow_masked and ymask.any():
raise ValueError("Cannot process masked data.")
if xmask.any() or ymask.any():
if y.shape == x.shape:
# Define some common mask
common_mask = np.logical_or(xmask, ymask)
if common_mask is not nomask:
xmask = x._mask = y._mask = ymask = common_mask
x._sharedmask = False
y._sharedmask = False
x = ma.concatenate((x, y), axis)
xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int)
x -= x.mean(axis=rowvar)[tup]
return (x, xnotmask, rowvar)
def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None):
"""
Estimate the covariance matrix.
Except for the handling of missing data this function does the same as
`numpy.cov`. For more details and examples, see `numpy.cov`.
By default, masked values are recognized as such. If `x` and `y` have the
same shape, a common mask is allocated: if ``x[i,j]`` is masked, then
``y[i,j]`` will also be masked.
Setting `allow_masked` to False will raise an exception if values are
missing in either of the input arrays.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : bool, optional
Default normalization (False) is by ``(N-1)``, where ``N`` is the
number of observations given (unbiased estimate). If `bias` is True,
then normalization is by ``N``. This keyword can be overridden by
the keyword ``ddof`` in numpy versions >= 1.5.
allow_masked : bool, optional
If True, masked values are propagated pair-wise: if a value is masked
in `x`, the corresponding value is masked in `y`.
If False, raises a `ValueError` exception when some values are missing.
ddof : {None, int}, optional
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
.. versionadded:: 1.5
Raises
------
ValueError
Raised if some values are missing and `allow_masked` is False.
See Also
--------
numpy.cov
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError("ddof must be an integer")
# Set up ddof
if ddof is None:
if bias:
ddof = 0
else:
ddof = 1
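    # e.g. the default (bias=False) yields ddof=1, i.e. normalization by N-1;
    # bias=True yields ddof=0, i.e. normalization by N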
(x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)
if not rowvar:
fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof
result = (dot(x.T, x.conj(), strict=False) / fact).squeeze()
else:
fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof
result = (dot(x, x.T.conj(), strict=False) / fact).squeeze()
return result
def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True,
ddof=np._NoValue):
"""
Return Pearson product-moment correlation coefficients.
Except for the handling of missing data this function does the same as
`numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
allow_masked : bool, optional
If True, masked values are propagated pair-wise: if a value is masked
in `x`, the corresponding value is masked in `y`.
If False, raises an exception. Because `bias` is deprecated, this
argument needs to be treated as keyword only to avoid a warning.
ddof : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
See Also
--------
numpy.corrcoef : Equivalent function in top-level NumPy module.
cov : Estimate the covariance matrix.
Notes
-----
This function accepts but discards arguments `bias` and `ddof`. This is
for backwards compatibility with previous versions of this function. These
arguments had no effect on the return values of the function and can be
safely ignored in this and previous versions of numpy.
"""
msg = 'bias and ddof have no effect and are deprecated'
if bias is not np._NoValue or ddof is not np._NoValue:
# 2015-03-15, 1.10
warnings.warn(msg, DeprecationWarning, stacklevel=2)
# Get the data
(x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)
# Compute the covariance matrix
if not rowvar:
fact = np.dot(xnotmask.T, xnotmask) * 1.
c = (dot(x.T, x.conj(), strict=False) / fact).squeeze()
else:
fact = np.dot(xnotmask, xnotmask.T) * 1.
c = (dot(x, x.T.conj(), strict=False) / fact).squeeze()
# Check whether we have a scalar
try:
diag = ma.diagonal(c)
except ValueError:
return 1
#
if xnotmask.all():
_denom = ma.sqrt(ma.multiply.outer(diag, diag))
else:
_denom = diagflat(diag)
_denom._sharedmask = False # We know return is always a copy
n = x.shape[1 - rowvar]
if rowvar:
for i in range(n - 1):
for j in range(i + 1, n):
_x = mask_cols(vstack((x[i], x[j]))).var(axis=1)
_denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
else:
for i in range(n - 1):
for j in range(i + 1, n):
_x = mask_cols(
vstack((x[:, i], x[:, j]))).var(axis=1)
_denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
return c / _denom
#####--------------------------------------------------------------------------
#---- --- Concatenation helpers ---
#####--------------------------------------------------------------------------
class MAxisConcatenator(AxisConcatenator):
"""
Translate slice objects to concatenation along an axis.
For documentation on usage, see `mr_class`.
See Also
--------
mr_class
"""
concatenate = staticmethod(concatenate)
@classmethod
def makemat(cls, arr):
# There used to be a view as np.matrix here, but we may eventually
# deprecate that class. In preparation, we use the unmasked version
# to construct the matrix (with copy=False for backwards compatibility
# with the .view)
data = super().makemat(arr.data, copy=False)
return array(data, mask=arr.mask)
def __getitem__(self, key):
# matrix builder syntax, like 'a, b; c, d'
if isinstance(key, str):
raise MAError("Unavailable for masked array.")
return super().__getitem__(key)
class mr_class(MAxisConcatenator):
"""
Translate slice objects to concatenation along the first axis.
This is the masked array version of `lib.index_tricks.RClass`.
See Also
--------
lib.index_tricks.RClass
Examples
--------
>>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])]
masked_array(data=[1, 2, 3, ..., 4, 5, 6],
mask=False,
fill_value=999999)
"""
def __init__(self):
MAxisConcatenator.__init__(self, 0)
mr_ = mr_class()
#####--------------------------------------------------------------------------
#---- Find unmasked data ---
#####--------------------------------------------------------------------------
def ndenumerate(a, compressed=True):
"""
Multidimensional index iterator.
Return an iterator yielding pairs of array coordinates and values,
skipping elements that are masked. With `compressed=False`,
`ma.masked` is yielded as the value of masked elements. This
behavior differs from that of `numpy.ndenumerate`, which yields the
value of the underlying data array.
Notes
-----
.. versionadded:: 1.23.0
Parameters
----------
a : array_like
An array with (possibly) masked elements.
compressed : bool, optional
If True (default), masked elements are skipped.
See Also
--------
numpy.ndenumerate : Equivalent function ignoring any mask.
Examples
--------
>>> a = np.ma.arange(9).reshape((3, 3))
>>> a[1, 0] = np.ma.masked
>>> a[1, 2] = np.ma.masked
>>> a[2, 1] = np.ma.masked
>>> a
masked_array(
data=[[0, 1, 2],
[--, 4, --],
[6, --, 8]],
mask=[[False, False, False],
[ True, False, True],
[False, True, False]],
fill_value=999999)
>>> for index, x in np.ma.ndenumerate(a):
... print(index, x)
(0, 0) 0
(0, 1) 1
(0, 2) 2
(1, 1) 4
(2, 0) 6
(2, 2) 8
>>> for index, x in np.ma.ndenumerate(a, compressed=False):
... print(index, x)
(0, 0) 0
(0, 1) 1
(0, 2) 2
(1, 0) --
(1, 1) 4
(1, 2) --
(2, 0) 6
(2, 1) --
(2, 2) 8
"""
for it, mask in zip(np.ndenumerate(a), getmaskarray(a).flat):
if not mask:
yield it
elif not compressed:
yield it[0], masked
def flatnotmasked_edges(a):
"""
Find the indices of the first and last unmasked values.
Expects a 1-D `MaskedArray`, returns None if all values are masked.
Parameters
----------
a : array_like
Input 1-D `MaskedArray`
Returns
-------
edges : ndarray or None
        The indices of the first and last non-masked values in the array.
Returns None if all values are masked.
See Also
--------
flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges
clump_masked, clump_unmasked
Notes
-----
Only accepts 1-D arrays.
Examples
--------
>>> a = np.ma.arange(10)
>>> np.ma.flatnotmasked_edges(a)
array([0, 9])
>>> mask = (a < 3) | (a > 8) | (a == 5)
>>> a[mask] = np.ma.masked
>>> np.array(a[~a.mask])
array([3, 4, 6, 7, 8])
>>> np.ma.flatnotmasked_edges(a)
array([3, 8])
>>> a[:] = np.ma.masked
>>> print(np.ma.flatnotmasked_edges(a))
None
"""
m = getmask(a)
if m is nomask or not np.any(m):
return np.array([0, a.size - 1])
unmasked = np.flatnonzero(~m)
if len(unmasked) > 0:
return unmasked[[0, -1]]
else:
return None
def notmasked_edges(a, axis=None):
"""
Find the indices of the first and last unmasked values along an axis.
If all values are masked, return None. Otherwise, return a list
of two tuples, corresponding to the indices of the first and last
unmasked values respectively.
Parameters
----------
a : array_like
The input array.
axis : int, optional
Axis along which to perform the operation.
If None (default), applies to a flattened version of the array.
Returns
-------
edges : ndarray or list
An array of start and end indexes if there are any masked data in
the array. If there are no masked data in the array, `edges` is a
list of the first and last index.
See Also
--------
flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous
clump_masked, clump_unmasked
Examples
--------
>>> a = np.arange(9).reshape((3, 3))
>>> m = np.zeros_like(a)
>>> m[1:, 1:] = 1
>>> am = np.ma.array(a, mask=m)
>>> np.array(am[~am.mask])
array([0, 1, 2, 3, 6])
>>> np.ma.notmasked_edges(am)
array([0, 6])
"""
a = asarray(a)
if axis is None or a.ndim == 1:
return flatnotmasked_edges(a)
m = getmaskarray(a)
idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim))
return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]),
tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]), ]
def flatnotmasked_contiguous(a):
"""
Find contiguous unmasked data in a masked array.
Parameters
----------
a : array_like
The input array.
Returns
-------
slice_list : list
A sorted sequence of `slice` objects (start index, end index).
.. versionchanged:: 1.15.0
Now returns an empty list instead of None for a fully masked array
See Also
--------
flatnotmasked_edges, notmasked_contiguous, notmasked_edges
clump_masked, clump_unmasked
Notes
-----
Only accepts 2-D arrays at most.
Examples
--------
>>> a = np.ma.arange(10)
>>> np.ma.flatnotmasked_contiguous(a)
[slice(0, 10, None)]
>>> mask = (a < 3) | (a > 8) | (a == 5)
>>> a[mask] = np.ma.masked
>>> np.array(a[~a.mask])
array([3, 4, 6, 7, 8])
>>> np.ma.flatnotmasked_contiguous(a)
[slice(3, 5, None), slice(6, 9, None)]
>>> a[:] = np.ma.masked
>>> np.ma.flatnotmasked_contiguous(a)
[]
"""
m = getmask(a)
if m is nomask:
return [slice(0, a.size)]
i = 0
result = []
for (k, g) in itertools.groupby(m.ravel()):
n = len(list(g))
if not k:
result.append(slice(i, i + n))
i += n
return result
def notmasked_contiguous(a, axis=None):
"""
Find contiguous unmasked data in a masked array along the given axis.
Parameters
----------
a : array_like
The input array.
axis : int, optional
Axis along which to perform the operation.
If None (default), applies to a flattened version of the array, and this
is the same as `flatnotmasked_contiguous`.
Returns
-------
endpoints : list
A list of slices (start and end indexes) of unmasked indexes
in the array.
If the input is 2d and axis is specified, the result is a list of lists.
See Also
--------
flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
clump_masked, clump_unmasked
Notes
-----
Only accepts 2-D arrays at most.
Examples
--------
>>> a = np.arange(12).reshape((3, 4))
>>> mask = np.zeros_like(a)
>>> mask[1:, :-1] = 1; mask[0, 1] = 1; mask[-1, 0] = 0
>>> ma = np.ma.array(a, mask=mask)
>>> ma
masked_array(
data=[[0, --, 2, 3],
[--, --, --, 7],
[8, --, --, 11]],
mask=[[False, True, False, False],
[ True, True, True, False],
[False, True, True, False]],
fill_value=999999)
>>> np.array(ma[~ma.mask])
array([ 0, 2, 3, 7, 8, 11])
>>> np.ma.notmasked_contiguous(ma)
[slice(0, 1, None), slice(2, 4, None), slice(7, 9, None), slice(11, 12, None)]
>>> np.ma.notmasked_contiguous(ma, axis=0)
[[slice(0, 1, None), slice(2, 3, None)], [], [slice(0, 1, None)], [slice(0, 3, None)]]
>>> np.ma.notmasked_contiguous(ma, axis=1)
[[slice(0, 1, None), slice(2, 4, None)], [slice(3, 4, None)], [slice(0, 1, None), slice(3, 4, None)]]
"""
a = asarray(a)
nd = a.ndim
if nd > 2:
raise NotImplementedError("Currently limited to atmost 2D array.")
if axis is None or nd == 1:
return flatnotmasked_contiguous(a)
#
result = []
#
other = (axis + 1) % 2
idx = [0, 0]
idx[axis] = slice(None, None)
#
for i in range(a.shape[other]):
idx[other] = i
result.append(flatnotmasked_contiguous(a[tuple(idx)]))
return result
def _ezclump(mask):
"""
Finds the clumps (groups of data with the same values) for a 1D bool array.
Returns a series of slices.
"""
if mask.ndim > 1:
mask = mask.ravel()
idx = (mask[1:] ^ mask[:-1]).nonzero()
idx = idx[0] + 1
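    # idx now holds the positions where the mask flips value, e.g. for
    # mask = [T, T, F, T] -> idx = [2, 3]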
if mask[0]:
if len(idx) == 0:
return [slice(0, mask.size)]
r = [slice(0, idx[0])]
r.extend((slice(left, right)
for left, right in zip(idx[1:-1:2], idx[2::2])))
else:
if len(idx) == 0:
return []
r = [slice(left, right) for left, right in zip(idx[:-1:2], idx[1::2])]
if mask[-1]:
r.append(slice(idx[-1], mask.size))
return r
def clump_unmasked(a):
"""
Return list of slices corresponding to the unmasked clumps of a 1-D array.
(A "clump" is defined as a contiguous region of the array).
Parameters
----------
a : ndarray
A one-dimensional masked array.
Returns
-------
slices : list of slice
The list of slices, one for each continuous region of unmasked
elements in `a`.
Notes
-----
.. versionadded:: 1.4.0
See Also
--------
flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
notmasked_contiguous, clump_masked
Examples
--------
>>> a = np.ma.masked_array(np.arange(10))
>>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
>>> np.ma.clump_unmasked(a)
[slice(3, 6, None), slice(7, 8, None)]
"""
mask = getattr(a, '_mask', nomask)
if mask is nomask:
return [slice(0, a.size)]
return _ezclump(~mask)
def clump_masked(a):
"""
Returns a list of slices corresponding to the masked clumps of a 1-D array.
(A "clump" is defined as a contiguous region of the array).
Parameters
----------
a : ndarray
A one-dimensional masked array.
Returns
-------
slices : list of slice
The list of slices, one for each continuous region of masked elements
in `a`.
Notes
-----
.. versionadded:: 1.4.0
See Also
--------
flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
notmasked_contiguous, clump_unmasked
Examples
--------
>>> a = np.ma.masked_array(np.arange(10))
>>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
>>> np.ma.clump_masked(a)
[slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)]
"""
mask = ma.getmask(a)
if mask is nomask:
return []
return _ezclump(mask)
###############################################################################
# Polynomial fit #
###############################################################################
def vander(x, n=None):
"""
Masked values in the input array result in rows of zeros.
"""
_vander = np.vander(x, n)
m = getmask(x)
if m is not nomask:
_vander[m] = 0
return _vander
vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__)
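# Illustrative example (not part of the original docstring): a masked entry
# zeroes out the corresponding row of the Vandermonde matrix:
#     >>> np.ma.vander(np.ma.array([1, 2, 3], mask=[0, 1, 0]), 3)
#     array([[1, 1, 1],
#            [0, 0, 0],
#            [9, 3, 1]])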
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
    Any masked values in x are propagated to y, and vice-versa.
"""
x = asarray(x)
y = asarray(y)
m = getmask(x)
if y.ndim == 1:
m = mask_or(m, getmask(y))
elif y.ndim == 2:
my = getmask(mask_rows(y))
if my is not nomask:
m = mask_or(m, my[:, 0])
else:
raise TypeError("Expected a 1D or 2D array for y!")
if w is not None:
w = asarray(w)
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
m = mask_or(m, getmask(w))
if m is not nomask:
not_m = ~m
if w is not None:
w = w[not_m]
return np.polyfit(x[not_m], y[not_m], deg, rcond, full, w, cov)
else:
return np.polyfit(x, y, deg, rcond, full, w, cov)
polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__)
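# Illustrative sketch (not part of the original docstring): a masked point is
# dropped from the fit, so
#     x = np.ma.array([0., 1., 2., 3.], mask=[0, 0, 1, 0])
#     np.ma.polyfit(x, [0., 1., 9., 3.], 1)
# fits only the pairs (0, 0), (1, 1) and (3, 3) and returns coefficients
# close to [1., 0.].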
| 60,910 | Python | 29.094368 | 105 | 0.554654 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/ma/tests/test_core.py | # pylint: disable-msg=W0400,W0511,W0611,W0612,W0614,R0201,E1102
"""Tests suite for MaskedArray & subclassing.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
"""
__author__ = "Pierre GF Gerard-Marchant"
import sys
import warnings
import operator
import itertools
import textwrap
import pytest
from functools import reduce
import numpy as np
import numpy.ma.core
import numpy.core.fromnumeric as fromnumeric
import numpy.core.umath as umath
from numpy.testing import (
assert_raises, assert_warns, suppress_warnings
)
from numpy import ndarray
from numpy.compat import asbytes
from numpy.ma.testutils import (
assert_, assert_array_equal, assert_equal, assert_almost_equal,
assert_equal_records, fail_if_equal, assert_not_equal,
assert_mask_equal
)
from numpy.ma.core import (
MAError, MaskError, MaskType, MaskedArray, abs, absolute, add, all,
allclose, allequal, alltrue, angle, anom, arange, arccos, arccosh, arctan2,
arcsin, arctan, argsort, array, asarray, choose, concatenate,
conjugate, cos, cosh, count, default_fill_value, diag, divide, doc_note,
empty, empty_like, equal, exp, flatten_mask, filled, fix_invalid,
flatten_structured_array, fromflex, getmask, getmaskarray, greater,
greater_equal, identity, inner, isMaskedArray, less, less_equal, log,
log10, make_mask, make_mask_descr, mask_or, masked, masked_array,
masked_equal, masked_greater, masked_greater_equal, masked_inside,
masked_less, masked_less_equal, masked_not_equal, masked_outside,
masked_print_option, masked_values, masked_where, max, maximum,
maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply,
mvoid, nomask, not_equal, ones, ones_like, outer, power, product, put,
putmask, ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort,
sqrt, subtract, sum, take, tan, tanh, transpose, where, zeros, zeros_like,
)
from numpy.compat import pickle
pi = np.pi
suppress_copy_mask_on_assignment = suppress_warnings()
suppress_copy_mask_on_assignment.filter(
numpy.ma.core.MaskedArrayFutureWarning,
"setting an item on a masked array which has a shared mask will not copy")
# For parametrized numeric testing
num_dts = [np.dtype(dt_) for dt_ in '?bhilqBHILQefdgFD']
num_ids = [dt_.char for dt_ in num_dts]
class TestMaskedArray:
# Base test class for MaskedArrays.
def setup_method(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
def test_basicattributes(self):
# Tests some basic array attributes.
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
assert_equal(a.ndim, 1)
assert_equal(b.ndim, 1)
assert_equal(a.size, 3)
assert_equal(b.size, 3)
assert_equal(a.shape, (3,))
assert_equal(b.shape, (3,))
def test_basic0d(self):
# Checks masking a scalar
x = masked_array(0)
assert_equal(str(x), '0')
x = masked_array(0, mask=True)
assert_equal(str(x), str(masked_print_option))
x = masked_array(0, mask=False)
assert_equal(str(x), '0')
x = array(0, mask=1)
assert_(x.filled().dtype is x._data.dtype)
def test_basic1d(self):
# Test of basic array creation and properties in 1 dimension.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_(not isMaskedArray(x))
assert_(isMaskedArray(xm))
assert_((xm - ym).filled(0).any())
fail_if_equal(xm.mask.astype(int), ym.mask.astype(int))
s = x.shape
assert_equal(np.shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.dtype, x.dtype)
assert_equal(zm.dtype, z.dtype)
assert_equal(xm.size, reduce(lambda x, y:x * y, s))
assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
assert_array_equal(xm, xf)
assert_array_equal(filled(xm, 1.e20), xf)
assert_array_equal(x, xm)
def test_basic2d(self):
# Test of basic array creation and properties in 2 dimensions.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
for s in [(4, 3), (6, 2)]:
x.shape = s
y.shape = s
xm.shape = s
ym.shape = s
xf.shape = s
assert_(not isMaskedArray(x))
assert_(isMaskedArray(xm))
assert_equal(shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.size, reduce(lambda x, y:x * y, s))
assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
assert_equal(xm, xf)
assert_equal(filled(xm, 1.e20), xf)
assert_equal(x, xm)
def test_concatenate_basic(self):
# Tests concatenations.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# basic concatenation
assert_equal(np.concatenate((x, y)), concatenate((xm, ym)))
assert_equal(np.concatenate((x, y)), concatenate((x, y)))
assert_equal(np.concatenate((x, y)), concatenate((xm, y)))
assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x)))
def test_concatenate_alongaxis(self):
# Tests concatenations.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# Concatenation along an axis
s = (3, 4)
x.shape = y.shape = xm.shape = ym.shape = s
assert_equal(xm.mask, np.reshape(m1, s))
assert_equal(ym.mask, np.reshape(m2, s))
xmym = concatenate((xm, ym), 1)
assert_equal(np.concatenate((x, y), 1), xmym)
assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask)
x = zeros(2)
y = array(ones(2), mask=[False, True])
z = concatenate((x, y))
assert_array_equal(z, [0, 0, 1, 1])
assert_array_equal(z.mask, [False, False, False, True])
z = concatenate((y, x))
assert_array_equal(z, [1, 1, 0, 0])
assert_array_equal(z.mask, [False, True, False, False])
def test_concatenate_flexible(self):
# Tests the concatenation on flexible arrays.
data = masked_array(list(zip(np.random.rand(10),
np.arange(10))),
dtype=[('a', float), ('b', int)])
test = concatenate([data[:5], data[5:]])
assert_equal_records(test, data)
def test_creation_ndmin(self):
# Check the use of ndmin
x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2)
assert_equal(x.shape, (1, 3))
assert_equal(x._data, [[1, 2, 3]])
assert_equal(x._mask, [[1, 0, 0]])
def test_creation_ndmin_from_maskedarray(self):
# Make sure we're not losing the original mask w/ ndmin
x = array([1, 2, 3])
x[-1] = masked
xx = array(x, ndmin=2, dtype=float)
assert_equal(x.shape, x._mask.shape)
assert_equal(xx.shape, xx._mask.shape)
def test_creation_maskcreation(self):
# Tests how masks are initialized at the creation of Maskedarrays.
data = arange(24, dtype=float)
data[[3, 6, 15]] = masked
dma_1 = MaskedArray(data)
assert_equal(dma_1.mask, data.mask)
dma_2 = MaskedArray(dma_1)
assert_equal(dma_2.mask, dma_1.mask)
dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6)
fail_if_equal(dma_3.mask, dma_1.mask)
x = array([1, 2, 3], mask=True)
assert_equal(x._mask, [True, True, True])
x = array([1, 2, 3], mask=False)
assert_equal(x._mask, [False, False, False])
y = array([1, 2, 3], mask=x._mask, copy=False)
assert_(np.may_share_memory(x.mask, y.mask))
y = array([1, 2, 3], mask=x._mask, copy=True)
assert_(not np.may_share_memory(x.mask, y.mask))
def test_masked_singleton_array_creation_warns(self):
        # The first works, but ideally should not; there may be no way to
        # solve this, however, as long as `np.ma.masked` is an ndarray.
np.array(np.ma.masked)
with pytest.warns(UserWarning):
# Tries to create a float array, using `float(np.ma.masked)`.
            # We may want to define this as invalid behaviour in the future!
# (requiring np.ma.masked to be a known NumPy scalar probably
# with a DType.)
np.array([3., np.ma.masked])
def test_creation_with_list_of_maskedarrays(self):
# Tests creating a masked array from a list of masked arrays.
x = array(np.arange(5), mask=[1, 0, 0, 0, 0])
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]])
x.mask = nomask
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
assert_(data.mask is nomask)
def test_creation_with_list_of_maskedarrays_no_bool_cast(self):
# Tests the regression in gh-18551
masked_str = np.ma.masked_array(['a', 'b'], mask=[True, False])
normal_int = np.arange(2)
res = np.ma.asarray([masked_str, normal_int], dtype="U21")
assert_array_equal(res.mask, [[True, False], [False, False]])
        # The above only failed due to a long chain of oddities; try also with
        # an object array that cannot always be converted to bool:
class NotBool():
def __bool__(self):
raise ValueError("not a bool!")
masked_obj = np.ma.masked_array([NotBool(), 'b'], mask=[True, False])
# Check that the NotBool actually fails like we would expect:
with pytest.raises(ValueError, match="not a bool!"):
np.asarray([masked_obj], dtype=bool)
res = np.ma.asarray([masked_obj, normal_int])
assert_array_equal(res.mask, [[True, False], [False, False]])
def test_creation_from_ndarray_with_padding(self):
x = np.array([('A', 0)], dtype={'names':['f0','f1'],
'formats':['S4','i8'],
'offsets':[0,8]})
array(x) # used to fail due to 'V' padding field in x.dtype.descr
def test_unknown_keyword_parameter(self):
with pytest.raises(TypeError, match="unexpected keyword argument"):
MaskedArray([1, 2, 3], maks=[0, 1, 0]) # `mask` is misspelled.
def test_asarray(self):
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
xm.fill_value = -9999
xm._hardmask = True
xmm = asarray(xm)
assert_equal(xmm._data, xm._data)
assert_equal(xmm._mask, xm._mask)
assert_equal(xmm.fill_value, xm.fill_value)
assert_equal(xmm._hardmask, xm._hardmask)
def test_asarray_default_order(self):
# See Issue #6646
m = np.eye(3).T
assert_(not m.flags.c_contiguous)
new_m = asarray(m)
assert_(new_m.flags.c_contiguous)
def test_asarray_enforce_order(self):
# See Issue #6646
m = np.eye(3).T
assert_(not m.flags.c_contiguous)
new_m = asarray(m, order='C')
assert_(new_m.flags.c_contiguous)
def test_fix_invalid(self):
# Checks fix_invalid.
with np.errstate(invalid='ignore'):
data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1])
data_fixed = fix_invalid(data)
assert_equal(data_fixed._data, [data.fill_value, 0., 1.])
assert_equal(data_fixed._mask, [1., 0., 1.])
def test_maskedelement(self):
# Test of masked element
x = arange(6)
x[1] = masked
assert_(str(masked) == '--')
assert_(x[1] is masked)
assert_equal(filled(x[1], 0), 0)
def test_set_element_as_object(self):
# Tests setting elements with object
a = empty(1, dtype=object)
x = (1, 2, 3, 4, 5)
a[0] = x
assert_equal(a[0], x)
assert_(a[0] is x)
import datetime
dt = datetime.datetime.now()
a[0] = dt
assert_(a[0] is dt)
def test_indexing(self):
# Tests conversions and indexing
x1 = np.array([1, 2, 4, 3])
x2 = array(x1, mask=[1, 0, 0, 0])
x3 = array(x1, mask=[0, 1, 0, 1])
x4 = array(x1)
# test conversion to strings
str(x2) # raises?
repr(x2) # raises?
assert_equal(np.sort(x1), sort(x2, endwith=False))
# tests of indexing
assert_(type(x2[1]) is type(x1[1]))
assert_(x1[1] == x2[1])
assert_(x2[0] is masked)
assert_equal(x1[2], x2[2])
assert_equal(x1[2:5], x2[2:5])
assert_equal(x1[:], x2[:])
assert_equal(x1[1:], x3[1:])
x1[2] = 9
x2[2] = 9
assert_equal(x1, x2)
x1[1:3] = 99
x2[1:3] = 99
assert_equal(x1, x2)
x2[1] = masked
assert_equal(x1, x2)
x2[1:3] = masked
assert_equal(x1, x2)
x2[:] = x1
x2[1] = masked
assert_(allequal(getmask(x2), array([0, 1, 0, 0])))
x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x3), array([0, 1, 1, 0])))
x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x4), array([0, 1, 1, 0])))
assert_(allequal(x4, array([1, 2, 3, 4])))
x1 = np.arange(5) * 1.0
x2 = masked_values(x1, 3.0)
assert_equal(x1, x2)
assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
assert_equal(3.0, x2.fill_value)
x1 = array([1, 'hello', 2, 3], object)
x2 = np.array([1, 'hello', 2, 3], object)
s1 = x1[1]
s2 = x2[1]
assert_equal(type(s2), str)
assert_equal(type(s1), str)
assert_equal(s1, s2)
assert_(x1[1:1].shape == (0,))
@suppress_copy_mask_on_assignment
def test_copy(self):
# Tests of some subtle points of copying and sizing.
n = [0, 0, 1, 0, 0]
m = make_mask(n)
m2 = make_mask(m)
assert_(m is m2)
m3 = make_mask(m, copy=True)
assert_(m is not m3)
x1 = np.arange(5)
y1 = array(x1, mask=m)
assert_equal(y1._data.__array_interface__, x1.__array_interface__)
assert_(allequal(x1, y1.data))
assert_equal(y1._mask.__array_interface__, m.__array_interface__)
y1a = array(y1)
# Default for masked array is not to copy; see gh-10318.
assert_(y1a._data.__array_interface__ ==
y1._data.__array_interface__)
assert_(y1a._mask.__array_interface__ ==
y1._mask.__array_interface__)
y2 = array(x1, mask=m3)
assert_(y2._data.__array_interface__ == x1.__array_interface__)
assert_(y2._mask.__array_interface__ == m3.__array_interface__)
assert_(y2[2] is masked)
y2[2] = 9
assert_(y2[2] is not masked)
assert_(y2._mask.__array_interface__ == m3.__array_interface__)
assert_(allequal(y2.mask, 0))
y2a = array(x1, mask=m, copy=1)
assert_(y2a._data.__array_interface__ != x1.__array_interface__)
#assert_( y2a._mask is not m)
assert_(y2a._mask.__array_interface__ != m.__array_interface__)
assert_(y2a[2] is masked)
y2a[2] = 9
assert_(y2a[2] is not masked)
#assert_( y2a._mask is not m)
assert_(y2a._mask.__array_interface__ != m.__array_interface__)
assert_(allequal(y2a.mask, 0))
y3 = array(x1 * 1.0, mask=m)
assert_(filled(y3).dtype is (x1 * 1.0).dtype)
x4 = arange(4)
x4[2] = masked
y4 = resize(x4, (8,))
assert_equal(concatenate([x4, x4]), y4)
assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])
y5 = repeat(x4, (2, 2, 2, 2), axis=0)
assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3])
y6 = repeat(x4, 2, axis=0)
assert_equal(y5, y6)
y7 = x4.repeat((2, 2, 2, 2), axis=0)
assert_equal(y5, y7)
y8 = x4.repeat(2, 0)
assert_equal(y5, y8)
y9 = x4.copy()
assert_equal(y9._data, x4._data)
assert_equal(y9._mask, x4._mask)
x = masked_array([1, 2, 3], mask=[0, 1, 0])
# Copy is False by default
y = masked_array(x)
assert_equal(y._data.ctypes.data, x._data.ctypes.data)
assert_equal(y._mask.ctypes.data, x._mask.ctypes.data)
y = masked_array(x, copy=True)
assert_not_equal(y._data.ctypes.data, x._data.ctypes.data)
assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data)
def test_copy_0d(self):
# gh-9430
x = np.ma.array(43, mask=True)
xc = x.copy()
assert_equal(xc.mask, True)
def test_copy_on_python_builtins(self):
# Tests copy works on python builtins (issue#8019)
assert_(isMaskedArray(np.ma.copy([1,2,3])))
assert_(isMaskedArray(np.ma.copy((1,2,3))))
def test_copy_immutable(self):
# Tests that the copy method is immutable, GitHub issue #5247
a = np.ma.array([1, 2, 3])
b = np.ma.array([4, 5, 6])
a_copy_method = a.copy
b.copy
assert_equal(a_copy_method(), [1, 2, 3])
def test_deepcopy(self):
from copy import deepcopy
a = array([0, 1, 2], mask=[False, True, False])
copied = deepcopy(a)
assert_equal(copied.mask, a.mask)
assert_not_equal(id(a._mask), id(copied._mask))
copied[1] = 1
assert_equal(copied.mask, [0, 0, 0])
assert_equal(a.mask, [0, 1, 0])
copied = deepcopy(a)
assert_equal(copied.mask, a.mask)
copied.mask[1] = False
assert_equal(copied.mask, [0, 0, 0])
assert_equal(a.mask, [0, 1, 0])
def test_format(self):
a = array([0, 1, 2], mask=[False, True, False])
assert_equal(format(a), "[0 -- 2]")
assert_equal(format(masked), "--")
assert_equal(format(masked, ""), "--")
# Postponed from PR #15410, perhaps address in the future.
# assert_equal(format(masked, " >5"), " --")
# assert_equal(format(masked, " <5"), "-- ")
# Expect a FutureWarning for using format_spec with MaskedElement
with assert_warns(FutureWarning):
with_format_string = format(masked, " >5")
assert_equal(with_format_string, "--")
def test_str_repr(self):
a = array([0, 1, 2], mask=[False, True, False])
assert_equal(str(a), '[0 -- 2]')
assert_equal(
repr(a),
textwrap.dedent('''\
masked_array(data=[0, --, 2],
mask=[False, True, False],
fill_value=999999)''')
)
# arrays with a continuation
a = np.ma.arange(2000)
a[1:50] = np.ma.masked
assert_equal(
repr(a),
textwrap.dedent('''\
masked_array(data=[0, --, --, ..., 1997, 1998, 1999],
mask=[False, True, True, ..., False, False, False],
fill_value=999999)''')
)
# line-wrapped 1d arrays are correctly aligned
a = np.ma.arange(20)
assert_equal(
repr(a),
textwrap.dedent('''\
masked_array(data=[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19],
mask=False,
fill_value=999999)''')
)
# 2d arrays cause wrapping
a = array([[1, 2, 3], [4, 5, 6]], dtype=np.int8)
a[1,1] = np.ma.masked
assert_equal(
repr(a),
textwrap.dedent('''\
masked_array(
data=[[1, 2, 3],
[4, --, 6]],
mask=[[False, False, False],
[False, True, False]],
fill_value=999999,
dtype=int8)''')
)
        # but not if they're a row vector
assert_equal(
repr(a[:1]),
textwrap.dedent('''\
masked_array(data=[[1, 2, 3]],
mask=[[False, False, False]],
fill_value=999999,
dtype=int8)''')
)
# dtype=int is implied, so not shown
assert_equal(
repr(a.astype(int)),
textwrap.dedent('''\
masked_array(
data=[[1, 2, 3],
[4, --, 6]],
mask=[[False, False, False],
[False, True, False]],
fill_value=999999)''')
)
def test_str_repr_legacy(self):
oldopts = np.get_printoptions()
np.set_printoptions(legacy='1.13')
try:
a = array([0, 1, 2], mask=[False, True, False])
assert_equal(str(a), '[0 -- 2]')
assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n'
' mask = [False True False],\n'
' fill_value = 999999)\n')
a = np.ma.arange(2000)
a[1:50] = np.ma.masked
assert_equal(
repr(a),
'masked_array(data = [0 -- -- ..., 1997 1998 1999],\n'
' mask = [False True True ..., False False False],\n'
' fill_value = 999999)\n'
)
finally:
np.set_printoptions(**oldopts)
def test_0d_unicode(self):
u = u'caf\xe9'
utype = type(u)
arr_nomask = np.ma.array(u)
arr_masked = np.ma.array(u, mask=True)
assert_equal(utype(arr_nomask), u)
assert_equal(utype(arr_masked), u'--')
def test_pickling(self):
# Tests pickling
for dtype in (int, float, str, object):
a = arange(10).astype(dtype)
a.fill_value = 999
masks = ([0, 0, 0, 1, 0, 1, 0, 1, 0, 1], # partially masked
True, # Fully masked
False) # Fully unmasked
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
for mask in masks:
a.mask = mask
a_pickled = pickle.loads(pickle.dumps(a, protocol=proto))
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled._data, a._data)
if dtype in (object, int):
assert_equal(a_pickled.fill_value, 999)
else:
assert_equal(a_pickled.fill_value, dtype(999))
assert_array_equal(a_pickled.mask, mask)
def test_pickling_subbaseclass(self):
# Test pickling w/ a subclass of ndarray
x = np.array([(1.0, 2), (3.0, 4)],
dtype=[('x', float), ('y', int)]).view(np.recarray)
a = masked_array(x, mask=[(True, False), (False, True)])
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
a_pickled = pickle.loads(pickle.dumps(a, protocol=proto))
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
assert_(isinstance(a_pickled._data, np.recarray))
def test_pickling_maskedconstant(self):
# Test pickling MaskedConstant
mc = np.ma.masked
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
mc_pickled = pickle.loads(pickle.dumps(mc, protocol=proto))
assert_equal(mc_pickled._baseclass, mc._baseclass)
assert_equal(mc_pickled._mask, mc._mask)
assert_equal(mc_pickled._data, mc._data)
def test_pickling_wstructured(self):
# Tests pickling w/ structured array
a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)],
dtype=[('a', int), ('b', float)])
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
a_pickled = pickle.loads(pickle.dumps(a, protocol=proto))
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
def test_pickling_keepalignment(self):
# Tests pickling w/ F_CONTIGUOUS arrays
a = arange(10)
a.shape = (-1, 2)
b = a.T
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
test = pickle.loads(pickle.dumps(b, protocol=proto))
assert_equal(test, b)
def test_single_element_subscript(self):
        # Tests single element subscripts of MaskedArrays.
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
assert_equal(a[0].shape, ())
assert_equal(b[0].shape, ())
assert_equal(b[1].shape, ())
def test_topython(self):
# Tests some communication issues with Python.
assert_equal(1, int(array(1)))
assert_equal(1.0, float(array(1)))
assert_equal(1, int(array([[[1]]])))
assert_equal(1.0, float(array([[1]])))
assert_raises(TypeError, float, array([1, 1]))
with suppress_warnings() as sup:
sup.filter(UserWarning, 'Warning: converting a masked element')
assert_(np.isnan(float(array([1], mask=[1]))))
a = array([1, 2, 3], mask=[1, 0, 0])
assert_raises(TypeError, lambda: float(a))
assert_equal(float(a[-1]), 3.)
assert_(np.isnan(float(a[0])))
assert_raises(TypeError, int, a)
assert_equal(int(a[-1]), 3)
assert_raises(MAError, lambda:int(a[0]))
def test_oddfeatures_1(self):
# Test of other odd features
x = arange(20)
x = x.reshape(4, 5)
x.flat[5] = 12
assert_(x[1, 0] == 12)
z = x + 10j * x
assert_equal(z.real, x)
assert_equal(z.imag, 10 * x)
assert_equal((z * conjugate(z)).real, 101 * x * x)
z.imag[...] = 0.0
x = arange(10)
x[3] = masked
assert_(str(x[3]) == str(masked))
c = x >= 8
assert_(count(where(c, masked, masked)) == 0)
assert_(shape(where(c, masked, masked)) == c.shape)
z = masked_where(c, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
assert_equal(x, z)
def test_oddfeatures_2(self):
# Tests some more features.
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
@suppress_copy_mask_on_assignment
def test_oddfeatures_3(self):
# Tests some generic features
atest = array([10], mask=True)
btest = array([20])
idx = atest.mask
atest[idx] = btest[idx]
assert_equal(atest, [20])
def test_filled_with_object_dtype(self):
a = np.ma.masked_all(1, dtype='O')
assert_equal(a.filled('x')[0], 'x')
def test_filled_with_flexible_dtype(self):
# Test filled w/ flexible dtype
flexi = array([(1, 1, 1)],
dtype=[('i', int), ('s', '|S8'), ('f', float)])
flexi[0] = masked
assert_equal(flexi.filled(),
np.array([(default_fill_value(0),
default_fill_value('0'),
default_fill_value(0.),)], dtype=flexi.dtype))
flexi[0] = masked
assert_equal(flexi.filled(1),
np.array([(1, '1', 1.)], dtype=flexi.dtype))
def test_filled_with_mvoid(self):
# Test filled w/ mvoid
ndtype = [('a', int), ('b', float)]
a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype)
# Filled using default
test = a.filled()
assert_equal(tuple(test), (1, default_fill_value(1.)))
# Explicit fill_value
test = a.filled((-1, -1))
assert_equal(tuple(test), (1, -1))
# Using predefined filling values
a.fill_value = (-999, -999)
assert_equal(tuple(a.filled()), (1, -999))
def test_filled_with_nested_dtype(self):
# Test filled w/ nested dtype
ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]
a = array([(1, (1, 1)), (2, (2, 2))],
mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype)
test = a.filled(0)
control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype)
assert_equal(test, control)
test = a['B'].filled(0)
control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype)
assert_equal(test, control)
# test if mask gets set correctly (see #6760)
Z = numpy.ma.zeros(2, numpy.dtype([("A", "(2,2)i1,(2,2)i1", (2,2))]))
assert_equal(Z.data.dtype, numpy.dtype([('A', [('f0', 'i1', (2, 2)),
('f1', 'i1', (2, 2))], (2, 2))]))
assert_equal(Z.mask.dtype, numpy.dtype([('A', [('f0', '?', (2, 2)),
('f1', '?', (2, 2))], (2, 2))]))
def test_filled_with_f_order(self):
# Test filled w/ F-contiguous array
a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'),
mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'),
order='F') # this is currently ignored
assert_(a.flags['F_CONTIGUOUS'])
assert_(a.filled(0).flags['F_CONTIGUOUS'])
def test_optinfo_propagation(self):
# Checks that _optinfo dictionary isn't back-propagated
x = array([1, 2, 3, ], dtype=float)
x._optinfo['info'] = '???'
y = x.copy()
assert_equal(y._optinfo['info'], '???')
y._optinfo['info'] = '!!!'
assert_equal(x._optinfo['info'], '???')
def test_optinfo_forward_propagation(self):
a = array([1,2,2,4])
a._optinfo["key"] = "value"
assert_equal(a._optinfo["key"], (a == 2)._optinfo["key"])
assert_equal(a._optinfo["key"], (a != 2)._optinfo["key"])
assert_equal(a._optinfo["key"], (a > 2)._optinfo["key"])
assert_equal(a._optinfo["key"], (a >= 2)._optinfo["key"])
assert_equal(a._optinfo["key"], (a <= 2)._optinfo["key"])
assert_equal(a._optinfo["key"], (a + 2)._optinfo["key"])
assert_equal(a._optinfo["key"], (a - 2)._optinfo["key"])
assert_equal(a._optinfo["key"], (a * 2)._optinfo["key"])
assert_equal(a._optinfo["key"], (a / 2)._optinfo["key"])
assert_equal(a._optinfo["key"], a[:2]._optinfo["key"])
assert_equal(a._optinfo["key"], a[[0,0,2]]._optinfo["key"])
assert_equal(a._optinfo["key"], np.exp(a)._optinfo["key"])
assert_equal(a._optinfo["key"], np.abs(a)._optinfo["key"])
assert_equal(a._optinfo["key"], array(a, copy=True)._optinfo["key"])
assert_equal(a._optinfo["key"], np.zeros_like(a)._optinfo["key"])
def test_fancy_printoptions(self):
# Test printing a masked array w/ fancy dtype.
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = array([(1, (2, 3.0)), (4, (5, 6.0))],
mask=[(1, (0, 1)), (0, (1, 0))],
dtype=fancydtype)
control = "[(--, (2, --)) (4, (--, 6.0))]"
assert_equal(str(test), control)
# Test 0-d array with multi-dimensional dtype
t_2d0 = masked_array(data = (0, [[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]],
0.0),
mask = (False, [[True, False, True],
[False, False, True]],
False),
dtype = "int, (2,3)float, float")
control = "(0, [[--, 0.0, --], [0.0, 0.0, --]], 0.0)"
assert_equal(str(t_2d0), control)
def test_flatten_structured_array(self):
# Test flatten_structured_array on arrays
# On ndarray
ndtype = [('a', int), ('b', float)]
a = np.array([(1, 1), (2, 2)], dtype=ndtype)
test = flatten_structured_array(a)
control = np.array([[1., 1.], [2., 2.]], dtype=float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
# On masked_array
a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1.], [2., 2.]],
mask=[[0, 1], [1, 0]], dtype=float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
# On masked array with nested structure
ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])]
a = array([(1, (1, 1.1)), (2, (2, 2.2))],
mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1., 1.1], [2., 2., 2.2]],
mask=[[0, 1, 0], [1, 0, 1]], dtype=float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
# Keeping the initial shape
ndtype = [('a', int), ('b', float)]
a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype)
test = flatten_structured_array(a)
control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
def test_void0d(self):
        # Test creating an mvoid object
ndtype = [('a', int), ('b', int)]
a = np.array([(1, 2,)], dtype=ndtype)[0]
f = mvoid(a)
assert_(isinstance(f, mvoid))
a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0]
assert_(isinstance(a, mvoid))
a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
f = mvoid(a._data[0], a._mask[0])
assert_(isinstance(f, mvoid))
def test_mvoid_getitem(self):
# Test mvoid.__getitem__
ndtype = [('a', int), ('b', int)]
a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
dtype=ndtype)
# w/o mask
f = a[0]
assert_(isinstance(f, mvoid))
assert_equal((f[0], f['a']), (1, 1))
assert_equal(f['b'], 2)
# w/ mask
f = a[1]
assert_(isinstance(f, mvoid))
assert_(f[0] is masked)
assert_(f['a'] is masked)
assert_equal(f[1], 4)
# exotic dtype
        A = masked_array(data=[([0, 1],)],
mask=[([True, False],)],
dtype=[("A", ">i2", (2,))])
assert_equal(A[0]["A"], A["A"][0])
assert_equal(A[0]["A"], masked_array(data=[0, 1],
mask=[True, False], dtype=">i2"))
def test_mvoid_iter(self):
        # Test iteration over an mvoid
ndtype = [('a', int), ('b', int)]
a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
dtype=ndtype)
# w/o mask
assert_equal(list(a[0]), [1, 2])
# w/ mask
assert_equal(list(a[1]), [masked, 4])
def test_mvoid_print(self):
        # Test printing an mvoid
mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)])
assert_equal(str(mx[0]), "(1, 1)")
mx['b'][0] = masked
ini_display = masked_print_option._display
masked_print_option.set_display("-X-")
try:
assert_equal(str(mx[0]), "(1, -X-)")
assert_equal(repr(mx[0]), "(1, -X-)")
finally:
masked_print_option.set_display(ini_display)
# also check if there are object datatypes (see gh-7493)
mx = array([(1,), (2,)], dtype=[('a', 'O')])
assert_equal(str(mx[0]), "(1,)")
def test_mvoid_multidim_print(self):
# regression test for gh-6019
        t_ma = masked_array(data=[([1, 2, 3],)],
                            mask=[([False, True, False],)],
                            fill_value=([999999, 999999, 999999],),
                            dtype=[('a', '<i4', (3,))])
assert_(str(t_ma[0]) == "([1, --, 3],)")
assert_(repr(t_ma[0]) == "([1, --, 3],)")
# additional tests with structured arrays
        t_2d = masked_array(data=[([[1, 2], [3, 4]],)],
                            mask=[([[False, True], [True, False]],)],
                            dtype=[('a', '<i4', (2, 2))])
assert_(str(t_2d[0]) == "([[1, --], [--, 4]],)")
assert_(repr(t_2d[0]) == "([[1, --], [--, 4]],)")
        t_0d = masked_array(data=[(1, 2)],
                            mask=[(True, False)],
                            dtype=[('a', '<i4'), ('b', '<i4')])
assert_(str(t_0d[0]) == "(--, 2)")
assert_(repr(t_0d[0]) == "(--, 2)")
        t_2d = masked_array(data=[([[1, 2], [3, 4]], 1)],
                            mask=[([[False, True], [True, False]], False)],
                            dtype=[('a', '<i4', (2, 2)), ('b', float)])
assert_(str(t_2d[0]) == "([[1, --], [--, 4]], 1.0)")
assert_(repr(t_2d[0]) == "([[1, --], [--, 4]], 1.0)")
t_ne = masked_array(data=[(1, (1, 1))],
mask=[(True, (True, False))],
                            dtype=[('a', '<i4'), ('b', 'i4,i4')])
assert_(str(t_ne[0]) == "(--, (--, 1))")
assert_(repr(t_ne[0]) == "(--, (--, 1))")
def test_object_with_array(self):
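        # Object-dtype masked arrays can hold masked arrays as elements;
        # an unmasked slot returns the element itself, while a masked slot
        # returns a fully-masked view of it.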
mx1 = masked_array([1.], mask=[True])
mx2 = masked_array([1., 2.])
mx = masked_array([mx1, mx2], mask=[False, True], dtype=object)
assert_(mx[0] is mx1)
assert_(mx[1] is not mx2)
assert_(np.all(mx[1].data == mx2.data))
assert_(np.all(mx[1].mask))
# check that we return a view.
mx[1].data[0] = 0.
assert_(mx2[0] == 0.)
class TestMaskedArrayArithmetic:
    # Tests of arithmetic operations on MaskedArrays.
def setup_method(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
def teardown_method(self):
np.seterr(**self.err_status)
def test_basic_arithmetic(self):
# Test of basic arithmetic.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
a2d = array([[1, 2], [0, 4]])
a2dm = masked_array(a2d, [[0, 0], [1, 0]])
assert_equal(a2d * a2d, a2d * a2dm)
assert_equal(a2d + a2d, a2d + a2dm)
assert_equal(a2d - a2d, a2d - a2dm)
for s in [(12,), (4, 3), (2, 6)]:
x = x.reshape(s)
y = y.reshape(s)
xm = xm.reshape(s)
ym = ym.reshape(s)
xf = xf.reshape(s)
assert_equal(-x, -xm)
assert_equal(x + y, xm + ym)
assert_equal(x - y, xm - ym)
assert_equal(x * y, xm * ym)
assert_equal(x / y, xm / ym)
assert_equal(a10 + y, a10 + ym)
assert_equal(a10 - y, a10 - ym)
assert_equal(a10 * y, a10 * ym)
assert_equal(a10 / y, a10 / ym)
assert_equal(x + a10, xm + a10)
assert_equal(x - a10, xm - a10)
assert_equal(x * a10, xm * a10)
assert_equal(x / a10, xm / a10)
assert_equal(x ** 2, xm ** 2)
assert_equal(abs(x) ** 2.5, abs(xm) ** 2.5)
assert_equal(x ** y, xm ** ym)
assert_equal(np.add(x, y), add(xm, ym))
assert_equal(np.subtract(x, y), subtract(xm, ym))
assert_equal(np.multiply(x, y), multiply(xm, ym))
assert_equal(np.divide(x, y), divide(xm, ym))
def test_divide_on_different_shapes(self):
x = arange(6, dtype=float)
x.shape = (2, 3)
y = arange(3, dtype=float)
z = x / y
assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
z = x / y[None,:]
assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
y = arange(2, dtype=float)
z = x / y[:, None]
assert_equal(z, [[-1., -1., -1.], [3., 4., 5.]])
assert_equal(z.mask, [[1, 1, 1], [0, 0, 0]])
def test_mixed_arithmetic(self):
# Tests mixed arithmetic.
na = np.array([1])
ma = array([1])
assert_(isinstance(na + ma, MaskedArray))
assert_(isinstance(ma + na, MaskedArray))
def test_limits_arithmetic(self):
tiny = np.finfo(float).tiny
a = array([tiny, 1. / tiny, 0.])
assert_equal(getmaskarray(a / 2), [0, 0, 0])
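        # 2/tiny overflows to inf and 2/0 is an invalid division, so both
        # of those entries end up masked.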
assert_equal(getmaskarray(2 / a), [1, 0, 1])
def test_masked_singleton_arithmetic(self):
# Tests some scalar arithmetic on MaskedArrays.
# Masked singleton should remain masked no matter what
xm = array(0, mask=1)
assert_((1 / array(0)).mask)
assert_((1 + xm).mask)
assert_((-xm).mask)
assert_(maximum(xm, xm).mask)
assert_(minimum(xm, xm).mask)
def test_masked_singleton_equality(self):
# Tests (in)equality on masked singleton
a = array([1, 2, 3], mask=[1, 1, 0])
assert_((a[0] == 0) is masked)
assert_((a[0] != 0) is masked)
assert_equal((a[-1] == 0), False)
assert_equal((a[-1] != 0), True)
def test_arithmetic_with_masked_singleton(self):
# Checks that there's no collapsing to masked
x = masked_array([1, 2])
y = x * masked
assert_equal(y.shape, x.shape)
assert_equal(y._mask, [True, True])
y = x[0] * masked
assert_(y is masked)
y = x + masked
assert_equal(y.shape, x.shape)
assert_equal(y._mask, [True, True])
def test_arithmetic_with_masked_singleton_on_1d_singleton(self):
# Check that we're not losing the shape of a singleton
x = masked_array([1, ])
y = x + masked
assert_equal(y.shape, x.shape)
assert_equal(y.mask, [True, ])
def test_scalar_arithmetic(self):
x = array(0, mask=0)
assert_equal(x.filled().ctypes.data, x.ctypes.data)
# Make sure we don't lose the shape in some circumstances
xm = array((0, 0)) / 0.
assert_equal(xm.shape, (2,))
assert_equal(xm.mask, [1, 1])
def test_basic_ufuncs(self):
# Test various functions such as sin, cos.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(np.cos(x), cos(xm))
assert_equal(np.cosh(x), cosh(xm))
assert_equal(np.sin(x), sin(xm))
assert_equal(np.sinh(x), sinh(xm))
assert_equal(np.tan(x), tan(xm))
assert_equal(np.tanh(x), tanh(xm))
assert_equal(np.sqrt(abs(x)), sqrt(xm))
assert_equal(np.log(abs(x)), log(xm))
assert_equal(np.log10(abs(x)), log10(xm))
assert_equal(np.exp(x), exp(xm))
assert_equal(np.arcsin(z), arcsin(zm))
assert_equal(np.arccos(z), arccos(zm))
assert_equal(np.arctan(z), arctan(zm))
assert_equal(np.arctan2(x, y), arctan2(xm, ym))
assert_equal(np.absolute(x), absolute(xm))
assert_equal(np.angle(x + 1j*y), angle(xm + 1j*ym))
assert_equal(np.angle(x + 1j*y, deg=True), angle(xm + 1j*ym, deg=True))
assert_equal(np.equal(x, y), equal(xm, ym))
assert_equal(np.not_equal(x, y), not_equal(xm, ym))
assert_equal(np.less(x, y), less(xm, ym))
assert_equal(np.greater(x, y), greater(xm, ym))
assert_equal(np.less_equal(x, y), less_equal(xm, ym))
assert_equal(np.greater_equal(x, y), greater_equal(xm, ym))
assert_equal(np.conjugate(x), conjugate(xm))
def test_count_func(self):
# Tests count
assert_equal(1, count(1))
        assert_equal(0, count(array(1, mask=[1])))
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
res = count(ott)
assert_(res.dtype.type is np.intp)
assert_equal(3, res)
ott = ott.reshape((2, 2))
res = count(ott)
assert_(res.dtype.type is np.intp)
assert_equal(3, res)
res = count(ott, 0)
assert_(isinstance(res, ndarray))
assert_equal([1, 2], res)
assert_(getmask(res) is nomask)
ott = array([0., 1., 2., 3.])
res = count(ott, 0)
assert_(isinstance(res, ndarray))
assert_(res.dtype.type is np.intp)
assert_raises(np.AxisError, ott.count, axis=1)
def test_count_on_python_builtins(self):
# Tests count works on python builtins (issue#8019)
        assert_equal(3, count([1, 2, 3]))
        assert_equal(2, count((1, 2)))
def test_minmax_func(self):
# Tests minimum and maximum.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        # Built-in max/min are ill-defined on shaped arrays, so ravel first
xr = np.ravel(x)
xmr = ravel(xm)
# following are true because of careful selection of data
assert_equal(max(xr), maximum.reduce(xmr))
assert_equal(min(xr), minimum.reduce(xmr))
assert_equal(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])
assert_equal(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])
x = arange(5)
y = arange(5) - 2
x[3] = masked
y[0] = masked
assert_equal(minimum(x, y), where(less(x, y), x, y))
assert_equal(maximum(x, y), where(greater(x, y), x, y))
assert_(minimum.reduce(x) == 0)
assert_(maximum.reduce(x) == 4)
x = arange(4).reshape(2, 2)
x[-1, -1] = masked
assert_equal(maximum.reduce(x, axis=None), 2)
def test_minimummaximum_func(self):
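        # minimum/maximum and their .outer variants should return
        # MaskedArrays that match the plain numpy results on unmasked data.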
a = np.ones((2, 2))
aminimum = minimum(a, a)
assert_(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum(a, a))
aminimum = minimum.outer(a, a)
assert_(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum.outer(a, a))
amaximum = maximum(a, a)
assert_(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum(a, a))
amaximum = maximum.outer(a, a)
assert_(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum.outer(a, a))
def test_minmax_reduce(self):
        # Test maximum.reduce on an array with an all-False mask
a = array([1, 2, 3], mask=[False, False, False])
b = np.maximum.reduce(a)
assert_equal(b, 3)
def test_minmax_funcs_with_output(self):
# Tests the min/max functions with explicit outputs
mask = np.random.rand(12).round()
xm = array(np.random.uniform(0, 10, 12), mask=mask)
xm.shape = (3, 4)
for funcname in ('min', 'max'):
# Initialize
npfunc = getattr(np, funcname)
mafunc = getattr(numpy.ma.core, funcname)
# Use the np version
nout = np.empty((4,), dtype=int)
try:
result = npfunc(xm, axis=0, out=nout)
except MaskError:
pass
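            # Masked entries cannot be written into an integer output, so
            # MaskError is expected above; retry with a float output.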
nout = np.empty((4,), dtype=float)
result = npfunc(xm, axis=0, out=nout)
assert_(result is nout)
# Use the ma version
nout.fill(-999)
result = mafunc(xm, axis=0, out=nout)
assert_(result is nout)
def test_minmax_methods(self):
# Additional tests on max/min
(_, _, _, _, _, xm, _, _, _, _) = self.d
xm.shape = (xm.size,)
assert_equal(xm.max(), 10)
assert_(xm[0].max() is masked)
assert_(xm[0].max(0) is masked)
assert_(xm[0].max(-1) is masked)
assert_equal(xm.min(), -10.)
assert_(xm[0].min() is masked)
assert_(xm[0].min(0) is masked)
assert_(xm[0].min(-1) is masked)
assert_equal(xm.ptp(), 20.)
assert_(xm[0].ptp() is masked)
assert_(xm[0].ptp(0) is masked)
assert_(xm[0].ptp(-1) is masked)
x = array([1, 2, 3], mask=True)
assert_(x.min() is masked)
assert_(x.max() is masked)
assert_(x.ptp() is masked)
def test_minmax_dtypes(self):
# Additional tests on max/min for non-standard float and complex dtypes
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
a10 = 10.
an10 = -10.0
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
xm = masked_array(x, mask=m1)
xm.set_fill_value(1e+20)
float_dtypes = [np.half, np.single, np.double,
np.longdouble, np.cfloat, np.cdouble, np.clongdouble]
for float_dtype in float_dtypes:
assert_equal(masked_array(x, mask=m1, dtype=float_dtype).max(),
float_dtype(a10))
assert_equal(masked_array(x, mask=m1, dtype=float_dtype).min(),
float_dtype(an10))
assert_equal(xm.min(), an10)
assert_equal(xm.max(), a10)
        # Real (non-complex) dtypes only
for float_dtype in float_dtypes[:4]:
assert_equal(masked_array(x, mask=m1, dtype=float_dtype).max(),
float_dtype(a10))
assert_equal(masked_array(x, mask=m1, dtype=float_dtype).min(),
float_dtype(an10))
        # Complex dtypes only
for float_dtype in float_dtypes[-3:]:
ym = masked_array([1e20+1j, 1e20-2j, 1e20-1j], mask=[0, 1, 0],
dtype=float_dtype)
assert_equal(ym.min(), float_dtype(1e20-1j))
assert_equal(ym.max(), float_dtype(1e20+1j))
zm = masked_array([np.inf+2j, np.inf+3j, -np.inf-1j], mask=[0, 1, 0],
dtype=float_dtype)
assert_equal(zm.min(), float_dtype(-np.inf-1j))
assert_equal(zm.max(), float_dtype(np.inf+2j))
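        # Even at extreme magnitudes, max/min must ignore the masked entry
        # rather than letting its underlying data or fill value win.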
cmax = np.inf - 1j * np.finfo(np.float64).max
assert masked_array([-cmax, 0], mask=[0, 1]).max() == -cmax
assert masked_array([cmax, 0], mask=[0, 1]).min() == cmax
def test_addsumprod(self):
# Tests add, sum, product.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(np.add.reduce(x), add.reduce(x))
assert_equal(np.add.accumulate(x), add.accumulate(x))
        assert_equal(4, sum(array(4), axis=0))
assert_equal(np.sum(x, axis=0), sum(x, axis=0))
assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))
assert_equal(np.sum(x, 0), sum(x, 0))
assert_equal(np.product(x, axis=0), product(x, axis=0))
assert_equal(np.product(x, 0), product(x, 0))
assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0))
s = (3, 4)
x.shape = y.shape = xm.shape = ym.shape = s
if len(s) > 1:
assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
assert_equal(np.sum(x, 1), sum(x, 1))
assert_equal(np.product(x, 1), product(x, 1))
def test_binops_d2D(self):
# Test binary operations on 2D data
a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
b = array([[2., 3.], [4., 5.], [6., 7.]])
test = a * b
control = array([[2., 3.], [2., 2.], [3., 3.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b * a
control = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
a = array([[1.], [2.], [3.]])
b = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [0, 0], [0, 1]])
test = a * b
control = array([[2, 3], [8, 10], [18, 3]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b * a
control = array([[2, 3], [8, 10], [18, 7]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_domained_binops_d2D(self):
# Test domained binary operations on 2D data
a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
b = array([[2., 3.], [4., 5.], [6., 7.]])
test = a / b
control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b / a
control = array([[2. / 1., 3. / 1.], [4., 5.], [6., 7.]],
mask=[[0, 0], [1, 1], [1, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
a = array([[1.], [2.], [3.]])
b = array([[2., 3.], [4., 5.], [6., 7.]],
mask=[[0, 0], [0, 0], [0, 1]])
test = a / b
control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
test = b / a
control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]],
mask=[[0, 0], [0, 0], [0, 1]])
assert_equal(test, control)
assert_equal(test.data, control.data)
assert_equal(test.mask, control.mask)
def test_noshrinking(self):
# Check that we don't shrink a mask when not wanted
# Binary operations
a = masked_array([1., 2., 3.], mask=[False, False, False],
shrink=False)
b = a + 1
assert_equal(b.mask, [0, 0, 0])
# In place binary operation
a += 1
assert_equal(a.mask, [0, 0, 0])
# Domained binary operation
b = a / 1.
assert_equal(b.mask, [0, 0, 0])
# In place binary operation
a /= 1.
assert_equal(a.mask, [0, 0, 0])
def test_ufunc_nomask(self):
        # Check that ufuncs set the mask to a 0-d False when inputs are unmasked
m = np.ma.array([1])
# check we don't get array([False], dtype=bool)
assert_equal(np.true_divide(m, 5).mask.shape, ())
    def test_noshrink_on_creation(self):
# Check that the mask is not shrunk on array creation when not wanted
a = np.ma.masked_values([1., 2.5, 3.1], 1.5, shrink=False)
assert_equal(a.mask, [0, 0, 0])
def test_mod(self):
# Tests mod
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
assert_equal(mod(x, y), mod(xm, ym))
test = mod(ym, xm)
assert_equal(test, np.mod(ym, xm))
assert_equal(test.mask, mask_or(xm.mask, ym.mask))
test = mod(xm, ym)
assert_equal(test, np.mod(xm, ym))
assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0)))
def test_TakeTransposeInnerOuter(self):
# Test of take, transpose, inner, outer products
x = arange(24)
y = np.arange(24)
x[5:6] = masked
x = x.reshape(2, 3, 4)
y = y.reshape(2, 3, 4)
assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))
assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))
assert_equal(np.inner(filled(x, 0), filled(y, 0)),
inner(x, y))
assert_equal(np.outer(filled(x, 0), filled(y, 0)),
outer(x, y))
y = array(['abc', 1, 'def', 2, 3], object)
y[2] = masked
t = take(y, [0, 3, 4])
assert_(t[0] == 'abc')
assert_(t[1] == 2)
assert_(t[2] == 3)
def test_imag_real(self):
        # Check .real/.imag of complex masked arrays
xx = array([1 + 10j, 20 + 2j], mask=[1, 0])
assert_equal(xx.imag, [10, 2])
assert_equal(xx.imag.filled(), [1e+20, 2])
assert_equal(xx.imag.dtype, xx._data.imag.dtype)
assert_equal(xx.real, [1, 20])
assert_equal(xx.real.filled(), [1e+20, 20])
assert_equal(xx.real.dtype, xx._data.real.dtype)
def test_methods_with_output(self):
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',)
for funcname in funclist:
npfunc = getattr(np, funcname)
xmmeth = getattr(xm, funcname)
            # An explicit ndarray as output
output = np.empty(4, dtype=float)
output.fill(-9999)
result = npfunc(xm, axis=0, out=output)
# ... the result should be the given output
assert_(result is output)
assert_equal(result, xmmeth(axis=0, out=output))
output = empty(4, dtype=int)
result = xmmeth(axis=0, out=output)
assert_(result is output)
assert_(output[0] is masked)
def test_eq_on_structured(self):
# Test the equality of structured arrays
ndtype = [('A', int), ('B', int)]
a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
test = (a == a)
assert_equal(test.data, [True, True])
assert_equal(test.mask, [False, False])
assert_(test.fill_value == True)
test = (a == a[0])
assert_equal(test.data, [True, False])
assert_equal(test.mask, [False, False])
assert_(test.fill_value == True)
b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
test = (a == b)
assert_equal(test.data, [False, True])
assert_equal(test.mask, [True, False])
assert_(test.fill_value == True)
test = (a[0] == b)
assert_equal(test.data, [False, False])
assert_equal(test.mask, [True, False])
assert_(test.fill_value == True)
b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = (a == b)
assert_equal(test.data, [True, True])
assert_equal(test.mask, [False, False])
assert_(test.fill_value == True)
# complicated dtype, 2-dimensional array.
ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]
a = array([[(1, (1, 1)), (2, (2, 2))],
[(3, (3, 3)), (4, (4, 4))]],
mask=[[(0, (1, 0)), (0, (0, 1))],
[(1, (0, 0)), (1, (1, 1))]], dtype=ndtype)
test = (a[0, 0] == a)
assert_equal(test.data, [[True, False], [False, False]])
assert_equal(test.mask, [[False, False], [False, True]])
assert_(test.fill_value == True)
def test_ne_on_structured(self):
        # Test inequality on structured arrays
ndtype = [('A', int), ('B', int)]
a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
test = (a != a)
assert_equal(test.data, [False, False])
assert_equal(test.mask, [False, False])
assert_(test.fill_value == True)
test = (a != a[0])
assert_equal(test.data, [False, True])
assert_equal(test.mask, [False, False])
assert_(test.fill_value == True)
b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
test = (a != b)
assert_equal(test.data, [True, False])
assert_equal(test.mask, [True, False])
assert_(test.fill_value == True)
test = (a[0] != b)
assert_equal(test.data, [True, True])
assert_equal(test.mask, [True, False])
assert_(test.fill_value == True)
b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = (a != b)
assert_equal(test.data, [False, False])
assert_equal(test.mask, [False, False])
assert_(test.fill_value == True)
# complicated dtype, 2-dimensional array.
ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]
a = array([[(1, (1, 1)), (2, (2, 2))],
[(3, (3, 3)), (4, (4, 4))]],
mask=[[(0, (1, 0)), (0, (0, 1))],
[(1, (0, 0)), (1, (1, 1))]], dtype=ndtype)
test = (a[0, 0] != a)
assert_equal(test.data, [[False, True], [True, True]])
assert_equal(test.mask, [[False, False], [False, True]])
assert_(test.fill_value == True)
def test_eq_ne_structured_extra(self):
# ensure simple examples are symmetric and make sense.
# from https://github.com/numpy/numpy/pull/8590#discussion_r101126465
dt = np.dtype('i4,i4')
for m1 in (mvoid((1, 2), mask=(0, 0), dtype=dt),
mvoid((1, 2), mask=(0, 1), dtype=dt),
mvoid((1, 2), mask=(1, 0), dtype=dt),
mvoid((1, 2), mask=(1, 1), dtype=dt)):
ma1 = m1.view(MaskedArray)
r1 = ma1.view('2i4')
for m2 in (np.array((1, 1), dtype=dt),
mvoid((1, 1), dtype=dt),
mvoid((1, 0), mask=(0, 1), dtype=dt),
mvoid((3, 2), mask=(0, 1), dtype=dt)):
ma2 = m2.view(MaskedArray)
r2 = ma2.view('2i4')
eq_expected = (r1 == r2).all()
assert_equal(m1 == m2, eq_expected)
assert_equal(m2 == m1, eq_expected)
assert_equal(ma1 == m2, eq_expected)
assert_equal(m1 == ma2, eq_expected)
assert_equal(ma1 == ma2, eq_expected)
# Also check it is the same if we do it element by element.
el_by_el = [m1[name] == m2[name] for name in dt.names]
assert_equal(array(el_by_el, dtype=bool).all(), eq_expected)
ne_expected = (r1 != r2).any()
assert_equal(m1 != m2, ne_expected)
assert_equal(m2 != m1, ne_expected)
assert_equal(ma1 != m2, ne_expected)
assert_equal(m1 != ma2, ne_expected)
assert_equal(ma1 != ma2, ne_expected)
el_by_el = [m1[name] != m2[name] for name in dt.names]
assert_equal(array(el_by_el, dtype=bool).any(), ne_expected)
@pytest.mark.parametrize('dt', ['S', 'U'])
@pytest.mark.parametrize('fill', [None, 'A'])
def test_eq_for_strings(self, dt, fill):
        # Test equality on string arrays
a = array(['a', 'b'], dtype=dt, mask=[0, 1], fill_value=fill)
test = (a == a)
assert_equal(test.data, [True, True])
assert_equal(test.mask, [False, True])
assert_(test.fill_value == True)
test = (a == a[0])
assert_equal(test.data, [True, False])
assert_equal(test.mask, [False, True])
assert_(test.fill_value == True)
b = array(['a', 'b'], dtype=dt, mask=[1, 0], fill_value=fill)
test = (a == b)
assert_equal(test.data, [False, False])
assert_equal(test.mask, [True, True])
assert_(test.fill_value == True)
test = (a[0] == b)
assert_equal(test.data, [False, False])
assert_equal(test.mask, [True, False])
assert_(test.fill_value == True)
test = (b == a[0])
assert_equal(test.data, [False, False])
assert_equal(test.mask, [True, False])
assert_(test.fill_value == True)
@pytest.mark.parametrize('dt', ['S', 'U'])
@pytest.mark.parametrize('fill', [None, 'A'])
def test_ne_for_strings(self, dt, fill):
        # Test inequality on string arrays
a = array(['a', 'b'], dtype=dt, mask=[0, 1], fill_value=fill)
test = (a != a)
assert_equal(test.data, [False, False])
assert_equal(test.mask, [False, True])
assert_(test.fill_value == True)
test = (a != a[0])
assert_equal(test.data, [False, True])
assert_equal(test.mask, [False, True])
assert_(test.fill_value == True)
b = array(['a', 'b'], dtype=dt, mask=[1, 0], fill_value=fill)
test = (a != b)
assert_equal(test.data, [True, True])
assert_equal(test.mask, [True, True])
assert_(test.fill_value == True)
test = (a[0] != b)
assert_equal(test.data, [True, True])
assert_equal(test.mask, [True, False])
assert_(test.fill_value == True)
test = (b != a[0])
assert_equal(test.data, [True, True])
assert_equal(test.mask, [True, False])
assert_(test.fill_value == True)
@pytest.mark.parametrize('dt1', num_dts, ids=num_ids)
@pytest.mark.parametrize('dt2', num_dts, ids=num_ids)
@pytest.mark.parametrize('fill', [None, 1])
def test_eq_for_numeric(self, dt1, dt2, fill):
        # Test equality between masked arrays of (possibly different) numeric dtypes
a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill)
test = (a == a)
assert_equal(test.data, [True, True])
assert_equal(test.mask, [False, True])
assert_(test.fill_value == True)
test = (a == a[0])
assert_equal(test.data, [True, False])
assert_equal(test.mask, [False, True])
assert_(test.fill_value == True)
b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill)
test = (a == b)
assert_equal(test.data, [False, False])
assert_equal(test.mask, [True, True])
assert_(test.fill_value == True)
test = (a[0] == b)
assert_equal(test.data, [False, False])
assert_equal(test.mask, [True, False])
assert_(test.fill_value == True)
test = (b == a[0])
assert_equal(test.data, [False, False])
assert_equal(test.mask, [True, False])
assert_(test.fill_value == True)
@pytest.mark.parametrize('dt1', num_dts, ids=num_ids)
@pytest.mark.parametrize('dt2', num_dts, ids=num_ids)
@pytest.mark.parametrize('fill', [None, 1])
def test_ne_for_numeric(self, dt1, dt2, fill):
        # Test inequality between masked arrays of (possibly different) numeric dtypes
a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill)
test = (a != a)
assert_equal(test.data, [False, False])
assert_equal(test.mask, [False, True])
assert_(test.fill_value == True)
test = (a != a[0])
assert_equal(test.data, [False, True])
assert_equal(test.mask, [False, True])
assert_(test.fill_value == True)
b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill)
test = (a != b)
assert_equal(test.data, [True, True])
assert_equal(test.mask, [True, True])
assert_(test.fill_value == True)
test = (a[0] != b)
assert_equal(test.data, [True, True])
assert_equal(test.mask, [True, False])
assert_(test.fill_value == True)
test = (b != a[0])
assert_equal(test.data, [True, True])
assert_equal(test.mask, [True, False])
assert_(test.fill_value == True)
def test_eq_with_None(self):
# Really, comparisons with None should not be done, but check them
# anyway. Note that pep8 will flag these tests.
# Deprecation is in place for arrays, and when it happens this
# test will fail (and have to be changed accordingly).
# With partial mask
with suppress_warnings() as sup:
sup.filter(FutureWarning, "Comparison to `None`")
a = array([None, 1], mask=[0, 1])
assert_equal(a == None, array([True, False], mask=[0, 1]))
assert_equal(a.data == None, [True, False])
assert_equal(a != None, array([False, True], mask=[0, 1]))
# With nomask
a = array([None, 1], mask=False)
assert_equal(a == None, [True, False])
assert_equal(a != None, [False, True])
# With complete mask
a = array([None, 2], mask=True)
assert_equal(a == None, array([False, True], mask=True))
assert_equal(a != None, array([True, False], mask=True))
# Fully masked, even comparison to None should return "masked"
a = masked
assert_equal(a == None, masked)
def test_eq_with_scalar(self):
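        # Comparing a 0-d masked array with a scalar yields a plain boolean,
        # or the masked singleton when the value is masked.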
a = array(1)
assert_equal(a == 1, True)
assert_equal(a == 0, False)
assert_equal(a != 1, False)
assert_equal(a != 0, True)
b = array(1, mask=True)
assert_equal(b == 0, masked)
assert_equal(b == 1, masked)
assert_equal(b != 0, masked)
assert_equal(b != 1, masked)
def test_eq_different_dimensions(self):
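        # The 1-d operand should broadcast against the 2-d one, and the
        # result mask should broadcast along with it.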
m1 = array([1, 1], mask=[0, 1])
# test comparison with both masked and regular arrays.
for m2 in (array([[0, 1], [1, 2]]),
np.array([[0, 1], [1, 2]])):
test = (m1 == m2)
assert_equal(test.data, [[False, False],
[True, False]])
assert_equal(test.mask, [[False, True],
[False, True]])
def test_numpyarithmetic(self):
# Check that the mask is not back-propagated when using numpy functions
a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
control = masked_array([np.nan, np.nan, 0, np.log(2), -1],
mask=[1, 1, 0, 0, 1])
test = log(a)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(a.mask, [0, 0, 0, 0, 1])
test = np.log(a)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(a.mask, [0, 0, 0, 0, 1])
class TestMaskedArrayAttributes:
def test_keepmask(self):
# Tests the keep mask flag
x = masked_array([1, 2, 3], mask=[1, 0, 0])
mx = masked_array(x)
assert_equal(mx.mask, x.mask)
mx = masked_array(x, mask=[0, 1, 0], keep_mask=False)
assert_equal(mx.mask, [0, 1, 0])
mx = masked_array(x, mask=[0, 1, 0], keep_mask=True)
assert_equal(mx.mask, [1, 1, 0])
        # keep_mask defaults to True
mx = masked_array(x, mask=[0, 1, 0])
assert_equal(mx.mask, [1, 1, 0])
def test_hardmask(self):
# Test hard_mask
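        # With a hard mask, assignment to masked entries is silently ignored;
        # with a soft mask, assignment unmasks and overwrites them, e.g.:
        #   h = array([1, 2], mask=[1, 0], hard_mask=True)
        #   h[0] = 99   # ignored: h[0] stays masked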
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d, mask=m, hard_mask=True)
# We need to copy, to avoid updating d in xh !
xs = array(d, mask=m, hard_mask=False, copy=True)
xh[[1, 4]] = [10, 40]
xs[[1, 4]] = [10, 40]
assert_equal(xh._data, [0, 10, 2, 3, 4])
assert_equal(xs._data, [0, 10, 2, 3, 40])
assert_equal(xs.mask, [0, 0, 0, 1, 0])
assert_(xh._hardmask)
assert_(not xs._hardmask)
xh[1:4] = [10, 20, 30]
xs[1:4] = [10, 20, 30]
assert_equal(xh._data, [0, 10, 20, 3, 4])
assert_equal(xs._data, [0, 10, 20, 30, 40])
assert_equal(xs.mask, nomask)
xh[0] = masked
xs[0] = masked
assert_equal(xh.mask, [1, 0, 0, 1, 1])
assert_equal(xs.mask, [1, 0, 0, 0, 0])
xh[:] = 1
xs[:] = 1
assert_equal(xh._data, [0, 1, 1, 3, 4])
assert_equal(xs._data, [1, 1, 1, 1, 1])
assert_equal(xh.mask, [1, 0, 0, 1, 1])
assert_equal(xs.mask, nomask)
# Switch to soft mask
xh.soften_mask()
xh[:] = arange(5)
assert_equal(xh._data, [0, 1, 2, 3, 4])
assert_equal(xh.mask, nomask)
# Switch back to hard mask
xh.harden_mask()
xh[xh < 3] = masked
assert_equal(xh._data, [0, 1, 2, 3, 4])
assert_equal(xh._mask, [1, 1, 1, 0, 0])
xh[filled(xh > 1, False)] = 5
assert_equal(xh._data, [0, 1, 2, 5, 5])
assert_equal(xh._mask, [1, 1, 1, 0, 0])
xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True)
xh[0] = 0
assert_equal(xh._data, [[1, 0], [3, 4]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
xh[-1, -1] = 5
assert_equal(xh._data, [[1, 0], [3, 5]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
xh[filled(xh < 5, False)] = 2
assert_equal(xh._data, [[1, 2], [2, 5]])
assert_equal(xh._mask, [[1, 0], [0, 0]])
def test_hardmask_again(self):
# Another test of hardmask
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d, mask=m, hard_mask=True)
xh[4:5] = 999
xh[0:1] = 999
assert_equal(xh._data, [999, 1, 2, 3, 4])
def test_hardmask_oncemore_yay(self):
# OK, yet another test of hardmask
        # Make sure that harden_mask/soften_mask/unshare_mask each return self
a = array([1, 2, 3], mask=[1, 0, 0])
b = a.harden_mask()
assert_equal(a, b)
b[0] = 0
assert_equal(a, b)
assert_equal(b, array([1, 2, 3], mask=[1, 0, 0]))
a = b.soften_mask()
a[0] = 0
assert_equal(a, b)
assert_equal(b, array([0, 2, 3], mask=[0, 0, 0]))
def test_smallmask(self):
# Checks the behaviour of _smallmask
a = arange(10)
a[1] = masked
a[1] = 1
assert_equal(a._mask, nomask)
a = arange(10)
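        # With _smallmask disabled, unmasking keeps a full boolean mask
        # instead of collapsing it back to nomask.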
a._smallmask = False
a[1] = masked
a[1] = 1
assert_equal(a._mask, zeros(10))
def test_shrink_mask(self):
# Tests .shrink_mask()
a = array([1, 2, 3], mask=[0, 0, 0])
b = a.shrink_mask()
assert_equal(a, b)
assert_equal(a.mask, nomask)
# Mask cannot be shrunk on structured types, so is a no-op
        a = np.ma.array([(1, 2.0)], dtype=[('a', int), ('b', float)])
b = a.copy()
a.shrink_mask()
assert_equal(a.mask, b.mask)
def test_flat(self):
# Test that flat can return all types of items [#4585, #4615]
# test 2-D record array
# ... on structured array w/ masked records
x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')],
[(4, 4.4, 'fou'), (5, 5.5, 'fiv'), (6, 6.6, 'six')]],
dtype=[('a', int), ('b', float), ('c', '|S8')])
x['a'][0, 1] = masked
x['b'][1, 0] = masked
x['c'][0, 2] = masked
x[-1, -1] = masked
xflat = x.flat
assert_equal(xflat[0], x[0, 0])
assert_equal(xflat[1], x[0, 1])
assert_equal(xflat[2], x[0, 2])
assert_equal(xflat[:3], x[0])
assert_equal(xflat[3], x[1, 0])
assert_equal(xflat[4], x[1, 1])
assert_equal(xflat[5], x[1, 2])
assert_equal(xflat[3:], x[1])
assert_equal(xflat[-1], x[-1, -1])
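        # Iterating over .flat should visit the elements in C (row-major) order.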
i = 0
j = 0
for xf in xflat:
assert_equal(xf, x[j, i])
i += 1
if i >= x.shape[-1]:
i = 0
j += 1
def test_assign_dtype(self):
# check that the mask's dtype is updated when dtype is changed
a = np.zeros(4, dtype='f4,i4')
m = np.ma.array(a)
m.dtype = np.dtype('f4')
repr(m) # raises?
assert_equal(m.dtype, np.dtype('f4'))
# check that dtype changes that change shape of mask too much
# are not allowed
def assign():
m = np.ma.array(a)
m.dtype = np.dtype('f8')
assert_raises(ValueError, assign)
b = a.view(dtype='f4', type=np.ma.MaskedArray) # raises?
assert_equal(b.dtype, np.dtype('f4'))
# check that nomask is preserved
a = np.zeros(4, dtype='f4')
m = np.ma.array(a)
m.dtype = np.dtype('f4,i4')
assert_equal(m.dtype, np.dtype('f4,i4'))
assert_equal(m._mask, np.ma.nomask)
class TestFillingValues:
def test_check_on_scalar(self):
# Test _check_fill_value set to valid and invalid values
_check_fill_value = np.ma.core._check_fill_value
fval = _check_fill_value(0, int)
assert_equal(fval, 0)
fval = _check_fill_value(None, int)
assert_equal(fval, default_fill_value(0))
fval = _check_fill_value(0, "|S3")
assert_equal(fval, b"0")
fval = _check_fill_value(None, "|S3")
assert_equal(fval, default_fill_value(b"camelot!"))
assert_raises(TypeError, _check_fill_value, 1e+20, int)
assert_raises(TypeError, _check_fill_value, 'stuff', int)
def test_check_on_fields(self):
# Tests _check_fill_value with records
_check_fill_value = np.ma.core._check_fill_value
ndtype = [('a', int), ('b', float), ('c', "|S3")]
# A check on a list should return a single record
fval = _check_fill_value([-999, -12345678.9, "???"], ndtype)
assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, b"???"])
# A check on None should output the defaults
fval = _check_fill_value(None, ndtype)
assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), [default_fill_value(0),
default_fill_value(0.),
asbytes(default_fill_value("0"))])
        # Using a structured type as fill_value should work
fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype)
fval = _check_fill_value(fill_val, ndtype)
assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, b"???"])
        # Using a flexible type w/ a different type shouldn't matter
# BEHAVIOR in 1.5 and earlier, and 1.13 and later: match structured
# types by position
fill_val = np.array((-999, -12345678.9, "???"),
dtype=[("A", int), ("B", float), ("C", "|S3")])
fval = _check_fill_value(fill_val, ndtype)
assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, b"???"])
        # Using an object-array shouldn't matter either
fill_val = np.ndarray(shape=(1,), dtype=object)
fill_val[0] = (-999, -12345678.9, b"???")
fval = _check_fill_value(fill_val, object)
assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, b"???"])
# NOTE: This test was never run properly as "fill_value" rather than
# "fill_val" was assigned. Written properly, it fails.
#fill_val = np.array((-999, -12345678.9, "???"))
#fval = _check_fill_value(fill_val, ndtype)
#assert_(isinstance(fval, ndarray))
#assert_equal(fval.item(), [-999, -12345678.9, b"???"])
        # One-field-only flexible type should work as well
ndtype = [("a", int)]
fval = _check_fill_value(-999999999, ndtype)
assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), (-999999999,))
def test_fillvalue_conversion(self):
# Tests the behavior of fill_value during conversion
        # Attach a custom _optinfo comment to make sure special attributes
        # survive the conversion
a = array([b'3', b'4', b'5'])
a._optinfo.update({'comment':"updated!"})
b = array(a, dtype=int)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0))
b = array(a, dtype=float)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0.))
b = a.astype(int)
assert_equal(b._data, [3, 4, 5])
assert_equal(b.fill_value, default_fill_value(0))
assert_equal(b._optinfo['comment'], "updated!")
b = a.astype([('a', '|S3')])
assert_equal(b['a']._data, a._data)
assert_equal(b['a'].fill_value, a.fill_value)
def test_default_fill_value(self):
# check all calling conventions
f1 = default_fill_value(1.)
f2 = default_fill_value(np.array(1.))
f3 = default_fill_value(np.array(1.).dtype)
assert_equal(f1, f2)
assert_equal(f1, f3)
def test_default_fill_value_structured(self):
fields = array([(1, 1, 1)],
dtype=[('i', int), ('s', '|S8'), ('f', float)])
f1 = default_fill_value(fields)
f2 = default_fill_value(fields.dtype)
expected = np.array((default_fill_value(0),
default_fill_value('0'),
default_fill_value(0.)), dtype=fields.dtype)
assert_equal(f1, expected)
assert_equal(f2, expected)
def test_default_fill_value_void(self):
dt = np.dtype([('v', 'V7')])
f = default_fill_value(dt)
assert_equal(f['v'], np.array(default_fill_value(dt['v']), dt['v']))
def test_fillvalue(self):
# Yet more fun with the fill_value
data = masked_array([1, 2, 3], fill_value=-999)
series = data[[0, 2, 1]]
assert_equal(series._fill_value, data._fill_value)
mtype = [('f', float), ('s', '|S3')]
x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype)
x.fill_value = 999
assert_equal(x.fill_value.item(), [999., b'999'])
assert_equal(x['f'].fill_value, 999)
assert_equal(x['s'].fill_value, b'999')
x.fill_value = (9, '???')
assert_equal(x.fill_value.item(), (9, b'???'))
assert_equal(x['f'].fill_value, 9)
assert_equal(x['s'].fill_value, b'???')
x = array([1, 2, 3.1])
x.fill_value = 999
assert_equal(np.asarray(x.fill_value).dtype, float)
assert_equal(x.fill_value, 999.)
assert_equal(x._fill_value, np.array(999.))
def test_subarray_fillvalue(self):
# gh-10483 test multi-field index fill value
fields = array([(1, 1, 1)],
dtype=[('i', int), ('s', '|S8'), ('f', float)])
with suppress_warnings() as sup:
sup.filter(FutureWarning, "Numpy has detected")
subfields = fields[['i', 'f']]
assert_equal(tuple(subfields.fill_value), (999999, 1.e+20))
# test comparison does not raise:
subfields[1:] == subfields[:-1]
def test_fillvalue_exotic_dtype(self):
# Tests yet more exotic flexible dtypes
_check_fill_value = np.ma.core._check_fill_value
ndtype = [('i', int), ('s', '|S8'), ('f', float)]
control = np.array((default_fill_value(0),
default_fill_value('0'),
default_fill_value(0.),),
dtype=ndtype)
assert_equal(_check_fill_value(None, ndtype), control)
# The shape shouldn't matter
ndtype = [('f0', float, (2, 2))]
control = np.array((default_fill_value(0.),),
dtype=[('f0', float)]).astype(ndtype)
assert_equal(_check_fill_value(None, ndtype), control)
control = np.array((0,), dtype=[('f0', float)]).astype(ndtype)
assert_equal(_check_fill_value(0, ndtype), control)
ndtype = np.dtype("int, (2,3)float, float")
control = np.array((default_fill_value(0),
default_fill_value(0.),
default_fill_value(0.),),
dtype="int, float, float").astype(ndtype)
test = _check_fill_value(None, ndtype)
assert_equal(test, control)
control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype)
assert_equal(_check_fill_value(0, ndtype), control)
# but when indexing, fill value should become scalar not tuple
# See issue #6723
M = masked_array(control)
assert_equal(M["f1"].fill_value.ndim, 0)
def test_fillvalue_datetime_timedelta(self):
# Test default fillvalue for datetime64 and timedelta64 types.
# See issue #4476, this would return '?' which would cause errors
# elsewhere
for timecode in ("as", "fs", "ps", "ns", "us", "ms", "s", "m",
"h", "D", "W", "M", "Y"):
control = numpy.datetime64("NaT", timecode)
test = default_fill_value(numpy.dtype("<M8[" + timecode + "]"))
np.testing.assert_equal(test, control)
control = numpy.timedelta64("NaT", timecode)
test = default_fill_value(numpy.dtype("<m8[" + timecode + "]"))
np.testing.assert_equal(test, control)
def test_extremum_fill_value(self):
# Tests extremum fill values for flexible type.
a = array([(1, (2, 3)), (4, (5, 6))],
dtype=[('A', int), ('B', [('BA', int), ('BB', int)])])
test = a.fill_value
assert_equal(test.dtype, a.dtype)
assert_equal(test['A'], default_fill_value(a['A']))
assert_equal(test['B']['BA'], default_fill_value(a['B']['BA']))
assert_equal(test['B']['BB'], default_fill_value(a['B']['BB']))
test = minimum_fill_value(a)
assert_equal(test.dtype, a.dtype)
assert_equal(test[0], minimum_fill_value(a['A']))
assert_equal(test[1][0], minimum_fill_value(a['B']['BA']))
assert_equal(test[1][1], minimum_fill_value(a['B']['BB']))
assert_equal(test[1], minimum_fill_value(a['B']))
test = maximum_fill_value(a)
assert_equal(test.dtype, a.dtype)
assert_equal(test[0], maximum_fill_value(a['A']))
assert_equal(test[1][0], maximum_fill_value(a['B']['BA']))
assert_equal(test[1][1], maximum_fill_value(a['B']['BB']))
assert_equal(test[1], maximum_fill_value(a['B']))
def test_extremum_fill_value_subdtype(self):
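        # Extremum fill values should broadcast over sub-array fields.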
a = array(([2, 3, 4],), dtype=[('value', np.int8, 3)])
test = minimum_fill_value(a)
assert_equal(test.dtype, a.dtype)
assert_equal(test[0], np.full(3, minimum_fill_value(a['value'])))
test = maximum_fill_value(a)
assert_equal(test.dtype, a.dtype)
assert_equal(test[0], np.full(3, maximum_fill_value(a['value'])))
def test_fillvalue_individual_fields(self):
# Test setting fill_value on individual fields
ndtype = [('a', int), ('b', int)]
# Explicit fill_value
a = array(list(zip([1, 2, 3], [4, 5, 6])),
fill_value=(-999, -999), dtype=ndtype)
aa = a['a']
aa.set_fill_value(10)
assert_equal(aa._fill_value, np.array(10))
assert_equal(tuple(a.fill_value), (10, -999))
a.fill_value['b'] = -10
assert_equal(tuple(a.fill_value), (10, -10))
# Implicit fill_value
t = array(list(zip([1, 2, 3], [4, 5, 6])), dtype=ndtype)
tt = t['a']
tt.set_fill_value(10)
assert_equal(tt._fill_value, np.array(10))
assert_equal(tuple(t.fill_value), (10, default_fill_value(0)))
def test_fillvalue_implicit_structured_array(self):
# Check that fill_value is always defined for structured arrays
ndtype = ('b', float)
adtype = ('a', float)
a = array([(1.,), (2.,)], mask=[(False,), (False,)],
fill_value=(np.nan,), dtype=np.dtype([adtype]))
b = empty(a.shape, dtype=[adtype, ndtype])
b['a'] = a['a']
b['a'].set_fill_value(a['a'].fill_value)
f = b._fill_value[()]
assert_(np.isnan(f[0]))
assert_equal(f[-1], default_fill_value(1.))
def test_fillvalue_as_arguments(self):
# Test adding a fill_value parameter to empty/ones/zeros
a = empty(3, fill_value=999.)
assert_equal(a.fill_value, 999.)
a = ones(3, fill_value=999., dtype=float)
assert_equal(a.fill_value, 999.)
a = zeros(3, fill_value=0., dtype=complex)
assert_equal(a.fill_value, 0.)
a = identity(3, fill_value=0., dtype=complex)
assert_equal(a.fill_value, 0.)
def test_shape_argument(self):
        # Test that shape can be provided as an argument
# GH issue 6106
a = empty(shape=(3, ))
assert_equal(a.shape, (3, ))
a = ones(shape=(3, ), dtype=float)
assert_equal(a.shape, (3, ))
a = zeros(shape=(3, ), dtype=complex)
assert_equal(a.shape, (3, ))
def test_fillvalue_in_view(self):
# Test the behavior of fill_value in view
# Create initial masked array
x = array([1, 2, 3], fill_value=1, dtype=np.int64)
# Check that fill_value is preserved by default
y = x.view()
assert_(y.fill_value == 1)
# Check that fill_value is preserved if dtype is specified and the
# dtype is an ndarray sub-class and has a _fill_value attribute
y = x.view(MaskedArray)
assert_(y.fill_value == 1)
# Check that fill_value is preserved if type is specified and the
# dtype is an ndarray sub-class and has a _fill_value attribute (by
# default, the first argument is dtype, not type)
y = x.view(type=MaskedArray)
assert_(y.fill_value == 1)
# Check that code does not crash if passed an ndarray sub-class that
# does not have a _fill_value attribute
y = x.view(np.ndarray)
y = x.view(type=np.ndarray)
# Check that fill_value can be overridden with view
y = x.view(MaskedArray, fill_value=2)
assert_(y.fill_value == 2)
# Check that fill_value can be overridden with view (using type=)
y = x.view(type=MaskedArray, fill_value=2)
assert_(y.fill_value == 2)
# Check that fill_value gets reset if passed a dtype but not a
# fill_value. This is because even though in some cases one can safely
# cast the fill_value, e.g. if taking an int64 view of an int32 array,
# in other cases, this cannot be done (e.g. int32 view of an int64
# array with a large fill_value).
y = x.view(dtype=np.int32)
assert_(y.fill_value == 999999)
def test_fillvalue_bytes_or_str(self):
# Test whether fill values work as expected for structured dtypes
# containing bytes or str. See issue #7259.
a = empty(shape=(3, ), dtype="(2)3S,(2)3U")
assert_equal(a["f0"].fill_value, default_fill_value(b"spam"))
assert_equal(a["f1"].fill_value, default_fill_value("eggs"))
class TestUfuncs:
# Test class for the application of ufuncs on MaskedArrays.
def setup_method(self):
# Base data definition.
self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
def teardown_method(self):
np.seterr(**self.err_status)
def test_testUfuncRegression(self):
# Tests new ufuncs on MaskedArrays.
for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
'sin', 'cos', 'tan',
'arcsin', 'arccos', 'arctan',
'sinh', 'cosh', 'tanh',
'arcsinh',
'arccosh',
'arctanh',
'absolute', 'fabs', 'negative',
'floor', 'ceil',
'logical_not',
'add', 'subtract', 'multiply',
'divide', 'true_divide', 'floor_divide',
'remainder', 'fmod', 'hypot', 'arctan2',
'equal', 'not_equal', 'less_equal', 'greater_equal',
'less', 'greater',
'logical_and', 'logical_or', 'logical_xor',
]:
try:
uf = getattr(umath, f)
except AttributeError:
uf = getattr(fromnumeric, f)
mf = getattr(numpy.ma.core, f)
args = self.d[:uf.nin]
ur = uf(*args)
mr = mf(*args)
assert_equal(ur.filled(0), mr.filled(0), f)
assert_mask_equal(ur.mask, mr.mask, err_msg=f)
def test_reduce(self):
# Tests reduce on MaskedArrays.
a = self.d[0]
assert_(not alltrue(a, axis=0))
assert_(sometrue(a, axis=0))
assert_equal(sum(a[:3], axis=0), 0)
assert_equal(product(a, axis=0), 0)
assert_equal(add.reduce(a), pi)
def test_minmax(self):
# Tests extrema on MaskedArrays.
a = arange(1, 13).reshape(3, 4)
amask = masked_where(a < 5, a)
assert_equal(amask.max(), a.max())
assert_equal(amask.min(), 5)
assert_equal(amask.max(0), a.max(0))
assert_equal(amask.min(0), [5, 6, 7, 8])
assert_(amask.max(1)[0].mask)
assert_(amask.min(1)[0].mask)
def test_ndarray_mask(self):
# Check that the mask of the result is a ndarray (not a MaskedArray...)
a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
test = np.sqrt(a)
control = masked_array([-1, 0, 1, np.sqrt(2), -1],
mask=[1, 0, 0, 0, 1])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_(not isinstance(test.mask, MaskedArray))
def test_treatment_of_NotImplemented(self):
# Check that NotImplemented is returned at appropriate places
a = masked_array([1., 2.], mask=[1, 0])
assert_raises(TypeError, operator.mul, a, "abc")
assert_raises(TypeError, operator.truediv, a, "abc")
class MyClass:
__array_priority__ = a.__array_priority__ + 1
def __mul__(self, other):
return "My mul"
def __rmul__(self, other):
return "My rmul"
me = MyClass()
assert_(me * a == "My mul")
assert_(a * me == "My rmul")
# and that __array_priority__ is respected
class MyClass2:
__array_priority__ = 100
def __mul__(self, other):
return "Me2mul"
def __rmul__(self, other):
return "Me2rmul"
def __rdiv__(self, other):
return "Me2rdiv"
__rtruediv__ = __rdiv__
me_too = MyClass2()
assert_(a.__mul__(me_too) is NotImplemented)
assert_(all(multiply.outer(a, me_too) == "Me2rmul"))
assert_(a.__truediv__(me_too) is NotImplemented)
assert_(me_too * a == "Me2mul")
assert_(a * me_too == "Me2rmul")
assert_(a / me_too == "Me2rdiv")
def test_no_masked_nan_warnings(self):
# check that a nan in masked position does not
# cause ufunc warnings
        m = np.ma.array([0.5, np.nan], mask=[0, 1])
with warnings.catch_warnings():
warnings.filterwarnings("error")
# test unary and binary ufuncs
exp(m)
add(m, 1)
m > 0
# test different unary domains
sqrt(m)
log(m)
tan(m)
arcsin(m)
arccos(m)
arccosh(m)
# test binary domains
divide(m, 2)
# also check that allclose uses ma ufuncs, to avoid warning
allclose(m, 0.5)
class TestMaskedArrayInPlaceArithmetic:
    # Tests of in-place arithmetic on MaskedArrays.
def setup_method(self):
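        # Shared fixtures: (plain, reference, masked) triples in int, float,
        # and uint8 flavors, plus the list of scalar types to loop over.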
x = arange(10)
y = arange(10)
xm = arange(10)
xm[2] = masked
self.intdata = (x, y, xm)
self.floatdata = (x.astype(float), y.astype(float), xm.astype(float))
self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
self.othertypes = [np.dtype(_).type for _ in self.othertypes]
self.uint8data = (
x.astype(np.uint8),
y.astype(np.uint8),
xm.astype(np.uint8)
)
def test_inplace_addition_scalar(self):
# Test of inplace additions
(x, y, xm) = self.intdata
xm[2] = masked
x += 1
assert_equal(x, y + 1)
xm += 1
assert_equal(xm, y + 1)
(x, _, xm) = self.floatdata
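        # In-place addition must reuse the same data buffer: the address
        # of the underlying memory should be unchanged afterwards.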
id1 = x.data.ctypes.data
x += 1.
assert_(id1 == x.data.ctypes.data)
assert_equal(x, y + 1.)
def test_inplace_addition_array(self):
# Test of inplace additions
(x, y, xm) = self.intdata
m = xm.mask
a = arange(10, dtype=np.int16)
a[-1] = masked
x += a
xm += a
assert_equal(x, y + a)
assert_equal(xm, y + a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_subtraction_scalar(self):
# Test of inplace subtractions
(x, y, xm) = self.intdata
x -= 1
assert_equal(x, y - 1)
xm -= 1
assert_equal(xm, y - 1)
def test_inplace_subtraction_array(self):
# Test of inplace subtractions
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x -= a
xm -= a
assert_equal(x, y - a)
assert_equal(xm, y - a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_multiplication_scalar(self):
# Test of inplace multiplication
(x, y, xm) = self.floatdata
x *= 2.0
assert_equal(x, y * 2)
xm *= 2.0
assert_equal(xm, y * 2)
def test_inplace_multiplication_array(self):
# Test of inplace multiplication
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x *= a
xm *= a
assert_equal(x, y * a)
assert_equal(xm, y * a)
assert_equal(xm.mask, mask_or(m, a.mask))
def test_inplace_division_scalar_int(self):
# Test of inplace division
(x, y, xm) = self.intdata
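        # Rebuild x and xm locally with even values so that integer floor
        # division by 2 recovers y exactly (and the fixtures stay untouched).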
x = arange(10) * 2
xm = arange(10) * 2
xm[2] = masked
x //= 2
assert_equal(x, y)
xm //= 2
assert_equal(xm, y)
def test_inplace_division_scalar_float(self):
# Test of inplace division
(x, y, xm) = self.floatdata
x /= 2.0
assert_equal(x, y / 2.0)
xm /= arange(10)
assert_equal(xm, ones((10,)))
def test_inplace_division_array_float(self):
# Test of inplace division
(x, y, xm) = self.floatdata
m = xm.mask
a = arange(10, dtype=float)
a[-1] = masked
x /= a
xm /= a
assert_equal(x, y / a)
assert_equal(xm, y / a)
assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0)))
def test_inplace_division_misc(self):
x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.]
y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = xm / ym
assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
assert_equal(z._data,
[1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
xm = xm.copy()
xm /= ym
assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
        assert_equal(xm._data,
                     [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
def test_datafriendly_add(self):
# Test keeping data w/ (inplace) addition
x = array([1, 2, 3], mask=[0, 0, 1])
# Test add w/ scalar
xx = x + 1
assert_equal(xx.data, [2, 3, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test iadd w/ scalar
x += 1
assert_equal(x.data, [2, 3, 3])
assert_equal(x.mask, [0, 0, 1])
# Test add w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x + array([1, 2, 3], mask=[1, 0, 0])
assert_equal(xx.data, [1, 4, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test iadd w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x += array([1, 2, 3], mask=[1, 0, 0])
assert_equal(x.data, [1, 4, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_sub(self):
# Test keeping data w/ (inplace) subtraction
# Test sub w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x - 1
assert_equal(xx.data, [0, 1, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test isub w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
x -= 1
assert_equal(x.data, [0, 1, 3])
assert_equal(x.mask, [0, 0, 1])
# Test sub w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x - array([1, 2, 3], mask=[1, 0, 0])
assert_equal(xx.data, [1, 0, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test isub w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x -= array([1, 2, 3], mask=[1, 0, 0])
assert_equal(x.data, [1, 0, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_mul(self):
# Test keeping data w/ (inplace) multiplication
# Test mul w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x * 2
assert_equal(xx.data, [2, 4, 3])
assert_equal(xx.mask, [0, 0, 1])
# Test imul w/ scalar
x = array([1, 2, 3], mask=[0, 0, 1])
x *= 2
assert_equal(x.data, [2, 4, 3])
assert_equal(x.mask, [0, 0, 1])
# Test mul w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x * array([10, 20, 30], mask=[1, 0, 0])
assert_equal(xx.data, [1, 40, 3])
assert_equal(xx.mask, [1, 0, 1])
# Test imul w/ array
x = array([1, 2, 3], mask=[0, 0, 1])
x *= array([10, 20, 30], mask=[1, 0, 0])
assert_equal(x.data, [1, 40, 3])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_div(self):
# Test keeping data w/ (inplace) division
# Test div on scalar
x = array([1, 2, 3], mask=[0, 0, 1])
xx = x / 2.
assert_equal(xx.data, [1 / 2., 2 / 2., 3])
assert_equal(xx.mask, [0, 0, 1])
# Test idiv on scalar
x = array([1., 2., 3.], mask=[0, 0, 1])
x /= 2.
assert_equal(x.data, [1 / 2., 2 / 2., 3])
assert_equal(x.mask, [0, 0, 1])
# Test div on array
x = array([1., 2., 3.], mask=[0, 0, 1])
xx = x / array([10., 20., 30.], mask=[1, 0, 0])
assert_equal(xx.data, [1., 2. / 20., 3.])
assert_equal(xx.mask, [1, 0, 1])
# Test idiv on array
x = array([1., 2., 3.], mask=[0, 0, 1])
x /= array([10., 20., 30.], mask=[1, 0, 0])
assert_equal(x.data, [1., 2 / 20., 3.])
assert_equal(x.mask, [1, 0, 1])
def test_datafriendly_pow(self):
# Test keeping data w/ (inplace) power
# Test pow on scalar
x = array([1., 2., 3.], mask=[0, 0, 1])
xx = x ** 2.5
assert_equal(xx.data, [1., 2. ** 2.5, 3.])
assert_equal(xx.mask, [0, 0, 1])
# Test ipow on scalar
x **= 2.5
assert_equal(x.data, [1., 2. ** 2.5, 3])
assert_equal(x.mask, [0, 0, 1])
def test_datafriendly_add_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a += b
assert_equal(a, [[2, 2], [4, 4]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a += b
assert_equal(a, [[2, 2], [4, 4]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_datafriendly_sub_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a -= b
assert_equal(a, [[0, 0], [2, 2]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a -= b
assert_equal(a, [[0, 0], [2, 2]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_datafriendly_mul_arrays(self):
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 0])
a *= b
assert_equal(a, [[1, 1], [3, 3]])
if a.mask is not nomask:
assert_equal(a.mask, [[0, 0], [0, 0]])
a = array([[1, 1], [3, 3]])
b = array([1, 1], mask=[0, 1])
a *= b
assert_equal(a, [[1, 1], [3, 3]])
assert_equal(a.mask, [[0, 1], [0, 1]])
def test_inplace_addition_scalar_type(self):
# Test of inplace additions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
xm[2] = masked
x += t(1)
assert_equal(x, y + t(1))
xm += t(1)
assert_equal(xm, y + t(1))
assert_equal(len(w), 0, f'Failed on type={t}.')
def test_inplace_addition_array_type(self):
# Test of inplace additions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x += a
xm += a
assert_equal(x, y + a)
assert_equal(xm, y + a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, f'Failed on type={t}.')
def test_inplace_subtraction_scalar_type(self):
# Test of inplace subtractions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x -= t(1)
assert_equal(x, y - t(1))
xm -= t(1)
assert_equal(xm, y - t(1))
assert_equal(len(w), 0, f'Failed on type={t}.')
def test_inplace_subtraction_array_type(self):
# Test of inplace subtractions
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x -= a
xm -= a
assert_equal(x, y - a)
assert_equal(xm, y - a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, f'Failed on type={t}.')
def test_inplace_multiplication_scalar_type(self):
# Test of inplace multiplication
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x *= t(2)
assert_equal(x, y * t(2))
xm *= t(2)
assert_equal(xm, y * t(2))
assert_equal(len(w), 0, f'Failed on type={t}.')
def test_inplace_multiplication_array_type(self):
# Test of inplace multiplication
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
x *= a
xm *= a
assert_equal(x, y * a)
assert_equal(xm, y * a)
assert_equal(xm.mask, mask_or(m, a.mask))
assert_equal(len(w), 0, f'Failed on type={t}.')
def test_inplace_floor_division_scalar_type(self):
# Test of inplace division
# Check for TypeError in case of unsupported types
unsupported = {np.dtype(t).type for t in np.typecodes["Complex"]}
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x = arange(10, dtype=t) * t(2)
xm = arange(10, dtype=t) * t(2)
xm[2] = masked
try:
x //= t(2)
xm //= t(2)
assert_equal(x, y)
assert_equal(xm, y)
assert_equal(len(w), 0, "Failed on type=%s." % t)
except TypeError:
msg = f"Supported type {t} throwing TypeError"
assert t in unsupported, msg
def test_inplace_floor_division_array_type(self):
# Test of inplace division
# Check for TypeError in case of unsupported types
unsupported = {np.dtype(t).type for t in np.typecodes["Complex"]}
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
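# The expected mask below combines both operands' masks with the
# positions where the divisor is zero (index 0 of a), since floor
# division by zero is a domain error.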
try:
x //= a
xm //= a
assert_equal(x, y // a)
assert_equal(xm, y // a)
assert_equal(
xm.mask,
mask_or(mask_or(m, a.mask), (a == t(0)))
)
assert_equal(len(w), 0, f'Failed on type={t}.')
except TypeError:
msg = f"Supported type {t} throwing TypeError"
assert t in unsupported, msg
def test_inplace_division_scalar_type(self):
# Test of inplace division
for t in self.othertypes:
with suppress_warnings() as sup:
sup.record(UserWarning)
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
x = arange(10, dtype=t) * t(2)
xm = arange(10, dtype=t) * t(2)
xm[2] = masked
# May get a DeprecationWarning or a TypeError.
#
# This is a consequence of the fact that this is true division,
# which requires casting to float for the calculation and
# casting back to the original type. The warning or error is
# only raised for integer types. Whether it is an error or a
# warning depends on how stringent the casting rules are.
#
# Both cases are handled the same way below.
try:
x /= t(2)
assert_equal(x, y)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e), stacklevel=1)
try:
xm /= t(2)
assert_equal(xm, y)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e), stacklevel=1)
if issubclass(t, np.integer):
assert_equal(len(sup.log), 2, f'Failed on type={t}.')
else:
assert_equal(len(sup.log), 0, f'Failed on type={t}.')
def test_inplace_division_array_type(self):
# Test of inplace division
for t in self.othertypes:
with suppress_warnings() as sup:
sup.record(UserWarning)
(x, y, xm) = (_.astype(t) for _ in self.uint8data)
m = xm.mask
a = arange(10, dtype=t)
a[-1] = masked
# May get a DeprecationWarning or a TypeError.
#
# This is a consequence of the fact that this is true division,
# which requires casting to float for the calculation and
# casting back to the original type. The warning or error is
# only raised for integer types. Whether it is an error or a
# warning depends on how stringent the casting rules are.
#
# Both cases are handled the same way below.
try:
x /= a
assert_equal(x, y / a)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e), stacklevel=1)
try:
xm /= a
assert_equal(xm, y / a)
assert_equal(
xm.mask,
mask_or(mask_or(m, a.mask), (a == t(0)))
)
except (DeprecationWarning, TypeError) as e:
warnings.warn(str(e), stacklevel=1)
if issubclass(t, np.integer):
assert_equal(len(sup.log), 2, f'Failed on type={t}.')
else:
assert_equal(len(sup.log), 0, f'Failed on type={t}.')
def test_inplace_pow_type(self):
# Test keeping data w/ (inplace) power
for t in self.othertypes:
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always")
# Test pow on scalar
x = array([1, 2, 3], mask=[0, 0, 1], dtype=t)
xx = x ** t(2)
xx_r = array([1, 2 ** 2, 3], mask=[0, 0, 1], dtype=t)
assert_equal(xx.data, xx_r.data)
assert_equal(xx.mask, xx_r.mask)
# Test ipow on scalar
x **= t(2)
assert_equal(x.data, xx_r.data)
assert_equal(x.mask, xx_r.mask)
assert_equal(len(w), 0, f'Failed on type={t}.')
class TestMaskedArrayMethods:
# Test class for miscellaneous MaskedArrays methods.
def setup_method(self):
# Base data definition.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_generic_methods(self):
# Tests some MaskedArray methods.
a = array([1, 3, 2])
assert_equal(a.any(), a._data.any())
assert_equal(a.all(), a._data.all())
assert_equal(a.argmax(), a._data.argmax())
assert_equal(a.argmin(), a._data.argmin())
assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4))
assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))
assert_equal(a.conj(), a._data.conj())
assert_equal(a.conjugate(), a._data.conjugate())
m = array([[1, 2], [3, 4]])
assert_equal(m.diagonal(), m._data.diagonal())
assert_equal(a.sum(), a._data.sum())
assert_equal(a.take([1, 2]), a._data.take([1, 2]))
assert_equal(m.transpose(), m._data.transpose())
def test_allclose(self):
# Tests allclose on arrays
a = np.random.rand(10)
b = a + np.random.rand(10) * 1e-8
assert_(allclose(a, b))
# Test allclose w/ infs
a[0] = np.inf
assert_(not allclose(a, b))
b[0] = np.inf
assert_(allclose(a, b))
# Test allclose w/ masked
a = masked_array(a)
a[-1] = masked
assert_(allclose(a, b, masked_equal=True))
assert_(not allclose(a, b, masked_equal=False))
# Test comparison w/ scalar
a *= 1e-8
a[0] = 0
assert_(allclose(a, 0, masked_equal=True))
# Test that the function works for MIN_INT integer typed arrays
a = masked_array([np.iinfo(np.int_).min], dtype=np.int_)
assert_(allclose(a, a))
def test_allclose_timedelta(self):
# allclose currently works for timedelta64 as long as `atol` is
# an integer or a timedelta64
a = np.array([[1, 2, 3, 4]], dtype="m8[ns]")
assert allclose(a, a, atol=0)
assert allclose(a, a, atol=np.timedelta64(1, "ns"))
def test_allany(self):
# Checks the any/all methods/functions.
x = np.array([[0.13, 0.26, 0.90],
[0.28, 0.33, 0.63],
[0.31, 0.87, 0.70]])
m = np.array([[True, False, False],
[False, False, False],
[True, True, False]], dtype=np.bool_)
mx = masked_array(x, mask=m)
mxbig = (mx > 0.5)
mxsmall = (mx < 0.5)
assert_(not mxbig.all())
assert_(mxbig.any())
assert_equal(mxbig.all(0), [False, False, True])
assert_equal(mxbig.all(1), [False, False, True])
assert_equal(mxbig.any(0), [False, False, True])
assert_equal(mxbig.any(1), [True, True, True])
assert_(not mxsmall.all())
assert_(mxsmall.any())
assert_equal(mxsmall.all(0), [True, True, False])
assert_equal(mxsmall.all(1), [False, False, False])
assert_equal(mxsmall.any(0), [True, True, False])
assert_equal(mxsmall.any(1), [True, True, False])
def test_allany_oddities(self):
# Some fun with all and any
store = empty((), dtype=bool)
full = array([1, 2, 3], mask=True)
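# all()/any() of a fully masked array return the `masked`
# singleton; with an explicit `out`, the result is written into
# `out` and its mask is set instead.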
assert_(full.all() is masked)
full.all(out=store)
assert_(store)
assert_equal(store._mask, True)
assert_(store is not masked)
store = empty((), dtype=bool)
assert_(full.any() is masked)
full.any(out=store)
assert_(not store)
assert_equal(store._mask, True)
assert_(store is not masked)
def test_argmax_argmin(self):
# Tests argmin & argmax on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
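# argmin/argmax skip masked entries by filling them with the
# dtype's maximum (for argmin) or minimum (for argmax) before
# searching.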
assert_equal(mx.argmin(), 35)
assert_equal(mX.argmin(), 35)
assert_equal(m2x.argmin(), 4)
assert_equal(m2X.argmin(), 4)
assert_equal(mx.argmax(), 28)
assert_equal(mX.argmax(), 28)
assert_equal(m2x.argmax(), 31)
assert_equal(m2X.argmax(), 31)
assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5])
assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4])
assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0])
assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0])
assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5])
assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3])
assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1])
assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1])
def test_clip(self):
# Tests clip on MaskedArrays.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0])
mx = array(x, mask=m)
clipped = mx.clip(2, 8)
assert_equal(clipped.mask, mx.mask)
assert_equal(clipped._data, x.clip(2, 8))
assert_equal(clipped._data, mx._data.clip(2, 8))
def test_clip_out(self):
# gh-14140
a = np.arange(10)
m = np.ma.MaskedArray(a, mask=[0, 1] * 5)
m.clip(0, 5, out=m)
assert_equal(m.mask, [0, 1] * 5)
def test_compress(self):
# test compress
a = masked_array([1., 2., 3., 4., 5.], fill_value=9999)
condition = (a > 1.5) & (a < 3.5)
assert_equal(a.compress(condition), [2., 3.])
a[[2, 3]] = masked
b = a.compress(condition)
assert_equal(b._data, [2., 3.])
assert_equal(b._mask, [0, 1])
assert_equal(b.fill_value, 9999)
assert_equal(b, a[condition])
condition = (a < 4.)
b = a.compress(condition)
assert_equal(b._data, [1., 2., 3.])
assert_equal(b._mask, [0, 0, 1])
assert_equal(b.fill_value, 9999)
assert_equal(b, a[condition])
a = masked_array([[10, 20, 30], [40, 50, 60]],
mask=[[0, 0, 1], [1, 0, 0]])
b = a.compress(a.ravel() >= 22)
assert_equal(b._data, [30, 40, 50, 60])
assert_equal(b._mask, [1, 1, 0, 0])
x = np.array([3, 1, 2])
b = a.compress(x >= 2, axis=1)
assert_equal(b._data, [[10, 30], [40, 60]])
assert_equal(b._mask, [[0, 1], [1, 0]])
def test_compressed(self):
# Tests compressed
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
a[0] = masked
b = a.compressed()
assert_equal(b, [2, 3, 4])
def test_empty(self):
# Tests empty/like
datatype = [('a', int), ('b', float), ('c', '|S8')]
a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')],
dtype=datatype)
assert_equal(len(a.fill_value.item()), len(datatype))
b = empty_like(a)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
b = empty(len(a), dtype=datatype)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
# check empty_like mask handling
a = masked_array([1, 2, 3], mask=[False, True, False])
b = empty_like(a)
assert_(not np.may_share_memory(a.mask, b.mask))
b = a.view(masked_array)
assert_(np.may_share_memory(a.mask, b.mask))
def test_zeros(self):
# Tests zeros/like
datatype = [('a', int), ('b', float), ('c', '|S8')]
a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')],
dtype=datatype)
assert_equal(len(a.fill_value.item()), len(datatype))
b = zeros(len(a), dtype=datatype)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
b = zeros_like(a)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
# check zeros_like mask handling
a = masked_array([1, 2, 3], mask=[False, True, False])
b = zeros_like(a)
assert_(not np.may_share_memory(a.mask, b.mask))
b = a.view()
assert_(np.may_share_memory(a.mask, b.mask))
def test_ones(self):
# Tests ones/like
datatype = [('a', int), ('b', float), ('c', '|S8')]
a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')],
dtype=datatype)
assert_equal(len(a.fill_value.item()), len(datatype))
b = ones(len(a), dtype=datatype)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
b = ones_like(a)
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
# check ones_like mask handling
a = masked_array([1, 2, 3], mask=[False, True, False])
b = ones_like(a)
assert_(not np.may_share_memory(a.mask, b.mask))
b = a.view()
assert_(np.may_share_memory(a.mask, b.mask))
@suppress_copy_mask_on_assignment
def test_put(self):
# Tests put.
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
x = array(d, mask=m)
assert_(x[3] is masked)
assert_(x[4] is masked)
x[[1, 4]] = [10, 40]
assert_(x[3] is masked)
assert_(x[4] is not masked)
assert_equal(x, [0, 10, 2, -1, 40])
x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
i = [0, 2, 4, 6]
x.put(i, [6, 4, 2, 0])
assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
put(x, i, [6, 4, 2, 0])
assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))
assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])
assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
def test_put_nomask(self):
# GitHub issue 6425
x = zeros(10)
z = array([3., -1.], mask=[False, True])
x.put([1, 2], z)
assert_(x[0] is not masked)
assert_equal(x[0], 0)
assert_(x[1] is not masked)
assert_equal(x[1], 3)
assert_(x[2] is masked)
assert_(x[3] is not masked)
assert_equal(x[3], 0)
def test_put_hardmask(self):
# Tests put on hardmask
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
xh = array(d + 1, mask=m, hard_mask=True, copy=True)
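# Indices 3 and 4 are hard-masked: the values put there (5 and 1)
# are discarded, so the original data (4 and 5) survive.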
xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5])
assert_equal(xh._data, [3, 4, 2, 4, 5])
def test_putmask(self):
x = arange(6) + 1
mx = array(x, mask=[0, 0, 0, 1, 1, 1])
mask = [0, 0, 1, 0, 0, 1]
# w/o mask, w/o masked values
xx = x.copy()
putmask(xx, mask, 99)
assert_equal(xx, [1, 2, 99, 4, 5, 99])
# w/ mask, w/o masked values
mxx = mx.copy()
putmask(mxx, mask, 99)
assert_equal(mxx._data, [1, 2, 99, 4, 5, 99])
assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0])
# w/o mask, w/ masked values
values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0])
xx = x.copy()
putmask(xx, mask, values)
assert_equal(xx._data, [1, 2, 30, 4, 5, 60])
assert_equal(xx._mask, [0, 0, 1, 0, 0, 0])
# w/ mask, w/ masked values
mxx = mx.copy()
putmask(mxx, mask, values)
assert_equal(mxx._data, [1, 2, 30, 4, 5, 60])
assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0])
# w/ mask, w/ masked values + hardmask
mxx = mx.copy()
mxx.harden_mask()
putmask(mxx, mask, values)
assert_equal(mxx, [1, 2, 30, 4, 5, 60])
def test_ravel(self):
# Tests ravel
a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]])
aravel = a.ravel()
assert_equal(aravel._mask.shape, aravel.shape)
a = array([0, 0], mask=[1, 1])
aravel = a.ravel()
assert_equal(aravel._mask.shape, a.shape)
# Check that the mask is not shrunk when shrink=False
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False)
assert_equal(a.ravel()._mask, [0, 0, 0, 0])
# Test that the fill_value is preserved
a.fill_value = -99
a.shape = (2, 2)
ar = a.ravel()
assert_equal(ar._mask, [0, 0, 0, 0])
assert_equal(ar._data, [1, 2, 3, 4])
assert_equal(ar.fill_value, -99)
# Test index ordering
assert_equal(a.ravel(order='C'), [1, 2, 3, 4])
assert_equal(a.ravel(order='F'), [1, 3, 2, 4])
def test_reshape(self):
# Tests reshape
x = arange(4)
x[0] = masked
y = x.reshape(2, 2)
assert_equal(y.shape, (2, 2,))
assert_equal(y._mask.shape, (2, 2,))
assert_equal(x.shape, (4,))
assert_equal(x._mask.shape, (4,))
def test_sort(self):
# Test sort
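# By default (endwith=True) masked entries sort to the end;
# endwith=False sorts them to the front instead.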
x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
sortedx = sort(x)
assert_equal(sortedx._data, [1, 2, 3, 4])
assert_equal(sortedx._mask, [0, 0, 0, 1])
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [4, 1, 2, 3])
assert_equal(sortedx._mask, [1, 0, 0, 0])
x.sort()
assert_equal(x._data, [1, 2, 3, 4])
assert_equal(x._mask, [0, 0, 0, 1])
x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
x.sort(endwith=False)
assert_equal(x._data, [4, 1, 2, 3])
assert_equal(x._mask, [1, 0, 0, 0])
x = [1, 4, 2, 3]
sortedx = sort(x)
assert_(not isinstance(sortedx, MaskedArray))
x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [-2, -1, 0, 1, 2])
x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8)
sortedx = sort(x, endwith=False)
assert_equal(sortedx._data, [1, 2, -2, -1, 0])
assert_equal(sortedx._mask, [1, 1, 0, 0, 0])
x = array([0, -1], dtype=np.int8)
sortedx = sort(x, kind="stable")
assert_equal(sortedx, array([-1, 0], dtype=np.int8))
def test_stable_sort(self):
x = array([1, 2, 3, 1, 2, 3], dtype=np.uint8)
expected = array([0, 3, 1, 4, 2, 5])
computed = argsort(x, kind='stable')
assert_equal(computed, expected)
def test_argsort_matches_sort(self):
x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
for kwargs in [dict(),
dict(endwith=True),
dict(endwith=False),
dict(fill_value=2),
dict(fill_value=2, endwith=True),
dict(fill_value=2, endwith=False)]:
sortedx = sort(x, **kwargs)
argsortedx = x[argsort(x, **kwargs)]
assert_equal(sortedx._data, argsortedx._data)
assert_equal(sortedx._mask, argsortedx._mask)
def test_sort_2d(self):
# Check sort of 2D array.
# 2D array w/o mask
a = masked_array([[8, 4, 1], [2, 0, 9]])
a.sort(0)
assert_equal(a, [[2, 0, 1], [8, 4, 9]])
a = masked_array([[8, 4, 1], [2, 0, 9]])
a.sort(1)
assert_equal(a, [[1, 4, 8], [0, 2, 9]])
# 2D array w/mask
a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
a.sort(0)
assert_equal(a, [[2, 0, 1], [8, 4, 9]])
assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]])
a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
a.sort(1)
assert_equal(a, [[1, 4, 8], [0, 2, 9]])
assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]])
# 3D
a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]],
[[1, 2, 3], [7, 8, 9], [4, 5, 6]],
[[7, 8, 9], [1, 2, 3], [4, 5, 6]],
[[4, 5, 6], [1, 2, 3], [7, 8, 9]]])
a[a % 4 == 0] = masked
am = a.copy()
an = a.filled(99)
am.sort(0)
an.sort(0)
assert_equal(am, an)
am = a.copy()
an = a.filled(99)
am.sort(1)
an.sort(1)
assert_equal(am, an)
am = a.copy()
an = a.filled(99)
am.sort(2)
an.sort(2)
assert_equal(am, an)
def test_sort_flexible(self):
# Test sort on structured dtype.
a = array(
data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)],
mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)],
dtype=[('A', int), ('B', int)])
mask_last = array(
data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)],
mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)],
dtype=[('A', int), ('B', int)])
mask_first = array(
data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3)],
mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0)],
dtype=[('A', int), ('B', int)])
test = sort(a)
assert_equal(test, mask_last)
assert_equal(test.mask, mask_last.mask)
test = sort(a, endwith=False)
assert_equal(test, mask_first)
assert_equal(test.mask, mask_first.mask)
# Test sort on dtype with subarray (gh-8069)
# Just check that the sort does not error; structured-array
# subarrays are treated as byte strings, which leads to behavior
# that differs with endianness and `endwith`.
dt = np.dtype([('v', int, 2)])
a = a.view(dt)
test = sort(a)
test = sort(a, endwith=False)
def test_argsort(self):
# Test argsort
a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0])
assert_equal(np.argsort(a), argsort(a))
def test_squeeze(self):
# Check squeeze
data = masked_array([[1, 2, 3]])
assert_equal(data.squeeze(), [1, 2, 3])
data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]])
assert_equal(data.squeeze(), [1, 2, 3])
assert_equal(data.squeeze()._mask, [1, 1, 1])
# normal ndarrays return a view
arr = np.array([[1]])
arr_sq = arr.squeeze()
assert_equal(arr_sq, 1)
arr_sq[...] = 2
assert_equal(arr[0,0], 2)
# so maskedarrays should too
m_arr = masked_array([[1]], mask=True)
m_arr_sq = m_arr.squeeze()
assert_(m_arr_sq is not np.ma.masked)
assert_equal(m_arr_sq.mask, True)
m_arr_sq[...] = 2
assert_equal(m_arr[0,0], 2)
def test_swapaxes(self):
# Tests swapaxes on MaskedArrays.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mX = array(x, mask=m).reshape(6, 6)
mXX = mX.reshape(3, 2, 2, 3)
mXswapped = mX.swapaxes(0, 1)
assert_equal(mXswapped[-1], mX[:, -1])
mXXswapped = mXX.swapaxes(0, 2)
assert_equal(mXXswapped.shape, (2, 2, 3, 3))
def test_take(self):
# Tests take
x = masked_array([10, 20, 30, 40], [0, 1, 0, 1])
assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1]))
assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]])
assert_equal(x.take([[0, 1], [0, 1]]),
masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]]))
# assert_equal crashes when passed np.ma.masked
assert_(x[1] is np.ma.masked)
assert_(x.take(1) is np.ma.masked)
x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]])
assert_equal(x.take([0, 2], axis=1),
array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
assert_equal(take(x, [0, 2], axis=1),
array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
def test_take_masked_indices(self):
# Test take w/ masked indices
a = np.array((40, 18, 37, 9, 22))
indices = np.arange(3)[None,:] + np.arange(5)[:, None]
mindices = array(indices, mask=(indices >= len(a)))
# No mask
test = take(a, mindices, mode='clip')
ctrl = array([[40, 18, 37],
[18, 37, 9],
[37, 9, 22],
[9, 22, 22],
[22, 22, 22]])
assert_equal(test, ctrl)
# Masked indices
test = take(a, mindices)
ctrl = array([[40, 18, 37],
[18, 37, 9],
[37, 9, 22],
[9, 22, 40],
[22, 40, 40]])
ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
# Masked input + masked indices
a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0))
test = take(a, mindices)
ctrl[0, 1] = ctrl[1, 0] = masked
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
def test_tolist(self):
# Tests to list
# ... on 1D
x = array(np.arange(12))
x[[1, -2]] = masked
xlist = x.tolist()
assert_(xlist[1] is None)
assert_(xlist[-2] is None)
# ... on 2D
x.shape = (3, 4)
xlist = x.tolist()
ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]]
assert_equal(xlist[0], [0, None, 2, 3])
assert_equal(xlist[1], [4, 5, 6, 7])
assert_equal(xlist[2], [8, 9, None, 11])
assert_equal(xlist, ctrl)
# ... on structured array w/ masked records
x = array(list(zip([1, 2, 3],
[1.1, 2.2, 3.3],
['one', 'two', 'thr'])),
dtype=[('a', int), ('b', float), ('c', '|S8')])
x[-1] = masked
assert_equal(x.tolist(),
[(1, 1.1, b'one'),
(2, 2.2, b'two'),
(None, None, None)])
# ... on structured array w/ masked fields
a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)],
dtype=[('a', int), ('b', int)])
test = a.tolist()
assert_equal(test, [[1, None], [3, 4]])
# ... on mvoid
a = a[0]
test = a.tolist()
assert_equal(test, [1, None])
def test_tolist_specialcase(self):
# Test mvoid.tolist: make sure we return a standard Python object
a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)])
# w/o mask: each entry is a np.void whose elements are standard Python
for entry in a:
for item in entry.tolist():
assert_(not isinstance(item, np.generic))
# w/ mask: each entry is a ma.void whose elements should be
# standard Python
a.mask[0] = (0, 1)
for entry in a:
for item in entry.tolist():
assert_(not isinstance(item, np.generic))
def test_toflex(self):
# Test the conversion to records
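# toflex() returns a plain ndarray with a flexible dtype holding
# '_data' and '_mask' fields, one record per element.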
data = arange(10)
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
ndtype = [('i', int), ('s', '|S3'), ('f', float)]
data = array([(i, s, f) for (i, s, f) in zip(np.arange(10),
'ABCDEFGHIJKLM',
np.random.rand(10))],
dtype=ndtype)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal(record['_data'], data._data)
assert_equal(record['_mask'], data._mask)
ndtype = np.dtype("int, (2,3)float, float")
data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10),
np.random.rand(10),
np.random.rand(10))],
dtype=ndtype)
data[[0, 1, 2, -1]] = masked
record = data.toflex()
assert_equal_records(record['_data'], data._data)
assert_equal_records(record['_mask'], data._mask)
def test_fromflex(self):
# Test the reconstruction of a masked_array from a record
a = array([1, 2, 3])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.mask, a.mask)
a = array([1, 2, 3], mask=[0, 0, 1])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.mask, a.mask)
a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)],
dtype=[('A', int), ('B', float)])
test = fromflex(a.toflex())
assert_equal(test, a)
assert_equal(test.data, a.data)
def test_arraymethod(self):
# Test a _arraymethod w/ n argument
marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0])
control = masked_array([[1], [2], [3], [4], [5]],
mask=[0, 0, 1, 0, 0])
assert_equal(marray.T, control)
assert_equal(marray.transpose(), control)
assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0))
def test_arraymethod_0d(self):
# gh-9430
x = np.ma.array(42, mask=True)
assert_equal(x.T.mask, x.mask)
assert_equal(x.T.data, x.data)
def test_transpose_view(self):
x = np.ma.array([[1, 2, 3], [4, 5, 6]])
x[0,1] = np.ma.masked
xt = x.T
xt[1,0] = 10
xt[0,1] = np.ma.masked
assert_equal(x.data, xt.T.data)
assert_equal(x.mask, xt.T.mask)
def test_diagonal_view(self):
x = np.ma.zeros((3,3))
x[0,0] = 10
x[1,1] = np.ma.masked
x[2,2] = 20
xd = x.diagonal()
x[1,1] = 15
assert_equal(xd.mask, x.diagonal().mask)
assert_equal(xd.data, x.diagonal().data)
class TestMaskedArrayMathMethods:
def setup_method(self):
# Base data definition.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_cumsumprod(self):
# Tests cumsum & cumprod on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
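# cumsum/cumprod compute their data with masked entries replaced
# by the identity element (0 for sum, 1 for product); the mask
# itself is preserved in the result.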
mXcp = mX.cumsum(0)
assert_equal(mXcp._data, mX.filled(0).cumsum(0))
mXcp = mX.cumsum(1)
assert_equal(mXcp._data, mX.filled(0).cumsum(1))
mXcp = mX.cumprod(0)
assert_equal(mXcp._data, mX.filled(1).cumprod(0))
mXcp = mX.cumprod(1)
assert_equal(mXcp._data, mX.filled(1).cumprod(1))
def test_cumsumprod_with_output(self):
# Tests cumsum/cumprod w/ output
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
for funcname in ('cumsum', 'cumprod'):
npfunc = getattr(np, funcname)
xmmeth = getattr(xm, funcname)
# A ndarray as explicit input
output = np.empty((3, 4), dtype=float)
output.fill(-9999)
result = npfunc(xm, axis=0, out=output)
# ... the result should be the given output
assert_(result is output)
assert_equal(result, xmmeth(axis=0, out=output))
output = empty((3, 4), dtype=int)
result = xmmeth(axis=0, out=output)
assert_(result is output)
def test_ptp(self):
# Tests ptp on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
(n, m) = X.shape
assert_equal(mx.ptp(), mx.compressed().ptp())
rows = np.zeros(n, float)
cols = np.zeros(m, float)
for k in range(m):
cols[k] = mX[:, k].compressed().ptp()
for k in range(n):
rows[k] = mX[k].compressed().ptp()
assert_equal(mX.ptp(0), cols)
assert_equal(mX.ptp(1), rows)
def test_add_object(self):
x = masked_array(['a', 'b'], mask=[1, 0], dtype=object)
y = x + 'x'
assert_equal(y[1], 'bx')
assert_(y.mask[0])
def test_sum_object(self):
# Test sum on object dtype
a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object)
assert_equal(a.sum(), 5)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.sum(axis=0), [5, 7, 9])
def test_prod_object(self):
# Test prod on object dtype
a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object)
assert_equal(a.prod(), 2 * 3)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.prod(axis=0), [4, 10, 18])
def test_meananom_object(self):
# Test mean/anom on object dtype
a = masked_array([1, 2, 3], dtype=object)
assert_equal(a.mean(), 2)
assert_equal(a.anom(), [-1, 0, 1])
def test_anom_shape(self):
a = masked_array([1, 2, 3])
assert_equal(a.anom().shape, a.shape)
a.mask = True
assert_equal(a.anom().shape, a.shape)
assert_(np.ma.is_masked(a.anom()))
def test_anom(self):
a = masked_array(np.arange(1, 7).reshape(2, 3))
assert_almost_equal(a.anom(),
[[-2.5, -1.5, -0.5], [0.5, 1.5, 2.5]])
assert_almost_equal(a.anom(axis=0),
[[-1.5, -1.5, -1.5], [1.5, 1.5, 1.5]])
assert_almost_equal(a.anom(axis=1),
[[-1., 0., 1.], [-1., 0., 1.]])
a.mask = [[0, 0, 1], [0, 1, 0]]
mval = -99
assert_almost_equal(a.anom().filled(mval),
[[-2.25, -1.25, mval], [0.75, mval, 2.75]])
assert_almost_equal(a.anom(axis=0).filled(mval),
[[-1.5, 0.0, mval], [1.5, mval, 0.0]])
assert_almost_equal(a.anom(axis=1).filled(mval),
[[-0.5, 0.5, mval], [-1.0, mval, 1.0]])
def test_trace(self):
# Tests trace on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
mXdiag = mX.diagonal()
assert_equal(mX.trace(), mX.diagonal().compressed().sum())
assert_almost_equal(mX.trace(),
X.trace() - sum(mXdiag.mask * X.diagonal(),
axis=0))
assert_equal(np.trace(mX), mX.trace())
# gh-5560
arr = np.arange(2*4*4).reshape(2,4,4)
m_arr = np.ma.masked_array(arr, False)
assert_equal(arr.trace(axis1=1, axis2=2), m_arr.trace(axis1=1, axis2=2))
def test_dot(self):
# Tests dot on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
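# dot() fills masked entries with 0; with the default non-strict
# rule an output element is masked only when no unmasked pair of
# operands contributes to it, so the 1-D self-product below
# carries nomask.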
fx = mx.filled(0)
r = mx.dot(mx)
assert_almost_equal(r.filled(0), fx.dot(fx))
assert_(r.mask is nomask)
fX = mX.filled(0)
r = mX.dot(mX)
assert_almost_equal(r.filled(0), fX.dot(fX))
assert_(r.mask[1,3])
r1 = empty_like(r)
mX.dot(mX, out=r1)
assert_almost_equal(r, r1)
mYY = mXX.swapaxes(-1, -2)
fXX, fYY = mXX.filled(0), mYY.filled(0)
r = mXX.dot(mYY)
assert_almost_equal(r.filled(0), fXX.dot(fYY))
r1 = empty_like(r)
mXX.dot(mYY, out=r1)
assert_almost_equal(r, r1)
def test_dot_shape_mismatch(self):
# regression test
x = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]])
y = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]])
z = masked_array([[0,1],[3,3]])
x.dot(y, out=z)
assert_almost_equal(z.filled(0), [[1, 0], [15, 16]])
assert_almost_equal(z.mask, [[0, 1], [0, 0]])
def test_varmean_nomask(self):
# gh-5769
foo = array([1,2,3,4], dtype='f8')
bar = array([1,2,3,4], dtype='f8')
assert_equal(type(foo.mean()), np.float64)
assert_equal(type(foo.var()), np.float64)
assert((foo.mean() == bar.mean()) is np.bool_(True))
# check array type is preserved and out works
foo = array(np.arange(16).reshape((4,4)), dtype='f8')
bar = empty(4, dtype='f4')
assert_equal(type(foo.mean(axis=1)), MaskedArray)
assert_equal(type(foo.var(axis=1)), MaskedArray)
assert_(foo.mean(axis=1, out=bar) is bar)
assert_(foo.var(axis=1, out=bar) is bar)
def test_varstd(self):
# Tests var & std on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_almost_equal(mX.var(axis=None), mX.compressed().var())
assert_almost_equal(mX.std(axis=None), mX.compressed().std())
assert_almost_equal(mX.std(axis=None, ddof=1),
mX.compressed().std(ddof=1))
assert_almost_equal(mX.var(axis=None, ddof=1),
mX.compressed().var(ddof=1))
assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
assert_equal(mX.var().shape, X.var().shape)
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
assert_almost_equal(mX.var(axis=None, ddof=2),
mX.compressed().var(ddof=2))
assert_almost_equal(mX.std(axis=None, ddof=2),
mX.compressed().std(ddof=2))
for k in range(6):
assert_almost_equal(mXvar1[k], mX[k].compressed().var())
assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
assert_almost_equal(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std())
@suppress_copy_mask_on_assignment
def test_varstd_specialcases(self):
# Test a special case for var
nout = np.array(-1, dtype=float)
mout = array(-1, dtype=float)
x = array(arange(10), mask=True)
for methodname in ('var', 'std'):
method = getattr(x, methodname)
assert_(method() is masked)
assert_(method(0) is masked)
assert_(method(-1) is masked)
# Using a masked array as explicit output
method(out=mout)
assert_(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
method(out=nout)
assert_(np.isnan(nout))
x = array(arange(10), mask=True)
x[-1] = 9
for methodname in ('var', 'std'):
method = getattr(x, methodname)
assert_(method(ddof=1) is masked)
assert_(method(0, ddof=1) is masked)
assert_(method(-1, ddof=1) is masked)
# Using a masked array as explicit output
method(out=mout, ddof=1)
assert_(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
method(out=nout, ddof=1)
assert_(np.isnan(nout))
def test_varstd_ddof(self):
a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]])
test = a.std(axis=0, ddof=0)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [0, 0, 1])
test = a.std(axis=0, ddof=1)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [0, 0, 1])
test = a.std(axis=0, ddof=2)
assert_equal(test.filled(0), [0, 0, 0])
assert_equal(test.mask, [1, 1, 1])
def test_diag(self):
# Test diag
x = arange(9).reshape((3, 3))
x[1, 1] = masked
out = np.diag(x)
assert_equal(out, [0, 4, 8])
out = diag(x)
assert_equal(out, [0, 4, 8])
assert_equal(out.mask, [0, 1, 0])
out = diag(out)
control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]],
mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
assert_equal(out, control)
def test_axis_methods_nomask(self):
# Test the combination nomask & methods w/ axis
a = array([[1, 2, 3], [4, 5, 6]])
assert_equal(a.sum(0), [5, 7, 9])
assert_equal(a.sum(-1), [6, 15])
assert_equal(a.sum(1), [6, 15])
assert_equal(a.prod(0), [4, 10, 18])
assert_equal(a.prod(-1), [6, 120])
assert_equal(a.prod(1), [6, 120])
assert_equal(a.min(0), [1, 2, 3])
assert_equal(a.min(-1), [1, 4])
assert_equal(a.min(1), [1, 4])
assert_equal(a.max(0), [4, 5, 6])
assert_equal(a.max(-1), [3, 6])
assert_equal(a.max(1), [3, 6])
class TestMaskedArrayMathMethodsComplex:
# Test class for miscellaneous MaskedArrays methods.
def setup_method(self):
# Base data definition.
x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479j,
7.189j, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993j])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
m2 = np.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x, mask=m2)
m2X = array(data=X, mask=m2.reshape(X.shape))
m2XX = array(data=XX, mask=m2.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
def test_varstd(self):
# Tests var & std on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
assert_almost_equal(mX.var(axis=None), mX.compressed().var())
assert_almost_equal(mX.std(axis=None), mX.compressed().std())
assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
assert_equal(mX.var().shape, X.var().shape)
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
assert_almost_equal(mX.var(axis=None, ddof=2),
mX.compressed().var(ddof=2))
assert_almost_equal(mX.std(axis=None, ddof=2),
mX.compressed().std(ddof=2))
for k in range(6):
assert_almost_equal(mXvar1[k], mX[k].compressed().var())
assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
assert_almost_equal(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std())
class TestMaskedArrayFunctions:
# Test class for miscellaneous functions.
def setup_method(self):
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
xm.set_fill_value(1e+20)
self.info = (xm, ym)
def test_masked_where_bool(self):
x = [1, 2]
y = masked_where(False, x)
assert_equal(y, [1, 2])
assert_equal(y[1], 2)
def test_masked_equal_wlist(self):
x = [1, 2, 3]
mx = masked_equal(x, 3)
assert_equal(mx, x)
assert_equal(mx._mask, [0, 0, 1])
mx = masked_not_equal(x, 3)
assert_equal(mx, x)
assert_equal(mx._mask, [1, 1, 0])
def test_masked_equal_fill_value(self):
x = [1, 2, 3]
mx = masked_equal(x, 3)
assert_equal(mx._mask, [0, 0, 1])
assert_equal(mx.fill_value, 3)
def test_masked_where_condition(self):
# Tests masking functions.
x = array([1., 2., 3., 4., 5.])
x[2] = masked
assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2))
assert_equal(masked_where(greater_equal(x, 2), x),
masked_greater_equal(x, 2))
assert_equal(masked_where(less(x, 2), x), masked_less(x, 2))
assert_equal(masked_where(less_equal(x, 2), x),
masked_less_equal(x, 2))
assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2))
assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
[99, 99, 3, 4, 5])
def test_masked_where_oddities(self):
# Tests some generic features.
atest = ones((10, 10, 10), dtype=float)
btest = zeros(atest.shape, MaskType)
ctest = masked_where(btest, atest)
assert_equal(atest, ctest)
def test_masked_where_shape_constraint(self):
a = arange(10)
with assert_raises(IndexError):
masked_equal(1, a)
test = masked_equal(a, 1)
assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
def test_masked_where_structured(self):
# test that masked_where on a structured array sets a structured
# mask (see issue #2972)
a = np.zeros(10, dtype=[("A", "<f2"), ("B", "<f4")])
am = np.ma.masked_where(a["A"] < 5, a)
assert_equal(am.mask.dtype.names, am.dtype.names)
assert_equal(am["A"],
np.ma.masked_array(np.zeros(10), np.ones(10)))
def test_masked_where_mismatch(self):
# gh-4520
x = np.arange(10)
y = np.arange(5)
assert_raises(IndexError, np.ma.masked_where, y > 6, x)
def test_masked_otherfunctions(self):
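# The 199s below are arbitrary placeholders at masked positions;
# the ma version of assert_equal does not compare data where
# either operand is masked.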
assert_equal(masked_inside(list(range(5)), 1, 3),
[0, 199, 199, 199, 4])
assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])
assert_equal(masked_inside(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 1, 3).mask,
[1, 1, 1, 1, 0])
assert_equal(masked_outside(array(list(range(5)),
mask=[0, 1, 0, 0, 0]), 1, 3).mask,
[1, 1, 0, 0, 1])
assert_equal(masked_equal(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 0])
assert_equal(masked_not_equal(array([2, 2, 1, 2, 1],
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 1])
def test_round(self):
a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890],
mask=[0, 1, 0, 0, 0])
assert_equal(a.round(), [1., 2., 3., 5., 6.])
assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7])
assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679])
b = empty_like(a)
a.round(out=b)
assert_equal(b, [1., 2., 3., 5., 6.])
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
def test_round_with_output(self):
# Testing round with an explicit output
xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
xm[:, 0] = xm[0] = xm[-1, -1] = masked
# A ndarray as explicit input
output = np.empty((3, 4), dtype=float)
output.fill(-9999)
result = np.round(xm, decimals=2, out=output)
# ... the result should be the given output
assert_(result is output)
assert_equal(result, xm.round(decimals=2, out=output))
output = empty((3, 4), dtype=float)
result = xm.round(decimals=2, out=output)
assert_(result is output)
def test_round_with_scalar(self):
# Testing round with scalar/zero dimension input
# GH issue 2244
a = array(1.1, mask=[False])
assert_equal(a.round(), 1)
a = array(1.1, mask=[True])
assert_(a.round() is masked)
a = array(1.1, mask=[False])
output = np.empty(1, dtype=float)
output.fill(-9999)
a.round(out=output)
assert_equal(output, 1)
a = array(1.1, mask=[False])
output = array(-9999., mask=[True])
a.round(out=output)
assert_equal(output[()], 1)
a = array(1.1, mask=[True])
output = array(-9999., mask=[False])
a.round(out=output)
assert_(output[()] is masked)
def test_identity(self):
a = identity(5)
assert_(isinstance(a, MaskedArray))
assert_equal(a, np.identity(5))
def test_power(self):
x = -1.1
assert_almost_equal(power(x, 2.), 1.21)
assert_(power(x, masked) is masked)
x = array([-1.1, -1.1, 1.1, 1.1, 0.])
b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1])
y = power(x, b)
assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.])
assert_equal(y._mask, [1, 0, 0, 0, 1])
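# Entry 0 is masked as a domain error ((-1.1) ** 0.5); entry 4
# follows the masked exponent. After b is unmasked below, entry 4
# stays masked because 0. ** -1. is itself invalid.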
b.mask = nomask
y = power(x, b)
assert_equal(y._mask, [1, 0, 0, 0, 1])
z = x ** b
assert_equal(z._mask, y._mask)
assert_almost_equal(z, y)
assert_almost_equal(z._data, y._data)
x **= b
assert_equal(x._mask, y._mask)
assert_almost_equal(x, y)
assert_almost_equal(x._data, y._data)
def test_power_with_broadcasting(self):
# Test power w/ broadcasting
a2 = np.array([[1., 2., 3.], [4., 5., 6.]])
a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]])
b1 = np.array([2, 4, 3])
b2 = np.array([b1, b1])
b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]])
ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]],
mask=[[1, 1, 0], [0, 1, 1]])
# No broadcasting, base & exp w/ mask
test = a2m ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
# No broadcasting, base w/ mask, exp w/o mask
test = a2m ** b2
assert_equal(test, ctrl)
assert_equal(test.mask, a2m.mask)
# No broadcasting, base w/o mask, exp w/ mask
test = a2 ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, b2m.mask)
ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]],
mask=[[0, 1, 0], [0, 1, 0]])
test = b1 ** b2m
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
test = b2m ** b1
assert_equal(test, ctrl)
assert_equal(test.mask, ctrl.mask)
def test_where(self):
# Test the where function
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
xm.set_fill_value(1e+20)
d = where(xm > 2, xm, -9)
assert_equal(d, [-9., -9., -9., -9., -9., 4.,
-9., -9., 10., -9., -9., 3.])
assert_equal(d._mask, xm._mask)
d = where(xm > 2, -9, ym)
assert_equal(d, [5., 0., 3., 2., -1., -9.,
-9., -10., -9., 1., 0., -9.])
assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0])
d = where(xm > 2, xm, masked)
assert_equal(d, [-9., -9., -9., -9., -9., 4.,
-9., -9., 10., -9., -9., 3.])
tmp = xm._mask.copy()
tmp[(xm <= 2).filled(True)] = True
assert_equal(d._mask, tmp)
ixm = xm.astype(int)
d = where(ixm > 2, ixm, masked)
assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3])
assert_equal(d.dtype, ixm.dtype)
def test_where_object(self):
a = np.array(None)
b = masked_array(None)
r = b.copy()
assert_equal(np.ma.where(True, a, a), r)
assert_equal(np.ma.where(True, b, b), r)
def test_where_with_masked_choice(self):
x = arange(10)
x[3] = masked
c = x >= 8
# Set False to masked
z = where(c, x, masked)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is masked)
assert_(z[7] is masked)
assert_(z[8] is not masked)
assert_(z[9] is not masked)
assert_equal(x, z)
# Set True to masked
z = where(c, masked, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
def test_where_with_masked_condition(self):
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert_equal(z, [1., 2., 0., -4., -5])
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
x = arange(1, 6)
x[-1] = masked
y = arange(1, 6) * 10
y[2] = masked
c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0])
cm = c.filled(1)
z = where(c, x, y)
zm = where(cm, x, y)
assert_equal(z, zm)
assert_(getmask(zm) is nomask)
assert_equal(zm, [1, 2, 3, 40, 50])
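# The 99s are placeholders at masked positions (a masked
# condition or a masked choice masks the output); assert_equal
# skips masked entries.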
z = where(c, masked, 1)
assert_equal(z, [99, 99, 99, 1, 1])
z = where(c, 1, masked)
assert_equal(z, [99, 1, 1, 99, 99])
def test_where_type(self):
# Test the type conservation with where
x = np.arange(4, dtype=np.int32)
y = np.arange(4, dtype=np.float32) * 2.2
test = where(x > 1.5, y, x).dtype
control = np.find_common_type([np.int32, np.float32], [])
assert_equal(test, control)
def test_where_broadcast(self):
# Issue 8599
x = np.arange(9).reshape(3, 3)
y = np.zeros(3)
core = np.where([1, 0, 1], x, y)
ma = where([1, 0, 1], x, y)
assert_equal(core, ma)
assert_equal(core.dtype, ma.dtype)
def test_where_structured(self):
# Issue 8600
dt = np.dtype([('a', int), ('b', int)])
x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt)
y = np.array((10, 20), dtype=dt)
core = np.where([0, 1, 1], x, y)
ma = where([0, 1, 1], x, y)
assert_equal(core, ma)
assert_equal(core.dtype, ma.dtype)
def test_where_structured_masked(self):
dt = np.dtype([('a', int), ('b', int)])
x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt)
ma = where([0, 1, 1], x, masked)
expected = masked_where([1, 0, 0], x)
assert_equal(ma.dtype, expected.dtype)
assert_equal(ma, expected)
assert_equal(ma.mask, expected.mask)
def test_choose(self):
# Test choose
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
chosen = choose([2, 3, 1, 0], choices)
assert_equal(chosen, array([20, 31, 12, 3]))
chosen = choose([2, 4, 1, 0], choices, mode='clip')
assert_equal(chosen, array([20, 31, 12, 3]))
chosen = choose([2, 4, 1, 0], choices, mode='wrap')
assert_equal(chosen, array([20, 1, 12, 3]))
# Check with some masked indices
indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1])
chosen = choose(indices_, choices, mode='wrap')
assert_equal(chosen, array([99, 1, 12, 99]))
assert_equal(chosen.mask, [1, 0, 0, 1])
# Check with some masked choices
choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
[1, 0, 0, 0], [0, 0, 0, 0]])
indices_ = [2, 3, 1, 0]
chosen = choose(indices_, choices, mode='wrap')
assert_equal(chosen, array([20, 31, 12, 3]))
assert_equal(chosen.mask, [1, 0, 0, 1])
def test_choose_with_out(self):
# Test choose with an explicit out keyword
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
store = empty(4, dtype=int)
chosen = choose([2, 3, 1, 0], choices, out=store)
assert_equal(store, array([20, 31, 12, 3]))
assert_(store is chosen)
# Check with some masked indices + out
store = empty(4, dtype=int)
indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1])
chosen = choose(indices_, choices, mode='wrap', out=store)
assert_equal(store, array([99, 31, 12, 99]))
assert_equal(store.mask, [1, 0, 0, 1])
# Check with some masked choices + out in a plain ndarray
choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
[1, 0, 0, 0], [0, 0, 0, 0]])
indices_ = [2, 3, 1, 0]
store = empty(4, dtype=int).view(ndarray)
chosen = choose(indices_, choices, mode='wrap', out=store)
assert_equal(store, array([999999, 31, 12, 999999]))
def test_reshape(self):
a = arange(10)
a[0] = masked
# Try the default
b = a.reshape((5, 2))
assert_equal(b.shape, (5, 2))
assert_(b.flags['C'])
# Try w/ arguments passed as separate ints instead of a tuple
b = a.reshape(5, 2)
assert_equal(b.shape, (5, 2))
assert_(b.flags['C'])
# Try w/ order
b = a.reshape((5, 2), order='F')
assert_equal(b.shape, (5, 2))
assert_(b.flags['F'])
# Try w/ order and separate int arguments
b = a.reshape(5, 2, order='F')
assert_equal(b.shape, (5, 2))
assert_(b.flags['F'])
c = np.reshape(a, (2, 5))
assert_(isinstance(c, MaskedArray))
assert_equal(c.shape, (2, 5))
assert_(c[0, 0] is masked)
assert_(c.flags['C'])
def test_make_mask_descr(self):
# Flexible
ntype = [('a', float), ('b', float)]
test = make_mask_descr(ntype)
assert_equal(test, [('a', bool), ('b', bool)])
assert_(test is make_mask_descr(test))
# Standard w/ shape
ntype = (float, 2)
test = make_mask_descr(ntype)
assert_equal(test, (bool, 2))
assert_(test is make_mask_descr(test))
# Standard scalar dtype
ntype = float
test = make_mask_descr(ntype)
assert_equal(test, np.dtype(bool))
assert_(test is make_mask_descr(test))
# Nested
ntype = [('a', float), ('b', [('ba', float), ('bb', float)])]
test = make_mask_descr(ntype)
control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])])
assert_equal(test, control)
assert_(test is make_mask_descr(test))
# Named + shape
ntype = [('a', (float, 2))]
test = make_mask_descr(ntype)
assert_equal(test, np.dtype([('a', (bool, 2))]))
assert_(test is make_mask_descr(test))
# 2 names
ntype = [(('A', 'a'), float)]
test = make_mask_descr(ntype)
assert_equal(test, np.dtype([(('A', 'a'), bool)]))
assert_(test is make_mask_descr(test))
# nested boolean types should preserve identity
base_type = np.dtype([('a', int, 3)])
base_mtype = make_mask_descr(base_type)
sub_type = np.dtype([('a', int), ('b', base_mtype)])
test = make_mask_descr(sub_type)
assert_equal(test, np.dtype([('a', bool), ('b', [('a', bool, 3)])]))
assert_(test.fields['b'][0] is base_mtype)
def test_make_mask(self):
# Test make_mask
# w/ a list as an input
mask = [0, 1]
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a ndarray as an input
mask = np.array([0, 1], dtype=bool)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a flexible-type ndarray as an input - use default
mdtype = [('a', bool), ('b', bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [1, 1])
# w/ a flexible-type ndarray as an input - use input dtype
mdtype = [('a', bool), ('b', bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, mdtype)
assert_equal(test, mask)
# w/ a flexible-type ndarray w/ non-bool fields - use input dtype
mdtype = [('a', float), ('b', float)]
bdtype = [('a', bool), ('b', bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, bdtype)
assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype))
# Ensure this also works for void
mask = np.array((False, True), dtype='?,?')[()]
assert_(isinstance(mask, np.void))
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test, mask)
assert_(test is not mask)
mask = np.array((0, 1), dtype='i4,i4')[()]
test2 = make_mask(mask, dtype=mask.dtype)
assert_equal(test2, test)
# test that nomask is returned when m is nomask.
bools = [True, False]
dtypes = [MaskType, float]
msgformat = 'copy=%s, shrink=%s, dtype=%s'
for cpy, shr, dt in itertools.product(bools, bools, dtypes):
res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dt)
assert_(res is nomask, msgformat % (cpy, shr, dt))
def test_mask_or(self):
# Initialize
mtype = [('a', bool), ('b', bool)]
mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype)
# Test using nomask as input
test = mask_or(mask, nomask)
assert_equal(test, mask)
test = mask_or(nomask, mask)
assert_equal(test, mask)
# Using False as input
test = mask_or(mask, False)
assert_equal(test, mask)
# Using another array w/ the same dtype
other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype)
test = mask_or(mask, other)
control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype)
assert_equal(test, control)
# Using another array w/ a different dtype is incompatible
othertype = [('A', bool), ('B', bool)]
other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype)
assert_raises(ValueError, mask_or, mask, other)
# Using nested arrays
dtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype)
bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype)
cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype)
assert_equal(mask_or(amask, bmask), cntrl)
def test_flatten_mask(self):
# Tests flatten mask
# Standard dtype
mask = np.array([0, 0, 1], dtype=bool)
assert_equal(flatten_mask(mask), mask)
# Flexible dtype
mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
test = flatten_mask(mask)
control = np.array([0, 0, 0, 1], dtype=bool)
assert_equal(test, control)
mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
data = [(0, (0, 0)), (0, (0, 1))]
mask = np.array(data, dtype=mdtype)
test = flatten_mask(mask)
control = np.array([0, 0, 0, 0, 0, 1], dtype=bool)
assert_equal(test, control)
def test_on_ndarray(self):
# Test functions on ndarrays
a = np.array([1, 2, 3, 4])
m = array(a, mask=False)
test = anom(a)
assert_equal(test, m.anom())
test = reshape(a, (2, 2))
assert_equal(test, m.reshape(2, 2))
def test_compress(self):
# Test compress function on ndarray and masked array
# Address Github #2495.
arr = np.arange(8)
arr.shape = 4, 2
cond = np.array([True, False, True, True])
control = arr[[0, 2, 3]]
test = np.ma.compress(cond, arr, axis=0)
assert_equal(test, control)
marr = np.ma.array(arr)
test = np.ma.compress(cond, marr, axis=0)
assert_equal(test, control)
def test_compressed(self):
# Test ma.compressed function.
# Address gh-4026
a = np.ma.array([1, 2])
test = np.ma.compressed(a)
assert_(type(test) is np.ndarray)
# Test case when input data is ndarray subclass
class A(np.ndarray):
pass
a = np.ma.array(A(shape=0))
test = np.ma.compressed(a)
assert_(type(test) is A)
# Test that compress flattens
test = np.ma.compressed([[1],[2]])
assert_equal(test.ndim, 1)
test = np.ma.compressed([[[[[1]]]]])
assert_equal(test.ndim, 1)
# Test case when input is MaskedArray subclass
class M(MaskedArray):
pass
test = np.ma.compressed(M([[[]], [[]]]))
assert_equal(test.ndim, 1)
# with .compressed() overridden
class M(MaskedArray):
def compressed(self):
return 42
test = np.ma.compressed(M([[[]], [[]]]))
assert_equal(test, 42)
def test_convolve(self):
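# propagate_mask=True masks an output element if any masked input
# contributes to it; with False, masked inputs count as 0 and the
# output is masked only where no unmasked input contributes.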
a = masked_equal(np.arange(5), 2)
b = np.array([1, 1])
test = np.ma.convolve(a, b)
assert_equal(test, masked_equal([0, 1, -1, -1, 7, 4], -1))
test = np.ma.convolve(a, b, propagate_mask=False)
assert_equal(test, masked_equal([0, 1, 1, 3, 7, 4], -1))
test = np.ma.convolve([1, 1], [1, 1, 1])
assert_equal(test, masked_equal([1, 2, 2, 1], -1))
a = [1, 1]
b = masked_equal([1, -1, -1, 1], -1)
test = np.ma.convolve(a, b, propagate_mask=False)
assert_equal(test, masked_equal([1, 1, -1, 1, 1], -1))
test = np.ma.convolve(a, b, propagate_mask=True)
assert_equal(test, masked_equal([-1, -1, -1, -1, -1], -1))
class TestMaskedFields:
def setup_method(self):
ilist = [1, 2, 3, 4, 5]
flist = [1.1, 2.2, 3.3, 4.4, 5.5]
slist = ['one', 'two', 'three', 'four', 'five']
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mdtype = [('a', bool), ('b', bool), ('c', bool)]
mask = [0, 1, 0, 0, 1]
base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype)
self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype)
def test_set_records_masks(self):
base = self.data['base']
mdtype = self.data['mdtype']
# Set w/ nomask or masked
base.mask = nomask
assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
base.mask = masked
assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
# Set w/ simple boolean
base.mask = False
assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
base.mask = True
assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
# Set w/ list
base.mask = [0, 0, 0, 1, 1]
assert_equal_records(base._mask,
np.array([(x, x, x) for x in [0, 0, 0, 1, 1]],
dtype=mdtype))
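    # Note: for structured arrays a scalar or per-record mask assignment is
    # broadcast across all fields of each record. Illustrative sketch, not
    # part of the original suite:
    #   >>> rec = array([(1, 1.)], dtype=[('a', int), ('b', float)])
    #   >>> rec.mask = [True]
    #   >>> rec.mask                   # every field of record 0 is masked
    #   array([( True,  True)], dtype=[('a', '?'), ('b', '?')])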
def test_set_record_element(self):
        # Check setting an element of a record
base = self.data['base']
(base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
base[0] = (pi, pi, 'pi')
assert_equal(base_a.dtype, int)
assert_equal(base_a._data, [3, 2, 3, 4, 5])
assert_equal(base_b.dtype, float)
assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5])
assert_equal(base_c.dtype, '|S8')
assert_equal(base_c._data,
[b'pi', b'two', b'three', b'four', b'five'])
def test_set_record_slice(self):
base = self.data['base']
(base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
base[:3] = (pi, pi, 'pi')
assert_equal(base_a.dtype, int)
assert_equal(base_a._data, [3, 3, 3, 4, 5])
assert_equal(base_b.dtype, float)
assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5])
assert_equal(base_c.dtype, '|S8')
assert_equal(base_c._data,
[b'pi', b'pi', b'pi', b'four', b'five'])
def test_mask_element(self):
"Check record access"
base = self.data['base']
base[0] = masked
for n in ('a', 'b', 'c'):
assert_equal(base[n].mask, [1, 1, 0, 0, 1])
assert_equal(base[n]._data, base._data[n])
def test_getmaskarray(self):
# Test getmaskarray on flexible dtype
ndtype = [('a', int), ('b', float)]
test = empty(3, dtype=ndtype)
assert_equal(getmaskarray(test),
np.array([(0, 0), (0, 0), (0, 0)],
dtype=[('a', '|b1'), ('b', '|b1')]))
test[:] = masked
assert_equal(getmaskarray(test),
np.array([(1, 1), (1, 1), (1, 1)],
dtype=[('a', '|b1'), ('b', '|b1')]))
def test_view(self):
# Test view w/ flexible dtype
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
controlmask = np.array([1] + 19 * [0], dtype=bool)
# Transform globally to simple dtype
test = a.view(float)
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
        # Transform globally to a compound dtype (float, 2)
test = a.view((float, 2))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
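    # Note: viewing a structured masked array as plain float flattens both
    # the data and the mask, so a (10,)-record mask with two fields per
    # record becomes a (20,) mask; view((float, 2)) keeps the (10, 2)
    # pairing instead. E.g. a.view(float).shape == (20,) while
    # a.view((float, 2)).shape == (10, 2).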
def test_getitem(self):
ndtype = [('a', float), ('b', float)]
a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype)
a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 1, 0])),
dtype=[('a', bool), ('b', bool)])
def _test_index(i):
assert_equal(type(a[i]), mvoid)
assert_equal_records(a[i]._data, a._data[i])
assert_equal_records(a[i]._mask, a._mask[i])
assert_equal(type(a[i, ...]), MaskedArray)
assert_equal_records(a[i,...]._data, a._data[i,...])
assert_equal_records(a[i,...]._mask, a._mask[i,...])
        _test_index(1)   # No fields masked
        _test_index(0)   # One field masked
        _test_index(-2)  # All fields masked
def test_setitem(self):
# Issue 4866: check that one can set individual items in [record][col]
# and [col][record] order
ndtype = np.dtype([('a', float), ('b', int)])
ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype)
ma['a'][1] = 3.0
assert_equal(ma['a'], np.array([1.0, 3.0]))
ma[1]['a'] = 4.0
assert_equal(ma['a'], np.array([1.0, 4.0]))
# Issue 2403
mdtype = np.dtype([('a', bool), ('b', bool)])
# soft mask
control = np.array([(False, True), (True, True)], dtype=mdtype)
a = np.ma.masked_all((2,), dtype=ndtype)
a['a'][0] = 2
assert_equal(a.mask, control)
a = np.ma.masked_all((2,), dtype=ndtype)
a[0]['a'] = 2
assert_equal(a.mask, control)
# hard mask
control = np.array([(True, True), (True, True)], dtype=mdtype)
a = np.ma.masked_all((2,), dtype=ndtype)
a.harden_mask()
a['a'][0] = 2
assert_equal(a.mask, control)
a = np.ma.masked_all((2,), dtype=ndtype)
a.harden_mask()
a[0]['a'] = 2
assert_equal(a.mask, control)
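    # Note: with a soft mask (the default) assigning to a masked element both
    # stores the value and clears that field's mask; after harden_mask() the
    # assignment is silently discarded and the mask stays set. Minimal
    # sketch, not part of the original suite:
    #   >>> a = np.ma.masked_all((1,), dtype=float)
    #   >>> a.harden_mask(); a[0] = 1.0
    #   >>> bool(a.mask[0])            # still masked
    #   True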
def test_setitem_scalar(self):
# 8510
mask_0d = np.ma.masked_array(1, mask=True)
arr = np.ma.arange(3)
arr[0] = mask_0d
assert_array_equal(arr.mask, [True, False, False])
def test_element_len(self):
# check that len() works for mvoid (Github issue #576)
for rec in self.data['base']:
assert_equal(len(rec), len(self.data['ddtype']))
class TestMaskedObjectArray:
def test_getitem(self):
arr = np.ma.array([None, None])
for dt in [float, object]:
a0 = np.eye(2).astype(dt)
a1 = np.eye(3).astype(dt)
arr[0] = a0
arr[1] = a1
assert_(arr[0] is a0)
assert_(arr[1] is a1)
assert_(isinstance(arr[0,...], MaskedArray))
assert_(isinstance(arr[1,...], MaskedArray))
assert_(arr[0,...][()] is a0)
assert_(arr[1,...][()] is a1)
arr[0] = np.ma.masked
assert_(arr[1] is a1)
assert_(isinstance(arr[0,...], MaskedArray))
assert_(isinstance(arr[1,...], MaskedArray))
assert_equal(arr[0,...].mask, True)
assert_(arr[1,...][()] is a1)
# gh-5962 - object arrays of arrays do something special
assert_equal(arr[0].data, a0)
assert_equal(arr[0].mask, True)
assert_equal(arr[0,...][()].data, a0)
assert_equal(arr[0,...][()].mask, True)
def test_nested_ma(self):
arr = np.ma.array([None, None])
# set the first object to be an unmasked masked constant. A little fiddly
arr[0,...] = np.array([np.ma.masked], object)[0,...]
# check the above line did what we were aiming for
assert_(arr.data[0] is np.ma.masked)
# test that getitem returned the value by identity
assert_(arr[0] is np.ma.masked)
# now mask the masked value!
arr[0] = np.ma.masked
assert_(arr[0] is np.ma.masked)
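    # Note: np.ma.masked is a module-level singleton, so the identity checks
    # above (`is np.ma.masked`) distinguish "the slot holds the constant as
    # data" from "the slot is merely masked". Masking a slot keeps its data.
    # Illustrative sketch, not part of the original suite:
    #   >>> obj = np.ma.array([1], dtype=object)
    #   >>> obj[0] = np.ma.masked      # masks the slot, data stays 1
    #   >>> obj.data[0]
    #   1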
class TestMaskedView:
def setup_method(self):
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
controlmask = np.array([1] + 19 * [0], dtype=bool)
self.data = (data, a, controlmask)
def test_view_to_nothing(self):
(data, a, controlmask) = self.data
test = a.view()
assert_(isinstance(test, MaskedArray))
assert_equal(test._data, a._data)
assert_equal(test._mask, a._mask)
def test_view_to_type(self):
(data, a, controlmask) = self.data
test = a.view(np.ndarray)
assert_(not isinstance(test, MaskedArray))
assert_equal(test, a._data)
assert_equal_records(test, data.view(a.dtype).squeeze())
def test_view_to_simple_dtype(self):
(data, a, controlmask) = self.data
# View globally
test = a.view(float)
assert_(isinstance(test, MaskedArray))
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
def test_view_to_flexible_dtype(self):
(data, a, controlmask) = self.data
test = a.view([('A', float), ('B', float)])
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'])
assert_equal(test['B'], a['b'])
test = a[0].view([('A', float), ('B', float)])
assert_(isinstance(test, MaskedArray))
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][0])
assert_equal(test['B'], a['b'][0])
test = a[-1].view([('A', float), ('B', float)])
assert_(isinstance(test, MaskedArray))
assert_equal(test.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][-1])
assert_equal(test['B'], a['b'][-1])
def test_view_to_subdtype(self):
(data, a, controlmask) = self.data
# View globally
test = a.view((float, 2))
assert_(isinstance(test, MaskedArray))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
# View on 1 masked element
test = a[0].view((float, 2))
assert_(isinstance(test, MaskedArray))
assert_equal(test, data[0])
assert_equal(test.mask, (1, 0))
# View on 1 unmasked element
test = a[-1].view((float, 2))
assert_(isinstance(test, MaskedArray))
assert_equal(test, data[-1])
def test_view_to_dtype_and_type(self):
(data, a, controlmask) = self.data
test = a.view((float, 2), np.recarray)
assert_equal(test, data)
assert_(isinstance(test, np.recarray))
assert_(not isinstance(test, MaskedArray))
class TestOptionalArgs:
def test_ndarrayfuncs(self):
# test axis arg behaves the same as ndarray (including multiple axes)
d = np.arange(24.0).reshape((2,3,4))
m = np.zeros(24, dtype=bool).reshape((2,3,4))
# mask out last element of last dimension
m[:,:,-1] = True
a = np.ma.array(d, mask=m)
def testaxis(f, a, d):
numpy_f = numpy.__getattribute__(f)
ma_f = np.ma.__getattribute__(f)
# test axis arg
assert_equal(ma_f(a, axis=1)[...,:-1], numpy_f(d[...,:-1], axis=1))
assert_equal(ma_f(a, axis=(0,1))[...,:-1],
numpy_f(d[...,:-1], axis=(0,1)))
def testkeepdims(f, a, d):
numpy_f = numpy.__getattribute__(f)
ma_f = np.ma.__getattribute__(f)
# test keepdims arg
assert_equal(ma_f(a, keepdims=True).shape,
numpy_f(d, keepdims=True).shape)
assert_equal(ma_f(a, keepdims=False).shape,
numpy_f(d, keepdims=False).shape)
# test both at once
assert_equal(ma_f(a, axis=1, keepdims=True)[...,:-1],
numpy_f(d[...,:-1], axis=1, keepdims=True))
assert_equal(ma_f(a, axis=(0,1), keepdims=True)[...,:-1],
numpy_f(d[...,:-1], axis=(0,1), keepdims=True))
for f in ['sum', 'prod', 'mean', 'var', 'std']:
testaxis(f, a, d)
testkeepdims(f, a, d)
for f in ['min', 'max']:
testaxis(f, a, d)
d = (np.arange(24).reshape((2,3,4))%2 == 0)
a = np.ma.array(d, mask=m)
for f in ['all', 'any']:
testaxis(f, a, d)
testkeepdims(f, a, d)
def test_count(self):
# test np.ma.count specially
d = np.arange(24.0).reshape((2,3,4))
m = np.zeros(24, dtype=bool).reshape((2,3,4))
m[:,0,:] = True
a = np.ma.array(d, mask=m)
assert_equal(count(a), 16)
assert_equal(count(a, axis=1), 2*ones((2,4)))
assert_equal(count(a, axis=(0,1)), 4*ones((4,)))
assert_equal(count(a, keepdims=True), 16*ones((1,1,1)))
assert_equal(count(a, axis=1, keepdims=True), 2*ones((2,1,4)))
assert_equal(count(a, axis=(0,1), keepdims=True), 4*ones((1,1,4)))
assert_equal(count(a, axis=-2), 2*ones((2,4)))
assert_raises(ValueError, count, a, axis=(1,1))
assert_raises(np.AxisError, count, a, axis=3)
# check the 'nomask' path
a = np.ma.array(d, mask=nomask)
assert_equal(count(a), 24)
assert_equal(count(a, axis=1), 3*ones((2,4)))
assert_equal(count(a, axis=(0,1)), 6*ones((4,)))
assert_equal(count(a, keepdims=True), 24*ones((1,1,1)))
assert_equal(np.ndim(count(a, keepdims=True)), 3)
assert_equal(count(a, axis=1, keepdims=True), 3*ones((2,1,4)))
assert_equal(count(a, axis=(0,1), keepdims=True), 6*ones((1,1,4)))
assert_equal(count(a, axis=-2), 3*ones((2,4)))
assert_raises(ValueError, count, a, axis=(1,1))
assert_raises(np.AxisError, count, a, axis=3)
# check the 'masked' singleton
assert_equal(count(np.ma.masked), 0)
# check 0-d arrays do not allow axis > 0
assert_raises(np.AxisError, count, np.ma.array(1), axis=1)
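    # Note: count() returns the number of unmasked entries; on the nomask
    # path it returns plain Python ints / ndarrays, and the masked singleton
    # counts as zero. Illustrative sketch, not part of the original suite:
    #   >>> count(np.ma.array([1, 2, 3], mask=[0, 1, 0]))
    #   2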
class TestMaskedConstant:
def _do_add_test(self, add):
# sanity check
assert_(add(np.ma.masked, 1) is np.ma.masked)
# now try with a vector
vector = np.array([1, 2, 3])
result = add(np.ma.masked, vector)
# lots of things could go wrong here
assert_(result is not np.ma.masked)
assert_(not isinstance(result, np.ma.core.MaskedConstant))
assert_equal(result.shape, vector.shape)
assert_equal(np.ma.getmask(result), np.ones(vector.shape, dtype=bool))
def test_ufunc(self):
self._do_add_test(np.add)
def test_operator(self):
self._do_add_test(lambda a, b: a + b)
def test_ctor(self):
m = np.ma.array(np.ma.masked)
# most importantly, we do not want to create a new MaskedConstant
# instance
assert_(not isinstance(m, np.ma.core.MaskedConstant))
assert_(m is not np.ma.masked)
def test_repr(self):
# copies should not exist, but if they do, it should be obvious that
# something is wrong
assert_equal(repr(np.ma.masked), 'masked')
# create a new instance in a weird way
masked2 = np.ma.MaskedArray.__new__(np.ma.core.MaskedConstant)
assert_not_equal(repr(masked2), 'masked')
def test_pickle(self):
from io import BytesIO
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
with BytesIO() as f:
pickle.dump(np.ma.masked, f, protocol=proto)
f.seek(0)
res = pickle.load(f)
assert_(res is np.ma.masked)
def test_copy(self):
# gh-9328
# copy is a no-op, like it is with np.True_
assert_equal(
np.ma.masked.copy() is np.ma.masked,
np.True_.copy() is np.True_)
def test__copy(self):
import copy
assert_(
copy.copy(np.ma.masked) is np.ma.masked)
def test_deepcopy(self):
import copy
assert_(
copy.deepcopy(np.ma.masked) is np.ma.masked)
def test_immutable(self):
orig = np.ma.masked
assert_raises(np.ma.core.MaskError, operator.setitem, orig, (), 1)
        assert_raises(ValueError, operator.setitem, orig.data, (), 1)
assert_raises(ValueError, operator.setitem, orig.mask, (), False)
view = np.ma.masked.view(np.ma.MaskedArray)
assert_raises(ValueError, operator.setitem, view, (), 1)
assert_raises(ValueError, operator.setitem, view.data, (), 1)
assert_raises(ValueError, operator.setitem, view.mask, (), False)
def test_coercion_int(self):
a_i = np.zeros((), int)
assert_raises(MaskError, operator.setitem, a_i, (), np.ma.masked)
assert_raises(MaskError, int, np.ma.masked)
def test_coercion_float(self):
a_f = np.zeros((), float)
assert_warns(UserWarning, operator.setitem, a_f, (), np.ma.masked)
assert_(np.isnan(a_f[()]))
@pytest.mark.xfail(reason="See gh-9750")
def test_coercion_unicode(self):
a_u = np.zeros((), 'U10')
a_u[()] = np.ma.masked
assert_equal(a_u[()], u'--')
@pytest.mark.xfail(reason="See gh-9750")
def test_coercion_bytes(self):
a_b = np.zeros((), 'S10')
a_b[()] = np.ma.masked
assert_equal(a_b[()], b'--')
def test_subclass(self):
# https://github.com/astropy/astropy/issues/6645
class Sub(type(np.ma.masked)): pass
a = Sub()
assert_(a is Sub())
assert_(a is not np.ma.masked)
assert_not_equal(repr(a), 'masked')
def test_attributes_readonly(self):
assert_raises(AttributeError, setattr, np.ma.masked, 'shape', (1,))
assert_raises(AttributeError, setattr, np.ma.masked, 'dtype', np.int64)
class TestMaskedWhereAliases:
# TODO: Test masked_object, masked_equal, ...
def test_masked_values(self):
res = masked_values(np.array([-32768.0]), np.int16(-32768))
assert_equal(res.mask, [True])
res = masked_values(np.inf, np.inf)
assert_equal(res.mask, True)
res = np.ma.masked_values(np.inf, -np.inf)
assert_equal(res.mask, False)
res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=True)
assert_(res.mask is np.ma.nomask)
res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=False)
assert_equal(res.mask, [False] * 4)
def test_masked_array():
a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0])
assert_equal(np.argwhere(a), [[1], [3]])
def test_masked_array_no_copy():
# check nomask array is updated in place
a = np.ma.array([1, 2, 3, 4])
_ = np.ma.masked_where(a == 3, a, copy=False)
assert_array_equal(a.mask, [False, False, True, False])
# check masked array is updated in place
a = np.ma.array([1, 2, 3, 4], mask=[1, 0, 0, 0])
_ = np.ma.masked_where(a == 3, a, copy=False)
assert_array_equal(a.mask, [True, False, True, False])
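# Note: masked_where(..., copy=False) mutates the input's mask in place, as
# the test above relies on; with the default copy=True the original array is
# left untouched. Minimal sketch, not part of the original suite:
#   >>> a = np.ma.array([1, 2, 3])
#   >>> _ = np.ma.masked_where(a > 2, a, copy=False)
#   >>> a.mask
#   array([False, False,  True])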
def test_append_masked_array():
a = np.ma.masked_equal([1,2,3], value=2)
b = np.ma.masked_equal([4,3,2], value=2)
result = np.ma.append(a, b)
expected_data = [1, 2, 3, 4, 3, 2]
expected_mask = [False, True, False, False, False, True]
assert_array_equal(result.data, expected_data)
assert_array_equal(result.mask, expected_mask)
a = np.ma.masked_all((2,2))
b = np.ma.ones((3,1))
result = np.ma.append(a, b)
expected_data = [1] * 3
expected_mask = [True] * 4 + [False] * 3
    assert_array_equal(result.data[-3:], expected_data)
assert_array_equal(result.mask, expected_mask)
result = np.ma.append(a, b, axis=None)
    assert_array_equal(result.data[-3:], expected_data)
assert_array_equal(result.mask, expected_mask)
def test_append_masked_array_along_axis():
a = np.ma.masked_equal([1,2,3], value=2)
b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
# When `axis` is specified, `values` must have the correct shape.
assert_raises(ValueError, np.ma.append, a, b, axis=0)
result = np.ma.append(a[np.newaxis,:], b, axis=0)
expected = np.ma.arange(1, 10)
expected[[1, 6]] = np.ma.masked
expected = expected.reshape((3,3))
assert_array_equal(result.data, expected.data)
assert_array_equal(result.mask, expected.mask)
def test_default_fill_value_complex():
# regression test for Python 3, where 'unicode' was not defined
assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j)
def test_ufunc_with_output():
# check that giving an output argument always returns that output.
# Regression test for gh-8416.
x = array([1., 2., 3.], mask=[0, 0, 1])
y = np.add(x, 1., out=x)
assert_(y is x)
def test_ufunc_with_out_varied():
""" Test that masked arrays are immune to gh-10459 """
# the mask of the output should not affect the result, however it is passed
a = array([ 1, 2, 3], mask=[1, 0, 0])
b = array([10, 20, 30], mask=[1, 0, 0])
out = array([ 0, 0, 0], mask=[0, 0, 1])
expected = array([11, 22, 33], mask=[1, 0, 0])
out_pos = out.copy()
res_pos = np.add(a, b, out_pos)
out_kw = out.copy()
res_kw = np.add(a, b, out=out_kw)
out_tup = out.copy()
res_tup = np.add(a, b, out=(out_tup,))
assert_equal(res_kw.mask, expected.mask)
assert_equal(res_kw.data, expected.data)
assert_equal(res_tup.mask, expected.mask)
assert_equal(res_tup.data, expected.data)
assert_equal(res_pos.mask, expected.mask)
assert_equal(res_pos.data, expected.data)
def test_astype_mask_ordering():
descr = [('v', int, 3), ('x', [('y', float)])]
x = array([
[([1, 2, 3], (1.0,)), ([1, 2, 3], (2.0,))],
[([1, 2, 3], (3.0,)), ([1, 2, 3], (4.0,))]], dtype=descr)
x[0]['v'][0] = np.ma.masked
x_a = x.astype(descr)
assert x_a.dtype.names == np.dtype(descr).names
assert x_a.mask.dtype.names == np.dtype(descr).names
assert_equal(x, x_a)
assert_(x is x.astype(x.dtype, copy=False))
assert_equal(type(x.astype(x.dtype, subok=False)), np.ndarray)
x_f = x.astype(x.dtype, order='F')
assert_(x_f.flags.f_contiguous)
assert_(x_f.mask.flags.f_contiguous)
# Also test the same indirectly, via np.array
x_a2 = np.array(x, dtype=descr, subok=True)
assert x_a2.dtype.names == np.dtype(descr).names
assert x_a2.mask.dtype.names == np.dtype(descr).names
assert_equal(x, x_a2)
assert_(x is np.array(x, dtype=descr, copy=False, subok=True))
x_f2 = np.array(x, dtype=x.dtype, order='F', subok=True)
assert_(x_f2.flags.f_contiguous)
assert_(x_f2.mask.flags.f_contiguous)
@pytest.mark.parametrize('dt1', num_dts, ids=num_ids)
@pytest.mark.parametrize('dt2', num_dts, ids=num_ids)
@pytest.mark.filterwarnings('ignore::numpy.ComplexWarning')
def test_astype_basic(dt1, dt2):
# See gh-12070
src = np.ma.array(ones(3, dt1), fill_value=1)
dst = src.astype(dt2)
assert_(src.fill_value == 1)
assert_(src.dtype == dt1)
assert_(src.fill_value.dtype == dt1)
assert_(dst.fill_value == 1)
assert_(dst.dtype == dt2)
assert_(dst.fill_value.dtype == dt2)
assert_equal(src, dst)
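# Note: astype() converts the fill_value along with the data, which is what
# the dtype assertions above verify. Minimal sketch, not part of the
# original suite:
#   >>> m = np.ma.array([1], dtype=np.int32, fill_value=1)
#   >>> m.astype(np.float64).fill_value
#   1.0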
def test_fieldless_void():
dt = np.dtype([]) # a void dtype with no fields
x = np.empty(4, dt)
# these arrays contain no values, so there's little to test - but this
# shouldn't crash
mx = np.ma.array(x)
assert_equal(mx.dtype, x.dtype)
assert_equal(mx.shape, x.shape)
mx = np.ma.array(x, mask=x)
assert_equal(mx.dtype, x.dtype)
assert_equal(mx.shape, x.shape)
def test_mask_shape_assignment_does_not_break_masked():
a = np.ma.masked
b = np.ma.array(1, mask=a.mask)
b.shape = (1,)
assert_equal(a.mask.shape, ())
@pytest.mark.skipif(sys.flags.optimize > 1,
reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1")
def test_doc_note():
def method(self):
"""This docstring
Has multiple lines
And notes
Notes
-----
original note
"""
pass
expected_doc = """This docstring
Has multiple lines
And notes
Notes
-----
note
original note"""
assert_equal(np.ma.core.doc_note(method.__doc__, "note"), expected_doc)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/ma/tests/test_deprecations.py
"""Test deprecation and future warnings.
"""
import pytest
import numpy as np
from numpy.testing import assert_warns
from numpy.ma.testutils import assert_equal
from numpy.ma.core import MaskedArrayFutureWarning
import io
import textwrap
class TestArgsort:
""" gh-8701 """
def _test_base(self, argsort, cls):
arr_0d = np.array(1).view(cls)
argsort(arr_0d)
arr_1d = np.array([1, 2, 3]).view(cls)
argsort(arr_1d)
# argsort has a bad default for >1d arrays
arr_2d = np.array([[1, 2], [3, 4]]).view(cls)
result = assert_warns(
np.ma.core.MaskedArrayFutureWarning, argsort, arr_2d)
assert_equal(result, argsort(arr_2d, axis=None))
# should be no warnings for explicitly specifying it
argsort(arr_2d, axis=None)
argsort(arr_2d, axis=-1)
def test_function_ndarray(self):
return self._test_base(np.ma.argsort, np.ndarray)
def test_function_maskedarray(self):
return self._test_base(np.ma.argsort, np.ma.MaskedArray)
def test_method(self):
return self._test_base(np.ma.MaskedArray.argsort, np.ma.MaskedArray)
class TestMinimumMaximum:
def test_minimum(self):
assert_warns(DeprecationWarning, np.ma.minimum, np.ma.array([1, 2]))
def test_maximum(self):
assert_warns(DeprecationWarning, np.ma.maximum, np.ma.array([1, 2]))
def test_axis_default(self):
# NumPy 1.13, 2017-05-06
data1d = np.ma.arange(6)
data2d = data1d.reshape(2, 3)
ma_min = np.ma.minimum.reduce
ma_max = np.ma.maximum.reduce
# check that the default axis is still None, but warns on 2d arrays
result = assert_warns(MaskedArrayFutureWarning, ma_max, data2d)
assert_equal(result, ma_max(data2d, axis=None))
result = assert_warns(MaskedArrayFutureWarning, ma_min, data2d)
assert_equal(result, ma_min(data2d, axis=None))
# no warnings on 1d, as both new and old defaults are equivalent
result = ma_min(data1d)
assert_equal(result, ma_min(data1d, axis=None))
assert_equal(result, ma_min(data1d, axis=0))
result = ma_max(data1d)
assert_equal(result, ma_max(data1d, axis=None))
assert_equal(result, ma_max(data1d, axis=0))
class TestFromtextfile:
def test_fromtextfile_delimitor(self):
# NumPy 1.22.0, 2021-09-23
textfile = io.StringIO(textwrap.dedent(
"""
A,B,C,D
'string 1';1;1.0;'mixed column'
'string 2';2;2.0;
'string 3';3;3.0;123
'string 4';4;4.0;3.14
"""
))
with pytest.warns(DeprecationWarning):
result = np.ma.mrecords.fromtextfile(textfile, delimitor=';')
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/ma/tests/test_regression.py
import numpy as np
from numpy.testing import (
assert_, assert_array_equal, assert_allclose, suppress_warnings
)
class TestRegression:
def test_masked_array_create(self):
# Ticket #17
x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6],
mask=[0, 0, 0, 1, 1, 1, 0, 0])
assert_array_equal(np.ma.nonzero(x), [[1, 2, 6, 7]])
def test_masked_array(self):
# Ticket #61
np.ma.array(1, mask=[1])
def test_mem_masked_where(self):
# Ticket #62
from numpy.ma import masked_where, MaskType
a = np.zeros((1, 1))
b = np.zeros(a.shape, MaskType)
c = masked_where(b, a)
a-c
def test_masked_array_multiply(self):
# Ticket #254
a = np.ma.zeros((4, 1))
a[2, 0] = np.ma.masked
b = np.zeros((4, 2))
a*b
b*a
def test_masked_array_repeat(self):
# Ticket #271
np.ma.array([1], mask=False).repeat(10)
def test_masked_array_repr_unicode(self):
# Ticket #1256
repr(np.ma.array(u"Unicode"))
def test_atleast_2d(self):
# Ticket #1559
a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False])
b = np.atleast_2d(a)
assert_(a.mask.ndim == 1)
assert_(b.mask.ndim == 2)
def test_set_fill_value_unicode_py3(self):
# Ticket #2733
a = np.ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 0])
a.fill_value = 'X'
assert_(a.fill_value == 'X')
def test_var_sets_maskedarray_scalar(self):
# Issue gh-2757
a = np.ma.array(np.arange(5), mask=True)
mout = np.ma.array(-1, dtype=float)
a.var(out=mout)
assert_(mout._data == 0)
def test_ddof_corrcoef(self):
# See gh-3336
x = np.ma.masked_equal([1, 2, 3, 4, 5], 4)
y = np.array([2, 2.5, 3.1, 3, 5])
# this test can be removed after deprecation.
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "bias and ddof have no effect")
r0 = np.ma.corrcoef(x, y, ddof=0)
r1 = np.ma.corrcoef(x, y, ddof=1)
# ddof should not have an effect (it gets cancelled out)
assert_allclose(r0.data, r1.data)
def test_mask_not_backmangled(self):
# See gh-10314. Test case taken from gh-3140.
a = np.ma.MaskedArray([1., 2.], mask=[False, False])
assert_(a.mask.shape == (2,))
b = np.tile(a, (2, 1))
# Check that the above no longer changes a.shape to (1, 2)
assert_(a.mask.shape == (2,))
assert_(b.shape == (2, 2))
assert_(b.mask.shape == (2, 2))
def test_empty_list_on_structured(self):
# See gh-12464. Indexing with empty list should give empty result.
ma = np.ma.MaskedArray([(1, 1.), (2, 2.), (3, 3.)], dtype='i4,f4')
assert_array_equal(ma[[]], ma[:0])
def test_masked_array_tobytes_fortran(self):
ma = np.ma.arange(4).reshape((2,2))
assert_array_equal(ma.tobytes(order='F'), ma.T.tobytes())
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/ma/tests/test_extras.py
# pylint: disable-msg=W0611, W0612, W0511
"""Tests suite for MaskedArray.
Adapted from the original test_ma by Pierre Gerard-Marchant
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
:version: $Id: test_extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $
"""
import warnings
import itertools
import pytest
import numpy as np
from numpy.testing import (
assert_warns, suppress_warnings
)
from numpy.ma.testutils import (
assert_, assert_array_equal, assert_equal, assert_almost_equal
)
from numpy.ma.core import (
array, arange, masked, MaskedArray, masked_array, getmaskarray, shape,
nomask, ones, zeros, count
)
from numpy.ma.extras import (
atleast_1d, atleast_2d, atleast_3d, mr_, dot, polyfit, cov, corrcoef,
median, average, unique, setxor1d, setdiff1d, union1d, intersect1d, in1d,
ediff1d, apply_over_axes, apply_along_axis, compress_nd, compress_rowcols,
mask_rowcols, clump_masked, clump_unmasked, flatnotmasked_contiguous,
notmasked_contiguous, notmasked_edges, masked_all, masked_all_like, isin,
diagflat, ndenumerate, stack, vstack
)
class TestGeneric:
#
def test_masked_all(self):
# Tests masked_all
# Standard dtype
test = masked_all((2,), dtype=float)
control = array([1, 1], mask=[1, 1], dtype=float)
assert_equal(test, control)
# Flexible dtype
dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']})
test = masked_all((2,), dtype=dt)
control = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt)
assert_equal(test, control)
test = masked_all((2, 2), dtype=dt)
control = array([[(0, 0), (0, 0)], [(0, 0), (0, 0)]],
mask=[[(1, 1), (1, 1)], [(1, 1), (1, 1)]],
dtype=dt)
assert_equal(test, control)
# Nested dtype
dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])])
test = masked_all((2,), dtype=dt)
control = array([(1, (1, 1)), (1, (1, 1))],
mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt)
assert_equal(test, control)
test = masked_all((2,), dtype=dt)
control = array([(1, (1, 1)), (1, (1, 1))],
mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt)
assert_equal(test, control)
test = masked_all((1, 1), dtype=dt)
control = array([[(1, (1, 1))]], mask=[[(1, (1, 1))]], dtype=dt)
assert_equal(test, control)
def test_masked_all_with_object_nested(self):
# Test masked_all works with nested array with dtype of an 'object'
# refers to issue #15895
my_dtype = np.dtype([('b', ([('c', object)], (1,)))])
masked_arr = np.ma.masked_all((1,), my_dtype)
assert_equal(type(masked_arr['b']), np.ma.core.MaskedArray)
assert_equal(type(masked_arr['b']['c']), np.ma.core.MaskedArray)
assert_equal(len(masked_arr['b']['c']), 1)
assert_equal(masked_arr['b']['c'].shape, (1, 1))
assert_equal(masked_arr['b']['c']._fill_value.shape, ())
def test_masked_all_with_object(self):
# same as above except that the array is not nested
my_dtype = np.dtype([('b', (object, (1,)))])
masked_arr = np.ma.masked_all((1,), my_dtype)
assert_equal(type(masked_arr['b']), np.ma.core.MaskedArray)
assert_equal(len(masked_arr['b']), 1)
assert_equal(masked_arr['b'].shape, (1, 1))
assert_equal(masked_arr['b']._fill_value.shape, ())
def test_masked_all_like(self):
# Tests masked_all
# Standard dtype
base = array([1, 2], dtype=float)
test = masked_all_like(base)
control = array([1, 1], mask=[1, 1], dtype=float)
assert_equal(test, control)
# Flexible dtype
dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']})
base = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt)
test = masked_all_like(base)
control = array([(10, 10), (10, 10)], mask=[(1, 1), (1, 1)], dtype=dt)
assert_equal(test, control)
# Nested dtype
dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])])
control = array([(1, (1, 1)), (1, (1, 1))],
mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt)
test = masked_all_like(control)
assert_equal(test, control)
def check_clump(self, f):
for i in range(1, 7):
for j in range(2**i):
k = np.arange(i, dtype=int)
ja = np.full(i, j, dtype=int)
a = masked_array(2**k)
a.mask = (ja & (2**k)) != 0
s = 0
for sl in f(a):
s += a.data[sl].sum()
if f == clump_unmasked:
assert_equal(a.compressed().sum(), s)
else:
a.mask = ~a.mask
assert_equal(a.compressed().sum(), s)
def test_clump_masked(self):
# Test clump_masked
a = masked_array(np.arange(10))
a[[0, 1, 2, 6, 8, 9]] = masked
#
test = clump_masked(a)
control = [slice(0, 3), slice(6, 7), slice(8, 10)]
assert_equal(test, control)
self.check_clump(clump_masked)
def test_clump_unmasked(self):
# Test clump_unmasked
a = masked_array(np.arange(10))
a[[0, 1, 2, 6, 8, 9]] = masked
test = clump_unmasked(a)
control = [slice(3, 6), slice(7, 8), ]
assert_equal(test, control)
self.check_clump(clump_unmasked)
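    # Note: both clump functions return a list of slices covering maximal
    # runs, so data[sl] visits each run exactly once -- the invariant that
    # check_clump sums over. Illustrative sketch, not part of the original
    # suite:
    #   >>> clump_masked(masked_array([0, 1, 2], mask=[1, 1, 0]))
    #   [slice(0, 2, None)]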
def test_flatnotmasked_contiguous(self):
# Test flatnotmasked_contiguous
a = arange(10)
# No mask
test = flatnotmasked_contiguous(a)
assert_equal(test, [slice(0, a.size)])
        # mask of all false
        a.mask = np.zeros(10, dtype=bool)
        test = flatnotmasked_contiguous(a)
        assert_equal(test, [slice(0, a.size)])
# Some mask
a[(a < 3) | (a > 8) | (a == 5)] = masked
test = flatnotmasked_contiguous(a)
assert_equal(test, [slice(3, 5), slice(6, 9)])
#
a[:] = masked
test = flatnotmasked_contiguous(a)
assert_equal(test, [])
class TestAverage:
    # Several tests of average. Why so many? Good point...
def test_testAverage1(self):
# Test of average.
ott = array([0., 1., 2., 3.], mask=[True, False, False, False])
assert_equal(2.0, average(ott, axis=0))
assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.]))
result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True)
assert_equal(2.0, result)
assert_(wts == 4.0)
ott[:] = masked
assert_equal(average(ott, axis=0).mask, [True])
ott = array([0., 1., 2., 3.], mask=[True, False, False, False])
ott = ott.reshape(2, 2)
ott[:, 1] = masked
assert_equal(average(ott, axis=0), [2.0, 0.0])
assert_equal(average(ott, axis=1).mask[0], [True])
assert_equal([2., 0.], average(ott, axis=0))
result, wts = average(ott, axis=0, returned=True)
assert_equal(wts, [1., 0.])
def test_testAverage2(self):
# More tests of average.
w1 = [0, 1, 1, 1, 1, 0]
w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
x = arange(6, dtype=np.float_)
assert_equal(average(x, axis=0), 2.5)
assert_equal(average(x, axis=0, weights=w1), 2.5)
y = array([arange(6, dtype=np.float_), 2.0 * arange(6)])
assert_equal(average(y, None), np.add.reduce(np.arange(6)) * 3. / 12.)
assert_equal(average(y, axis=0), np.arange(6) * 3. / 2.)
assert_equal(average(y, axis=1),
[average(x, axis=0), average(x, axis=0) * 2.0])
assert_equal(average(y, None, weights=w2), 20. / 6.)
assert_equal(average(y, axis=0, weights=w2),
[0., 1., 2., 3., 4., 10.])
assert_equal(average(y, axis=1),
[average(x, axis=0), average(x, axis=0) * 2.0])
m1 = zeros(6)
m2 = [0, 0, 1, 1, 0, 0]
m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]
m4 = ones(6)
m5 = [0, 1, 1, 1, 1, 1]
assert_equal(average(masked_array(x, m1), axis=0), 2.5)
assert_equal(average(masked_array(x, m2), axis=0), 2.5)
assert_equal(average(masked_array(x, m4), axis=0).mask, [True])
assert_equal(average(masked_array(x, m5), axis=0), 0.0)
assert_equal(count(average(masked_array(x, m4), axis=0)), 0)
z = masked_array(y, m3)
assert_equal(average(z, None), 20. / 6.)
assert_equal(average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5])
assert_equal(average(z, axis=1), [2.5, 5.0])
assert_equal(average(z, axis=0, weights=w2),
[0., 1., 99., 99., 4.0, 10.0])
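    # Note: a weighted masked average only sums over unmasked entries,
    # i.e. sum(w_i * x_i) / sum(w_i) restricted to the valid positions.
    # Worked sketch, not part of the original suite:
    #   >>> average(array([1., 2., 3.], mask=[0, 0, 1]), weights=[1, 3, 5])
    #   1.75
    # since (1*1 + 3*2) / (1 + 3) == 1.75; the weight 5 is ignored.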
def test_testAverage3(self):
# Yet more tests of average!
a = arange(6)
b = arange(6) * 3
r1, w1 = average([[a, b], [b, a]], axis=1, returned=True)
assert_equal(shape(r1), shape(w1))
assert_equal(r1.shape, w1.shape)
r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True)
assert_equal(shape(w2), shape(r2))
r2, w2 = average(ones((2, 2, 3)), returned=True)
assert_equal(shape(w2), shape(r2))
r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True)
assert_equal(shape(w2), shape(r2))
a2d = array([[1, 2], [0, 4]], float)
a2dm = masked_array(a2d, [[False, False], [True, False]])
a2da = average(a2d, axis=0)
assert_equal(a2da, [0.5, 3.0])
a2dma = average(a2dm, axis=0)
assert_equal(a2dma, [1.0, 3.0])
a2dma = average(a2dm, axis=None)
assert_equal(a2dma, 7. / 3.)
a2dma = average(a2dm, axis=1)
assert_equal(a2dma, [1.5, 4.0])
def test_testAverage4(self):
# Test that `keepdims` works with average
x = np.array([2, 3, 4]).reshape(3, 1)
b = np.ma.array(x, mask=[[False], [False], [True]])
w = np.array([4, 5, 6]).reshape(3, 1)
actual = average(b, weights=w, axis=1, keepdims=True)
desired = masked_array([[2.], [3.], [4.]], [[False], [False], [True]])
assert_equal(actual, desired)
def test_onintegers_with_mask(self):
# Test average on integers with mask
a = average(array([1, 2]))
assert_equal(a, 1.5)
a = average(array([1, 2, 3, 4], mask=[False, False, True, True]))
assert_equal(a, 1.5)
def test_complex(self):
# Test with complex data.
# (Regression test for https://github.com/numpy/numpy/issues/2684)
mask = np.array([[0, 0, 0, 1, 0],
[0, 1, 0, 0, 0]], dtype=bool)
a = masked_array([[0, 1+2j, 3+4j, 5+6j, 7+8j],
[9j, 0+1j, 2+3j, 4+5j, 7+7j]],
mask=mask)
av = average(a)
expected = np.average(a.compressed())
assert_almost_equal(av.real, expected.real)
assert_almost_equal(av.imag, expected.imag)
av0 = average(a, axis=0)
expected0 = average(a.real, axis=0) + average(a.imag, axis=0)*1j
assert_almost_equal(av0.real, expected0.real)
assert_almost_equal(av0.imag, expected0.imag)
av1 = average(a, axis=1)
expected1 = average(a.real, axis=1) + average(a.imag, axis=1)*1j
assert_almost_equal(av1.real, expected1.real)
assert_almost_equal(av1.imag, expected1.imag)
# Test with the 'weights' argument.
wts = np.array([[0.5, 1.0, 2.0, 1.0, 0.5],
[1.0, 1.0, 1.0, 1.0, 1.0]])
wav = average(a, weights=wts)
expected = np.average(a.compressed(), weights=wts[~mask])
assert_almost_equal(wav.real, expected.real)
assert_almost_equal(wav.imag, expected.imag)
wav0 = average(a, weights=wts, axis=0)
expected0 = (average(a.real, weights=wts, axis=0) +
average(a.imag, weights=wts, axis=0)*1j)
assert_almost_equal(wav0.real, expected0.real)
assert_almost_equal(wav0.imag, expected0.imag)
wav1 = average(a, weights=wts, axis=1)
expected1 = (average(a.real, weights=wts, axis=1) +
average(a.imag, weights=wts, axis=1)*1j)
assert_almost_equal(wav1.real, expected1.real)
assert_almost_equal(wav1.imag, expected1.imag)
@pytest.mark.parametrize(
'x, axis, expected_avg, weights, expected_wavg, expected_wsum',
[([1, 2, 3], None, [2.0], [3, 4, 1], [1.75], [8.0]),
([[1, 2, 5], [1, 6, 11]], 0, [[1.0, 4.0, 8.0]],
[1, 3], [[1.0, 5.0, 9.5]], [[4, 4, 4]])],
)
def test_basic_keepdims(self, x, axis, expected_avg,
weights, expected_wavg, expected_wsum):
avg = np.ma.average(x, axis=axis, keepdims=True)
assert avg.shape == np.shape(expected_avg)
assert_array_equal(avg, expected_avg)
wavg = np.ma.average(x, axis=axis, weights=weights, keepdims=True)
assert wavg.shape == np.shape(expected_wavg)
assert_array_equal(wavg, expected_wavg)
wavg, wsum = np.ma.average(x, axis=axis, weights=weights,
returned=True, keepdims=True)
assert wavg.shape == np.shape(expected_wavg)
assert_array_equal(wavg, expected_wavg)
assert wsum.shape == np.shape(expected_wsum)
assert_array_equal(wsum, expected_wsum)
def test_masked_weights(self):
# Test with masked weights.
# (Regression test for https://github.com/numpy/numpy/issues/10438)
a = np.ma.array(np.arange(9).reshape(3, 3),
mask=[[1, 0, 0], [1, 0, 0], [0, 0, 0]])
weights_unmasked = masked_array([5, 28, 31], mask=False)
weights_masked = masked_array([5, 28, 31], mask=[1, 0, 0])
avg_unmasked = average(a, axis=0,
weights=weights_unmasked, returned=False)
expected_unmasked = np.array([6.0, 5.21875, 6.21875])
assert_almost_equal(avg_unmasked, expected_unmasked)
avg_masked = average(a, axis=0, weights=weights_masked, returned=False)
expected_masked = np.array([6.0, 5.576271186440678, 6.576271186440678])
assert_almost_equal(avg_masked, expected_masked)
        # Weights should be masked wherever the array is masked, to avoid
        # summing NaN (or other values at masked positions) that a zero
        # weight would not cancel.
a = np.ma.array([1.0, 2.0, 3.0, 4.0],
mask=[False, False, True, True])
avg_unmasked = average(a, weights=[1, 1, 1, np.nan])
assert_almost_equal(avg_unmasked, 1.5)
a = np.ma.array([
[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[9.0, 1.0, 2.0, 3.0],
], mask=[
[False, True, True, False],
[True, False, True, True],
[True, False, True, False],
])
avg_masked = np.ma.average(a, weights=[1, np.nan, 1], axis=0)
avg_expected = np.ma.array([1.0, np.nan, np.nan, 3.5],
mask=[False, True, True, False])
assert_almost_equal(avg_masked, avg_expected)
assert_equal(avg_masked.mask, avg_expected.mask)
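    # Note: the cases above show that weights aligned with masked data are
    # dropped before summing, so even a NaN weight at a masked position
    # cannot poison the result. Minimal sketch, not part of the original
    # suite:
    #   >>> average(np.ma.array([1., 9.], mask=[0, 1]), weights=[1., np.nan])
    #   1.0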
class TestConcatenator:
# Tests for mr_, the equivalent of r_ for masked arrays.
def test_1d(self):
# Tests mr_ on 1D arrays.
assert_array_equal(mr_[1, 2, 3, 4, 5, 6], array([1, 2, 3, 4, 5, 6]))
b = ones(5)
m = [1, 0, 0, 0, 0]
d = masked_array(b, mask=m)
c = mr_[d, 0, 0, d]
assert_(isinstance(c, MaskedArray))
assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1])
assert_array_equal(c.mask, mr_[m, 0, 0, m])
def test_2d(self):
# Tests mr_ on 2D arrays.
a_1 = np.random.rand(5, 5)
a_2 = np.random.rand(5, 5)
m_1 = np.round_(np.random.rand(5, 5), 0)
m_2 = np.round_(np.random.rand(5, 5), 0)
b_1 = masked_array(a_1, mask=m_1)
b_2 = masked_array(a_2, mask=m_2)
# append columns
d = mr_['1', b_1, b_2]
assert_(d.shape == (5, 10))
assert_array_equal(d[:, :5], b_1)
assert_array_equal(d[:, 5:], b_2)
assert_array_equal(d.mask, np.r_['1', m_1, m_2])
d = mr_[b_1, b_2]
assert_(d.shape == (10, 5))
assert_array_equal(d[:5,:], b_1)
assert_array_equal(d[5:,:], b_2)
assert_array_equal(d.mask, np.r_[m_1, m_2])
def test_masked_constant(self):
actual = mr_[np.ma.masked, 1]
assert_equal(actual.mask, [True, False])
assert_equal(actual.data[1], 1)
actual = mr_[[1, 2], np.ma.masked]
assert_equal(actual.mask, [False, False, True])
assert_equal(actual.data[:2], [1, 2])
class TestNotMasked:
# Tests notmasked_edges and notmasked_contiguous.
def test_edges(self):
# Tests unmasked_edges
data = masked_array(np.arange(25).reshape(5, 5),
mask=[[0, 0, 1, 0, 0],
[0, 0, 0, 1, 1],
[1, 1, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 1, 0, 0]],)
test = notmasked_edges(data, None)
assert_equal(test, [0, 24])
test = notmasked_edges(data, 0)
assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)])
assert_equal(test[1], [(3, 3, 3, 4, 4), (0, 1, 2, 3, 4)])
test = notmasked_edges(data, 1)
assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 2, 0, 3)])
assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 2, 4, 4, 4)])
#
test = notmasked_edges(data.data, None)
assert_equal(test, [0, 24])
test = notmasked_edges(data.data, 0)
assert_equal(test[0], [(0, 0, 0, 0, 0), (0, 1, 2, 3, 4)])
assert_equal(test[1], [(4, 4, 4, 4, 4), (0, 1, 2, 3, 4)])
test = notmasked_edges(data.data, -1)
assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 0, 0, 0)])
assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 4, 4, 4, 4)])
#
data[-2] = masked
test = notmasked_edges(data, 0)
assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)])
assert_equal(test[1], [(1, 1, 2, 4, 4), (0, 1, 2, 3, 4)])
test = notmasked_edges(data, -1)
assert_equal(test[0], [(0, 1, 2, 4), (0, 0, 2, 3)])
assert_equal(test[1], [(0, 1, 2, 4), (4, 2, 4, 4)])
def test_contiguous(self):
# Tests notmasked_contiguous
a = masked_array(np.arange(24).reshape(3, 8),
mask=[[0, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 0]])
tmp = notmasked_contiguous(a, None)
assert_equal(tmp, [
slice(0, 4, None),
slice(16, 22, None),
slice(23, 24, None)
])
tmp = notmasked_contiguous(a, 0)
assert_equal(tmp, [
[slice(0, 1, None), slice(2, 3, None)],
[slice(0, 1, None), slice(2, 3, None)],
[slice(0, 1, None), slice(2, 3, None)],
[slice(0, 1, None), slice(2, 3, None)],
[slice(2, 3, None)],
[slice(2, 3, None)],
[],
[slice(2, 3, None)]
])
#
tmp = notmasked_contiguous(a, 1)
assert_equal(tmp, [
[slice(0, 4, None)],
[],
[slice(0, 6, None), slice(7, 8, None)]
])
class TestCompressFunctions:
def test_compress_nd(self):
# Tests compress_nd
x = np.array(list(range(3*4*5))).reshape(3, 4, 5)
m = np.zeros((3,4,5)).astype(bool)
m[1,1,1] = True
x = array(x, mask=m)
# axis=None
a = compress_nd(x)
assert_equal(a, [[[ 0, 2, 3, 4],
[10, 12, 13, 14],
[15, 17, 18, 19]],
[[40, 42, 43, 44],
[50, 52, 53, 54],
[55, 57, 58, 59]]])
# axis=0
a = compress_nd(x, 0)
assert_equal(a, [[[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]],
[[40, 41, 42, 43, 44],
[45, 46, 47, 48, 49],
[50, 51, 52, 53, 54],
[55, 56, 57, 58, 59]]])
# axis=1
a = compress_nd(x, 1)
assert_equal(a, [[[ 0, 1, 2, 3, 4],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]],
[[20, 21, 22, 23, 24],
[30, 31, 32, 33, 34],
[35, 36, 37, 38, 39]],
[[40, 41, 42, 43, 44],
[50, 51, 52, 53, 54],
[55, 56, 57, 58, 59]]])
a2 = compress_nd(x, (1,))
a3 = compress_nd(x, -2)
a4 = compress_nd(x, (-2,))
assert_equal(a, a2)
assert_equal(a, a3)
assert_equal(a, a4)
# axis=2
a = compress_nd(x, 2)
assert_equal(a, [[[ 0, 2, 3, 4],
[ 5, 7, 8, 9],
[10, 12, 13, 14],
[15, 17, 18, 19]],
[[20, 22, 23, 24],
[25, 27, 28, 29],
[30, 32, 33, 34],
[35, 37, 38, 39]],
[[40, 42, 43, 44],
[45, 47, 48, 49],
[50, 52, 53, 54],
[55, 57, 58, 59]]])
a2 = compress_nd(x, (2,))
a3 = compress_nd(x, -1)
a4 = compress_nd(x, (-1,))
assert_equal(a, a2)
assert_equal(a, a3)
assert_equal(a, a4)
# axis=(0, 1)
a = compress_nd(x, (0, 1))
assert_equal(a, [[[ 0, 1, 2, 3, 4],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]],
[[40, 41, 42, 43, 44],
[50, 51, 52, 53, 54],
[55, 56, 57, 58, 59]]])
a2 = compress_nd(x, (0, -2))
assert_equal(a, a2)
# axis=(1, 2)
a = compress_nd(x, (1, 2))
assert_equal(a, [[[ 0, 2, 3, 4],
[10, 12, 13, 14],
[15, 17, 18, 19]],
[[20, 22, 23, 24],
[30, 32, 33, 34],
[35, 37, 38, 39]],
[[40, 42, 43, 44],
[50, 52, 53, 54],
[55, 57, 58, 59]]])
a2 = compress_nd(x, (-2, 2))
a3 = compress_nd(x, (1, -1))
a4 = compress_nd(x, (-2, -1))
assert_equal(a, a2)
assert_equal(a, a3)
assert_equal(a, a4)
# axis=(0, 2)
a = compress_nd(x, (0, 2))
assert_equal(a, [[[ 0, 2, 3, 4],
[ 5, 7, 8, 9],
[10, 12, 13, 14],
[15, 17, 18, 19]],
[[40, 42, 43, 44],
[45, 47, 48, 49],
[50, 52, 53, 54],
[55, 57, 58, 59]]])
a2 = compress_nd(x, (0, -1))
assert_equal(a, a2)
def test_compress_rowcols(self):
# Tests compress_rowcols
x = array(np.arange(9).reshape(3, 3),
mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
assert_equal(compress_rowcols(x), [[4, 5], [7, 8]])
assert_equal(compress_rowcols(x, 0), [[3, 4, 5], [6, 7, 8]])
assert_equal(compress_rowcols(x, 1), [[1, 2], [4, 5], [7, 8]])
x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
assert_equal(compress_rowcols(x), [[0, 2], [6, 8]])
assert_equal(compress_rowcols(x, 0), [[0, 1, 2], [6, 7, 8]])
assert_equal(compress_rowcols(x, 1), [[0, 2], [3, 5], [6, 8]])
x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]])
assert_equal(compress_rowcols(x), [[8]])
assert_equal(compress_rowcols(x, 0), [[6, 7, 8]])
        assert_equal(compress_rowcols(x, 1), [[2], [5], [8]])
x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]])
assert_equal(compress_rowcols(x).size, 0)
assert_equal(compress_rowcols(x, 0).size, 0)
assert_equal(compress_rowcols(x, 1).size, 0)
def test_mask_rowcols(self):
# Tests mask_rowcols.
x = array(np.arange(9).reshape(3, 3),
mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
assert_equal(mask_rowcols(x).mask,
[[1, 1, 1], [1, 0, 0], [1, 0, 0]])
assert_equal(mask_rowcols(x, 0).mask,
[[1, 1, 1], [0, 0, 0], [0, 0, 0]])
assert_equal(mask_rowcols(x, 1).mask,
[[1, 0, 0], [1, 0, 0], [1, 0, 0]])
x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
assert_equal(mask_rowcols(x).mask,
[[0, 1, 0], [1, 1, 1], [0, 1, 0]])
assert_equal(mask_rowcols(x, 0).mask,
[[0, 0, 0], [1, 1, 1], [0, 0, 0]])
assert_equal(mask_rowcols(x, 1).mask,
[[0, 1, 0], [0, 1, 0], [0, 1, 0]])
x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]])
assert_equal(mask_rowcols(x).mask,
[[1, 1, 1], [1, 1, 1], [1, 1, 0]])
assert_equal(mask_rowcols(x, 0).mask,
[[1, 1, 1], [1, 1, 1], [0, 0, 0]])
        assert_equal(mask_rowcols(x, 1).mask,
[[1, 1, 0], [1, 1, 0], [1, 1, 0]])
x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]])
assert_(mask_rowcols(x).all() is masked)
assert_(mask_rowcols(x, 0).all() is masked)
assert_(mask_rowcols(x, 1).all() is masked)
assert_(mask_rowcols(x).mask.all())
assert_(mask_rowcols(x, 0).mask.all())
assert_(mask_rowcols(x, 1).mask.all())
@pytest.mark.parametrize("axis", [None, 0, 1])
@pytest.mark.parametrize(["func", "rowcols_axis"],
[(np.ma.mask_rows, 0), (np.ma.mask_cols, 1)])
def test_mask_row_cols_axis_deprecation(self, axis, func, rowcols_axis):
# Test deprecation of the axis argument to `mask_rows` and `mask_cols`
x = array(np.arange(9).reshape(3, 3),
mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
with assert_warns(DeprecationWarning):
res = func(x, axis=axis)
assert_equal(res, mask_rowcols(x, rowcols_axis))
def test_dot(self):
# Tests dot product
n = np.arange(1, 7)
#
m = [1, 0, 0, 0, 0, 0]
a = masked_array(n, mask=m).reshape(2, 3)
b = masked_array(n, mask=m).reshape(3, 2)
c = dot(a, b, strict=True)
assert_equal(c.mask, [[1, 1], [1, 0]])
c = dot(b, a, strict=True)
assert_equal(c.mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]])
c = dot(a, b, strict=False)
assert_equal(c, np.dot(a.filled(0), b.filled(0)))
c = dot(b, a, strict=False)
assert_equal(c, np.dot(b.filled(0), a.filled(0)))
#
m = [0, 0, 0, 0, 0, 1]
a = masked_array(n, mask=m).reshape(2, 3)
b = masked_array(n, mask=m).reshape(3, 2)
c = dot(a, b, strict=True)
assert_equal(c.mask, [[0, 1], [1, 1]])
c = dot(b, a, strict=True)
assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [1, 1, 1]])
c = dot(a, b, strict=False)
assert_equal(c, np.dot(a.filled(0), b.filled(0)))
assert_equal(c, dot(a, b))
c = dot(b, a, strict=False)
assert_equal(c, np.dot(b.filled(0), a.filled(0)))
#
m = [0, 0, 0, 0, 0, 0]
a = masked_array(n, mask=m).reshape(2, 3)
b = masked_array(n, mask=m).reshape(3, 2)
c = dot(a, b)
assert_equal(c.mask, nomask)
c = dot(b, a)
assert_equal(c.mask, nomask)
#
a = masked_array(n, mask=[1, 0, 0, 0, 0, 0]).reshape(2, 3)
b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2)
c = dot(a, b, strict=True)
assert_equal(c.mask, [[1, 1], [0, 0]])
c = dot(a, b, strict=False)
assert_equal(c, np.dot(a.filled(0), b.filled(0)))
c = dot(b, a, strict=True)
assert_equal(c.mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]])
c = dot(b, a, strict=False)
assert_equal(c, np.dot(b.filled(0), a.filled(0)))
#
a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3)
b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2)
c = dot(a, b, strict=True)
assert_equal(c.mask, [[0, 0], [1, 1]])
c = dot(a, b)
assert_equal(c, np.dot(a.filled(0), b.filled(0)))
c = dot(b, a, strict=True)
assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [0, 0, 1]])
c = dot(b, a, strict=False)
assert_equal(c, np.dot(b.filled(0), a.filled(0)))
#
a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3)
b = masked_array(n, mask=[0, 0, 1, 0, 0, 0]).reshape(3, 2)
c = dot(a, b, strict=True)
assert_equal(c.mask, [[1, 0], [1, 1]])
c = dot(a, b, strict=False)
assert_equal(c, np.dot(a.filled(0), b.filled(0)))
c = dot(b, a, strict=True)
assert_equal(c.mask, [[0, 0, 1], [1, 1, 1], [0, 0, 1]])
c = dot(b, a, strict=False)
assert_equal(c, np.dot(b.filled(0), a.filled(0)))
def test_dot_returns_maskedarray(self):
# See gh-6611
a = np.eye(3)
b = array(a)
assert_(type(dot(a, a)) is MaskedArray)
assert_(type(dot(a, b)) is MaskedArray)
assert_(type(dot(b, a)) is MaskedArray)
assert_(type(dot(b, b)) is MaskedArray)
def test_dot_out(self):
a = array(np.eye(3))
out = array(np.zeros((3, 3)))
res = dot(a, a, out=out)
assert_(res is out)
assert_equal(a, res)
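    # Note on the strict flag exercised above: strict=True masks every output
    # that any masked operand contributes to, while strict=False (the
    # default) fills masked entries with 0 before multiplying. Illustrative
    # sketch, not part of the original suite:
    #   >>> a = masked_array([[1, 2]], mask=[[1, 0]])
    #   >>> dot(a, a.T, strict=False)[0, 0]    # 0*0 + 2*2
    #   4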
class TestApplyAlongAxis:
# Tests 2D functions
def test_3d(self):
a = arange(12.).reshape(2, 2, 3)
def myfunc(b):
return b[1]
xa = apply_along_axis(myfunc, 2, a)
assert_equal(xa, [[1, 4], [7, 10]])
# Tests kwargs functions
def test_3d_kwargs(self):
a = arange(12).reshape(2, 2, 3)
def myfunc(b, offset=0):
return b[1+offset]
xa = apply_along_axis(myfunc, 2, a, offset=1)
assert_equal(xa, [[2, 5], [8, 11]])
class TestApplyOverAxes:
# Tests apply_over_axes
def test_basic(self):
a = arange(24).reshape(2, 3, 4)
test = apply_over_axes(np.sum, a, [0, 2])
ctrl = np.array([[[60], [92], [124]]])
assert_equal(test, ctrl)
a[(a % 2).astype(bool)] = masked
test = apply_over_axes(np.sum, a, [0, 2])
ctrl = np.array([[[28], [44], [60]]])
assert_equal(test, ctrl)
class TestMedian:
def test_pytype(self):
r = np.ma.median([[np.inf, np.inf], [np.inf, np.inf]], axis=-1)
assert_equal(r, np.inf)
def test_inf(self):
        # test that the even-case median computation handles inf values
r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
[np.inf, np.inf]]), axis=-1)
assert_equal(r, np.inf)
r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
[np.inf, np.inf]]), axis=None)
assert_equal(r, np.inf)
# all masked
r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
[np.inf, np.inf]], mask=True),
axis=-1)
assert_equal(r.mask, True)
r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
[np.inf, np.inf]], mask=True),
axis=None)
assert_equal(r.mask, True)
def test_non_masked(self):
x = np.arange(9)
assert_equal(np.ma.median(x), 4.)
assert_(type(np.ma.median(x)) is not MaskedArray)
x = range(8)
assert_equal(np.ma.median(x), 3.5)
assert_(type(np.ma.median(x)) is not MaskedArray)
x = 5
assert_equal(np.ma.median(x), 5.)
assert_(type(np.ma.median(x)) is not MaskedArray)
# integer
x = np.arange(9 * 8).reshape(9, 8)
assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0))
assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1))
assert_(np.ma.median(x, axis=1) is not MaskedArray)
# float
x = np.arange(9 * 8.).reshape(9, 8)
assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0))
assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1))
assert_(np.ma.median(x, axis=1) is not MaskedArray)
def test_docstring_examples(self):
"test the examples given in the docstring of ma.median"
x = array(np.arange(8), mask=[0]*4 + [1]*4)
assert_equal(np.ma.median(x), 1.5)
assert_equal(np.ma.median(x).shape, (), "shape mismatch")
assert_(type(np.ma.median(x)) is not MaskedArray)
x = array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4)
assert_equal(np.ma.median(x), 2.5)
assert_equal(np.ma.median(x).shape, (), "shape mismatch")
assert_(type(np.ma.median(x)) is not MaskedArray)
ma_x = np.ma.median(x, axis=-1, overwrite_input=True)
assert_equal(ma_x, [2., 5.])
assert_equal(ma_x.shape, (2,), "shape mismatch")
assert_(type(ma_x) is MaskedArray)
def test_axis_argument_errors(self):
msg = "mask = %s, ndim = %s, axis = %s, overwrite_input = %s"
for ndmin in range(5):
for mask in [False, True]:
x = array(1, ndmin=ndmin, mask=mask)
# Valid axis values should not raise exception
args = itertools.product(range(-ndmin, ndmin), [False, True])
for axis, over in args:
try:
np.ma.median(x, axis=axis, overwrite_input=over)
except Exception:
raise AssertionError(msg % (mask, ndmin, axis, over))
# Invalid axis values should raise exception
args = itertools.product([-(ndmin + 1), ndmin], [False, True])
for axis, over in args:
try:
np.ma.median(x, axis=axis, overwrite_input=over)
except np.AxisError:
pass
else:
raise AssertionError(msg % (mask, ndmin, axis, over))
def test_masked_0d(self):
# Check values
x = array(1, mask=False)
assert_equal(np.ma.median(x), 1)
x = array(1, mask=True)
assert_equal(np.ma.median(x), np.ma.masked)
def test_masked_1d(self):
x = array(np.arange(5), mask=True)
assert_equal(np.ma.median(x), np.ma.masked)
assert_equal(np.ma.median(x).shape, (), "shape mismatch")
assert_(type(np.ma.median(x)) is np.ma.core.MaskedConstant)
x = array(np.arange(5), mask=False)
assert_equal(np.ma.median(x), 2.)
assert_equal(np.ma.median(x).shape, (), "shape mismatch")
assert_(type(np.ma.median(x)) is not MaskedArray)
x = array(np.arange(5), mask=[0,1,0,0,0])
assert_equal(np.ma.median(x), 2.5)
assert_equal(np.ma.median(x).shape, (), "shape mismatch")
assert_(type(np.ma.median(x)) is not MaskedArray)
x = array(np.arange(5), mask=[0,1,1,1,1])
assert_equal(np.ma.median(x), 0.)
assert_equal(np.ma.median(x).shape, (), "shape mismatch")
assert_(type(np.ma.median(x)) is not MaskedArray)
# integer
x = array(np.arange(5), mask=[0,1,1,0,0])
assert_equal(np.ma.median(x), 3.)
assert_equal(np.ma.median(x).shape, (), "shape mismatch")
assert_(type(np.ma.median(x)) is not MaskedArray)
# float
x = array(np.arange(5.), mask=[0,1,1,0,0])
assert_equal(np.ma.median(x), 3.)
assert_equal(np.ma.median(x).shape, (), "shape mismatch")
assert_(type(np.ma.median(x)) is not MaskedArray)
# integer
x = array(np.arange(6), mask=[0,1,1,1,1,0])
assert_equal(np.ma.median(x), 2.5)
assert_equal(np.ma.median(x).shape, (), "shape mismatch")
assert_(type(np.ma.median(x)) is not MaskedArray)
# float
x = array(np.arange(6.), mask=[0,1,1,1,1,0])
assert_equal(np.ma.median(x), 2.5)
assert_equal(np.ma.median(x).shape, (), "shape mismatch")
assert_(type(np.ma.median(x)) is not MaskedArray)
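    # Note: the even-count cases above follow from the median averaging the
    # two middle *unmasked* values. Minimal sketch, not part of the original
    # suite:
    #   >>> np.ma.median(array([0, 9, 1], mask=[0, 1, 0]))
    #   0.5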
def test_1d_shape_consistency(self):
assert_equal(np.ma.median(array([1,2,3],mask=[0,0,0])).shape,
np.ma.median(array([1,2,3],mask=[0,1,0])).shape )
def test_2d(self):
# Tests median w/ 2D
(n, p) = (101, 30)
        x = masked_array(np.linspace(-1., 1., n))
x[:10] = x[-10:] = masked
z = masked_array(np.empty((n, p), dtype=float))
z[:, 0] = x[:]
idx = np.arange(len(x))
for i in range(1, p):
np.random.shuffle(idx)
z[:, i] = x[idx]
assert_equal(median(z[:, 0]), 0)
assert_equal(median(z), 0)
assert_equal(median(z, axis=0), np.zeros(p))
assert_equal(median(z.T, axis=1), np.zeros(p))
def test_2d_waxis(self):
# Tests median w/ 2D arrays and different axis.
x = masked_array(np.arange(30).reshape(10, 3))
x[:3] = x[-3:] = masked
assert_equal(median(x), 14.5)
assert_(type(np.ma.median(x)) is not MaskedArray)
assert_equal(median(x, axis=0), [13.5, 14.5, 15.5])
assert_(type(np.ma.median(x, axis=0)) is MaskedArray)
assert_equal(median(x, axis=1), [0, 0, 0, 10, 13, 16, 19, 0, 0, 0])
assert_(type(np.ma.median(x, axis=1)) is MaskedArray)
assert_equal(median(x, axis=1).mask, [1, 1, 1, 0, 0, 0, 0, 1, 1, 1])
def test_3d(self):
# Tests median w/ 3D
x = np.ma.arange(24).reshape(3, 4, 2)
x[x % 3 == 0] = masked
assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]])
x.shape = (4, 3, 2)
assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]])
x = np.ma.arange(24).reshape(4, 3, 2)
x[x % 5 == 0] = masked
assert_equal(median(x, 0), [[12, 10], [8, 9], [16, 17]])
def test_neg_axis(self):
x = masked_array(np.arange(30).reshape(10, 3))
x[:3] = x[-3:] = masked
assert_equal(median(x, axis=-1), median(x, axis=1))
def test_out_1d(self):
# integer float even odd
for v in (30, 30., 31, 31.):
x = masked_array(np.arange(v))
x[:3] = x[-3:] = masked
out = masked_array(np.ones(()))
r = median(x, out=out)
if v == 30:
assert_equal(out, 14.5)
else:
assert_equal(out, 15.)
assert_(r is out)
assert_(type(r) is MaskedArray)
def test_out(self):
# integer float even odd
for v in (40, 40., 30, 30.):
x = masked_array(np.arange(v).reshape(10, -1))
x[:3] = x[-3:] = masked
out = masked_array(np.ones(10))
r = median(x, axis=1, out=out)
if v == 30:
e = masked_array([0.]*3 + [10, 13, 16, 19] + [0.]*3,
mask=[True] * 3 + [False] * 4 + [True] * 3)
else:
e = masked_array([0.]*3 + [13.5, 17.5, 21.5, 25.5] + [0.]*3,
mask=[True]*3 + [False]*4 + [True]*3)
assert_equal(r, e)
assert_(r is out)
assert_(type(r) is MaskedArray)
def test_single_non_masked_value_on_axis(self):
data = [[1., 0.],
[0., 3.],
[0., 0.]]
masked_arr = np.ma.masked_equal(data, 0)
expected = [1., 3.]
assert_array_equal(np.ma.median(masked_arr, axis=0),
expected)
def test_nan(self):
for mask in (False, np.zeros(6, dtype=bool)):
dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]])
dm.mask = mask
# scalar result
r = np.ma.median(dm, axis=None)
assert_(np.isscalar(r))
assert_array_equal(r, np.nan)
r = np.ma.median(dm.ravel(), axis=0)
assert_(np.isscalar(r))
assert_array_equal(r, np.nan)
r = np.ma.median(dm, axis=0)
assert_equal(type(r), MaskedArray)
assert_array_equal(r, [1, np.nan, 3])
r = np.ma.median(dm, axis=1)
assert_equal(type(r), MaskedArray)
assert_array_equal(r, [np.nan, 2])
r = np.ma.median(dm, axis=-1)
assert_equal(type(r), MaskedArray)
assert_array_equal(r, [np.nan, 2])
dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]])
dm[:, 2] = np.ma.masked
assert_array_equal(np.ma.median(dm, axis=None), np.nan)
assert_array_equal(np.ma.median(dm, axis=0), [1, np.nan, 3])
assert_array_equal(np.ma.median(dm, axis=1), [np.nan, 1.5])
def test_out_nan(self):
o = np.ma.masked_array(np.zeros((4,)))
d = np.ma.masked_array(np.ones((3, 4)))
d[2, 1] = np.nan
d[2, 2] = np.ma.masked
assert_equal(np.ma.median(d, 0, out=o), o)
o = np.ma.masked_array(np.zeros((3,)))
assert_equal(np.ma.median(d, 1, out=o), o)
o = np.ma.masked_array(np.zeros(()))
assert_equal(np.ma.median(d, out=o), o)
def test_nan_behavior(self):
a = np.ma.masked_array(np.arange(24, dtype=float))
a[::3] = np.ma.masked
a[2] = np.nan
assert_array_equal(np.ma.median(a), np.nan)
assert_array_equal(np.ma.median(a, axis=0), np.nan)
a = np.ma.masked_array(np.arange(24, dtype=float).reshape(2, 3, 4))
a.mask = np.arange(a.size) % 2 == 1
aorig = a.copy()
a[1, 2, 3] = np.nan
a[1, 1, 2] = np.nan
# no axis
assert_array_equal(np.ma.median(a), np.nan)
assert_(np.isscalar(np.ma.median(a)))
# axis0
b = np.ma.median(aorig, axis=0)
b[2, 3] = np.nan
b[1, 2] = np.nan
assert_equal(np.ma.median(a, 0), b)
# axis1
b = np.ma.median(aorig, axis=1)
b[1, 3] = np.nan
b[1, 2] = np.nan
assert_equal(np.ma.median(a, 1), b)
# axis02
b = np.ma.median(aorig, axis=(0, 2))
b[1] = np.nan
b[2] = np.nan
assert_equal(np.ma.median(a, (0, 2)), b)
    def test_ambiguous_fill(self):
# 255 is max value, used as filler for sort
a = np.array([[3, 3, 255], [3, 3, 255]], dtype=np.uint8)
a = np.ma.masked_array(a, mask=a == 3)
assert_array_equal(np.ma.median(a, axis=1), 255)
assert_array_equal(np.ma.median(a, axis=1).mask, False)
assert_array_equal(np.ma.median(a, axis=0), a[0])
assert_array_equal(np.ma.median(a), 255)
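        # The sort-based median fills masked slots with the dtype maximum
        # (255 for uint8) before sorting; these assertions guard against
        # such fill values being mistaken for real data.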
def test_special(self):
for inf in [np.inf, -np.inf]:
a = np.array([[inf, np.nan], [np.nan, np.nan]])
a = np.ma.masked_array(a, mask=np.isnan(a))
assert_equal(np.ma.median(a, axis=0), [inf, np.nan])
assert_equal(np.ma.median(a, axis=1), [inf, np.nan])
assert_equal(np.ma.median(a), inf)
a = np.array([[np.nan, np.nan, inf], [np.nan, np.nan, inf]])
a = np.ma.masked_array(a, mask=np.isnan(a))
assert_array_equal(np.ma.median(a, axis=1), inf)
assert_array_equal(np.ma.median(a, axis=1).mask, False)
assert_array_equal(np.ma.median(a, axis=0), a[0])
assert_array_equal(np.ma.median(a), inf)
# no mask
a = np.array([[inf, inf], [inf, inf]])
assert_equal(np.ma.median(a), inf)
assert_equal(np.ma.median(a, axis=0), inf)
assert_equal(np.ma.median(a, axis=1), inf)
a = np.array([[inf, 7, -inf, -9],
[-10, np.nan, np.nan, 5],
[4, np.nan, np.nan, inf]],
dtype=np.float32)
a = np.ma.masked_array(a, mask=np.isnan(a))
if inf > 0:
assert_equal(np.ma.median(a, axis=0), [4., 7., -inf, 5.])
assert_equal(np.ma.median(a), 4.5)
else:
assert_equal(np.ma.median(a, axis=0), [-10., 7., -inf, -9.])
assert_equal(np.ma.median(a), -2.5)
assert_equal(np.ma.median(a, axis=1), [-1., -2.5, inf])
for i in range(0, 10):
for j in range(1, 10):
a = np.array([([np.nan] * i) + ([inf] * j)] * 2)
a = np.ma.masked_array(a, mask=np.isnan(a))
assert_equal(np.ma.median(a), inf)
assert_equal(np.ma.median(a, axis=1), inf)
assert_equal(np.ma.median(a, axis=0),
([np.nan] * i) + [inf] * j)
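        # With every NaN masked out, each row reduces to its j infs, so the
        # row and overall medians are inf regardless of how many masked
        # entries precede them; fully-masked columns come back masked and
        # are compared against the NaN placeholders above.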
def test_empty(self):
# empty arrays
a = np.ma.masked_array(np.array([], dtype=float))
with suppress_warnings() as w:
w.record(RuntimeWarning)
assert_array_equal(np.ma.median(a), np.nan)
assert_(w.log[0].category is RuntimeWarning)
# multiple dimensions
a = np.ma.masked_array(np.array([], dtype=float, ndmin=3))
# no axis
with suppress_warnings() as w:
w.record(RuntimeWarning)
warnings.filterwarnings('always', '', RuntimeWarning)
assert_array_equal(np.ma.median(a), np.nan)
assert_(w.log[0].category is RuntimeWarning)
# axis 0 and 1
b = np.ma.masked_array(np.array([], dtype=float, ndmin=2))
assert_equal(np.ma.median(a, axis=0), b)
assert_equal(np.ma.median(a, axis=1), b)
# axis 2
b = np.ma.masked_array(np.array(np.nan, dtype=float, ndmin=2))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.ma.median(a, axis=2), b)
assert_(w[0].category is RuntimeWarning)
def test_object(self):
o = np.ma.masked_array(np.arange(7.))
        assert_(isinstance(np.ma.median(o.astype(object)), float))
o[2] = np.nan
        assert_(isinstance(np.ma.median(o.astype(object)), float))
class TestCov:
def setup_method(self):
self.data = array(np.random.rand(12))
def test_1d_without_missing(self):
# Test cov on 1D variable w/o missing values
x = self.data
assert_almost_equal(np.cov(x), cov(x))
assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False))
assert_almost_equal(np.cov(x, rowvar=False, bias=True),
cov(x, rowvar=False, bias=True))
def test_2d_without_missing(self):
        # Test cov on one 2D variable w/o missing values
x = self.data.reshape(3, 4)
assert_almost_equal(np.cov(x), cov(x))
assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False))
assert_almost_equal(np.cov(x, rowvar=False, bias=True),
cov(x, rowvar=False, bias=True))
def test_1d_with_missing(self):
        # Test cov on one 1D variable w/ missing values
x = self.data
x[-1] = masked
x -= x.mean()
nx = x.compressed()
assert_almost_equal(np.cov(nx), cov(x))
assert_almost_equal(np.cov(nx, rowvar=False), cov(x, rowvar=False))
assert_almost_equal(np.cov(nx, rowvar=False, bias=True),
cov(x, rowvar=False, bias=True))
#
        try:
            cov(x, allow_masked=False)
        except ValueError:
            pass
        else:
            raise AssertionError(
                "cov should have raised ValueError for allow_masked=False")
#
# 2 1D variables w/ missing values
nx = x[1:-1]
assert_almost_equal(np.cov(nx, nx[::-1]), cov(x, x[::-1]))
assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False),
cov(x, x[::-1], rowvar=False))
assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False, bias=True),
cov(x, x[::-1], rowvar=False, bias=True))
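        # In the masked case cov() simply drops the invalid entries: for one
        # variable the masked values are removed, and for two variables
        # every pair where either value is masked is deleted, which is why
        # the comparisons against np.cov on the trimmed data hold.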
def test_2d_with_missing(self):
# Test cov on 2D variable w/ missing value
x = self.data
x[-1] = masked
x = x.reshape(3, 4)
valid = np.logical_not(getmaskarray(x)).astype(int)
frac = np.dot(valid, valid.T)
xf = (x - x.mean(1)[:, None]).filled(0)
assert_almost_equal(cov(x),
np.cov(xf) * (x.shape[1] - 1) / (frac - 1.))
assert_almost_equal(cov(x, bias=True),
np.cov(xf, bias=True) * x.shape[1] / frac)
frac = np.dot(valid.T, valid)
xf = (x - x.mean(0)).filled(0)
assert_almost_equal(cov(x, rowvar=False),
(np.cov(xf, rowvar=False) *
(x.shape[0] - 1) / (frac - 1.)))
assert_almost_equal(cov(x, rowvar=False, bias=True),
(np.cov(xf, rowvar=False, bias=True) *
x.shape[0] / frac))
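        # ma.cov normalizes each entry by the count of jointly valid
        # observations (frac) instead of a single global n, so np.cov on the
        # zero-filled, demeaned data has to be rescaled by
        # (n - 1)/(frac - 1) (or n/frac with bias=True) to match.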
class TestCorrcoef:
def setup_method(self):
self.data = array(np.random.rand(12))
self.data2 = array(np.random.rand(12))
def test_ddof(self):
# ddof raises DeprecationWarning
x, y = self.data, self.data2
expected = np.corrcoef(x)
expected2 = np.corrcoef(x, y)
with suppress_warnings() as sup:
warnings.simplefilter("always")
assert_warns(DeprecationWarning, corrcoef, x, ddof=-1)
sup.filter(DeprecationWarning, "bias and ddof have no effect")
# ddof has no or negligible effect on the function
assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0))
assert_almost_equal(corrcoef(x, ddof=-1), expected)
assert_almost_equal(corrcoef(x, y, ddof=-1), expected2)
assert_almost_equal(corrcoef(x, ddof=3), expected)
assert_almost_equal(corrcoef(x, y, ddof=3), expected2)
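        # Why ddof is irrelevant: corrcoef divides each covariance by the
        # product of the two standard deviations, so a uniform ddof scaling
        # cancels out of the ratio; hence the deprecation and the unchanged
        # results above.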
def test_bias(self):
x, y = self.data, self.data2
expected = np.corrcoef(x)
# bias raises DeprecationWarning
with suppress_warnings() as sup:
warnings.simplefilter("always")
assert_warns(DeprecationWarning, corrcoef, x, y, True, False)
assert_warns(DeprecationWarning, corrcoef, x, y, True, True)
assert_warns(DeprecationWarning, corrcoef, x, bias=False)
sup.filter(DeprecationWarning, "bias and ddof have no effect")
# bias has no or negligible effect on the function
assert_almost_equal(corrcoef(x, bias=1), expected)
def test_1d_without_missing(self):
        # Test corrcoef on one 1D variable w/o missing values
x = self.data
assert_almost_equal(np.corrcoef(x), corrcoef(x))
assert_almost_equal(np.corrcoef(x, rowvar=False),
corrcoef(x, rowvar=False))
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "bias and ddof have no effect")
assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True),
corrcoef(x, rowvar=False, bias=True))
def test_2d_without_missing(self):
        # Test corrcoef on one 2D variable w/o missing values
x = self.data.reshape(3, 4)
assert_almost_equal(np.corrcoef(x), corrcoef(x))
assert_almost_equal(np.corrcoef(x, rowvar=False),
corrcoef(x, rowvar=False))
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "bias and ddof have no effect")
assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True),
corrcoef(x, rowvar=False, bias=True))
def test_1d_with_missing(self):
        # Test corrcoef on one 1D variable w/ missing values
x = self.data
x[-1] = masked
x -= x.mean()
nx = x.compressed()
assert_almost_equal(np.corrcoef(nx), corrcoef(x))
assert_almost_equal(np.corrcoef(nx, rowvar=False),
corrcoef(x, rowvar=False))
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "bias and ddof have no effect")
assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True),
corrcoef(x, rowvar=False, bias=True))
try:
corrcoef(x, allow_masked=False)
except ValueError:
pass
# 2 1D variables w/ missing values
nx = x[1:-1]
assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1]))
assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False),
corrcoef(x, x[::-1], rowvar=False))
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "bias and ddof have no effect")
# ddof and bias have no or negligible effect on the function
assert_almost_equal(np.corrcoef(nx, nx[::-1]),
corrcoef(x, x[::-1], bias=1))
assert_almost_equal(np.corrcoef(nx, nx[::-1]),
corrcoef(x, x[::-1], ddof=2))
def test_2d_with_missing(self):
# Test corrcoef on 2D variable w/ missing value
x = self.data
x[-1] = masked
x = x.reshape(3, 4)
test = corrcoef(x)
control = np.corrcoef(x)
assert_almost_equal(test[:-1, :-1], control[:-1, :-1])
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "bias and ddof have no effect")
# ddof and bias have no or negligible effect on the function
assert_almost_equal(corrcoef(x, ddof=-2)[:-1, :-1],
control[:-1, :-1])
assert_almost_equal(corrcoef(x, ddof=3)[:-1, :-1],
control[:-1, :-1])
assert_almost_equal(corrcoef(x, bias=1)[:-1, :-1],
control[:-1, :-1])
class TestPolynomial:
#
def test_polyfit(self):
# Tests polyfit
# On ndarrays
x = np.random.rand(10)
y = np.random.rand(20).reshape(-1, 2)
assert_almost_equal(polyfit(x, y, 3), np.polyfit(x, y, 3))
        # On 1D masked arrays
x = x.view(MaskedArray)
x[0] = masked
y = y.view(MaskedArray)
y[0, 0] = y[-1, -1] = masked
#
(C, R, K, S, D) = polyfit(x, y[:, 0], 3, full=True)
(c, r, k, s, d) = np.polyfit(x[1:], y[1:, 0].compressed(), 3,
full=True)
for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
assert_almost_equal(a, a_)
#
(C, R, K, S, D) = polyfit(x, y[:, -1], 3, full=True)
(c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, -1], 3, full=True)
for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
assert_almost_equal(a, a_)
#
(C, R, K, S, D) = polyfit(x, y, 3, full=True)
(c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True)
for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
assert_almost_equal(a, a_)
#
w = np.random.rand(10) + 1
wo = w.copy()
xs = x[1:-1]
ys = y[1:-1]
ws = w[1:-1]
(C, R, K, S, D) = polyfit(x, y, 3, full=True, w=w)
(c, r, k, s, d) = np.polyfit(xs, ys, 3, full=True, w=ws)
assert_equal(w, wo)
for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
assert_almost_equal(a, a_)
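        # polyfit on masked inputs drops every sample whose x (or any fitted
        # y column) is masked, which is why each fit matches np.polyfit on
        # the trimmed data; the w/wo comparison also confirms the weights
        # are not modified in place.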
def test_polyfit_with_masked_NaNs(self):
x = np.random.rand(10)
y = np.random.rand(20).reshape(-1, 2)
x[0] = np.nan
y[-1,-1] = np.nan
x = x.view(MaskedArray)
y = y.view(MaskedArray)
x[0] = masked
y[-1,-1] = masked
(C, R, K, S, D) = polyfit(x, y, 3, full=True)
(c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True)
for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
assert_almost_equal(a, a_)
class TestArraySetOps:
def test_unique_onlist(self):
# Test unique on list
data = [1, 1, 1, 2, 2, 3]
test = unique(data, return_index=True, return_inverse=True)
assert_(isinstance(test[0], MaskedArray))
assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0]))
assert_equal(test[1], [0, 3, 5])
assert_equal(test[2], [0, 0, 0, 1, 1, 2])
def test_unique_onmaskedarray(self):
# Test unique on masked data w/use_mask=True
data = masked_array([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0])
test = unique(data, return_index=True, return_inverse=True)
assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1]))
assert_equal(test[1], [0, 3, 5, 2])
assert_equal(test[2], [0, 0, 3, 1, 3, 2])
#
data.fill_value = 3
data = masked_array(data=[1, 1, 1, 2, 2, 3],
mask=[0, 0, 1, 0, 1, 0], fill_value=3)
test = unique(data, return_index=True, return_inverse=True)
assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1]))
assert_equal(test[1], [0, 3, 5, 2])
assert_equal(test[2], [0, 0, 3, 1, 3, 2])
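        # unique folds all masked entries into a single trailing masked
        # value: return_index points at the first masked slot (position 2
        # here) and return_inverse maps every masked element to that single
        # entry.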
def test_unique_allmasked(self):
# Test all masked
data = masked_array([1, 1, 1], mask=True)
test = unique(data, return_index=True, return_inverse=True)
assert_equal(test[0], masked_array([1, ], mask=[True]))
assert_equal(test[1], [0])
assert_equal(test[2], [0, 0, 0])
#
# Test masked
data = masked
test = unique(data, return_index=True, return_inverse=True)
assert_equal(test[0], masked_array(masked))
assert_equal(test[1], [0])
assert_equal(test[2], [0])
def test_ediff1d(self):
        # Tests ediff1d
x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
control = array([1, 1, 1, 4], mask=[1, 0, 0, 1])
test = ediff1d(x)
assert_equal(test, control)
assert_equal(test.filled(0), control.filled(0))
assert_equal(test.mask, control.mask)
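        # Mask propagation: x is [--, 1, 2, 3, --], so the first and last
        # differences each touch a masked endpoint and come out masked; only
        # the interior differences (1, 1) survive.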
def test_ediff1d_tobegin(self):
# Test ediff1d w/ to_begin
x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
test = ediff1d(x, to_begin=masked)
control = array([0, 1, 1, 1, 4], mask=[1, 1, 0, 0, 1])
assert_equal(test, control)
assert_equal(test.filled(0), control.filled(0))
assert_equal(test.mask, control.mask)
#
test = ediff1d(x, to_begin=[1, 2, 3])
control = array([1, 2, 3, 1, 1, 1, 4], mask=[0, 0, 0, 1, 0, 0, 1])
assert_equal(test, control)
assert_equal(test.filled(0), control.filled(0))
assert_equal(test.mask, control.mask)
def test_ediff1d_toend(self):
# Test ediff1d w/ to_end
x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
test = ediff1d(x, to_end=masked)
control = array([1, 1, 1, 4, 0], mask=[1, 0, 0, 1, 1])
assert_equal(test, control)
assert_equal(test.filled(0), control.filled(0))
assert_equal(test.mask, control.mask)
#
test = ediff1d(x, to_end=[1, 2, 3])
control = array([1, 1, 1, 4, 1, 2, 3], mask=[1, 0, 0, 1, 0, 0, 0])
assert_equal(test, control)
assert_equal(test.filled(0), control.filled(0))
assert_equal(test.mask, control.mask)
def test_ediff1d_tobegin_toend(self):
# Test ediff1d w/ to_begin and to_end
x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
test = ediff1d(x, to_end=masked, to_begin=masked)
control = array([0, 1, 1, 1, 4, 0], mask=[1, 1, 0, 0, 1, 1])
assert_equal(test, control)
assert_equal(test.filled(0), control.filled(0))
assert_equal(test.mask, control.mask)
#
test = ediff1d(x, to_end=[1, 2, 3], to_begin=masked)
control = array([0, 1, 1, 1, 4, 1, 2, 3],
mask=[1, 1, 0, 0, 1, 0, 0, 0])
assert_equal(test, control)
assert_equal(test.filled(0), control.filled(0))
assert_equal(test.mask, control.mask)
def test_ediff1d_ndarray(self):
        # Test ediff1d w/ an ndarray
x = np.arange(5)
test = ediff1d(x)
control = array([1, 1, 1, 1], mask=[0, 0, 0, 0])
assert_equal(test, control)
assert_(isinstance(test, MaskedArray))
assert_equal(test.filled(0), control.filled(0))
assert_equal(test.mask, control.mask)
#
test = ediff1d(x, to_end=masked, to_begin=masked)
control = array([0, 1, 1, 1, 1, 0], mask=[1, 0, 0, 0, 0, 1])
assert_(isinstance(test, MaskedArray))
assert_equal(test.filled(0), control.filled(0))
assert_equal(test.mask, control.mask)
def test_intersect1d(self):
# Test intersect1d
x = array([1, 3, 3, 3], mask=[0, 0, 0, 1])
y = array([3, 1, 1, 1], mask=[0, 0, 0, 1])
test = intersect1d(x, y)
control = array([1, 3, -1], mask=[0, 0, 1])
assert_equal(test, control)
def test_setxor1d(self):
# Test setxor1d
a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
test = setxor1d(a, b)
assert_equal(test, array([3, 4, 7]))
#
a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
b = [1, 2, 3, 4, 5]
test = setxor1d(a, b)
assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1]))
#
a = array([1, 2, 3])
b = array([6, 5, 4])
test = setxor1d(a, b)
assert_(isinstance(test, MaskedArray))
assert_equal(test, [1, 2, 3, 4, 5, 6])
#
a = array([1, 8, 2, 3], mask=[0, 1, 0, 0])
b = array([6, 5, 4, 8], mask=[0, 0, 0, 1])
test = setxor1d(a, b)
assert_(isinstance(test, MaskedArray))
assert_equal(test, [1, 2, 3, 4, 5, 6])
#
assert_array_equal([], setxor1d([], []))
def test_isin(self):
        # The in1d tests cover most of isin's behavior; if in1d is ever
        # removed, those tests would need to be rewritten in terms of isin.
a = np.arange(24).reshape([2, 3, 4])
mask = np.zeros([2, 3, 4])
mask[1, 2, 0] = 1
a = array(a, mask=mask)
b = array(data=[0, 10, 20, 30, 1, 3, 11, 22, 33],
mask=[0, 1, 0, 1, 0, 1, 0, 1, 0])
ec = zeros((2, 3, 4), dtype=bool)
ec[0, 0, 0] = True
ec[0, 0, 1] = True
ec[0, 2, 3] = True
c = isin(a, b)
assert_(isinstance(c, MaskedArray))
assert_array_equal(c, ec)
        # compare results of np.isin to ma.isin
d = np.isin(a, b[~b.mask]) & ~a.mask
assert_array_equal(c, d)
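        # Cross-check logic: ma.isin must agree with plain np.isin computed
        # against only b's unmasked values, with a's masked slots forced to
        # False, which is exactly what the expression above builds.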
def test_in1d(self):
# Test in1d
a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
test = in1d(a, b)
assert_equal(test, [True, True, True, False, True])
#
a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1])
b = array([1, 5, -1], mask=[0, 0, 1])
test = in1d(a, b)
assert_equal(test, [True, True, False, True, True])
#
assert_array_equal([], in1d([], []))
def test_in1d_invert(self):
# Test in1d's invert parameter
a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))
a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1])
b = array([1, 5, -1], mask=[0, 0, 1])
assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))
assert_array_equal([], in1d([], [], invert=True))
def test_union1d(self):
# Test union1d
a = array([1, 2, 5, 7, 5, -1], mask=[0, 0, 0, 0, 0, 1])
b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
test = union1d(a, b)
control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1])
assert_equal(test, control)
# Tests gh-10340, arguments to union1d should be
# flattened if they are not already 1D
x = array([[0, 1, 2], [3, 4, 5]], mask=[[0, 0, 0], [0, 0, 1]])
y = array([0, 1, 2, 3, 4], mask=[0, 0, 0, 0, 1])
ez = array([0, 1, 2, 3, 4, 5], mask=[0, 0, 0, 0, 0, 1])
z = union1d(x, y)
assert_equal(z, ez)
#
assert_array_equal([], union1d([], []))
def test_setdiff1d(self):
# Test setdiff1d
a = array([6, 5, 4, 7, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 0, 1])
b = array([2, 4, 3, 3, 2, 1, 5])
test = setdiff1d(a, b)
assert_equal(test, array([6, 7, -1], mask=[0, 0, 1]))
#
a = arange(10)
b = arange(8)
assert_equal(setdiff1d(a, b), array([8, 9]))
a = array([], np.uint32, mask=[])
assert_equal(setdiff1d(a, []).dtype, np.uint32)
def test_setdiff1d_char_array(self):
        # Test setdiff1d on character arrays
a = np.array(['a', 'b', 'c'])
b = np.array(['a', 'b', 's'])
assert_array_equal(setdiff1d(a, b), np.array(['c']))
class TestShapeBase:
def test_atleast_2d(self):
# Test atleast_2d
a = masked_array([0, 1, 2], mask=[0, 1, 0])
b = atleast_2d(a)
assert_equal(b.shape, (1, 3))
assert_equal(b.mask.shape, b.data.shape)
assert_equal(a.shape, (3,))
assert_equal(a.mask.shape, a.data.shape)
def test_shape_scalar(self):
        # the atleast and diagflat functions should work with scalars
# GitHub issue #3367
# Additionally, the atleast functions should accept multiple scalars
# correctly
b = atleast_1d(1.0)
assert_equal(b.shape, (1,))
assert_equal(b.mask.shape, b.shape)
assert_equal(b.data.shape, b.shape)
b = atleast_1d(1.0, 2.0)
for a in b:
assert_equal(a.shape, (1,))
assert_equal(a.mask.shape, a.shape)
assert_equal(a.data.shape, a.shape)
b = atleast_2d(1.0)
assert_equal(b.shape, (1, 1))
assert_equal(b.mask.shape, b.shape)
assert_equal(b.data.shape, b.shape)
b = atleast_2d(1.0, 2.0)
for a in b:
assert_equal(a.shape, (1, 1))
assert_equal(a.mask.shape, a.shape)
assert_equal(a.data.shape, a.shape)
b = atleast_3d(1.0)
assert_equal(b.shape, (1, 1, 1))
assert_equal(b.mask.shape, b.shape)
assert_equal(b.data.shape, b.shape)
b = atleast_3d(1.0, 2.0)
for a in b:
assert_equal(a.shape, (1, 1, 1))
assert_equal(a.mask.shape, a.shape)
assert_equal(a.data.shape, a.shape)
b = diagflat(1.0)
assert_equal(b.shape, (1, 1))
assert_equal(b.mask.shape, b.data.shape)
class TestNDEnumerate:
def test_ndenumerate_nomasked(self):
ordinary = np.arange(6.).reshape((1, 3, 2))
empty_mask = np.zeros_like(ordinary, dtype=bool)
with_mask = masked_array(ordinary, mask=empty_mask)
assert_equal(list(np.ndenumerate(ordinary)),
list(ndenumerate(ordinary)))
assert_equal(list(ndenumerate(ordinary)),
list(ndenumerate(with_mask)))
assert_equal(list(ndenumerate(with_mask)),
list(ndenumerate(with_mask, compressed=False)))
def test_ndenumerate_allmasked(self):
a = masked_all(())
b = masked_all((100,))
c = masked_all((2, 3, 4))
assert_equal(list(ndenumerate(a)), [])
assert_equal(list(ndenumerate(b)), [])
assert_equal(list(ndenumerate(b, compressed=False)),
list(zip(np.ndindex((100,)), 100 * [masked])))
assert_equal(list(ndenumerate(c)), [])
assert_equal(list(ndenumerate(c, compressed=False)),
list(zip(np.ndindex((2, 3, 4)), 2 * 3 * 4 * [masked])))
def test_ndenumerate_mixedmasked(self):
a = masked_array(np.arange(12).reshape((3, 4)),
mask=[[1, 1, 1, 1],
[1, 1, 0, 1],
[0, 0, 0, 0]])
items = [((1, 2), 6),
((2, 0), 8), ((2, 1), 9), ((2, 2), 10), ((2, 3), 11)]
assert_equal(list(ndenumerate(a)), items)
assert_equal(len(list(ndenumerate(a, compressed=False))), a.size)
for coordinate, value in ndenumerate(a, compressed=False):
assert_equal(a[coordinate], value)
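        # By default ndenumerate skips masked elements entirely; with
        # compressed=False every coordinate is visited and masked slots
        # yield the `masked` singleton, as the final loop verifies.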
class TestStack:
def test_stack_1d(self):
a = masked_array([0, 1, 2], mask=[0, 1, 0])
b = masked_array([9, 8, 7], mask=[1, 0, 0])
c = stack([a, b], axis=0)
assert_equal(c.shape, (2, 3))
assert_array_equal(a.mask, c[0].mask)
assert_array_equal(b.mask, c[1].mask)
d = vstack([a, b])
assert_array_equal(c.data, d.data)
assert_array_equal(c.mask, d.mask)
c = stack([a, b], axis=1)
assert_equal(c.shape, (3, 2))
assert_array_equal(a.mask, c[:, 0].mask)
assert_array_equal(b.mask, c[:, 1].mask)
def test_stack_masks(self):
a = masked_array([0, 1, 2], mask=True)
b = masked_array([9, 8, 7], mask=False)
c = stack([a, b], axis=0)
assert_equal(c.shape, (2, 3))
assert_array_equal(a.mask, c[0].mask)
assert_array_equal(b.mask, c[1].mask)
d = vstack([a, b])
assert_array_equal(c.data, d.data)
assert_array_equal(c.mask, d.mask)
c = stack([a, b], axis=1)
assert_equal(c.shape, (3, 2))
assert_array_equal(a.mask, c[:, 0].mask)
assert_array_equal(b.mask, c[:, 1].mask)
def test_stack_nd(self):
# 2D
shp = (3, 2)
d1 = np.random.randint(0, 10, shp)
d2 = np.random.randint(0, 10, shp)
m1 = np.random.randint(0, 2, shp).astype(bool)
m2 = np.random.randint(0, 2, shp).astype(bool)
a1 = masked_array(d1, mask=m1)
a2 = masked_array(d2, mask=m2)
c = stack([a1, a2], axis=0)
c_shp = (2,) + shp
assert_equal(c.shape, c_shp)
assert_array_equal(a1.mask, c[0].mask)
assert_array_equal(a2.mask, c[1].mask)
c = stack([a1, a2], axis=-1)
c_shp = shp + (2,)
assert_equal(c.shape, c_shp)
assert_array_equal(a1.mask, c[..., 0].mask)
assert_array_equal(a2.mask, c[..., 1].mask)
# 4D
shp = (3, 2, 4, 5,)
d1 = np.random.randint(0, 10, shp)
d2 = np.random.randint(0, 10, shp)
m1 = np.random.randint(0, 2, shp).astype(bool)
m2 = np.random.randint(0, 2, shp).astype(bool)
a1 = masked_array(d1, mask=m1)
a2 = masked_array(d2, mask=m2)
c = stack([a1, a2], axis=0)
c_shp = (2,) + shp
assert_equal(c.shape, c_shp)
assert_array_equal(a1.mask, c[0].mask)
assert_array_equal(a2.mask, c[1].mask)
c = stack([a1, a2], axis=-1)
c_shp = shp + (2,)
assert_equal(c.shape, c_shp)
assert_array_equal(a1.mask, c[..., 0].mask)
assert_array_equal(a2.mask, c[..., 1].mask)
| 71,958 | Python | 38.955025 | 81 | 0.494455 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/ma/tests/test_mrecords.py | # pylint: disable-msg=W0611, W0612, W0511,R0201
"""Tests suite for mrecords.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
"""
import numpy as np
import numpy.ma as ma
from numpy import recarray
from numpy.ma import masked, nomask
from numpy.testing import temppath
from numpy.core.records import (
fromrecords as recfromrecords, fromarrays as recfromarrays
)
from numpy.ma.mrecords import (
MaskedRecords, mrecarray, fromarrays, fromtextfile, fromrecords,
addfield
)
from numpy.ma.testutils import (
assert_, assert_equal,
assert_equal_records,
)
from numpy.compat import pickle
class TestMRecords:
ilist = [1, 2, 3, 4, 5]
flist = [1.1, 2.2, 3.3, 4.4, 5.5]
slist = [b'one', b'two', b'three', b'four', b'five']
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mask = [0, 1, 0, 0, 1]
base = ma.array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype)
def test_byview(self):
# Test creation by view
base = self.base
mbase = base.view(mrecarray)
assert_equal(mbase.recordmask, base.recordmask)
assert_equal_records(mbase._mask, base._mask)
assert_(isinstance(mbase._data, recarray))
assert_equal_records(mbase._data, base._data.view(recarray))
for field in ('a', 'b', 'c'):
assert_equal(base[field], mbase[field])
assert_equal_records(mbase.view(mrecarray), mbase)
def test_get(self):
# Tests fields retrieval
base = self.base.copy()
mbase = base.view(mrecarray)
# As fields..........
for field in ('a', 'b', 'c'):
assert_equal(getattr(mbase, field), mbase[field])
assert_equal(base[field], mbase[field])
# as elements .......
mbase_first = mbase[0]
assert_(isinstance(mbase_first, mrecarray))
assert_equal(mbase_first.dtype, mbase.dtype)
assert_equal(mbase_first.tolist(), (1, 1.1, b'one'))
# Used to be mask, now it's recordmask
assert_equal(mbase_first.recordmask, nomask)
assert_equal(mbase_first._mask.item(), (False, False, False))
assert_equal(mbase_first['a'], mbase['a'][0])
mbase_last = mbase[-1]
assert_(isinstance(mbase_last, mrecarray))
assert_equal(mbase_last.dtype, mbase.dtype)
assert_equal(mbase_last.tolist(), (None, None, None))
# Used to be mask, now it's recordmask
assert_equal(mbase_last.recordmask, True)
assert_equal(mbase_last._mask.item(), (True, True, True))
assert_equal(mbase_last['a'], mbase['a'][-1])
assert_((mbase_last['a'] is masked))
# as slice ..........
mbase_sl = mbase[:2]
assert_(isinstance(mbase_sl, mrecarray))
assert_equal(mbase_sl.dtype, mbase.dtype)
# Used to be mask, now it's recordmask
assert_equal(mbase_sl.recordmask, [0, 1])
assert_equal_records(mbase_sl.mask,
np.array([(False, False, False),
(True, True, True)],
dtype=mbase._mask.dtype))
assert_equal_records(mbase_sl, base[:2].view(mrecarray))
for field in ('a', 'b', 'c'):
assert_equal(getattr(mbase_sl, field), base[:2][field])
def test_set_fields(self):
# Tests setting fields.
base = self.base.copy()
mbase = base.view(mrecarray)
mbase = mbase.copy()
mbase.fill_value = (999999, 1e20, 'N/A')
        # Change the data; the mask should be preserved
mbase.a._data[:] = 5
assert_equal(mbase['a']._data, [5, 5, 5, 5, 5])
assert_equal(mbase['a']._mask, [0, 1, 0, 0, 1])
# Change the elements, and the mask will follow
mbase.a = 1
assert_equal(mbase['a']._data, [1]*5)
assert_equal(ma.getmaskarray(mbase['a']), [0]*5)
        # Used to be _mask, now it's recordmask
assert_equal(mbase.recordmask, [False]*5)
assert_equal(mbase._mask.tolist(),
np.array([(0, 0, 0),
(0, 1, 1),
(0, 0, 0),
(0, 0, 0),
(0, 1, 1)],
dtype=bool))
# Set a field to mask ........................
mbase.c = masked
        # Used to be mask, and it's still mask!
assert_equal(mbase.c.mask, [1]*5)
assert_equal(mbase.c.recordmask, [1]*5)
assert_equal(ma.getmaskarray(mbase['c']), [1]*5)
assert_equal(ma.getdata(mbase['c']), [b'N/A']*5)
assert_equal(mbase._mask.tolist(),
np.array([(0, 0, 1),
(0, 1, 1),
(0, 0, 1),
(0, 0, 1),
(0, 1, 1)],
dtype=bool))
# Set fields by slices .......................
mbase = base.view(mrecarray).copy()
mbase.a[3:] = 5
assert_equal(mbase.a, [1, 2, 3, 5, 5])
assert_equal(mbase.a._mask, [0, 1, 0, 0, 0])
mbase.b[3:] = masked
assert_equal(mbase.b, base['b'])
assert_equal(mbase.b._mask, [0, 1, 0, 1, 1])
# Set fields globally..........................
ndtype = [('alpha', '|S1'), ('num', int)]
data = ma.array([('a', 1), ('b', 2), ('c', 3)], dtype=ndtype)
rdata = data.view(MaskedRecords)
val = ma.array([10, 20, 30], mask=[1, 0, 0])
rdata['num'] = val
assert_equal(rdata.num, val)
assert_equal(rdata.num.mask, [1, 0, 0])
def test_set_fields_mask(self):
# Tests setting the mask of a field.
base = self.base.copy()
        # This one already has a mask....
mbase = base.view(mrecarray)
mbase['a'][-2] = masked
assert_equal(mbase.a, [1, 2, 3, 4, 5])
assert_equal(mbase.a._mask, [0, 1, 0, 1, 1])
        # This one does not have one yet
mbase = fromarrays([np.arange(5), np.random.rand(5)],
dtype=[('a', int), ('b', float)])
mbase['a'][-2] = masked
assert_equal(mbase.a, [0, 1, 2, 3, 4])
assert_equal(mbase.a._mask, [0, 0, 0, 1, 0])
def test_set_mask(self):
base = self.base.copy()
mbase = base.view(mrecarray)
# Set the mask to True .......................
mbase.mask = masked
assert_equal(ma.getmaskarray(mbase['b']), [1]*5)
assert_equal(mbase['a']._mask, mbase['b']._mask)
assert_equal(mbase['a']._mask, mbase['c']._mask)
assert_equal(mbase._mask.tolist(),
np.array([(1, 1, 1)]*5, dtype=bool))
# Delete the mask ............................
mbase.mask = nomask
assert_equal(ma.getmaskarray(mbase['c']), [0]*5)
assert_equal(mbase._mask.tolist(),
np.array([(0, 0, 0)]*5, dtype=bool))
def test_set_mask_fromarray(self):
base = self.base.copy()
mbase = base.view(mrecarray)
# Sets the mask w/ an array
mbase.mask = [1, 0, 0, 0, 1]
assert_equal(mbase.a.mask, [1, 0, 0, 0, 1])
assert_equal(mbase.b.mask, [1, 0, 0, 0, 1])
assert_equal(mbase.c.mask, [1, 0, 0, 0, 1])
        # Yay, once more!
mbase.mask = [0, 0, 0, 0, 1]
assert_equal(mbase.a.mask, [0, 0, 0, 0, 1])
assert_equal(mbase.b.mask, [0, 0, 0, 0, 1])
assert_equal(mbase.c.mask, [0, 0, 0, 0, 1])
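        # Assigning a plain 1-D mask to a masked record array broadcasts it
        # across all fields, which is why a, b and c end up sharing the same
        # per-record mask.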
def test_set_mask_fromfields(self):
mbase = self.base.copy().view(mrecarray)
nmask = np.array(
[(0, 1, 0), (0, 1, 0), (1, 0, 1), (1, 0, 1), (0, 0, 0)],
dtype=[('a', bool), ('b', bool), ('c', bool)])
mbase.mask = nmask
assert_equal(mbase.a.mask, [0, 0, 1, 1, 0])
assert_equal(mbase.b.mask, [1, 1, 0, 0, 0])
assert_equal(mbase.c.mask, [0, 0, 1, 1, 0])
# Reinitialize and redo
mbase.mask = False
mbase.fieldmask = nmask
assert_equal(mbase.a.mask, [0, 0, 1, 1, 0])
assert_equal(mbase.b.mask, [1, 1, 0, 0, 0])
assert_equal(mbase.c.mask, [0, 0, 1, 1, 0])
def test_set_elements(self):
base = self.base.copy()
# Set an element to mask .....................
mbase = base.view(mrecarray).copy()
mbase[-2] = masked
assert_equal(
mbase._mask.tolist(),
np.array([(0, 0, 0), (1, 1, 1), (0, 0, 0), (1, 1, 1), (1, 1, 1)],
dtype=bool))
# Used to be mask, now it's recordmask!
assert_equal(mbase.recordmask, [0, 1, 0, 1, 1])
# Set slices .................................
mbase = base.view(mrecarray).copy()
mbase[:2] = (5, 5, 5)
assert_equal(mbase.a._data, [5, 5, 3, 4, 5])
assert_equal(mbase.a._mask, [0, 0, 0, 0, 1])
assert_equal(mbase.b._data, [5., 5., 3.3, 4.4, 5.5])
assert_equal(mbase.b._mask, [0, 0, 0, 0, 1])
assert_equal(mbase.c._data,
[b'5', b'5', b'three', b'four', b'five'])
assert_equal(mbase.b._mask, [0, 0, 0, 0, 1])
mbase = base.view(mrecarray).copy()
mbase[:2] = masked
assert_equal(mbase.a._data, [1, 2, 3, 4, 5])
assert_equal(mbase.a._mask, [1, 1, 0, 0, 1])
assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 4.4, 5.5])
assert_equal(mbase.b._mask, [1, 1, 0, 0, 1])
assert_equal(mbase.c._data,
[b'one', b'two', b'three', b'four', b'five'])
assert_equal(mbase.b._mask, [1, 1, 0, 0, 1])
def test_setslices_hardmask(self):
# Tests setting slices w/ hardmask.
base = self.base.copy()
mbase = base.view(mrecarray)
mbase.harden_mask()
try:
mbase[-2:] = (5, 5, 5)
assert_equal(mbase.a._data, [1, 2, 3, 5, 5])
assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 5, 5.5])
assert_equal(mbase.c._data,
[b'one', b'two', b'three', b'5', b'five'])
assert_equal(mbase.a._mask, [0, 1, 0, 0, 1])
assert_equal(mbase.b._mask, mbase.a._mask)
assert_equal(mbase.b._mask, mbase.c._mask)
except NotImplementedError:
# OK, not implemented yet...
pass
except AssertionError:
raise
else:
            raise Exception("Flexible hard masks should be supported!")
# Not using a tuple should crash
try:
mbase[-2:] = 3
except (NotImplementedError, TypeError):
pass
else:
            raise AssertionError(
                "Assigning a non-tuple record should have raised!")
def test_hardmask(self):
# Test hardmask
base = self.base.copy()
mbase = base.view(mrecarray)
mbase.harden_mask()
assert_(mbase._hardmask)
mbase.mask = nomask
assert_equal_records(mbase._mask, base._mask)
mbase.soften_mask()
assert_(not mbase._hardmask)
mbase.mask = nomask
# So, the mask of a field is no longer set to nomask...
assert_equal_records(mbase._mask,
ma.make_mask_none(base.shape, base.dtype))
assert_(ma.make_mask(mbase['b']._mask) is nomask)
assert_equal(mbase['a']._mask, mbase['b']._mask)
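        # With a hardened mask the assignment `mask = nomask` is a no-op;
        # once softened, the same assignment clears every field's mask to an
        # all-False array (make_mask_none) rather than to the nomask
        # singleton.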
def test_pickling(self):
# Test pickling
base = self.base.copy()
mrec = base.view(mrecarray)
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
_ = pickle.dumps(mrec, protocol=proto)
mrec_ = pickle.loads(_)
assert_equal(mrec_.dtype, mrec.dtype)
assert_equal_records(mrec_._data, mrec._data)
assert_equal(mrec_._mask, mrec._mask)
assert_equal_records(mrec_._mask, mrec._mask)
def test_filled(self):
# Test filling the array
_a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
_b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
_c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8')
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mrec = fromarrays([_a, _b, _c], dtype=ddtype,
fill_value=(99999, 99999., 'N/A'))
mrecfilled = mrec.filled()
assert_equal(mrecfilled['a'], np.array((1, 2, 99999), dtype=int))
assert_equal(mrecfilled['b'], np.array((1.1, 2.2, 99999.),
dtype=float))
assert_equal(mrecfilled['c'], np.array(('one', 'two', 'N/A'),
dtype='|S8'))
def test_tolist(self):
# Test tolist.
_a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
_b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
_c = ma.array(['one', 'two', 'three'], mask=[1, 0, 0], dtype='|S8')
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mrec = fromarrays([_a, _b, _c], dtype=ddtype,
fill_value=(99999, 99999., 'N/A'))
assert_equal(mrec.tolist(),
[(1, 1.1, None), (2, 2.2, b'two'),
(None, None, b'three')])
def test_withnames(self):
# Test the creation w/ format and names
x = mrecarray(1, formats=float, names='base')
x[0]['base'] = 10
assert_equal(x['base'][0], 10)
def test_exotic_formats(self):
# Test that 'exotic' formats are processed properly
easy = mrecarray(1, dtype=[('i', int), ('s', '|S8'), ('f', float)])
easy[0] = masked
assert_equal(easy.filled(1).item(), (1, b'1', 1.))
solo = mrecarray(1, dtype=[('f0', '<f8', (2, 2))])
solo[0] = masked
assert_equal(solo.filled(1).item(),
np.array((1,), dtype=solo.dtype).item())
mult = mrecarray(2, dtype="i4, (2,3)float, float")
mult[0] = masked
mult[1] = (1, 1, 1)
mult.filled(0)
assert_equal_records(mult.filled(0),
np.array([(0, 0, 0), (1, 1, 1)],
dtype=mult.dtype))
class TestView:
def setup_method(self):
(a, b) = (np.arange(10), np.random.rand(10))
ndtype = [('a', float), ('b', float)]
arr = np.array(list(zip(a, b)), dtype=ndtype)
mrec = fromarrays([a, b], dtype=ndtype, fill_value=(-9., -99.))
mrec.mask[3] = (False, True)
self.data = (mrec, a, b, arr)
def test_view_by_itself(self):
(mrec, a, b, arr) = self.data
test = mrec.view()
assert_(isinstance(test, MaskedRecords))
assert_equal_records(test, mrec)
assert_equal_records(test._mask, mrec._mask)
def test_view_simple_dtype(self):
(mrec, a, b, arr) = self.data
ntype = (float, 2)
test = mrec.view(ntype)
assert_(isinstance(test, ma.MaskedArray))
assert_equal(test, np.array(list(zip(a, b)), dtype=float))
assert_(test[3, 1] is ma.masked)
def test_view_flexible_type(self):
(mrec, a, b, arr) = self.data
alttype = [('A', float), ('B', float)]
test = mrec.view(alttype)
assert_(isinstance(test, MaskedRecords))
assert_equal_records(test, arr.view(alttype))
assert_(test['B'][3] is masked)
assert_equal(test.dtype, np.dtype(alttype))
assert_(test._fill_value is None)
##############################################################################
class TestMRecordsImport:
_a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
_b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
_c = ma.array([b'one', b'two', b'three'],
mask=[0, 0, 1], dtype='|S8')
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mrec = fromarrays([_a, _b, _c], dtype=ddtype,
fill_value=(b'99999', b'99999.',
b'N/A'))
nrec = recfromarrays((_a._data, _b._data, _c._data), dtype=ddtype)
data = (mrec, nrec, ddtype)
def test_fromarrays(self):
_a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
_b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
_c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8')
(mrec, nrec, _) = self.data
for (f, l) in zip(('a', 'b', 'c'), (_a, _b, _c)):
assert_equal(getattr(mrec, f)._mask, l._mask)
# One record only
_x = ma.array([1, 1.1, 'one'], mask=[1, 0, 0], dtype=object)
assert_equal_records(fromarrays(_x, dtype=mrec.dtype), mrec[0])
def test_fromrecords(self):
# Test construction from records.
(mrec, nrec, ddtype) = self.data
#......
palist = [(1, 'abc', 3.7000002861022949, 0),
(2, 'xy', 6.6999998092651367, 1),
(0, ' ', 0.40000000596046448, 0)]
pa = recfromrecords(palist, names='c1, c2, c3, c4')
mpa = fromrecords(palist, names='c1, c2, c3, c4')
assert_equal_records(pa, mpa)
#.....
_mrec = fromrecords(nrec)
assert_equal(_mrec.dtype, mrec.dtype)
for field in _mrec.dtype.names:
assert_equal(getattr(_mrec, field), getattr(mrec._data, field))
_mrec = fromrecords(nrec.tolist(), names='c1,c2,c3')
assert_equal(_mrec.dtype, [('c1', int), ('c2', float), ('c3', '|S5')])
for (f, n) in zip(('c1', 'c2', 'c3'), ('a', 'b', 'c')):
assert_equal(getattr(_mrec, f), getattr(mrec._data, n))
_mrec = fromrecords(mrec)
assert_equal(_mrec.dtype, mrec.dtype)
assert_equal_records(_mrec._data, mrec.filled())
assert_equal_records(_mrec._mask, mrec._mask)
def test_fromrecords_wmask(self):
# Tests construction from records w/ mask.
(mrec, nrec, ddtype) = self.data
_mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=[0, 1, 0,])
assert_equal_records(_mrec._data, mrec._data)
assert_equal(_mrec._mask.tolist(), [(0, 0, 0), (1, 1, 1), (0, 0, 0)])
_mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=True)
assert_equal_records(_mrec._data, mrec._data)
assert_equal(_mrec._mask.tolist(), [(1, 1, 1), (1, 1, 1), (1, 1, 1)])
_mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=mrec._mask)
assert_equal_records(_mrec._data, mrec._data)
assert_equal(_mrec._mask.tolist(), mrec._mask.tolist())
_mrec = fromrecords(nrec.tolist(), dtype=ddtype,
mask=mrec._mask.tolist())
assert_equal_records(_mrec._data, mrec._data)
assert_equal(_mrec._mask.tolist(), mrec._mask.tolist())
def test_fromtextfile(self):
# Tests reading from a text file.
fcontent = (
"""#
'One (S)','Two (I)','Three (F)','Four (M)','Five (-)','Six (C)'
'strings',1,1.0,'mixed column',,1
'with embedded "double quotes"',2,2.0,1.0,,1
'strings',3,3.0E5,3,,1
'strings',4,-1e-10,,,1
""")
with temppath() as path:
with open(path, 'w') as f:
f.write(fcontent)
mrectxt = fromtextfile(path, delimiter=',', varnames='ABCDEFG')
assert_(isinstance(mrectxt, MaskedRecords))
assert_equal(mrectxt.F, [1, 1, 1, 1])
assert_equal(mrectxt.E._mask, [1, 1, 1, 1])
assert_equal(mrectxt.C, [1, 2, 3.e+5, -1e-10])
def test_addfield(self):
# Tests addfield
(mrec, nrec, ddtype) = self.data
(d, m) = ([100, 200, 300], [1, 0, 0])
mrec = addfield(mrec, ma.array(d, mask=m))
assert_equal(mrec.f3, d)
assert_equal(mrec.f3._mask, m)
def test_record_array_with_object_field():
# Trac #1839
y = ma.masked_array(
[(1, '2'), (3, '4')],
mask=[(0, 0), (0, 1)],
dtype=[('a', int), ('b', object)])
# getting an item used to fail
y[1]
| 19,890 | Python | 39.265182 | 78 | 0.507089 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/ma/tests/test_old_ma.py | from functools import reduce
import pytest
import numpy as np
import numpy.core.umath as umath
import numpy.core.fromnumeric as fromnumeric
from numpy.testing import (
assert_, assert_raises, assert_equal,
)
from numpy.ma import (
MaskType, MaskedArray, absolute, add, all, allclose, allequal, alltrue,
arange, arccos, arcsin, arctan, arctan2, array, average, choose,
concatenate, conjugate, cos, cosh, count, divide, equal, exp, filled,
getmask, greater, greater_equal, inner, isMaskedArray, less,
less_equal, log, log10, make_mask, masked, masked_array, masked_equal,
masked_greater, masked_greater_equal, masked_inside, masked_less,
masked_less_equal, masked_not_equal, masked_outside,
masked_print_option, masked_values, masked_where, maximum, minimum,
multiply, nomask, nonzero, not_equal, ones, outer, product, put, ravel,
repeat, resize, shape, sin, sinh, sometrue, sort, sqrt, subtract, sum,
take, tan, tanh, transpose, where, zeros,
)
from numpy.compat import pickle
pi = np.pi
def eq(v, w, msg=''):
result = allclose(v, w)
if not result:
print(f'Not eq:{msg}\n{v}\n----{w}')
return result
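# Note: eq() relies on np.ma.allclose, whose masked_equal=True default
# treats masked positions as equal; that is why the tests below can place
# arbitrary values (0, 99, 199, -1, ...) at masked slots without failing.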
class TestMa:
def setup_method(self):
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = array(x, mask=m1)
ym = array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
s = x.shape
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf, s)
def test_testBasic1d(self):
# Test of basic array creation and properties in 1 dimension.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
assert_(not isMaskedArray(x))
assert_(isMaskedArray(xm))
assert_equal(shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.dtype, x.dtype)
assert_equal(xm.size, reduce(lambda x, y:x * y, s))
assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
assert_(eq(xm, xf))
assert_(eq(filled(xm, 1.e20), xf))
assert_(eq(x, xm))
@pytest.mark.parametrize("s", [(4, 3), (6, 2)])
def test_testBasic2d(self, s):
# Test of basic array creation and properties in 2 dimensions.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf, _) = self.d
x.shape = s
y.shape = s
xm.shape = s
ym.shape = s
xf.shape = s
assert_(not isMaskedArray(x))
assert_(isMaskedArray(xm))
assert_equal(shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.size, reduce(lambda x, y: x * y, s))
assert_equal(count(xm), len(m1) - reduce(lambda x, y: x + y, m1))
assert_(eq(xm, xf))
assert_(eq(filled(xm, 1.e20), xf))
assert_(eq(x, xm))
def test_testArithmetic(self):
# Test of basic arithmetic.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
a2d = array([[1, 2], [0, 4]])
a2dm = masked_array(a2d, [[0, 0], [1, 0]])
assert_(eq(a2d * a2d, a2d * a2dm))
assert_(eq(a2d + a2d, a2d + a2dm))
assert_(eq(a2d - a2d, a2d - a2dm))
for s in [(12,), (4, 3), (2, 6)]:
x = x.reshape(s)
y = y.reshape(s)
xm = xm.reshape(s)
ym = ym.reshape(s)
xf = xf.reshape(s)
assert_(eq(-x, -xm))
assert_(eq(x + y, xm + ym))
assert_(eq(x - y, xm - ym))
assert_(eq(x * y, xm * ym))
with np.errstate(divide='ignore', invalid='ignore'):
assert_(eq(x / y, xm / ym))
assert_(eq(a10 + y, a10 + ym))
assert_(eq(a10 - y, a10 - ym))
assert_(eq(a10 * y, a10 * ym))
with np.errstate(divide='ignore', invalid='ignore'):
assert_(eq(a10 / y, a10 / ym))
assert_(eq(x + a10, xm + a10))
assert_(eq(x - a10, xm - a10))
assert_(eq(x * a10, xm * a10))
assert_(eq(x / a10, xm / a10))
assert_(eq(x ** 2, xm ** 2))
assert_(eq(abs(x) ** 2.5, abs(xm) ** 2.5))
assert_(eq(x ** y, xm ** ym))
assert_(eq(np.add(x, y), add(xm, ym)))
assert_(eq(np.subtract(x, y), subtract(xm, ym)))
assert_(eq(np.multiply(x, y), multiply(xm, ym)))
with np.errstate(divide='ignore', invalid='ignore'):
assert_(eq(np.divide(x, y), divide(xm, ym)))
def test_testMixedArithmetic(self):
na = np.array([1])
ma = array([1])
assert_(isinstance(na + ma, MaskedArray))
assert_(isinstance(ma + na, MaskedArray))
def test_testUfuncs1(self):
# Test various functions such as sin, cos.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
assert_(eq(np.cos(x), cos(xm)))
assert_(eq(np.cosh(x), cosh(xm)))
assert_(eq(np.sin(x), sin(xm)))
assert_(eq(np.sinh(x), sinh(xm)))
assert_(eq(np.tan(x), tan(xm)))
assert_(eq(np.tanh(x), tanh(xm)))
with np.errstate(divide='ignore', invalid='ignore'):
assert_(eq(np.sqrt(abs(x)), sqrt(xm)))
assert_(eq(np.log(abs(x)), log(xm)))
assert_(eq(np.log10(abs(x)), log10(xm)))
assert_(eq(np.exp(x), exp(xm)))
assert_(eq(np.arcsin(z), arcsin(zm)))
assert_(eq(np.arccos(z), arccos(zm)))
assert_(eq(np.arctan(z), arctan(zm)))
assert_(eq(np.arctan2(x, y), arctan2(xm, ym)))
assert_(eq(np.absolute(x), absolute(xm)))
assert_(eq(np.equal(x, y), equal(xm, ym)))
assert_(eq(np.not_equal(x, y), not_equal(xm, ym)))
assert_(eq(np.less(x, y), less(xm, ym)))
assert_(eq(np.greater(x, y), greater(xm, ym)))
assert_(eq(np.less_equal(x, y), less_equal(xm, ym)))
assert_(eq(np.greater_equal(x, y), greater_equal(xm, ym)))
assert_(eq(np.conjugate(x), conjugate(xm)))
assert_(eq(np.concatenate((x, y)), concatenate((xm, ym))))
assert_(eq(np.concatenate((x, y)), concatenate((x, y))))
assert_(eq(np.concatenate((x, y)), concatenate((xm, y))))
assert_(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))
def test_xtestCount(self):
# Test count
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
assert_(count(ott).dtype.type is np.intp)
assert_equal(3, count(ott))
assert_equal(1, count(1))
assert_(eq(0, array(1, mask=[1])))
ott = ott.reshape((2, 2))
assert_(count(ott).dtype.type is np.intp)
assert_(isinstance(count(ott, 0), np.ndarray))
assert_(count(ott).dtype.type is np.intp)
assert_(eq(3, count(ott)))
assert_(getmask(count(ott, 0)) is nomask)
assert_(eq([1, 2], count(ott, 0)))
def test_testMinMax(self):
# Test minimum and maximum.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
xr = np.ravel(x) # max doesn't work if shaped
xmr = ravel(xm)
# true because of careful selection of data
assert_(eq(max(xr), maximum.reduce(xmr)))
assert_(eq(min(xr), minimum.reduce(xmr)))
def test_testAddSumProd(self):
# Test add, sum, product.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
assert_(eq(np.add.reduce(x), add.reduce(x)))
assert_(eq(np.add.accumulate(x), add.accumulate(x)))
        assert_(eq(4, sum(array(4), axis=0)))
assert_(eq(np.sum(x, axis=0), sum(x, axis=0)))
assert_(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
assert_(eq(np.sum(x, 0), sum(x, 0)))
assert_(eq(np.product(x, axis=0), product(x, axis=0)))
assert_(eq(np.product(x, 0), product(x, 0)))
assert_(eq(np.product(filled(xm, 1), axis=0),
product(xm, axis=0)))
if len(s) > 1:
assert_(eq(np.concatenate((x, y), 1),
concatenate((xm, ym), 1)))
assert_(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
assert_(eq(np.sum(x, 1), sum(x, 1)))
assert_(eq(np.product(x, 1), product(x, 1)))
def test_testCI(self):
# Test of conversions and indexing
x1 = np.array([1, 2, 4, 3])
x2 = array(x1, mask=[1, 0, 0, 0])
x3 = array(x1, mask=[0, 1, 0, 1])
x4 = array(x1)
# test conversion to strings
str(x2) # raises?
repr(x2) # raises?
assert_(eq(np.sort(x1), sort(x2, fill_value=0)))
# tests of indexing
assert_(type(x2[1]) is type(x1[1]))
assert_(x1[1] == x2[1])
assert_(x2[0] is masked)
assert_(eq(x1[2], x2[2]))
assert_(eq(x1[2:5], x2[2:5]))
assert_(eq(x1[:], x2[:]))
assert_(eq(x1[1:], x3[1:]))
x1[2] = 9
x2[2] = 9
assert_(eq(x1, x2))
x1[1:3] = 99
x2[1:3] = 99
assert_(eq(x1, x2))
x2[1] = masked
assert_(eq(x1, x2))
x2[1:3] = masked
assert_(eq(x1, x2))
x2[:] = x1
x2[1] = masked
assert_(allequal(getmask(x2), array([0, 1, 0, 0])))
x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x3), array([0, 1, 1, 0])))
x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x4), array([0, 1, 1, 0])))
assert_(allequal(x4, array([1, 2, 3, 4])))
x1 = np.arange(5) * 1.0
x2 = masked_values(x1, 3.0)
assert_(eq(x1, x2))
assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
assert_(eq(3.0, x2.fill_value))
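        # masked_values(x1, 3.0) masks the entries equal to 3.0 and records
        # 3.0 as the fill_value; eq() still reports x1 == x2 because the
        # masked slot is skipped in the comparison.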
x1 = array([1, 'hello', 2, 3], object)
x2 = np.array([1, 'hello', 2, 3], object)
s1 = x1[1]
s2 = x2[1]
assert_equal(type(s2), str)
assert_equal(type(s1), str)
assert_equal(s1, s2)
assert_(x1[1:1].shape == (0,))
def test_testCopySize(self):
# Tests of some subtle points of copying and sizing.
n = [0, 0, 1, 0, 0]
m = make_mask(n)
m2 = make_mask(m)
assert_(m is m2)
m3 = make_mask(m, copy=True)
assert_(m is not m3)
x1 = np.arange(5)
y1 = array(x1, mask=m)
assert_(y1._data is not x1)
assert_(allequal(x1, y1._data))
assert_(y1._mask is m)
y1a = array(y1, copy=0)
        # For copy=False, one might expect that the array would just be
        # passed on, i.e., that it would be "is" instead of "==".
# See gh-4043 for discussion.
assert_(y1a._mask.__array_interface__ ==
y1._mask.__array_interface__)
y2 = array(x1, mask=m3, copy=0)
assert_(y2._mask is m3)
assert_(y2[2] is masked)
y2[2] = 9
assert_(y2[2] is not masked)
assert_(y2._mask is m3)
assert_(allequal(y2.mask, 0))
y2a = array(x1, mask=m, copy=1)
assert_(y2a._mask is not m)
assert_(y2a[2] is masked)
y2a[2] = 9
assert_(y2a[2] is not masked)
assert_(y2a._mask is not m)
assert_(allequal(y2a.mask, 0))
y3 = array(x1 * 1.0, mask=m)
assert_(filled(y3).dtype is (x1 * 1.0).dtype)
x4 = arange(4)
x4[2] = masked
y4 = resize(x4, (8,))
assert_(eq(concatenate([x4, x4]), y4))
assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))
y5 = repeat(x4, (2, 2, 2, 2), axis=0)
assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))
y6 = repeat(x4, 2, axis=0)
assert_(eq(y5, y6))
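        # resize tiles the data and the mask together (hence the repeated
        # masked slot), and repeat carries each element's mask along with
        # its copies.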
def test_testPut(self):
# Test of put
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
m2 = m.copy()
x = array(d, mask=m)
assert_(x[3] is masked)
assert_(x[4] is masked)
x[[1, 4]] = [10, 40]
assert_(x._mask is m)
assert_(x[3] is masked)
assert_(x[4] is not masked)
assert_(eq(x, [0, 10, 2, -1, 40]))
x = array(d, mask=m2, copy=True)
x.put([0, 1, 2], [-1, 100, 200])
assert_(x._mask is not m2)
assert_(x[3] is masked)
assert_(x[4] is masked)
assert_(eq(x, [-1, 100, 200, 0, 0]))
def test_testPut2(self):
# Test of put
d = arange(5)
x = array(d, mask=[0, 0, 0, 0, 0])
z = array([10, 40], mask=[1, 0])
assert_(x[2] is not masked)
assert_(x[3] is not masked)
x[2:4] = z
assert_(x[2] is masked)
assert_(x[3] is not masked)
assert_(eq(x, [0, 1, 10, 40, 4]))
d = arange(5)
x = array(d, mask=[0, 0, 0, 0, 0])
y = x[2:4]
z = array([10, 40], mask=[1, 0])
assert_(x[2] is not masked)
assert_(x[3] is not masked)
y[:] = z
assert_(y[0] is masked)
assert_(y[1] is not masked)
assert_(eq(y, [10, 40]))
assert_(x[2] is masked)
assert_(x[3] is not masked)
assert_(eq(x, [0, 1, 10, 40, 4]))
def test_testMaPut(self):
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1]
i = np.nonzero(m)[0]
put(ym, i, zm)
assert_(all(take(ym, i, axis=0) == zm))
def test_testOddFeatures(self):
# Test of other odd features
x = arange(20)
x = x.reshape(4, 5)
x.flat[5] = 12
assert_(x[1, 0] == 12)
z = x + 10j * x
assert_(eq(z.real, x))
assert_(eq(z.imag, 10 * x))
assert_(eq((z * conjugate(z)).real, 101 * x * x))
z.imag[...] = 0.0
x = arange(10)
x[3] = masked
assert_(str(x[3]) == str(masked))
c = x >= 8
assert_(count(where(c, masked, masked)) == 0)
assert_(shape(where(c, masked, masked)) == c.shape)
z = where(c, x, masked)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is masked)
assert_(z[7] is masked)
assert_(z[8] is not masked)
assert_(z[9] is not masked)
assert_(eq(x, z))
z = where(c, masked, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
z = masked_where(c, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
assert_(eq(x, z))
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_(eq(z, [1., 2., 0., -4., -5]))
c[0] = masked
z = where(c, x, -x)
assert_(eq(z, [1., 2., 0., -4., -5]))
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2)))
assert_(eq(masked_where(greater_equal(x, 2), x),
masked_greater_equal(x, 2)))
assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2)))
assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)))
assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2)))
assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]))
assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]))
assert_(eq(masked_inside(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 1, 3).mask,
[1, 1, 1, 1, 0]))
assert_(eq(masked_outside(array(list(range(5)),
mask=[0, 1, 0, 0, 0]), 1, 3).mask,
[1, 1, 0, 0, 1]))
assert_(eq(masked_equal(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 0]))
assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1],
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 1]))
assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
[99, 99, 3, 4, 5]))
atest = ones((10, 10, 10), dtype=np.float32)
btest = zeros(atest.shape, MaskType)
ctest = masked_where(btest, atest)
assert_(eq(atest, ctest))
z = choose(c, (-x, x))
assert_(eq(z, [1., 2., 0., -4., -5]))
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
x = arange(6)
x[5] = masked
y = arange(6) * 10
y[2] = masked
c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0])
cm = c.filled(1)
z = where(c, x, y)
zm = where(cm, x, y)
assert_(eq(z, zm))
assert_(getmask(zm) is nomask)
assert_(eq(zm, [0, 1, 2, 30, 40, 50]))
z = where(c, masked, 1)
assert_(eq(z, [99, 99, 99, 1, 1, 1]))
z = where(c, 1, masked)
assert_(eq(z, [99, 1, 1, 99, 99, 99]))
def test_testMinMax2(self):
# Test of minimum, maximum.
assert_(eq(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3]))
assert_(eq(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9]))
x = arange(5)
y = arange(5) - 2
x[3] = masked
y[0] = masked
assert_(eq(minimum(x, y), where(less(x, y), x, y)))
assert_(eq(maximum(x, y), where(greater(x, y), x, y)))
assert_(minimum.reduce(x) == 0)
assert_(maximum.reduce(x) == 4)
def test_testTakeTransposeInnerOuter(self):
# Test of take, transpose, inner, outer products
x = arange(24)
y = np.arange(24)
x[5:6] = masked
x = x.reshape(2, 3, 4)
y = y.reshape(2, 3, 4)
assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))))
assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)))
assert_(eq(np.inner(filled(x, 0), filled(y, 0)),
inner(x, y)))
assert_(eq(np.outer(filled(x, 0), filled(y, 0)),
outer(x, y)))
y = array(['abc', 1, 'def', 2, 3], object)
y[2] = masked
t = take(y, [0, 3, 4])
assert_(t[0] == 'abc')
assert_(t[1] == 2)
assert_(t[2] == 3)
def test_testInplace(self):
# Test of inplace operations and rich comparisons
y = arange(10)
x = arange(10)
xm = arange(10)
xm[2] = masked
x += 1
assert_(eq(x, y + 1))
xm += 1
        assert_(eq(xm, y + 1))
x = arange(10)
xm = arange(10)
xm[2] = masked
x -= 1
assert_(eq(x, y - 1))
xm -= 1
assert_(eq(xm, y - 1))
x = arange(10) * 1.0
xm = arange(10) * 1.0
xm[2] = masked
x *= 2.0
assert_(eq(x, y * 2))
xm *= 2.0
assert_(eq(xm, y * 2))
x = arange(10) * 2
        xm = arange(10) * 2
xm[2] = masked
x //= 2
assert_(eq(x, y))
xm //= 2
        assert_(eq(xm, y))
x = arange(10) * 1.0
xm = arange(10) * 1.0
xm[2] = masked
x /= 2.0
assert_(eq(x, y / 2.0))
xm /= arange(10)
assert_(eq(xm, ones((10,))))
x = arange(10).astype(np.float32)
xm = arange(10)
xm[2] = masked
x += 1.
assert_(eq(x, y + 1.))
def test_testPickle(self):
# Test of pickling
x = arange(12)
x[4:10:2] = masked
x = x.reshape(4, 3)
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
s = pickle.dumps(x, protocol=proto)
y = pickle.loads(s)
assert_(eq(x, y))
def test_testMasked(self):
# Test of masked element
xx = arange(6)
xx[1] = masked
assert_(str(masked) == '--')
assert_(xx[1] is masked)
assert_equal(filled(xx[1], 0), 0)
def test_testAverage1(self):
# Test of average.
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
assert_(eq(2.0, average(ott, axis=0)))
assert_(eq(2.0, average(ott, weights=[1., 1., 2., 1.])))
result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True)
assert_(eq(2.0, result))
assert_(wts == 4.0)
ott[:] = masked
assert_(average(ott, axis=0) is masked)
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
ott = ott.reshape(2, 2)
ott[:, 1] = masked
assert_(eq(average(ott, axis=0), [2.0, 0.0]))
assert_(average(ott, axis=1)[0] is masked)
assert_(eq([2., 0.], average(ott, axis=0)))
result, wts = average(ott, axis=0, returned=True)
assert_(eq(wts, [1., 0.]))
def test_testAverage2(self):
# More tests of average.
w1 = [0, 1, 1, 1, 1, 0]
w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
x = arange(6)
assert_(allclose(average(x, axis=0), 2.5))
assert_(allclose(average(x, axis=0, weights=w1), 2.5))
y = array([arange(6), 2.0 * arange(6)])
assert_(allclose(average(y, None),
np.add.reduce(np.arange(6)) * 3. / 12.))
assert_(allclose(average(y, axis=0), np.arange(6) * 3. / 2.))
assert_(allclose(average(y, axis=1),
[average(x, axis=0), average(x, axis=0)*2.0]))
assert_(allclose(average(y, None, weights=w2), 20. / 6.))
assert_(allclose(average(y, axis=0, weights=w2),
[0., 1., 2., 3., 4., 10.]))
m1 = zeros(6)
m2 = [0, 0, 1, 1, 0, 0]
m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]
m4 = ones(6)
m5 = [0, 1, 1, 1, 1, 1]
assert_(allclose(average(masked_array(x, m1), axis=0), 2.5))
assert_(allclose(average(masked_array(x, m2), axis=0), 2.5))
assert_(average(masked_array(x, m4), axis=0) is masked)
assert_equal(average(masked_array(x, m5), axis=0), 0.0)
assert_equal(count(average(masked_array(x, m4), axis=0)), 0)
z = masked_array(y, m3)
assert_(allclose(average(z, None), 20. / 6.))
assert_(allclose(average(z, axis=0),
[0., 1., 99., 99., 4.0, 7.5]))
assert_(allclose(average(z, axis=1), [2.5, 5.0]))
assert_(allclose(average(z, axis=0, weights=w2),
[0., 1., 99., 99., 4.0, 10.0]))
a = arange(6)
b = arange(6) * 3
r1, w1 = average([[a, b], [b, a]], axis=1, returned=True)
assert_equal(shape(r1), shape(w1))
assert_equal(r1.shape, w1.shape)
r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True)
assert_equal(shape(w2), shape(r2))
r2, w2 = average(ones((2, 2, 3)), returned=True)
assert_equal(shape(w2), shape(r2))
r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True)
assert_(shape(w2) == shape(r2))
a2d = array([[1, 2], [0, 4]], float)
a2dm = masked_array(a2d, [[0, 0], [1, 0]])
a2da = average(a2d, axis=0)
assert_(eq(a2da, [0.5, 3.0]))
a2dma = average(a2dm, axis=0)
assert_(eq(a2dma, [1.0, 3.0]))
a2dma = average(a2dm, axis=None)
assert_(eq(a2dma, 7. / 3.))
a2dma = average(a2dm, axis=1)
assert_(eq(a2dma, [1.5, 4.0]))
def test_testToPython(self):
assert_equal(1, int(array(1)))
assert_equal(1.0, float(array(1)))
assert_equal(1, int(array([[[1]]])))
assert_equal(1.0, float(array([[1]])))
assert_raises(TypeError, float, array([1, 1]))
assert_raises(ValueError, bool, array([0, 1]))
assert_raises(ValueError, bool, array([0, 0], mask=[0, 1]))
def test_testScalarArithmetic(self):
xm = array(0, mask=1)
        # TODO FIXME: Find out why the following raises a warning in r8247
with np.errstate(divide='ignore'):
assert_((1 / array(0)).mask)
assert_((1 + xm).mask)
assert_((-xm).mask)
assert_(maximum(xm, xm).mask)
assert_(minimum(xm, xm).mask)
assert_(xm.filled().dtype is xm._data.dtype)
x = array(0, mask=0)
assert_(x.filled() == x._data)
assert_equal(str(xm), str(masked_print_option))
def test_testArrayMethods(self):
a = array([1, 3, 2])
assert_(eq(a.any(), a._data.any()))
assert_(eq(a.all(), a._data.all()))
assert_(eq(a.argmax(), a._data.argmax()))
assert_(eq(a.argmin(), a._data.argmin()))
assert_(eq(a.choose(0, 1, 2, 3, 4),
a._data.choose(0, 1, 2, 3, 4)))
assert_(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])))
assert_(eq(a.conj(), a._data.conj()))
assert_(eq(a.conjugate(), a._data.conjugate()))
m = array([[1, 2], [3, 4]])
assert_(eq(m.diagonal(), m._data.diagonal()))
assert_(eq(a.sum(), a._data.sum()))
assert_(eq(a.take([1, 2]), a._data.take([1, 2])))
assert_(eq(m.transpose(), m._data.transpose()))
def test_testArrayAttributes(self):
a = array([1, 3, 2])
assert_equal(a.ndim, 1)
def test_testAPI(self):
assert_(not [m for m in dir(np.ndarray)
if m not in dir(MaskedArray) and
not m.startswith('_')])
def test_testSingleElementSubscript(self):
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
assert_equal(a[0].shape, ())
assert_equal(b[0].shape, ())
assert_equal(b[1].shape, ())
def test_assignment_by_condition(self):
# Test for gh-18951
a = array([1, 2, 3, 4], mask=[1, 0, 1, 0])
c = a >= 3
a[c] = 5
assert_(a[2] is masked)
def test_assignment_by_condition_2(self):
# gh-19721
a = masked_array([0, 1], mask=[False, False])
b = masked_array([0, 1], mask=[True, True])
mask = a < 1
b[mask] = a[mask]
expected_mask = [False, True]
assert_equal(b.mask, expected_mask)
class TestUfuncs:
def setup_method(self):
self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
def test_testUfuncRegression(self):
        f_invalid_ignore = [
            'sqrt', 'arctanh', 'arcsin', 'arccos',
            'arccosh', 'log', 'log10', 'divide',
            'true_divide', 'floor_divide', 'remainder', 'fmod']
for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
'sin', 'cos', 'tan',
'arcsin', 'arccos', 'arctan',
'sinh', 'cosh', 'tanh',
'arcsinh',
'arccosh',
'arctanh',
'absolute', 'fabs', 'negative',
'floor', 'ceil',
'logical_not',
'add', 'subtract', 'multiply',
'divide', 'true_divide', 'floor_divide',
'remainder', 'fmod', 'hypot', 'arctan2',
'equal', 'not_equal', 'less_equal', 'greater_equal',
'less', 'greater',
'logical_and', 'logical_or', 'logical_xor']:
try:
uf = getattr(umath, f)
except AttributeError:
uf = getattr(fromnumeric, f)
mf = getattr(np.ma, f)
args = self.d[:uf.nin]
with np.errstate():
if f in f_invalid_ignore:
np.seterr(invalid='ignore')
if f in ['arctanh', 'log', 'log10']:
np.seterr(divide='ignore')
ur = uf(*args)
mr = mf(*args)
assert_(eq(ur.filled(0), mr.filled(0), f))
assert_(eqmask(ur.mask, mr.mask))
def test_reduce(self):
a = self.d[0]
assert_(not alltrue(a, axis=0))
assert_(sometrue(a, axis=0))
assert_equal(sum(a[:3], axis=0), 0)
assert_equal(product(a, axis=0), 0)
def test_minmax(self):
a = arange(1, 13).reshape(3, 4)
amask = masked_where(a < 5, a)
assert_equal(amask.max(), a.max())
assert_equal(amask.min(), 5)
assert_((amask.max(0) == a.max(0)).all())
assert_((amask.min(0) == [5, 6, 7, 8]).all())
assert_(amask.max(1)[0].mask)
assert_(amask.min(1)[0].mask)
def test_nonzero(self):
for t in "?bhilqpBHILQPfdgFDGO":
            # exercise each typecode; `t` was previously unused here
            x = array([1, 0, 2, 0], mask=[0, 0, 1, 1], dtype=t)
assert_(eq(nonzero(x), [0]))
class TestArrayMethods:
def setup_method(self):
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX)
def test_trace(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
mXdiag = mX.diagonal()
assert_equal(mX.trace(), mX.diagonal().compressed().sum())
assert_(eq(mX.trace(),
X.trace() - sum(mXdiag.mask * X.diagonal(),
axis=0)))
def test_clip(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
clipped = mx.clip(2, 8)
assert_(eq(clipped.mask, mx.mask))
assert_(eq(clipped._data, x.clip(2, 8)))
assert_(eq(clipped._data, mx._data.clip(2, 8)))
def test_ptp(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
(n, m) = X.shape
assert_equal(mx.ptp(), mx.compressed().ptp())
rows = np.zeros(n, np.float_)
cols = np.zeros(m, np.float_)
for k in range(m):
cols[k] = mX[:, k].compressed().ptp()
for k in range(n):
rows[k] = mX[k].compressed().ptp()
assert_(eq(mX.ptp(0), cols))
assert_(eq(mX.ptp(1), rows))
def test_swapaxes(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
mXswapped = mX.swapaxes(0, 1)
assert_(eq(mXswapped[-1], mX[:, -1]))
mXXswapped = mXX.swapaxes(0, 2)
assert_equal(mXXswapped.shape, (2, 2, 3, 3))
def test_cumprod(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
mXcp = mX.cumprod(0)
assert_(eq(mXcp._data, mX.filled(1).cumprod(0)))
mXcp = mX.cumprod(1)
assert_(eq(mXcp._data, mX.filled(1).cumprod(1)))
def test_cumsum(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
mXcp = mX.cumsum(0)
assert_(eq(mXcp._data, mX.filled(0).cumsum(0)))
mXcp = mX.cumsum(1)
assert_(eq(mXcp._data, mX.filled(0).cumsum(1)))
def test_varstd(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
assert_(eq(mX.var(axis=None), mX.compressed().var()))
assert_(eq(mX.std(axis=None), mX.compressed().std()))
assert_(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape))
assert_(eq(mX.var().shape, X.var().shape))
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
for k in range(6):
assert_(eq(mXvar1[k], mX[k].compressed().var()))
assert_(eq(mXvar0[k], mX[:, k].compressed().var()))
assert_(eq(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std()))
def eqmask(m1, m2):
if m1 is nomask:
return m2 is nomask
if m2 is nomask:
return m1 is nomask
return (m1 == m2).all()
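# Illustrative sketch (an addition, not part of the upstream test module): the
# where/choose assertions above rely on np.ma.where masking a result entry
# whenever the condition or the selected value is masked. The helper name
# `_demo_ma_where` is hypothetical.
def _demo_ma_where():
    import numpy as np
    x = np.ma.array([1., 2., 3.], mask=[0, 1, 0])
    c = np.ma.array([True, True, False], mask=[1, 0, 0])
    z = np.ma.where(c, x, -x)
    # index 0: masked condition -> masked; index 1: masked x -> masked;
    # index 2: condition False selects -x[2] == -3.0, unmasked.
    assert z.mask.tolist() == [True, True, False]
    assert z[2] == -3.0
    return z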
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/ma/tests/test_subclassing.py | # pylint: disable-msg=W0611, W0612, W0511,R0201
"""Tests suite for MaskedArray & subclassing.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
:version: $Id: test_subclassing.py 3473 2007-10-29 15:18:13Z jarrod.millman $
"""
import numpy as np
from numpy.lib.mixins import NDArrayOperatorsMixin
from numpy.testing import assert_, assert_raises
from numpy.ma.testutils import assert_equal
from numpy.ma.core import (
array, arange, masked, MaskedArray, masked_array, log, add, hypot,
divide, asarray, asanyarray, nomask
)
def assert_startswith(a, b):
# produces a better error message than assert_(a.startswith(b))
assert_equal(a[:len(b)], b)
class SubArray(np.ndarray):
# Defines a generic np.ndarray subclass, that stores some metadata
# in the dictionary `info`.
    def __new__(cls, arr, info={}):
x = np.asanyarray(arr).view(cls)
x.info = info.copy()
return x
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
self.info = getattr(obj, 'info', {}).copy()
return
def __add__(self, other):
result = super().__add__(other)
result.info['added'] = result.info.get('added', 0) + 1
return result
def __iadd__(self, other):
result = super().__iadd__(other)
result.info['iadded'] = result.info.get('iadded', 0) + 1
return result
subarray = SubArray
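# A minimal usage sketch (illustrative addition, hypothetical helper name):
# `info` survives ufunc-created results via __array_finalize__, and __add__
# counts how often it ran.
def _demo_subarray_info():
    import numpy as np
    s = SubArray(np.arange(3), info={'name': 'demo'})
    t = s + s
    assert t.info['name'] == 'demo'
    assert t.info['added'] == 1
    return t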
class SubMaskedArray(MaskedArray):
"""Pure subclass of MaskedArray, keeping some info on subclass."""
def __new__(cls, info=None, **kwargs):
obj = super().__new__(cls, **kwargs)
obj._optinfo['info'] = info
return obj
class MSubArray(SubArray, MaskedArray):
def __new__(cls, data, info={}, mask=nomask):
subarr = SubArray(data, info)
_data = MaskedArray.__new__(cls, data=subarr, mask=mask)
_data.info = subarr.info
return _data
@property
def _series(self):
_view = self.view(MaskedArray)
_view._sharedmask = False
return _view
msubarray = MSubArray
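# Hedged sketch (illustrative addition): MSubArray layers MaskedArray masking
# on top of SubArray metadata, which the attribute-propagation tests below
# exercise.
def _demo_msubarray():
    import numpy as np
    m = MSubArray(np.arange(4), info={'tag': 'demo'}, mask=[0, 1, 0, 0])
    assert m.info['tag'] == 'demo'
    assert m.mask[1] and not m.mask[0]
    return m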
# Also a subclass that overrides __str__, __repr__ and __setitem__, disallowing
# setting to non-class values (and thus np.ma.core.masked_print_option)
# and overrides __array_wrap__, updating the info dict, to check that this
# doesn't get destroyed by MaskedArray._update_from. But this one also needs
# its own iterator...
class CSAIterator:
"""
Flat iterator object that uses its own setter/getter
    (works around ndarray.flat not propagating subclass setters/getters,
see https://github.com/numpy/numpy/issues/4564)
roughly following MaskedIterator
"""
def __init__(self, a):
self._original = a
self._dataiter = a.view(np.ndarray).flat
def __iter__(self):
return self
def __getitem__(self, indx):
out = self._dataiter.__getitem__(indx)
if not isinstance(out, np.ndarray):
out = out.__array__()
out = out.view(type(self._original))
return out
def __setitem__(self, index, value):
self._dataiter[index] = self._original._validate_input(value)
def __next__(self):
return next(self._dataiter).__array__().view(type(self._original))
class ComplicatedSubArray(SubArray):
def __str__(self):
return f'myprefix {self.view(SubArray)} mypostfix'
def __repr__(self):
# Return a repr that does not start with 'name('
return f'<{self.__class__.__name__} {self}>'
def _validate_input(self, value):
if not isinstance(value, ComplicatedSubArray):
raise ValueError("Can only set to MySubArray values")
return value
def __setitem__(self, item, value):
# validation ensures direct assignment with ndarray or
# masked_print_option will fail
super().__setitem__(item, self._validate_input(value))
def __getitem__(self, item):
# ensure getter returns our own class also for scalars
value = super().__getitem__(item)
if not isinstance(value, np.ndarray): # scalar
value = value.__array__().view(ComplicatedSubArray)
return value
@property
def flat(self):
return CSAIterator(self)
@flat.setter
def flat(self, value):
y = self.ravel()
y[:] = value
def __array_wrap__(self, obj, context=None):
obj = super().__array_wrap__(obj, context)
if context is not None and context[0] is np.multiply:
obj.info['multiplied'] = obj.info.get('multiplied', 0) + 1
return obj
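# Illustrative sketch (hypothetical helper) of the assignment contract
# enforced above: only ComplicatedSubArray values pass _validate_input, so
# plain scalars are rejected on __setitem__.
def _demo_csa_setitem():
    import numpy as np
    c = ComplicatedSubArray(np.arange(4))
    try:
        c[0] = 99  # a plain int is rejected by _validate_input
    except ValueError:
        pass
    c[0] = c[3]  # an element of the same subclass is accepted
    assert int(c[0]) == 3
    return c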
class WrappedArray(NDArrayOperatorsMixin):
"""
Wrapping a MaskedArray rather than subclassing to test that
ufunc deferrals are commutative.
    See: https://github.com/numpy/numpy/issues/15200
"""
__array_priority__ = 20
def __init__(self, array, **attrs):
self._array = array
self.attrs = attrs
def __repr__(self):
return f"{self.__class__.__name__}(\n{self._array}\n{self.attrs}\n)"
def __array__(self):
return np.asarray(self._array)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
if method == '__call__':
inputs = [arg._array if isinstance(arg, self.__class__) else arg
for arg in inputs]
return self.__class__(ufunc(*inputs, **kwargs), **self.attrs)
else:
return NotImplemented
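# Hedged sketch (illustrative addition): with __array_priority__ = 20 and an
# __array_ufunc__ that unwraps its own instances, ufuncs defer to
# WrappedArray from either operand position; TestClassWrapping below checks
# the same property.
def _demo_wrapped_deferral():
    import numpy as np
    m = np.ma.masked_array([1., 2.], mask=[False, True])
    w = WrappedArray(m, unit='m')
    assert isinstance(np.add(w, m), WrappedArray)
    assert isinstance(np.add(m, w), WrappedArray)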
class TestSubclassing:
# Test suite for masked subclasses of ndarray.
def setup_method(self):
x = np.arange(5, dtype='float')
mx = msubarray(x, mask=[0, 1, 0, 0, 0])
self.data = (x, mx)
def test_data_subclassing(self):
# Tests whether the subclass is kept.
x = np.arange(5)
m = [0, 0, 1, 0, 0]
xsub = SubArray(x)
xmsub = masked_array(xsub, mask=m)
assert_(isinstance(xmsub, MaskedArray))
assert_equal(xmsub._data, xsub)
assert_(isinstance(xmsub._data, SubArray))
def test_maskedarray_subclassing(self):
# Tests subclassing MaskedArray
(x, mx) = self.data
assert_(isinstance(mx._data, subarray))
def test_masked_unary_operations(self):
# Tests masked_unary_operation
(x, mx) = self.data
with np.errstate(divide='ignore'):
assert_(isinstance(log(mx), msubarray))
assert_equal(log(x), np.log(x))
def test_masked_binary_operations(self):
# Tests masked_binary_operation
(x, mx) = self.data
# Result should be a msubarray
assert_(isinstance(add(mx, mx), msubarray))
assert_(isinstance(add(mx, x), msubarray))
# Result should work
assert_equal(add(mx, x), mx+x)
assert_(isinstance(add(mx, mx)._data, subarray))
assert_(isinstance(add.outer(mx, mx), msubarray))
assert_(isinstance(hypot(mx, mx), msubarray))
assert_(isinstance(hypot(mx, x), msubarray))
def test_masked_binary_operations2(self):
# Tests domained_masked_binary_operation
(x, mx) = self.data
xmx = masked_array(mx.data.__array__(), mask=mx.mask)
assert_(isinstance(divide(mx, mx), msubarray))
assert_(isinstance(divide(mx, x), msubarray))
assert_equal(divide(mx, mx), divide(xmx, xmx))
def test_attributepropagation(self):
x = array(arange(5), mask=[0]+[1]*4)
my = masked_array(subarray(x))
ym = msubarray(x)
#
z = (my+1)
assert_(isinstance(z, MaskedArray))
assert_(not isinstance(z, MSubArray))
assert_(isinstance(z._data, SubArray))
assert_equal(z._data.info, {})
#
z = (ym+1)
assert_(isinstance(z, MaskedArray))
assert_(isinstance(z, MSubArray))
assert_(isinstance(z._data, SubArray))
assert_(z._data.info['added'] > 0)
# Test that inplace methods from data get used (gh-4617)
ym += 1
assert_(isinstance(ym, MaskedArray))
assert_(isinstance(ym, MSubArray))
assert_(isinstance(ym._data, SubArray))
assert_(ym._data.info['iadded'] > 0)
#
ym._set_mask([1, 0, 0, 0, 1])
assert_equal(ym._mask, [1, 0, 0, 0, 1])
ym._series._set_mask([0, 0, 0, 0, 1])
assert_equal(ym._mask, [0, 0, 0, 0, 1])
#
xsub = subarray(x, info={'name':'x'})
mxsub = masked_array(xsub)
assert_(hasattr(mxsub, 'info'))
assert_equal(mxsub.info, xsub.info)
def test_subclasspreservation(self):
# Checks that masked_array(...,subok=True) preserves the class.
x = np.arange(5)
m = [0, 0, 1, 0, 0]
xinfo = [(i, j) for (i, j) in zip(x, m)]
xsub = MSubArray(x, mask=m, info={'xsub':xinfo})
#
mxsub = masked_array(xsub, subok=False)
assert_(not isinstance(mxsub, MSubArray))
assert_(isinstance(mxsub, MaskedArray))
assert_equal(mxsub._mask, m)
#
mxsub = asarray(xsub)
assert_(not isinstance(mxsub, MSubArray))
assert_(isinstance(mxsub, MaskedArray))
assert_equal(mxsub._mask, m)
#
mxsub = masked_array(xsub, subok=True)
assert_(isinstance(mxsub, MSubArray))
assert_equal(mxsub.info, xsub.info)
assert_equal(mxsub._mask, xsub._mask)
#
mxsub = asanyarray(xsub)
assert_(isinstance(mxsub, MSubArray))
assert_equal(mxsub.info, xsub.info)
assert_equal(mxsub._mask, m)
def test_subclass_items(self):
"""test that getter and setter go via baseclass"""
x = np.arange(5)
xcsub = ComplicatedSubArray(x)
mxcsub = masked_array(xcsub, mask=[True, False, True, False, False])
# getter should return a ComplicatedSubArray, even for single item
# first check we wrote ComplicatedSubArray correctly
assert_(isinstance(xcsub[1], ComplicatedSubArray))
assert_(isinstance(xcsub[1,...], ComplicatedSubArray))
assert_(isinstance(xcsub[1:4], ComplicatedSubArray))
        # now check that it propagates inside the MaskedArray
assert_(isinstance(mxcsub[1], ComplicatedSubArray))
assert_(isinstance(mxcsub[1,...].data, ComplicatedSubArray))
assert_(mxcsub[0] is masked)
assert_(isinstance(mxcsub[0,...].data, ComplicatedSubArray))
assert_(isinstance(mxcsub[1:4].data, ComplicatedSubArray))
# also for flattened version (which goes via MaskedIterator)
assert_(isinstance(mxcsub.flat[1].data, ComplicatedSubArray))
assert_(mxcsub.flat[0] is masked)
assert_(isinstance(mxcsub.flat[1:4].base, ComplicatedSubArray))
# setter should only work with ComplicatedSubArray input
# first check we wrote ComplicatedSubArray correctly
assert_raises(ValueError, xcsub.__setitem__, 1, x[4])
        # now check that it propagates inside the MaskedArray
assert_raises(ValueError, mxcsub.__setitem__, 1, x[4])
assert_raises(ValueError, mxcsub.__setitem__, slice(1, 4), x[1:4])
mxcsub[1] = xcsub[4]
mxcsub[1:4] = xcsub[1:4]
# also for flattened version (which goes via MaskedIterator)
assert_raises(ValueError, mxcsub.flat.__setitem__, 1, x[4])
assert_raises(ValueError, mxcsub.flat.__setitem__, slice(1, 4), x[1:4])
mxcsub.flat[1] = xcsub[4]
mxcsub.flat[1:4] = xcsub[1:4]
def test_subclass_nomask_items(self):
x = np.arange(5)
xcsub = ComplicatedSubArray(x)
mxcsub_nomask = masked_array(xcsub)
assert_(isinstance(mxcsub_nomask[1,...].data, ComplicatedSubArray))
assert_(isinstance(mxcsub_nomask[0,...].data, ComplicatedSubArray))
assert_(isinstance(mxcsub_nomask[1], ComplicatedSubArray))
assert_(isinstance(mxcsub_nomask[0], ComplicatedSubArray))
def test_subclass_repr(self):
"""test that repr uses the name of the subclass
and 'array' for np.ndarray"""
x = np.arange(5)
mx = masked_array(x, mask=[True, False, True, False, False])
assert_startswith(repr(mx), 'masked_array')
xsub = SubArray(x)
mxsub = masked_array(xsub, mask=[True, False, True, False, False])
assert_startswith(repr(mxsub),
f'masked_{SubArray.__name__}(data=[--, 1, --, 3, 4]')
def test_subclass_str(self):
"""test str with subclass that has overridden str, setitem"""
# first without override
x = np.arange(5)
xsub = SubArray(x)
mxsub = masked_array(xsub, mask=[True, False, True, False, False])
assert_equal(str(mxsub), '[-- 1 -- 3 4]')
xcsub = ComplicatedSubArray(x)
assert_raises(ValueError, xcsub.__setitem__, 0,
np.ma.core.masked_print_option)
mxcsub = masked_array(xcsub, mask=[True, False, True, False, False])
assert_equal(str(mxcsub), 'myprefix [-- 1 -- 3 4] mypostfix')
def test_pure_subclass_info_preservation(self):
# Test that ufuncs and methods conserve extra information consistently;
# see gh-7122.
arr1 = SubMaskedArray('test', data=[1,2,3,4,5,6])
arr2 = SubMaskedArray(data=[0,1,2,3,4,5])
diff1 = np.subtract(arr1, arr2)
assert_('info' in diff1._optinfo)
assert_(diff1._optinfo['info'] == 'test')
diff2 = arr1 - arr2
assert_('info' in diff2._optinfo)
assert_(diff2._optinfo['info'] == 'test')
class ArrayNoInheritance:
"""Quantity-like class that does not inherit from ndarray"""
def __init__(self, data, units):
self.magnitude = data
self.units = units
def __getattr__(self, attr):
return getattr(self.magnitude, attr)
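# Illustrative sketch (hypothetical helper): attribute access is delegated to
# the wrapped masked array, so the duck-typed wrapper exposes `.mask` without
# inheriting from ndarray.
def _demo_array_no_inheritance():
    data = np.ma.array([1, 2], mask=[True, False])
    q = ArrayNoInheritance(data, 'meters')
    assert q.units == 'meters'  # stored on the wrapper itself
    assert bool(q.mask[0])  # forwarded to the underlying MaskedArray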
def test_array_no_inheritance():
data_masked = np.ma.array([1, 2, 3], mask=[True, False, True])
data_masked_units = ArrayNoInheritance(data_masked, 'meters')
# Get the masked representation of the Quantity-like class
new_array = np.ma.array(data_masked_units)
assert_equal(data_masked.data, new_array.data)
assert_equal(data_masked.mask, new_array.mask)
# Test sharing the mask
data_masked.mask = [True, False, False]
assert_equal(data_masked.mask, new_array.mask)
assert_(new_array.sharedmask)
# Get the masked representation of the Quantity-like class
new_array = np.ma.array(data_masked_units, copy=True)
assert_equal(data_masked.data, new_array.data)
assert_equal(data_masked.mask, new_array.mask)
# Test that the mask is not shared when copy=True
data_masked.mask = [True, False, True]
assert_equal([True, False, False], new_array.mask)
assert_(not new_array.sharedmask)
# Get the masked representation of the Quantity-like class
new_array = np.ma.array(data_masked_units, keep_mask=False)
assert_equal(data_masked.data, new_array.data)
# The change did not affect the original mask
assert_equal(data_masked.mask, [True, False, True])
# Test that the mask is False and not shared when keep_mask=False
assert_(not new_array.mask)
assert_(not new_array.sharedmask)
class TestClassWrapping:
# Test suite for classes that wrap MaskedArrays
def setup_method(self):
m = np.ma.masked_array([1, 3, 5], mask=[False, True, False])
wm = WrappedArray(m)
self.data = (m, wm)
def test_masked_unary_operations(self):
# Tests masked_unary_operation
(m, wm) = self.data
with np.errstate(divide='ignore'):
assert_(isinstance(np.log(wm), WrappedArray))
def test_masked_binary_operations(self):
# Tests masked_binary_operation
(m, wm) = self.data
# Result should be a WrappedArray
assert_(isinstance(np.add(wm, wm), WrappedArray))
assert_(isinstance(np.add(m, wm), WrappedArray))
assert_(isinstance(np.add(wm, m), WrappedArray))
# add and '+' should call the same ufunc
assert_equal(np.add(m, wm), m + wm)
assert_(isinstance(np.hypot(m, wm), WrappedArray))
assert_(isinstance(np.hypot(wm, m), WrappedArray))
# Test domained binary operations
assert_(isinstance(np.divide(wm, m), WrappedArray))
assert_(isinstance(np.divide(m, wm), WrappedArray))
assert_equal(np.divide(wm, m) * m, np.divide(m, m) * wm)
# Test broadcasting
m2 = np.stack([m, m])
assert_(isinstance(np.divide(wm, m2), WrappedArray))
assert_(isinstance(np.divide(m2, wm), WrappedArray))
assert_equal(np.divide(m2, wm), np.divide(wm, m2))
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/testing/print_coercion_tables.py | #!/usr/bin/env python3
"""Prints type-coercion tables for the built-in NumPy types
"""
import numpy as np
from collections import namedtuple
# Generic object that can be added, but doesn't do anything else
class GenericObject:
def __init__(self, v):
self.v = v
def __add__(self, other):
return self
def __radd__(self, other):
return self
dtype = np.dtype('O')
def print_cancast_table(ntypes):
print('X', end=' ')
for char in ntypes:
print(char, end=' ')
print()
for row in ntypes:
print(row, end=' ')
for col in ntypes:
if np.can_cast(row, col, "equiv"):
cast = "#"
elif np.can_cast(row, col, "safe"):
cast = "="
elif np.can_cast(row, col, "same_kind"):
cast = "~"
elif np.can_cast(row, col, "unsafe"):
cast = "."
else:
cast = " "
print(cast, end=' ')
print()
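# Small usage sketch (illustrative addition): the symbols in the table above
# come from np.can_cast evaluated under the four casting rules.
def _demo_can_cast():
    assert np.can_cast('i4', 'i8', 'safe')  # rendered as '='
    assert np.can_cast('i8', 'i4', 'same_kind')  # rendered as '~'
    assert not np.can_cast('f8', 'i4', 'same_kind')
    assert np.can_cast('f8', 'i4', 'unsafe')  # rendered as '.'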
def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, use_promote_types=False):
print('+', end=' ')
for char in ntypes:
print(char, end=' ')
print()
for row in ntypes:
if row == 'O':
rowtype = GenericObject
else:
rowtype = np.obj2sctype(row)
print(row, end=' ')
for col in ntypes:
if col == 'O':
coltype = GenericObject
else:
coltype = np.obj2sctype(col)
try:
if firstarray:
rowvalue = np.array([rowtype(inputfirstvalue)], dtype=rowtype)
else:
rowvalue = rowtype(inputfirstvalue)
colvalue = coltype(inputsecondvalue)
if use_promote_types:
char = np.promote_types(rowvalue.dtype, colvalue.dtype).char
else:
value = np.add(rowvalue, colvalue)
if isinstance(value, np.ndarray):
char = value.dtype.char
else:
char = np.dtype(type(value)).char
except ValueError:
char = '!'
except OverflowError:
char = '@'
except TypeError:
char = '#'
print(char, end=' ')
print()
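# Companion sketch (illustrative addition): with use_promote_types=True each
# table cell reduces to np.promote_types on the two dtypes.
def _demo_promote_types():
    assert np.promote_types('i4', 'f4') == np.dtype('f8')
    assert np.promote_types('u1', 'i1') == np.dtype('i2')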
def print_new_cast_table(*, can_cast=True, legacy=False, flags=False):
"""Prints new casts, the values given are default "can-cast" values, not
actual ones.
"""
from numpy.core._multiarray_tests import get_all_cast_information
cast_table = {
-1: " ",
0: "#", # No cast (classify as equivalent here)
1: "#", # equivalent casting
2: "=", # safe casting
3: "~", # same-kind casting
4: ".", # unsafe casting
}
flags_table = {
0 : "▗", 7: "█",
1: "▚", 2: "▐", 4: "▄",
3: "▜", 5: "▙",
6: "▟",
}
cast_info = namedtuple("cast_info", ["can_cast", "legacy", "flags"])
no_cast_info = cast_info(" ", " ", " ")
casts = get_all_cast_information()
table = {}
dtypes = set()
for cast in casts:
dtypes.add(cast["from"])
dtypes.add(cast["to"])
if cast["from"] not in table:
table[cast["from"]] = {}
to_dict = table[cast["from"]]
can_cast = cast_table[cast["casting"]]
legacy = "L" if cast["legacy"] else "."
flags = 0
if cast["requires_pyapi"]:
flags |= 1
if cast["supports_unaligned"]:
flags |= 2
if cast["no_floatingpoint_errors"]:
flags |= 4
flags = flags_table[flags]
to_dict[cast["to"]] = cast_info(can_cast=can_cast, legacy=legacy, flags=flags)
# The np.dtype(x.type) is a bit strange, because dtype classes do
# not expose much yet.
types = np.typecodes["All"]
def sorter(x):
        # This is a bit of a weird hack, to get a table as close as possible to
# the one printing all typecodes (but expecting user-dtypes).
dtype = np.dtype(x.type)
try:
indx = types.index(dtype.char)
except ValueError:
indx = np.inf
return (indx, dtype.char)
dtypes = sorted(dtypes, key=sorter)
def print_table(field="can_cast"):
print('X', end=' ')
for dt in dtypes:
print(np.dtype(dt.type).char, end=' ')
print()
for from_dt in dtypes:
print(np.dtype(from_dt.type).char, end=' ')
row = table.get(from_dt, {})
for to_dt in dtypes:
print(getattr(row.get(to_dt, no_cast_info), field), end=' ')
print()
if can_cast:
# Print the actual table:
print()
print("Casting: # is equivalent, = is safe, ~ is same-kind, and . is unsafe")
print()
print_table("can_cast")
if legacy:
print()
print("L denotes a legacy cast . a non-legacy one.")
print()
print_table("legacy")
if flags:
print()
print(f"{flags_table[0]}: no flags, {flags_table[1]}: PyAPI, "
f"{flags_table[2]}: supports unaligned, {flags_table[4]}: no-float-errors")
print()
print_table("flags")
if __name__ == '__main__':
print("can cast")
print_cancast_table(np.typecodes['All'])
print()
print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'")
print()
print("scalar + scalar")
print_coercion_table(np.typecodes['All'], 0, 0, False)
print()
print("scalar + neg scalar")
print_coercion_table(np.typecodes['All'], 0, -1, False)
print()
print("array + scalar")
print_coercion_table(np.typecodes['All'], 0, 0, True)
print()
print("array + neg scalar")
print_coercion_table(np.typecodes['All'], 0, -1, True)
print()
print("promote_types")
print_coercion_table(np.typecodes['All'], 0, 0, False, True)
print("New casting type promotion:")
print_new_cast_table(can_cast=True, legacy=True, flags=True)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/testing/__init__.py | """Common test support for all numpy test scripts.
This single module should provide all the common functionality for numpy tests
in a single location, so that test scripts can just import it and work right
away.
"""
from unittest import TestCase
from ._private.utils import *
from ._private.utils import (_assert_valid_refcount, _gen_alignment_data)
from ._private import extbuild, decorators as dec
from ._private.nosetester import (
run_module_suite, NoseTester as Tester
)
__all__ = _private.utils.__all__ + ['TestCase', 'run_module_suite']
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/testing/utils.py | """
Back compatibility utils module. It will import the appropriate
set of tools
"""
import warnings
# 2018-04-04, numpy 1.15.0 ImportWarning
# 2019-09-18, numpy 1.18.0 DeprecationWarning (changed)
warnings.warn("Importing from numpy.testing.utils is deprecated "
"since 1.15.0, import from numpy.testing instead.",
DeprecationWarning, stacklevel=2)
from ._private.utils import *
from ._private.utils import _assert_valid_refcount, _gen_alignment_data
__all__ = [
'assert_equal', 'assert_almost_equal', 'assert_approx_equal',
'assert_array_equal', 'assert_array_less', 'assert_string_equal',
'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
'raises', 'rundocs', 'runstring', 'verbose', 'measure',
'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
'assert_no_gc_cycles'
]
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/testing/_private/noseclasses.py | # These classes implement a doctest runner plugin for nose, a "known failure"
# error class, and a customized TestProgram for NumPy.
# Because this module imports nose directly, it should not
# be used except by nosetester.py to avoid a general NumPy
# dependency on nose.
import os
import sys
import doctest
import inspect
import numpy
import nose
from nose.plugins import doctests as npd
from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
from nose.plugins.base import Plugin
from nose.util import src
from .nosetester import get_package_name
from .utils import KnownFailureException, KnownFailureTest
# Some of the classes in this module begin with 'Numpy' to clearly distinguish
# them from the plethora of very similar names from nose/unittest/doctest
#-----------------------------------------------------------------------------
# Modified version of the one in the stdlib, that fixes a python bug (doctests
# not found in extension modules, https://bugs.python.org/issue3158)
class NumpyDocTestFinder(doctest.DocTestFinder):
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
return True
elif inspect.isfunction(object):
return module.__dict__ is object.__globals__
elif inspect.isbuiltin(object):
return module.__name__ == object.__module__
elif inspect.isclass(object):
return module.__name__ == object.__module__
elif inspect.ismethod(object):
            # This one may be due to a bug in cython that fails to correctly
            # set the __module__ attribute of methods; but since the same
            # error is easy for extension code writers to make, having this
            # safety in place isn't such a bad idea
return module.__name__ == object.__self__.__class__.__module__
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
            return True  # [XX] no way to be sure.
else:
raise ValueError("object must be a class or function")
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
doctest.DocTestFinder._find(self, tests, obj, name, module,
source_lines, globs, seen)
# Below we re-run pieces of the above method with manual modifications,
# because the original code is buggy and fails to correctly identify
# doctests in extension modules.
# Local shorthands
from inspect import (
isroutine, isclass, ismodule, isfunction, ismethod
)
# Look for tests in a module's contained objects.
if ismodule(obj) and self._recurse:
for valname, val in obj.__dict__.items():
valname1 = f'{name}.{valname}'
if ( (isroutine(val) or isclass(val))
and self._from_module(module, val)):
self._find(tests, val, valname1, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if isclass(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).__func__
# Recurse to methods, properties, and nested classes.
if ((isfunction(val) or isclass(val) or
ismethod(val) or isinstance(val, property)) and
self._from_module(module, val)):
valname = f'{name}.{valname}'
self._find(tests, val, valname, module, source_lines,
globs, seen)
# second-chance checker; if the default comparison doesn't
# pass, then see if the expected output string contains flags that
# tell us to ignore the output
class NumpyOutputChecker(doctest.OutputChecker):
def check_output(self, want, got, optionflags):
ret = doctest.OutputChecker.check_output(self, want, got,
optionflags)
if not ret:
if "#random" in want:
return True
# it would be useful to normalize endianness so that
# bigendian machines don't fail all the tests (and there are
# actually some bigendian examples in the doctests). Let's try
# making them all little endian
got = got.replace("'>", "'<")
want = want.replace("'>", "'<")
# try to normalize out 32 and 64 bit default int sizes
for sz in [4, 8]:
got = got.replace("'<i%d'" % sz, "int")
want = want.replace("'<i%d'" % sz, "int")
ret = doctest.OutputChecker.check_output(self, want,
got, optionflags)
return ret
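# Hedged illustration (hypothetical helper) of the "#random" escape
# implemented above: any expected output containing "#random" is accepted
# regardless of what the example actually printed.
def _demo_output_checker():
    checker = NumpyOutputChecker()
    assert checker.check_output("0.123  #random", "0.999\n", doctest.ELLIPSIS)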
# Subclass nose.plugins.doctests.DocTestCase to work around a bug in
# its constructor that blocks non-default arguments from being passed
# down into doctest.DocTestCase
class NumpyDocTestCase(npd.DocTestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None, obj=None, result_var='_'):
self._result_var = result_var
self._nose_obj = obj
doctest.DocTestCase.__init__(self, test,
optionflags=optionflags,
setUp=setUp, tearDown=tearDown,
checker=checker)
print_state = numpy.get_printoptions()
class NumpyDoctest(npd.Doctest):
name = 'numpydoctest' # call nosetests with --with-numpydoctest
score = 1000 # load late, after doctest builtin
# always use whitespace and ellipsis options for doctests
doctest_optflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
# files that should be ignored for doctests
doctest_ignore = ['generate_numpy_api.py',
'setup.py']
# Custom classes; class variables to allow subclassing
doctest_case_class = NumpyDocTestCase
out_check_class = NumpyOutputChecker
test_finder_class = NumpyDocTestFinder
# Don't use the standard doctest option handler; hard-code the option values
def options(self, parser, env=os.environ):
Plugin.options(self, parser, env)
# Test doctests in 'test' files / directories. Standard plugin default
# is False
self.doctest_tests = True
# Variable name; if defined, doctest results stored in this variable in
# the top-level namespace. None is the standard default
self.doctest_result_var = None
def configure(self, options, config):
# parent method sets enabled flag from command line --with-numpydoctest
Plugin.configure(self, options, config)
self.finder = self.test_finder_class()
self.parser = doctest.DocTestParser()
if self.enabled:
# Pull standard doctest out of plugin list; there's no reason to run
# both. In practice the Unplugger plugin above would cover us when
# run from a standard numpy.test() call; this is just in case
# someone wants to run our plugin outside the numpy.test() machinery
config.plugins.plugins = [p for p in config.plugins.plugins
if p.name != 'doctest']
def set_test_context(self, test):
""" Configure `test` object to set test context
We set the numpy / scipy standard doctest namespace
Parameters
----------
test : test object
with ``globs`` dictionary defining namespace
Returns
-------
None
Notes
-----
`test` object modified in place
"""
# set the namespace for tests
pkg_name = get_package_name(os.path.dirname(test.filename))
# Each doctest should execute in an environment equivalent to
# starting Python and executing "import numpy as np", and,
# for SciPy packages, an additional import of the local
# package (so that scipy.linalg.basic.py's doctests have an
# implicit "from scipy import linalg" as well).
#
# Note: __file__ allows the doctest in NoseTester to run
# without producing an error
test.globs = {'__builtins__':__builtins__,
'__file__':'__main__',
'__name__':'__main__',
'np':numpy}
# add appropriate scipy import for SciPy tests
if 'scipy' in pkg_name:
p = pkg_name.split('.')
p2 = p[-1]
test.globs[p2] = __import__(pkg_name, test.globs, {}, [p2])
# Override test loading to customize test context (with set_test_context
# method), set standard docstring options, and install our own test output
# checker
def loadTestsFromModule(self, module):
if not self.matches(module.__name__):
npd.log.debug("Doctest doesn't want module %s", module)
return
try:
tests = self.finder.find(module)
except AttributeError:
# nose allows module.__test__ = False; doctest does not and
# throws AttributeError
return
if not tests:
return
tests.sort()
module_file = src(module.__file__)
for test in tests:
if not test.examples:
continue
if not test.filename:
test.filename = module_file
# Set test namespace; test altered in place
self.set_test_context(test)
yield self.doctest_case_class(test,
optionflags=self.doctest_optflags,
checker=self.out_check_class(),
result_var=self.doctest_result_var)
# Add an afterContext method to nose.plugins.doctests.Doctest in order
# to restore print options to the original state after each doctest
def afterContext(self):
numpy.set_printoptions(**print_state)
# Ignore NumPy-specific build files that shouldn't be searched for tests
def wantFile(self, file):
bn = os.path.basename(file)
if bn in self.doctest_ignore:
return False
return npd.Doctest.wantFile(self, file)
class Unplugger:
""" Nose plugin to remove named plugin late in loading
By default it removes the "doctest" plugin.
"""
name = 'unplugger'
enabled = True # always enabled
score = 4000 # load late in order to be after builtins
def __init__(self, to_unplug='doctest'):
self.to_unplug = to_unplug
def options(self, parser, env):
pass
def configure(self, options, config):
# Pull named plugin out of plugins list
config.plugins.plugins = [p for p in config.plugins.plugins
if p.name != self.to_unplug]
class KnownFailurePlugin(ErrorClassPlugin):
'''Plugin that installs a KNOWNFAIL error class for the
KnownFailureClass exception. When KnownFailure is raised,
the exception will be logged in the knownfail attribute of the
result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
exception will not be counted as an error or failure.'''
enabled = True
knownfail = ErrorClass(KnownFailureException,
label='KNOWNFAIL',
isfailure=False)
def options(self, parser, env=os.environ):
env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
parser.add_option('--no-knownfail', action='store_true',
dest='noKnownFail', default=env.get(env_opt, False),
help='Disable special handling of KnownFailure '
'exceptions')
def configure(self, options, conf):
if not self.can_configure:
return
self.conf = conf
disable = getattr(options, 'noKnownFail', False)
if disable:
self.enabled = False
KnownFailure = KnownFailurePlugin # backwards compat
class FPUModeCheckPlugin(Plugin):
"""
Plugin that checks the FPU mode before and after each test,
raising failures if the test changed the mode.
"""
def prepareTestCase(self, test):
from numpy.core._multiarray_tests import get_fpu_mode
def run(result):
old_mode = get_fpu_mode()
test.test(result)
new_mode = get_fpu_mode()
if old_mode != new_mode:
try:
raise AssertionError(
"FPU mode changed from {0:#x} to {1:#x} during the "
"test".format(old_mode, new_mode))
except AssertionError:
result.addFailure(test, sys.exc_info())
return run
# Class allows us to save the results of the tests in runTests - see runTests
# method docstring for details
class NumpyTestProgram(nose.core.TestProgram):
def runTests(self):
"""Run Tests. Returns true on success, false on failure, and
sets self.success to the same value.
Because nose currently discards the test result object, but we need
to return it to the user, override TestProgram.runTests to retain
the result
"""
if self.testRunner is None:
self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,
verbosity=self.config.verbosity,
config=self.config)
plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
if plug_runner is not None:
self.testRunner = plug_runner
self.result = self.testRunner.run(self.test)
self.success = self.result.wasSuccessful()
return self.success
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/testing/_private/parameterized.py | """
tl;dr: all code is licensed under simplified BSD, unless stated otherwise.
Unless stated otherwise in the source files, all code is copyright 2010 David
Wolever <[email protected]>. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of David Wolever.
"""
import re
import inspect
import warnings
from functools import wraps
from types import MethodType
from collections import namedtuple
from unittest import TestCase
_param = namedtuple("param", "args kwargs")
class param(_param):
""" Represents a single parameter to a test case.
For example::
>>> p = param("foo", bar=16)
>>> p
param("foo", bar=16)
>>> p.args
('foo', )
>>> p.kwargs
{'bar': 16}
Intended to be used as an argument to ``@parameterized``::
@parameterized([
param("foo", bar=16),
])
def test_stuff(foo, bar=16):
pass
"""
    def __new__(cls, *args, **kwargs):
return _param.__new__(cls, args, kwargs)
@classmethod
def explicit(cls, args=None, kwargs=None):
""" Creates a ``param`` by explicitly specifying ``args`` and
``kwargs``::
>>> param.explicit([1,2,3])
param(*(1, 2, 3))
>>> param.explicit(kwargs={"foo": 42})
param(*(), **{"foo": "42"})
"""
args = args or ()
kwargs = kwargs or {}
return cls(*args, **kwargs)
@classmethod
def from_decorator(cls, args):
""" Returns an instance of ``param()`` for ``@parameterized`` argument
``args``::
>>> param.from_decorator((42, ))
param(args=(42, ), kwargs={})
>>> param.from_decorator("foo")
param(args=("foo", ), kwargs={})
"""
if isinstance(args, param):
return args
elif isinstance(args, (str,)):
args = (args, )
try:
return cls(*args)
except TypeError as e:
if "after * must be" not in str(e):
raise
raise TypeError(
"Parameters must be tuples, but %r is not (hint: use '(%r, )')"
%(args, args),
)
def __repr__(self):
return "param(*%r, **%r)" %self
def parameterized_argument_value_pairs(func, p):
"""Return tuples of parameterized arguments and their values.
This is useful if you are writing your own doc_func
function and need to know the values for each parameter name::
>>> def func(a, foo=None, bar=42, **kwargs): pass
>>> p = param(1, foo=7, extra=99)
>>> parameterized_argument_value_pairs(func, p)
[("a", 1), ("foo", 7), ("bar", 42), ("**kwargs", {"extra": 99})]
If the function's first argument is named ``self`` then it will be
ignored::
>>> def func(self, a): pass
>>> p = param(1)
>>> parameterized_argument_value_pairs(func, p)
[("a", 1)]
Additionally, empty ``*args`` or ``**kwargs`` will be ignored::
>>> def func(foo, *args): pass
>>> p = param(1)
>>> parameterized_argument_value_pairs(func, p)
[("foo", 1)]
>>> p = param(1, 16)
>>> parameterized_argument_value_pairs(func, p)
[("foo", 1), ("*args", (16, ))]
"""
    # inspect.getargspec was removed in Python 3.11; getfullargspec is the
    # drop-in replacement here (its "keywords" field is named "varkw").
    argspec = inspect.getfullargspec(func)
arg_offset = 1 if argspec.args[:1] == ["self"] else 0
named_args = argspec.args[arg_offset:]
result = list(zip(named_args, p.args))
named_args = argspec.args[len(result) + arg_offset:]
varargs = p.args[len(result):]
result.extend([
(name, p.kwargs.get(name, default))
for (name, default)
in zip(named_args, argspec.defaults or [])
])
seen_arg_names = {n for (n, _) in result}
keywords = dict(sorted([
(name, p.kwargs[name])
for name in p.kwargs
if name not in seen_arg_names
]))
if varargs:
result.append(("*%s" %(argspec.varargs, ), tuple(varargs)))
if keywords:
result.append(("**%s" %(argspec.keywords, ), keywords))
return result
def short_repr(x, n=64):
""" A shortened repr of ``x`` which is guaranteed to be ``unicode``::
>>> short_repr("foo")
u"foo"
>>> short_repr("123456789", n=4)
u"12...89"
"""
x_repr = repr(x)
if isinstance(x_repr, bytes):
try:
x_repr = str(x_repr, "utf-8")
except UnicodeDecodeError:
x_repr = str(x_repr, "latin1")
if len(x_repr) > n:
x_repr = x_repr[:n//2] + "..." + x_repr[len(x_repr) - n//2:]
return x_repr
def default_doc_func(func, num, p):
if func.__doc__ is None:
return None
all_args_with_values = parameterized_argument_value_pairs(func, p)
# Assumes that the function passed is a bound method.
descs = [f'{n}={short_repr(v)}' for n, v in all_args_with_values]
# The documentation might be a multiline string, so split it
# and just work with the first string, ignoring the period
# at the end if there is one.
first, nl, rest = func.__doc__.lstrip().partition("\n")
suffix = ""
if first.endswith("."):
suffix = "."
first = first[:-1]
args = "%s[with %s]" %(len(first) and " " or "", ", ".join(descs))
return "".join([first.rstrip(), args, suffix, nl, rest])
def default_name_func(func, num, p):
base_name = func.__name__
name_suffix = "_%s" %(num, )
if len(p.args) > 0 and isinstance(p.args[0], (str,)):
name_suffix += "_" + parameterized.to_safe_name(p.args[0])
return base_name + name_suffix
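# Illustrative sketch (hypothetical helper): the generated test name combines
# the base name, the parameter index, and, for a leading string argument, a
# sanitized suffix.
def _demo_default_name_func():
    def test_widget(a, b):
        pass
    p = param("foo bar", 2)
    assert default_name_func(test_widget, 0, p) == "test_widget_0_foo_bar"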
# force nose for numpy purposes.
_test_runner_override = 'nose'
_test_runner_guess = False
_test_runners = set(["unittest", "unittest2", "nose", "nose2", "pytest"])
_test_runner_aliases = {
"_pytest": "pytest",
}
def set_test_runner(name):
global _test_runner_override
if name not in _test_runners:
raise TypeError(
"Invalid test runner: %r (must be one of: %s)"
%(name, ", ".join(_test_runners)),
)
_test_runner_override = name
def detect_runner():
""" Guess which test runner we're using by traversing the stack and looking
for the first matching module. This *should* be reasonably safe, as
it's done during test discovery where the test runner should be the
stack frame immediately outside. """
if _test_runner_override is not None:
return _test_runner_override
global _test_runner_guess
if _test_runner_guess is False:
stack = inspect.stack()
for record in reversed(stack):
frame = record[0]
module = frame.f_globals.get("__name__").partition(".")[0]
if module in _test_runner_aliases:
module = _test_runner_aliases[module]
if module in _test_runners:
_test_runner_guess = module
break
else:
_test_runner_guess = None
return _test_runner_guess
class parameterized:
""" Parameterize a test case::
class TestInt:
@parameterized([
("A", 10),
("F", 15),
param("10", 42, base=42)
])
def test_int(self, input, expected, base=16):
actual = int(input, base=base)
assert_equal(actual, expected)
@parameterized([
            (2, 3, 5),
(3, 5, 8),
])
def test_add(a, b, expected):
assert_equal(a + b, expected)
"""
def __init__(self, input, doc_func=None):
self.get_input = self.input_as_callable(input)
self.doc_func = doc_func or default_doc_func
def __call__(self, test_func):
self.assert_not_in_testcase_subclass()
@wraps(test_func)
def wrapper(test_self=None):
test_cls = test_self and type(test_self)
original_doc = wrapper.__doc__
for num, args in enumerate(wrapper.parameterized_input):
p = param.from_decorator(args)
unbound_func, nose_tuple = self.param_as_nose_tuple(test_self, test_func, num, p)
try:
wrapper.__doc__ = nose_tuple[0].__doc__
# Nose uses `getattr(instance, test_func.__name__)` to get
# a method bound to the test instance (as opposed to a
# method bound to the instance of the class created when
# tests were being enumerated). Set a value here to make
# sure nose can get the correct test method.
if test_self is not None:
setattr(test_cls, test_func.__name__, unbound_func)
yield nose_tuple
finally:
if test_self is not None:
delattr(test_cls, test_func.__name__)
wrapper.__doc__ = original_doc
wrapper.parameterized_input = self.get_input()
wrapper.parameterized_func = test_func
test_func.__name__ = "_parameterized_original_%s" %(test_func.__name__, )
return wrapper
def param_as_nose_tuple(self, test_self, func, num, p):
nose_func = wraps(func)(lambda *args: func(*args[:-1], **args[-1]))
nose_func.__doc__ = self.doc_func(func, num, p)
# Track the unbound function because we need to setattr the unbound
# function onto the class for nose to work (see comments above), and
# Python 3 doesn't let us pull the function out of a bound method.
unbound_func = nose_func
if test_self is not None:
nose_func = MethodType(nose_func, test_self)
return unbound_func, (nose_func, ) + p.args + (p.kwargs or {}, )
def assert_not_in_testcase_subclass(self):
parent_classes = self._terrible_magic_get_defining_classes()
if any(issubclass(cls, TestCase) for cls in parent_classes):
raise Exception("Warning: '@parameterized' tests won't work "
"inside subclasses of 'TestCase' - use "
"'@parameterized.expand' instead.")
def _terrible_magic_get_defining_classes(self):
""" Returns the list of parent classes of the class currently being defined.
Will likely only work if called from the ``parameterized`` decorator.
This function is entirely @brandon_rhodes's fault, as he suggested
the implementation: http://stackoverflow.com/a/8793684/71522
"""
stack = inspect.stack()
if len(stack) <= 4:
return []
frame = stack[4]
code_context = frame[4] and frame[4][0].strip()
if not (code_context and code_context.startswith("class ")):
return []
_, _, parents = code_context.partition("(")
parents, _, _ = parents.partition(")")
return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals)
@classmethod
def input_as_callable(cls, input):
if callable(input):
return lambda: cls.check_input_values(input())
input_values = cls.check_input_values(input)
return lambda: input_values
@classmethod
def check_input_values(cls, input_values):
# Explicitly convert non-list inputs to a list so that:
# 1. A helpful exception will be raised if they aren't iterable, and
# 2. Generators are unwrapped exactly once (otherwise `nosetests
# --processes=n` has issues; see:
# https://github.com/wolever/nose-parameterized/pull/31)
if not isinstance(input_values, list):
input_values = list(input_values)
return [ param.from_decorator(p) for p in input_values ]
@classmethod
def expand(cls, input, name_func=None, doc_func=None, **legacy):
""" A "brute force" method of parameterizing test cases. Creates new
test cases and injects them into the namespace that the wrapped
function is being defined in. Useful for parameterizing tests in
        subclasses of 'TestCase', where Nose test generators don't work.
>>> @parameterized.expand([("foo", 1, 2)])
... def test_add1(name, input, expected):
... actual = add1(input)
... assert_equal(actual, expected)
...
>>> locals()
... 'test_add1_foo_0': <function ...> ...
>>>
"""
if "testcase_func_name" in legacy:
warnings.warn("testcase_func_name= is deprecated; use name_func=",
DeprecationWarning, stacklevel=2)
if not name_func:
name_func = legacy["testcase_func_name"]
if "testcase_func_doc" in legacy:
warnings.warn("testcase_func_doc= is deprecated; use doc_func=",
DeprecationWarning, stacklevel=2)
if not doc_func:
doc_func = legacy["testcase_func_doc"]
doc_func = doc_func or default_doc_func
name_func = name_func or default_name_func
def parameterized_expand_wrapper(f, instance=None):
stack = inspect.stack()
frame = stack[1]
frame_locals = frame[0].f_locals
parameters = cls.input_as_callable(input)()
for num, p in enumerate(parameters):
name = name_func(f, num, p)
frame_locals[name] = cls.param_as_standalone_func(p, f, name)
frame_locals[name].__doc__ = doc_func(f, num, p)
f.__test__ = False
return parameterized_expand_wrapper
@classmethod
def param_as_standalone_func(cls, p, func, name):
@wraps(func)
def standalone_func(*a):
return func(*(a + p.args), **p.kwargs)
standalone_func.__name__ = name
# place_as is used by py.test to determine what source file should be
# used for this test.
standalone_func.place_as = func
# Remove __wrapped__ because py.test will try to look at __wrapped__
# to determine which parameters should be used with this test case,
# and obviously we don't need it to do any parameterization.
try:
del standalone_func.__wrapped__
except AttributeError:
pass
return standalone_func
@classmethod
def to_safe_name(cls, s):
return str(re.sub("[^a-zA-Z0-9_]+", "_", s))
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/testing/_private/utils.py | """
Utility function to facilitate testing.
"""
import os
import sys
import platform
import re
import gc
import operator
import warnings
from functools import partial, wraps
import shutil
import contextlib
from tempfile import mkdtemp, mkstemp
from unittest.case import SkipTest
from warnings import WarningMessage
import pprint
import numpy as np
from numpy.core import (
intp, float32, empty, arange, array_repr, ndarray, isnat, array)
import numpy.linalg.lapack_lite
from io import StringIO
__all__ = [
'assert_equal', 'assert_almost_equal', 'assert_approx_equal',
'assert_array_equal', 'assert_array_less', 'assert_string_equal',
'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
'raises', 'rundocs', 'runstring', 'verbose', 'measure',
'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON',
]
class KnownFailureException(Exception):
'''Raise this exception to mark a test as a known failing test.'''
pass
KnownFailureTest = KnownFailureException # backwards compat
verbose = 0
IS_PYPY = platform.python_implementation() == 'PyPy'
IS_PYSTON = hasattr(sys, "pyston_version_info")
HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON
HAS_LAPACK64 = numpy.linalg.lapack_lite._ilp64
def import_nose():
""" Import nose only when needed.
"""
nose_is_good = True
minimum_nose_version = (1, 0, 0)
try:
import nose
except ImportError:
nose_is_good = False
else:
if nose.__versioninfo__ < minimum_nose_version:
nose_is_good = False
if not nose_is_good:
msg = ('Need nose >= %d.%d.%d for tests - see '
'https://nose.readthedocs.io' %
minimum_nose_version)
raise ImportError(msg)
return nose
def assert_(val, msg=''):
"""
Assert that works in release mode.
Accepts callable msg to allow deferring evaluation until failure.
The Python built-in ``assert`` does not work when executing code in
optimized mode (the ``-O`` flag) - no byte-code is generated for it.
For documentation on usage, refer to the Python documentation.
"""
__tracebackhide__ = True # Hide traceback for py.test
if not val:
try:
smsg = msg()
except TypeError:
smsg = msg
raise AssertionError(smsg)
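# A minimal usage sketch (illustrative, not part of the original module):
# because ``assert_`` accepts a callable message, building an expensive
# diagnostic string can be deferred until the assertion actually fails.
def _example_assert_callable_msg():
    data = list(range(10))
    # The lambda below is only evaluated if the condition is false.
    assert_(len(data) == 10, lambda: f"unexpected length: {len(data)}")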
def gisnan(x):
"""like isnan, but always raise an error if type not supported instead of
returning a TypeError object.
Notes
-----
    isnan and other ufuncs sometimes return a NotImplementedType object instead
of raising any exception. This function is a wrapper to make sure an
exception is always raised.
This should be removed once this problem is solved at the Ufunc level."""
from numpy.core import isnan
st = isnan(x)
if isinstance(st, type(NotImplemented)):
raise TypeError("isnan not supported for this type")
return st
def gisfinite(x):
"""like isfinite, but always raise an error if type not supported instead
of returning a TypeError object.
Notes
-----
    isfinite and other ufuncs sometimes return a NotImplementedType object
instead of raising any exception. This function is a wrapper to make sure
an exception is always raised.
This should be removed once this problem is solved at the Ufunc level."""
from numpy.core import isfinite, errstate
with errstate(invalid='ignore'):
st = isfinite(x)
if isinstance(st, type(NotImplemented)):
raise TypeError("isfinite not supported for this type")
return st
def gisinf(x):
"""like isinf, but always raise an error if type not supported instead of
returning a TypeError object.
Notes
-----
    isinf and other ufuncs sometimes return a NotImplementedType object instead
of raising any exception. This function is a wrapper to make sure an
exception is always raised.
This should be removed once this problem is solved at the Ufunc level."""
from numpy.core import isinf, errstate
with errstate(invalid='ignore'):
st = isinf(x)
if isinstance(st, type(NotImplemented)):
raise TypeError("isinf not supported for this type")
return st
if os.name == 'nt':
# Code "stolen" from enthought/debug/memusage.py
def GetPerformanceAttributes(object, counter, instance=None,
inum=-1, format=None, machine=None):
# NOTE: Many counters require 2 samples to give accurate results,
# including "% Processor Time" (as by definition, at any instant, a
# thread's CPU usage is either 0 or 100). To read counters like this,
# you should copy this function, but keep the counter open, and call
# CollectQueryData() each time you need to know.
# See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp (dead link)
# My older explanation for this was that the "AddCounter" process
# forced the CPU to 100%, but the above makes more sense :)
import win32pdh
if format is None:
format = win32pdh.PDH_FMT_LONG
path = win32pdh.MakeCounterPath( (machine, object, instance, None,
inum, counter))
hq = win32pdh.OpenQuery()
try:
hc = win32pdh.AddCounter(hq, path)
try:
win32pdh.CollectQueryData(hq)
type, val = win32pdh.GetFormattedCounterValue(hc, format)
return val
finally:
win32pdh.RemoveCounter(hc)
finally:
win32pdh.CloseQuery(hq)
def memusage(processName="python", instance=0):
# from win32pdhutil, part of the win32all package
import win32pdh
return GetPerformanceAttributes("Process", "Virtual Bytes",
processName, instance,
win32pdh.PDH_FMT_LONG, None)
elif sys.platform[:5] == 'linux':
def memusage(_proc_pid_stat=f'/proc/{os.getpid()}/stat'):
"""
Return virtual memory size in bytes of the running python.
"""
try:
with open(_proc_pid_stat, 'r') as f:
l = f.readline().split(' ')
return int(l[22])
except Exception:
return
else:
def memusage():
"""
Return memory usage of running python. [Not implemented]
"""
raise NotImplementedError
if sys.platform[:5] == 'linux':
def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]):
"""
Return number of jiffies elapsed.
Return number of jiffies (1/100ths of a second) that this
process has been scheduled in user mode. See man 5 proc.
"""
import time
if not _load_time:
_load_time.append(time.time())
try:
with open(_proc_pid_stat, 'r') as f:
l = f.readline().split(' ')
return int(l[13])
except Exception:
return int(100*(time.time()-_load_time[0]))
else:
    # os.getpid is not available on all platforms.
    # Using time is safe but inaccurate, especially when the process
    # was suspended or sleeping.
def jiffies(_load_time=[]):
"""
Return number of jiffies elapsed.
Return number of jiffies (1/100ths of a second) that this
process has been scheduled in user mode. See man 5 proc.
"""
import time
if not _load_time:
_load_time.append(time.time())
return int(100*(time.time()-_load_time[0]))
def build_err_msg(arrays, err_msg, header='Items are not equal:',
verbose=True, names=('ACTUAL', 'DESIRED'), precision=8):
msg = ['\n' + header]
if err_msg:
if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header):
msg = [msg[0] + ' ' + err_msg]
else:
msg.append(err_msg)
if verbose:
for i, a in enumerate(arrays):
if isinstance(a, ndarray):
# precision argument is only needed if the objects are ndarrays
r_func = partial(array_repr, precision=precision)
else:
r_func = repr
try:
r = r_func(a)
except Exception as exc:
r = f'[repr failed for <{type(a).__name__}>: {exc}]'
if r.count('\n') > 3:
r = '\n'.join(r.splitlines()[:3])
r += '...'
msg.append(f' {names[i]}: {r}')
return '\n'.join(msg)
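# Illustrative sketch (assumed example values): build_err_msg assembles the
# header, the optional user message, and labeled reprs of the offending
# objects; the default labels are 'ACTUAL' and 'DESIRED'.
def _example_build_err_msg():
    return build_err_msg([array([1.0]), array([2.0])],
                         err_msg='values differ')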
def assert_equal(actual, desired, err_msg='', verbose=True):
"""
Raises an AssertionError if two objects are not equal.
Given two objects (scalars, lists, tuples, dictionaries or numpy arrays),
check that all elements of these objects are equal. An exception is raised
at the first conflicting values.
When one of `actual` and `desired` is a scalar and the other is array_like,
the function checks that each element of the array_like object is equal to
the scalar.
This function handles NaN comparisons as if NaN was a "normal" number.
That is, AssertionError is not raised if both objects have NaNs in the same
positions. This is in contrast to the IEEE standard on NaNs, which says
that NaN compared to anything must return False.
Parameters
----------
actual : array_like
The object to check.
desired : array_like
The expected object.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal.
Examples
--------
>>> np.testing.assert_equal([4,5], [4,6])
Traceback (most recent call last):
...
AssertionError:
Items are not equal:
item=1
ACTUAL: 5
DESIRED: 6
The following comparison does not raise an exception. There are NaNs
in the inputs, but they are in the same positions.
>>> np.testing.assert_equal(np.array([1.0, 2.0, np.nan]), [1, 2, np.nan])
"""
__tracebackhide__ = True # Hide traceback for py.test
if isinstance(desired, dict):
if not isinstance(actual, dict):
raise AssertionError(repr(type(actual)))
assert_equal(len(actual), len(desired), err_msg, verbose)
for k, i in desired.items():
if k not in actual:
raise AssertionError(repr(k))
assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}',
verbose)
return
if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
assert_equal(len(actual), len(desired), err_msg, verbose)
for k in range(len(desired)):
assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}',
verbose)
return
from numpy.core import ndarray, isscalar, signbit
from numpy.lib import iscomplexobj, real, imag
if isinstance(actual, ndarray) or isinstance(desired, ndarray):
return assert_array_equal(actual, desired, err_msg, verbose)
msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
# Handle complex numbers: separate into real/imag to handle
# nan/inf/negative zero correctly
# XXX: catch ValueError for subclasses of ndarray where iscomplex fail
try:
usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
except (ValueError, TypeError):
usecomplex = False
if usecomplex:
if iscomplexobj(actual):
actualr = real(actual)
actuali = imag(actual)
else:
actualr = actual
actuali = 0
if iscomplexobj(desired):
desiredr = real(desired)
desiredi = imag(desired)
else:
desiredr = desired
desiredi = 0
try:
assert_equal(actualr, desiredr)
assert_equal(actuali, desiredi)
except AssertionError:
raise AssertionError(msg)
# isscalar test to check cases such as [np.nan] != np.nan
if isscalar(desired) != isscalar(actual):
raise AssertionError(msg)
try:
isdesnat = isnat(desired)
isactnat = isnat(actual)
dtypes_match = (np.asarray(desired).dtype.type ==
np.asarray(actual).dtype.type)
if isdesnat and isactnat:
# If both are NaT (and have the same dtype -- datetime or
# timedelta) they are considered equal.
if dtypes_match:
return
else:
raise AssertionError(msg)
except (TypeError, ValueError, NotImplementedError):
pass
# Inf/nan/negative zero handling
try:
isdesnan = gisnan(desired)
isactnan = gisnan(actual)
if isdesnan and isactnan:
return # both nan, so equal
# handle signed zero specially for floats
array_actual = np.asarray(actual)
array_desired = np.asarray(desired)
if (array_actual.dtype.char in 'Mm' or
array_desired.dtype.char in 'Mm'):
            # Before NumPy 1.18, gisnan failed for datetime64 and
            # timedelta64. It now succeeds, but comparison to a scalar
            # with a different type emits a DeprecationWarning.
            # Avoid that by skipping the next check.
raise NotImplementedError('cannot compare to a scalar '
'with a different type')
if desired == 0 and actual == 0:
if not signbit(desired) == signbit(actual):
raise AssertionError(msg)
except (TypeError, ValueError, NotImplementedError):
pass
try:
# Explicitly use __eq__ for comparison, gh-2552
if not (desired == actual):
raise AssertionError(msg)
except (DeprecationWarning, FutureWarning) as e:
# this handles the case when the two types are not even comparable
if 'elementwise == comparison' in e.args[0]:
raise AssertionError(msg)
else:
raise
def print_assert_equal(test_string, actual, desired):
"""
Test if two objects are equal, and print an error message if test fails.
The test is performed with ``actual == desired``.
Parameters
----------
test_string : str
The message supplied to AssertionError.
actual : object
The object to test for equality against `desired`.
desired : object
The expected result.
Examples
--------
>>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])
>>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2])
Traceback (most recent call last):
...
AssertionError: Test XYZ of func xyz failed
ACTUAL:
[0, 1]
DESIRED:
[0, 2]
"""
__tracebackhide__ = True # Hide traceback for py.test
import pprint
if not (actual == desired):
msg = StringIO()
msg.write(test_string)
msg.write(' failed\nACTUAL: \n')
pprint.pprint(actual, msg)
msg.write('DESIRED: \n')
pprint.pprint(desired, msg)
raise AssertionError(msg.getvalue())
def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True):
"""
Raises an AssertionError if two items are not equal up to desired
precision.
.. note:: It is recommended to use one of `assert_allclose`,
`assert_array_almost_equal_nulp` or `assert_array_max_ulp`
instead of this function for more consistent floating point
comparisons.
    The test verifies that the elements of `actual` and `desired` satisfy
``abs(desired-actual) < 1.5 * 10**(-decimal)``
That is a looser test than originally documented, but agrees with what the
actual implementation in `assert_array_almost_equal` did up to rounding
vagaries. An exception is raised at conflicting values. For ndarrays this
    delegates to assert_array_almost_equal.
Parameters
----------
actual : array_like
The object to check.
desired : array_like
The expected object.
decimal : int, optional
Desired precision, default is 7.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
assert_allclose: Compare two array_like objects for equality with desired
relative and/or absolute precision.
assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
Examples
--------
>>> from numpy.testing import assert_almost_equal
>>> assert_almost_equal(2.3333333333333, 2.33333334)
>>> assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
Traceback (most recent call last):
...
AssertionError:
Arrays are not almost equal to 10 decimals
ACTUAL: 2.3333333333333
DESIRED: 2.33333334
>>> assert_almost_equal(np.array([1.0,2.3333333333333]),
... np.array([1.0,2.33333334]), decimal=9)
Traceback (most recent call last):
...
AssertionError:
Arrays are not almost equal to 9 decimals
<BLANKLINE>
Mismatched elements: 1 / 2 (50%)
Max absolute difference: 6.66669964e-09
Max relative difference: 2.85715698e-09
x: array([1. , 2.333333333])
y: array([1. , 2.33333334])
"""
__tracebackhide__ = True # Hide traceback for py.test
from numpy.core import ndarray
from numpy.lib import iscomplexobj, real, imag
# Handle complex numbers: separate into real/imag to handle
# nan/inf/negative zero correctly
# XXX: catch ValueError for subclasses of ndarray where iscomplex fail
try:
usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
except ValueError:
usecomplex = False
def _build_err_msg():
header = ('Arrays are not almost equal to %d decimals' % decimal)
return build_err_msg([actual, desired], err_msg, verbose=verbose,
header=header)
if usecomplex:
if iscomplexobj(actual):
actualr = real(actual)
actuali = imag(actual)
else:
actualr = actual
actuali = 0
if iscomplexobj(desired):
desiredr = real(desired)
desiredi = imag(desired)
else:
desiredr = desired
desiredi = 0
try:
assert_almost_equal(actualr, desiredr, decimal=decimal)
assert_almost_equal(actuali, desiredi, decimal=decimal)
except AssertionError:
raise AssertionError(_build_err_msg())
if isinstance(actual, (ndarray, tuple, list)) \
or isinstance(desired, (ndarray, tuple, list)):
return assert_array_almost_equal(actual, desired, decimal, err_msg)
try:
# If one of desired/actual is not finite, handle it specially here:
# check that both are nan if any is a nan, and test for equality
# otherwise
if not (gisfinite(desired) and gisfinite(actual)):
if gisnan(desired) or gisnan(actual):
if not (gisnan(desired) and gisnan(actual)):
raise AssertionError(_build_err_msg())
else:
if not desired == actual:
raise AssertionError(_build_err_msg())
return
except (NotImplementedError, TypeError):
pass
if abs(desired - actual) >= 1.5 * 10.0**(-decimal):
raise AssertionError(_build_err_msg())
def assert_approx_equal(actual, desired, significant=7, err_msg='', verbose=True):
"""
Raises an AssertionError if two items are not equal up to significant
digits.
.. note:: It is recommended to use one of `assert_allclose`,
`assert_array_almost_equal_nulp` or `assert_array_max_ulp`
instead of this function for more consistent floating point
comparisons.
Given two numbers, check that they are approximately equal.
Approximately equal is defined as the number of significant digits
that agree.
Parameters
----------
actual : scalar
The object to check.
desired : scalar
The expected object.
significant : int, optional
Desired precision, default is 7.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
assert_allclose: Compare two array_like objects for equality with desired
relative and/or absolute precision.
assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
Examples
--------
>>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)
>>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,
... significant=8)
>>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,
... significant=8)
Traceback (most recent call last):
...
AssertionError:
Items are not equal to 8 significant digits:
ACTUAL: 1.234567e-21
DESIRED: 1.2345672e-21
the evaluated condition that raises the exception is
>>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
True
"""
__tracebackhide__ = True # Hide traceback for py.test
import numpy as np
(actual, desired) = map(float, (actual, desired))
if desired == actual:
return
    # Normalize the numbers to be in range (-10.0, 10.0)
# scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
with np.errstate(invalid='ignore'):
scale = 0.5*(np.abs(desired) + np.abs(actual))
scale = np.power(10, np.floor(np.log10(scale)))
try:
sc_desired = desired/scale
except ZeroDivisionError:
sc_desired = 0.0
try:
sc_actual = actual/scale
except ZeroDivisionError:
sc_actual = 0.0
msg = build_err_msg(
[actual, desired], err_msg,
header='Items are not equal to %d significant digits:' % significant,
verbose=verbose)
try:
# If one of desired/actual is not finite, handle it specially here:
# check that both are nan if any is a nan, and test for equality
# otherwise
if not (gisfinite(desired) and gisfinite(actual)):
if gisnan(desired) or gisnan(actual):
if not (gisnan(desired) and gisnan(actual)):
raise AssertionError(msg)
else:
if not desired == actual:
raise AssertionError(msg)
return
except (TypeError, NotImplementedError):
pass
if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)):
raise AssertionError(msg)
def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
precision=6, equal_nan=True, equal_inf=True):
__tracebackhide__ = True # Hide traceback for py.test
from numpy.core import array, array2string, isnan, inf, bool_, errstate, all, max, object_
x = np.asanyarray(x)
y = np.asanyarray(y)
# original array for output formatting
ox, oy = x, y
def isnumber(x):
return x.dtype.char in '?bhilqpBHILQPefdgFDG'
def istime(x):
return x.dtype.char in "Mm"
def func_assert_same_pos(x, y, func=isnan, hasval='nan'):
"""Handling nan/inf.
Combine results of running func on x and y, checking that they are True
at the same locations.
"""
__tracebackhide__ = True # Hide traceback for py.test
x_id = func(x)
y_id = func(y)
# We include work-arounds here to handle three types of slightly
# pathological ndarray subclasses:
# (1) all() on `masked` array scalars can return masked arrays, so we
# use != True
# (2) __eq__ on some ndarray subclasses returns Python booleans
# instead of element-wise comparisons, so we cast to bool_() and
# use isinstance(..., bool) checks
# (3) subclasses with bare-bones __array_function__ implementations may
# not implement np.all(), so favor using the .all() method
# We are not committed to supporting such subclasses, but it's nice to
# support them if possible.
if bool_(x_id == y_id).all() != True:
msg = build_err_msg([x, y],
err_msg + '\nx and y %s location mismatch:'
% (hasval), verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise AssertionError(msg)
# If there is a scalar, then here we know the array has the same
# flag as it everywhere, so we should return the scalar flag.
if isinstance(x_id, bool) or x_id.ndim == 0:
return bool_(x_id)
elif isinstance(y_id, bool) or y_id.ndim == 0:
return bool_(y_id)
else:
return y_id
try:
cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
if not cond:
msg = build_err_msg([x, y],
err_msg
+ f'\n(shapes {x.shape}, {y.shape} mismatch)',
verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise AssertionError(msg)
flagged = bool_(False)
if isnumber(x) and isnumber(y):
if equal_nan:
flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan')
if equal_inf:
flagged |= func_assert_same_pos(x, y,
func=lambda xy: xy == +inf,
hasval='+inf')
flagged |= func_assert_same_pos(x, y,
func=lambda xy: xy == -inf,
hasval='-inf')
elif istime(x) and istime(y):
            # If one is datetime64 and the other timedelta64, there is no
            # point in comparing NaT positions.
if equal_nan and x.dtype.type == y.dtype.type:
flagged = func_assert_same_pos(x, y, func=isnat, hasval="NaT")
if flagged.ndim > 0:
x, y = x[~flagged], y[~flagged]
# Only do the comparison if actual values are left
if x.size == 0:
return
elif flagged:
# no sense doing comparison if everything is flagged.
return
val = comparison(x, y)
if isinstance(val, bool):
cond = val
reduced = array([val])
else:
reduced = val.ravel()
cond = reduced.all()
# The below comparison is a hack to ensure that fully masked
# results, for which val.ravel().all() returns np.ma.masked,
# do not trigger a failure (np.ma.masked != True evaluates as
# np.ma.masked, which is falsy).
if cond != True:
n_mismatch = reduced.size - reduced.sum(dtype=intp)
n_elements = flagged.size if flagged.ndim != 0 else reduced.size
percent_mismatch = 100 * n_mismatch / n_elements
remarks = [
'Mismatched elements: {} / {} ({:.3g}%)'.format(
n_mismatch, n_elements, percent_mismatch)]
with errstate(all='ignore'):
# ignore errors for non-numeric types
with contextlib.suppress(TypeError):
error = abs(x - y)
max_abs_error = max(error)
if getattr(error, 'dtype', object_) == object_:
remarks.append('Max absolute difference: '
+ str(max_abs_error))
else:
remarks.append('Max absolute difference: '
+ array2string(max_abs_error))
# note: this definition of relative error matches that one
# used by assert_allclose (found in np.isclose)
# Filter values where the divisor would be zero
nonzero = bool_(y != 0)
if all(~nonzero):
max_rel_error = array(inf)
else:
max_rel_error = max(error[nonzero] / abs(y[nonzero]))
if getattr(error, 'dtype', object_) == object_:
remarks.append('Max relative difference: '
+ str(max_rel_error))
else:
remarks.append('Max relative difference: '
+ array2string(max_rel_error))
err_msg += '\n' + '\n'.join(remarks)
msg = build_err_msg([ox, oy], err_msg,
verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise AssertionError(msg)
except ValueError:
import traceback
efmt = traceback.format_exc()
header = f'error during assertion:\n\n{efmt}\n\n{header}'
msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise ValueError(msg)
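# Illustrative sketch: assert_array_compare is the engine behind
# assert_array_equal and assert_array_less; any elementwise comparison can
# be plugged in. The use of operator.__le__ here is an assumption chosen
# only for demonstration.
def _example_array_compare_le():
    assert_array_compare(operator.__le__, [1, 2, 3], [1, 2, 4],
                         header='Arrays are not <=-ordered')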
def assert_array_equal(x, y, err_msg='', verbose=True):
"""
Raises an AssertionError if two array_like objects are not equal.
Given two array_like objects, check that the shape is equal and all
elements of these objects are equal (but see the Notes for the special
handling of a scalar). An exception is raised at shape mismatch or
conflicting values. In contrast to the standard usage in numpy, NaNs
are compared like numbers, no assertion is raised if both objects have
NaNs in the same positions.
The usual caution for verifying equality with floating point numbers is
advised.
Parameters
----------
x : array_like
The actual object to check.
y : array_like
The desired, expected object.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired objects are not equal.
See Also
--------
assert_allclose: Compare two array_like objects for equality with desired
relative and/or absolute precision.
assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
Notes
-----
When one of `x` and `y` is a scalar and the other is array_like, the
function checks that each element of the array_like object is equal to
the scalar.
Examples
--------
The first assert does not raise an exception:
>>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
... [np.exp(0),2.33333, np.nan])
Assert fails with numerical imprecision with floats:
>>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
... [1, np.sqrt(np.pi)**2, np.nan])
Traceback (most recent call last):
...
AssertionError:
Arrays are not equal
<BLANKLINE>
Mismatched elements: 1 / 3 (33.3%)
Max absolute difference: 4.4408921e-16
Max relative difference: 1.41357986e-16
x: array([1. , 3.141593, nan])
y: array([1. , 3.141593, nan])
Use `assert_allclose` or one of the nulp (number of floating point values)
functions for these cases instead:
>>> np.testing.assert_allclose([1.0,np.pi,np.nan],
... [1, np.sqrt(np.pi)**2, np.nan],
... rtol=1e-10, atol=0)
As mentioned in the Notes section, `assert_array_equal` has special
handling for scalars. Here the test checks that each value in `x` is 3:
>>> x = np.full((2, 5), fill_value=3)
>>> np.testing.assert_array_equal(x, 3)
"""
__tracebackhide__ = True # Hide traceback for py.test
assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
verbose=verbose, header='Arrays are not equal')
def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
"""
Raises an AssertionError if two objects are not equal up to desired
precision.
.. note:: It is recommended to use one of `assert_allclose`,
`assert_array_almost_equal_nulp` or `assert_array_max_ulp`
instead of this function for more consistent floating point
comparisons.
The test verifies identical shapes and that the elements of ``actual`` and
    ``desired`` satisfy
``abs(desired-actual) < 1.5 * 10**(-decimal)``
That is a looser test than originally documented, but agrees with what the
actual implementation did up to rounding vagaries. An exception is raised
at shape mismatch or conflicting values. In contrast to the standard usage
in numpy, NaNs are compared like numbers, no assertion is raised if both
objects have NaNs in the same positions.
Parameters
----------
x : array_like
The actual object to check.
y : array_like
The desired, expected object.
decimal : int, optional
Desired precision, default is 6.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
assert_allclose: Compare two array_like objects for equality with desired
relative and/or absolute precision.
assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
Examples
--------
the first assert does not raise an exception
>>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
... [1.0,2.333,np.nan])
>>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
... [1.0,2.33339,np.nan], decimal=5)
Traceback (most recent call last):
...
AssertionError:
Arrays are not almost equal to 5 decimals
<BLANKLINE>
Mismatched elements: 1 / 3 (33.3%)
Max absolute difference: 6.e-05
Max relative difference: 2.57136612e-05
x: array([1. , 2.33333, nan])
y: array([1. , 2.33339, nan])
>>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
... [1.0,2.33333, 5], decimal=5)
Traceback (most recent call last):
...
AssertionError:
Arrays are not almost equal to 5 decimals
<BLANKLINE>
x and y nan location mismatch:
x: array([1. , 2.33333, nan])
y: array([1. , 2.33333, 5. ])
"""
__tracebackhide__ = True # Hide traceback for py.test
from numpy.core import number, float_, result_type, array
from numpy.core.numerictypes import issubdtype
from numpy.core.fromnumeric import any as npany
def compare(x, y):
try:
            if npany(gisinf(x)) or npany(gisinf(y)):
xinfid = gisinf(x)
yinfid = gisinf(y)
if not (xinfid == yinfid).all():
return False
            # if there is only one item, x and y are both +/- inf
if x.size == y.size == 1:
return x == y
x = x[~xinfid]
y = y[~yinfid]
except (TypeError, NotImplementedError):
pass
# make sure y is an inexact type to avoid abs(MIN_INT); will cause
# casting of x later.
dtype = result_type(y, 1.)
y = np.asanyarray(y, dtype)
z = abs(x - y)
if not issubdtype(z.dtype, number):
z = z.astype(float_) # handle object arrays
return z < 1.5 * 10.0**(-decimal)
assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
header=('Arrays are not almost equal to %d decimals' % decimal),
precision=decimal)
def assert_array_less(x, y, err_msg='', verbose=True):
"""
Raises an AssertionError if two array_like objects are not ordered by less
than.
Given two array_like objects, check that the shape is equal and all
elements of the first object are strictly smaller than those of the
second object. An exception is raised at shape mismatch or incorrectly
ordered values. Shape mismatch does not raise if an object has zero
dimension. In contrast to the standard usage in numpy, NaNs are
compared, no assertion is raised if both objects have NaNs in the same
positions.
Parameters
----------
x : array_like
The smaller object to check.
y : array_like
The larger object to compare.
err_msg : string
The error message to be printed in case of failure.
verbose : bool
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired objects are not equal.
See Also
--------
assert_array_equal: tests objects for equality
assert_array_almost_equal: test objects for equality up to precision
Examples
--------
>>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
>>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan])
Traceback (most recent call last):
...
AssertionError:
Arrays are not less-ordered
<BLANKLINE>
Mismatched elements: 1 / 3 (33.3%)
Max absolute difference: 1.
Max relative difference: 0.5
x: array([ 1., 1., nan])
y: array([ 1., 2., nan])
>>> np.testing.assert_array_less([1.0, 4.0], 3)
Traceback (most recent call last):
...
AssertionError:
Arrays are not less-ordered
<BLANKLINE>
Mismatched elements: 1 / 2 (50%)
Max absolute difference: 2.
Max relative difference: 0.66666667
x: array([1., 4.])
y: array(3)
>>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4])
Traceback (most recent call last):
...
AssertionError:
Arrays are not less-ordered
<BLANKLINE>
(shapes (3,), (1,) mismatch)
x: array([1., 2., 3.])
y: array([4])
"""
__tracebackhide__ = True # Hide traceback for py.test
assert_array_compare(operator.__lt__, x, y, err_msg=err_msg,
verbose=verbose,
header='Arrays are not less-ordered',
equal_inf=False)
def runstring(astr, dict):
exec(astr, dict)
def assert_string_equal(actual, desired):
"""
Test if two strings are equal.
If the given strings are equal, `assert_string_equal` does nothing.
If they are not equal, an AssertionError is raised, and the diff
between the strings is shown.
Parameters
----------
actual : str
The string to test for equality against the expected string.
desired : str
The expected string.
Examples
--------
>>> np.testing.assert_string_equal('abc', 'abc')
>>> np.testing.assert_string_equal('abc', 'abcd')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
...
AssertionError: Differences in strings:
    - abc
    + abcd
    ?    +
"""
# delay import of difflib to reduce startup time
__tracebackhide__ = True # Hide traceback for py.test
import difflib
if not isinstance(actual, str):
raise AssertionError(repr(type(actual)))
if not isinstance(desired, str):
raise AssertionError(repr(type(desired)))
if desired == actual:
return
diff = list(difflib.Differ().compare(actual.splitlines(True),
desired.splitlines(True)))
diff_list = []
while diff:
d1 = diff.pop(0)
if d1.startswith(' '):
continue
if d1.startswith('- '):
l = [d1]
d2 = diff.pop(0)
if d2.startswith('? '):
l.append(d2)
d2 = diff.pop(0)
if not d2.startswith('+ '):
raise AssertionError(repr(d2))
l.append(d2)
if diff:
d3 = diff.pop(0)
if d3.startswith('? '):
l.append(d3)
else:
diff.insert(0, d3)
if d2[2:] == d1[2:]:
continue
diff_list.extend(l)
continue
raise AssertionError(repr(d1))
if not diff_list:
return
msg = f"Differences in strings:\n{''.join(diff_list).rstrip()}"
if actual != desired:
raise AssertionError(msg)
def rundocs(filename=None, raise_on_error=True):
"""
Run doctests found in the given file.
By default `rundocs` raises an AssertionError on failure.
Parameters
----------
filename : str
The path to the file for which the doctests are run.
raise_on_error : bool
Whether to raise an AssertionError when a doctest fails. Default is
True.
Notes
-----
The doctests can be run by the user/developer by adding the ``doctests``
argument to the ``test()`` call. For example, to run all tests (including
doctests) for `numpy.lib`:
>>> np.lib.test(doctests=True) # doctest: +SKIP
"""
from numpy.distutils.misc_util import exec_mod_from_location
import doctest
if filename is None:
f = sys._getframe(1)
filename = f.f_globals['__file__']
name = os.path.splitext(os.path.basename(filename))[0]
m = exec_mod_from_location(name, filename)
tests = doctest.DocTestFinder().find(m)
runner = doctest.DocTestRunner(verbose=False)
msg = []
if raise_on_error:
out = lambda s: msg.append(s)
else:
out = None
for test in tests:
runner.run(test, out=out)
if runner.failures > 0 and raise_on_error:
raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg))
def raises(*args):
"""Decorator to check for raised exceptions.
The decorated test function must raise one of the passed exceptions to
pass. If you want to test many assertions about exceptions in a single
test, you may want to use `assert_raises` instead.
.. warning::
This decorator is nose specific, do not use it if you are using a
different test framework.
Parameters
----------
args : exceptions
The test passes if any of the passed exceptions is raised.
Raises
------
AssertionError
Examples
--------
Usage::
@raises(TypeError, ValueError)
def test_raises_type_error():
raise TypeError("This test passes")
@raises(Exception)
def test_that_fails_by_passing():
pass
"""
nose = import_nose()
return nose.tools.raises(*args)
#
# assert_raises and assert_raises_regex are taken from unittest.
#
import unittest
class _Dummy(unittest.TestCase):
def nop(self):
pass
_d = _Dummy('nop')
def assert_raises(*args, **kwargs):
"""
assert_raises(exception_class, callable, *args, **kwargs)
assert_raises(exception_class)
Fail unless an exception of class exception_class is thrown
by callable when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
thrown, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
Alternatively, `assert_raises` can be used as a context manager:
>>> from numpy.testing import assert_raises
>>> with assert_raises(ZeroDivisionError):
... 1 / 0
is equivalent to
>>> def div(x, y):
... return x / y
>>> assert_raises(ZeroDivisionError, div, 1, 0)
"""
__tracebackhide__ = True # Hide traceback for py.test
return _d.assertRaises(*args,**kwargs)
def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
"""
assert_raises_regex(exception_class, expected_regexp, callable, *args,
**kwargs)
assert_raises_regex(exception_class, expected_regexp)
Fail unless an exception of class exception_class and with message that
matches expected_regexp is thrown by callable when invoked with arguments
args and keyword arguments kwargs.
Alternatively, can be used as a context manager like `assert_raises`.
Notes
-----
.. versionadded:: 1.9.0
"""
__tracebackhide__ = True # Hide traceback for py.test
return _d.assertRaisesRegex(exception_class, expected_regexp, *args, **kwargs)
def decorate_methods(cls, decorator, testmatch=None):
"""
Apply a decorator to all methods in a class matching a regular expression.
The given decorator is applied to all public methods of `cls` that are
matched by the regular expression `testmatch`
(``testmatch.search(methodname)``). Methods that are private, i.e. start
with an underscore, are ignored.
Parameters
----------
cls : class
Class whose methods to decorate.
decorator : function
Decorator to apply to methods
testmatch : compiled regexp or str, optional
The regular expression. Default value is None, in which case the
nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
is used.
If `testmatch` is a string, it is compiled to a regular expression
first.
"""
if testmatch is None:
testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
else:
testmatch = re.compile(testmatch)
cls_attr = cls.__dict__
# delayed import to reduce startup time
from inspect import isfunction
methods = [_m for _m in cls_attr.values() if isfunction(_m)]
for function in methods:
try:
if hasattr(function, 'compat_func_name'):
funcname = function.compat_func_name
else:
funcname = function.__name__
except AttributeError:
# not a function
continue
if testmatch.search(funcname) and not funcname.startswith('_'):
setattr(cls, funcname, decorator(function))
return
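# Illustrative sketch with hypothetical names (_trace, _ExampleTests): apply
# a pass-through decorator to every public ``test*`` method of a class.
def _example_decorate_methods():
    def _trace(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return wrapper
    class _ExampleTests:
        def test_ok(self):
            return True
    decorate_methods(_ExampleTests, _trace, testmatch=r'test')
    return _ExampleTests().test_ok()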
def measure(code_str, times=1, label=None):
"""
Return elapsed time for executing code in the namespace of the caller.
The supplied code string is compiled with the Python builtin ``compile``.
    The precision of the timing is 10 milliseconds. If the code will execute
fast on this timescale, it can be executed many times to get reasonable
timing accuracy.
Parameters
----------
code_str : str
The code to be timed.
times : int, optional
The number of times the code is executed. Default is 1. The code is
only compiled once.
label : str, optional
A label to identify `code_str` with. This is passed into ``compile``
as the second argument (for run-time error messages).
Returns
-------
elapsed : float
Total elapsed time in seconds for executing `code_str` `times` times.
Examples
--------
>>> times = 10
>>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', times=times)
>>> print("Time for a single execution : ", etime / times, "s") # doctest: +SKIP
Time for a single execution : 0.005 s
"""
frame = sys._getframe(1)
locs, globs = frame.f_locals, frame.f_globals
code = compile(code_str, f'Test name: {label} ', 'exec')
i = 0
elapsed = jiffies()
while i < times:
i += 1
exec(code, globs, locs)
elapsed = jiffies() - elapsed
return 0.01*elapsed
def _assert_valid_refcount(op):
"""
Check that ufuncs don't mishandle refcount of object `1`.
Used in a few regression tests.
"""
if not HAS_REFCOUNT:
return True
import gc
import numpy as np
b = np.arange(100*100).reshape(100, 100)
c = b
i = 1
gc.disable()
try:
rc = sys.getrefcount(i)
for j in range(15):
d = op(b, c)
assert_(sys.getrefcount(i) >= rc)
finally:
gc.enable()
del d # for pyflakes
def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True,
err_msg='', verbose=True):
"""
Raises an AssertionError if two objects are not equal up to desired
tolerance.
The test is equivalent to ``allclose(actual, desired, rtol, atol)`` (note
that ``allclose`` has different default values). It compares the difference
between `actual` and `desired` to ``atol + rtol * abs(desired)``.
.. versionadded:: 1.5.0
Parameters
----------
actual : array_like
Array obtained.
desired : array_like
Array desired.
rtol : float, optional
Relative tolerance.
atol : float, optional
Absolute tolerance.
equal_nan : bool, optional.
If True, NaNs will compare equal.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
assert_array_almost_equal_nulp, assert_array_max_ulp
Examples
--------
>>> x = [1e-5, 1e-3, 1e-1]
>>> y = np.arccos(np.cos(x))
>>> np.testing.assert_allclose(x, y, rtol=1e-5, atol=0)
"""
__tracebackhide__ = True # Hide traceback for py.test
import numpy as np
def compare(x, y):
return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol,
equal_nan=equal_nan)
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
header = f'Not equal to tolerance rtol={rtol:g}, atol={atol:g}'
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
verbose=verbose, header=header, equal_nan=equal_nan)
def assert_array_almost_equal_nulp(x, y, nulp=1):
"""
Compare two arrays relatively to their spacing.
This is a relatively robust method to compare two arrays whose amplitude
is variable.
Parameters
----------
x, y : array_like
Input arrays.
nulp : int, optional
The maximum number of unit in the last place for tolerance (see Notes).
Default is 1.
Returns
-------
None
Raises
------
AssertionError
If the spacing between `x` and `y` for one or more elements is larger
than `nulp`.
See Also
--------
assert_array_max_ulp : Check that all items of arrays differ in at most
N Units in the Last Place.
spacing : Return the distance between x and the nearest adjacent number.
Notes
-----
An assertion is raised if the following condition is not met::
abs(x - y) <= nulps * spacing(maximum(abs(x), abs(y)))
Examples
--------
>>> x = np.array([1., 1e-10, 1e-20])
>>> eps = np.finfo(x.dtype).eps
>>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x)
>>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x)
Traceback (most recent call last):
...
AssertionError: X and Y are not equal to 1 ULP (max is 2)
"""
__tracebackhide__ = True # Hide traceback for py.test
import numpy as np
ax = np.abs(x)
ay = np.abs(y)
ref = nulp * np.spacing(np.where(ax > ay, ax, ay))
if not np.all(np.abs(x-y) <= ref):
if np.iscomplexobj(x) or np.iscomplexobj(y):
msg = "X and Y are not equal to %d ULP" % nulp
else:
max_nulp = np.max(nulp_diff(x, y))
msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp)
raise AssertionError(msg)
def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
"""
Check that all items of arrays differ in at most N Units in the Last Place.
Parameters
----------
a, b : array_like
Input arrays to be compared.
maxulp : int, optional
The maximum number of units in the last place that elements of `a` and
`b` can differ. Default is 1.
dtype : dtype, optional
Data-type to convert `a` and `b` to if given. Default is None.
Returns
-------
ret : ndarray
Array containing number of representable floating point numbers between
items in `a` and `b`.
Raises
------
AssertionError
If one or more elements differ by more than `maxulp`.
Notes
-----
For computing the ULP difference, this API does not differentiate between
various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000
is zero).
See Also
--------
assert_array_almost_equal_nulp : Compare two arrays relatively to their
spacing.
Examples
--------
>>> a = np.linspace(0., 1., 100)
>>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a)))
"""
__tracebackhide__ = True # Hide traceback for py.test
import numpy as np
ret = nulp_diff(a, b, dtype)
if not np.all(ret <= maxulp):
raise AssertionError("Arrays are not almost equal up to %g "
"ULP (max difference is %g ULP)" %
(maxulp, np.max(ret)))
return ret
def nulp_diff(x, y, dtype=None):
"""For each item in x and y, return the number of representable floating
points between them.
Parameters
----------
x : array_like
first input array
y : array_like
second input array
dtype : dtype, optional
Data-type to convert `x` and `y` to if given. Default is None.
Returns
-------
nulp : array_like
number of representable floating point numbers between each item in x
and y.
Notes
-----
For computing the ULP difference, this API does not differentiate between
various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000
is zero).
Examples
--------
    # By definition, epsilon is the smallest number such that 1 + eps != 1, so
# there should be exactly one ULP between 1 and 1 + eps
    >>> nulp_diff(1, 1 + np.finfo(np.float64).eps)
1.0
"""
import numpy as np
if dtype:
x = np.asarray(x, dtype=dtype)
y = np.asarray(y, dtype=dtype)
else:
x = np.asarray(x)
y = np.asarray(y)
t = np.common_type(x, y)
if np.iscomplexobj(x) or np.iscomplexobj(y):
raise NotImplementedError("_nulp not implemented for complex array")
x = np.array([x], dtype=t)
y = np.array([y], dtype=t)
x[np.isnan(x)] = np.nan
y[np.isnan(y)] = np.nan
if not x.shape == y.shape:
raise ValueError("x and y do not have the same shape: %s - %s" %
(x.shape, y.shape))
def _diff(rx, ry, vdt):
diff = np.asarray(rx-ry, dtype=vdt)
return np.abs(diff)
rx = integer_repr(x)
ry = integer_repr(y)
return _diff(rx, ry, t)
def _integer_repr(x, vdt, comp):
    # Reinterpret the binary representation of the float as sign-magnitude,
    # taking into account the two's-complement representation of negatives.
# See also
# https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
rx = x.view(vdt)
if not (rx.size == 1):
rx[rx < 0] = comp - rx[rx < 0]
else:
if rx < 0:
rx = comp - rx
return rx
def integer_repr(x):
"""Return the signed-magnitude interpretation of the binary representation
of x."""
import numpy as np
if x.dtype == np.float16:
return _integer_repr(x, np.int16, np.int16(-2**15))
elif x.dtype == np.float32:
return _integer_repr(x, np.int32, np.int32(-2**31))
elif x.dtype == np.float64:
return _integer_repr(x, np.int64, np.int64(-2**63))
else:
raise ValueError(f'Unsupported dtype {x.dtype}')
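# Illustrative sketch of the sign-magnitude trick above: once floats are
# mapped onto a monotonic signed-integer scale, the ULP distance between
# two values is a plain integer subtraction. The sample values are
# assumptions chosen for demonstration.
def _example_integer_repr_ulp():
    x = np.array(1.0)
    y = np.array(np.nextafter(1.0, 2.0))  # exactly one ULP above 1.0
    return int(integer_repr(y) - integer_repr(x))  # -> 1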
@contextlib.contextmanager
def _assert_warns_context(warning_class, name=None):
__tracebackhide__ = True # Hide traceback for py.test
with suppress_warnings() as sup:
l = sup.record(warning_class)
yield
if not len(l) > 0:
name_str = f' when calling {name}' if name is not None else ''
raise AssertionError("No warning raised" + name_str)
def assert_warns(warning_class, *args, **kwargs):
"""
Fail unless the given callable throws the specified warning.
A warning of class warning_class should be thrown by the callable when
invoked with arguments args and keyword arguments kwargs.
If a different type of warning is thrown, it will not be caught.
    If called with all arguments other than the warning class omitted, it may
    be used as a context manager:
with assert_warns(SomeWarning):
do_something()
The ability to be used as a context manager is new in NumPy v1.11.0.
.. versionadded:: 1.4.0
Parameters
----------
warning_class : class
The class defining the warning that `func` is expected to throw.
func : callable, optional
Callable to test
*args : Arguments
Arguments for `func`.
**kwargs : Kwargs
Keyword arguments for `func`.
Returns
-------
The value returned by `func`.
Examples
--------
>>> import warnings
>>> def deprecated_func(num):
... warnings.warn("Please upgrade", DeprecationWarning)
... return num*num
>>> with np.testing.assert_warns(DeprecationWarning):
... assert deprecated_func(4) == 16
>>> # or passing a func
>>> ret = np.testing.assert_warns(DeprecationWarning, deprecated_func, 4)
>>> assert ret == 16
"""
if not args:
return _assert_warns_context(warning_class)
func = args[0]
args = args[1:]
with _assert_warns_context(warning_class, name=func.__name__):
return func(*args, **kwargs)
@contextlib.contextmanager
def _assert_no_warnings_context(name=None):
__tracebackhide__ = True # Hide traceback for py.test
with warnings.catch_warnings(record=True) as l:
warnings.simplefilter('always')
yield
if len(l) > 0:
name_str = f' when calling {name}' if name is not None else ''
raise AssertionError(f'Got warnings{name_str}: {l}')
def assert_no_warnings(*args, **kwargs):
"""
Fail if the given callable produces any warnings.
    If called with all arguments omitted, it may be used as a context manager:
with assert_no_warnings():
do_something()
The ability to be used as a context manager is new in NumPy v1.11.0.
.. versionadded:: 1.7.0
Parameters
----------
func : callable
The callable to test.
\\*args : Arguments
Arguments passed to `func`.
\\*\\*kwargs : Kwargs
Keyword arguments passed to `func`.
Returns
-------
The value returned by `func`.
"""
if not args:
return _assert_no_warnings_context()
func = args[0]
args = args[1:]
with _assert_no_warnings_context(name=func.__name__):
return func(*args, **kwargs)
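# Illustrative sketch: in the callable form, assert_no_warnings runs the
# callable with the given arguments and returns its result, failing if any
# warning was emitted along the way.
def _example_assert_no_warnings():
    return assert_no_warnings(int, '42')  # -> 42, no warnings expected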
def _gen_alignment_data(dtype=float32, type='binary', max_size=24):
"""
generator producing data with different alignment and offsets
to test simd vectorization
Parameters
----------
dtype : dtype
data type to produce
type : string
'unary': create data for unary operations, creates one input
and output array
        'binary': create data for binary operations, creates two input
                 arrays and one output array
max_size : integer
maximum size of data to produce
Returns
-------
if type is 'unary' yields one output, one input array and a message
containing information on the data
if type is 'binary' yields one output array, two input array and a message
containing information on the data
"""
ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s'
bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s'
for o in range(3):
for s in range(o + 2, max(o + 3, max_size)):
if type == 'unary':
inp = lambda: arange(s, dtype=dtype)[o:]
out = empty((s,), dtype=dtype)[o:]
yield out, inp(), ufmt % (o, o, s, dtype, 'out of place')
d = inp()
yield d, d, ufmt % (o, o, s, dtype, 'in place')
yield out[1:], inp()[:-1], ufmt % \
(o + 1, o, s - 1, dtype, 'out of place')
yield out[:-1], inp()[1:], ufmt % \
(o, o + 1, s - 1, dtype, 'out of place')
yield inp()[:-1], inp()[1:], ufmt % \
(o, o + 1, s - 1, dtype, 'aliased')
yield inp()[1:], inp()[:-1], ufmt % \
(o + 1, o, s - 1, dtype, 'aliased')
if type == 'binary':
inp1 = lambda: arange(s, dtype=dtype)[o:]
inp2 = lambda: arange(s, dtype=dtype)[o:]
out = empty((s,), dtype=dtype)[o:]
yield out, inp1(), inp2(), bfmt % \
(o, o, o, s, dtype, 'out of place')
d = inp1()
yield d, d, inp2(), bfmt % \
(o, o, o, s, dtype, 'in place1')
d = inp2()
yield d, inp1(), d, bfmt % \
(o, o, o, s, dtype, 'in place2')
yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \
(o + 1, o, o, s - 1, dtype, 'out of place')
yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \
(o, o + 1, o, s - 1, dtype, 'out of place')
yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \
(o, o, o + 1, s - 1, dtype, 'out of place')
yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \
(o + 1, o, o, s - 1, dtype, 'aliased')
yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \
(o, o + 1, o, s - 1, dtype, 'aliased')
yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \
(o, o, o + 1, s - 1, dtype, 'aliased')
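# Illustrative sketch: consuming the generator above the way a SIMD test
# would, iterating over differently offset and aliased views. The dtype and
# size below are assumptions chosen for demonstration.
def _example_alignment_data():
    for out, inp, msg in _gen_alignment_data(dtype=float32, type='unary',
                                             max_size=8):
        assert_(out.size == inp.size, msg)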
class IgnoreException(Exception):
"Ignoring this exception due to disabled feature"
pass
@contextlib.contextmanager
def tempdir(*args, **kwargs):
"""Context manager to provide a temporary test folder.
    All arguments are passed as-is to the underlying tempfile.mkdtemp
    function.
"""
tmpdir = mkdtemp(*args, **kwargs)
try:
yield tmpdir
finally:
shutil.rmtree(tmpdir)
@contextlib.contextmanager
def temppath(*args, **kwargs):
"""Context manager for temporary files.
Context manager that returns the path to a closed temporary file. Its
parameters are the same as for tempfile.mkstemp and are passed directly
to that function. The underlying file is removed when the context is
exited, so it should be closed at that time.
Windows does not allow a temporary file to be opened if it is already
open, so the underlying file must be closed after opening before it
can be opened again.
"""
fd, path = mkstemp(*args, **kwargs)
os.close(fd)
try:
yield path
finally:
os.remove(path)
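# Illustrative sketch: temppath yields the path of an already *closed*
# temporary file, so it can be reopened even on Windows. The suffix and
# payload are assumptions chosen for demonstration.
def _example_temppath():
    with temppath(suffix='.txt') as path:
        with open(path, 'w') as f:
            f.write('payload')
        with open(path) as f:
            return f.read()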
class clear_and_catch_warnings(warnings.catch_warnings):
""" Context manager that resets warning registry for catching warnings
Warnings can be slippery, because, whenever a warning is triggered, Python
adds a ``__warningregistry__`` member to the *calling* module. This makes
it impossible to retrigger the warning in this module, whatever you put in
the warnings filters. This context manager accepts a sequence of `modules`
as a keyword argument to its constructor and:
* stores and removes any ``__warningregistry__`` entries in given `modules`
on entry;
* resets ``__warningregistry__`` to its previous state on exit.
This makes it possible to trigger any warning afresh inside the context
manager without disturbing the state of warnings outside.
For compatibility with Python 3.0, please consider all arguments to be
keyword-only.
Parameters
----------
record : bool, optional
Specifies whether warnings should be captured by a custom
implementation of ``warnings.showwarning()`` and be appended to a list
returned by the context manager. Otherwise None is returned by the
context manager. The objects appended to the list are arguments whose
attributes mirror the arguments to ``showwarning()``.
modules : sequence, optional
Sequence of modules for which to reset warnings registry on entry and
restore on exit. To work correctly, all 'ignore' filters should
filter by one of these modules.
Examples
--------
>>> import warnings
>>> with np.testing.clear_and_catch_warnings(
... modules=[np.core.fromnumeric]):
... warnings.simplefilter('always')
... warnings.filterwarnings('ignore', module='np.core.fromnumeric')
... # do something that raises a warning but ignore those in
... # np.core.fromnumeric
"""
class_modules = ()
def __init__(self, record=False, modules=()):
self.modules = set(modules).union(self.class_modules)
self._warnreg_copies = {}
super().__init__(record=record)
def __enter__(self):
for mod in self.modules:
if hasattr(mod, '__warningregistry__'):
mod_reg = mod.__warningregistry__
self._warnreg_copies[mod] = mod_reg.copy()
mod_reg.clear()
return super().__enter__()
def __exit__(self, *exc_info):
super().__exit__(*exc_info)
for mod in self.modules:
if hasattr(mod, '__warningregistry__'):
mod.__warningregistry__.clear()
if mod in self._warnreg_copies:
mod.__warningregistry__.update(self._warnreg_copies[mod])
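# Illustrative sketch: subclassing to pin the registry-cleared modules once
# via ``class_modules`` instead of passing ``modules=`` at each call site.
# The module choice is an assumption chosen for demonstration.
class _example_clear_np_warnings(clear_and_catch_warnings):
    class_modules = (np.core.fromnumeric,)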
class suppress_warnings:
"""
Context manager and decorator doing much the same as
``warnings.catch_warnings``.
However, it also provides a filter mechanism to work around
https://bugs.python.org/issue4180.
This bug causes Python before 3.4 to not reliably show warnings again
after they have been ignored once (even within catch_warnings). It
means that no "ignore" filter can be used easily, since following
tests might need to see the warning. Additionally it allows easier
specificity for testing warnings and can be nested.
Parameters
----------
forwarding_rule : str, optional
One of "always", "once", "module", or "location". Analogous to
the usual warnings module filter mode, it is useful to reduce
        noise mostly on the outermost level. Unsuppressed and unrecorded
        warnings will be forwarded based on this rule. Defaults to "always".
        "location" is equivalent to the warnings "default", matching by the
        exact location the warning originated from.
Notes
-----
Filters added inside the context manager will be discarded again
when leaving it. Upon entering all filters defined outside a
context will be applied automatically.
When a recording filter is added, matching warnings are stored in the
``log`` attribute as well as in the list returned by ``record``.
If filters are added and the ``module`` keyword is given, the
warning registry of this module will additionally be cleared when
applying it, entering the context, or exiting it. This could cause
warnings to appear a second time after leaving the context if they
were configured to be printed once (default) and were already
printed before the context was entered.
Nesting this context manager will work as expected when the
forwarding rule is "always" (default). Unfiltered and unrecorded
warnings will be passed out and be matched by the outer level.
    On the outermost level they will be printed (or caught by another
warnings context). The forwarding rule argument can modify this
behaviour.
Like ``catch_warnings`` this context manager is not threadsafe.
Examples
--------
With a context manager::
with np.testing.suppress_warnings() as sup:
sup.filter(DeprecationWarning, "Some text")
sup.filter(module=np.ma.core)
log = sup.record(FutureWarning, "Does this occur?")
command_giving_warnings()
# The FutureWarning was given once, the filtered warnings were
# ignored. All other warnings abide outside settings (may be
# printed/error)
assert_(len(log) == 1)
assert_(len(sup.log) == 1) # also stored in log attribute
Or as a decorator::
sup = np.testing.suppress_warnings()
sup.filter(module=np.ma.core) # module must match exactly
@sup
def some_function():
# do something which causes a warning in np.ma.core
pass
"""
def __init__(self, forwarding_rule="always"):
self._entered = False
        # Suppressions are either instance-wide or defined inside one with block:
self._suppressions = []
if forwarding_rule not in {"always", "module", "once", "location"}:
raise ValueError("unsupported forwarding rule.")
self._forwarding_rule = forwarding_rule
def _clear_registries(self):
if hasattr(warnings, "_filters_mutated"):
# clearing the registry should not be necessary on new pythons,
# instead the filters should be mutated.
warnings._filters_mutated()
return
        # Simply clear the registry; this should normally be harmless.
        # Note that on new Pythons it would be invalidated anyway.
for module in self._tmp_modules:
if hasattr(module, "__warningregistry__"):
module.__warningregistry__.clear()
def _filter(self, category=Warning, message="", module=None, record=False):
if record:
            record = []  # the log in which to store warnings
else:
record = None
if self._entered:
if module is None:
warnings.filterwarnings(
"always", category=category, message=message)
else:
module_regex = module.__name__.replace('.', r'\.') + '$'
warnings.filterwarnings(
"always", category=category, message=message,
module=module_regex)
self._tmp_modules.add(module)
self._clear_registries()
self._tmp_suppressions.append(
(category, message, re.compile(message, re.I), module, record))
else:
self._suppressions.append(
(category, message, re.compile(message, re.I), module, record))
return record
def filter(self, category=Warning, message="", module=None):
"""
Add a new suppressing filter or apply it if the state is entered.
Parameters
----------
category : class, optional
Warning class to filter
message : string, optional
Regular expression matching the warning message.
module : module, optional
Module to filter for. Note that the module (and its file)
must match exactly and cannot be a submodule. This may make
it unreliable for external modules.
Notes
-----
When added within a context, filters are only added inside
the context and will be forgotten when the context is exited.
"""
self._filter(category=category, message=message, module=module,
record=False)
def record(self, category=Warning, message="", module=None):
"""
Append a new recording filter or apply it if the state is entered.
All warnings matching will be appended to the ``log`` attribute.
Parameters
----------
category : class, optional
Warning class to filter
message : string, optional
Regular expression matching the warning message.
module : module, optional
Module to filter for. Note that the module (and its file)
must match exactly and cannot be a submodule. This may make
it unreliable for external modules.
Returns
-------
log : list
A list which will be filled with all matched warnings.
Notes
-----
When added within a context, filters are only added inside
the context and will be forgotten when the context is exited.
"""
return self._filter(category=category, message=message, module=module,
record=True)
def __enter__(self):
if self._entered:
raise RuntimeError("cannot enter suppress_warnings twice.")
self._orig_show = warnings.showwarning
self._filters = warnings.filters
warnings.filters = self._filters[:]
self._entered = True
self._tmp_suppressions = []
self._tmp_modules = set()
self._forwarded = set()
self.log = [] # reset global log (no need to keep same list)
for cat, mess, _, mod, log in self._suppressions:
if log is not None:
del log[:] # clear the log
if mod is None:
warnings.filterwarnings(
"always", category=cat, message=mess)
else:
module_regex = mod.__name__.replace('.', r'\.') + '$'
warnings.filterwarnings(
"always", category=cat, message=mess,
module=module_regex)
self._tmp_modules.add(mod)
warnings.showwarning = self._showwarning
self._clear_registries()
return self
def __exit__(self, *exc_info):
warnings.showwarning = self._orig_show
warnings.filters = self._filters
self._clear_registries()
self._entered = False
del self._orig_show
del self._filters
def _showwarning(self, message, category, filename, lineno,
*args, use_warnmsg=None, **kwargs):
for cat, _, pattern, mod, rec in (
self._suppressions + self._tmp_suppressions)[::-1]:
if (issubclass(category, cat) and
pattern.match(message.args[0]) is not None):
if mod is None:
# Message and category match, either recorded or ignored
if rec is not None:
msg = WarningMessage(message, category, filename,
lineno, **kwargs)
self.log.append(msg)
rec.append(msg)
return
# Use startswith, because warnings strips the c or o from
# .pyc/.pyo files.
elif mod.__file__.startswith(filename):
# The message and module (filename) match
if rec is not None:
msg = WarningMessage(message, category, filename,
lineno, **kwargs)
self.log.append(msg)
rec.append(msg)
return
# There is no filter in place, so pass to the outside handler
# unless we should only pass it once
if self._forwarding_rule == "always":
if use_warnmsg is None:
self._orig_show(message, category, filename, lineno,
*args, **kwargs)
else:
self._orig_showmsg(use_warnmsg)
return
if self._forwarding_rule == "once":
signature = (message.args, category)
elif self._forwarding_rule == "module":
signature = (message.args, category, filename)
elif self._forwarding_rule == "location":
signature = (message.args, category, filename, lineno)
if signature in self._forwarded:
return
self._forwarded.add(signature)
if use_warnmsg is None:
self._orig_show(message, category, filename, lineno, *args,
**kwargs)
else:
self._orig_showmsg(use_warnmsg)
def __call__(self, func):
"""
Function decorator to apply certain suppressions to a whole
function.
"""
@wraps(func)
def new_func(*args, **kwargs):
with self:
return func(*args, **kwargs)
return new_func
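# Illustrative sketch, not part of the original module: how a nested
# suppress_warnings interacts with an enclosing one under the default
# "always" forwarding rule. The warning messages below are invented.
def _demo_nested_suppress_warnings():
    outer = suppress_warnings()
    outer.filter(UserWarning, "outer only")
    with outer:
        with suppress_warnings() as inner:
            log = inner.record(UserWarning, "recorded inside")
            warnings.warn("recorded inside", UserWarning)
            # Unfiltered here, so it is forwarded out and suppressed
            # by the enclosing context instead.
            warnings.warn("outer only", UserWarning)
        assert len(log) == 1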
@contextlib.contextmanager
def _assert_no_gc_cycles_context(name=None):
__tracebackhide__ = True # Hide traceback for py.test
# not meaningful to test if there is no refcounting
if not HAS_REFCOUNT:
yield
return
assert_(gc.isenabled())
gc.disable()
gc_debug = gc.get_debug()
try:
for i in range(100):
if gc.collect() == 0:
break
else:
raise RuntimeError(
"Unable to fully collect garbage - perhaps a __del__ method "
"is creating more reference cycles?")
gc.set_debug(gc.DEBUG_SAVEALL)
yield
# gc.collect returns the number of unreachable objects in cycles that
# were found -- we are checking that no cycles were created in the context
n_objects_in_cycles = gc.collect()
objects_in_cycles = gc.garbage[:]
finally:
del gc.garbage[:]
gc.set_debug(gc_debug)
gc.enable()
if n_objects_in_cycles:
name_str = f' when calling {name}' if name is not None else ''
raise AssertionError(
"Reference cycles were found{}: {} objects were collected, "
"of which {} are shown below:{}"
.format(
name_str,
n_objects_in_cycles,
len(objects_in_cycles),
''.join(
"\n {} object with id={}:\n {}".format(
type(o).__name__,
id(o),
pprint.pformat(o).replace('\n', '\n ')
) for o in objects_in_cycles
)
)
)
def assert_no_gc_cycles(*args, **kwargs):
"""
Fail if the given callable produces any reference cycles.
If called with all arguments omitted, may be used as a context manager:
with assert_no_gc_cycles():
do_something()
.. versionadded:: 1.15.0
Parameters
----------
func : callable
The callable to test.
\\*args : Arguments
Arguments passed to `func`.
\\*\\*kwargs : Kwargs
Keyword arguments passed to `func`.
Returns
-------
Nothing. The result is deliberately discarded to ensure that all cycles
are found.
"""
if not args:
return _assert_no_gc_cycles_context()
func = args[0]
args = args[1:]
with _assert_no_gc_cycles_context(name=func.__name__):
func(*args, **kwargs)
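# Usage sketch, not part of the module: both calling conventions of
# assert_no_gc_cycles shown on a cycle-free callable.
def _demo_assert_no_gc_cycles():
    def makes_no_cycle():
        return [1, 2, 3]  # plain list, no self-references
    # Call form: the callable is invoked and its result discarded.
    assert_no_gc_cycles(makes_no_cycle)
    # Context-manager form: the body must not create reference cycles.
    with assert_no_gc_cycles():
        makes_no_cycle()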
def break_cycles():
"""
Break reference cycles by calling gc.collect
Objects can call other objects' methods (for instance, another object's
__del__) inside their own __del__. On PyPy, the interpreter only runs
between calls to gc.collect, so multiple calls are needed to completely
release all cycles.
"""
gc.collect()
if IS_PYPY:
# a few more, just to make sure all the finalizers are called
gc.collect()
gc.collect()
gc.collect()
gc.collect()
def requires_memory(free_bytes):
"""Decorator to skip a test if not enough memory is available"""
import pytest
def decorator(func):
@wraps(func)
def wrapper(*a, **kw):
msg = check_free_memory(free_bytes)
if msg is not None:
pytest.skip(msg)
try:
return func(*a, **kw)
except MemoryError:
# Probably ran out of memory regardless: don't regard as failure
pytest.xfail("MemoryError raised")
return wrapper
return decorator
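# Usage sketch with a made-up test body; pytest must be importable,
# since the decorator calls pytest.skip/pytest.xfail internally.
def _demo_requires_memory():
    @requires_memory(free_bytes=2 * 1024**3)  # skip unless ~2 GiB free
    def check_big_allocation():
        import numpy as _np
        a = _np.zeros(2**20, dtype=_np.float64)  # ~8 MiB, well in budget
        assert a.nbytes == 2**23
    check_big_allocation()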
def check_free_memory(free_bytes):
"""
Check whether `free_bytes` amount of memory is currently free.
Returns: None if enough memory available, otherwise error message
"""
env_var = 'NPY_AVAILABLE_MEM'
env_value = os.environ.get(env_var)
if env_value is not None:
try:
mem_free = _parse_size(env_value)
except ValueError as exc:
raise ValueError(f'Invalid environment variable {env_var}: {exc}')
msg = (f'{free_bytes/1e9} GB memory required, but environment variable '
f'NPY_AVAILABLE_MEM={env_value} set')
else:
mem_free = _get_mem_available()
if mem_free is None:
msg = ("Could not determine available memory; set NPY_AVAILABLE_MEM "
"environment variable (e.g. NPY_AVAILABLE_MEM=16GB) to run "
"the test.")
mem_free = -1
else:
msg = f'{free_bytes/1e9} GB memory required, but {mem_free/1e9} GB available'
return msg if mem_free < free_bytes else None
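# Behavioural sketch, not part of the module: the NPY_AVAILABLE_MEM
# environment variable short-circuits the platform detection above.
def _demo_check_free_memory():
    os.environ['NPY_AVAILABLE_MEM'] = '4 GB'
    try:
        assert check_free_memory(1e9) is None       # 4 GB "free" >= 1 GB
        assert check_free_memory(8e9) is not None   # more than advertised
    finally:
        del os.environ['NPY_AVAILABLE_MEM']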
def _parse_size(size_str):
"""Convert memory size strings ('12 GB' etc.) to float"""
suffixes = {'': 1, 'b': 1,
'k': 1000, 'm': 1000**2, 'g': 1000**3, 't': 1000**4,
'kb': 1000, 'mb': 1000**2, 'gb': 1000**3, 'tb': 1000**4,
'kib': 1024, 'mib': 1024**2, 'gib': 1024**3, 'tib': 1024**4}
size_re = re.compile(r'^\s*(\d+|\d+\.\d+)\s*({0})\s*$'.format(
'|'.join(suffixes.keys())), re.I)
m = size_re.match(size_str.lower())
if not m or m.group(2) not in suffixes:
raise ValueError(f'value {size_str!r} not a valid size')
return int(float(m.group(1)) * suffixes[m.group(2)])
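# A few conversions implied by the suffix table above (demo only):
def _demo_parse_size():
    assert _parse_size('12 GB') == 12 * 1000**3
    assert _parse_size('12 GiB') == 12 * 1024**3
    assert _parse_size('500k') == 500 * 1000
    assert _parse_size('1.5 mb') == int(1.5 * 1000**2)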
def _get_mem_available():
"""Return available memory in bytes, or None if unknown."""
try:
import psutil
return psutil.virtual_memory().available
except (ImportError, AttributeError):
pass
if sys.platform.startswith('linux'):
info = {}
with open('/proc/meminfo', 'r') as f:
for line in f:
p = line.split()
info[p[0].strip(':').lower()] = int(p[1]) * 1024
if 'memavailable' in info:
# Linux >= 3.14
return info['memavailable']
else:
return info['memfree'] + info['cached']
return None
def _no_tracing(func):
"""
Decorator to temporarily turn off tracing for the duration of a test.
Needed in tests that check refcounting, otherwise the tracing itself
influences the refcounts
"""
if not hasattr(sys, 'gettrace'):
return func
else:
@wraps(func)
def wrapper(*args, **kwargs):
original_trace = sys.gettrace()
try:
sys.settrace(None)
return func(*args, **kwargs)
finally:
sys.settrace(original_trace)
return wrapper
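# Minimal behavioural sketch (CPython only, not part of the module):
# inside a _no_tracing-wrapped call, no trace function is active.
@_no_tracing
def _demo_no_tracing():
    assert sys.gettrace() is None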
def _get_glibc_version():
try:
ver = os.confstr('CS_GNU_LIBC_VERSION').rsplit(' ')[1]
    except Exception:
ver = '0.0'
return ver
_glibcver = _get_glibc_version()
_glibc_older_than = lambda x: (_glibcver != '0.0' and _glibcver < x)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/testing/_private/decorators.py
"""
Decorators for labeling and modifying behavior of test objects.
Decorators that merely return a modified version of the original
function object are straightforward. Decorators that return a new
function object need to use
::
nose.tools.make_decorator(original_function)(decorator)
in returning the decorator, in order to preserve meta-data such as
function name, setup and teardown functions and so on - see
``nose.tools`` for more information.
"""
import collections.abc
import warnings
from .utils import SkipTest, assert_warns, HAS_REFCOUNT
__all__ = ['slow', 'setastest', 'skipif', 'knownfailureif', 'deprecated',
'parametrize', '_needs_refcount',]
def slow(t):
"""
.. deprecated:: 1.21
This decorator is retained for compatibility with the nose testing framework, which is being phased out.
Please use the nose2 or pytest frameworks instead.
Label a test as 'slow'.
The exact definition of a slow test is obviously both subjective and
hardware-dependent, but in general any individual test that requires more
than a second or two should be labeled as slow (the whole suite consists of
thousands of tests, so even a second is significant).
Parameters
----------
t : callable
The test to label as slow.
Returns
-------
t : callable
The decorated test `t`.
Examples
--------
The `numpy.testing` module includes ``import decorators as dec``.
A test can be decorated as slow like this::
from numpy.testing import *
@dec.slow
def test_big(self):
print('Big, slow test')
"""
# Numpy 1.21, 2020-12-20
warnings.warn('the np.testing.dec decorators are included for nose support, and are '
'deprecated since NumPy v1.21. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2)
t.slow = True
return t
def setastest(tf=True):
"""
.. deprecated:: 1.21
This decorator is retained for compatibility with the nose testing framework, which is being phased out.
Please use the nose2 or pytest frameworks instead.
Signals to nose that this function is or is not a test.
Parameters
----------
tf : bool
If True, specifies that the decorated callable is a test.
If False, specifies that the decorated callable is not a test.
Default is True.
Notes
-----
This decorator can't use the nose namespace, because it can be
called from a non-test module. See also ``istest`` and ``nottest`` in
``nose.tools``.
Examples
--------
`setastest` can be used in the following way::
from numpy.testing import dec
@dec.setastest(False)
def func_with_test_in_name(arg1, arg2):
pass
"""
# Numpy 1.21, 2020-12-20
warnings.warn('the np.testing.dec decorators are included for nose support, and are '
'deprecated since NumPy v1.21. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2)
def set_test(t):
t.__test__ = tf
return t
return set_test
def skipif(skip_condition, msg=None):
"""
.. deprecated:: 1.21
This decorator is retained for compatibility with the nose testing framework, which is being phased out.
Please use the nose2 or pytest frameworks instead.
    Make a function raise a SkipTest exception if a given condition is true.
If the condition is a callable, it is used at runtime to dynamically
make the decision. This is useful for tests that may require costly
imports, to delay the cost until the test suite is actually executed.
Parameters
----------
skip_condition : bool or callable
Flag to determine whether to skip the decorated test.
msg : str, optional
Message to give on raising a SkipTest exception. Default is None.
Returns
-------
decorator : function
Decorator which, when applied to a function, causes SkipTest
to be raised when `skip_condition` is True, and the function
to be called normally otherwise.
Notes
-----
The decorator itself is decorated with the ``nose.tools.make_decorator``
function in order to transmit function name, and various other metadata.
"""
def skip_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
# Numpy 1.21, 2020-12-20
warnings.warn('the np.testing.dec decorators are included for nose support, and are '
'deprecated since NumPy v1.21. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2)
# Allow for both boolean or callable skip conditions.
if isinstance(skip_condition, collections.abc.Callable):
skip_val = lambda: skip_condition()
else:
skip_val = lambda: skip_condition
        def get_msg(func, msg=None):
"""Skip message with information about function being skipped."""
if msg is None:
out = 'Test skipped due to test condition'
else:
out = msg
return f'Skipping test: {func.__name__}: {out}'
# We need to define *two* skippers because Python doesn't allow both
# return with value and yield inside the same function.
def skipper_func(*args, **kwargs):
"""Skipper for normal test functions."""
if skip_val():
raise SkipTest(get_msg(f, msg))
else:
return f(*args, **kwargs)
def skipper_gen(*args, **kwargs):
"""Skipper for test generators."""
if skip_val():
raise SkipTest(get_msg(f, msg))
else:
yield from f(*args, **kwargs)
# Choose the right skipper to use when building the actual decorator.
if nose.util.isgenerator(f):
skipper = skipper_gen
else:
skipper = skipper_func
return nose.tools.make_decorator(f)(skipper)
return skip_decorator
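# Hypothetical usage sketch; requires the legacy ``nose`` package and
# emits the DeprecationWarning documented above when the decorator runs.
def _demo_skipif_usage():
    import sys
    @skipif(sys.platform == 'emscripten', msg="not supported here")
    def check_something():
        return 42
    return check_something()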
def knownfailureif(fail_condition, msg=None):
"""
.. deprecated:: 1.21
This decorator is retained for compatibility with the nose testing framework, which is being phased out.
Please use the nose2 or pytest frameworks instead.
    Make a function raise a KnownFailureException if a given condition is true.
If the condition is a callable, it is used at runtime to dynamically
make the decision. This is useful for tests that may require costly
imports, to delay the cost until the test suite is actually executed.
Parameters
----------
fail_condition : bool or callable
Flag to determine whether to mark the decorated test as a known
failure (if True) or not (if False).
msg : str, optional
Message to give on raising a KnownFailureException exception.
Default is None.
Returns
-------
decorator : function
Decorator, which, when applied to a function, causes
KnownFailureException to be raised when `fail_condition` is True,
and the function to be called normally otherwise.
Notes
-----
The decorator itself is decorated with the ``nose.tools.make_decorator``
function in order to transmit function name, and various other metadata.
"""
# Numpy 1.21, 2020-12-20
warnings.warn('the np.testing.dec decorators are included for nose support, and are '
'deprecated since NumPy v1.21. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2)
if msg is None:
msg = 'Test skipped due to known failure'
# Allow for both boolean or callable known failure conditions.
if isinstance(fail_condition, collections.abc.Callable):
fail_val = lambda: fail_condition()
else:
fail_val = lambda: fail_condition
def knownfail_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
from .noseclasses import KnownFailureException
def knownfailer(*args, **kwargs):
if fail_val():
raise KnownFailureException(msg)
else:
return f(*args, **kwargs)
return nose.tools.make_decorator(f)(knownfailer)
return knownfail_decorator
def deprecated(conditional=True):
"""
.. deprecated:: 1.21
This decorator is retained for compatibility with the nose testing framework, which is being phased out.
Please use the nose2 or pytest frameworks instead.
Filter deprecation warnings while running the test suite.
This decorator can be used to filter DeprecationWarning's, to avoid
printing them during the test suite run, while checking that the test
actually raises a DeprecationWarning.
Parameters
----------
conditional : bool or callable, optional
Flag to determine whether to mark test as deprecated or not. If the
condition is a callable, it is used at runtime to dynamically make the
decision. Default is True.
Returns
-------
decorator : function
The `deprecated` decorator itself.
Notes
-----
.. versionadded:: 1.4.0
"""
def deprecate_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
# Numpy 1.21, 2020-12-20
warnings.warn('the np.testing.dec decorators are included for nose support, and are '
'deprecated since NumPy v1.21. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2)
def _deprecated_imp(*args, **kwargs):
# Poor man's replacement for the with statement
with assert_warns(DeprecationWarning):
f(*args, **kwargs)
if isinstance(conditional, collections.abc.Callable):
cond = conditional()
else:
cond = conditional
if cond:
return nose.tools.make_decorator(f)(_deprecated_imp)
else:
return f
return deprecate_decorator
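# Hypothetical usage sketch (requires legacy ``nose``): the wrapped
# test must itself emit a DeprecationWarning, which is then verified
# and silenced by assert_warns.
def _demo_deprecated_usage():
    @deprecated()
    def check_old_api():
        warnings.warn("old API", DeprecationWarning)
    check_old_api()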
def parametrize(vars, input):
"""
.. deprecated:: 1.21
This decorator is retained for compatibility with the nose testing framework, which is being phased out.
Please use the nose2 or pytest frameworks instead.
Pytest compatibility class. This implements the simplest level of
pytest.mark.parametrize for use in nose as an aid in making the transition
to pytest. It achieves that by adding a dummy var parameter and ignoring
the doc_func parameter of the base class. It does not support variable
substitution by name, nor does it support nesting or classes. See the
pytest documentation for usage.
.. versionadded:: 1.14.0
"""
from .parameterized import parameterized
# Numpy 1.21, 2020-12-20
warnings.warn('the np.testing.dec decorators are included for nose support, and are '
'deprecated since NumPy v1.21. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2)
return parameterized(input)
_needs_refcount = skipif(not HAS_REFCOUNT, "python has no sys.getrefcount")
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/testing/_private/extbuild.py
"""
Build a c-extension module on-the-fly in tests.
See build_and_import_extensions for usage hints
"""
import os
import pathlib
import sys
import sysconfig
__all__ = ['build_and_import_extension', 'compile_extension_module']
def build_and_import_extension(
modname, functions, *, prologue="", build_dir=None,
include_dirs=[], more_init=""):
"""
    Build and import a C-extension module `modname` from a list of
    function fragments `functions`.
Parameters
----------
functions : list of fragments
Each fragment is a sequence of func_name, calling convention, snippet.
prologue : string
Code to precede the rest, usually extra ``#include`` or ``#define``
macros.
build_dir : pathlib.Path
Where to build the module, usually a temporary directory
include_dirs : list
Extra directories to find include files when compiling
more_init : string
Code to appear in the module PyMODINIT_FUNC
Returns
-------
out: module
The module will have been loaded and is ready for use
Examples
--------
>>> functions = [("test_bytes", "METH_O", \"\"\"
        if (!PyBytes_Check(args)) {
Py_RETURN_FALSE;
}
Py_RETURN_TRUE;
\"\"\")]
>>> mod = build_and_import_extension("testme", functions)
>>> assert not mod.test_bytes(u'abc')
>>> assert mod.test_bytes(b'abc')
"""
from distutils.errors import CompileError
body = prologue + _make_methods(functions, modname)
init = """PyObject *mod = PyModule_Create(&moduledef);
"""
if not build_dir:
build_dir = pathlib.Path('.')
if more_init:
init += """#define INITERROR return NULL
"""
init += more_init
init += "\nreturn mod;"
source_string = _make_source(modname, init, body)
try:
mod_so = compile_extension_module(
modname, build_dir, include_dirs, source_string)
except CompileError as e:
# shorten the exception chain
raise RuntimeError(f"could not compile in {build_dir}:") from e
import importlib.util
spec = importlib.util.spec_from_file_location(modname, mod_so)
foo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(foo)
return foo
def compile_extension_module(
name, builddir, include_dirs,
source_string, libraries=[], library_dirs=[]):
"""
Build an extension module and return the filename of the resulting
native code file.
Parameters
----------
name : string
name of the module, possibly including dots if it is a module inside a
package.
builddir : pathlib.Path
Where to build the module, usually a temporary directory
include_dirs : list
Extra directories to find include files when compiling
libraries : list
Libraries to link into the extension module
library_dirs: list
Where to find the libraries, ``-L`` passed to the linker
"""
modname = name.split('.')[-1]
dirname = builddir / name
dirname.mkdir(exist_ok=True)
cfile = _convert_str_to_file(source_string, dirname)
include_dirs = include_dirs + [sysconfig.get_config_var('INCLUDEPY')]
return _c_compile(
cfile, outputfilename=dirname / modname,
include_dirs=include_dirs, libraries=[], library_dirs=[],
)
def _convert_str_to_file(source, dirname):
"""Helper function to create a file ``source.c`` in `dirname` that contains
the string in `source`. Returns the file name
"""
filename = dirname / 'source.c'
with filename.open('w') as f:
f.write(str(source))
return filename
def _make_methods(functions, modname):
""" Turns the name, signature, code in functions into complete functions
and lists them in a methods_table. Then turns the methods_table into a
``PyMethodDef`` structure and returns the resulting code fragment ready
for compilation
"""
methods_table = []
codes = []
for funcname, flags, code in functions:
cfuncname = "%s_%s" % (modname, funcname)
if 'METH_KEYWORDS' in flags:
signature = '(PyObject *self, PyObject *args, PyObject *kwargs)'
else:
signature = '(PyObject *self, PyObject *args)'
methods_table.append(
"{\"%s\", (PyCFunction)%s, %s}," % (funcname, cfuncname, flags))
func_code = """
static PyObject* {cfuncname}{signature}
{{
{code}
}}
""".format(cfuncname=cfuncname, signature=signature, code=code)
codes.append(func_code)
body = "\n".join(codes) + """
static PyMethodDef methods[] = {
%(methods)s
{ NULL }
};
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"%(modname)s", /* m_name */
NULL, /* m_doc */
-1, /* m_size */
methods, /* m_methods */
};
""" % dict(methods='\n'.join(methods_table), modname=modname)
return body
def _make_source(name, init, body):
""" Combines the code fragments into source code ready to be compiled
"""
code = """
#include <Python.h>
%(body)s
PyMODINIT_FUNC
PyInit_%(name)s(void) {
%(init)s
}
""" % dict(
name=name, init=init, body=body,
)
return code
def _c_compile(cfile, outputfilename, include_dirs=[], libraries=[],
library_dirs=[]):
if sys.platform == 'win32':
compile_extra = ["/we4013"]
link_extra = ["/LIBPATH:" + os.path.join(sys.base_prefix, 'libs')]
elif sys.platform.startswith('linux'):
compile_extra = [
"-O0", "-g", "-Werror=implicit-function-declaration", "-fPIC"]
link_extra = None
else:
compile_extra = link_extra = None
if sys.platform == 'win32':
link_extra = link_extra + ['/DEBUG'] # generate .pdb file
if sys.platform == 'darwin':
# support Fink & Darwinports
for s in ('/sw/', '/opt/local/'):
if (s + 'include' not in include_dirs
and os.path.exists(s + 'include')):
include_dirs.append(s + 'include')
if s + 'lib' not in library_dirs and os.path.exists(s + 'lib'):
library_dirs.append(s + 'lib')
outputfilename = outputfilename.with_suffix(get_so_suffix())
saved_environ = os.environ.copy()
try:
build(
cfile, outputfilename,
compile_extra, link_extra,
include_dirs, libraries, library_dirs)
finally:
        # workaround for a distutils bug where some env vars can
        # become longer and longer every time distutils is used
for key, value in saved_environ.items():
if os.environ.get(key) != value:
os.environ[key] = value
return outputfilename
def build(cfile, outputfilename, compile_extra, link_extra,
include_dirs, libraries, library_dirs):
"cd into the directory where the cfile is, use distutils to build"
from numpy.distutils.ccompiler import new_compiler
compiler = new_compiler(force=1, verbose=2)
compiler.customize('')
objects = []
old = os.getcwd()
os.chdir(cfile.parent)
try:
res = compiler.compile(
[str(cfile.name)],
include_dirs=include_dirs,
extra_preargs=compile_extra
)
objects += [str(cfile.parent / r) for r in res]
finally:
os.chdir(old)
compiler.link_shared_object(
objects, str(outputfilename),
libraries=libraries,
extra_preargs=link_extra,
library_dirs=library_dirs)
def get_so_suffix():
ret = sysconfig.get_config_var('EXT_SUFFIX')
assert ret
return ret
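# End-to-end sketch, not part of the module: needs a working C
# compiler and distutils. Module and function names are invented.
def _demo_build_and_import():
    import tempfile
    functions = [("double_it", "METH_O", """
        long v = PyLong_AsLong(args);
        if (v == -1 && PyErr_Occurred()) return NULL;
        return PyLong_FromLong(2 * v);
    """)]
    with tempfile.TemporaryDirectory() as tmp:
        mod = build_and_import_extension(
            "demomod", functions, build_dir=pathlib.Path(tmp))
        assert mod.double_it(21) == 42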
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/testing/_private/nosetester.py
"""
Nose test running.
This module implements ``test()`` and ``bench()`` functions for NumPy modules.
"""
import os
import sys
import warnings
import numpy as np
from .utils import import_nose, suppress_warnings
__all__ = ['get_package_name', 'run_module_suite', 'NoseTester',
           '_numpy_tester', 'import_nose', 'suppress_warnings']
def get_package_name(filepath):
"""
Given a path where a package is installed, determine its name.
Parameters
----------
filepath : str
Path to a file. If the determination fails, "numpy" is returned.
Examples
--------
>>> np.testing.nosetester.get_package_name('nonsense')
'numpy'
"""
fullpath = filepath[:]
pkg_name = []
while 'site-packages' in filepath or 'dist-packages' in filepath:
filepath, p2 = os.path.split(filepath)
if p2 in ('site-packages', 'dist-packages'):
break
pkg_name.append(p2)
# if package name determination failed, just default to numpy/scipy
if not pkg_name:
if 'scipy' in fullpath:
return 'scipy'
else:
return 'numpy'
# otherwise, reverse to get correct order and return
pkg_name.reverse()
# don't include the outer egg directory
if pkg_name[0].endswith('.egg'):
pkg_name.pop(0)
return '.'.join(pkg_name)
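# Path-parsing sketch with made-up install locations (demo only):
def _demo_get_package_name():
    path = '/usr/lib/python3/site-packages/numpy/core'
    assert get_package_name(path) == 'numpy.core'
    assert get_package_name('/tmp/nonsense') == 'numpy'  # fallback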
def run_module_suite(file_to_run=None, argv=None):
"""
Run a test module.
Equivalent to calling ``$ nosetests <argv> <file_to_run>`` from
the command line
Parameters
----------
file_to_run : str, optional
Path to test module, or None.
By default, run the module from which this function is called.
argv : list of strings
Arguments to be passed to the nose test runner. ``argv[0]`` is
ignored. All command line arguments accepted by ``nosetests``
will work. If it is the default value None, sys.argv is used.
.. versionadded:: 1.9.0
Examples
--------
Adding the following::
if __name__ == "__main__" :
run_module_suite(argv=sys.argv)
at the end of a test module will run the tests when that module is
called in the python interpreter.
Alternatively, calling::
>>> run_module_suite(file_to_run="numpy/tests/test_matlib.py") # doctest: +SKIP
from an interpreter will run all the test routine in 'test_matlib.py'.
"""
if file_to_run is None:
f = sys._getframe(1)
file_to_run = f.f_locals.get('__file__', None)
if file_to_run is None:
raise AssertionError
if argv is None:
argv = sys.argv + [file_to_run]
else:
argv = argv + [file_to_run]
nose = import_nose()
from .noseclasses import KnownFailurePlugin
nose.run(argv=argv, addplugins=[KnownFailurePlugin()])
class NoseTester:
"""
Nose test runner.
This class is made available as numpy.testing.Tester, and a test function
is typically added to a package's __init__.py like so::
from numpy.testing import Tester
test = Tester().test
Calling this test function finds and runs all tests associated with the
package and all its sub-packages.
Attributes
----------
package_path : str
Full path to the package to test.
package_name : str
Name of the package to test.
Parameters
----------
package : module, str or None, optional
The package to test. If a string, this should be the full path to
the package. If None (default), `package` is set to the module from
which `NoseTester` is initialized.
raise_warnings : None, str or sequence of warnings, optional
This specifies which warnings to configure as 'raise' instead
of being shown once during the test execution. Valid strings are:
- "develop" : equals ``(Warning,)``
- "release" : equals ``()``, don't raise on any warnings.
Default is "release".
depth : int, optional
If `package` is None, then this can be used to initialize from the
module of the caller of (the caller of (...)) the code that
initializes `NoseTester`. Default of 0 means the module of the
immediate caller; higher values are useful for utility routines that
want to initialize `NoseTester` objects on behalf of other code.
"""
def __init__(self, package=None, raise_warnings="release", depth=0,
check_fpu_mode=False):
# Back-compat: 'None' used to mean either "release" or "develop"
# depending on whether this was a release or develop version of
# numpy. Those semantics were fine for testing numpy, but not so
# helpful for downstream projects like scipy that use
# numpy.testing. (They want to set this based on whether *they* are a
# release or develop version, not whether numpy is.) So we continue to
# accept 'None' for back-compat, but it's now just an alias for the
# default "release".
if raise_warnings is None:
raise_warnings = "release"
package_name = None
if package is None:
f = sys._getframe(1 + depth)
package_path = f.f_locals.get('__file__', None)
if package_path is None:
raise AssertionError
package_path = os.path.dirname(package_path)
package_name = f.f_locals.get('__name__', None)
elif isinstance(package, type(os)):
package_path = os.path.dirname(package.__file__)
package_name = getattr(package, '__name__', None)
else:
package_path = str(package)
self.package_path = package_path
# Find the package name under test; this name is used to limit coverage
# reporting (if enabled).
if package_name is None:
package_name = get_package_name(package_path)
self.package_name = package_name
# Set to "release" in constructor in maintenance branches.
self.raise_warnings = raise_warnings
# Whether to check for FPU mode changes
self.check_fpu_mode = check_fpu_mode
def _test_argv(self, label, verbose, extra_argv):
''' Generate argv for nosetest command
Parameters
----------
label : {'fast', 'full', '', attribute identifier}, optional
see ``test`` docstring
verbose : int, optional
Verbosity value for test outputs, in the range 1-10. Default is 1.
extra_argv : list, optional
List with any extra arguments to pass to nosetests.
Returns
-------
argv : list
command line arguments that will be passed to nose
'''
argv = [__file__, self.package_path, '-s']
if label and label != 'full':
if not isinstance(label, str):
raise TypeError('Selection label should be a string')
if label == 'fast':
label = 'not slow'
argv += ['-A', label]
argv += ['--verbosity', str(verbose)]
# When installing with setuptools, and also in some other cases, the
# test_*.py files end up marked +x executable. Nose, by default, does
# not run files marked with +x as they might be scripts. However, in
# our case nose only looks for test_*.py files under the package
# directory, which should be safe.
argv += ['--exe']
if extra_argv:
argv += extra_argv
return argv
def _show_system_info(self):
nose = import_nose()
import numpy
print(f'NumPy version {numpy.__version__}')
relaxed_strides = numpy.ones((10, 1), order="C").flags.f_contiguous
print("NumPy relaxed strides checking option:", relaxed_strides)
npdir = os.path.dirname(numpy.__file__)
print(f'NumPy is installed in {npdir}')
if 'scipy' in self.package_name:
import scipy
print(f'SciPy version {scipy.__version__}')
spdir = os.path.dirname(scipy.__file__)
print(f'SciPy is installed in {spdir}')
pyversion = sys.version.replace('\n', '')
print(f'Python version {pyversion}')
print("nose version %d.%d.%d" % nose.__versioninfo__)
def _get_custom_doctester(self):
""" Return instantiated plugin for doctests
Allows subclassing of this class to override doctester
A return value of None means use the nose builtin doctest plugin
"""
from .noseclasses import NumpyDoctest
return NumpyDoctest()
def prepare_test_args(self, label='fast', verbose=1, extra_argv=None,
doctests=False, coverage=False, timer=False):
"""
Run tests for module using nose.
This method does the heavy lifting for the `test` method. It takes all
the same arguments, for details see `test`.
See Also
--------
test
"""
# fail with nice error message if nose is not present
import_nose()
# compile argv
argv = self._test_argv(label, verbose, extra_argv)
# our way of doing coverage
if coverage:
argv += [f'--cover-package={self.package_name}', '--with-coverage',
'--cover-tests', '--cover-erase']
if timer:
if timer is True:
argv += ['--with-timer']
elif isinstance(timer, int):
argv += ['--with-timer', '--timer-top-n', str(timer)]
# construct list of plugins
import nose.plugins.builtin
from nose.plugins import EntryPointPluginManager
from .noseclasses import (KnownFailurePlugin, Unplugger,
FPUModeCheckPlugin)
plugins = [KnownFailurePlugin()]
plugins += [p() for p in nose.plugins.builtin.plugins]
if self.check_fpu_mode:
plugins += [FPUModeCheckPlugin()]
argv += ["--with-fpumodecheckplugin"]
try:
# External plugins (like nose-timer)
entrypoint_manager = EntryPointPluginManager()
entrypoint_manager.loadPlugins()
plugins += [p for p in entrypoint_manager.plugins]
except ImportError:
# Relies on pkg_resources, not a hard dependency
pass
# add doctesting if required
doctest_argv = '--with-doctest' in argv
        if not doctests and doctest_argv:
doctests = True
plug = self._get_custom_doctester()
if plug is None:
# use standard doctesting
if doctests and not doctest_argv:
argv += ['--with-doctest']
else: # custom doctesting
if doctest_argv: # in fact the unplugger would take care of this
argv.remove('--with-doctest')
plugins += [Unplugger('doctest'), plug]
if doctests:
argv += ['--with-' + plug.name]
return argv, plugins
def test(self, label='fast', verbose=1, extra_argv=None,
doctests=False, coverage=False, raise_warnings=None,
timer=False):
"""
Run tests for module using nose.
Parameters
----------
label : {'fast', 'full', '', attribute identifier}, optional
Identifies the tests to run. This can be a string to pass to
the nosetests executable with the '-A' option, or one of several
special values. Special values are:
* 'fast' - the default - which corresponds to the ``nosetests -A``
option of 'not slow'.
* 'full' - fast (as above) and slow tests as in the
'no -A' option to nosetests - this is the same as ''.
* None or '' - run all tests.
* attribute_identifier - string passed directly to nosetests as '-A'.
verbose : int, optional
Verbosity value for test outputs, in the range 1-10. Default is 1.
extra_argv : list, optional
List with any extra arguments to pass to nosetests.
doctests : bool, optional
If True, run doctests in module. Default is False.
coverage : bool, optional
If True, report coverage of NumPy code. Default is False.
(This requires the
`coverage module <https://pypi.org/project/coverage/>`_).
raise_warnings : None, str or sequence of warnings, optional
This specifies which warnings to configure as 'raise' instead
of being shown once during the test execution. Valid strings are:
* "develop" : equals ``(Warning,)``
* "release" : equals ``()``, do not raise on any warnings.
timer : bool or int, optional
Timing of individual tests with ``nose-timer`` (which needs to be
installed). If True, time tests and report on all of them.
If an integer (say ``N``), report timing results for ``N`` slowest
tests.
Returns
-------
result : object
Returns the result of running the tests as a
``nose.result.TextTestResult`` object.
Notes
-----
Each NumPy module exposes `test` in its namespace to run all tests for it.
For example, to run all tests for numpy.lib:
>>> np.lib.test() #doctest: +SKIP
Examples
--------
>>> result = np.lib.test() #doctest: +SKIP
Running unit tests for numpy.lib
...
Ran 976 tests in 3.933s
OK
>>> result.errors #doctest: +SKIP
[]
>>> result.knownfail #doctest: +SKIP
[]
"""
# cap verbosity at 3 because nose becomes *very* verbose beyond that
verbose = min(verbose, 3)
from . import utils
utils.verbose = verbose
argv, plugins = self.prepare_test_args(
label, verbose, extra_argv, doctests, coverage, timer)
if doctests:
print(f'Running unit tests and doctests for {self.package_name}')
else:
print(f'Running unit tests for {self.package_name}')
self._show_system_info()
# reset doctest state on every run
import doctest
doctest.master = None
if raise_warnings is None:
raise_warnings = self.raise_warnings
_warn_opts = dict(develop=(Warning,),
release=())
if isinstance(raise_warnings, str):
raise_warnings = _warn_opts[raise_warnings]
with suppress_warnings("location") as sup:
# Reset the warning filters to the default state,
# so that running the tests is more repeatable.
warnings.resetwarnings()
# Set all warnings to 'warn', this is because the default 'once'
# has the bad property of possibly shadowing later warnings.
warnings.filterwarnings('always')
# Force the requested warnings to raise
for warningtype in raise_warnings:
warnings.filterwarnings('error', category=warningtype)
# Filter out annoying import messages.
sup.filter(message='Not importing directory')
sup.filter(message="numpy.dtype size changed")
sup.filter(message="numpy.ufunc size changed")
sup.filter(category=np.ModuleDeprecationWarning)
# Filter out boolean '-' deprecation messages. This allows
# older versions of scipy to test without a flood of messages.
sup.filter(message=".*boolean negative.*")
sup.filter(message=".*boolean subtract.*")
# Filter out distutils cpu warnings (could be localized to
# distutils tests). ASV has problems with top level import,
# so fetch module for suppression here.
with warnings.catch_warnings():
warnings.simplefilter("always")
from ...distutils import cpuinfo
sup.filter(category=UserWarning, module=cpuinfo)
# Filter out some deprecation warnings inside nose 1.3.7 when run
# on python 3.5b2. See
# https://github.com/nose-devs/nose/issues/929
# Note: it is hard to filter based on module for sup (lineno could
# be implemented).
warnings.filterwarnings("ignore", message=".*getargspec.*",
category=DeprecationWarning,
module=r"nose\.")
from .noseclasses import NumpyTestProgram
t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins)
return t.result
def bench(self, label='fast', verbose=1, extra_argv=None):
"""
Run benchmarks for module using nose.
Parameters
----------
label : {'fast', 'full', '', attribute identifier}, optional
Identifies the benchmarks to run. This can be a string to pass to
the nosetests executable with the '-A' option, or one of several
special values. Special values are:
* 'fast' - the default - which corresponds to the ``nosetests -A``
option of 'not slow'.
* 'full' - fast (as above) and slow benchmarks as in the
'no -A' option to nosetests - this is the same as ''.
* None or '' - run all tests.
* attribute_identifier - string passed directly to nosetests as '-A'.
verbose : int, optional
Verbosity value for benchmark outputs, in the range 1-10. Default is 1.
extra_argv : list, optional
List with any extra arguments to pass to nosetests.
Returns
-------
success : bool
Returns True if running the benchmarks works, False if an error
occurred.
Notes
-----
Benchmarks are like tests, but have names starting with "bench" instead
of "test", and can be found under the "benchmarks" sub-directory of the
module.
Each NumPy module exposes `bench` in its namespace to run all benchmarks
for it.
Examples
--------
>>> success = np.lib.bench() #doctest: +SKIP
Running benchmarks for numpy.lib
...
using 562341 items:
unique:
0.11
unique1d:
0.11
ratio: 1.0
nUnique: 56230 == 56230
...
OK
>>> success #doctest: +SKIP
True
"""
print(f'Running benchmarks for {self.package_name}')
self._show_system_info()
argv = self._test_argv(label, verbose, extra_argv)
argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep]
# import nose or make informative error
nose = import_nose()
# get plugin to disable doctests
from .noseclasses import Unplugger
add_plugins = [Unplugger('doctest')]
return nose.run(argv=argv, addplugins=add_plugins)
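# Construction-only sketch: inspect the nose argv without running any
# tests (calling .test()/.bench() would need the legacy ``nose``).
def _demo_test_argv():
    t = NoseTester(package=os.path.dirname(np.__file__))
    argv = t._test_argv(label='fast', verbose=2, extra_argv=None)
    assert '-A' in argv and 'not slow' in argv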
def _numpy_tester():
if hasattr(np, "__version__") and ".dev0" in np.__version__:
mode = "develop"
else:
mode = "release"
return NoseTester(raise_warnings=mode, depth=1,
check_fpu_mode=True)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/testing/tests/test_utils.py
import warnings
import sys
import os
import itertools
import pytest
import weakref
import numpy as np
from numpy.testing import (
assert_equal, assert_array_equal, assert_almost_equal,
assert_array_almost_equal, assert_array_less, build_err_msg, raises,
assert_raises, assert_warns, assert_no_warnings, assert_allclose,
assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp,
clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_,
tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT
)
from numpy.core.overrides import ARRAY_FUNCTION_ENABLED
class _GenericTest:
def _test_equal(self, a, b):
self._assert_func(a, b)
def _test_not_equal(self, a, b):
with assert_raises(AssertionError):
self._assert_func(a, b)
def test_array_rank1_eq(self):
"""Test two equal array of rank 1 are found equal."""
a = np.array([1, 2])
b = np.array([1, 2])
self._test_equal(a, b)
def test_array_rank1_noteq(self):
"""Test two different array of rank 1 are found not equal."""
a = np.array([1, 2])
b = np.array([2, 2])
self._test_not_equal(a, b)
def test_array_rank2_eq(self):
"""Test two equal array of rank 2 are found equal."""
a = np.array([[1, 2], [3, 4]])
b = np.array([[1, 2], [3, 4]])
self._test_equal(a, b)
def test_array_diffshape(self):
"""Test two arrays with different shapes are found not equal."""
a = np.array([1, 2])
b = np.array([[1, 2], [1, 2]])
self._test_not_equal(a, b)
def test_objarray(self):
"""Test object arrays."""
a = np.array([1, 1], dtype=object)
self._test_equal(a, 1)
def test_array_likes(self):
self._test_equal([1, 2, 3], (1, 2, 3))
class TestArrayEqual(_GenericTest):
def setup_method(self):
self._assert_func = assert_array_equal
def test_generic_rank1(self):
"""Test rank 1 array for all dtypes."""
def foo(t):
a = np.empty(2, t)
a.fill(1)
b = a.copy()
c = a.copy()
c.fill(0)
self._test_equal(a, b)
self._test_not_equal(c, b)
# Test numeric types and object
for t in '?bhilqpBHILQPfdgFDG':
foo(t)
# Test strings
for t in ['S1', 'U1']:
foo(t)
def test_0_ndim_array(self):
x = np.array(473963742225900817127911193656584771)
y = np.array(18535119325151578301457182298393896)
assert_raises(AssertionError, self._assert_func, x, y)
y = x
self._assert_func(x, y)
x = np.array(43)
y = np.array(10)
assert_raises(AssertionError, self._assert_func, x, y)
y = x
self._assert_func(x, y)
def test_generic_rank3(self):
"""Test rank 3 array for all dtypes."""
def foo(t):
a = np.empty((4, 2, 3), t)
a.fill(1)
b = a.copy()
c = a.copy()
c.fill(0)
self._test_equal(a, b)
self._test_not_equal(c, b)
# Test numeric types and object
for t in '?bhilqpBHILQPfdgFDG':
foo(t)
# Test strings
for t in ['S1', 'U1']:
foo(t)
def test_nan_array(self):
"""Test arrays with nan values in them."""
a = np.array([1, 2, np.nan])
b = np.array([1, 2, np.nan])
self._test_equal(a, b)
c = np.array([1, 2, 3])
self._test_not_equal(c, b)
def test_string_arrays(self):
"""Test two arrays with different shapes are found not equal."""
a = np.array(['floupi', 'floupa'])
b = np.array(['floupi', 'floupa'])
self._test_equal(a, b)
c = np.array(['floupipi', 'floupa'])
self._test_not_equal(c, b)
def test_recarrays(self):
"""Test record arrays."""
a = np.empty(2, [('floupi', float), ('floupa', float)])
a['floupi'] = [1, 2]
a['floupa'] = [1, 2]
b = a.copy()
self._test_equal(a, b)
c = np.empty(2, [('floupipi', float),
('floupi', float), ('floupa', float)])
c['floupipi'] = a['floupi'].copy()
c['floupa'] = a['floupa'].copy()
with pytest.raises(TypeError):
self._test_not_equal(c, b)
def test_masked_nan_inf(self):
# Regression test for gh-11121
a = np.ma.MaskedArray([3., 4., 6.5], mask=[False, True, False])
b = np.array([3., np.nan, 6.5])
self._test_equal(a, b)
self._test_equal(b, a)
a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, False, False])
b = np.array([np.inf, 4., 6.5])
self._test_equal(a, b)
self._test_equal(b, a)
def test_subclass_that_overrides_eq(self):
# While we cannot guarantee testing functions will always work for
# subclasses, the tests should ideally rely only on subclasses having
# comparison operators, not on them being able to store booleans
# (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
class MyArray(np.ndarray):
def __eq__(self, other):
return bool(np.equal(self, other).all())
def __ne__(self, other):
return not self == other
a = np.array([1., 2.]).view(MyArray)
b = np.array([2., 3.]).view(MyArray)
assert_(type(a == a), bool)
assert_(a == a)
assert_(a != b)
self._test_equal(a, a)
self._test_not_equal(a, b)
self._test_not_equal(b, a)
@pytest.mark.skipif(
not ARRAY_FUNCTION_ENABLED, reason='requires __array_function__')
def test_subclass_that_does_not_implement_npall(self):
class MyArray(np.ndarray):
def __array_function__(self, *args, **kwargs):
return NotImplemented
a = np.array([1., 2.]).view(MyArray)
b = np.array([2., 3.]).view(MyArray)
with assert_raises(TypeError):
np.all(a)
self._test_equal(a, a)
self._test_not_equal(a, b)
self._test_not_equal(b, a)
def test_suppress_overflow_warnings(self):
# Based on issue #18992
with pytest.raises(AssertionError):
with np.errstate(all="raise"):
np.testing.assert_array_equal(
np.array([1, 2, 3], np.float32),
np.array([1, 1e-40, 3], np.float32))
class TestBuildErrorMessage:
def test_build_err_msg_defaults(self):
x = np.array([1.00001, 2.00002, 3.00003])
y = np.array([1.00002, 2.00003, 3.00004])
err_msg = 'There is a mismatch'
a = build_err_msg([x, y], err_msg)
b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array(['
'1.00001, 2.00002, 3.00003])\n DESIRED: array([1.00002, '
'2.00003, 3.00004])')
assert_equal(a, b)
def test_build_err_msg_no_verbose(self):
x = np.array([1.00001, 2.00002, 3.00003])
y = np.array([1.00002, 2.00003, 3.00004])
err_msg = 'There is a mismatch'
a = build_err_msg([x, y], err_msg, verbose=False)
b = '\nItems are not equal: There is a mismatch'
assert_equal(a, b)
def test_build_err_msg_custom_names(self):
x = np.array([1.00001, 2.00002, 3.00003])
y = np.array([1.00002, 2.00003, 3.00004])
err_msg = 'There is a mismatch'
a = build_err_msg([x, y], err_msg, names=('FOO', 'BAR'))
b = ('\nItems are not equal: There is a mismatch\n FOO: array(['
'1.00001, 2.00002, 3.00003])\n BAR: array([1.00002, 2.00003, '
'3.00004])')
assert_equal(a, b)
def test_build_err_msg_custom_precision(self):
x = np.array([1.000000001, 2.00002, 3.00003])
y = np.array([1.000000002, 2.00003, 3.00004])
err_msg = 'There is a mismatch'
a = build_err_msg([x, y], err_msg, precision=10)
b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array(['
'1.000000001, 2.00002 , 3.00003 ])\n DESIRED: array(['
'1.000000002, 2.00003 , 3.00004 ])')
assert_equal(a, b)
class TestEqual(TestArrayEqual):
def setup_method(self):
self._assert_func = assert_equal
def test_nan_items(self):
self._assert_func(np.nan, np.nan)
self._assert_func([np.nan], [np.nan])
self._test_not_equal(np.nan, [np.nan])
self._test_not_equal(np.nan, 1)
def test_inf_items(self):
self._assert_func(np.inf, np.inf)
self._assert_func([np.inf], [np.inf])
self._test_not_equal(np.inf, [np.inf])
def test_datetime(self):
self._test_equal(
np.datetime64("2017-01-01", "s"),
np.datetime64("2017-01-01", "s")
)
self._test_equal(
np.datetime64("2017-01-01", "s"),
np.datetime64("2017-01-01", "m")
)
# gh-10081
self._test_not_equal(
np.datetime64("2017-01-01", "s"),
np.datetime64("2017-01-02", "s")
)
self._test_not_equal(
np.datetime64("2017-01-01", "s"),
np.datetime64("2017-01-02", "m")
)
def test_nat_items(self):
# not a datetime
nadt_no_unit = np.datetime64("NaT")
nadt_s = np.datetime64("NaT", "s")
nadt_d = np.datetime64("NaT", "ns")
# not a timedelta
natd_no_unit = np.timedelta64("NaT")
natd_s = np.timedelta64("NaT", "s")
natd_d = np.timedelta64("NaT", "ns")
dts = [nadt_no_unit, nadt_s, nadt_d]
tds = [natd_no_unit, natd_s, natd_d]
for a, b in itertools.product(dts, dts):
self._assert_func(a, b)
self._assert_func([a], [b])
self._test_not_equal([a], b)
for a, b in itertools.product(tds, tds):
self._assert_func(a, b)
self._assert_func([a], [b])
self._test_not_equal([a], b)
for a, b in itertools.product(tds, dts):
self._test_not_equal(a, b)
self._test_not_equal(a, [b])
self._test_not_equal([a], [b])
self._test_not_equal([a], np.datetime64("2017-01-01", "s"))
self._test_not_equal([b], np.datetime64("2017-01-01", "s"))
self._test_not_equal([a], np.timedelta64(123, "s"))
self._test_not_equal([b], np.timedelta64(123, "s"))
def test_non_numeric(self):
self._assert_func('ab', 'ab')
self._test_not_equal('ab', 'abb')
def test_complex_item(self):
self._assert_func(complex(1, 2), complex(1, 2))
self._assert_func(complex(1, np.nan), complex(1, np.nan))
self._test_not_equal(complex(1, np.nan), complex(1, 2))
self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))
def test_negative_zero(self):
self._test_not_equal(np.PZERO, np.NZERO)
def test_complex(self):
x = np.array([complex(1, 2), complex(1, np.nan)])
y = np.array([complex(1, 2), complex(1, 2)])
self._assert_func(x, x)
self._test_not_equal(x, y)
def test_object(self):
#gh-12942
import datetime
a = np.array([datetime.datetime(2000, 1, 1),
datetime.datetime(2000, 1, 2)])
self._test_not_equal(a, a[::-1])
class TestArrayAlmostEqual(_GenericTest):
def setup_method(self):
self._assert_func = assert_array_almost_equal
def test_closeness(self):
# Note that in the course of time we ended up with
# `abs(x - y) < 1.5 * 10**(-decimal)`
# instead of the previously documented
# `abs(x - y) < 0.5 * 10**(-decimal)`
# so this check serves to preserve the wrongness.
# test scalars
self._assert_func(1.499999, 0.0, decimal=0)
assert_raises(AssertionError,
lambda: self._assert_func(1.5, 0.0, decimal=0))
# test arrays
self._assert_func([1.499999], [0.0], decimal=0)
assert_raises(AssertionError,
lambda: self._assert_func([1.5], [0.0], decimal=0))
def test_simple(self):
x = np.array([1234.2222])
y = np.array([1234.2223])
self._assert_func(x, y, decimal=3)
self._assert_func(x, y, decimal=4)
assert_raises(AssertionError,
lambda: self._assert_func(x, y, decimal=5))
def test_nan(self):
anan = np.array([np.nan])
aone = np.array([1])
ainf = np.array([np.inf])
self._assert_func(anan, anan)
assert_raises(AssertionError,
lambda: self._assert_func(anan, aone))
assert_raises(AssertionError,
lambda: self._assert_func(anan, ainf))
assert_raises(AssertionError,
lambda: self._assert_func(ainf, anan))
def test_inf(self):
a = np.array([[1., 2.], [3., 4.]])
b = a.copy()
a[0, 0] = np.inf
assert_raises(AssertionError,
lambda: self._assert_func(a, b))
b[0, 0] = -np.inf
assert_raises(AssertionError,
lambda: self._assert_func(a, b))
def test_subclass(self):
a = np.array([[1., 2.], [3., 4.]])
b = np.ma.masked_array([[1., 2.], [0., 4.]],
[[False, False], [True, False]])
self._assert_func(a, b)
self._assert_func(b, a)
self._assert_func(b, b)
# Test fully masked as well (see gh-11123).
a = np.ma.MaskedArray(3.5, mask=True)
b = np.array([3., 4., 6.5])
self._test_equal(a, b)
self._test_equal(b, a)
a = np.ma.masked
b = np.array([3., 4., 6.5])
self._test_equal(a, b)
self._test_equal(b, a)
a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
b = np.array([1., 2., 3.])
self._test_equal(a, b)
self._test_equal(b, a)
a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
b = np.array(1.)
self._test_equal(a, b)
self._test_equal(b, a)
def test_subclass_that_cannot_be_bool(self):
# While we cannot guarantee testing functions will always work for
# subclasses, the tests should ideally rely only on subclasses having
# comparison operators, not on them being able to store booleans
# (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
class MyArray(np.ndarray):
def __eq__(self, other):
return super().__eq__(other).view(np.ndarray)
def __lt__(self, other):
return super().__lt__(other).view(np.ndarray)
def all(self, *args, **kwargs):
raise NotImplementedError
a = np.array([1., 2.]).view(MyArray)
self._assert_func(a, a)
class TestAlmostEqual(_GenericTest):
def setup_method(self):
self._assert_func = assert_almost_equal
def test_closeness(self):
# Note that in the course of time we ended up with
# `abs(x - y) < 1.5 * 10**(-decimal)`
# instead of the previously documented
# `abs(x - y) < 0.5 * 10**(-decimal)`
# so this check serves to preserve the wrongness.
# test scalars
self._assert_func(1.499999, 0.0, decimal=0)
assert_raises(AssertionError,
lambda: self._assert_func(1.5, 0.0, decimal=0))
# test arrays
self._assert_func([1.499999], [0.0], decimal=0)
assert_raises(AssertionError,
lambda: self._assert_func([1.5], [0.0], decimal=0))
def test_nan_item(self):
self._assert_func(np.nan, np.nan)
assert_raises(AssertionError,
lambda: self._assert_func(np.nan, 1))
assert_raises(AssertionError,
lambda: self._assert_func(np.nan, np.inf))
assert_raises(AssertionError,
lambda: self._assert_func(np.inf, np.nan))
def test_inf_item(self):
self._assert_func(np.inf, np.inf)
self._assert_func(-np.inf, -np.inf)
assert_raises(AssertionError,
lambda: self._assert_func(np.inf, 1))
assert_raises(AssertionError,
lambda: self._assert_func(-np.inf, np.inf))
def test_simple_item(self):
self._test_not_equal(1, 2)
def test_complex_item(self):
self._assert_func(complex(1, 2), complex(1, 2))
self._assert_func(complex(1, np.nan), complex(1, np.nan))
self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan))
self._test_not_equal(complex(1, np.nan), complex(1, 2))
self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))
def test_complex(self):
x = np.array([complex(1, 2), complex(1, np.nan)])
z = np.array([complex(1, 2), complex(np.nan, 1)])
y = np.array([complex(1, 2), complex(1, 2)])
self._assert_func(x, x)
self._test_not_equal(x, y)
self._test_not_equal(x, z)
def test_error_message(self):
"""Check the message is formatted correctly for the decimal value.
Also check the message when input includes inf or nan (gh12200)"""
x = np.array([1.00000000001, 2.00000000002, 3.00003])
y = np.array([1.00000000002, 2.00000000003, 3.00004])
# Test with a different amount of decimal digits
with pytest.raises(AssertionError) as exc_info:
self._assert_func(x, y, decimal=12)
msgs = str(exc_info.value).split('\n')
assert_equal(msgs[3], 'Mismatched elements: 3 / 3 (100%)')
assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
assert_equal(
msgs[6],
' x: array([1.00000000001, 2.00000000002, 3.00003 ])')
assert_equal(
msgs[7],
' y: array([1.00000000002, 2.00000000003, 3.00004 ])')
# With the default value of decimal digits, only the 3rd element
# differs. Note that we only check for the formatting of the arrays
# themselves.
with pytest.raises(AssertionError) as exc_info:
self._assert_func(x, y)
msgs = str(exc_info.value).split('\n')
assert_equal(msgs[3], 'Mismatched elements: 1 / 3 (33.3%)')
assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
assert_equal(msgs[6], ' x: array([1. , 2. , 3.00003])')
assert_equal(msgs[7], ' y: array([1. , 2. , 3.00004])')
# Check the error message when input includes inf
x = np.array([np.inf, 0])
y = np.array([np.inf, 1])
with pytest.raises(AssertionError) as exc_info:
self._assert_func(x, y)
msgs = str(exc_info.value).split('\n')
assert_equal(msgs[3], 'Mismatched elements: 1 / 2 (50%)')
assert_equal(msgs[4], 'Max absolute difference: 1.')
assert_equal(msgs[5], 'Max relative difference: 1.')
assert_equal(msgs[6], ' x: array([inf, 0.])')
assert_equal(msgs[7], ' y: array([inf, 1.])')
# Check the error message when dividing by zero
x = np.array([1, 2])
y = np.array([0, 0])
with pytest.raises(AssertionError) as exc_info:
self._assert_func(x, y)
msgs = str(exc_info.value).split('\n')
assert_equal(msgs[3], 'Mismatched elements: 2 / 2 (100%)')
assert_equal(msgs[4], 'Max absolute difference: 2')
assert_equal(msgs[5], 'Max relative difference: inf')
def test_error_message_2(self):
"""Check the message is formatted correctly when either x or y is a scalar."""
x = 2
y = np.ones(20)
with pytest.raises(AssertionError) as exc_info:
self._assert_func(x, y)
msgs = str(exc_info.value).split('\n')
assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')
assert_equal(msgs[4], 'Max absolute difference: 1.')
assert_equal(msgs[5], 'Max relative difference: 1.')
y = 2
x = np.ones(20)
with pytest.raises(AssertionError) as exc_info:
self._assert_func(x, y)
msgs = str(exc_info.value).split('\n')
assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')
assert_equal(msgs[4], 'Max absolute difference: 1.')
assert_equal(msgs[5], 'Max relative difference: 0.5')
def test_subclass_that_cannot_be_bool(self):
# While we cannot guarantee testing functions will always work for
# subclasses, the tests should ideally rely only on subclasses having
# comparison operators, not on them being able to store booleans
# (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
class MyArray(np.ndarray):
def __eq__(self, other):
return super().__eq__(other).view(np.ndarray)
def __lt__(self, other):
return super().__lt__(other).view(np.ndarray)
def all(self, *args, **kwargs):
raise NotImplementedError
a = np.array([1., 2.]).view(MyArray)
self._assert_func(a, a)
class TestApproxEqual:
def setup_method(self):
self._assert_func = assert_approx_equal
def test_simple_0d_arrays(self):
x = np.array(1234.22)
y = np.array(1234.23)
self._assert_func(x, y, significant=5)
self._assert_func(x, y, significant=6)
assert_raises(AssertionError,
lambda: self._assert_func(x, y, significant=7))
def test_simple_items(self):
x = 1234.22
y = 1234.23
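        # After scaling to unit magnitude the difference is about 1e-5, so
        # the checks up to significant=6 pass and significant=7 fails.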
self._assert_func(x, y, significant=4)
self._assert_func(x, y, significant=5)
self._assert_func(x, y, significant=6)
assert_raises(AssertionError,
lambda: self._assert_func(x, y, significant=7))
def test_nan_array(self):
anan = np.array(np.nan)
aone = np.array(1)
ainf = np.array(np.inf)
self._assert_func(anan, anan)
assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
def test_nan_items(self):
anan = np.array(np.nan)
aone = np.array(1)
ainf = np.array(np.inf)
self._assert_func(anan, anan)
assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
class TestArrayAssertLess:
def setup_method(self):
self._assert_func = assert_array_less
def test_simple_arrays(self):
x = np.array([1.1, 2.2])
y = np.array([1.2, 2.3])
self._assert_func(x, y)
assert_raises(AssertionError, lambda: self._assert_func(y, x))
y = np.array([1.0, 2.3])
assert_raises(AssertionError, lambda: self._assert_func(x, y))
assert_raises(AssertionError, lambda: self._assert_func(y, x))
def test_rank2(self):
x = np.array([[1.1, 2.2], [3.3, 4.4]])
y = np.array([[1.2, 2.3], [3.4, 4.5]])
self._assert_func(x, y)
assert_raises(AssertionError, lambda: self._assert_func(y, x))
y = np.array([[1.0, 2.3], [3.4, 4.5]])
assert_raises(AssertionError, lambda: self._assert_func(x, y))
assert_raises(AssertionError, lambda: self._assert_func(y, x))
def test_rank3(self):
x = np.ones(shape=(2, 2, 2))
y = np.ones(shape=(2, 2, 2))+1
self._assert_func(x, y)
assert_raises(AssertionError, lambda: self._assert_func(y, x))
y[0, 0, 0] = 0
assert_raises(AssertionError, lambda: self._assert_func(x, y))
assert_raises(AssertionError, lambda: self._assert_func(y, x))
def test_simple_items(self):
x = 1.1
y = 2.2
self._assert_func(x, y)
assert_raises(AssertionError, lambda: self._assert_func(y, x))
y = np.array([2.2, 3.3])
self._assert_func(x, y)
assert_raises(AssertionError, lambda: self._assert_func(y, x))
y = np.array([1.0, 3.3])
assert_raises(AssertionError, lambda: self._assert_func(x, y))
def test_nan_noncompare(self):
anan = np.array(np.nan)
aone = np.array(1)
ainf = np.array(np.inf)
self._assert_func(anan, anan)
assert_raises(AssertionError, lambda: self._assert_func(aone, anan))
assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
def test_nan_noncompare_array(self):
x = np.array([1.1, 2.2, 3.3])
anan = np.array(np.nan)
assert_raises(AssertionError, lambda: self._assert_func(x, anan))
assert_raises(AssertionError, lambda: self._assert_func(anan, x))
x = np.array([1.1, 2.2, np.nan])
assert_raises(AssertionError, lambda: self._assert_func(x, anan))
assert_raises(AssertionError, lambda: self._assert_func(anan, x))
y = np.array([1.0, 2.0, np.nan])
self._assert_func(y, x)
assert_raises(AssertionError, lambda: self._assert_func(x, y))
def test_inf_compare(self):
aone = np.array(1)
ainf = np.array(np.inf)
self._assert_func(aone, ainf)
self._assert_func(-ainf, aone)
self._assert_func(-ainf, ainf)
assert_raises(AssertionError, lambda: self._assert_func(ainf, aone))
assert_raises(AssertionError, lambda: self._assert_func(aone, -ainf))
assert_raises(AssertionError, lambda: self._assert_func(ainf, ainf))
assert_raises(AssertionError, lambda: self._assert_func(ainf, -ainf))
assert_raises(AssertionError, lambda: self._assert_func(-ainf, -ainf))
def test_inf_compare_array(self):
x = np.array([1.1, 2.2, np.inf])
ainf = np.array(np.inf)
assert_raises(AssertionError, lambda: self._assert_func(x, ainf))
assert_raises(AssertionError, lambda: self._assert_func(ainf, x))
assert_raises(AssertionError, lambda: self._assert_func(x, -ainf))
assert_raises(AssertionError, lambda: self._assert_func(-x, -ainf))
assert_raises(AssertionError, lambda: self._assert_func(-ainf, -x))
self._assert_func(-ainf, x)
@pytest.mark.skip(reason="The raises decorator depends on Nose")
class TestRaises:
def setup_method(self):
class MyException(Exception):
pass
self.e = MyException
def raises_exception(self, e):
raise e
def does_not_raise_exception(self):
pass
def test_correct_catch(self):
raises(self.e)(self.raises_exception)(self.e) # raises?
def test_wrong_exception(self):
try:
raises(self.e)(self.raises_exception)(RuntimeError) # raises?
except RuntimeError:
return
else:
raise AssertionError("should have caught RuntimeError")
def test_catch_no_raise(self):
try:
raises(self.e)(self.does_not_raise_exception)() # raises?
except AssertionError:
return
else:
raise AssertionError("should have raised an AssertionError")
class TestWarns:
def test_warn(self):
def f():
warnings.warn("yo")
return 3
before_filters = sys.modules['warnings'].filters[:]
assert_equal(assert_warns(UserWarning, f), 3)
after_filters = sys.modules['warnings'].filters
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
# Check that the warnings state is unchanged
assert_equal(before_filters, after_filters,
"assert_warns does not preserver warnings state")
def test_context_manager(self):
before_filters = sys.modules['warnings'].filters[:]
with assert_warns(UserWarning):
warnings.warn("yo")
after_filters = sys.modules['warnings'].filters
def no_warnings():
with assert_no_warnings():
warnings.warn("yo")
assert_raises(AssertionError, no_warnings)
assert_equal(before_filters, after_filters,
"assert_warns does not preserver warnings state")
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
try:
# Should raise a DeprecationWarning
assert_warns(UserWarning, f)
failed = True
except DeprecationWarning:
pass
if failed:
raise AssertionError("wrong warning caught by assert_warn")
class TestAssertAllclose:
def test_simple(self):
x = 1e-3
y = 1e-9
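        # assert_allclose passes when |actual - desired| <= atol + rtol*|desired|
        # (defaults: rtol=1e-7, atol=0), so atol=1 lets 1e-3 vs 1e-9 pass.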
assert_allclose(x, y, atol=1)
assert_raises(AssertionError, assert_allclose, x, y)
a = np.array([x, y, x, y])
b = np.array([x, y, x, x])
assert_allclose(a, b, atol=1)
assert_raises(AssertionError, assert_allclose, a, b)
b[-1] = y * (1 + 1e-8)
assert_allclose(a, b)
assert_raises(AssertionError, assert_allclose, a, b, rtol=1e-9)
assert_allclose(6, 10, rtol=0.5)
assert_raises(AssertionError, assert_allclose, 10, 6, rtol=0.5)
def test_min_int(self):
a = np.array([np.iinfo(np.int_).min], dtype=np.int_)
# Should not raise:
assert_allclose(a, a)
def test_report_fail_percentage(self):
a = np.array([1, 1, 1, 1])
b = np.array([1, 1, 1, 2])
with pytest.raises(AssertionError) as exc_info:
assert_allclose(a, b)
msg = str(exc_info.value)
assert_('Mismatched elements: 1 / 4 (25%)\n'
'Max absolute difference: 1\n'
'Max relative difference: 0.5' in msg)
def test_equal_nan(self):
a = np.array([np.nan])
b = np.array([np.nan])
# Should not raise:
assert_allclose(a, b, equal_nan=True)
def test_not_equal_nan(self):
a = np.array([np.nan])
b = np.array([np.nan])
assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False)
def test_equal_nan_default(self):
# Make sure equal_nan default behavior remains unchanged. (All
# of these functions use assert_array_compare under the hood.)
# None of these should raise.
a = np.array([np.nan])
b = np.array([np.nan])
assert_array_equal(a, b)
assert_array_almost_equal(a, b)
assert_array_less(a, b)
assert_allclose(a, b)
def test_report_max_relative_error(self):
a = np.array([0, 1])
b = np.array([0, 2])
with pytest.raises(AssertionError) as exc_info:
assert_allclose(a, b)
msg = str(exc_info.value)
assert_('Max relative difference: 0.5' in msg)
def test_timedelta(self):
# see gh-18286
a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]")
assert_allclose(a, a)
class TestArrayAlmostEqualNulp:
def test_float64_pass(self):
# The number of units of least precision
        # In this case, use a few places above the lowest level (i.e. nulp=1)
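        # (roughly: values pass when |x - y| <= nulp * spacing(max(|x|, |y|)),
        # i.e. when they are at most `nulp` representable steps apart)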
nulp = 5
x = np.linspace(-20, 20, 50, dtype=np.float64)
x = 10**x
x = np.r_[-x, x]
# Addition
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp/2.
assert_array_almost_equal_nulp(x, y, nulp)
# Subtraction
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp/2.
assert_array_almost_equal_nulp(x, y, nulp)
def test_float64_fail(self):
nulp = 5
x = np.linspace(-20, 20, 50, dtype=np.float64)
x = 10**x
x = np.r_[-x, x]
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp*2.
assert_raises(AssertionError, assert_array_almost_equal_nulp,
x, y, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp*2.
assert_raises(AssertionError, assert_array_almost_equal_nulp,
x, y, nulp)
def test_float64_ignore_nan(self):
# Ignore ULP differences between various NAN's
# Note that MIPS may reverse quiet and signaling nans
# so we use the builtin version as a base.
offset = np.uint64(0xffffffff)
nan1_i64 = np.array(np.nan, dtype=np.float64).view(np.uint64)
nan2_i64 = nan1_i64 ^ offset # nan payload on MIPS is all ones.
nan1_f64 = nan1_i64.view(np.float64)
nan2_f64 = nan2_i64.view(np.float64)
assert_array_max_ulp(nan1_f64, nan2_f64, 0)
def test_float32_pass(self):
nulp = 5
x = np.linspace(-20, 20, 50, dtype=np.float32)
x = 10**x
x = np.r_[-x, x]
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp/2.
assert_array_almost_equal_nulp(x, y, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp/2.
assert_array_almost_equal_nulp(x, y, nulp)
def test_float32_fail(self):
nulp = 5
x = np.linspace(-20, 20, 50, dtype=np.float32)
x = 10**x
x = np.r_[-x, x]
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp*2.
assert_raises(AssertionError, assert_array_almost_equal_nulp,
x, y, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp*2.
assert_raises(AssertionError, assert_array_almost_equal_nulp,
x, y, nulp)
def test_float32_ignore_nan(self):
# Ignore ULP differences between various NAN's
# Note that MIPS may reverse quiet and signaling nans
# so we use the builtin version as a base.
offset = np.uint32(0xffff)
nan1_i32 = np.array(np.nan, dtype=np.float32).view(np.uint32)
nan2_i32 = nan1_i32 ^ offset # nan payload on MIPS is all ones.
nan1_f32 = nan1_i32.view(np.float32)
nan2_f32 = nan2_i32.view(np.float32)
assert_array_max_ulp(nan1_f32, nan2_f32, 0)
def test_float16_pass(self):
nulp = 5
x = np.linspace(-4, 4, 10, dtype=np.float16)
x = 10**x
x = np.r_[-x, x]
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp/2.
assert_array_almost_equal_nulp(x, y, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp/2.
assert_array_almost_equal_nulp(x, y, nulp)
def test_float16_fail(self):
nulp = 5
x = np.linspace(-4, 4, 10, dtype=np.float16)
x = 10**x
x = np.r_[-x, x]
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp*2.
assert_raises(AssertionError, assert_array_almost_equal_nulp,
x, y, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp*2.
assert_raises(AssertionError, assert_array_almost_equal_nulp,
x, y, nulp)
def test_float16_ignore_nan(self):
# Ignore ULP differences between various NAN's
# Note that MIPS may reverse quiet and signaling nans
# so we use the builtin version as a base.
offset = np.uint16(0xff)
nan1_i16 = np.array(np.nan, dtype=np.float16).view(np.uint16)
nan2_i16 = nan1_i16 ^ offset # nan payload on MIPS is all ones.
nan1_f16 = nan1_i16.view(np.float16)
nan2_f16 = nan2_i16.view(np.float16)
assert_array_max_ulp(nan1_f16, nan2_f16, 0)
def test_complex128_pass(self):
nulp = 5
x = np.linspace(-20, 20, 50, dtype=np.float64)
x = 10**x
x = np.r_[-x, x]
xi = x + x*1j
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp/2.
assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
# The test condition needs to be at least a factor of sqrt(2) smaller
# because the real and imaginary parts both change
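        # (perturbing both parts by d gives |d + d*1j| = sqrt(2) * |d|)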
y = x + x*eps*nulp/4.
assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp/2.
assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
y = x - x*epsneg*nulp/4.
assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
def test_complex128_fail(self):
nulp = 5
x = np.linspace(-20, 20, 50, dtype=np.float64)
x = 10**x
x = np.r_[-x, x]
xi = x + x*1j
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp*2.
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, x + y*1j, nulp)
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, y + x*1j, nulp)
# The test condition needs to be at least a factor of sqrt(2) smaller
# because the real and imaginary parts both change
y = x + x*eps*nulp
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, y + y*1j, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp*2.
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, x + y*1j, nulp)
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, y + x*1j, nulp)
y = x - x*epsneg*nulp
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, y + y*1j, nulp)
def test_complex64_pass(self):
nulp = 5
x = np.linspace(-20, 20, 50, dtype=np.float32)
x = 10**x
x = np.r_[-x, x]
xi = x + x*1j
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp/2.
assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
y = x + x*eps*nulp/4.
assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp/2.
assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
y = x - x*epsneg*nulp/4.
assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
def test_complex64_fail(self):
nulp = 5
x = np.linspace(-20, 20, 50, dtype=np.float32)
x = 10**x
x = np.r_[-x, x]
xi = x + x*1j
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp*2.
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, x + y*1j, nulp)
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, y + x*1j, nulp)
y = x + x*eps*nulp
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, y + y*1j, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp*2.
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, x + y*1j, nulp)
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, y + x*1j, nulp)
y = x - x*epsneg*nulp
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, y + y*1j, nulp)
class TestULP:
def test_equal(self):
x = np.random.randn(10)
assert_array_max_ulp(x, x, maxulp=0)
def test_single(self):
        # Generate 1 + small deviation; check that adding eps gives a few ULPs
x = np.ones(10).astype(np.float32)
x += 0.01 * np.random.randn(10).astype(np.float32)
eps = np.finfo(np.float32).eps
assert_array_max_ulp(x, x+eps, maxulp=20)
def test_double(self):
        # Generate 1 + small deviation; check that adding eps gives a few ULPs
x = np.ones(10).astype(np.float64)
x += 0.01 * np.random.randn(10).astype(np.float64)
eps = np.finfo(np.float64).eps
assert_array_max_ulp(x, x+eps, maxulp=200)
def test_inf(self):
for dt in [np.float32, np.float64]:
inf = np.array([np.inf]).astype(dt)
big = np.array([np.finfo(dt).max])
assert_array_max_ulp(inf, big, maxulp=200)
def test_nan(self):
# Test that nan is 'far' from small, tiny, inf, max and min
for dt in [np.float32, np.float64]:
if dt == np.float32:
maxulp = 1e6
else:
maxulp = 1e12
inf = np.array([np.inf]).astype(dt)
nan = np.array([np.nan]).astype(dt)
big = np.array([np.finfo(dt).max])
tiny = np.array([np.finfo(dt).tiny])
zero = np.array([np.PZERO]).astype(dt)
nzero = np.array([np.NZERO]).astype(dt)
assert_raises(AssertionError,
lambda: assert_array_max_ulp(nan, inf,
maxulp=maxulp))
assert_raises(AssertionError,
lambda: assert_array_max_ulp(nan, big,
maxulp=maxulp))
assert_raises(AssertionError,
lambda: assert_array_max_ulp(nan, tiny,
maxulp=maxulp))
assert_raises(AssertionError,
lambda: assert_array_max_ulp(nan, zero,
maxulp=maxulp))
assert_raises(AssertionError,
lambda: assert_array_max_ulp(nan, nzero,
maxulp=maxulp))
class TestStringEqual:
def test_simple(self):
assert_string_equal("hello", "hello")
assert_string_equal("hello\nmultiline", "hello\nmultiline")
with pytest.raises(AssertionError) as exc_info:
assert_string_equal("foo\nbar", "hello\nbar")
msg = str(exc_info.value)
assert_equal(msg, "Differences in strings:\n- foo\n+ hello")
assert_raises(AssertionError,
lambda: assert_string_equal("foo", "hello"))
def test_regex(self):
assert_string_equal("a+*b", "a+*b")
assert_raises(AssertionError,
lambda: assert_string_equal("aaa", "a+b"))
def assert_warn_len_equal(mod, n_in_context):
try:
mod_warns = mod.__warningregistry__
except AttributeError:
# the lack of a __warningregistry__
# attribute means that no warning has
# occurred; this can be triggered in
# a parallel test scenario, while in
        # a serial test scenario an initial
        # warning (and therefore the attribute)
        # is always created first
mod_warns = {}
num_warns = len(mod_warns)
if 'version' in mod_warns:
# Python 3 adds a 'version' entry to the registry,
# do not count it.
num_warns -= 1
assert_equal(num_warns, n_in_context)
def test_warn_len_equal_call_scenarios():
# assert_warn_len_equal is called under
# varying circumstances depending on serial
# vs. parallel test scenarios; this test
    # simply aims to probe both code paths and
    # check that no assertion goes uncaught
# parallel scenario -- no warning issued yet
class mod:
pass
mod_inst = mod()
assert_warn_len_equal(mod=mod_inst,
n_in_context=0)
# serial test scenario -- the __warningregistry__
# attribute should be present
class mod:
def __init__(self):
self.__warningregistry__ = {'warning1':1,
'warning2':2}
mod_inst = mod()
assert_warn_len_equal(mod=mod_inst,
n_in_context=2)
def _get_fresh_mod():
# Get this module, with warning registry empty
my_mod = sys.modules[__name__]
try:
my_mod.__warningregistry__.clear()
except AttributeError:
# will not have a __warningregistry__ unless warning has been
# raised in the module at some point
pass
return my_mod
def test_clear_and_catch_warnings():
# Initial state of module, no warnings
my_mod = _get_fresh_mod()
assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
with clear_and_catch_warnings(modules=[my_mod]):
warnings.simplefilter('ignore')
warnings.warn('Some warning')
assert_equal(my_mod.__warningregistry__, {})
# Without specified modules, don't clear warnings during context.
# catch_warnings doesn't make an entry for 'ignore'.
with clear_and_catch_warnings():
warnings.simplefilter('ignore')
warnings.warn('Some warning')
assert_warn_len_equal(my_mod, 0)
# Manually adding two warnings to the registry:
my_mod.__warningregistry__ = {'warning1': 1,
'warning2': 2}
# Confirm that specifying module keeps old warning, does not add new
with clear_and_catch_warnings(modules=[my_mod]):
warnings.simplefilter('ignore')
warnings.warn('Another warning')
assert_warn_len_equal(my_mod, 2)
    # Another warning; with no module spec, it clears the registry
with clear_and_catch_warnings():
warnings.simplefilter('ignore')
warnings.warn('Another warning')
assert_warn_len_equal(my_mod, 0)
def test_suppress_warnings_module():
# Initial state of module, no warnings
my_mod = _get_fresh_mod()
assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
def warn_other_module():
# Apply along axis is implemented in python; stacklevel=2 means
# we end up inside its module, not ours.
def warn(arr):
warnings.warn("Some warning 2", stacklevel=2)
return arr
np.apply_along_axis(warn, 0, [0])
# Test module based warning suppression:
assert_warn_len_equal(my_mod, 0)
with suppress_warnings() as sup:
sup.record(UserWarning)
        # suppress the warning from the other module (it may have a .pyc
        # ending); this has to be changed if apply_along_axis is moved.
sup.filter(module=np.lib.shape_base)
warnings.warn("Some warning")
warn_other_module()
# Check that the suppression did test the file correctly (this module
# got filtered)
assert_equal(len(sup.log), 1)
assert_equal(sup.log[0].message.args[0], "Some warning")
assert_warn_len_equal(my_mod, 0)
sup = suppress_warnings()
# Will have to be changed if apply_along_axis is moved:
sup.filter(module=my_mod)
with sup:
warnings.warn('Some warning')
assert_warn_len_equal(my_mod, 0)
# And test repeat works:
sup.filter(module=my_mod)
with sup:
warnings.warn('Some warning')
assert_warn_len_equal(my_mod, 0)
# Without specified modules
with suppress_warnings():
warnings.simplefilter('ignore')
warnings.warn('Some warning')
assert_warn_len_equal(my_mod, 0)
def test_suppress_warnings_type():
# Initial state of module, no warnings
my_mod = _get_fresh_mod()
assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
# Test module based warning suppression:
with suppress_warnings() as sup:
sup.filter(UserWarning)
warnings.warn('Some warning')
assert_warn_len_equal(my_mod, 0)
sup = suppress_warnings()
sup.filter(UserWarning)
with sup:
warnings.warn('Some warning')
assert_warn_len_equal(my_mod, 0)
# And test repeat works:
sup.filter(module=my_mod)
with sup:
warnings.warn('Some warning')
assert_warn_len_equal(my_mod, 0)
# Without specified modules
with suppress_warnings():
warnings.simplefilter('ignore')
warnings.warn('Some warning')
assert_warn_len_equal(my_mod, 0)
def test_suppress_warnings_decorate_no_record():
sup = suppress_warnings()
sup.filter(UserWarning)
@sup
def warn(category):
warnings.warn('Some warning', category)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
        warn(UserWarning)  # should be suppressed
warn(RuntimeWarning)
assert_equal(len(w), 1)
def test_suppress_warnings_record():
sup = suppress_warnings()
log1 = sup.record()
with sup:
log2 = sup.record(message='Some other warning 2')
sup.filter(message='Some warning')
warnings.warn('Some warning')
warnings.warn('Some other warning')
warnings.warn('Some other warning 2')
assert_equal(len(sup.log), 2)
assert_equal(len(log1), 1)
    assert_equal(len(log2), 1)
assert_equal(log2[0].message.args[0], 'Some other warning 2')
# Do it again, with the same context to see if some warnings survived:
with sup:
log2 = sup.record(message='Some other warning 2')
sup.filter(message='Some warning')
warnings.warn('Some warning')
warnings.warn('Some other warning')
warnings.warn('Some other warning 2')
assert_equal(len(sup.log), 2)
assert_equal(len(log1), 1)
assert_equal(len(log2), 1)
assert_equal(log2[0].message.args[0], 'Some other warning 2')
# Test nested:
with suppress_warnings() as sup:
sup.record()
with suppress_warnings() as sup2:
sup2.record(message='Some warning')
warnings.warn('Some warning')
warnings.warn('Some other warning')
assert_equal(len(sup2.log), 1)
assert_equal(len(sup.log), 1)
def test_suppress_warnings_forwarding():
def warn_other_module():
# Apply along axis is implemented in python; stacklevel=2 means
# we end up inside its module, not ours.
def warn(arr):
warnings.warn("Some warning", stacklevel=2)
return arr
np.apply_along_axis(warn, 0, [0])
with suppress_warnings() as sup:
sup.record()
with suppress_warnings("always"):
for i in range(2):
warnings.warn("Some warning")
assert_equal(len(sup.log), 2)
with suppress_warnings() as sup:
sup.record()
with suppress_warnings("location"):
for i in range(2):
warnings.warn("Some warning")
warnings.warn("Some warning")
assert_equal(len(sup.log), 2)
with suppress_warnings() as sup:
sup.record()
with suppress_warnings("module"):
for i in range(2):
warnings.warn("Some warning")
warnings.warn("Some warning")
warn_other_module()
assert_equal(len(sup.log), 2)
with suppress_warnings() as sup:
sup.record()
with suppress_warnings("once"):
for i in range(2):
warnings.warn("Some warning")
warnings.warn("Some other warning")
warn_other_module()
assert_equal(len(sup.log), 2)
def test_tempdir():
with tempdir() as tdir:
fpath = os.path.join(tdir, 'tmp')
with open(fpath, 'w'):
pass
assert_(not os.path.isdir(tdir))
raised = False
try:
with tempdir() as tdir:
raise ValueError()
except ValueError:
raised = True
assert_(raised)
assert_(not os.path.isdir(tdir))
def test_temppath():
with temppath() as fpath:
with open(fpath, 'w'):
pass
assert_(not os.path.isfile(fpath))
raised = False
try:
with temppath() as fpath:
raise ValueError()
except ValueError:
raised = True
assert_(raised)
assert_(not os.path.isfile(fpath))
class my_cacw(clear_and_catch_warnings):
class_modules = (sys.modules[__name__],)
def test_clear_and_catch_warnings_inherit():
# Test can subclass and add default modules
my_mod = _get_fresh_mod()
with my_cacw():
warnings.simplefilter('ignore')
warnings.warn('Some warning')
assert_equal(my_mod.__warningregistry__, {})
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
class TestAssertNoGcCycles:
""" Test assert_no_gc_cycles """
def test_passes(self):
def no_cycle():
b = []
b.append([])
return b
with assert_no_gc_cycles():
no_cycle()
assert_no_gc_cycles(no_cycle)
def test_asserts(self):
def make_cycle():
a = []
a.append(a)
a.append(a)
return a
with assert_raises(AssertionError):
with assert_no_gc_cycles():
make_cycle()
with assert_raises(AssertionError):
assert_no_gc_cycles(make_cycle)
@pytest.mark.slow
def test_fails(self):
"""
Test that in cases where the garbage cannot be collected, we raise an
        error instead of hanging forever trying to clear it.
"""
class ReferenceCycleInDel:
"""
An object that not only contains a reference cycle, but creates new
cycles whenever it's garbage-collected and its __del__ runs
"""
make_cycle = True
def __init__(self):
self.cycle = self
def __del__(self):
# break the current cycle so that `self` can be freed
self.cycle = None
if ReferenceCycleInDel.make_cycle:
# but create a new one so that the garbage collector has more
# work to do.
ReferenceCycleInDel()
try:
w = weakref.ref(ReferenceCycleInDel())
try:
with assert_raises(RuntimeError):
# this will be unable to get a baseline empty garbage
assert_no_gc_cycles(lambda: None)
except AssertionError:
                # the above test is only necessary if the GC actually tried to
                # free our object anyway, which Python 2.7 does not.
if w() is not None:
pytest.skip("GC does not call __del__ on cyclic objects")
raise
finally:
# make sure that we stop creating reference cycles
ReferenceCycleInDel.make_cycle = False
| 55,074 | Python | 33.123296 | 86 | 0.562788 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/testing/tests/test_doctesting.py | """ Doctests for NumPy-specific nose/doctest modifications
"""
#FIXME: None of these tests is run, because 'check' is not a recognized
# testing prefix.
# try the #random directive on the output line
def check_random_directive():
'''
>>> 2+2
<BadExample object at 0x084D05AC> #random: may vary on your system
'''
# check the implicit "import numpy as np"
def check_implicit_np():
'''
>>> np.array([1,2,3])
array([1, 2, 3])
'''
# there's some extraneous whitespace around the correct responses
def check_whitespace_enabled():
'''
# whitespace after the 3
>>> 1+2
3
# whitespace before the 7
>>> 3+4
7
'''
def check_empty_output():
""" Check that no output does not cause an error.
This is related to nose bug 445; the numpy plugin changed the
doctest-result-variable default and therefore hit this bug:
http://code.google.com/p/python-nose/issues/detail?id=445
>>> a = 10
"""
def check_skip():
""" Check skip directive
The test below should not run
>>> 1/0 #doctest: +SKIP
"""
if __name__ == '__main__':
# Run tests outside numpy test rig
import nose
from numpy.testing.noseclasses import NumpyDoctest
argv = ['', __file__, '--with-numpydoctest']
nose.core.TestProgram(argv=argv, addplugins=[NumpyDoctest()])
| 1,347 | Python | 22.241379 | 71 | 0.634744 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_pyinstaller/test_pyinstaller.py | import subprocess
from pathlib import Path
import pytest
# PyInstaller has been slow about replacing 'imp' with 'importlib'.
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
# It also leaks io.BytesIO()s.
@pytest.mark.filterwarnings('ignore::ResourceWarning')
@pytest.mark.parametrize("mode", ["--onedir", "--onefile"])
@pytest.mark.slow
def test_pyinstaller(mode, tmp_path):
"""Compile and run pyinstaller-smoke.py using PyInstaller."""
pyinstaller_cli = pytest.importorskip("PyInstaller.__main__").run
source = Path(__file__).with_name("pyinstaller-smoke.py").resolve()
args = [
# Place all generated files in ``tmp_path``.
'--workpath', str(tmp_path / "build"),
'--distpath', str(tmp_path / "dist"),
'--specpath', str(tmp_path),
mode,
str(source),
]
pyinstaller_cli(args)
if mode == "--onefile":
exe = tmp_path / "dist" / source.stem
else:
exe = tmp_path / "dist" / source.stem / source.stem
p = subprocess.run([str(exe)], check=True, stdout=subprocess.PIPE)
assert p.stdout.strip() == b"I made it!"
| 1,135 | Python | 30.555555 | 79 | 0.643172 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_pyinstaller/pyinstaller-smoke.py | """A crude *bit of everything* smoke test to verify PyInstaller compatibility.
PyInstaller typically goes wrong by forgetting to package modules, extension
modules or shared libraries. This script should aim to touch as many of those
as possible in an attempt to trip a ModuleNotFoundError or a DLL load failure
due to an uncollected resource. Missing resources are unlikely to lead to
arithmetic errors, so there's generally no need to verify any calculation's
output - merely that it made it to the end OK. This script should not
explicitly import any of numpy's submodules as that gives PyInstaller undue
hints that those submodules exist and should be collected (accessing implicitly
loaded submodules is OK).
"""
import numpy as np
a = np.arange(1., 10.).reshape((3, 3)) % 5
np.linalg.det(a)
a @ a
a @ a.T
np.linalg.inv(a)
np.sin(np.exp(a))
np.linalg.svd(a)
np.linalg.eigh(a)
np.unique(np.random.randint(0, 10, 100))
np.sort(np.random.uniform(0, 10, 100))
np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
np.ma.masked_array(np.arange(10), np.random.rand(10) < .5).sum()
np.polynomial.Legendre([7, 8, 9]).roots()
print("I made it!")
| 1,143 | Python | 33.666666 | 79 | 0.746282 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/_pyinstaller/hook-numpy.py | """This hook should collect all binary files and any hidden modules that numpy
needs.
Our (somewhat inadequate) docs for writing PyInstaller hooks are kept here:
https://pyinstaller.readthedocs.io/en/stable/hooks.html
"""
from PyInstaller.compat import is_conda, is_pure_conda
from PyInstaller.utils.hooks import collect_dynamic_libs, is_module_satisfies
# Collect all DLLs inside numpy's installation folder, dump them into built
# app's root.
binaries = collect_dynamic_libs("numpy", ".")
# If using Conda without any non-conda virtual environment manager:
if is_pure_conda:
    # Assume NumPy is running from conda-forge and collect its DLLs from the
    # communal Conda bin directory. DLLs from NumPy's dependencies must also be
    # collected to capture MKL, OpenBLAS, OpenMP, etc.
from PyInstaller.utils.hooks import conda_support
datas = conda_support.collect_dynamic_libs("numpy", dependencies=True)
# Submodules PyInstaller cannot detect (probably because they are only imported
# by extension modules, which PyInstaller cannot read).
hiddenimports = ['numpy.core._dtype_ctypes']
if is_conda:
hiddenimports.append("six")
# Remove testing and building code and packages that are referenced throughout
# NumPy but are not really dependencies.
excludedimports = [
"scipy",
"pytest",
"nose",
"f2py",
"setuptools",
"numpy.f2py",
"distutils",
"numpy.distutils",
]
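# Usage sketch (an assumption, not part of the hook itself): when building
# from a source checkout, PyInstaller can be pointed at this hook directory
# explicitly, e.g.
#
#   pyinstaller --additional-hooks-dir=numpy/_pyinstaller your_script.py
#
# where `your_script.py` is a placeholder for the entry-point script.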
| 1,422 | Python | 33.707316 | 79 | 0.746132 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/__version__.py | from numpy.version import version
| 34 | Python | 16.499992 | 33 | 0.852941 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/func2subr.py | #!/usr/bin/env python3
"""
Rules for building C/API module with f2py2e.
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2004/11/26 11:13:06 $
Pearu Peterson
"""
__version__ = "$Revision: 1.16 $"[10:-1]
f2py_version = 'See `f2py -v`'
import copy
from .auxfuncs import (
getfortranname, isexternal, isfunction, isfunction_wrap, isintent_in,
isintent_out, islogicalfunction, ismoduleroutine, isscalar,
issubroutine, issubroutine_wrap, outmess, show
)
def var2fixfortran(vars, a, fa=None, f90mode=None):
if fa is None:
fa = a
if a not in vars:
show(vars)
outmess('var2fixfortran: No definition for argument "%s".\n' % a)
return ''
if 'typespec' not in vars[a]:
show(vars[a])
outmess('var2fixfortran: No typespec for argument "%s".\n' % a)
return ''
vardef = vars[a]['typespec']
if vardef == 'type' and 'typename' in vars[a]:
vardef = '%s(%s)' % (vardef, vars[a]['typename'])
selector = {}
lk = ''
if 'kindselector' in vars[a]:
selector = vars[a]['kindselector']
lk = 'kind'
elif 'charselector' in vars[a]:
selector = vars[a]['charselector']
lk = 'len'
if '*' in selector:
if f90mode:
if selector['*'] in ['*', ':', '(*)']:
vardef = '%s(len=*)' % (vardef)
else:
vardef = '%s(%s=%s)' % (vardef, lk, selector['*'])
else:
if selector['*'] in ['*', ':']:
vardef = '%s*(%s)' % (vardef, selector['*'])
else:
vardef = '%s*%s' % (vardef, selector['*'])
else:
if 'len' in selector:
vardef = '%s(len=%s' % (vardef, selector['len'])
if 'kind' in selector:
vardef = '%s,kind=%s)' % (vardef, selector['kind'])
else:
vardef = '%s)' % (vardef)
elif 'kind' in selector:
vardef = '%s(kind=%s)' % (vardef, selector['kind'])
vardef = '%s %s' % (vardef, fa)
if 'dimension' in vars[a]:
vardef = '%s(%s)' % (vardef, ','.join(vars[a]['dimension']))
return vardef
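# A minimal sketch of what var2fixfortran produces; the `vars` mapping below
# is hypothetical and only illustrates the expected structure:
#
#   >>> _vars = {'x': {'typespec': 'real', 'kindselector': {'kind': '8'},
#   ...                'dimension': ['n']}}
#   >>> var2fixfortran(_vars, 'x')
#   'real(kind=8) x(n)'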
def createfuncwrapper(rout, signature=0):
assert isfunction(rout)
extra_args = []
vars = rout['vars']
for a in rout['args']:
v = rout['vars'][a]
for i, d in enumerate(v.get('dimension', [])):
if d == ':':
dn = 'f2py_%s_d%s' % (a, i)
dv = dict(typespec='integer', intent=['hide'])
dv['='] = 'shape(%s, %s)' % (a, i)
extra_args.append(dn)
vars[dn] = dv
v['dimension'][i] = dn
rout['args'].extend(extra_args)
need_interface = bool(extra_args)
ret = ['']
def add(line, ret=ret):
ret[0] = '%s\n %s' % (ret[0], line)
name = rout['name']
fortranname = getfortranname(rout)
f90mode = ismoduleroutine(rout)
newname = '%sf2pywrap' % (name)
if newname not in vars:
vars[newname] = vars[name]
args = [newname] + rout['args'][1:]
else:
args = [newname] + rout['args']
l = var2fixfortran(vars, name, newname, f90mode)
if l[:13] == 'character*(*)':
if f90mode:
l = 'character(len=10)' + l[13:]
else:
l = 'character*10' + l[13:]
charselect = vars[name]['charselector']
if charselect.get('*', '') == '(*)':
charselect['*'] = '10'
sargs = ', '.join(args)
if f90mode:
add('subroutine f2pywrap_%s_%s (%s)' %
(rout['modulename'], name, sargs))
if not signature:
add('use %s, only : %s' % (rout['modulename'], fortranname))
else:
add('subroutine f2pywrap%s (%s)' % (name, sargs))
if not need_interface:
add('external %s' % (fortranname))
l = l + ', ' + fortranname
if need_interface:
for line in rout['saved_interface'].split('\n'):
if line.lstrip().startswith('use ') and '__user__' not in line:
add(line)
args = args[1:]
dumped_args = []
for a in args:
if isexternal(vars[a]):
add('external %s' % (a))
dumped_args.append(a)
for a in args:
if a in dumped_args:
continue
if isscalar(vars[a]):
add(var2fixfortran(vars, a, f90mode=f90mode))
dumped_args.append(a)
for a in args:
if a in dumped_args:
continue
if isintent_in(vars[a]):
add(var2fixfortran(vars, a, f90mode=f90mode))
dumped_args.append(a)
for a in args:
if a in dumped_args:
continue
add(var2fixfortran(vars, a, f90mode=f90mode))
add(l)
if need_interface:
if f90mode:
# f90 module already defines needed interface
pass
else:
add('interface')
add(rout['saved_interface'].lstrip())
add('end interface')
sargs = ', '.join([a for a in args if a not in extra_args])
if not signature:
if islogicalfunction(rout):
add('%s = .not.(.not.%s(%s))' % (newname, fortranname, sargs))
else:
add('%s = %s(%s)' % (newname, fortranname, sargs))
if f90mode:
add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name))
else:
add('end')
return ret[0]
def createsubrwrapper(rout, signature=0):
assert issubroutine(rout)
extra_args = []
vars = rout['vars']
for a in rout['args']:
v = rout['vars'][a]
for i, d in enumerate(v.get('dimension', [])):
if d == ':':
dn = 'f2py_%s_d%s' % (a, i)
dv = dict(typespec='integer', intent=['hide'])
dv['='] = 'shape(%s, %s)' % (a, i)
extra_args.append(dn)
vars[dn] = dv
v['dimension'][i] = dn
rout['args'].extend(extra_args)
need_interface = bool(extra_args)
ret = ['']
def add(line, ret=ret):
ret[0] = '%s\n %s' % (ret[0], line)
name = rout['name']
fortranname = getfortranname(rout)
f90mode = ismoduleroutine(rout)
args = rout['args']
sargs = ', '.join(args)
if f90mode:
add('subroutine f2pywrap_%s_%s (%s)' %
(rout['modulename'], name, sargs))
if not signature:
add('use %s, only : %s' % (rout['modulename'], fortranname))
else:
add('subroutine f2pywrap%s (%s)' % (name, sargs))
if not need_interface:
add('external %s' % (fortranname))
if need_interface:
for line in rout['saved_interface'].split('\n'):
if line.lstrip().startswith('use ') and '__user__' not in line:
add(line)
dumped_args = []
for a in args:
if isexternal(vars[a]):
add('external %s' % (a))
dumped_args.append(a)
for a in args:
if a in dumped_args:
continue
if isscalar(vars[a]):
add(var2fixfortran(vars, a, f90mode=f90mode))
dumped_args.append(a)
for a in args:
if a in dumped_args:
continue
add(var2fixfortran(vars, a, f90mode=f90mode))
if need_interface:
if f90mode:
# f90 module already defines needed interface
pass
else:
add('interface')
for line in rout['saved_interface'].split('\n'):
if line.lstrip().startswith('use ') and '__user__' in line:
continue
add(line)
add('end interface')
sargs = ', '.join([a for a in args if a not in extra_args])
if not signature:
add('call %s(%s)' % (fortranname, sargs))
if f90mode:
add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name))
else:
add('end')
return ret[0]
def assubr(rout):
if isfunction_wrap(rout):
fortranname = getfortranname(rout)
name = rout['name']
outmess('\t\tCreating wrapper for Fortran function "%s"("%s")...\n' % (
name, fortranname))
rout = copy.copy(rout)
fname = name
rname = fname
if 'result' in rout:
rname = rout['result']
rout['vars'][fname] = rout['vars'][rname]
fvar = rout['vars'][fname]
if not isintent_out(fvar):
if 'intent' not in fvar:
fvar['intent'] = []
fvar['intent'].append('out')
flag = 1
for i in fvar['intent']:
if i.startswith('out='):
flag = 0
break
if flag:
fvar['intent'].append('out=%s' % (rname))
rout['args'][:] = [fname] + rout['args']
return rout, createfuncwrapper(rout)
if issubroutine_wrap(rout):
fortranname = getfortranname(rout)
name = rout['name']
outmess('\t\tCreating wrapper for Fortran subroutine "%s"("%s")...\n' % (
name, fortranname))
rout = copy.copy(rout)
return rout, createsubrwrapper(rout)
return rout, ''
| 9,355 | Python | 30.083056 | 81 | 0.509139 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/cfuncs.py | #!/usr/bin/env python3
"""
C declarations, CPP macros, and C functions for f2py2e.
Only required declarations/macros/functions will be used.
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 11:42:34 $
Pearu Peterson
"""
import sys
import copy
from . import __version__
f2py_version = __version__.version
errmess = sys.stderr.write
##################### Definitions ##################
outneeds = {'includes0': [], 'includes': [], 'typedefs': [], 'typedefs_generated': [],
'userincludes': [],
'cppmacros': [], 'cfuncs': [], 'callbacks': [], 'f90modhooks': [],
'commonhooks': []}
needs = {}
includes0 = {'includes0': '/*need_includes0*/'}
includes = {'includes': '/*need_includes*/'}
userincludes = {'userincludes': '/*need_userincludes*/'}
typedefs = {'typedefs': '/*need_typedefs*/'}
typedefs_generated = {'typedefs_generated': '/*need_typedefs_generated*/'}
cppmacros = {'cppmacros': '/*need_cppmacros*/'}
cfuncs = {'cfuncs': '/*need_cfuncs*/'}
callbacks = {'callbacks': '/*need_callbacks*/'}
f90modhooks = {'f90modhooks': '/*need_f90modhooks*/',
'initf90modhooksstatic': '/*initf90modhooksstatic*/',
'initf90modhooksdynamic': '/*initf90modhooksdynamic*/',
}
commonhooks = {'commonhooks': '/*need_commonhooks*/',
'initcommonhooks': '/*need_initcommonhooks*/',
}
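# A note on the dependency mechanism (explanatory sketch, not original code):
# each entry in `needs` lists the snippets that must be emitted before the
# keyed one, e.g. needs['GETSTRFROMPYTUPLE'] == ['STRINGCOPYN', 'PRINTPYOBJERR']
# below means that emitting GETSTRFROMPYTUPLE also pulls in those two helpers.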
############ Includes ###################
includes0['math.h'] = '#include <math.h>'
includes0['string.h'] = '#include <string.h>'
includes0['setjmp.h'] = '#include <setjmp.h>'
includes['arrayobject.h'] = '''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API
#include "arrayobject.h"'''
includes['arrayobject.h'] = '#include "fortranobject.h"'
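# (the second 'arrayobject.h' assignment above intentionally overrides the
# first: "fortranobject.h" itself sets PY_ARRAY_UNIQUE_SYMBOL and includes
# "arrayobject.h")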
includes['stdarg.h'] = '#include <stdarg.h>'
############# Type definitions ###############
typedefs['unsigned_char'] = 'typedef unsigned char unsigned_char;'
typedefs['unsigned_short'] = 'typedef unsigned short unsigned_short;'
typedefs['unsigned_long'] = 'typedef unsigned long unsigned_long;'
typedefs['signed_char'] = 'typedef signed char signed_char;'
typedefs['long_long'] = """\
#if defined(NPY_OS_WIN32)
typedef __int64 long_long;
#else
typedef long long long_long;
typedef unsigned long long unsigned_long_long;
#endif
"""
typedefs['unsigned_long_long'] = """\
#if defined(NPY_OS_WIN32)
typedef unsigned __int64 unsigned_long_long;
#else
typedef unsigned long long unsigned_long_long;
#endif
"""
typedefs['long_double'] = """\
#ifndef _LONG_DOUBLE
typedef long double long_double;
#endif
"""
typedefs[
'complex_long_double'] = 'typedef struct {long double r,i;} complex_long_double;'
typedefs['complex_float'] = 'typedef struct {float r,i;} complex_float;'
typedefs['complex_double'] = 'typedef struct {double r,i;} complex_double;'
typedefs['string'] = """typedef char * string;"""
############### CPP macros ####################
cppmacros['CFUNCSMESS'] = """\
#ifdef DEBUGCFUNCS
#define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess);
#define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\
PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
fprintf(stderr,\"\\n\");
#else
#define CFUNCSMESS(mess)
#define CFUNCSMESSPY(mess,obj)
#endif
"""
cppmacros['F_FUNC'] = """\
#if defined(PREPEND_FORTRAN)
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) _##F
#else
#define F_FUNC(f,F) _##f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) _##F##_
#else
#define F_FUNC(f,F) _##f##_
#endif
#endif
#else
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) F
#else
#define F_FUNC(f,F) f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) F##_
#else
#define F_FUNC(f,F) f##_
#endif
#endif
#endif
#if defined(UNDERSCORE_G77)
#define F_FUNC_US(f,F) F_FUNC(f##_,F##_)
#else
#define F_FUNC_US(f,F) F_FUNC(f,F)
#endif
"""
cppmacros['F_WRAPPEDFUNC'] = """\
#if defined(PREPEND_FORTRAN)
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F
#else
#define F_WRAPPEDFUNC(f,F) _f2pywrap##f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F##_
#else
#define F_WRAPPEDFUNC(f,F) _f2pywrap##f##_
#endif
#endif
#else
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F
#else
#define F_WRAPPEDFUNC(f,F) f2pywrap##f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F##_
#else
#define F_WRAPPEDFUNC(f,F) f2pywrap##f##_
#endif
#endif
#endif
#if defined(UNDERSCORE_G77)
#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f##_,F##_)
#else
#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f,F)
#endif
"""
cppmacros['F_MODFUNC'] = """\
#if defined(F90MOD2CCONV1) /*E.g. Compaq Fortran */
#if defined(NO_APPEND_FORTRAN)
#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f
#else
#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f ## _
#endif
#endif
#if defined(F90MOD2CCONV2) /*E.g. IBM XL Fortran, not tested though */
#if defined(NO_APPEND_FORTRAN)
#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f
#else
#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f ## _
#endif
#endif
#if defined(F90MOD2CCONV3) /*E.g. MIPSPro Compilers */
#if defined(NO_APPEND_FORTRAN)
#define F_MODFUNCNAME(m,f) f ## .in. ## m
#else
#define F_MODFUNCNAME(m,f) f ## .in. ## m ## _
#endif
#endif
/*
#if defined(UPPERCASE_FORTRAN)
#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(M,F)
#else
#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(m,f)
#endif
*/
#define F_MODFUNC(m,f) (*(f2pymodstruct##m##.##f))
"""
cppmacros['SWAPUNSAFE'] = """\
#define SWAP(a,b) (size_t)(a) = ((size_t)(a) ^ (size_t)(b));\\
(size_t)(b) = ((size_t)(a) ^ (size_t)(b));\\
(size_t)(a) = ((size_t)(a) ^ (size_t)(b))
"""
cppmacros['SWAP'] = """\
#define SWAP(a,b,t) {\\
t *c;\\
c = a;\\
a = b;\\
b = c;}
"""
# cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) &
# NPY_ARRAY_C_CONTIGUOUS)'
cppmacros['PRINTPYOBJERR'] = """\
#define PRINTPYOBJERR(obj)\\
fprintf(stderr,\"#modulename#.error is related to \");\\
PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
fprintf(stderr,\"\\n\");
"""
cppmacros['MINMAX'] = """\
#ifndef max
#define max(a,b) ((a > b) ? (a) : (b))
#endif
#ifndef min
#define min(a,b) ((a < b) ? (a) : (b))
#endif
#ifndef MAX
#define MAX(a,b) ((a > b) ? (a) : (b))
#endif
#ifndef MIN
#define MIN(a,b) ((a < b) ? (a) : (b))
#endif
"""
needs['len..'] = ['f2py_size']
cppmacros['len..'] = """\
#define rank(var) var ## _Rank
#define shape(var,dim) var ## _Dims[dim]
#define old_rank(var) (PyArray_NDIM((PyArrayObject *)(capi_ ## var ## _tmp)))
#define old_shape(var,dim) PyArray_DIM(((PyArrayObject *)(capi_ ## var ## _tmp)),dim)
#define fshape(var,dim) shape(var,rank(var)-dim-1)
#define len(var) shape(var,0)
#define flen(var) fshape(var,0)
#define old_size(var) PyArray_SIZE((PyArrayObject *)(capi_ ## var ## _tmp))
/* #define index(i) capi_i ## i */
#define slen(var) capi_ ## var ## _len
#define size(var, ...) f2py_size((PyArrayObject *)(capi_ ## var ## _tmp), ## __VA_ARGS__, -1)
"""
needs['f2py_size'] = ['stdarg.h']
cfuncs['f2py_size'] = """\
static int f2py_size(PyArrayObject* var, ...)
{
npy_int sz = 0;
npy_int dim;
npy_int rank;
va_list argp;
va_start(argp, var);
dim = va_arg(argp, npy_int);
if (dim==-1)
{
sz = PyArray_SIZE(var);
}
else
{
rank = PyArray_NDIM(var);
if (dim>=1 && dim<=rank)
sz = PyArray_DIM(var, dim-1);
else
fprintf(stderr, \"f2py_size: 2nd argument value=%d fails to satisfy 1<=value<=%d. Result will be 0.\\n\", dim, rank);
}
va_end(argp);
return sz;
}
"""
cppmacros[
'pyobj_from_char1'] = '#define pyobj_from_char1(v) (PyLong_FromLong(v))'
cppmacros[
'pyobj_from_short1'] = '#define pyobj_from_short1(v) (PyLong_FromLong(v))'
needs['pyobj_from_int1'] = ['signed_char']
cppmacros['pyobj_from_int1'] = '#define pyobj_from_int1(v) (PyLong_FromLong(v))'
cppmacros[
'pyobj_from_long1'] = '#define pyobj_from_long1(v) (PyLong_FromLong(v))'
needs['pyobj_from_long_long1'] = ['long_long']
cppmacros['pyobj_from_long_long1'] = """\
#ifdef HAVE_LONG_LONG
#define pyobj_from_long_long1(v) (PyLong_FromLongLong(v))
#else
#warning HAVE_LONG_LONG is not available. Redefining pyobj_from_long_long.
#define pyobj_from_long_long1(v) (PyLong_FromLong(v))
#endif
"""
needs['pyobj_from_long_double1'] = ['long_double']
cppmacros[
'pyobj_from_long_double1'] = '#define pyobj_from_long_double1(v) (PyFloat_FromDouble(v))'
cppmacros[
'pyobj_from_double1'] = '#define pyobj_from_double1(v) (PyFloat_FromDouble(v))'
cppmacros[
'pyobj_from_float1'] = '#define pyobj_from_float1(v) (PyFloat_FromDouble(v))'
needs['pyobj_from_complex_long_double1'] = ['complex_long_double']
cppmacros[
'pyobj_from_complex_long_double1'] = '#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))'
needs['pyobj_from_complex_double1'] = ['complex_double']
cppmacros[
'pyobj_from_complex_double1'] = '#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))'
needs['pyobj_from_complex_float1'] = ['complex_float']
cppmacros[
'pyobj_from_complex_float1'] = '#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))'
needs['pyobj_from_string1'] = ['string']
cppmacros[
'pyobj_from_string1'] = '#define pyobj_from_string1(v) (PyUnicode_FromString((char *)v))'
needs['pyobj_from_string1size'] = ['string']
cppmacros[
'pyobj_from_string1size'] = '#define pyobj_from_string1size(v,len) (PyUnicode_FromStringAndSize((char *)v, len))'
needs['TRYPYARRAYTEMPLATE'] = ['PRINTPYOBJERR']
cppmacros['TRYPYARRAYTEMPLATE'] = """\
/* New SciPy */
#define TRYPYARRAYTEMPLATECHAR case NPY_STRING: *(char *)(PyArray_DATA(arr))=*v; break;
#define TRYPYARRAYTEMPLATELONG case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break;
#define TRYPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr,PyArray_DATA(arr),pyobj_from_ ## ctype ## 1(*v)); break;
#define TRYPYARRAYTEMPLATE(ctype,typecode) \\
PyArrayObject *arr = NULL;\\
if (!obj) return -2;\\
if (!PyArray_Check(obj)) return -1;\\
if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\
if (PyArray_DESCR(arr)->type==typecode) {*(ctype *)(PyArray_DATA(arr))=*v; return 1;}\\
switch (PyArray_TYPE(arr)) {\\
case NPY_DOUBLE: *(npy_double *)(PyArray_DATA(arr))=*v; break;\\
case NPY_INT: *(npy_int *)(PyArray_DATA(arr))=*v; break;\\
case NPY_LONG: *(npy_long *)(PyArray_DATA(arr))=*v; break;\\
case NPY_FLOAT: *(npy_float *)(PyArray_DATA(arr))=*v; break;\\
case NPY_CDOUBLE: *(npy_double *)(PyArray_DATA(arr))=*v; break;\\
case NPY_CFLOAT: *(npy_float *)(PyArray_DATA(arr))=*v; break;\\
case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=(*v!=0); break;\\
case NPY_UBYTE: *(npy_ubyte *)(PyArray_DATA(arr))=*v; break;\\
case NPY_BYTE: *(npy_byte *)(PyArray_DATA(arr))=*v; break;\\
case NPY_SHORT: *(npy_short *)(PyArray_DATA(arr))=*v; break;\\
case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=*v; break;\\
case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=*v; break;\\
case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=*v; break;\\
case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=*v; break;\\
case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=*v; break;\\
case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\
case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\
case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_ ## ctype ## 1(*v)); break;\\
default: return -2;\\
};\\
return 1
"""
needs['TRYCOMPLEXPYARRAYTEMPLATE'] = ['PRINTPYOBJERR']
cppmacros['TRYCOMPLEXPYARRAYTEMPLATE'] = """\
#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break;
#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\
PyArrayObject *arr = NULL;\\
if (!obj) return -2;\\
if (!PyArray_Check(obj)) return -1;\\
if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\
if (PyArray_DESCR(arr)->type==typecode) {\\
*(ctype *)(PyArray_DATA(arr))=(*v).r;\\
*(ctype *)(PyArray_DATA(arr)+sizeof(ctype))=(*v).i;\\
return 1;\\
}\\
switch (PyArray_TYPE(arr)) {\\
case NPY_CDOUBLE: *(npy_double *)(PyArray_DATA(arr))=(*v).r;\\
*(npy_double *)(PyArray_DATA(arr)+sizeof(npy_double))=(*v).i;\\
break;\\
case NPY_CFLOAT: *(npy_float *)(PyArray_DATA(arr))=(*v).r;\\
*(npy_float *)(PyArray_DATA(arr)+sizeof(npy_float))=(*v).i;\\
break;\\
case NPY_DOUBLE: *(npy_double *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_LONG: *(npy_long *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_FLOAT: *(npy_float *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_INT: *(npy_int *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_SHORT: *(npy_short *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_UBYTE: *(npy_ubyte *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_BYTE: *(npy_byte *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=((*v).r!=0 && (*v).i!=0); break;\\
case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r; break;\\
case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r;\\
*(npy_longdouble *)(PyArray_DATA(arr)+sizeof(npy_longdouble))=(*v).i;\\
break;\\
case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break;\\
default: return -2;\\
};\\
return -1;
"""
# cppmacros['NUMFROMARROBJ']="""\
# define NUMFROMARROBJ(typenum,ctype) \\
# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
# if (arr) {\\
# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\
# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\
# goto capi_fail;\\
# } else {\\
# (PyArray_DESCR(arr)->cast[typenum])(PyArray_DATA(arr),1,(char*)v,1,1);\\
# }\\
# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
# return 1;\\
# }
# """
# XXX: Note that CNUMFROMARROBJ is identical with NUMFROMARROBJ
# cppmacros['CNUMFROMARROBJ']="""\
# define CNUMFROMARROBJ(typenum,ctype) \\
# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
# if (arr) {\\
# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\
# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\
# goto capi_fail;\\
# } else {\\
# (PyArray_DESCR(arr)->cast[typenum])((void *)(PyArray_DATA(arr)),1,(void *)(v),1,1);\\
# }\\
# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
# return 1;\\
# }
# """
needs['GETSTRFROMPYTUPLE'] = ['STRINGCOPYN', 'PRINTPYOBJERR']
cppmacros['GETSTRFROMPYTUPLE'] = """\
#define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\
PyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\
if (rv_cb_str == NULL)\\
goto capi_fail;\\
if (PyBytes_Check(rv_cb_str)) {\\
str[len-1]='\\0';\\
STRINGCOPYN((str),PyBytes_AS_STRING((PyBytesObject*)rv_cb_str),(len));\\
} else {\\
PRINTPYOBJERR(rv_cb_str);\\
PyErr_SetString(#modulename#_error,\"string object expected\");\\
goto capi_fail;\\
}\\
}
"""
cppmacros['GETSCALARFROMPYTUPLE'] = """\
#define GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\
if ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\
if (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\
goto capi_fail;\\
}
"""
cppmacros['FAILNULL'] = """\\
#define FAILNULL(p) do { \\
if ((p) == NULL) { \\
PyErr_SetString(PyExc_MemoryError, "NULL pointer found"); \\
goto capi_fail; \\
} \\
} while (0)
"""
needs['MEMCOPY'] = ['string.h', 'FAILNULL']
cppmacros['MEMCOPY'] = """\
#define MEMCOPY(to,from,n)\\
do { FAILNULL(to); FAILNULL(from); (void)memcpy(to,from,n); } while (0)
"""
cppmacros['STRINGMALLOC'] = """\
#define STRINGMALLOC(str,len)\\
if ((str = (string)malloc(len+1)) == NULL) {\\
PyErr_SetString(PyExc_MemoryError, \"out of memory\");\\
goto capi_fail;\\
} else {\\
(str)[len] = '\\0';\\
}
"""
cppmacros['STRINGFREE'] = """\
#define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0)
"""
needs['STRINGPADN'] = ['string.h']
cppmacros['STRINGPADN'] = """\
/*
STRINGPADN replaces trailing NULLVALUE bytes with PADDING, scanning
from the right. `to` must have a size of at least N bytes.
If `to[N-1]` is NULLVALUE, then it and all immediately preceding
NULLVALUE bytes are replaced with PADDING.
Calling STRINGPADN(to, N, PADDING, NULLVALUE) (with the last two
arguments swapped) performs the inverse operation.
*/
#define STRINGPADN(to, N, NULLVALUE, PADDING) \\
do { \\
int _m = (N); \\
char *_to = (to); \\
for (_m -= 1; _m >= 0 && _to[_m] == NULLVALUE; _m--) { \\
_to[_m] = PADDING; \\
} \\
} while (0)
"""
needs['STRINGCOPYN'] = ['string.h', 'FAILNULL']
cppmacros['STRINGCOPYN'] = """\
/*
STRINGCOPYN copies N bytes.
`to` and `from` buffers must have sizes of at least N bytes.
*/
#define STRINGCOPYN(to,from,N) \\
do { \\
int _m = (N); \\
char *_to = (to); \\
char *_from = (from); \\
FAILNULL(_to); FAILNULL(_from); \\
(void)strncpy(_to, _from, _m); \\
} while (0)
"""
needs['STRINGCOPY'] = ['string.h', 'FAILNULL']
cppmacros['STRINGCOPY'] = """\
#define STRINGCOPY(to,from)\\
do { FAILNULL(to); FAILNULL(from); (void)strcpy(to,from); } while (0)
"""
cppmacros['CHECKGENERIC'] = """\
#define CHECKGENERIC(check,tcheck,name) \\
if (!(check)) {\\
PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
/*goto capi_fail;*/\\
} else """
cppmacros['CHECKARRAY'] = """\
#define CHECKARRAY(check,tcheck,name) \\
if (!(check)) {\\
PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
/*goto capi_fail;*/\\
} else """
cppmacros['CHECKSTRING'] = """\
#define CHECKSTRING(check,tcheck,name,show,var)\\
if (!(check)) {\\
char errstring[256];\\
sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\
PyErr_SetString(#modulename#_error, errstring);\\
/*goto capi_fail;*/\\
} else """
cppmacros['CHECKSCALAR'] = """\
#define CHECKSCALAR(check,tcheck,name,show,var)\\
if (!(check)) {\\
char errstring[256];\\
sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\
PyErr_SetString(#modulename#_error,errstring);\\
/*goto capi_fail;*/\\
} else """
# cppmacros['CHECKDIMS']="""\
# define CHECKDIMS(dims,rank) \\
# for (int i=0;i<(rank);i++)\\
# if (dims[i]<0) {\\
# fprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\
# goto capi_fail;\\
# }
# """
cppmacros[
'ARRSIZE'] = '#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))'
cppmacros['OLDPYNUM'] = """\
#ifdef OLDPYNUM
#error You need to install NumPy version 0.13 or higher. See https://scipy.org/install.html
#endif
"""
cppmacros["F2PY_THREAD_LOCAL_DECL"] = """\
#ifndef F2PY_THREAD_LOCAL_DECL
#if defined(_MSC_VER)
#define F2PY_THREAD_LOCAL_DECL __declspec(thread)
#elif defined(NPY_OS_MINGW)
#define F2PY_THREAD_LOCAL_DECL __thread
#elif defined(__STDC_VERSION__) \\
&& (__STDC_VERSION__ >= 201112L) \\
&& !defined(__STDC_NO_THREADS__) \\
&& (!defined(__GLIBC__) || __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 12)) \\
&& !defined(NPY_OS_OPENBSD)
/* __STDC_NO_THREADS__ was first defined in a maintenance release of glibc 2.12,
see https://lists.gnu.org/archive/html/commit-hurd/2012-07/msg00180.html,
   so `!defined(__STDC_NO_THREADS__)` may give a false positive for the
   existence of `threads.h` when using an older release of glibc 2.12.
   See gh-19437 for details on OpenBSD. */
#include <threads.h>
#define F2PY_THREAD_LOCAL_DECL thread_local
#elif defined(__GNUC__) \\
&& (__GNUC__ > 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ >= 4)))
#define F2PY_THREAD_LOCAL_DECL __thread
#endif
#endif
"""
################# C functions ###############
cfuncs['calcarrindex'] = """\
static int calcarrindex(int *i,PyArrayObject *arr) {
int k,ii = i[0];
for (k=1; k < PyArray_NDIM(arr); k++)
ii += (ii*(PyArray_DIM(arr,k) - 1)+i[k]); /* assuming contiguous arr */
return ii;
}"""
cfuncs['calcarrindextr'] = """\
static int calcarrindextr(int *i,PyArrayObject *arr) {
int k,ii = i[PyArray_NDIM(arr)-1];
for (k=1; k < PyArray_NDIM(arr); k++)
ii += (ii*(PyArray_DIM(arr,PyArray_NDIM(arr)-k-1) - 1)+i[PyArray_NDIM(arr)-k-1]); /* assuming contiguous arr */
return ii;
}"""
cfuncs['forcomb'] = """\
static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache;
static int initforcomb(npy_intp *dims,int nd,int tr) {
int k;
if (dims==NULL) return 0;
if (nd<0) return 0;
forcombcache.nd = nd;
forcombcache.d = dims;
forcombcache.tr = tr;
if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0;
if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0;
for (k=1;k<nd;k++) {
forcombcache.i[k] = forcombcache.i_tr[nd-k-1] = 0;
}
forcombcache.i[0] = forcombcache.i_tr[nd-1] = -1;
return 1;
}
static int *nextforcomb(void) {
int j,*i,*i_tr,k;
int nd=forcombcache.nd;
if ((i=forcombcache.i) == NULL) return NULL;
if ((i_tr=forcombcache.i_tr) == NULL) return NULL;
if (forcombcache.d == NULL) return NULL;
i[0]++;
if (i[0]==forcombcache.d[0]) {
j=1;
while ((j<nd) && (i[j]==forcombcache.d[j]-1)) j++;
if (j==nd) {
free(i);
free(i_tr);
return NULL;
}
for (k=0;k<j;k++) i[k] = i_tr[nd-k-1] = 0;
i[j]++;
i_tr[nd-j-1]++;
} else
i_tr[nd-1]++;
if (forcombcache.tr) return i_tr;
return i;
}"""
needs['try_pyarr_from_string'] = ['STRINGCOPYN', 'PRINTPYOBJERR', 'string']
cfuncs['try_pyarr_from_string'] = """\
/*
  try_pyarr_from_string copies at most `len` bytes of `str` (and never
  more than the array's total byte size) to the data of the `ndarray` obj.
  If obj is an `ndarray`, it is assumed to be contiguous.
  If the specified len == -1, str must be null-terminated.
*/
static int try_pyarr_from_string(PyObject *obj,
const string str, const int len) {
#ifdef DEBUGCFUNCS
fprintf(stderr, "try_pyarr_from_string(str='%s', len=%d, obj=%p)\\n",
(char*)str,len, obj);
#endif
if (PyArray_Check(obj)) {
PyArrayObject *arr = (PyArrayObject *)obj;
assert(ISCONTIGUOUS(arr));
string buf = PyArray_DATA(arr);
npy_intp n = len;
if (n == -1) {
/* Assuming null-terminated str. */
n = strlen(str);
}
if (n > PyArray_NBYTES(arr)) {
n = PyArray_NBYTES(arr);
}
STRINGCOPYN(buf, str, n);
return 1;
}
capi_fail:
PRINTPYOBJERR(obj);
PyErr_SetString(#modulename#_error, \"try_pyarr_from_string failed\");
return 0;
}
"""
needs['string_from_pyobj'] = ['string', 'STRINGMALLOC', 'STRINGCOPYN']
cfuncs['string_from_pyobj'] = """\
/*
Create a new string buffer `str` of at most length `len` from a
Python string-like object `obj`.
  The string buffer has the given size (len), or a size derived from
  obj (or from inistr when obj is None) when len == -1.
  The string buffer is padded with blanks: in Fortran, trailing blanks
  are insignificant, contrary to C nulls.
*/
static int
string_from_pyobj(string *str, int *len, const string inistr, PyObject *obj,
const char *errmess)
{
PyObject *tmp = NULL;
string buf = NULL;
npy_intp n = -1;
#ifdef DEBUGCFUNCS
fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\",
(char*)str, *len, (char *)inistr, obj);
#endif
if (obj == Py_None) {
n = strlen(inistr);
buf = inistr;
}
else if (PyArray_Check(obj)) {
PyArrayObject *arr = (PyArrayObject *)obj;
if (!ISCONTIGUOUS(arr)) {
PyErr_SetString(PyExc_ValueError,
\"array object is non-contiguous.\");
goto capi_fail;
}
n = PyArray_NBYTES(arr);
buf = PyArray_DATA(arr);
n = strnlen(buf, n);
}
else {
if (PyBytes_Check(obj)) {
tmp = obj;
Py_INCREF(tmp);
}
else if (PyUnicode_Check(obj)) {
tmp = PyUnicode_AsASCIIString(obj);
}
else {
PyObject *tmp2;
tmp2 = PyObject_Str(obj);
if (tmp2) {
tmp = PyUnicode_AsASCIIString(tmp2);
Py_DECREF(tmp2);
}
else {
tmp = NULL;
}
}
if (tmp == NULL) goto capi_fail;
n = PyBytes_GET_SIZE(tmp);
buf = PyBytes_AS_STRING(tmp);
}
if (*len == -1) {
/* TODO: change the type of `len` so that we can remove this */
if (n > NPY_MAX_INT) {
PyErr_SetString(PyExc_OverflowError,
"object too large for a 32-bit int");
goto capi_fail;
}
*len = n;
}
else if (*len < n) {
/* discard the last (len-n) bytes of input buf */
n = *len;
}
if (n < 0 || *len < 0 || buf == NULL) {
goto capi_fail;
}
STRINGMALLOC(*str, *len); // *str is allocated with size (*len + 1)
if (n < *len) {
/*
Pad fixed-width string with nulls. The caller will replace
nulls with blanks when the corresponding argument is not
intent(c).
*/
memset(*str + n, '\\0', *len - n);
}
STRINGCOPYN(*str, buf, n);
Py_XDECREF(tmp);
return 1;
capi_fail:
Py_XDECREF(tmp);
{
PyObject* err = PyErr_Occurred();
if (err == NULL) {
err = #modulename#_error;
}
PyErr_SetString(err, errmess);
}
return 0;
}
"""
needs['char_from_pyobj'] = ['int_from_pyobj']
cfuncs['char_from_pyobj'] = """\
static int
char_from_pyobj(char* v, PyObject *obj, const char *errmess) {
int i = 0;
if (int_from_pyobj(&i, obj, errmess)) {
*v = (char)i;
return 1;
}
return 0;
}
"""
needs['signed_char_from_pyobj'] = ['int_from_pyobj', 'signed_char']
cfuncs['signed_char_from_pyobj'] = """\
static int
signed_char_from_pyobj(signed_char* v, PyObject *obj, const char *errmess) {
int i = 0;
if (int_from_pyobj(&i, obj, errmess)) {
*v = (signed_char)i;
return 1;
}
return 0;
}
"""
needs['short_from_pyobj'] = ['int_from_pyobj']
cfuncs['short_from_pyobj'] = """\
static int
short_from_pyobj(short* v, PyObject *obj, const char *errmess) {
int i = 0;
if (int_from_pyobj(&i, obj, errmess)) {
*v = (short)i;
return 1;
}
return 0;
}
"""
cfuncs['int_from_pyobj'] = """\
static int
int_from_pyobj(int* v, PyObject *obj, const char *errmess)
{
PyObject* tmp = NULL;
if (PyLong_Check(obj)) {
*v = Npy__PyLong_AsInt(obj);
return !(*v == -1 && PyErr_Occurred());
}
tmp = PyNumber_Long(obj);
if (tmp) {
*v = Npy__PyLong_AsInt(tmp);
Py_DECREF(tmp);
return !(*v == -1 && PyErr_Occurred());
}
if (PyComplex_Check(obj)) {
PyErr_Clear();
tmp = PyObject_GetAttrString(obj,\"real\");
}
else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
/*pass*/;
}
else if (PySequence_Check(obj)) {
PyErr_Clear();
tmp = PySequence_GetItem(obj, 0);
}
if (tmp) {
if (int_from_pyobj(v, tmp, errmess)) {
Py_DECREF(tmp);
return 1;
}
Py_DECREF(tmp);
}
{
PyObject* err = PyErr_Occurred();
if (err == NULL) {
err = #modulename#_error;
}
PyErr_SetString(err, errmess);
}
return 0;
}
"""
cfuncs['long_from_pyobj'] = """\
static int
long_from_pyobj(long* v, PyObject *obj, const char *errmess) {
PyObject* tmp = NULL;
if (PyLong_Check(obj)) {
*v = PyLong_AsLong(obj);
return !(*v == -1 && PyErr_Occurred());
}
tmp = PyNumber_Long(obj);
if (tmp) {
*v = PyLong_AsLong(tmp);
Py_DECREF(tmp);
return !(*v == -1 && PyErr_Occurred());
}
if (PyComplex_Check(obj)) {
PyErr_Clear();
tmp = PyObject_GetAttrString(obj,\"real\");
}
else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
/*pass*/;
}
else if (PySequence_Check(obj)) {
PyErr_Clear();
tmp = PySequence_GetItem(obj, 0);
}
if (tmp) {
if (long_from_pyobj(v, tmp, errmess)) {
Py_DECREF(tmp);
return 1;
}
Py_DECREF(tmp);
}
{
PyObject* err = PyErr_Occurred();
if (err == NULL) {
err = #modulename#_error;
}
PyErr_SetString(err, errmess);
}
return 0;
}
"""
needs['long_long_from_pyobj'] = ['long_long']
cfuncs['long_long_from_pyobj'] = """\
static int
long_long_from_pyobj(long_long* v, PyObject *obj, const char *errmess)
{
PyObject* tmp = NULL;
if (PyLong_Check(obj)) {
*v = PyLong_AsLongLong(obj);
return !(*v == -1 && PyErr_Occurred());
}
tmp = PyNumber_Long(obj);
if (tmp) {
*v = PyLong_AsLongLong(tmp);
Py_DECREF(tmp);
return !(*v == -1 && PyErr_Occurred());
}
if (PyComplex_Check(obj)) {
PyErr_Clear();
tmp = PyObject_GetAttrString(obj,\"real\");
}
else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
/*pass*/;
}
else if (PySequence_Check(obj)) {
PyErr_Clear();
tmp = PySequence_GetItem(obj, 0);
}
if (tmp) {
if (long_long_from_pyobj(v, tmp, errmess)) {
Py_DECREF(tmp);
return 1;
}
Py_DECREF(tmp);
}
{
PyObject* err = PyErr_Occurred();
if (err == NULL) {
err = #modulename#_error;
}
PyErr_SetString(err,errmess);
}
return 0;
}
"""
needs['long_double_from_pyobj'] = ['double_from_pyobj', 'long_double']
cfuncs['long_double_from_pyobj'] = """\
static int
long_double_from_pyobj(long_double* v, PyObject *obj, const char *errmess)
{
double d=0;
if (PyArray_CheckScalar(obj)){
if PyArray_IsScalar(obj, LongDouble) {
PyArray_ScalarAsCtype(obj, v);
return 1;
}
else if (PyArray_Check(obj) && PyArray_TYPE(obj) == NPY_LONGDOUBLE) {
(*v) = *((npy_longdouble *)PyArray_DATA(obj));
return 1;
}
}
if (double_from_pyobj(&d, obj, errmess)) {
*v = (long_double)d;
return 1;
}
return 0;
}
"""
cfuncs['double_from_pyobj'] = """\
static int
double_from_pyobj(double* v, PyObject *obj, const char *errmess)
{
PyObject* tmp = NULL;
if (PyFloat_Check(obj)) {
*v = PyFloat_AsDouble(obj);
return !(*v == -1.0 && PyErr_Occurred());
}
tmp = PyNumber_Float(obj);
if (tmp) {
*v = PyFloat_AsDouble(tmp);
Py_DECREF(tmp);
return !(*v == -1.0 && PyErr_Occurred());
}
if (PyComplex_Check(obj)) {
PyErr_Clear();
tmp = PyObject_GetAttrString(obj,\"real\");
}
else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
/*pass*/;
}
else if (PySequence_Check(obj)) {
PyErr_Clear();
tmp = PySequence_GetItem(obj, 0);
}
if (tmp) {
if (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
Py_DECREF(tmp);
}
{
PyObject* err = PyErr_Occurred();
if (err==NULL) err = #modulename#_error;
PyErr_SetString(err,errmess);
}
return 0;
}
"""
needs['float_from_pyobj'] = ['double_from_pyobj']
cfuncs['float_from_pyobj'] = """\
static int
float_from_pyobj(float* v, PyObject *obj, const char *errmess)
{
double d=0.0;
if (double_from_pyobj(&d,obj,errmess)) {
*v = (float)d;
return 1;
}
return 0;
}
"""
needs['complex_long_double_from_pyobj'] = ['complex_long_double', 'long_double',
'complex_double_from_pyobj']
cfuncs['complex_long_double_from_pyobj'] = """\
static int
complex_long_double_from_pyobj(complex_long_double* v, PyObject *obj, const char *errmess)
{
complex_double cd = {0.0,0.0};
if (PyArray_CheckScalar(obj)){
if PyArray_IsScalar(obj, CLongDouble) {
PyArray_ScalarAsCtype(obj, v);
return 1;
}
else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) {
(*v).r = ((npy_clongdouble *)PyArray_DATA(obj))->real;
(*v).i = ((npy_clongdouble *)PyArray_DATA(obj))->imag;
return 1;
}
}
if (complex_double_from_pyobj(&cd,obj,errmess)) {
(*v).r = (long_double)cd.r;
(*v).i = (long_double)cd.i;
return 1;
}
return 0;
}
"""
needs['complex_double_from_pyobj'] = ['complex_double']
cfuncs['complex_double_from_pyobj'] = """\
static int
complex_double_from_pyobj(complex_double* v, PyObject *obj, const char *errmess) {
Py_complex c;
if (PyComplex_Check(obj)) {
c = PyComplex_AsCComplex(obj);
(*v).r = c.real;
(*v).i = c.imag;
return 1;
}
if (PyArray_IsScalar(obj, ComplexFloating)) {
if (PyArray_IsScalar(obj, CFloat)) {
npy_cfloat new;
PyArray_ScalarAsCtype(obj, &new);
(*v).r = (double)new.real;
(*v).i = (double)new.imag;
}
else if (PyArray_IsScalar(obj, CLongDouble)) {
npy_clongdouble new;
PyArray_ScalarAsCtype(obj, &new);
(*v).r = (double)new.real;
(*v).i = (double)new.imag;
}
else { /* if (PyArray_IsScalar(obj, CDouble)) */
PyArray_ScalarAsCtype(obj, v);
}
return 1;
}
if (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */
PyArrayObject *arr;
if (PyArray_Check(obj)) {
arr = (PyArrayObject *)PyArray_Cast((PyArrayObject *)obj, NPY_CDOUBLE);
}
else {
arr = (PyArrayObject *)PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE));
}
if (arr == NULL) {
return 0;
}
(*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real;
(*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag;
Py_DECREF(arr);
return 1;
}
/* Python does not provide PyNumber_Complex function :-( */
(*v).i = 0.0;
if (PyFloat_Check(obj)) {
(*v).r = PyFloat_AsDouble(obj);
return !((*v).r == -1.0 && PyErr_Occurred());
}
if (PyLong_Check(obj)) {
(*v).r = PyLong_AsDouble(obj);
return !((*v).r == -1.0 && PyErr_Occurred());
}
if (PySequence_Check(obj) && !(PyBytes_Check(obj) || PyUnicode_Check(obj))) {
PyObject *tmp = PySequence_GetItem(obj,0);
if (tmp) {
if (complex_double_from_pyobj(v,tmp,errmess)) {
Py_DECREF(tmp);
return 1;
}
Py_DECREF(tmp);
}
}
{
PyObject* err = PyErr_Occurred();
if (err==NULL)
err = PyExc_TypeError;
PyErr_SetString(err,errmess);
}
return 0;
}
"""
needs['complex_float_from_pyobj'] = [
'complex_float', 'complex_double_from_pyobj']
cfuncs['complex_float_from_pyobj'] = """\
static int
complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess)
{
complex_double cd={0.0,0.0};
if (complex_double_from_pyobj(&cd,obj,errmess)) {
(*v).r = (float)cd.r;
(*v).i = (float)cd.i;
return 1;
}
return 0;
}
"""
needs['try_pyarr_from_char'] = ['pyobj_from_char1', 'TRYPYARRAYTEMPLATE']
cfuncs[
'try_pyarr_from_char'] = 'static int try_pyarr_from_char(PyObject* obj,char* v) {\n TRYPYARRAYTEMPLATE(char,\'c\');\n}\n'
needs['try_pyarr_from_unsigned_char'] = ['TRYPYARRAYTEMPLATE', 'unsigned_char']
cfuncs[
'try_pyarr_from_unsigned_char'] = 'static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n TRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n'
needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'signed_char']
cfuncs[
'try_pyarr_from_signed_char'] = 'static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n TRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n'
needs['try_pyarr_from_short'] = ['pyobj_from_short1', 'TRYPYARRAYTEMPLATE']
cfuncs[
'try_pyarr_from_short'] = 'static int try_pyarr_from_short(PyObject* obj,short* v) {\n TRYPYARRAYTEMPLATE(short,\'s\');\n}\n'
needs['try_pyarr_from_int'] = ['pyobj_from_int1', 'TRYPYARRAYTEMPLATE']
cfuncs[
'try_pyarr_from_int'] = 'static int try_pyarr_from_int(PyObject* obj,int* v) {\n TRYPYARRAYTEMPLATE(int,\'i\');\n}\n'
needs['try_pyarr_from_long'] = ['pyobj_from_long1', 'TRYPYARRAYTEMPLATE']
cfuncs[
'try_pyarr_from_long'] = 'static int try_pyarr_from_long(PyObject* obj,long* v) {\n TRYPYARRAYTEMPLATE(long,\'l\');\n}\n'
needs['try_pyarr_from_long_long'] = [
'pyobj_from_long_long1', 'TRYPYARRAYTEMPLATE', 'long_long']
cfuncs[
'try_pyarr_from_long_long'] = 'static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n TRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n'
needs['try_pyarr_from_float'] = ['pyobj_from_float1', 'TRYPYARRAYTEMPLATE']
cfuncs[
'try_pyarr_from_float'] = 'static int try_pyarr_from_float(PyObject* obj,float* v) {\n TRYPYARRAYTEMPLATE(float,\'f\');\n}\n'
needs['try_pyarr_from_double'] = ['pyobj_from_double1', 'TRYPYARRAYTEMPLATE']
cfuncs[
'try_pyarr_from_double'] = 'static int try_pyarr_from_double(PyObject* obj,double* v) {\n TRYPYARRAYTEMPLATE(double,\'d\');\n}\n'
needs['try_pyarr_from_complex_float'] = [
'pyobj_from_complex_float1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_float']
cfuncs[
'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n'
needs['try_pyarr_from_complex_double'] = [
'pyobj_from_complex_double1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_double']
cfuncs[
'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n'
needs['create_cb_arglist'] = ['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX']
# create the list of arguments to be used when calling back to python
cfuncs['create_cb_arglist'] = """\
static int
create_cb_arglist(PyObject* fun, PyTupleObject* xa , const int maxnofargs,
const int nofoptargs, int *nofargs, PyTupleObject **args,
const char *errmess)
{
PyObject *tmp = NULL;
PyObject *tmp_fun = NULL;
Py_ssize_t tot, opt, ext, siz, i, di = 0;
CFUNCSMESS(\"create_cb_arglist\\n\");
tot=opt=ext=siz=0;
/* Get the total number of arguments */
if (PyFunction_Check(fun)) {
tmp_fun = fun;
Py_INCREF(tmp_fun);
}
else {
di = 1;
if (PyObject_HasAttrString(fun,\"im_func\")) {
tmp_fun = PyObject_GetAttrString(fun,\"im_func\");
}
else if (PyObject_HasAttrString(fun,\"__call__\")) {
tmp = PyObject_GetAttrString(fun,\"__call__\");
if (PyObject_HasAttrString(tmp,\"im_func\"))
tmp_fun = PyObject_GetAttrString(tmp,\"im_func\");
else {
tmp_fun = fun; /* built-in function */
Py_INCREF(tmp_fun);
tot = maxnofargs;
if (PyCFunction_Check(fun)) {
/* In case the function has a co_argcount (like on PyPy) */
di = 0;
}
if (xa != NULL)
tot += PyTuple_Size((PyObject *)xa);
}
Py_XDECREF(tmp);
}
else if (PyFortran_Check(fun) || PyFortran_Check1(fun)) {
tot = maxnofargs;
if (xa != NULL)
tot += PyTuple_Size((PyObject *)xa);
tmp_fun = fun;
Py_INCREF(tmp_fun);
}
else if (F2PyCapsule_Check(fun)) {
tot = maxnofargs;
if (xa != NULL)
ext = PyTuple_Size((PyObject *)xa);
if(ext>0) {
fprintf(stderr,\"extra arguments tuple cannot be used with CObject call-back\\n\");
goto capi_fail;
}
tmp_fun = fun;
Py_INCREF(tmp_fun);
}
}
if (tmp_fun == NULL) {
fprintf(stderr,
\"Call-back argument must be function|instance|instance.__call__|f2py-function \"
\"but got %s.\\n\",
((fun == NULL) ? \"NULL\" : Py_TYPE(fun)->tp_name));
goto capi_fail;
}
if (PyObject_HasAttrString(tmp_fun,\"__code__\")) {
if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) {
PyObject *tmp_argcount = PyObject_GetAttrString(tmp,\"co_argcount\");
Py_DECREF(tmp);
if (tmp_argcount == NULL) {
goto capi_fail;
}
tot = PyLong_AsSsize_t(tmp_argcount) - di;
Py_DECREF(tmp_argcount);
}
}
/* Get the number of optional arguments */
if (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) {
if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\")))
opt = PyTuple_Size(tmp);
Py_XDECREF(tmp);
}
/* Get the number of extra arguments */
if (xa != NULL)
ext = PyTuple_Size((PyObject *)xa);
/* Calculate the size of call-backs argument list */
siz = MIN(maxnofargs+ext,tot);
*nofargs = MAX(0,siz-ext);
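    /* For example: maxnofargs=2, ext=1 and tot=3 give siz=3 and *nofargs=2. */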
#ifdef DEBUGCFUNCS
fprintf(stderr,
\"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),\"
\"tot,opt,ext,siz,nofargs = %d(-%d), %zd, %zd, %zd, %zd, %d\\n\",
maxnofargs, nofoptargs, tot, opt, ext, siz, *nofargs);
#endif
if (siz < tot-opt) {
fprintf(stderr,
\"create_cb_arglist: Failed to build argument list \"
\"(siz) with enough arguments (tot-opt) required by \"
\"user-supplied function (siz,tot,opt=%zd, %zd, %zd).\\n\",
siz, tot, opt);
goto capi_fail;
}
/* Initialize argument list */
*args = (PyTupleObject *)PyTuple_New(siz);
for (i=0;i<*nofargs;i++) {
Py_INCREF(Py_None);
PyTuple_SET_ITEM((PyObject *)(*args),i,Py_None);
}
if (xa != NULL)
for (i=(*nofargs);i<siz;i++) {
tmp = PyTuple_GetItem((PyObject *)xa,i-(*nofargs));
Py_INCREF(tmp);
PyTuple_SET_ITEM(*args,i,tmp);
}
CFUNCSMESS(\"create_cb_arglist-end\\n\");
Py_DECREF(tmp_fun);
return 1;
capi_fail:
if (PyErr_Occurred() == NULL)
PyErr_SetString(#modulename#_error, errmess);
Py_XDECREF(tmp_fun);
return 0;
}
"""
def buildcfuncs():
from .capi_maps import c2capi_map
for k in c2capi_map.keys():
m = 'pyarr_from_p_%s1' % k
cppmacros[
m] = '#define %s(v) (PyArray_SimpleNewFromData(0,NULL,%s,(char *)v))' % (m, c2capi_map[k])
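    # For example, k = 'double' (assuming c2capi_map['double'] is
    # 'NPY_DOUBLE') yields:
    #   #define pyarr_from_p_double1(v) (PyArray_SimpleNewFromData(0,NULL,NPY_DOUBLE,(char *)v))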
k = 'string'
m = 'pyarr_from_p_%s1' % k
# NPY_CHAR compatibility, NPY_STRING with itemsize 1
cppmacros[
m] = '#define %s(v,dims) (PyArray_New(&PyArray_Type, 1, dims, NPY_STRING, NULL, v, 1, NPY_ARRAY_CARRAY, NULL))' % (m)
############ Auxiliary functions for sorting needs ###################
def append_needs(need, flag=1):
# This function modifies the contents of the global `outneeds` dict.
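    # When `flag` is true, `need` and (recursively) its sub-needs are
    # inserted into the global `outneeds` dict; when false, the needs are
    # instead collected into a dict that is returned to the caller.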
if isinstance(need, list):
for n in need:
append_needs(n, flag)
elif isinstance(need, str):
if not need:
return
if need in includes0:
n = 'includes0'
elif need in includes:
n = 'includes'
elif need in typedefs:
n = 'typedefs'
elif need in typedefs_generated:
n = 'typedefs_generated'
elif need in cppmacros:
n = 'cppmacros'
elif need in cfuncs:
n = 'cfuncs'
elif need in callbacks:
n = 'callbacks'
elif need in f90modhooks:
n = 'f90modhooks'
elif need in commonhooks:
n = 'commonhooks'
else:
errmess('append_needs: unknown need %s\n' % (repr(need)))
return
if need in outneeds[n]:
return
if flag:
tmp = {}
if need in needs:
for nn in needs[need]:
t = append_needs(nn, 0)
if isinstance(t, dict):
for nnn in t.keys():
if nnn in tmp:
tmp[nnn] = tmp[nnn] + t[nnn]
else:
tmp[nnn] = t[nnn]
for nn in tmp.keys():
for nnn in tmp[nn]:
if nnn not in outneeds[nn]:
outneeds[nn] = [nnn] + outneeds[nn]
outneeds[n].append(need)
else:
tmp = {}
if need in needs:
for nn in needs[need]:
t = append_needs(nn, flag)
if isinstance(t, dict):
for nnn in t.keys():
if nnn in tmp:
tmp[nnn] = t[nnn] + tmp[nnn]
else:
tmp[nnn] = t[nnn]
if n not in tmp:
tmp[n] = []
tmp[n].append(need)
return tmp
else:
errmess('append_needs: expected list or string but got :%s\n' %
(repr(need)))
def get_needs():
# This function modifies the contents of the global `outneeds` dict.
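    # Sort each needs list so that every item appears after the items it
    # depends on; if no progress can be made (a circular dependence), the
    # remaining items are appended unsorted.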
res = {}
for n in outneeds.keys():
out = []
saveout = copy.copy(outneeds[n])
while len(outneeds[n]) > 0:
if outneeds[n][0] not in needs:
out.append(outneeds[n][0])
del outneeds[n][0]
else:
flag = 0
for k in outneeds[n][1:]:
if k in needs[outneeds[n][0]]:
flag = 1
break
if flag:
outneeds[n] = outneeds[n][1:] + [outneeds[n][0]]
else:
out.append(outneeds[n][0])
del outneeds[n][0]
if saveout and (0 not in map(lambda x, y: x == y, saveout, outneeds[n])) \
and outneeds[n] != []:
print(n, saveout)
errmess(
'get_needs: no progress in sorting needs, probably circular dependence, skipping.\n')
out = out + saveout
break
saveout = copy.copy(outneeds[n])
if out == []:
out = [n]
res[n] = out
return res
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/symbolic.py

"""Fortran/C symbolic expressions
References:
- J3/21-007: Draft Fortran 202x. https://j3-fortran.org/doc/year/21/21-007.pdf
"""
# To analyze Fortran expressions and resolve dimension specifications,
# for instance, we implement a minimal symbolic engine for parsing
# expressions into a tree of expression instances. As a first
# instance, we care only about arithmetic expressions involving
# integers and operations like addition (+), subtraction (-),
# multiplication (*), division (Fortran / is Python //, Fortran // is
# concatenate), and exponentiation (**). In addition, .pyf files may
# contain C expressions; support for these is implemented here as well.
#
# TODO: support logical constants (Op.BOOLEAN)
# TODO: support logical operators (.AND., ...)
# TODO: support defined operators (.MYOP., ...)
#
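# A small usage sketch (illustrative; the helpers are defined below):
#
#   Expr.parse('a + b') == as_symbol('a') + as_symbol('b')  # -> True
#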
__all__ = ['Expr']
import re
import warnings
from enum import Enum
from math import gcd
class Language(Enum):
"""
Used as Expr.tostring language argument.
"""
Python = 0
Fortran = 1
C = 2
class Op(Enum):
"""
Used as Expr op attribute.
"""
INTEGER = 10
REAL = 12
COMPLEX = 15
STRING = 20
ARRAY = 30
SYMBOL = 40
TERNARY = 100
APPLY = 200
INDEXING = 210
CONCAT = 220
RELATIONAL = 300
TERMS = 1000
FACTORS = 2000
REF = 3000
DEREF = 3001
class RelOp(Enum):
"""
    Used in an Op.RELATIONAL expression to specify the relational
    operator.
"""
EQ = 1
NE = 2
LT = 3
LE = 4
GT = 5
GE = 6
@classmethod
def fromstring(cls, s, language=Language.C):
if language is Language.Fortran:
return {'.eq.': RelOp.EQ, '.ne.': RelOp.NE,
'.lt.': RelOp.LT, '.le.': RelOp.LE,
'.gt.': RelOp.GT, '.ge.': RelOp.GE}[s.lower()]
return {'==': RelOp.EQ, '!=': RelOp.NE, '<': RelOp.LT,
'<=': RelOp.LE, '>': RelOp.GT, '>=': RelOp.GE}[s]
def tostring(self, language=Language.C):
if language is Language.Fortran:
return {RelOp.EQ: '.eq.', RelOp.NE: '.ne.',
RelOp.LT: '.lt.', RelOp.LE: '.le.',
RelOp.GT: '.gt.', RelOp.GE: '.ge.'}[self]
return {RelOp.EQ: '==', RelOp.NE: '!=',
RelOp.LT: '<', RelOp.LE: '<=',
RelOp.GT: '>', RelOp.GE: '>='}[self]
class ArithOp(Enum):
"""
Used in Op.APPLY expression to specify the function part.
"""
POS = 1
NEG = 2
ADD = 3
SUB = 4
MUL = 5
DIV = 6
POW = 7
class OpError(Exception):
pass
class Precedence(Enum):
"""
Used as Expr.tostring precedence argument.
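    Lower values bind tighter: in Expr.tostring, an operand is
    parenthesized when its precedence value exceeds that of the parent
    expression.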
"""
ATOM = 0
POWER = 1
UNARY = 2
PRODUCT = 3
SUM = 4
LT = 6
EQ = 7
LAND = 11
LOR = 12
TERNARY = 13
ASSIGN = 14
TUPLE = 15
NONE = 100
integer_types = (int,)
number_types = (int, float)
def _pairs_add(d, k, v):
# Internal utility method for updating terms and factors data.
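    # For example, _pairs_add(d, k, 2) adds 2 to d[k], creating the entry
    # when missing and deleting it when the accumulated value becomes zero.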
c = d.get(k)
if c is None:
d[k] = v
else:
c = c + v
if c:
d[k] = c
else:
del d[k]
class ExprWarning(UserWarning):
pass
def ewarn(message):
warnings.warn(message, ExprWarning, stacklevel=2)
class Expr:
"""Represents a Fortran expression as a op-data pair.
Expr instances are hashable and sortable.
"""
@staticmethod
def parse(s, language=Language.C):
"""Parse a Fortran expression to a Expr.
"""
return fromstring(s, language=language)
def __init__(self, op, data):
assert isinstance(op, Op)
# sanity checks
if op is Op.INTEGER:
# data is a 2-tuple of numeric object and a kind value
# (default is 4)
assert isinstance(data, tuple) and len(data) == 2
assert isinstance(data[0], int)
assert isinstance(data[1], (int, str)), data
elif op is Op.REAL:
# data is a 2-tuple of numeric object and a kind value
# (default is 4)
assert isinstance(data, tuple) and len(data) == 2
assert isinstance(data[0], float)
assert isinstance(data[1], (int, str)), data
elif op is Op.COMPLEX:
# data is a 2-tuple of constant expressions
assert isinstance(data, tuple) and len(data) == 2
elif op is Op.STRING:
# data is a 2-tuple of quoted string and a kind value
# (default is 1)
assert isinstance(data, tuple) and len(data) == 2
assert (isinstance(data[0], str)
and data[0][::len(data[0])-1] in ('""', "''", '@@'))
assert isinstance(data[1], (int, str)), data
elif op is Op.SYMBOL:
# data is any hashable object
assert hash(data) is not None
elif op in (Op.ARRAY, Op.CONCAT):
# data is a tuple of expressions
assert isinstance(data, tuple)
assert all(isinstance(item, Expr) for item in data), data
elif op in (Op.TERMS, Op.FACTORS):
# data is {<term|base>:<coeff|exponent>} where dict values
# are nonzero Python integers
assert isinstance(data, dict)
elif op is Op.APPLY:
# data is (<function>, <operands>, <kwoperands>) where
# operands are Expr instances
assert isinstance(data, tuple) and len(data) == 3
# function is any hashable object
assert hash(data[0]) is not None
assert isinstance(data[1], tuple)
assert isinstance(data[2], dict)
elif op is Op.INDEXING:
# data is (<object>, <indices>)
assert isinstance(data, tuple) and len(data) == 2
# function is any hashable object
assert hash(data[0]) is not None
elif op is Op.TERNARY:
# data is (<cond>, <expr1>, <expr2>)
assert isinstance(data, tuple) and len(data) == 3
elif op in (Op.REF, Op.DEREF):
# data is Expr instance
assert isinstance(data, Expr)
elif op is Op.RELATIONAL:
# data is (<relop>, <left>, <right>)
assert isinstance(data, tuple) and len(data) == 3
else:
raise NotImplementedError(
f'unknown op or missing sanity check: {op}')
self.op = op
self.data = data
def __eq__(self, other):
return (isinstance(other, Expr)
and self.op is other.op
and self.data == other.data)
def __hash__(self):
if self.op in (Op.TERMS, Op.FACTORS):
data = tuple(sorted(self.data.items()))
elif self.op is Op.APPLY:
data = self.data[:2] + tuple(sorted(self.data[2].items()))
else:
data = self.data
return hash((self.op, data))
def __lt__(self, other):
if isinstance(other, Expr):
if self.op is not other.op:
return self.op.value < other.op.value
if self.op in (Op.TERMS, Op.FACTORS):
return (tuple(sorted(self.data.items()))
< tuple(sorted(other.data.items())))
if self.op is Op.APPLY:
if self.data[:2] != other.data[:2]:
return self.data[:2] < other.data[:2]
return tuple(sorted(self.data[2].items())) < tuple(
sorted(other.data[2].items()))
return self.data < other.data
return NotImplemented
def __le__(self, other): return self == other or self < other
def __gt__(self, other): return not (self <= other)
def __ge__(self, other): return not (self < other)
def __repr__(self):
return f'{type(self).__name__}({self.op}, {self.data!r})'
def __str__(self):
return self.tostring()
def tostring(self, parent_precedence=Precedence.NONE,
language=Language.Fortran):
"""Return a string representation of Expr.
"""
if self.op in (Op.INTEGER, Op.REAL):
precedence = (Precedence.SUM if self.data[0] < 0
else Precedence.ATOM)
r = str(self.data[0]) + (f'_{self.data[1]}'
if self.data[1] != 4 else '')
elif self.op is Op.COMPLEX:
r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
for item in self.data)
r = '(' + r + ')'
precedence = Precedence.ATOM
elif self.op is Op.SYMBOL:
precedence = Precedence.ATOM
r = str(self.data)
elif self.op is Op.STRING:
r = self.data[0]
if self.data[1] != 1:
r = self.data[1] + '_' + r
precedence = Precedence.ATOM
elif self.op is Op.ARRAY:
r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
for item in self.data)
r = '[' + r + ']'
precedence = Precedence.ATOM
elif self.op is Op.TERMS:
terms = []
for term, coeff in sorted(self.data.items()):
if coeff < 0:
op = ' - '
coeff = -coeff
else:
op = ' + '
if coeff == 1:
term = term.tostring(Precedence.SUM, language=language)
else:
if term == as_number(1):
term = str(coeff)
else:
term = f'{coeff} * ' + term.tostring(
Precedence.PRODUCT, language=language)
if terms:
terms.append(op)
elif op == ' - ':
terms.append('-')
terms.append(term)
r = ''.join(terms) or '0'
precedence = Precedence.SUM if terms else Precedence.ATOM
elif self.op is Op.FACTORS:
factors = []
tail = []
for base, exp in sorted(self.data.items()):
op = ' * '
if exp == 1:
factor = base.tostring(Precedence.PRODUCT,
language=language)
elif language is Language.C:
if exp in range(2, 10):
factor = base.tostring(Precedence.PRODUCT,
language=language)
factor = ' * '.join([factor] * exp)
elif exp in range(-10, 0):
factor = base.tostring(Precedence.PRODUCT,
language=language)
tail += [factor] * -exp
continue
else:
factor = base.tostring(Precedence.TUPLE,
language=language)
factor = f'pow({factor}, {exp})'
else:
factor = base.tostring(Precedence.POWER,
language=language) + f' ** {exp}'
if factors:
factors.append(op)
factors.append(factor)
if tail:
if not factors:
factors += ['1']
factors += ['/', '(', ' * '.join(tail), ')']
r = ''.join(factors) or '1'
precedence = Precedence.PRODUCT if factors else Precedence.ATOM
elif self.op is Op.APPLY:
name, args, kwargs = self.data
if name is ArithOp.DIV and language is Language.C:
numer, denom = [arg.tostring(Precedence.PRODUCT,
language=language)
for arg in args]
r = f'{numer} / {denom}'
precedence = Precedence.PRODUCT
else:
args = [arg.tostring(Precedence.TUPLE, language=language)
for arg in args]
args += [k + '=' + v.tostring(Precedence.NONE)
for k, v in kwargs.items()]
r = f'{name}({", ".join(args)})'
precedence = Precedence.ATOM
elif self.op is Op.INDEXING:
name = self.data[0]
args = [arg.tostring(Precedence.TUPLE, language=language)
for arg in self.data[1:]]
r = f'{name}[{", ".join(args)}]'
precedence = Precedence.ATOM
elif self.op is Op.CONCAT:
args = [arg.tostring(Precedence.PRODUCT, language=language)
for arg in self.data]
r = " // ".join(args)
precedence = Precedence.PRODUCT
elif self.op is Op.TERNARY:
cond, expr1, expr2 = [a.tostring(Precedence.TUPLE,
language=language)
for a in self.data]
if language is Language.C:
r = f'({cond}?{expr1}:{expr2})'
elif language is Language.Python:
r = f'({expr1} if {cond} else {expr2})'
elif language is Language.Fortran:
r = f'merge({expr1}, {expr2}, {cond})'
else:
raise NotImplementedError(
f'tostring for {self.op} and {language}')
precedence = Precedence.ATOM
elif self.op is Op.REF:
r = '&' + self.data.tostring(Precedence.UNARY, language=language)
precedence = Precedence.UNARY
elif self.op is Op.DEREF:
r = '*' + self.data.tostring(Precedence.UNARY, language=language)
precedence = Precedence.UNARY
elif self.op is Op.RELATIONAL:
rop, left, right = self.data
precedence = (Precedence.EQ if rop in (RelOp.EQ, RelOp.NE)
else Precedence.LT)
left = left.tostring(precedence, language=language)
right = right.tostring(precedence, language=language)
rop = rop.tostring(language=language)
r = f'{left} {rop} {right}'
else:
raise NotImplementedError(f'tostring for op {self.op}')
if parent_precedence.value < precedence.value:
# If parent precedence is higher than operand precedence,
# operand will be enclosed in parenthesis.
return '(' + r + ')'
return r
def __pos__(self):
return self
def __neg__(self):
return self * -1
def __add__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
if self.op is other.op:
if self.op in (Op.INTEGER, Op.REAL):
return as_number(
self.data[0] + other.data[0],
max(self.data[1], other.data[1]))
if self.op is Op.COMPLEX:
r1, i1 = self.data
r2, i2 = other.data
return as_complex(r1 + r2, i1 + i2)
if self.op is Op.TERMS:
r = Expr(self.op, dict(self.data))
for k, v in other.data.items():
_pairs_add(r.data, k, v)
return normalize(r)
if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
return self + as_complex(other)
elif self.op in (Op.INTEGER, Op.REAL) and other.op is Op.COMPLEX:
return as_complex(self) + other
elif self.op is Op.REAL and other.op is Op.INTEGER:
return self + as_real(other, kind=self.data[1])
elif self.op is Op.INTEGER and other.op is Op.REAL:
return as_real(self, kind=other.data[1]) + other
return as_terms(self) + as_terms(other)
return NotImplemented
def __radd__(self, other):
if isinstance(other, number_types):
return as_number(other) + self
return NotImplemented
def __sub__(self, other):
return self + (-other)
def __rsub__(self, other):
if isinstance(other, number_types):
return as_number(other) - self
return NotImplemented
def __mul__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
if self.op is other.op:
if self.op in (Op.INTEGER, Op.REAL):
return as_number(self.data[0] * other.data[0],
max(self.data[1], other.data[1]))
elif self.op is Op.COMPLEX:
r1, i1 = self.data
r2, i2 = other.data
return as_complex(r1 * r2 - i1 * i2, r1 * i2 + r2 * i1)
if self.op is Op.FACTORS:
r = Expr(self.op, dict(self.data))
for k, v in other.data.items():
_pairs_add(r.data, k, v)
return normalize(r)
elif self.op is Op.TERMS:
r = Expr(self.op, {})
for t1, c1 in self.data.items():
for t2, c2 in other.data.items():
_pairs_add(r.data, t1 * t2, c1 * c2)
return normalize(r)
if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
return self * as_complex(other)
elif other.op is Op.COMPLEX and self.op in (Op.INTEGER, Op.REAL):
return as_complex(self) * other
elif self.op is Op.REAL and other.op is Op.INTEGER:
return self * as_real(other, kind=self.data[1])
elif self.op is Op.INTEGER and other.op is Op.REAL:
return as_real(self, kind=other.data[1]) * other
if self.op is Op.TERMS:
return self * as_terms(other)
elif other.op is Op.TERMS:
return as_terms(self) * other
return as_factors(self) * as_factors(other)
return NotImplemented
def __rmul__(self, other):
if isinstance(other, number_types):
return as_number(other) * self
return NotImplemented
def __pow__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
if other.op is Op.INTEGER:
exponent = other.data[0]
# TODO: other kind not used
if exponent == 0:
return as_number(1)
if exponent == 1:
return self
if exponent > 0:
if self.op is Op.FACTORS:
r = Expr(self.op, {})
for k, v in self.data.items():
r.data[k] = v * exponent
return normalize(r)
return self * (self ** (exponent - 1))
elif exponent != -1:
return (self ** (-exponent)) ** -1
return Expr(Op.FACTORS, {self: exponent})
return as_apply(ArithOp.POW, self, other)
return NotImplemented
def __truediv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
# Fortran / is different from Python /:
# - `/` is a truncate operation for integer operands
return normalize(as_apply(ArithOp.DIV, self, other))
return NotImplemented
def __rtruediv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
return other / self
return NotImplemented
def __floordiv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
# Fortran // is different from Python //:
# - `//` is a concatenate operation for string operands
return normalize(Expr(Op.CONCAT, (self, other)))
return NotImplemented
def __rfloordiv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
return other // self
return NotImplemented
def __call__(self, *args, **kwargs):
        # In Fortran, parentheses () are used both for function calls
        # and for indexing operations.
#
# TODO: implement a method for deciding when __call__ should
# return an INDEXING expression.
return as_apply(self, *map(as_expr, args),
**dict((k, as_expr(v)) for k, v in kwargs.items()))
def __getitem__(self, index):
# Provided to support C indexing operations that .pyf files
# may contain.
index = as_expr(index)
if not isinstance(index, tuple):
index = index,
if len(index) > 1:
ewarn(f'C-index should be a single expression but got `{index}`')
return Expr(Op.INDEXING, (self,) + index)
def substitute(self, symbols_map):
"""Recursively substitute symbols with values in symbols map.
Symbols map is a dictionary of symbol-expression pairs.
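        For example, ``(as_symbol('x') + 1).substitute({as_symbol('x'):
        as_number(2)})`` evaluates to ``as_number(3)``.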
"""
if self.op is Op.SYMBOL:
value = symbols_map.get(self)
if value is None:
return self
m = re.match(r'\A(@__f2py_PARENTHESIS_(\w+)_\d+@)\Z', self.data)
if m:
# complement to fromstring method
items, paren = m.groups()
if paren in ['ROUNDDIV', 'SQUARE']:
return as_array(value)
assert paren == 'ROUND', (paren, value)
return value
if self.op in (Op.INTEGER, Op.REAL, Op.STRING):
return self
if self.op in (Op.ARRAY, Op.COMPLEX):
return Expr(self.op, tuple(item.substitute(symbols_map)
for item in self.data))
if self.op is Op.CONCAT:
return normalize(Expr(self.op, tuple(item.substitute(symbols_map)
for item in self.data)))
if self.op is Op.TERMS:
r = None
for term, coeff in self.data.items():
if r is None:
r = term.substitute(symbols_map) * coeff
else:
r += term.substitute(symbols_map) * coeff
if r is None:
ewarn('substitute: empty TERMS expression interpreted as'
' int-literal 0')
return as_number(0)
return r
if self.op is Op.FACTORS:
r = None
for base, exponent in self.data.items():
if r is None:
r = base.substitute(symbols_map) ** exponent
else:
r *= base.substitute(symbols_map) ** exponent
if r is None:
ewarn('substitute: empty FACTORS expression interpreted'
' as int-literal 1')
return as_number(1)
return r
if self.op is Op.APPLY:
target, args, kwargs = self.data
if isinstance(target, Expr):
target = target.substitute(symbols_map)
args = tuple(a.substitute(symbols_map) for a in args)
kwargs = dict((k, v.substitute(symbols_map))
for k, v in kwargs.items())
return normalize(Expr(self.op, (target, args, kwargs)))
if self.op is Op.INDEXING:
func = self.data[0]
if isinstance(func, Expr):
func = func.substitute(symbols_map)
args = tuple(a.substitute(symbols_map) for a in self.data[1:])
return normalize(Expr(self.op, (func,) + args))
if self.op is Op.TERNARY:
operands = tuple(a.substitute(symbols_map) for a in self.data)
return normalize(Expr(self.op, operands))
if self.op in (Op.REF, Op.DEREF):
return normalize(Expr(self.op, self.data.substitute(symbols_map)))
if self.op is Op.RELATIONAL:
rop, left, right = self.data
left = left.substitute(symbols_map)
right = right.substitute(symbols_map)
return normalize(Expr(self.op, (rop, left, right)))
raise NotImplementedError(f'substitute method for {self.op}: {self!r}')
def traverse(self, visit, *args, **kwargs):
"""Traverse expression tree with visit function.
The visit function is applied to an expression with given args
and kwargs.
        The traverse call returns the expression returned by visit when
        it is not None; otherwise it returns a new normalized expression
        whose sub-expressions have been traversed in the same way.
"""
result = visit(self, *args, **kwargs)
if result is not None:
return result
if self.op in (Op.INTEGER, Op.REAL, Op.STRING, Op.SYMBOL):
return self
elif self.op in (Op.COMPLEX, Op.ARRAY, Op.CONCAT, Op.TERNARY):
return normalize(Expr(self.op, tuple(
item.traverse(visit, *args, **kwargs)
for item in self.data)))
elif self.op in (Op.TERMS, Op.FACTORS):
data = {}
for k, v in self.data.items():
k = k.traverse(visit, *args, **kwargs)
v = (v.traverse(visit, *args, **kwargs)
if isinstance(v, Expr) else v)
if k in data:
v = data[k] + v
data[k] = v
return normalize(Expr(self.op, data))
elif self.op is Op.APPLY:
obj = self.data[0]
func = (obj.traverse(visit, *args, **kwargs)
if isinstance(obj, Expr) else obj)
operands = tuple(operand.traverse(visit, *args, **kwargs)
for operand in self.data[1])
kwoperands = dict((k, v.traverse(visit, *args, **kwargs))
for k, v in self.data[2].items())
return normalize(Expr(self.op, (func, operands, kwoperands)))
elif self.op is Op.INDEXING:
obj = self.data[0]
obj = (obj.traverse(visit, *args, **kwargs)
if isinstance(obj, Expr) else obj)
indices = tuple(index.traverse(visit, *args, **kwargs)
for index in self.data[1:])
return normalize(Expr(self.op, (obj,) + indices))
elif self.op in (Op.REF, Op.DEREF):
return normalize(Expr(self.op,
self.data.traverse(visit, *args, **kwargs)))
elif self.op is Op.RELATIONAL:
rop, left, right = self.data
left = left.traverse(visit, *args, **kwargs)
right = right.traverse(visit, *args, **kwargs)
return normalize(Expr(self.op, (rop, left, right)))
raise NotImplementedError(f'traverse method for {self.op}')
def contains(self, other):
"""Check if self contains other.
"""
found = []
def visit(expr, found=found):
if found:
return expr
elif expr == other:
found.append(1)
return expr
self.traverse(visit)
return len(found) != 0
def symbols(self):
"""Return a set of symbols contained in self.
"""
found = set()
def visit(expr, found=found):
if expr.op is Op.SYMBOL:
found.add(expr)
self.traverse(visit)
return found
def polynomial_atoms(self):
"""Return a set of expressions used as atoms in polynomial self.
"""
found = set()
def visit(expr, found=found):
if expr.op is Op.FACTORS:
for b in expr.data:
b.traverse(visit)
return expr
if expr.op in (Op.TERMS, Op.COMPLEX):
return
if expr.op is Op.APPLY and isinstance(expr.data[0], ArithOp):
if expr.data[0] is ArithOp.POW:
expr.data[1][0].traverse(visit)
return expr
return
if expr.op in (Op.INTEGER, Op.REAL):
return expr
found.add(expr)
if expr.op in (Op.INDEXING, Op.APPLY):
return expr
self.traverse(visit)
return found
def linear_solve(self, symbol):
"""Return a, b such that a * symbol + b == self.
If self is not linear with respect to symbol, raise RuntimeError.
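        For example, ``(as_symbol('x') * 3 + 5).linear_solve(as_symbol('x'))``
        returns ``(as_number(3), as_number(5))``.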
"""
b = self.substitute({symbol: as_number(0)})
ax = self - b
a = ax.substitute({symbol: as_number(1)})
zero, _ = as_numer_denom(a * symbol - ax)
if zero != as_number(0):
raise RuntimeError(f'not a {symbol}-linear equation:'
f' {a} * {symbol} + {b} == {self}')
return a, b
def normalize(obj):
"""Normalize Expr and apply basic evaluation methods.
"""
if not isinstance(obj, Expr):
return obj
if obj.op is Op.TERMS:
d = {}
for t, c in obj.data.items():
if c == 0:
continue
if t.op is Op.COMPLEX and c != 1:
t = t * c
c = 1
if t.op is Op.TERMS:
for t1, c1 in t.data.items():
_pairs_add(d, t1, c1 * c)
else:
_pairs_add(d, t, c)
if len(d) == 0:
            # TODO: determine correct kind
return as_number(0)
elif len(d) == 1:
(t, c), = d.items()
if c == 1:
return t
return Expr(Op.TERMS, d)
if obj.op is Op.FACTORS:
coeff = 1
d = {}
for b, e in obj.data.items():
if e == 0:
continue
if b.op is Op.TERMS and isinstance(e, integer_types) and e > 1:
# expand integer powers of sums
b = b * (b ** (e - 1))
e = 1
if b.op in (Op.INTEGER, Op.REAL):
if e == 1:
coeff *= b.data[0]
elif e > 0:
coeff *= b.data[0] ** e
else:
_pairs_add(d, b, e)
elif b.op is Op.FACTORS:
if e > 0 and isinstance(e, integer_types):
for b1, e1 in b.data.items():
_pairs_add(d, b1, e1 * e)
else:
_pairs_add(d, b, e)
else:
_pairs_add(d, b, e)
if len(d) == 0 or coeff == 0:
            # TODO: determine correct kind
assert isinstance(coeff, number_types)
return as_number(coeff)
elif len(d) == 1:
(b, e), = d.items()
if e == 1:
t = b
else:
t = Expr(Op.FACTORS, d)
if coeff == 1:
return t
return Expr(Op.TERMS, {t: coeff})
elif coeff == 1:
return Expr(Op.FACTORS, d)
else:
return Expr(Op.TERMS, {Expr(Op.FACTORS, d): coeff})
if obj.op is Op.APPLY and obj.data[0] is ArithOp.DIV:
dividend, divisor = obj.data[1]
t1, c1 = as_term_coeff(dividend)
t2, c2 = as_term_coeff(divisor)
if isinstance(c1, integer_types) and isinstance(c2, integer_types):
g = gcd(c1, c2)
c1, c2 = c1//g, c2//g
else:
c1, c2 = c1/c2, 1
if t1.op is Op.APPLY and t1.data[0] is ArithOp.DIV:
numer = t1.data[1][0] * c1
denom = t1.data[1][1] * t2 * c2
return as_apply(ArithOp.DIV, numer, denom)
if t2.op is Op.APPLY and t2.data[0] is ArithOp.DIV:
numer = t2.data[1][1] * t1 * c1
denom = t2.data[1][0] * c2
return as_apply(ArithOp.DIV, numer, denom)
d = dict(as_factors(t1).data)
for b, e in as_factors(t2).data.items():
_pairs_add(d, b, -e)
numer, denom = {}, {}
for b, e in d.items():
if e > 0:
numer[b] = e
else:
denom[b] = -e
numer = normalize(Expr(Op.FACTORS, numer)) * c1
denom = normalize(Expr(Op.FACTORS, denom)) * c2
if denom.op in (Op.INTEGER, Op.REAL) and denom.data[0] == 1:
# TODO: denom kind not used
return numer
return as_apply(ArithOp.DIV, numer, denom)
if obj.op is Op.CONCAT:
lst = [obj.data[0]]
for s in obj.data[1:]:
last = lst[-1]
if (
last.op is Op.STRING
and s.op is Op.STRING
and last.data[0][0] in '"\''
and s.data[0][0] == last.data[0][-1]
):
new_last = as_string(last.data[0][:-1] + s.data[0][1:],
max(last.data[1], s.data[1]))
lst[-1] = new_last
else:
lst.append(s)
if len(lst) == 1:
return lst[0]
return Expr(Op.CONCAT, tuple(lst))
if obj.op is Op.TERNARY:
cond, expr1, expr2 = map(normalize, obj.data)
if cond.op is Op.INTEGER:
return expr1 if cond.data[0] else expr2
return Expr(Op.TERNARY, (cond, expr1, expr2))
return obj
def as_expr(obj):
"""Convert non-Expr objects to Expr objects.
"""
if isinstance(obj, complex):
return as_complex(obj.real, obj.imag)
if isinstance(obj, number_types):
return as_number(obj)
if isinstance(obj, str):
# STRING expression holds string with boundary quotes, hence
# applying repr:
return as_string(repr(obj))
if isinstance(obj, tuple):
return tuple(map(as_expr, obj))
return obj
def as_symbol(obj):
"""Return object as SYMBOL expression (variable or unparsed expression).
"""
return Expr(Op.SYMBOL, obj)
def as_number(obj, kind=4):
"""Return object as INTEGER or REAL constant.
"""
if isinstance(obj, int):
return Expr(Op.INTEGER, (obj, kind))
if isinstance(obj, float):
return Expr(Op.REAL, (obj, kind))
if isinstance(obj, Expr):
if obj.op in (Op.INTEGER, Op.REAL):
return obj
raise OpError(f'cannot convert {obj} to INTEGER or REAL constant')
def as_integer(obj, kind=4):
"""Return object as INTEGER constant.
"""
if isinstance(obj, int):
return Expr(Op.INTEGER, (obj, kind))
if isinstance(obj, Expr):
if obj.op is Op.INTEGER:
return obj
raise OpError(f'cannot convert {obj} to INTEGER constant')
def as_real(obj, kind=4):
"""Return object as REAL constant.
"""
if isinstance(obj, int):
return Expr(Op.REAL, (float(obj), kind))
if isinstance(obj, float):
return Expr(Op.REAL, (obj, kind))
if isinstance(obj, Expr):
if obj.op is Op.REAL:
return obj
elif obj.op is Op.INTEGER:
return Expr(Op.REAL, (float(obj.data[0]), kind))
raise OpError(f'cannot convert {obj} to REAL constant')
def as_string(obj, kind=1):
"""Return object as STRING expression (string literal constant).
"""
return Expr(Op.STRING, (obj, kind))
def as_array(obj):
"""Return object as ARRAY expression (array constant).
"""
if isinstance(obj, Expr):
obj = obj,
return Expr(Op.ARRAY, obj)
def as_complex(real, imag=0):
"""Return object as COMPLEX expression (complex literal constant).
"""
return Expr(Op.COMPLEX, (as_expr(real), as_expr(imag)))
def as_apply(func, *args, **kwargs):
"""Return object as APPLY expression (function call, constructor, etc.)
"""
return Expr(Op.APPLY,
(func, tuple(map(as_expr, args)),
dict((k, as_expr(v)) for k, v in kwargs.items())))
def as_ternary(cond, expr1, expr2):
"""Return object as TERNARY expression (cond?expr1:expr2).
"""
return Expr(Op.TERNARY, (cond, expr1, expr2))
def as_ref(expr):
"""Return object as referencing expression.
"""
return Expr(Op.REF, expr)
def as_deref(expr):
"""Return object as dereferencing expression.
"""
return Expr(Op.DEREF, expr)
def as_eq(left, right):
return Expr(Op.RELATIONAL, (RelOp.EQ, left, right))
def as_ne(left, right):
return Expr(Op.RELATIONAL, (RelOp.NE, left, right))
def as_lt(left, right):
return Expr(Op.RELATIONAL, (RelOp.LT, left, right))
def as_le(left, right):
return Expr(Op.RELATIONAL, (RelOp.LE, left, right))
def as_gt(left, right):
return Expr(Op.RELATIONAL, (RelOp.GT, left, right))
def as_ge(left, right):
return Expr(Op.RELATIONAL, (RelOp.GE, left, right))
def as_terms(obj):
"""Return expression as TERMS expression.
"""
if isinstance(obj, Expr):
obj = normalize(obj)
if obj.op is Op.TERMS:
return obj
if obj.op is Op.INTEGER:
return Expr(Op.TERMS, {as_integer(1, obj.data[1]): obj.data[0]})
if obj.op is Op.REAL:
return Expr(Op.TERMS, {as_real(1, obj.data[1]): obj.data[0]})
return Expr(Op.TERMS, {obj: 1})
raise OpError(f'cannot convert {type(obj)} to terms Expr')
def as_factors(obj):
"""Return expression as FACTORS expression.
"""
if isinstance(obj, Expr):
obj = normalize(obj)
if obj.op is Op.FACTORS:
return obj
if obj.op is Op.TERMS:
if len(obj.data) == 1:
(term, coeff), = obj.data.items()
if coeff == 1:
return Expr(Op.FACTORS, {term: 1})
                return Expr(Op.FACTORS, {term: 1, as_number(coeff): 1})
if ((obj.op is Op.APPLY
and obj.data[0] is ArithOp.DIV
and not obj.data[2])):
return Expr(Op.FACTORS, {obj.data[1][0]: 1, obj.data[1][1]: -1})
return Expr(Op.FACTORS, {obj: 1})
    raise OpError(f'cannot convert {type(obj)} to factors Expr')
def as_term_coeff(obj):
"""Return expression as term-coefficient pair.
"""
if isinstance(obj, Expr):
obj = normalize(obj)
if obj.op is Op.INTEGER:
return as_integer(1, obj.data[1]), obj.data[0]
if obj.op is Op.REAL:
return as_real(1, obj.data[1]), obj.data[0]
if obj.op is Op.TERMS:
if len(obj.data) == 1:
(term, coeff), = obj.data.items()
return term, coeff
# TODO: find common divisor of coefficients
if obj.op is Op.APPLY and obj.data[0] is ArithOp.DIV:
t, c = as_term_coeff(obj.data[1][0])
return as_apply(ArithOp.DIV, t, obj.data[1][1]), c
return obj, 1
raise OpError(f'cannot convert {type(obj)} to term and coeff')
def as_numer_denom(obj):
"""Return expression as numer-denom pair.
"""
if isinstance(obj, Expr):
obj = normalize(obj)
if obj.op in (Op.INTEGER, Op.REAL, Op.COMPLEX, Op.SYMBOL,
Op.INDEXING, Op.TERNARY):
return obj, as_number(1)
elif obj.op is Op.APPLY:
if obj.data[0] is ArithOp.DIV and not obj.data[2]:
numers, denoms = map(as_numer_denom, obj.data[1])
return numers[0] * denoms[1], numers[1] * denoms[0]
return obj, as_number(1)
elif obj.op is Op.TERMS:
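            # Combine the terms over a common denominator,
            # e.g. a/b + c/d -> (a*d + c*b, b*d).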
numers, denoms = [], []
for term, coeff in obj.data.items():
n, d = as_numer_denom(term)
n = n * coeff
numers.append(n)
denoms.append(d)
numer, denom = as_number(0), as_number(1)
for i in range(len(numers)):
n = numers[i]
for j in range(len(numers)):
if i != j:
n *= denoms[j]
numer += n
denom *= denoms[i]
if denom.op in (Op.INTEGER, Op.REAL) and denom.data[0] < 0:
numer, denom = -numer, -denom
return numer, denom
elif obj.op is Op.FACTORS:
numer, denom = as_number(1), as_number(1)
for b, e in obj.data.items():
bnumer, bdenom = as_numer_denom(b)
if e > 0:
numer *= bnumer ** e
denom *= bdenom ** e
elif e < 0:
numer *= bdenom ** (-e)
denom *= bnumer ** (-e)
return numer, denom
raise OpError(f'cannot convert {type(obj)} to numer and denom')
def _counter():
# Used internally to generate unique dummy symbols
counter = 0
while True:
counter += 1
yield counter
COUNTER = _counter()
def eliminate_quotes(s):
"""Replace quoted substrings of input string.
Return a new string and a mapping of replacements.
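    For example, ``eliminate_quotes("a 'b' c")`` returns
    ``"a @__f2py_QUOTES_SINGLE_<n>@ c"`` together with a mapping that
    sends the generated key back to ``"'b'"`` (``<n>`` is a fresh
    counter value).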
"""
d = {}
def repl(m):
kind, value = m.groups()[:2]
if kind:
# remove trailing underscore
kind = kind[:-1]
p = {"'": "SINGLE", '"': "DOUBLE"}[value[0]]
k = f'{kind}@__f2py_QUOTES_{p}_{COUNTER.__next__()}@'
d[k] = value
return k
new_s = re.sub(r'({kind}_|)({single_quoted}|{double_quoted})'.format(
kind=r'\w[\w\d_]*',
single_quoted=r"('([^'\\]|(\\.))*')",
double_quoted=r'("([^"\\]|(\\.))*")'),
repl, s)
assert '"' not in new_s
assert "'" not in new_s
return new_s, d
def insert_quotes(s, d):
"""Inverse of eliminate_quotes.
"""
for k, v in d.items():
kind = k[:k.find('@')]
if kind:
kind += '_'
s = s.replace(k, kind + v)
return s
def replace_parenthesis(s):
"""Replace substrings of input that are enclosed in parenthesis.
Return a new string and a mapping of replacements.
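    For example, ``replace_parenthesis('f(x)')`` returns
    ``'f@__f2py_PARENTHESIS_ROUND_<n>@'`` together with a mapping that
    sends the generated key to ``'x'`` (``<n>`` is a fresh counter
    value).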
"""
# Find a parenthesis pair that appears first.
    # Fortran delimiters are `(`, `)`, `[`, `]`, `(/`, `/)`, `/`.
    # We don't handle the `/` delimiter because it is not part of an
    # expression.
left, right = None, None
mn_i = len(s)
for left_, right_ in (('(/', '/)'),
'()',
'{}', # to support C literal structs
'[]'):
i = s.find(left_)
if i == -1:
continue
if i < mn_i:
mn_i = i
left, right = left_, right_
if left is None:
return s, {}
i = mn_i
j = s.find(right, i)
while s.count(left, i + 1, j) != s.count(right, i + 1, j):
j = s.find(right, j + 1)
if j == -1:
raise ValueError(f'Mismatch of {left+right} parenthesis in {s!r}')
p = {'(': 'ROUND', '[': 'SQUARE', '{': 'CURLY', '(/': 'ROUNDDIV'}[left]
k = f'@__f2py_PARENTHESIS_{p}_{COUNTER.__next__()}@'
v = s[i+len(left):j]
r, d = replace_parenthesis(s[j+len(right):])
d[k] = v
return s[:i] + k + r, d
def _get_parenthesis_kind(s):
assert s.startswith('@__f2py_PARENTHESIS_'), s
return s.split('_')[4]
def unreplace_parenthesis(s, d):
"""Inverse of replace_parenthesis.
"""
for k, v in d.items():
p = _get_parenthesis_kind(k)
left = dict(ROUND='(', SQUARE='[', CURLY='{', ROUNDDIV='(/')[p]
right = dict(ROUND=')', SQUARE=']', CURLY='}', ROUNDDIV='/)')[p]
s = s.replace(k, left + v + right)
return s
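# Round-trip sketch (illustrative; <n> stands for the process-wide counter
# value embedded in the @-name):
#
#   r, d = replace_parenthesis('f(x, (y + 1))')
#   # r == 'f@__f2py_PARENTHESIS_ROUND_<n>@' and the mapping stores 'x, (y + 1)'
#   assert unreplace_parenthesis(r, d) == 'f(x, (y + 1))'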
def fromstring(s, language=Language.C):
"""Create an expression from a string.
    This is a "lazy" parser, that is, only arithmetic operations are
    resolved; non-arithmetic operations are treated as symbols.
"""
r = _FromStringWorker(language=language).parse(s)
if isinstance(r, Expr):
return r
raise ValueError(f'failed to parse `{s}` to Expr instance: got `{r}`')
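# Usage sketch (illustrative): arithmetic is folded into Expr operations
# while unknown calls stay symbolic:
#
#   e = fromstring('2*a + f(b)', language=Language.C)
#   isinstance(e, Expr)   # -> True; `f(b)` is kept as a symbolic apply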
class _Pair:
# Internal class to represent a pair of expressions
def __init__(self, left, right):
self.left = left
self.right = right
def substitute(self, symbols_map):
left, right = self.left, self.right
if isinstance(left, Expr):
left = left.substitute(symbols_map)
if isinstance(right, Expr):
right = right.substitute(symbols_map)
return _Pair(left, right)
def __repr__(self):
return f'{type(self).__name__}({self.left}, {self.right})'
class _FromStringWorker:
def __init__(self, language=Language.C):
self.original = None
self.quotes_map = None
self.language = language
def finalize_string(self, s):
return insert_quotes(s, self.quotes_map)
def parse(self, inp):
self.original = inp
unquoted, self.quotes_map = eliminate_quotes(inp)
return self.process(unquoted)
def process(self, s, context='expr'):
"""Parse string within the given context.
The context may define the result in case of ambiguous
expressions. For instance, consider expressions `f(x, y)` and
        `(x, y) + (a, b)` where `f` is a function and the pair `(x, y)`
        denotes a complex number. Specifying context as "args" or
        "expr", the subexpression `(x, y)` will be parsed to an
        argument list or to a complex number, respectively.
"""
if isinstance(s, (list, tuple)):
return type(s)(self.process(s_, context) for s_ in s)
assert isinstance(s, str), (type(s), s)
# replace subexpressions in parenthesis with f2py @-names
r, raw_symbols_map = replace_parenthesis(s)
r = r.strip()
def restore(r):
# restores subexpressions marked with f2py @-names
if isinstance(r, (list, tuple)):
return type(r)(map(restore, r))
return unreplace_parenthesis(r, raw_symbols_map)
# comma-separated tuple
if ',' in r:
operands = restore(r.split(','))
if context == 'args':
return tuple(self.process(operands))
if context == 'expr':
if len(operands) == 2:
# complex number literal
return as_complex(*self.process(operands))
raise NotImplementedError(
f'parsing comma-separated list (context={context}): {r}')
# ternary operation
m = re.match(r'\A([^?]+)[?]([^:]+)[:](.+)\Z', r)
if m:
assert context == 'expr', context
oper, expr1, expr2 = restore(m.groups())
oper = self.process(oper)
expr1 = self.process(expr1)
expr2 = self.process(expr2)
return as_ternary(oper, expr1, expr2)
# relational expression
if self.language is Language.Fortran:
m = re.match(
r'\A(.+)\s*[.](eq|ne|lt|le|gt|ge)[.]\s*(.+)\Z', r, re.I)
else:
m = re.match(
r'\A(.+)\s*([=][=]|[!][=]|[<][=]|[<]|[>][=]|[>])\s*(.+)\Z', r)
if m:
left, rop, right = m.groups()
if self.language is Language.Fortran:
rop = '.' + rop + '.'
left, right = self.process(restore((left, right)))
rop = RelOp.fromstring(rop, language=self.language)
return Expr(Op.RELATIONAL, (rop, left, right))
# keyword argument
m = re.match(r'\A(\w[\w\d_]*)\s*[=](.*)\Z', r)
if m:
keyname, value = m.groups()
value = restore(value)
return _Pair(keyname, self.process(value))
# addition/subtraction operations
operands = re.split(r'((?<!\d[edED])[+-])', r)
if len(operands) > 1:
result = self.process(restore(operands[0] or '0'))
for op, operand in zip(operands[1::2], operands[2::2]):
operand = self.process(restore(operand))
op = op.strip()
if op == '+':
result += operand
else:
assert op == '-'
result -= operand
return result
# string concatenate operation
if self.language is Language.Fortran and '//' in r:
operands = restore(r.split('//'))
return Expr(Op.CONCAT,
tuple(self.process(operands)))
# multiplication/division operations
operands = re.split(r'(?<=[@\w\d_])\s*([*]|/)',
(r if self.language is Language.C
else r.replace('**', '@__f2py_DOUBLE_STAR@')))
if len(operands) > 1:
operands = restore(operands)
if self.language is not Language.C:
operands = [operand.replace('@__f2py_DOUBLE_STAR@', '**')
for operand in operands]
# Expression is an arithmetic product
result = self.process(operands[0])
for op, operand in zip(operands[1::2], operands[2::2]):
operand = self.process(operand)
op = op.strip()
if op == '*':
result *= operand
else:
assert op == '/'
result /= operand
return result
# referencing/dereferencing
if r.startswith('*') or r.startswith('&'):
op = {'*': Op.DEREF, '&': Op.REF}[r[0]]
operand = self.process(restore(r[1:]))
return Expr(op, operand)
# exponentiation operations
if self.language is not Language.C and '**' in r:
operands = list(reversed(restore(r.split('**'))))
result = self.process(operands[0])
for operand in operands[1:]:
operand = self.process(operand)
result = operand ** result
return result
# int-literal-constant
m = re.match(r'\A({digit_string})({kind}|)\Z'.format(
digit_string=r'\d+',
kind=r'_(\d+|\w[\w\d_]*)'), r)
if m:
value, _, kind = m.groups()
if kind and kind.isdigit():
kind = int(kind)
return as_integer(int(value), kind or 4)
# real-literal-constant
m = re.match(r'\A({significant}({exponent}|)|\d+{exponent})({kind}|)\Z'
.format(
significant=r'[.]\d+|\d+[.]\d*',
exponent=r'[edED][+-]?\d+',
kind=r'_(\d+|\w[\w\d_]*)'), r)
if m:
value, _, _, kind = m.groups()
if kind and kind.isdigit():
kind = int(kind)
value = value.lower()
if 'd' in value:
return as_real(float(value.replace('d', 'e')), kind or 8)
return as_real(float(value), kind or 4)
# string-literal-constant with kind parameter specification
if r in self.quotes_map:
kind = r[:r.find('@')]
return as_string(self.quotes_map[r], kind or 1)
# array constructor or literal complex constant or
# parenthesized expression
if r in raw_symbols_map:
paren = _get_parenthesis_kind(r)
items = self.process(restore(raw_symbols_map[r]),
'expr' if paren == 'ROUND' else 'args')
if paren == 'ROUND':
if isinstance(items, Expr):
return items
if paren in ['ROUNDDIV', 'SQUARE']:
                # Expression is an array constructor
if isinstance(items, Expr):
items = (items,)
return as_array(items)
# function call/indexing
m = re.match(r'\A(.+)\s*(@__f2py_PARENTHESIS_(ROUND|SQUARE)_\d+@)\Z',
r)
if m:
target, args, paren = m.groups()
target = self.process(restore(target))
args = self.process(restore(args)[1:-1], 'args')
if not isinstance(args, tuple):
args = args,
if paren == 'ROUND':
kwargs = dict((a.left, a.right) for a in args
if isinstance(a, _Pair))
args = tuple(a for a in args if not isinstance(a, _Pair))
                # Warning: this could also be a Fortran indexing operation.
return as_apply(target, *args, **kwargs)
else:
# Expression is a C/Python indexing operation
# (e.g. used in .pyf files)
assert paren == 'SQUARE'
return target[args]
# Fortran standard conforming identifier
m = re.match(r'\A\w[\w\d_]*\Z', r)
if m:
return as_symbol(r)
# fall-back to symbol
r = self.finalize_string(restore(r))
ewarn(
f'fromstring: treating {r!r} as symbol (original={self.original})')
return as_symbol(r)
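# Context sketch for _FromStringWorker (illustrative): the same text parses
# differently depending on the context argument of `process`:
#
#   w = _FromStringWorker()
#   w.parse('(1, 2)')     # context='expr': parsed as a complex literal
#   w.parse('f(1, 2)')    # the argument list is processed with context='args'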
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/__init__.py
#!/usr/bin/env python3
"""Fortran to Python Interface Generator.
"""
__all__ = ['run_main', 'compile', 'get_include']
import sys
import subprocess
import os
from . import f2py2e
from . import diagnose
run_main = f2py2e.run_main
main = f2py2e.main
def compile(source,
modulename='untitled',
extra_args='',
verbose=True,
source_fn=None,
extension='.f',
full_output=False
):
"""
Build extension module from a Fortran 77 source string with f2py.
Parameters
----------
source : str or bytes
Fortran source of module / subroutine to compile
.. versionchanged:: 1.16.0
Accept str as well as bytes
modulename : str, optional
The name of the compiled python module
extra_args : str or list, optional
Additional parameters passed to f2py
.. versionchanged:: 1.16.0
A list of args may also be provided.
verbose : bool, optional
Print f2py output to screen
source_fn : str, optional
Name of the file where the fortran source is written.
The default is to use a temporary file with the extension
provided by the ``extension`` parameter
extension : ``{'.f', '.f90'}``, optional
Filename extension if `source_fn` is not provided.
The extension tells which fortran standard is used.
The default is ``.f``, which implies F77 standard.
.. versionadded:: 1.11.0
full_output : bool, optional
If True, return a `subprocess.CompletedProcess` containing
the stdout and stderr of the compile process, instead of just
the status code.
.. versionadded:: 1.20.0
Returns
-------
result : int or `subprocess.CompletedProcess`
0 on success, or a `subprocess.CompletedProcess` if
``full_output=True``
Examples
--------
.. literalinclude:: ../../source/f2py/code/results/compile_session.dat
:language: python
"""
import tempfile
import shlex
if source_fn is None:
f, fname = tempfile.mkstemp(suffix=extension)
# f is a file descriptor so need to close it
# carefully -- not with .close() directly
os.close(f)
else:
fname = source_fn
if not isinstance(source, str):
source = str(source, 'utf-8')
try:
with open(fname, 'w') as f:
f.write(source)
args = ['-c', '-m', modulename, f.name]
if isinstance(extra_args, str):
is_posix = (os.name == 'posix')
extra_args = shlex.split(extra_args, posix=is_posix)
args.extend(extra_args)
c = [sys.executable,
'-c',
'import numpy.f2py as f2py2e;f2py2e.main()'] + args
try:
cp = subprocess.run(c, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
# preserve historic status code used by exec_command()
cp = subprocess.CompletedProcess(c, 127, stdout=b'', stderr=b'')
else:
if verbose:
print(cp.stdout.decode())
finally:
if source_fn is None:
os.remove(fname)
if full_output:
return cp
else:
return cp.returncode
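# Minimal usage sketch (assumes a working Fortran compiler on PATH; the
# source text and module name are illustrative):
#
#   fsource = '''
#          subroutine add(a, b, c)
#          double precision a, b, c
#          c = a + b
#          end
#   '''
#   status = compile(fsource, modulename='addmod', verbose=False)
#   # status == 0 on success, after which `import addmod` exposes addmod.add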
def get_include():
"""
Return the directory that contains the ``fortranobject.c`` and ``.h`` files.
.. note::
This function is not needed when building an extension with
`numpy.distutils` directly from ``.f`` and/or ``.pyf`` files
in one go.
Python extension modules built with f2py-generated code need to use
``fortranobject.c`` as a source file, and include the ``fortranobject.h``
header. This function can be used to obtain the directory containing
both of these files.
Returns
-------
include_path : str
Absolute path to the directory containing ``fortranobject.c`` and
``fortranobject.h``.
Notes
-----
.. versionadded:: 1.21.1
Unless the build system you are using has specific support for f2py,
building a Python extension using a ``.pyf`` signature file is a two-step
process. For a module ``mymod``:
* Step 1: run ``python -m numpy.f2py mymod.pyf --quiet``. This
generates ``_mymodmodule.c`` and (if needed)
      ``_mymod-f2pywrappers.f`` files next to ``mymod.pyf``.
* Step 2: build your Python extension module. This requires the
following source files:
* ``_mymodmodule.c``
* ``_mymod-f2pywrappers.f`` (if it was generated in Step 1)
* ``fortranobject.c``
See Also
--------
numpy.get_include : function that returns the numpy include directory
"""
return os.path.join(os.path.dirname(__file__), 'src')
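# Usage sketch (build commands are illustrative):
#
#   incdir = get_include()
#   # compile `fortranobject.c` from `incdir` into the extension and pass
#   # `-I{incdir}` to the C compiler alongside the f2py-generated sources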
def __getattr__(attr):
# Avoid importing things that aren't needed for building
# which might import the main numpy module
if attr == "test":
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
return test
else:
raise AttributeError("module {!r} has no attribute "
"{!r}".format(__name__, attr))
def __dir__():
return list(globals().keys() | {"test"})
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/capi_maps.py
#!/usr/bin/env python3
"""
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 10:57:33 $
Pearu Peterson
"""
from . import __version__
f2py_version = __version__.version
import copy
import re
import os
from .crackfortran import markoutercomma
from . import cb_rules
# The environment provided by auxfuncs.py is needed for some calls to eval.
# As the needed functions cannot be determined by static inspection of the
# code, it is safest to use import * pending a major refactoring of f2py.
from .auxfuncs import *
__all__ = [
'getctype', 'getstrlength', 'getarrdims', 'getpydocsign',
'getarrdocsign', 'getinit', 'sign2map', 'routsign2map', 'modsign2map',
'cb_sign2map', 'cb_routsign2map', 'common_sign2map'
]
# Numarray and Numeric users should set this to False
using_newcore = True
depargs = []
lcb_map = {}
lcb2_map = {}
# forced casting: mainly caused by the fact that Python or Numeric
# C/APIs do not support the corresponding C types.
c2py_map = {'double': 'float',
'float': 'float', # forced casting
'long_double': 'float', # forced casting
'char': 'int', # forced casting
'signed_char': 'int', # forced casting
'unsigned_char': 'int', # forced casting
'short': 'int', # forced casting
'unsigned_short': 'int', # forced casting
'int': 'int', # forced casting
'long': 'int',
'long_long': 'long',
'unsigned': 'int', # forced casting
'complex_float': 'complex', # forced casting
'complex_double': 'complex',
'complex_long_double': 'complex', # forced casting
'string': 'string',
}
c2capi_map = {'double': 'NPY_DOUBLE',
'float': 'NPY_FLOAT',
'long_double': 'NPY_DOUBLE', # forced casting
'char': 'NPY_STRING',
'unsigned_char': 'NPY_UBYTE',
'signed_char': 'NPY_BYTE',
'short': 'NPY_SHORT',
'unsigned_short': 'NPY_USHORT',
'int': 'NPY_INT',
'unsigned': 'NPY_UINT',
'long': 'NPY_LONG',
'long_long': 'NPY_LONG', # forced casting
'complex_float': 'NPY_CFLOAT',
'complex_double': 'NPY_CDOUBLE',
'complex_long_double': 'NPY_CDOUBLE', # forced casting
'string': 'NPY_STRING'}
# These new maps aren't used anywhere yet, but should be by default
# unless building numeric or numarray extensions.
if using_newcore:
c2capi_map = {'double': 'NPY_DOUBLE',
'float': 'NPY_FLOAT',
'long_double': 'NPY_LONGDOUBLE',
'char': 'NPY_BYTE',
'unsigned_char': 'NPY_UBYTE',
'signed_char': 'NPY_BYTE',
'short': 'NPY_SHORT',
'unsigned_short': 'NPY_USHORT',
'int': 'NPY_INT',
'unsigned': 'NPY_UINT',
'long': 'NPY_LONG',
'unsigned_long': 'NPY_ULONG',
'long_long': 'NPY_LONGLONG',
'unsigned_long_long': 'NPY_ULONGLONG',
'complex_float': 'NPY_CFLOAT',
'complex_double': 'NPY_CDOUBLE',
'complex_long_double': 'NPY_CDOUBLE',
'string':'NPY_STRING'
}
c2pycode_map = {'double': 'd',
'float': 'f',
'long_double': 'd', # forced casting
'char': '1',
'signed_char': '1',
'unsigned_char': 'b',
'short': 's',
'unsigned_short': 'w',
'int': 'i',
'unsigned': 'u',
'long': 'l',
'long_long': 'L',
'complex_float': 'F',
'complex_double': 'D',
'complex_long_double': 'D', # forced casting
'string': 'c'
}
if using_newcore:
c2pycode_map = {'double': 'd',
'float': 'f',
'long_double': 'g',
'char': 'b',
'unsigned_char': 'B',
'signed_char': 'b',
'short': 'h',
'unsigned_short': 'H',
'int': 'i',
'unsigned': 'I',
'long': 'l',
'unsigned_long': 'L',
'long_long': 'q',
'unsigned_long_long': 'Q',
'complex_float': 'F',
'complex_double': 'D',
'complex_long_double': 'G',
'string': 'S'}
c2buildvalue_map = {'double': 'd',
'float': 'f',
'char': 'b',
'signed_char': 'b',
'short': 'h',
'int': 'i',
'long': 'l',
'long_long': 'L',
'complex_float': 'N',
'complex_double': 'N',
'complex_long_double': 'N',
'string': 'y'}
f2cmap_all = {'real': {'': 'float', '4': 'float', '8': 'double',
'12': 'long_double', '16': 'long_double'},
'integer': {'': 'int', '1': 'signed_char', '2': 'short',
'4': 'int', '8': 'long_long',
'-1': 'unsigned_char', '-2': 'unsigned_short',
'-4': 'unsigned', '-8': 'unsigned_long_long'},
'complex': {'': 'complex_float', '8': 'complex_float',
'16': 'complex_double', '24': 'complex_long_double',
'32': 'complex_long_double'},
'complexkind': {'': 'complex_float', '4': 'complex_float',
'8': 'complex_double', '12': 'complex_long_double',
'16': 'complex_long_double'},
'logical': {'': 'int', '1': 'char', '2': 'short', '4': 'int',
'8': 'long_long'},
'double complex': {'': 'complex_double'},
'double precision': {'': 'double'},
'byte': {'': 'char'},
'character': {'': 'string'}
}
f2cmap_default = copy.deepcopy(f2cmap_all)
f2cmap_mapped = []
def load_f2cmap_file(f2cmap_file):
global f2cmap_all
f2cmap_all = copy.deepcopy(f2cmap_default)
if f2cmap_file is None:
# Default value
f2cmap_file = '.f2py_f2cmap'
if not os.path.isfile(f2cmap_file):
return
# User defined additions to f2cmap_all.
# f2cmap_file must contain a dictionary of dictionaries, only. For
# example, {'real':{'low':'float'}} means that Fortran 'real(low)' is
# interpreted as C 'float'. This feature is useful for F90/95 users if
# they use PARAMETERS in type specifications.
try:
outmess('Reading f2cmap from {!r} ...\n'.format(f2cmap_file))
with open(f2cmap_file, 'r') as f:
d = eval(f.read().lower(), {}, {})
for k, d1 in d.items():
for k1 in d1.keys():
d1[k1.lower()] = d1[k1]
d[k.lower()] = d[k]
for k in d.keys():
if k not in f2cmap_all:
f2cmap_all[k] = {}
for k1 in d[k].keys():
if d[k][k1] in c2py_map:
if k1 in f2cmap_all[k]:
outmess(
"\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n" % (k, k1, f2cmap_all[k][k1], d[k][k1]))
f2cmap_all[k][k1] = d[k][k1]
outmess('\tMapping "%s(kind=%s)" to "%s"\n' %
(k, k1, d[k][k1]))
f2cmap_mapped.append(d[k][k1])
else:
errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" % (
k, k1, d[k][k1], d[k][k1], list(c2py_map.keys())))
outmess('Successfully applied user defined f2cmap changes\n')
except Exception as msg:
errmess(
'Failed to apply user defined f2cmap changes: %s. Skipping.\n' % (msg))
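# Example `.f2py_f2cmap` file contents (a plain Python dict literal; the
# kind names `sp`/`dp` are hypothetical PARAMETER names):
#
#   {'real': {'sp': 'float', 'dp': 'double'}}
#
# With this file present, Fortran `real(sp)`/`real(dp)` declarations are
# wrapped as C `float`/`double`.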
cformat_map = {'double': '%g',
'float': '%g',
'long_double': '%Lg',
'char': '%d',
'signed_char': '%d',
'unsigned_char': '%hhu',
'short': '%hd',
'unsigned_short': '%hu',
'int': '%d',
'unsigned': '%u',
'long': '%ld',
'unsigned_long': '%lu',
'long_long': '%ld',
'complex_float': '(%g,%g)',
'complex_double': '(%g,%g)',
'complex_long_double': '(%Lg,%Lg)',
'string': '%s',
}
# Auxiliary functions
def getctype(var):
"""
Determines C type
"""
ctype = 'void'
if isfunction(var):
if 'result' in var:
a = var['result']
else:
a = var['name']
if a in var['vars']:
return getctype(var['vars'][a])
else:
errmess('getctype: function %s has no return value?!\n' % a)
elif issubroutine(var):
return ctype
elif 'typespec' in var and var['typespec'].lower() in f2cmap_all:
typespec = var['typespec'].lower()
f2cmap = f2cmap_all[typespec]
ctype = f2cmap[''] # default type
if 'kindselector' in var:
if '*' in var['kindselector']:
try:
ctype = f2cmap[var['kindselector']['*']]
except KeyError:
errmess('getctype: "%s %s %s" not supported.\n' %
(var['typespec'], '*', var['kindselector']['*']))
elif 'kind' in var['kindselector']:
if typespec + 'kind' in f2cmap_all:
f2cmap = f2cmap_all[typespec + 'kind']
try:
ctype = f2cmap[var['kindselector']['kind']]
except KeyError:
if typespec in f2cmap_all:
f2cmap = f2cmap_all[typespec]
try:
ctype = f2cmap[str(var['kindselector']['kind'])]
except KeyError:
errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="<C typespec>")) in %s/.f2py_f2cmap file).\n'
% (typespec, var['kindselector']['kind'], ctype,
typespec, var['kindselector']['kind'], os.getcwd()))
else:
if not isexternal(var):
errmess('getctype: No C-type found in "%s", assuming void.\n' % var)
return ctype
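# Illustrative calls (variable descriptors normally come from crackfortran;
# the minimal dicts below are assumptions):
#
#   getctype({'typespec': 'real', 'kindselector': {'kind': '8'}})  # -> 'double'
#   getctype({'typespec': 'integer', 'kindselector': {'*': '2'}})  # -> 'short'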
def getstrlength(var):
if isstringfunction(var):
if 'result' in var:
a = var['result']
else:
a = var['name']
if a in var['vars']:
return getstrlength(var['vars'][a])
else:
errmess('getstrlength: function %s has no return value?!\n' % a)
if not isstring(var):
errmess(
'getstrlength: expected a signature of a string but got: %s\n' % (repr(var)))
len = '1'
if 'charselector' in var:
a = var['charselector']
if '*' in a:
len = a['*']
elif 'len' in a:
len = a['len']
if re.match(r'\(\s*(\*|:)\s*\)', len) or re.match(r'(\*|:)', len):
if isintent_hide(var):
errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n' % (
repr(var)))
len = '-1'
return len
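# Illustrative call (minimal crackfortran-style descriptor, assumed shape):
#
#   getstrlength({'typespec': 'character', 'charselector': {'*': '10'}})  # -> '10'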
def getarrdims(a, var, verbose=0):
ret = {}
if isstring(var) and not isarray(var):
ret['dims'] = getstrlength(var)
ret['size'] = ret['dims']
ret['rank'] = '1'
elif isscalar(var):
ret['size'] = '1'
ret['rank'] = '0'
ret['dims'] = ''
elif isarray(var):
dim = copy.copy(var['dimension'])
ret['size'] = '*'.join(dim)
try:
ret['size'] = repr(eval(ret['size']))
except Exception:
pass
ret['dims'] = ','.join(dim)
ret['rank'] = repr(len(dim))
ret['rank*[-1]'] = repr(len(dim) * [-1])[1:-1]
for i in range(len(dim)): # solve dim for dependencies
v = []
if dim[i] in depargs:
v = [dim[i]]
else:
for va in depargs:
if re.match(r'.*?\b%s\b.*' % va, dim[i]):
v.append(va)
for va in v:
if depargs.index(va) > depargs.index(a):
dim[i] = '*'
break
ret['setdims'], i = '', -1
for d in dim:
i = i + 1
if d not in ['*', ':', '(*)', '(:)']:
ret['setdims'] = '%s#varname#_Dims[%d]=%s,' % (
ret['setdims'], i, d)
if ret['setdims']:
ret['setdims'] = ret['setdims'][:-1]
ret['cbsetdims'], i = '', -1
for d in var['dimension']:
i = i + 1
if d not in ['*', ':', '(*)', '(:)']:
ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % (
ret['cbsetdims'], i, d)
elif isintent_in(var):
outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n'
% (d))
ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % (
ret['cbsetdims'], i, 0)
elif verbose:
errmess(
'getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\n' % (repr(a), repr(d)))
if ret['cbsetdims']:
ret['cbsetdims'] = ret['cbsetdims'][:-1]
# if not isintent_c(var):
# var['dimension'].reverse()
return ret
def getpydocsign(a, var):
global lcb_map
if isfunction(var):
if 'result' in var:
af = var['result']
else:
af = var['name']
if af in var['vars']:
return getpydocsign(af, var['vars'][af])
else:
errmess('getctype: function %s has no return value?!\n' % af)
return '', ''
sig, sigout = a, a
opt = ''
if isintent_in(var):
opt = 'input'
elif isintent_inout(var):
opt = 'in/output'
out_a = a
if isintent_out(var):
for k in var['intent']:
if k[:4] == 'out=':
out_a = k[4:]
break
init = ''
ctype = getctype(var)
if hasinitvalue(var):
init, showinit = getinit(a, var)
init = ', optional\\n Default: %s' % showinit
if isscalar(var):
if isintent_inout(var):
sig = '%s : %s rank-0 array(%s,\'%s\')%s' % (a, opt, c2py_map[ctype],
c2pycode_map[ctype], init)
else:
sig = '%s : %s %s%s' % (a, opt, c2py_map[ctype], init)
sigout = '%s : %s' % (out_a, c2py_map[ctype])
elif isstring(var):
if isintent_inout(var):
sig = '%s : %s rank-0 array(string(len=%s),\'c\')%s' % (
a, opt, getstrlength(var), init)
else:
sig = '%s : %s string(len=%s)%s' % (
a, opt, getstrlength(var), init)
sigout = '%s : string(len=%s)' % (out_a, getstrlength(var))
elif isarray(var):
dim = var['dimension']
rank = repr(len(dim))
sig = '%s : %s rank-%s array(\'%s\') with bounds (%s)%s' % (a, opt, rank,
c2pycode_map[
ctype],
','.join(dim), init)
if a == out_a:
sigout = '%s : rank-%s array(\'%s\') with bounds (%s)'\
% (a, rank, c2pycode_map[ctype], ','.join(dim))
else:
sigout = '%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\
% (out_a, rank, c2pycode_map[ctype], ','.join(dim), a)
elif isexternal(var):
ua = ''
if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]:
ua = lcb2_map[lcb_map[a]]['argname']
if not ua == a:
ua = ' => %s' % ua
else:
ua = ''
sig = '%s : call-back function%s' % (a, ua)
sigout = sig
else:
errmess(
'getpydocsign: Could not resolve docsignature for "%s".\n' % a)
return sig, sigout
def getarrdocsign(a, var):
ctype = getctype(var)
if isstring(var) and (not isarray(var)):
sig = '%s : rank-0 array(string(len=%s),\'c\')' % (a,
getstrlength(var))
elif isscalar(var):
sig = '%s : rank-0 array(%s,\'%s\')' % (a, c2py_map[ctype],
c2pycode_map[ctype],)
elif isarray(var):
dim = var['dimension']
rank = repr(len(dim))
sig = '%s : rank-%s array(\'%s\') with bounds (%s)' % (a, rank,
c2pycode_map[
ctype],
','.join(dim))
return sig
def getinit(a, var):
if isstring(var):
init, showinit = '""', "''"
else:
init, showinit = '', ''
if hasinitvalue(var):
init = var['=']
showinit = init
if iscomplex(var) or iscomplexarray(var):
ret = {}
try:
v = var["="]
if ',' in v:
ret['init.r'], ret['init.i'] = markoutercomma(
v[1:-1]).split('@,@')
else:
v = eval(v, {}, {})
ret['init.r'], ret['init.i'] = str(v.real), str(v.imag)
except Exception:
raise ValueError(
'getinit: expected complex number `(r,i)\' but got `%s\' as initial value of %r.' % (init, a))
if isarray(var):
init = '(capi_c.r=%s,capi_c.i=%s,capi_c)' % (
ret['init.r'], ret['init.i'])
elif isstring(var):
if not init:
init, showinit = '""', "''"
if init[0] == "'":
init = '"%s"' % (init[1:-1].replace('"', '\\"'))
if init[0] == '"':
showinit = "'%s'" % (init[1:-1])
return init, showinit
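# Illustrative call (minimal descriptor; real ones come from crackfortran):
#
#   getinit('z', {'typespec': 'complex', '=': '(1.0, 2.0)'})
#   # -> ('(1.0, 2.0)', '(1.0, 2.0)')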
def sign2map(a, var):
"""
varname,ctype,atype
init,init.r,init.i,pytype
vardebuginfo,vardebugshowvalue,varshowvalue
varrformat
intent
"""
out_a = a
if isintent_out(var):
for k in var['intent']:
if k[:4] == 'out=':
out_a = k[4:]
break
ret = {'varname': a, 'outvarname': out_a, 'ctype': getctype(var)}
intent_flags = []
for f, s in isintent_dict.items():
if f(var):
intent_flags.append('F2PY_%s' % s)
if intent_flags:
# TODO: Evaluate intent_flags here.
ret['intent'] = '|'.join(intent_flags)
else:
ret['intent'] = 'F2PY_INTENT_IN'
if isarray(var):
ret['varrformat'] = 'N'
elif ret['ctype'] in c2buildvalue_map:
ret['varrformat'] = c2buildvalue_map[ret['ctype']]
else:
ret['varrformat'] = 'O'
ret['init'], ret['showinit'] = getinit(a, var)
if hasinitvalue(var) and iscomplex(var) and not isarray(var):
ret['init.r'], ret['init.i'] = markoutercomma(
ret['init'][1:-1]).split('@,@')
if isexternal(var):
ret['cbnamekey'] = a
if a in lcb_map:
ret['cbname'] = lcb_map[a]
ret['maxnofargs'] = lcb2_map[lcb_map[a]]['maxnofargs']
ret['nofoptargs'] = lcb2_map[lcb_map[a]]['nofoptargs']
ret['cbdocstr'] = lcb2_map[lcb_map[a]]['docstr']
ret['cblatexdocstr'] = lcb2_map[lcb_map[a]]['latexdocstr']
else:
ret['cbname'] = a
errmess('sign2map: Confused: external %s is not in lcb_map%s.\n' % (
a, list(lcb_map.keys())))
if isstring(var):
ret['length'] = getstrlength(var)
if isarray(var):
ret = dictappend(ret, getarrdims(a, var))
dim = copy.copy(var['dimension'])
if ret['ctype'] in c2capi_map:
ret['atype'] = c2capi_map[ret['ctype']]
# Debug info
if debugcapi(var):
il = [isintent_in, 'input', isintent_out, 'output',
isintent_inout, 'inoutput', isrequired, 'required',
isoptional, 'optional', isintent_hide, 'hidden',
iscomplex, 'complex scalar',
l_and(isscalar, l_not(iscomplex)), 'scalar',
isstring, 'string', isarray, 'array',
iscomplexarray, 'complex array', isstringarray, 'string array',
iscomplexfunction, 'complex function',
l_and(isfunction, l_not(iscomplexfunction)), 'function',
isexternal, 'callback',
isintent_callback, 'callback',
isintent_aux, 'auxiliary',
]
rl = []
for i in range(0, len(il), 2):
if il[i](var):
rl.append(il[i + 1])
if isstring(var):
rl.append('slen(%s)=%s' % (a, ret['length']))
if isarray(var):
ddim = ','.join(
map(lambda x, y: '%s|%s' % (x, y), var['dimension'], dim))
rl.append('dims(%s)' % ddim)
if isexternal(var):
ret['vardebuginfo'] = 'debug-capi:%s=>%s:%s' % (
a, ret['cbname'], ','.join(rl))
else:
ret['vardebuginfo'] = 'debug-capi:%s %s=%s:%s' % (
ret['ctype'], a, ret['showinit'], ','.join(rl))
if isscalar(var):
if ret['ctype'] in cformat_map:
ret['vardebugshowvalue'] = 'debug-capi:%s=%s' % (
a, cformat_map[ret['ctype']])
if isstring(var):
ret['vardebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % (
a, a)
if isexternal(var):
ret['vardebugshowvalue'] = 'debug-capi:%s=%%p' % (a)
if ret['ctype'] in cformat_map:
ret['varshowvalue'] = '#name#:%s=%s' % (a, cformat_map[ret['ctype']])
ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']])
if isstring(var):
ret['varshowvalue'] = '#name#:slen(%s)=%%d %s=\\"%%s\\"' % (a, a)
ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var)
if hasnote(var):
ret['note'] = var['note']
return ret
def routsign2map(rout):
"""
name,NAME,begintitle,endtitle
rname,ctype,rformat
routdebugshowvalue
"""
global lcb_map
name = rout['name']
fname = getfortranname(rout)
ret = {'name': name,
'texname': name.replace('_', '\\_'),
'name_lower': name.lower(),
'NAME': name.upper(),
'begintitle': gentitle(name),
'endtitle': gentitle('end of %s' % name),
'fortranname': fname,
'FORTRANNAME': fname.upper(),
'callstatement': getcallstatement(rout) or '',
'usercode': getusercode(rout) or '',
'usercode1': getusercode1(rout) or '',
}
if '_' in fname:
ret['F_FUNC'] = 'F_FUNC_US'
else:
ret['F_FUNC'] = 'F_FUNC'
if '_' in name:
ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC_US'
else:
ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC'
lcb_map = {}
if 'use' in rout:
for u in rout['use'].keys():
if u in cb_rules.cb_map:
for un in cb_rules.cb_map[u]:
ln = un[0]
if 'map' in rout['use'][u]:
for k in rout['use'][u]['map'].keys():
if rout['use'][u]['map'][k] == un[0]:
ln = k
break
lcb_map[ln] = un[1]
elif 'externals' in rout and rout['externals']:
errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n' % (
ret['name'], repr(rout['externals'])))
ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or ''
if isfunction(rout):
if 'result' in rout:
a = rout['result']
else:
a = rout['name']
ret['rname'] = a
ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout)
ret['ctype'] = getctype(rout['vars'][a])
if hasresultnote(rout):
ret['resultnote'] = rout['vars'][a]['note']
rout['vars'][a]['note'] = ['See elsewhere.']
if ret['ctype'] in c2buildvalue_map:
ret['rformat'] = c2buildvalue_map[ret['ctype']]
else:
ret['rformat'] = 'O'
errmess('routsign2map: no c2buildvalue key for type %s\n' %
(repr(ret['ctype'])))
if debugcapi(rout):
if ret['ctype'] in cformat_map:
ret['routdebugshowvalue'] = 'debug-capi:%s=%s' % (
a, cformat_map[ret['ctype']])
if isstringfunction(rout):
ret['routdebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % (
a, a)
if isstringfunction(rout):
ret['rlength'] = getstrlength(rout['vars'][a])
if ret['rlength'] == '-1':
errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\n' % (
repr(rout['name'])))
ret['rlength'] = '10'
if hasnote(rout):
ret['note'] = rout['note']
rout['note'] = ['See elsewhere.']
return ret
def modsign2map(m):
"""
modulename
"""
if ismodule(m):
ret = {'f90modulename': m['name'],
'F90MODULENAME': m['name'].upper(),
'texf90modulename': m['name'].replace('_', '\\_')}
else:
ret = {'modulename': m['name'],
'MODULENAME': m['name'].upper(),
'texmodulename': m['name'].replace('_', '\\_')}
ret['restdoc'] = getrestdoc(m) or []
if hasnote(m):
ret['note'] = m['note']
ret['usercode'] = getusercode(m) or ''
ret['usercode1'] = getusercode1(m) or ''
if m['body']:
ret['interface_usercode'] = getusercode(m['body'][0]) or ''
else:
ret['interface_usercode'] = ''
ret['pymethoddef'] = getpymethoddef(m) or ''
if 'coutput' in m:
ret['coutput'] = m['coutput']
if 'f2py_wrapper_output' in m:
ret['f2py_wrapper_output'] = m['f2py_wrapper_output']
return ret
def cb_sign2map(a, var, index=None):
ret = {'varname': a}
ret['varname_i'] = ret['varname']
ret['ctype'] = getctype(var)
if ret['ctype'] in c2capi_map:
ret['atype'] = c2capi_map[ret['ctype']]
if ret['ctype'] in cformat_map:
ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']])
if isarray(var):
ret = dictappend(ret, getarrdims(a, var))
ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var)
if hasnote(var):
ret['note'] = var['note']
var['note'] = ['See elsewhere.']
return ret
def cb_routsign2map(rout, um):
"""
name,begintitle,endtitle,argname
ctype,rctype,maxnofargs,nofoptargs,returncptr
"""
ret = {'name': 'cb_%s_in_%s' % (rout['name'], um),
'returncptr': ''}
if isintent_callback(rout):
if '_' in rout['name']:
F_FUNC = 'F_FUNC_US'
else:
F_FUNC = 'F_FUNC'
ret['callbackname'] = '%s(%s,%s)' \
% (F_FUNC,
rout['name'].lower(),
rout['name'].upper(),
)
ret['static'] = 'extern'
else:
ret['callbackname'] = ret['name']
ret['static'] = 'static'
ret['argname'] = rout['name']
ret['begintitle'] = gentitle(ret['name'])
ret['endtitle'] = gentitle('end of %s' % ret['name'])
ret['ctype'] = getctype(rout)
ret['rctype'] = 'void'
if ret['ctype'] == 'string':
ret['rctype'] = 'void'
else:
ret['rctype'] = ret['ctype']
if ret['rctype'] != 'void':
if iscomplexfunction(rout):
ret['returncptr'] = """
#ifdef F2PY_CB_RETURNCOMPLEX
return_value=
#endif
"""
else:
ret['returncptr'] = 'return_value='
if ret['ctype'] in cformat_map:
ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']])
if isstringfunction(rout):
ret['strlength'] = getstrlength(rout)
if isfunction(rout):
if 'result' in rout:
a = rout['result']
else:
a = rout['name']
if hasnote(rout['vars'][a]):
ret['note'] = rout['vars'][a]['note']
rout['vars'][a]['note'] = ['See elsewhere.']
ret['rname'] = a
ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout)
if iscomplexfunction(rout):
ret['rctype'] = """
#ifdef F2PY_CB_RETURNCOMPLEX
#ctype#
#else
void
#endif
"""
else:
if hasnote(rout):
ret['note'] = rout['note']
rout['note'] = ['See elsewhere.']
nofargs = 0
nofoptargs = 0
if 'args' in rout and 'vars' in rout:
for a in rout['args']:
var = rout['vars'][a]
if l_or(isintent_in, isintent_inout)(var):
nofargs = nofargs + 1
if isoptional(var):
nofoptargs = nofoptargs + 1
ret['maxnofargs'] = repr(nofargs)
ret['nofoptargs'] = repr(nofoptargs)
if hasnote(rout) and isfunction(rout) and 'result' in rout:
ret['routnote'] = rout['note']
rout['note'] = ['See elsewhere.']
return ret
def common_sign2map(a, var):  # obsolete
ret = {'varname': a, 'ctype': getctype(var)}
if isstringarray(var):
ret['ctype'] = 'char'
if ret['ctype'] in c2capi_map:
ret['atype'] = c2capi_map[ret['ctype']]
if ret['ctype'] in cformat_map:
ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']])
if isarray(var):
ret = dictappend(ret, getarrdims(a, var))
elif isstring(var):
ret['size'] = getstrlength(var)
ret['rank'] = '1'
ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var)
if hasnote(var):
ret['note'] = var['note']
var['note'] = ['See elsewhere.']
# for strings this returns 0-rank but actually is 1-rank
ret['arrdocstr'] = getarrdocsign(a, var)
return ret
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/setup.py
#!/usr/bin/env python3
"""
setup.py for installing F2PY
Usage:
pip install .
Copyright 2001-2005 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.32 $
$Date: 2005/01/30 17:22:14 $
Pearu Peterson
"""
from numpy.distutils.core import setup
from numpy.distutils.misc_util import Configuration
from __version__ import version
def configuration(parent_package='', top_path=None):
config = Configuration('f2py', parent_package, top_path)
config.add_subpackage('tests')
config.add_data_dir('tests/src')
config.add_data_files(
'src/fortranobject.c',
'src/fortranobject.h')
config.add_data_files('*.pyi')
return config
if __name__ == "__main__":
config = configuration(top_path='')
config = config.todict()
config['classifiers'] = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: NumPy License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: C',
'Programming Language :: Fortran',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Software Development :: Code Generators',
]
setup(version=version,
description="F2PY - Fortran to Python Interface Generator",
author="Pearu Peterson",
author_email="[email protected]",
maintainer="Pearu Peterson",
maintainer_email="[email protected]",
license="BSD",
platforms="Unix, Windows (mingw|cygwin), Mac OSX",
long_description="""\
The Fortran to Python Interface Generator, or F2PY for short, is a
command line tool (f2py) for generating Python C/API modules for
wrapping Fortran 77/90/95 subroutines, accessing common blocks from
Python, and calling Python functions from Fortran (call-backs).
Interfacing subroutines/data from Fortran 90/95 modules is supported.""",
url="https://numpy.org/doc/stable/f2py/",
keywords=['Fortran', 'f2py'],
**config)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/f90mod_rules.py
#!/usr/bin/env python3
"""
Build F90 module support for f2py2e.
Copyright 2000 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/02/03 19:30:23 $
Pearu Peterson
"""
__version__ = "$Revision: 1.27 $"[10:-1]
f2py_version = 'See `f2py -v`'
import numpy as np
from . import capi_maps
from . import func2subr
from .crackfortran import undo_rmbadname, undo_rmbadname1
# The environment provided by auxfuncs.py is needed for some calls to eval.
# As the needed functions cannot be determined by static inspection of the
# code, it is safest to use import * pending a major refactoring of f2py.
from .auxfuncs import *
options = {}
def findf90modules(m):
if ismodule(m):
return [m]
if not hasbody(m):
return []
ret = []
for b in m['body']:
if ismodule(b):
ret.append(b)
else:
ret = ret + findf90modules(b)
return ret
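# Sketch (hypothetical crackfortran block dicts; only the fields consulted
# here are shown):
#
#   mod = {'block': 'module', 'name': 'm1'}
#   findf90modules({'body': [mod]})   # -> [mod]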
fgetdims1 = """\
external f2pysetdata
logical ns
integer r,i
integer(%d) s(*)
ns = .FALSE.
if (allocated(d)) then
do i=1,r
if ((size(d,i).ne.s(i)).and.(s(i).ge.0)) then
ns = .TRUE.
end if
end do
if (ns) then
deallocate(d)
end if
end if
if ((.not.allocated(d)).and.(s(1).ge.1)) then""" % np.intp().itemsize
fgetdims2 = """\
end if
if (allocated(d)) then
do i=1,r
s(i) = size(d,i)
end do
end if
flag = 1
call f2pysetdata(d,allocated(d))"""
fgetdims2_sa = """\
end if
if (allocated(d)) then
do i=1,r
s(i) = size(d,i)
end do
!s(r) must be equal to len(d(1))
end if
flag = 2
call f2pysetdata(d,allocated(d))"""
def buildhooks(pymod):
from . import rules
ret = {'f90modhooks': [], 'initf90modhooks': [], 'body': [],
'need': ['F_FUNC', 'arrayobject.h'],
'separatorsfor': {'includes0': '\n', 'includes': '\n'},
'docs': ['"Fortran 90/95 modules:\\n"'],
'latexdoc': []}
fhooks = ['']
def fadd(line, s=fhooks):
s[0] = '%s\n %s' % (s[0], line)
doc = ['']
def dadd(line, s=doc):
s[0] = '%s\n%s' % (s[0], line)
for m in findf90modules(pymod):
sargs, fargs, efargs, modobjs, notvars, onlyvars = [], [], [], [], [
m['name']], []
sargsp = []
ifargs = []
mfargs = []
if hasbody(m):
for b in m['body']:
notvars.append(b['name'])
for n in m['vars'].keys():
var = m['vars'][n]
if (n not in notvars) and (not l_or(isintent_hide, isprivate)(var)):
onlyvars.append(n)
mfargs.append(n)
outmess('\t\tConstructing F90 module support for "%s"...\n' %
(m['name']))
if onlyvars:
outmess('\t\t Variables: %s\n' % (' '.join(onlyvars)))
chooks = ['']
def cadd(line, s=chooks):
s[0] = '%s\n%s' % (s[0], line)
ihooks = ['']
def iadd(line, s=ihooks):
s[0] = '%s\n%s' % (s[0], line)
vrd = capi_maps.modsign2map(m)
cadd('static FortranDataDef f2py_%s_def[] = {' % (m['name']))
dadd('\\subsection{Fortran 90/95 module \\texttt{%s}}\n' % (m['name']))
if hasnote(m):
note = m['note']
if isinstance(note, list):
note = '\n'.join(note)
dadd(note)
if onlyvars:
dadd('\\begin{description}')
for n in onlyvars:
var = m['vars'][n]
modobjs.append(n)
ct = capi_maps.getctype(var)
at = capi_maps.c2capi_map[ct]
dm = capi_maps.getarrdims(n, var)
dms = dm['dims'].replace('*', '-1').strip()
dms = dms.replace(':', '-1').strip()
if not dms:
dms = '-1'
use_fgetdims2 = fgetdims2
if isstringarray(var):
if 'charselector' in var and 'len' in var['charselector']:
cadd('\t{"%s",%s,{{%s,%s}},%s},'
% (undo_rmbadname1(n), dm['rank'], dms, var['charselector']['len'], at))
use_fgetdims2 = fgetdims2_sa
else:
cadd('\t{"%s",%s,{{%s}},%s},' %
(undo_rmbadname1(n), dm['rank'], dms, at))
else:
cadd('\t{"%s",%s,{{%s}},%s},' %
(undo_rmbadname1(n), dm['rank'], dms, at))
dadd('\\item[]{{}\\verb@%s@{}}' %
(capi_maps.getarrdocsign(n, var)))
if hasnote(var):
note = var['note']
if isinstance(note, list):
note = '\n'.join(note)
dadd('--- %s' % (note))
if isallocatable(var):
fargs.append('f2py_%s_getdims_%s' % (m['name'], n))
efargs.append(fargs[-1])
sargs.append(
'void (*%s)(int*,int*,void(*)(char*,int*),int*)' % (n))
sargsp.append('void (*)(int*,int*,void(*)(char*,int*),int*)')
iadd('\tf2py_%s_def[i_f2py++].func = %s;' % (m['name'], n))
fadd('subroutine %s(r,s,f2pysetdata,flag)' % (fargs[-1]))
fadd('use %s, only: d => %s\n' %
(m['name'], undo_rmbadname1(n)))
fadd('integer flag\n')
fhooks[0] = fhooks[0] + fgetdims1
dms = range(1, int(dm['rank']) + 1)
fadd(' allocate(d(%s))\n' %
(','.join(['s(%s)' % i for i in dms])))
fhooks[0] = fhooks[0] + use_fgetdims2
fadd('end subroutine %s' % (fargs[-1]))
else:
fargs.append(n)
sargs.append('char *%s' % (n))
sargsp.append('char*')
iadd('\tf2py_%s_def[i_f2py++].data = %s;' % (m['name'], n))
if onlyvars:
dadd('\\end{description}')
if hasbody(m):
for b in m['body']:
if not isroutine(b):
outmess("f90mod_rules.buildhooks:"
f" skipping {b['block']} {b['name']}\n")
continue
modobjs.append('%s()' % (b['name']))
b['modulename'] = m['name']
api, wrap = rules.buildapi(b)
if isfunction(b):
fhooks[0] = fhooks[0] + wrap
fargs.append('f2pywrap_%s_%s' % (m['name'], b['name']))
ifargs.append(func2subr.createfuncwrapper(b, signature=1))
else:
if wrap:
fhooks[0] = fhooks[0] + wrap
fargs.append('f2pywrap_%s_%s' % (m['name'], b['name']))
ifargs.append(
func2subr.createsubrwrapper(b, signature=1))
else:
fargs.append(b['name'])
mfargs.append(fargs[-1])
api['externroutines'] = []
ar = applyrules(api, vrd)
ar['docs'] = []
ar['docshort'] = []
ret = dictappend(ret, ar)
cadd('\t{"%s",-1,{{-1}},0,NULL,(void *)f2py_rout_#modulename#_%s_%s,doc_f2py_rout_#modulename#_%s_%s},' %
(b['name'], m['name'], b['name'], m['name'], b['name']))
sargs.append('char *%s' % (b['name']))
sargsp.append('char *')
iadd('\tf2py_%s_def[i_f2py++].data = %s;' %
(m['name'], b['name']))
cadd('\t{NULL}\n};\n')
iadd('}')
ihooks[0] = 'static void f2py_setup_%s(%s) {\n\tint i_f2py=0;%s' % (
m['name'], ','.join(sargs), ihooks[0])
if '_' in m['name']:
F_FUNC = 'F_FUNC_US'
else:
F_FUNC = 'F_FUNC'
iadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void (*)(%s));'
% (F_FUNC, m['name'], m['name'].upper(), ','.join(sargsp)))
iadd('static void f2py_init_%s(void) {' % (m['name']))
iadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);'
% (F_FUNC, m['name'], m['name'].upper(), m['name']))
iadd('}\n')
ret['f90modhooks'] = ret['f90modhooks'] + chooks + ihooks
ret['initf90modhooks'] = ['\tPyDict_SetItemString(d, "%s", PyFortranObject_New(f2py_%s_def,f2py_init_%s));' % (
m['name'], m['name'], m['name'])] + ret['initf90modhooks']
fadd('')
fadd('subroutine f2pyinit%s(f2pysetupfunc)' % (m['name']))
if mfargs:
for a in undo_rmbadname(mfargs):
fadd('use %s, only : %s' % (m['name'], a))
if ifargs:
fadd(' '.join(['interface'] + ifargs))
fadd('end interface')
fadd('external f2pysetupfunc')
if efargs:
for a in undo_rmbadname(efargs):
fadd('external %s' % (a))
fadd('call f2pysetupfunc(%s)' % (','.join(undo_rmbadname(fargs))))
fadd('end subroutine f2pyinit%s\n' % (m['name']))
dadd('\n'.join(ret['latexdoc']).replace(
r'\subsection{', r'\subsubsection{'))
ret['latexdoc'] = []
ret['docs'].append('"\t%s --- %s"' % (m['name'],
','.join(undo_rmbadname(modobjs))))
ret['routine_defs'] = ''
ret['doc'] = []
ret['docshort'] = []
ret['latexdoc'] = doc[0]
if len(ret['docs']) <= 1:
ret['docs'] = ''
return ret, fhooks[0]
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/common_rules.py
#!/usr/bin/env python3
"""
Build common block mechanism for f2py2e.
Copyright 2000 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 10:57:33 $
Pearu Peterson
"""
from . import __version__
f2py_version = __version__.version
from .auxfuncs import (
hasbody, hascommon, hasnote, isintent_hide, outmess
)
from . import capi_maps
from . import func2subr
from .crackfortran import rmbadname
def findcommonblocks(block, top=1):
ret = []
if hascommon(block):
for key, value in block['common'].items():
vars_ = {v: block['vars'][v] for v in value}
ret.append((key, value, vars_))
elif hasbody(block):
for b in block['body']:
ret = ret + findcommonblocks(b, 0)
if top:
tret = []
names = []
for t in ret:
if t[0] not in names:
names.append(t[0])
tret.append(t)
return tret
return ret
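# Sketch (hypothetical crackfortran block; only the fields used here are
# shown):
#
#   blk = {'common': {'cb': ['x', 'y']},
#          'vars': {'x': {'typespec': 'real'}, 'y': {'typespec': 'integer'}}}
#   findcommonblocks(blk)
#   # -> [('cb', ['x', 'y'], {'x': {...}, 'y': {...}})]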
def buildhooks(m):
ret = {'commonhooks': [], 'initcommonhooks': [],
'docs': ['"COMMON blocks:\\n"']}
fwrap = ['']
def fadd(line, s=fwrap):
s[0] = '%s\n %s' % (s[0], line)
chooks = ['']
def cadd(line, s=chooks):
s[0] = '%s\n%s' % (s[0], line)
ihooks = ['']
def iadd(line, s=ihooks):
s[0] = '%s\n%s' % (s[0], line)
doc = ['']
def dadd(line, s=doc):
s[0] = '%s\n%s' % (s[0], line)
for (name, vnames, vars) in findcommonblocks(m):
lower_name = name.lower()
hnames, inames = [], []
for n in vnames:
if isintent_hide(vars[n]):
hnames.append(n)
else:
inames.append(n)
if hnames:
outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n\t\t Hidden: %s\n' % (
name, ','.join(inames), ','.join(hnames)))
else:
outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n' % (
name, ','.join(inames)))
fadd('subroutine f2pyinit%s(setupfunc)' % name)
fadd('external setupfunc')
for n in vnames:
fadd(func2subr.var2fixfortran(vars, n))
if name == '_BLNK_':
fadd('common %s' % (','.join(vnames)))
else:
fadd('common /%s/ %s' % (name, ','.join(vnames)))
fadd('call setupfunc(%s)' % (','.join(inames)))
fadd('end\n')
cadd('static FortranDataDef f2py_%s_def[] = {' % (name))
idims = []
for n in inames:
ct = capi_maps.getctype(vars[n])
at = capi_maps.c2capi_map[ct]
dm = capi_maps.getarrdims(n, vars[n])
if dm['dims']:
idims.append('(%s)' % (dm['dims']))
else:
idims.append('')
dms = dm['dims'].strip()
if not dms:
dms = '-1'
cadd('\t{\"%s\",%s,{{%s}},%s},' % (n, dm['rank'], dms, at))
cadd('\t{NULL}\n};')
inames1 = rmbadname(inames)
inames1_tps = ','.join(['char *' + s for s in inames1])
cadd('static void f2py_setup_%s(%s) {' % (name, inames1_tps))
cadd('\tint i_f2py=0;')
for n in inames1:
cadd('\tf2py_%s_def[i_f2py++].data = %s;' % (name, n))
cadd('}')
if '_' in lower_name:
F_FUNC = 'F_FUNC_US'
else:
F_FUNC = 'F_FUNC'
cadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void(*)(%s));'
% (F_FUNC, lower_name, name.upper(),
','.join(['char*'] * len(inames1))))
cadd('static void f2py_init_%s(void) {' % name)
cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);'
% (F_FUNC, lower_name, name.upper(), name))
cadd('}\n')
iadd('\ttmp = PyFortranObject_New(f2py_%s_def,f2py_init_%s);' % (name, name))
iadd('\tF2PyDict_SetItemString(d, \"%s\", tmp);' % name)
iadd('\tPy_DECREF(tmp);')
tname = name.replace('_', '\\_')
dadd('\\subsection{Common block \\texttt{%s}}\n' % (tname))
dadd('\\begin{description}')
for n in inames:
dadd('\\item[]{{}\\verb@%s@{}}' %
(capi_maps.getarrdocsign(n, vars[n])))
if hasnote(vars[n]):
note = vars[n]['note']
if isinstance(note, list):
note = '\n'.join(note)
dadd('--- %s' % (note))
dadd('\\end{description}')
ret['docs'].append(
'"\t/%s/ %s\\n"' % (name, ','.join(map(lambda v, d: v + d, inames, idims))))
ret['commonhooks'] = chooks
ret['initcommonhooks'] = ihooks
ret['latexdoc'] = doc[0]
if len(ret['docs']) <= 1:
ret['docs'] = ''
return ret, fwrap[0]
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/rules.py
#!/usr/bin/env python3
"""
Rules for building C/API module with f2py2e.
Here is a skeleton of a new wrapper function (13Dec2001):
wrapper_function(args)
declarations
get_python_arguments, say, `a' and `b'
get_a_from_python
if (successful) {
get_b_from_python
if (successful) {
callfortran
if (successful) {
put_a_to_python
if (successful) {
put_b_to_python
if (successful) {
buildvalue = ...
}
}
}
}
cleanup_b
}
cleanup_a
return buildvalue
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/08/30 08:58:42 $
Pearu Peterson
"""
import os, sys
import time
import copy
from pathlib import Path
# __version__.version is now the same as the NumPy version
from . import __version__
f2py_version = __version__.version
numpy_version = __version__.version
from .auxfuncs import (
applyrules, debugcapi, dictappend, errmess, gentitle, getargs2,
hascallstatement, hasexternals, hasinitvalue, hasnote, hasresultnote,
isarray, isarrayofstrings, iscomplex, iscomplexarray,
iscomplexfunction, iscomplexfunction_warn, isdummyroutine, isexternal,
isfunction, isfunction_wrap, isint1array, isintent_aux, isintent_c,
isintent_callback, isintent_copy, isintent_hide, isintent_inout,
isintent_nothide, isintent_out, isintent_overwrite, islogical,
islong_complex, islong_double, islong_doublefunction, islong_long,
islong_longfunction, ismoduleroutine, isoptional, isrequired, isscalar,
issigned_long_longarray, isstring, isstringarray, isstringfunction,
issubroutine, issubroutine_wrap, isthreadsafe, isunsigned,
isunsigned_char, isunsigned_chararray, isunsigned_long_long,
isunsigned_long_longarray, isunsigned_short, isunsigned_shortarray,
l_and, l_not, l_or, outmess, replace, stripcomma, requiresf90wrapper
)
from . import capi_maps
from . import cfuncs
from . import common_rules
from . import use_rules
from . import f90mod_rules
from . import func2subr
options = {}
sepdict = {}
#for k in ['need_cfuncs']: sepdict[k]=','
for k in ['decl',
'frompyobj',
'cleanupfrompyobj',
'topyarr', 'method',
'pyobjfrom', 'closepyobjfrom',
'freemem',
'userincludes',
'includes0', 'includes', 'typedefs', 'typedefs_generated',
'cppmacros', 'cfuncs', 'callbacks',
'latexdoc',
'restdoc',
'routine_defs', 'externroutines',
'initf2pywraphooks',
'commonhooks', 'initcommonhooks',
'f90modhooks', 'initf90modhooks']:
sepdict[k] = '\n'
#################### Rules for C/API module #################
generationtime = int(os.environ.get('SOURCE_DATE_EPOCH', time.time()))
module_rules = {
'modulebody': """\
/* File: #modulename#module.c
* This file is auto-generated with f2py (version:#f2py_version#).
* f2py is a Fortran to Python Interface Generator (FPIG), Second Edition,
* written by Pearu Peterson <[email protected]>.
* Generation date: """ + time.asctime(time.gmtime(generationtime)) + """
* Do not edit this file directly unless you know what you are doing!!!
*/
#ifdef __cplusplus
extern \"C\" {
#endif
#ifndef PY_SSIZE_T_CLEAN
#define PY_SSIZE_T_CLEAN
#endif /* PY_SSIZE_T_CLEAN */
/* Unconditionally included */
#include <Python.h>
#include <numpy/npy_os.h>
""" + gentitle("See f2py2e/cfuncs.py: includes") + """
#includes#
#includes0#
""" + gentitle("See f2py2e/rules.py: mod_rules['modulebody']") + """
static PyObject *#modulename#_error;
static PyObject *#modulename#_module;
""" + gentitle("See f2py2e/cfuncs.py: typedefs") + """
#typedefs#
""" + gentitle("See f2py2e/cfuncs.py: typedefs_generated") + """
#typedefs_generated#
""" + gentitle("See f2py2e/cfuncs.py: cppmacros") + """
#cppmacros#
""" + gentitle("See f2py2e/cfuncs.py: cfuncs") + """
#cfuncs#
""" + gentitle("See f2py2e/cfuncs.py: userincludes") + """
#userincludes#
""" + gentitle("See f2py2e/capi_rules.py: usercode") + """
#usercode#
/* See f2py2e/rules.py */
#externroutines#
""" + gentitle("See f2py2e/capi_rules.py: usercode1") + """
#usercode1#
""" + gentitle("See f2py2e/cb_rules.py: buildcallback") + """
#callbacks#
""" + gentitle("See f2py2e/rules.py: buildapi") + """
#body#
""" + gentitle("See f2py2e/f90mod_rules.py: buildhooks") + """
#f90modhooks#
""" + gentitle("See f2py2e/rules.py: module_rules['modulebody']") + """
""" + gentitle("See f2py2e/common_rules.py: buildhooks") + """
#commonhooks#
""" + gentitle("See f2py2e/rules.py") + """
static FortranDataDef f2py_routine_defs[] = {
#routine_defs#
{NULL}
};
static PyMethodDef f2py_module_methods[] = {
#pymethoddef#
{NULL,NULL}
};
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"#modulename#",
NULL,
-1,
f2py_module_methods,
NULL,
NULL,
NULL,
NULL
};
PyMODINIT_FUNC PyInit_#modulename#(void) {
int i;
PyObject *m,*d, *s, *tmp;
m = #modulename#_module = PyModule_Create(&moduledef);
Py_SET_TYPE(&PyFortran_Type, &PyType_Type);
import_array();
if (PyErr_Occurred())
{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return m;}
d = PyModule_GetDict(m);
s = PyUnicode_FromString(\"#f2py_version#\");
PyDict_SetItemString(d, \"__version__\", s);
Py_DECREF(s);
s = PyUnicode_FromString(
\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\");
PyDict_SetItemString(d, \"__doc__\", s);
Py_DECREF(s);
s = PyUnicode_FromString(\"""" + numpy_version + """\");
PyDict_SetItemString(d, \"__f2py_numpy_version__\", s);
Py_DECREF(s);
#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL);
/*
* Store the error object inside the dict, so that it could get deallocated.
* (in practice, this is a module, so it likely will not and cannot.)
*/
PyDict_SetItemString(d, \"_#modulename#_error\", #modulename#_error);
Py_DECREF(#modulename#_error);
for(i=0;f2py_routine_defs[i].name!=NULL;i++) {
tmp = PyFortranObject_NewAsAttr(&f2py_routine_defs[i]);
PyDict_SetItemString(d, f2py_routine_defs[i].name, tmp);
Py_DECREF(tmp);
}
#initf2pywraphooks#
#initf90modhooks#
#initcommonhooks#
#interface_usercode#
#ifdef F2PY_REPORT_ATEXIT
if (! PyErr_Occurred())
on_exit(f2py_report_on_exit,(void*)\"#modulename#\");
#endif
return m;
}
#ifdef __cplusplus
}
#endif
""",
'separatorsfor': {'latexdoc': '\n\n',
'restdoc': '\n\n'},
'latexdoc': ['\\section{Module \\texttt{#texmodulename#}}\n',
'#modnote#\n',
'#latexdoc#'],
'restdoc': ['Module #modulename#\n' + '=' * 80,
'\n#restdoc#']
}
defmod_rules = [
{'body': '/*eof body*/',
'method': '/*eof method*/',
'externroutines': '/*eof externroutines*/',
'routine_defs': '/*eof routine_defs*/',
'initf90modhooks': '/*eof initf90modhooks*/',
'initf2pywraphooks': '/*eof initf2pywraphooks*/',
'initcommonhooks': '/*eof initcommonhooks*/',
'latexdoc': '',
'restdoc': '',
'modnote': {hasnote: '#note#', l_not(hasnote): ''},
}
]
routine_rules = {
'separatorsfor': sepdict,
'body': """
#begintitle#
static char doc_#apiname#[] = \"\\\n#docreturn##name#(#docsignatureshort#)\\n\\nWrapper for ``#name#``.\\\n\\n#docstrsigns#\";
/* #declfortranroutine# */
static PyObject *#apiname#(const PyObject *capi_self,
PyObject *capi_args,
PyObject *capi_keywds,
#functype# (*f2py_func)(#callprotoargument#)) {
PyObject * volatile capi_buildvalue = NULL;
volatile int f2py_success = 1;
#decl#
static char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL};
#usercode#
#routdebugenter#
#ifdef F2PY_REPORT_ATEXIT
f2py_start_clock();
#endif
if (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\
\"#argformat#|#keyformat##xaformat#:#pyname#\",\\
capi_kwlist#args_capi##keys_capi##keys_xa#))\n return NULL;
#frompyobj#
/*end of frompyobj*/
#ifdef F2PY_REPORT_ATEXIT
f2py_start_call_clock();
#endif
#callfortranroutine#
if (PyErr_Occurred())
f2py_success = 0;
#ifdef F2PY_REPORT_ATEXIT
f2py_stop_call_clock();
#endif
/*end of callfortranroutine*/
if (f2py_success) {
#pyobjfrom#
/*end of pyobjfrom*/
CFUNCSMESS(\"Building return value.\\n\");
capi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#);
/*closepyobjfrom*/
#closepyobjfrom#
} /*if (f2py_success) after callfortranroutine*/
/*cleanupfrompyobj*/
#cleanupfrompyobj#
if (capi_buildvalue == NULL) {
#routdebugfailure#
} else {
#routdebugleave#
}
CFUNCSMESS(\"Freeing memory.\\n\");
#freemem#
#ifdef F2PY_REPORT_ATEXIT
f2py_stop_clock();
#endif
return capi_buildvalue;
}
#endtitle#
""",
'routine_defs': '#routine_def#',
'initf2pywraphooks': '#initf2pywraphook#',
'externroutines': '#declfortranroutine#',
'doc': '#docreturn##name#(#docsignature#)',
'docshort': '#docreturn##name#(#docsignatureshort#)',
'docs': '" #docreturn##name#(#docsignature#)\\n"\n',
'need': ['arrayobject.h', 'CFUNCSMESS', 'MINMAX'],
'cppmacros': {debugcapi: '#define DEBUGCFUNCS'},
'latexdoc': ['\\subsection{Wrapper function \\texttt{#texname#}}\n',
"""
\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)}
#routnote#
#latexdocstrsigns#
"""],
'restdoc': ['Wrapped function ``#name#``\n' + '-' * 80,
]
}
################## Rules for C/API function ##############
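# Each rule below is a dict of code-template fragments keyed by section
# name.  A rule is applied to a routine only when its '_check' predicate
# accepts it; applyrules() then substitutes the '#...#' placeholders (see
# buildapi() further down).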
rout_rules = [
{ # Init
'separatorsfor': {'callfortranroutine': '\n', 'routdebugenter': '\n', 'decl': '\n',
'routdebugleave': '\n', 'routdebugfailure': '\n',
'setjmpbuf': ' || ',
'docstrreq': '\n', 'docstropt': '\n', 'docstrout': '\n',
'docstrcbs': '\n', 'docstrsigns': '\\n"\n"',
'latexdocstrsigns': '\n',
'latexdocstrreq': '\n', 'latexdocstropt': '\n',
'latexdocstrout': '\n', 'latexdocstrcbs': '\n',
},
'kwlist': '', 'kwlistopt': '', 'callfortran': '', 'callfortranappend': '',
'docsign': '', 'docsignopt': '', 'decl': '/*decl*/',
'freemem': '/*freemem*/',
'docsignshort': '', 'docsignoptshort': '',
'docstrsigns': '', 'latexdocstrsigns': '',
'docstrreq': '\\nParameters\\n----------',
'docstropt': '\\nOther Parameters\\n----------------',
'docstrout': '\\nReturns\\n-------',
'docstrcbs': '\\nNotes\\n-----\\nCall-back functions::\\n',
'latexdocstrreq': '\\noindent Required arguments:',
'latexdocstropt': '\\noindent Optional arguments:',
'latexdocstrout': '\\noindent Return objects:',
'latexdocstrcbs': '\\noindent Call-back functions:',
'args_capi': '', 'keys_capi': '', 'functype': '',
'frompyobj': '/*frompyobj*/',
# this list will be reversed
'cleanupfrompyobj': ['/*end of cleanupfrompyobj*/'],
'pyobjfrom': '/*pyobjfrom*/',
# this list will be reversed
'closepyobjfrom': ['/*end of closepyobjfrom*/'],
'topyarr': '/*topyarr*/', 'routdebugleave': '/*routdebugleave*/',
'routdebugenter': '/*routdebugenter*/',
'routdebugfailure': '/*routdebugfailure*/',
'callfortranroutine': '/*callfortranroutine*/',
'argformat': '', 'keyformat': '', 'need_cfuncs': '',
'docreturn': '', 'return': '', 'returnformat': '', 'rformat': '',
'kwlistxa': '', 'keys_xa': '', 'xaformat': '', 'docsignxa': '', 'docsignxashort': '',
'initf2pywraphook': '',
'routnote': {hasnote: '--- #note#', l_not(hasnote): ''},
}, {
'apiname': 'f2py_rout_#modulename#_#name#',
'pyname': '#modulename#.#name#',
'decl': '',
'_check': l_not(ismoduleroutine)
}, {
'apiname': 'f2py_rout_#modulename#_#f90modulename#_#name#',
'pyname': '#modulename#.#f90modulename#.#name#',
'decl': '',
'_check': ismoduleroutine
}, { # Subroutine
'functype': 'void',
'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern void #fortranname#(#callprotoargument#);',
ismoduleroutine: '',
isdummyroutine: ''
},
'routine_def': {l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine), isdummyroutine): ' {\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'need': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'F_FUNC'},
'callfortranroutine': [
{debugcapi: [
""" fprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]},
{hasexternals: """\
if (#setjmpbuf#) {
f2py_success = 0;
} else {"""},
{isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'},
{hascallstatement: ''' #callstatement#;
/*(*f2py_func)(#callfortran#);*/'''},
{l_not(l_or(hascallstatement, isdummyroutine))
: ' (*f2py_func)(#callfortran#);'},
{isthreadsafe: ' Py_END_ALLOW_THREADS'},
{hasexternals: """ }"""}
],
'_check': l_and(issubroutine, l_not(issubroutine_wrap)),
}, { # Wrapped function
'functype': 'void',
'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);',
isdummyroutine: '',
},
'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine: ' {\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): '''
{
extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void);
PyObject* o = PyDict_GetItemString(d,"#name#");
tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL);
PyObject_SetAttrString(o,"_cpointer", tmp);
Py_DECREF(tmp);
s = PyUnicode_FromString("#name#");
PyObject_SetAttrString(o,"__name__", s);
Py_DECREF(s);
}
'''},
'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']},
'callfortranroutine': [
{debugcapi: [
""" fprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
{hasexternals: """\
if (#setjmpbuf#) {
f2py_success = 0;
} else {"""},
{isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'},
{l_not(l_or(hascallstatement, isdummyroutine))
: ' (*f2py_func)(#callfortran#);'},
{hascallstatement:
' #callstatement#;\n /*(*f2py_func)(#callfortran#);*/'},
{isthreadsafe: ' Py_END_ALLOW_THREADS'},
{hasexternals: ' }'}
],
'_check': isfunction_wrap,
}, { # Wrapped subroutine
'functype': 'void',
'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);',
isdummyroutine: '',
},
'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine: ' {\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): '''
{
extern void #F_FUNC#(#name_lower#,#NAME#)(void);
PyObject* o = PyDict_GetItemString(d,"#name#");
tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL);
PyObject_SetAttrString(o,"_cpointer", tmp);
Py_DECREF(tmp);
s = PyUnicode_FromString("#name#");
PyObject_SetAttrString(o,"__name__", s);
Py_DECREF(s);
}
'''},
'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']},
'callfortranroutine': [
{debugcapi: [
""" fprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
{hasexternals: """\
if (#setjmpbuf#) {
f2py_success = 0;
} else {"""},
{isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'},
{l_not(l_or(hascallstatement, isdummyroutine))
: ' (*f2py_func)(#callfortran#);'},
{hascallstatement:
' #callstatement#;\n /*(*f2py_func)(#callfortran#);*/'},
{isthreadsafe: ' Py_END_ALLOW_THREADS'},
{hasexternals: ' }'}
],
'_check': issubroutine_wrap,
}, { # Function
'functype': '#ctype#',
'docreturn': {l_not(isintent_hide): '#rname#,'},
'docstrout': '#pydocsignout#',
'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}',
{hasresultnote: '--- #resultnote#'}],
'callfortranroutine': [{l_and(debugcapi, isstringfunction): """\
#ifdef USESCOMPAQFORTRAN
fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\");
#else
fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
#endif
"""},
{l_and(debugcapi, l_not(isstringfunction)): """\
fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
"""}
],
'_check': l_and(isfunction, l_not(isfunction_wrap))
}, { # Scalar function
'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern #ctype# #fortranname#(#callprotoargument#);',
isdummyroutine: ''
},
'routine_def': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine: ' {\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'decl': [{iscomplexfunction_warn: ' #ctype# #name#_return_value={0,0};',
l_not(iscomplexfunction): ' #ctype# #name#_return_value=0;'},
{iscomplexfunction:
' PyObject *#name#_return_value_capi = Py_None;'}
],
'callfortranroutine': [
{hasexternals: """\
if (#setjmpbuf#) {
f2py_success = 0;
} else {"""},
{isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'},
{hascallstatement: ''' #callstatement#;
/* #name#_return_value = (*f2py_func)(#callfortran#);*/
'''},
{l_not(l_or(hascallstatement, isdummyroutine))
: ' #name#_return_value = (*f2py_func)(#callfortran#);'},
{isthreadsafe: ' Py_END_ALLOW_THREADS'},
{hasexternals: ' }'},
{l_and(debugcapi, iscomplexfunction)
: ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'},
{l_and(debugcapi, l_not(iscomplexfunction)): ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}],
'pyobjfrom': {iscomplexfunction: ' #name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'},
'need': [{l_not(isdummyroutine): 'F_FUNC'},
{iscomplexfunction: 'pyobj_from_#ctype#1'},
{islong_longfunction: 'long_long'},
{islong_doublefunction: 'long_double'}],
'returnformat': {l_not(isintent_hide): '#rformat#'},
'return': {iscomplexfunction: ',#name#_return_value_capi',
l_not(l_or(iscomplexfunction, isintent_hide)): ',#name#_return_value'},
'_check': l_and(isfunction, l_not(isstringfunction), l_not(isfunction_wrap))
}, { # String function # in use for --no-wrap
'declfortranroutine': 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
'routine_def': {l_not(l_or(ismoduleroutine, isintent_c)):
' {\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine), isintent_c):
' {\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},'
},
'decl': [' #ctype# #name#_return_value = NULL;',
' int #name#_return_value_len = 0;'],
'callfortran':'#name#_return_value,#name#_return_value_len,',
'callfortranroutine':[' #name#_return_value_len = #rlength#;',
                          ' if ((#name#_return_value = (string)malloc('
                          + '#name#_return_value_len+1)) == NULL) {',
' PyErr_SetString(PyExc_MemoryError, \"out of memory\");',
' f2py_success = 0;',
' } else {',
" (#name#_return_value)[#name#_return_value_len] = '\\0';",
' }',
' if (f2py_success) {',
{hasexternals: """\
if (#setjmpbuf#) {
f2py_success = 0;
} else {"""},
{isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'},
"""\
#ifdef USESCOMPAQFORTRAN
(*f2py_func)(#callcompaqfortran#);
#else
(*f2py_func)(#callfortran#);
#endif
""",
{isthreadsafe: ' Py_END_ALLOW_THREADS'},
{hasexternals: ' }'},
{debugcapi:
' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'},
' } /* if (f2py_success) after (string)malloc */',
],
'returnformat': '#rformat#',
'return': ',#name#_return_value',
'freemem': ' STRINGFREE(#name#_return_value);',
'need': ['F_FUNC', '#ctype#', 'STRINGFREE'],
'_check':l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete
},
{ # Debugging
'routdebugenter': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");',
'routdebugleave': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");',
'routdebugfailure': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");',
'_check': debugcapi
}
]
################ Rules for arguments ##################
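# Map type predicates to the typedef names that the generated C source
# must pull in; the names are resolved through cfuncs.typedefs (and
# cfuncs.typedefs_generated) in buildmodule() below.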
typedef_need_dict = {islong_long: 'long_long',
islong_double: 'long_double',
islong_complex: 'complex_long_double',
isunsigned_char: 'unsigned_char',
isunsigned_short: 'unsigned_short',
isunsigned: 'unsigned',
isunsigned_long_long: 'unsigned_long_long',
isunsigned_chararray: 'unsigned_char',
isunsigned_shortarray: 'unsigned_short',
isunsigned_long_longarray: 'unsigned_long_long',
issigned_long_longarray: 'long_long',
}
aux_rules = [
{
'separatorsfor': sepdict
},
{ # Common
'frompyobj': [' /* Processing auxiliary variable #varname# */',
{debugcapi: ' fprintf(stderr,"#vardebuginfo#\\n");'}, ],
'cleanupfrompyobj': ' /* End of cleaning variable #varname# */',
'need': typedef_need_dict,
},
# Scalars (not complex)
{ # Common
'decl': ' #ctype# #varname# = 0;',
'need': {hasinitvalue: 'math.h'},
'frompyobj': {hasinitvalue: ' #varname# = #init#;'},
'_check': l_and(isscalar, l_not(iscomplex)),
},
{
'return': ',#varname#',
'docstrout': '#pydocsignout#',
'docreturn': '#outvarname#,',
'returnformat': '#varrformat#',
'_check': l_and(isscalar, l_not(iscomplex), isintent_out),
},
# Complex scalars
{ # Common
'decl': ' #ctype# #varname#;',
'frompyobj': {hasinitvalue: ' #varname#.r = #init.r#, #varname#.i = #init.i#;'},
'_check': iscomplex
},
# String
{ # Common
'decl': [' #ctype# #varname# = NULL;',
' int slen(#varname#);',
],
'need':['len..'],
'_check':isstring
},
# Array
{ # Common
'decl': [' #ctype# *#varname# = NULL;',
' npy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
' const int #varname#_Rank = #rank#;',
],
'need':['len..', {hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}],
'_check': isarray
},
# Scalararray
{ # Common
'_check': l_and(isarray, l_not(iscomplexarray))
}, { # Not hidden
'_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide)
},
# Integer*1 array
{'need': '#ctype#',
'_check': isint1array,
'_depend': ''
},
# Integer*-1 array
{'need': '#ctype#',
'_check': isunsigned_chararray,
'_depend': ''
},
# Integer*-2 array
{'need': '#ctype#',
'_check': isunsigned_shortarray,
'_depend': ''
},
# Integer*-8 array
{'need': '#ctype#',
'_check': isunsigned_long_longarray,
'_depend': ''
},
# Complexarray
{'need': '#ctype#',
'_check': iscomplexarray,
'_depend': ''
},
# Stringarray
{
'callfortranappend': {isarrayofstrings: 'flen(#varname#),'},
'need': 'string',
'_check': isstringarray
}
]
arg_rules = [
{
'separatorsfor': sepdict
},
{ # Common
'frompyobj': [' /* Processing variable #varname# */',
{debugcapi: ' fprintf(stderr,"#vardebuginfo#\\n");'}, ],
'cleanupfrompyobj': ' /* End of cleaning variable #varname# */',
'_depend': '',
'need': typedef_need_dict,
},
# Doc signatures
{
'docstropt': {l_and(isoptional, isintent_nothide): '#pydocsign#'},
'docstrreq': {l_and(isrequired, isintent_nothide): '#pydocsign#'},
'docstrout': {isintent_out: '#pydocsignout#'},
'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote: '--- #note#'}]},
'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote: '--- #note#'}]},
'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}',
{l_and(hasnote, isintent_hide): '--- #note#',
l_and(hasnote, isintent_nothide): '--- See above.'}]},
'depend': ''
},
# Required/Optional arguments
{
'kwlist': '"#varname#",',
'docsign': '#varname#,',
'_check': l_and(isintent_nothide, l_not(isoptional))
},
{
'kwlistopt': '"#varname#",',
'docsignopt': '#varname#=#showinit#,',
'docsignoptshort': '#varname#,',
'_check': l_and(isintent_nothide, isoptional)
},
# Docstring/BuildValue
{
'docreturn': '#outvarname#,',
'returnformat': '#varrformat#',
'_check': isintent_out
},
# Externals (call-back functions)
{ # Common
'docsignxa': {isintent_nothide: '#varname#_extra_args=(),'},
'docsignxashort': {isintent_nothide: '#varname#_extra_args,'},
'docstropt': {isintent_nothide: '#varname#_extra_args : input tuple, optional\\n Default: ()'},
'docstrcbs': '#cbdocstr#',
'latexdocstrcbs': '\\item[] #cblatexdocstr#',
'latexdocstropt': {isintent_nothide: '\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'},
'decl': [' #cbname#_t #varname#_cb = { Py_None, NULL, 0 };',
' #cbname#_t *#varname#_cb_ptr = &#varname#_cb;',
' PyTupleObject *#varname#_xa_capi = NULL;',
{l_not(isintent_callback):
' #cbname#_typedef #varname#_cptr;'}
],
'kwlistxa': {isintent_nothide: '"#varname#_extra_args",'},
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'xaformat': {isintent_nothide: 'O!'},
'args_capi': {isrequired: ',&#varname#_cb.capi'},
'keys_capi': {isoptional: ',&#varname#_cb.capi'},
'keys_xa': ',&PyTuple_Type,&#varname#_xa_capi',
'setjmpbuf': '(setjmp(#varname#_cb.jmpbuf))',
'callfortran': {l_not(isintent_callback): '#varname#_cptr,'},
'need': ['#cbname#', 'setjmp.h'],
'_check':isexternal
},
{
'frompyobj': [{l_not(isintent_callback): """\
if(F2PyCapsule_Check(#varname#_cb.capi)) {
#varname#_cptr = F2PyCapsule_AsVoidPtr(#varname#_cb.capi);
} else {
#varname#_cptr = #cbname#;
}
"""}, {isintent_callback: """\
if (#varname#_cb.capi==Py_None) {
#varname#_cb.capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\");
if (#varname#_cb.capi) {
if (#varname#_xa_capi==NULL) {
if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) {
PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\");
if (capi_tmp) {
#varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp);
Py_DECREF(capi_tmp);
}
else {
#varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\");
}
if (#varname#_xa_capi==NULL) {
PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\");
return NULL;
}
}
}
}
if (#varname#_cb.capi==NULL) {
PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\");
return NULL;
}
}
"""},
"""\
if (create_cb_arglist(#varname#_cb.capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#varname#_cb.nofargs,&#varname#_cb.args_capi,\"failed in processing argument list for call-back #varname#.\")) {
""",
{debugcapi: ["""\
fprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#varname#_cb.nofargs);
CFUNCSMESSPY(\"for #varname#=\",#varname#_cb.capi);""",
{l_not(isintent_callback): """ fprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]},
"""\
CFUNCSMESS(\"Saving callback variables for `#varname#`.\\n\");
#varname#_cb_ptr = swap_active_#cbname#(#varname#_cb_ptr);""",
],
'cleanupfrompyobj':
"""\
CFUNCSMESS(\"Restoring callback variables for `#varname#`.\\n\");
#varname#_cb_ptr = swap_active_#cbname#(#varname#_cb_ptr);
Py_DECREF(#varname#_cb.args_capi);
}""",
'need': ['SWAP', 'create_cb_arglist'],
'_check':isexternal,
'_depend':''
},
# Scalars (not complex)
{ # Common
'decl': ' #ctype# #varname# = 0;',
'pyobjfrom': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'},
'return': {isintent_out: ',#varname#'},
'_check': l_and(isscalar, l_not(iscomplex))
}, {
'need': {hasinitvalue: 'math.h'},
'_check': l_and(isscalar, l_not(iscomplex)),
}, { # Not hidden
'decl': ' PyObject *#varname#_capi = Py_None;',
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'pyobjfrom': {isintent_inout: """\
f2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
if (f2py_success) {"""},
'closepyobjfrom': {isintent_inout: " } /*if (f2py_success) of #varname# pyobjfrom*/"},
'need': {isintent_inout: 'try_pyarr_from_#ctype#'},
'_check': l_and(isscalar, l_not(iscomplex), isintent_nothide)
}, {
'frompyobj': [
# hasinitvalue...
# if pyobj is None:
# varname = init
# else
# from_pyobj(varname)
#
# isoptional and noinitvalue...
# if pyobj is not None:
# from_pyobj(varname)
# else:
# varname is uninitialized
#
# ...
# from_pyobj(varname)
#
{hasinitvalue: ' if (#varname#_capi == Py_None) #varname# = #init#; else',
'_depend': ''},
{l_and(isoptional, l_not(hasinitvalue)): ' if (#varname#_capi != Py_None)',
'_depend': ''},
{l_not(islogical): '''\
f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");
if (f2py_success) {'''},
{islogical: '''\
#varname# = (#ctype#)PyObject_IsTrue(#varname#_capi);
f2py_success = 1;
if (f2py_success) {'''},
],
'cleanupfrompyobj': ' } /*if (f2py_success) of #varname#*/',
'need': {l_not(islogical): '#ctype#_from_pyobj'},
'_check': l_and(isscalar, l_not(iscomplex), isintent_nothide),
'_depend': ''
}, { # Hidden
'frompyobj': {hasinitvalue: ' #varname# = #init#;'},
'need': typedef_need_dict,
'_check': l_and(isscalar, l_not(iscomplex), isintent_hide),
'_depend': ''
}, { # Common
'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
'_check': l_and(isscalar, l_not(iscomplex)),
'_depend': ''
},
# Complex scalars
{ # Common
'decl': ' #ctype# #varname#;',
'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'},
'pyobjfrom': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
'return': {isintent_out: ',#varname#_capi'},
'_check': iscomplex
}, { # Not hidden
'decl': ' PyObject *#varname#_capi = Py_None;',
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'need': {isintent_inout: 'try_pyarr_from_#ctype#'},
'pyobjfrom': {isintent_inout: """\
f2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
if (f2py_success) {"""},
'closepyobjfrom': {isintent_inout: " } /*if (f2py_success) of #varname# pyobjfrom*/"},
'_check': l_and(iscomplex, isintent_nothide)
}, {
'frompyobj': [{hasinitvalue: ' if (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'},
{l_and(isoptional, l_not(hasinitvalue))
: ' if (#varname#_capi != Py_None)'},
' f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");'
'\n if (f2py_success) {'],
'cleanupfrompyobj': ' } /*if (f2py_success) of #varname# frompyobj*/',
'need': ['#ctype#_from_pyobj'],
'_check': l_and(iscomplex, isintent_nothide),
'_depend': ''
}, { # Hidden
'decl': {isintent_out: ' PyObject *#varname#_capi = Py_None;'},
'_check': l_and(iscomplex, isintent_hide)
}, {
'frompyobj': {hasinitvalue: ' #varname#.r = #init.r#, #varname#.i = #init.i#;'},
'_check': l_and(iscomplex, isintent_hide),
'_depend': ''
}, { # Common
'pyobjfrom': {isintent_out: ' #varname#_capi = pyobj_from_#ctype#1(#varname#);'},
'need': ['pyobj_from_#ctype#1'],
'_check': iscomplex
}, {
'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
'_check': iscomplex,
'_depend': ''
},
# String
{ # Common
'decl': [' #ctype# #varname# = NULL;',
' int slen(#varname#);',
' PyObject *#varname#_capi = Py_None;'],
'callfortran':'#varname#,',
'callfortranappend':'slen(#varname#),',
'pyobjfrom':[
{debugcapi:
' fprintf(stderr,'
'"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
# The trailing null value for Fortran is blank.
{l_and(isintent_out, l_not(isintent_c)):
" STRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"},
],
'return': {isintent_out: ',#varname#'},
'need': ['len..',
{l_and(isintent_out, l_not(isintent_c)): 'STRINGPADN'}],
'_check':isstring
}, { # Common
'frompyobj': [
"""\
slen(#varname#) = #length#;
f2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,"""
"""#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth#"""
"""`#varname#\' of #pyname# to C #ctype#\");
if (f2py_success) {""",
# The trailing null value for Fortran is blank.
{l_not(isintent_c):
" STRINGPADN(#varname#, slen(#varname#), '\\0', ' ');"},
],
'cleanupfrompyobj': """\
STRINGFREE(#varname#);
} /*if (f2py_success) of #varname#*/""",
'need': ['#ctype#_from_pyobj', 'len..', 'STRINGFREE',
{l_not(isintent_c): 'STRINGPADN'}],
'_check':isstring,
'_depend':''
}, { # Not hidden
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'pyobjfrom': [
{l_and(isintent_inout, l_not(isintent_c)):
" STRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"},
{isintent_inout: '''\
f2py_success = try_pyarr_from_#ctype#(#varname#_capi, #varname#,
slen(#varname#));
if (f2py_success) {'''}],
'closepyobjfrom': {isintent_inout: ' } /*if (f2py_success) of #varname# pyobjfrom*/'},
'need': {isintent_inout: 'try_pyarr_from_#ctype#',
l_and(isintent_inout, l_not(isintent_c)): 'STRINGPADN'},
'_check': l_and(isstring, isintent_nothide)
}, { # Hidden
'_check': l_and(isstring, isintent_hide)
}, {
'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
'_check': isstring,
'_depend': ''
},
# Array
{ # Common
'decl': [' #ctype# *#varname# = NULL;',
' npy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
' const int #varname#_Rank = #rank#;',
' PyArrayObject *capi_#varname#_tmp = NULL;',
' int capi_#varname#_intent = 0;',
],
'callfortran':'#varname#,',
'return':{isintent_out: ',capi_#varname#_tmp'},
'need': 'len..',
'_check': isarray
}, { # intent(overwrite) array
'decl': ' int capi_overwrite_#varname# = 1;',
'kwlistxa': '"overwrite_#varname#",',
'xaformat': 'i',
'keys_xa': ',&capi_overwrite_#varname#',
'docsignxa': 'overwrite_#varname#=1,',
'docsignxashort': 'overwrite_#varname#,',
'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 1',
'_check': l_and(isarray, isintent_overwrite),
}, {
'frompyobj': ' capi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check': l_and(isarray, isintent_overwrite),
'_depend': '',
},
{ # intent(copy) array
'decl': ' int capi_overwrite_#varname# = 0;',
'kwlistxa': '"overwrite_#varname#",',
'xaformat': 'i',
'keys_xa': ',&capi_overwrite_#varname#',
'docsignxa': 'overwrite_#varname#=0,',
'docsignxashort': 'overwrite_#varname#,',
'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 0',
'_check': l_and(isarray, isintent_copy),
}, {
'frompyobj': ' capi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check': l_and(isarray, isintent_copy),
'_depend': '',
}, {
'need': [{hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}],
'_check': isarray,
'_depend': ''
}, { # Not hidden
'decl': ' PyObject *#varname#_capi = Py_None;',
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'_check': l_and(isarray, isintent_nothide)
}, {
'frompyobj': [' #setdims#;',
' capi_#varname#_intent |= #intent#;',
{isintent_hide:
' capi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'},
{isintent_nothide:
' capi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'},
"""\
if (capi_#varname#_tmp == NULL) {
PyObject *exc, *val, *tb;
PyErr_Fetch(&exc, &val, &tb);
PyErr_SetString(exc ? exc : #modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" );
npy_PyErr_ChainExceptionsCause(exc, val, tb);
} else {
#varname# = (#ctype# *)(PyArray_DATA(capi_#varname#_tmp));
""",
{hasinitvalue: [
{isintent_nothide:
' if (#varname#_capi == Py_None) {'},
{isintent_hide: ' {'},
{iscomplexarray: ' #ctype# capi_c;'},
"""\
int *_i,capi_i=0;
CFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\");
if (initforcomb(PyArray_DIMS(capi_#varname#_tmp),PyArray_NDIM(capi_#varname#_tmp),1)) {
while ((_i = nextforcomb()))
#varname#[capi_i++] = #init#; /* fortran way */
} else {
PyObject *exc, *val, *tb;
PyErr_Fetch(&exc, &val, &tb);
PyErr_SetString(exc ? exc : #modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\");
npy_PyErr_ChainExceptionsCause(exc, val, tb);
f2py_success = 0;
}
}
if (f2py_success) {"""]},
],
'cleanupfrompyobj': [ # note that this list will be reversed
' } /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/',
{l_not(l_or(isintent_out, isintent_hide)): """\
if((PyObject *)capi_#varname#_tmp!=#varname#_capi) {
Py_XDECREF(capi_#varname#_tmp); }"""},
{l_and(isintent_hide, l_not(isintent_out))
: """ Py_XDECREF(capi_#varname#_tmp);"""},
{hasinitvalue: ' } /*if (f2py_success) of #varname# init*/'},
],
'_check': isarray,
'_depend': ''
},
# Scalararray
{ # Common
'_check': l_and(isarray, l_not(iscomplexarray))
}, { # Not hidden
'_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide)
},
# Integer*1 array
{'need': '#ctype#',
'_check': isint1array,
'_depend': ''
},
# Integer*-1 array
{'need': '#ctype#',
'_check': isunsigned_chararray,
'_depend': ''
},
# Integer*-2 array
{'need': '#ctype#',
'_check': isunsigned_shortarray,
'_depend': ''
},
# Integer*-8 array
{'need': '#ctype#',
'_check': isunsigned_long_longarray,
'_depend': ''
},
# Complexarray
{'need': '#ctype#',
'_check': iscomplexarray,
'_depend': ''
},
# Stringarray
{
'callfortranappend': {isarrayofstrings: 'flen(#varname#),'},
'need': 'string',
'_check': isstringarray
}
]
################# Rules for checking ###############
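# Each 'check' expression attached to a variable in the signature file is
# wrapped in one of the CHECKSCALAR/CHECKSTRING/CHECKARRAY/CHECKGENERIC C
# macros below; '_break' stops rule application after the first match
# (see buildapi()).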
check_rules = [
{
'frompyobj': {debugcapi: ' fprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'},
'need': 'len..'
}, {
'frompyobj': ' CHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
'cleanupfrompyobj': ' } /*CHECKSCALAR(#check#)*/',
'need': 'CHECKSCALAR',
'_check': l_and(isscalar, l_not(iscomplex)),
'_break': ''
}, {
'frompyobj': ' CHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
'cleanupfrompyobj': ' } /*CHECKSTRING(#check#)*/',
'need': 'CHECKSTRING',
'_check': isstring,
'_break': ''
}, {
'need': 'CHECKARRAY',
'frompyobj': ' CHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {',
'cleanupfrompyobj': ' } /*CHECKARRAY(#check#)*/',
'_check': isarray,
'_break': ''
}, {
'need': 'CHECKGENERIC',
'frompyobj': ' CHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {',
'cleanupfrompyobj': ' } /*CHECKGENERIC(#check#)*/',
}
]
########## Applying the rules. No need to modify what follows #############
#################### Build C/API module #######################
def buildmodule(m, um):
"""
Return
"""
outmess(' Building module "%s"...\n' % (m['name']))
ret = {}
mod_rules = defmod_rules[:]
vrd = capi_maps.modsign2map(m)
rd = dictappend({'f2py_version': f2py_version}, vrd)
funcwrappers = []
funcwrappers2 = [] # F90 codes
for n in m['interfaced']:
nb = None
for bi in m['body']:
if bi['block'] not in ['interface', 'abstract interface']:
errmess('buildmodule: Expected interface block. Skipping.\n')
continue
for b in bi['body']:
if b['name'] == n:
nb = b
break
if not nb:
print(
'buildmodule: Could not find the body of interfaced routine "%s". Skipping.\n' % (n), file=sys.stderr)
continue
nb_list = [nb]
if 'entry' in nb:
for k, a in nb['entry'].items():
nb1 = copy.deepcopy(nb)
del nb1['entry']
nb1['name'] = k
nb1['args'] = a
nb_list.append(nb1)
for nb in nb_list:
# requiresf90wrapper must be called before buildapi as it
# rewrites assumed shape arrays as automatic arrays.
isf90 = requiresf90wrapper(nb)
# options is in scope here
if options['emptygen']:
b_path = options['buildpath']
m_name = vrd['modulename']
                outmess(' Generating possibly empty wrappers\n')
Path(f"{b_path}/{vrd['coutput']}").touch()
if isf90:
# f77 + f90 wrappers
outmess(f' Maybe empty "{m_name}-f2pywrappers2.f90"\n')
Path(f'{b_path}/{m_name}-f2pywrappers2.f90').touch()
outmess(f' Maybe empty "{m_name}-f2pywrappers.f"\n')
Path(f'{b_path}/{m_name}-f2pywrappers.f').touch()
else:
# only f77 wrappers
outmess(f' Maybe empty "{m_name}-f2pywrappers.f"\n')
Path(f'{b_path}/{m_name}-f2pywrappers.f').touch()
api, wrap = buildapi(nb)
if wrap:
if isf90:
funcwrappers2.append(wrap)
else:
funcwrappers.append(wrap)
ar = applyrules(api, vrd)
rd = dictappend(rd, ar)
# Construct COMMON block support
cr, wrap = common_rules.buildhooks(m)
if wrap:
funcwrappers.append(wrap)
ar = applyrules(cr, vrd)
rd = dictappend(rd, ar)
# Construct F90 module support
mr, wrap = f90mod_rules.buildhooks(m)
if wrap:
funcwrappers2.append(wrap)
ar = applyrules(mr, vrd)
rd = dictappend(rd, ar)
for u in um:
ar = use_rules.buildusevars(u, m['use'][u['name']])
rd = dictappend(rd, ar)
needs = cfuncs.get_needs()
# Add mapped definitions
    needs['typedefs'] += [cvar for cvar in capi_maps.f2cmap_mapped
                          if cvar in typedef_need_dict.values()]
code = {}
for n in needs.keys():
code[n] = []
for k in needs[n]:
c = ''
if k in cfuncs.includes0:
c = cfuncs.includes0[k]
elif k in cfuncs.includes:
c = cfuncs.includes[k]
elif k in cfuncs.userincludes:
c = cfuncs.userincludes[k]
elif k in cfuncs.typedefs:
c = cfuncs.typedefs[k]
elif k in cfuncs.typedefs_generated:
c = cfuncs.typedefs_generated[k]
elif k in cfuncs.cppmacros:
c = cfuncs.cppmacros[k]
elif k in cfuncs.cfuncs:
c = cfuncs.cfuncs[k]
elif k in cfuncs.callbacks:
c = cfuncs.callbacks[k]
elif k in cfuncs.f90modhooks:
c = cfuncs.f90modhooks[k]
elif k in cfuncs.commonhooks:
c = cfuncs.commonhooks[k]
else:
errmess('buildmodule: unknown need %s.\n' % (repr(k)))
continue
code[n].append(c)
mod_rules.append(code)
for r in mod_rules:
if ('_check' in r and r['_check'](m)) or ('_check' not in r):
ar = applyrules(r, vrd, m)
rd = dictappend(rd, ar)
ar = applyrules(module_rules, rd)
fn = os.path.join(options['buildpath'], vrd['coutput'])
ret['csrc'] = fn
with open(fn, 'w') as f:
f.write(ar['modulebody'].replace('\t', 2 * ' '))
outmess(' Wrote C/API module "%s" to file "%s"\n' % (m['name'], fn))
if options['dorestdoc']:
fn = os.path.join(
options['buildpath'], vrd['modulename'] + 'module.rest')
with open(fn, 'w') as f:
f.write('.. -*- rest -*-\n')
f.write('\n'.join(ar['restdoc']))
outmess(' ReST Documentation is saved to file "%s/%smodule.rest"\n' %
(options['buildpath'], vrd['modulename']))
if options['dolatexdoc']:
fn = os.path.join(
options['buildpath'], vrd['modulename'] + 'module.tex')
ret['ltx'] = fn
with open(fn, 'w') as f:
f.write(
'%% This file is auto-generated with f2py (version:%s)\n' % (f2py_version))
if 'shortlatex' not in options:
f.write(
'\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n')
f.write('\n'.join(ar['latexdoc']))
if 'shortlatex' not in options:
f.write('\\end{document}')
outmess(' Documentation is saved to file "%s/%smodule.tex"\n' %
(options['buildpath'], vrd['modulename']))
if funcwrappers:
wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output'])
ret['fsrc'] = wn
with open(wn, 'w') as f:
f.write('C -*- fortran -*-\n')
f.write(
'C This file is autogenerated with f2py (version:%s)\n' % (f2py_version))
f.write(
'C It contains Fortran 77 wrappers to fortran functions.\n')
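            # Reflow the generated wrapper source into valid Fortran 77
            # fixed-form lines: statements are cut at column 66 and
            # continued on the next line with an '&' marker, while lines
            # holding a '!' comment before column 66 are kept whole.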
lines = []
for l in ('\n\n'.join(funcwrappers) + '\n').split('\n'):
if 0 <= l.find('!') < 66:
# don't split comment lines
lines.append(l + '\n')
elif l and l[0] == ' ':
while len(l) >= 66:
lines.append(l[:66] + '\n &')
l = l[66:]
lines.append(l + '\n')
else:
lines.append(l + '\n')
lines = ''.join(lines).replace('\n &\n', '\n')
f.write(lines)
outmess(' Fortran 77 wrappers are saved to "%s"\n' % (wn))
if funcwrappers2:
wn = os.path.join(
options['buildpath'], '%s-f2pywrappers2.f90' % (vrd['modulename']))
ret['fsrc'] = wn
with open(wn, 'w') as f:
f.write('! -*- f90 -*-\n')
f.write(
'! This file is autogenerated with f2py (version:%s)\n' % (f2py_version))
f.write(
'! It contains Fortran 90 wrappers to fortran functions.\n')
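            # Reflow the generated wrapper source into valid Fortran 90
            # free-form lines: overlong lines are cut (first at column 72,
            # then at 66) and continued with a trailing '&' plus a leading
            # '&' on the next line; comment lines are kept whole.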
lines = []
for l in ('\n\n'.join(funcwrappers2) + '\n').split('\n'):
if 0 <= l.find('!') < 72:
# don't split comment lines
lines.append(l + '\n')
elif len(l) > 72 and l[0] == ' ':
lines.append(l[:72] + '&\n &')
l = l[72:]
while len(l) > 66:
lines.append(l[:66] + '&\n &')
l = l[66:]
lines.append(l + '\n')
else:
lines.append(l + '\n')
lines = ''.join(lines).replace('\n &\n', '\n')
f.write(lines)
outmess(' Fortran 90 wrappers are saved to "%s"\n' % (wn))
return ret
################## Build C/API function #############
stnd = {1: 'st', 2: 'nd', 3: 'rd', 4: 'th', 5: 'th',
6: 'th', 7: 'th', 8: 'th', 9: 'th', 0: 'th'}
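# Ordinal suffixes keyed by the last decimal digit of an argument's
# position, e.g. nth=1 gives "1st argument" in buildapi() below.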
def buildapi(rout):
rout, wrap = func2subr.assubr(rout)
args, depargs = getargs2(rout)
capi_maps.depargs = depargs
var = rout['vars']
if ismoduleroutine(rout):
outmess(' Constructing wrapper function "%s.%s"...\n' %
(rout['modulename'], rout['name']))
else:
outmess(' Constructing wrapper function "%s"...\n' % (rout['name']))
# Routine
vrd = capi_maps.routsign2map(rout)
rd = dictappend({}, vrd)
for r in rout_rules:
if ('_check' in r and r['_check'](rout)) or ('_check' not in r):
ar = applyrules(r, vrd, rout)
rd = dictappend(rd, ar)
# Args
nth, nthk = 0, 0
savevrd = {}
for a in args:
vrd = capi_maps.sign2map(a, var[a])
if isintent_aux(var[a]):
_rules = aux_rules
else:
_rules = arg_rules
if not isintent_hide(var[a]):
if not isoptional(var[a]):
nth = nth + 1
vrd['nth'] = repr(nth) + stnd[nth % 10] + ' argument'
else:
nthk = nthk + 1
vrd['nth'] = repr(nthk) + stnd[nthk % 10] + ' keyword'
else:
vrd['nth'] = 'hidden'
savevrd[a] = vrd
for r in _rules:
if '_depend' in r:
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar = applyrules(r, vrd, var[a])
rd = dictappend(rd, ar)
if '_break' in r:
break
for a in depargs:
if isintent_aux(var[a]):
_rules = aux_rules
else:
_rules = arg_rules
vrd = savevrd[a]
for r in _rules:
if '_depend' not in r:
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar = applyrules(r, vrd, var[a])
rd = dictappend(rd, ar)
if '_break' in r:
break
if 'check' in var[a]:
for c in var[a]['check']:
vrd['check'] = c
ar = applyrules(check_rules, vrd, var[a])
rd = dictappend(rd, ar)
if isinstance(rd['cleanupfrompyobj'], list):
rd['cleanupfrompyobj'].reverse()
if isinstance(rd['closepyobjfrom'], list):
rd['closepyobjfrom'].reverse()
rd['docsignature'] = stripcomma(replace('#docsign##docsignopt##docsignxa#',
{'docsign': rd['docsign'],
'docsignopt': rd['docsignopt'],
'docsignxa': rd['docsignxa']}))
optargs = stripcomma(replace('#docsignopt##docsignxa#',
{'docsignxa': rd['docsignxashort'],
'docsignopt': rd['docsignoptshort']}
))
if optargs == '':
rd['docsignatureshort'] = stripcomma(
replace('#docsign#', {'docsign': rd['docsign']}))
else:
rd['docsignatureshort'] = replace('#docsign#[#docsignopt#]',
{'docsign': rd['docsign'],
'docsignopt': optargs,
})
rd['latexdocsignatureshort'] = rd['docsignatureshort'].replace('_', '\\_')
rd['latexdocsignatureshort'] = rd[
'latexdocsignatureshort'].replace(',', ', ')
cfs = stripcomma(replace('#callfortran##callfortranappend#', {
'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']}))
if len(rd['callfortranappend']) > 1:
rd['callcompaqfortran'] = stripcomma(replace('#callfortran# 0,#callfortranappend#', {
'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']}))
else:
rd['callcompaqfortran'] = cfs
rd['callfortran'] = cfs
if isinstance(rd['docreturn'], list):
rd['docreturn'] = stripcomma(
replace('#docreturn#', {'docreturn': rd['docreturn']})) + ' = '
rd['docstrsigns'] = []
rd['latexdocstrsigns'] = []
for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']:
if k in rd and isinstance(rd[k], list):
rd['docstrsigns'] = rd['docstrsigns'] + rd[k]
k = 'latex' + k
if k in rd and isinstance(rd[k], list):
rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\
['\\begin{description}'] + rd[k][1:] +\
['\\end{description}']
ar = applyrules(routine_rules, rd)
    outmess(' %s\n' % (ar['docshort']))
return ar, wrap
#################### EOF rules.py #######################
| 61,517 | Python | 39.713435 | 214 | 0.510591 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/f2py2e.py | #!/usr/bin/env python3
"""
f2py2e - Fortran to Python C/API generator. 2nd Edition.
See __usage__ below.
Copyright 1999--2011 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 08:31:19 $
Pearu Peterson
"""
import sys
import os
import pprint
import re
from pathlib import Path
from . import crackfortran
from . import rules
from . import cb_rules
from . import auxfuncs
from . import cfuncs
from . import f90mod_rules
from . import __version__
from . import capi_maps
f2py_version = __version__.version
numpy_version = __version__.version
errmess = sys.stderr.write
# outmess=sys.stdout.write
show = pprint.pprint
outmess = auxfuncs.outmess
__usage__ =\
f"""Usage:
1) To construct extension module sources:
f2py [<options>] <fortran files> [[[only:]||[skip:]] \\
<fortran functions> ] \\
[: <fortran files> ...]
2) To compile fortran files and build extension modules:
f2py -c [<options>, <build_flib options>, <extra options>] <fortran files>
3) To generate signature files:
f2py -h <filename.pyf> ...< same options as in (1) >
Description: This program generates a Python C/API file (<modulename>module.c)
that contains wrappers for given fortran functions so that they
can be called from Python. With the -c option the corresponding
extension modules are built.
Options:
  --2e-numpy       Use numpy.f2py tool with NumPy support. [DEFAULT]
  --2e-numeric     Use f2py2e tool with Numeric support.
  --2e-numarray    Use f2py2e tool with Numarray support.
--g3-numpy Use 3rd generation f2py from the separate f2py package.
[NOT AVAILABLE YET]
-h <filename> Write signatures of the fortran routines to file <filename>
and exit. You can then edit <filename> and use it instead
of <fortran files>. If <filename>==stdout then the
signatures are printed to stdout.
<fortran functions> Names of fortran routines for which Python C/API
functions will be generated. Default is all that are found
in <fortran files>.
<fortran files> Paths to fortran/signature files that will be scanned for
<fortran functions> in order to determine their signatures.
skip: Ignore fortran functions that follow until `:'.
only: Use only fortran functions that follow until `:'.
: Get back to <fortran files> mode.
-m <modulename> Name of the module; f2py generates a Python/C API
file <modulename>module.c or extension module <modulename>.
Default is 'untitled'.
'-include<header>' Writes additional headers in the C wrapper, can be passed
multiple times, generates #include <header> each time.
--[no-]lower Do [not] lower the cases in <fortran files>. By default,
--lower is assumed with -h key, and --no-lower without -h key.
--build-dir <dirname> All f2py generated files are created in <dirname>.
Default is tempfile.mkdtemp().
--overwrite-signature Overwrite existing signature file.
--[no-]latex-doc Create (or not) <modulename>module.tex.
Default is --no-latex-doc.
--short-latex Create 'incomplete' LaTeX document (without commands
\\documentclass, \\tableofcontents, and \\begin{{document}},
\\end{{document}}).
  --[no-]rest-doc  Create (or not) <modulename>module.rest.
Default is --no-rest-doc.
--debug-capi Create C/API code that reports the state of the wrappers
during runtime. Useful for debugging.
--[no-]wrap-functions Create Fortran subroutine wrappers to Fortran 77
functions. --wrap-functions is default because it ensures
maximum portability/compiler independence.
--include-paths <path1>:<path2>:... Search include files from the given
directories.
--help-link [..] List system resources found by system_info.py. See also
--link-<resource> switch below. [..] is optional list
of resources names. E.g. try 'f2py --help-link lapack_opt'.
--f2cmap <filename> Load Fortran-to-Python KIND specification from the given
file. Default: .f2py_f2cmap in current directory.
--quiet Run quietly.
--verbose Run with extra verbosity.
--skip-empty-wrappers Only generate wrapper files when needed.
-v Print f2py version ID and exit.
numpy.distutils options (only effective with -c):
--fcompiler= Specify Fortran compiler type by vendor
--compiler= Specify C compiler type (as defined by distutils)
--help-fcompiler List available Fortran compilers and exit
--f77exec= Specify the path to F77 compiler
--f90exec= Specify the path to F90 compiler
--f77flags= Specify F77 compiler flags
--f90flags= Specify F90 compiler flags
--opt= Specify optimization flags
--arch= Specify architecture specific optimization flags
--noopt Compile without optimization
--noarch Compile without arch-dependent optimization
--debug Compile with debugging information
Extra options (only effective with -c):
--link-<resource> Link extension module with <resource> as defined
by numpy.distutils/system_info.py. E.g. to link
with optimized LAPACK libraries (vecLib on MacOSX,
ATLAS elsewhere), use --link-lapack_opt.
See also --help-link switch.
-L/path/to/lib/ -l<libname>
-D<define> -U<name>
-I/path/to/include/
<filename>.o <filename>.so <filename>.a
Using the following macros may be required with non-gcc Fortran
compilers:
-DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN
-DUNDERSCORE_G77
When using -DF2PY_REPORT_ATEXIT, a performance report of F2PY
interface is printed out at exit (platforms: Linux).
When using -DF2PY_REPORT_ON_ARRAY_COPY=<int>, a message is
sent to stderr whenever F2PY interface makes a copy of an
array. Integer <int> sets the threshold for array sizes when
a message should be shown.
Version: {f2py_version}
numpy Version: {numpy_version}
Requires: Python 3.5 or higher.
License: NumPy license (see LICENSE.txt in the NumPy source code)
Copyright 1999 - 2011 Pearu Peterson all rights reserved.
https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e"""
def scaninputline(inputline):
files, skipfuncs, onlyfuncs, debug = [], [], [], []
f, f2, f3, f5, f6, f7, f8, f9, f10 = 1, 0, 0, 0, 0, 0, 0, 0, 0
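    # State flags for the scanner below: f selects the current list mode
    # (1 = files, 0 = only-functions, -1 = skip-functions); f2, f3, f6, f8,
    # f9 and f10 flag that the next token is, respectively, the signature
    # file, the module name, the build dir, the C output, the wrapper
    # output or the f2cmap file; f5 is set by --show-compilers and f7 by
    # the include-paths options.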
verbose = 1
emptygen = True
dolc = -1
dolatexdoc = 0
dorestdoc = 0
wrapfuncs = 1
buildpath = '.'
include_paths = []
signsfile, modulename = None, None
options = {'buildpath': buildpath,
'coutput': None,
'f2py_wrapper_output': None}
for l in inputline:
if l == '':
pass
elif l == 'only:':
f = 0
elif l == 'skip:':
f = -1
elif l == ':':
f = 1
elif l[:8] == '--debug-':
debug.append(l[8:])
elif l == '--lower':
dolc = 1
elif l == '--build-dir':
f6 = 1
elif l == '--no-lower':
dolc = 0
elif l == '--quiet':
verbose = 0
elif l == '--verbose':
verbose += 1
elif l == '--latex-doc':
dolatexdoc = 1
elif l == '--no-latex-doc':
dolatexdoc = 0
elif l == '--rest-doc':
dorestdoc = 1
elif l == '--no-rest-doc':
dorestdoc = 0
elif l == '--wrap-functions':
wrapfuncs = 1
elif l == '--no-wrap-functions':
wrapfuncs = 0
elif l == '--short-latex':
options['shortlatex'] = 1
elif l == '--coutput':
f8 = 1
elif l == '--f2py-wrapper-output':
f9 = 1
elif l == '--f2cmap':
f10 = 1
elif l == '--overwrite-signature':
options['h-overwrite'] = 1
elif l == '-h':
f2 = 1
elif l == '-m':
f3 = 1
elif l[:2] == '-v':
print(f2py_version)
sys.exit()
elif l == '--show-compilers':
f5 = 1
elif l[:8] == '-include':
cfuncs.outneeds['userincludes'].append(l[9:-1])
cfuncs.userincludes[l[9:-1]] = '#include ' + l[8:]
        elif l[:15] == '--include_paths':
outmess(
'f2py option --include_paths is deprecated, use --include-paths instead.\n')
f7 = 1
        elif l[:15] == '--include-paths':
f7 = 1
elif l == '--skip-empty-wrappers':
emptygen = False
elif l[0] == '-':
errmess('Unknown option %s\n' % repr(l))
sys.exit()
elif f2:
f2 = 0
signsfile = l
elif f3:
f3 = 0
modulename = l
elif f6:
f6 = 0
buildpath = l
elif f7:
f7 = 0
include_paths.extend(l.split(os.pathsep))
elif f8:
f8 = 0
options["coutput"] = l
elif f9:
f9 = 0
options["f2py_wrapper_output"] = l
elif f10:
f10 = 0
options["f2cmap_file"] = l
elif f == 1:
try:
with open(l):
pass
files.append(l)
except OSError as detail:
errmess(f'OSError: {detail!s}. Skipping file "{l!s}".\n')
elif f == -1:
skipfuncs.append(l)
elif f == 0:
onlyfuncs.append(l)
if not f5 and not files and not modulename:
print(__usage__)
sys.exit()
if not os.path.isdir(buildpath):
        if verbose:
outmess('Creating build directory %s\n' % (buildpath))
os.mkdir(buildpath)
if signsfile:
signsfile = os.path.join(buildpath, signsfile)
if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options:
errmess(
'Signature file "%s" exists!!! Use --overwrite-signature to overwrite.\n' % (signsfile))
sys.exit()
options['emptygen'] = emptygen
options['debug'] = debug
options['verbose'] = verbose
if dolc == -1 and not signsfile:
options['do-lower'] = 0
else:
options['do-lower'] = dolc
if modulename:
options['module'] = modulename
if signsfile:
options['signsfile'] = signsfile
if onlyfuncs:
options['onlyfuncs'] = onlyfuncs
if skipfuncs:
options['skipfuncs'] = skipfuncs
options['dolatexdoc'] = dolatexdoc
options['dorestdoc'] = dorestdoc
options['wrapfuncs'] = wrapfuncs
options['buildpath'] = buildpath
options['include_paths'] = include_paths
options.setdefault('f2cmap_file', None)
return files, options
def callcrackfortran(files, options):
rules.options = options
crackfortran.debug = options['debug']
crackfortran.verbose = options['verbose']
if 'module' in options:
crackfortran.f77modulename = options['module']
if 'skipfuncs' in options:
crackfortran.skipfuncs = options['skipfuncs']
if 'onlyfuncs' in options:
crackfortran.onlyfuncs = options['onlyfuncs']
crackfortran.include_paths[:] = options['include_paths']
crackfortran.dolowercase = options['do-lower']
postlist = crackfortran.crackfortran(files)
if 'signsfile' in options:
outmess('Saving signatures to file "%s"\n' % (options['signsfile']))
pyf = crackfortran.crack2fortran(postlist)
if options['signsfile'][-6:] == 'stdout':
sys.stdout.write(pyf)
else:
with open(options['signsfile'], 'w') as f:
f.write(pyf)
if options["coutput"] is None:
for mod in postlist:
mod["coutput"] = "%smodule.c" % mod["name"]
else:
for mod in postlist:
mod["coutput"] = options["coutput"]
if options["f2py_wrapper_output"] is None:
for mod in postlist:
mod["f2py_wrapper_output"] = "%s-f2pywrappers.f" % mod["name"]
else:
for mod in postlist:
mod["f2py_wrapper_output"] = options["f2py_wrapper_output"]
return postlist
def buildmodules(lst):
cfuncs.buildcfuncs()
outmess('Building modules...\n')
modules, mnames, isusedby = [], [], {}
for item in lst:
if '__user__' in item['name']:
cb_rules.buildcallbacks(item)
else:
if 'use' in item:
for u in item['use'].keys():
if u not in isusedby:
isusedby[u] = []
isusedby[u].append(item['name'])
modules.append(item)
mnames.append(item['name'])
ret = {}
for module, name in zip(modules, mnames):
if name in isusedby:
outmess('\tSkipping module "%s" which is used by %s.\n' % (
name, ','.join('"%s"' % s for s in isusedby[name])))
else:
um = []
if 'use' in module:
for u in module['use'].keys():
if u in isusedby and u in mnames:
um.append(modules[mnames.index(u)])
else:
outmess(
                            f'\tModule "{name}" uses nonexistent "{u}" '
'which will be ignored.\n')
ret[name] = {}
dict_append(ret[name], rules.buildmodule(module, um))
return ret
def dict_append(d_out, d_in):
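    """Merge ``d_in`` into ``d_out``, collecting values into lists.

    Illustrative example (values hypothetical)::

        >>> d = {'a': [1]}
        >>> dict_append(d, {'a': [2], 'b': 3})
        >>> d
        {'a': [1, 2], 'b': [3]}
    """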
for (k, v) in d_in.items():
if k not in d_out:
d_out[k] = []
if isinstance(v, list):
d_out[k] = d_out[k] + v
else:
d_out[k].append(v)
def run_main(comline_list):
"""
Equivalent to running::
f2py <args>
    where ``<args> = ' '.join(<list>)``, but in Python. Unless
``-h`` is used, this function returns a dictionary containing
information on generated modules and their dependencies on source
files.
You cannot build extension modules with this function, that is,
using ``-c`` is not allowed. Use the ``compile`` command instead.
Examples
--------
The command ``f2py -m scalar scalar.f`` can be executed from Python as
follows.
.. literalinclude:: ../../source/f2py/code/results/run_main_session.dat
:language: python
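
    A minimal sketch (assuming a Fortran source file ``scalar.f`` exists
    in the current directory)::

        >>> import numpy.f2py
        >>> r = numpy.f2py.run_main(['-m', 'scalar', 'scalar.f'])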
"""
crackfortran.reset_global_f2py_vars()
f2pydir = os.path.dirname(os.path.abspath(cfuncs.__file__))
fobjhsrc = os.path.join(f2pydir, 'src', 'fortranobject.h')
fobjcsrc = os.path.join(f2pydir, 'src', 'fortranobject.c')
files, options = scaninputline(comline_list)
auxfuncs.options = options
capi_maps.load_f2cmap_file(options['f2cmap_file'])
postlist = callcrackfortran(files, options)
isusedby = {}
for plist in postlist:
if 'use' in plist:
for u in plist['use'].keys():
if u not in isusedby:
isusedby[u] = []
isusedby[u].append(plist['name'])
for plist in postlist:
if plist['block'] == 'python module' and '__user__' in plist['name']:
if plist['name'] in isusedby:
# if not quiet:
outmess(
f'Skipping Makefile build for module "{plist["name"]}" '
'which is used by {}\n'.format(
','.join(f'"{s}"' for s in isusedby[plist['name']])))
if 'signsfile' in options:
if options['verbose'] > 1:
outmess(
'Stopping. Edit the signature file and then run f2py on the signature file: ')
outmess('%s %s\n' %
(os.path.basename(sys.argv[0]), options['signsfile']))
return
for plist in postlist:
if plist['block'] != 'python module':
if 'python module' not in options:
errmess(
'Tip: If your original code is Fortran source then you must use -m option.\n')
raise TypeError('All blocks must be python module blocks but got %s' % (
repr(plist['block'])))
auxfuncs.debugoptions = options['debug']
f90mod_rules.options = options
auxfuncs.wrapfuncs = options['wrapfuncs']
ret = buildmodules(postlist)
for mn in ret.keys():
dict_append(ret[mn], {'csrc': fobjcsrc, 'h': fobjhsrc})
return ret
def filter_files(prefix, suffix, files, remove_prefix=None):
"""
Filter files by prefix and suffix.
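
    Illustrative example (values hypothetical)::

        >>> filter_files('-I', '', ['-I/usr/include', 'foo.f'], remove_prefix=1)
        (['/usr/include'], ['foo.f'])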
"""
filtered, rest = [], []
match = re.compile(prefix + r'.*' + suffix + r'\Z').match
if remove_prefix:
ind = len(prefix)
else:
ind = 0
for file in [x.strip() for x in files]:
if match(file):
filtered.append(file[ind:])
else:
rest.append(file)
return filtered, rest
def get_prefix(module):
p = os.path.dirname(os.path.dirname(module.__file__))
return p
def run_compile():
"""
    Do it all in one call: parse the ``f2py -c ...`` command line from
    ``sys.argv`` and build the extension module(s) with numpy.distutils.
"""
import tempfile
i = sys.argv.index('-c')
del sys.argv[i]
remove_build_dir = 0
try:
i = sys.argv.index('--build-dir')
except ValueError:
i = None
if i is not None:
build_dir = sys.argv[i + 1]
del sys.argv[i + 1]
del sys.argv[i]
else:
remove_build_dir = 1
build_dir = tempfile.mkdtemp()
_reg1 = re.compile(r'--link-')
sysinfo_flags = [_m for _m in sys.argv[1:] if _reg1.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in sysinfo_flags]
if sysinfo_flags:
sysinfo_flags = [f[7:] for f in sysinfo_flags]
_reg2 = re.compile(
r'--((no-|)(wrap-functions|lower)|debug-capi|quiet|skip-empty-wrappers)|-include')
f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in f2py_flags]
f2py_flags2 = []
fl = 0
for a in sys.argv[1:]:
if a in ['only:', 'skip:']:
fl = 1
elif a == ':':
fl = 0
if fl or a == ':':
f2py_flags2.append(a)
if f2py_flags2 and f2py_flags2[-1] != ':':
f2py_flags2.append(':')
f2py_flags.extend(f2py_flags2)
sys.argv = [_m for _m in sys.argv if _m not in f2py_flags2]
_reg3 = re.compile(
r'--((f(90)?compiler(-exec|)|compiler)=|help-compiler)')
flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in flib_flags]
_reg4 = re.compile(
r'--((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help-fcompiler))')
fc_flags = [_m for _m in sys.argv[1:] if _reg4.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in fc_flags]
del_list = []
for s in flib_flags:
v = '--fcompiler='
if s[:len(v)] == v:
from numpy.distutils import fcompiler
fcompiler.load_all_fcompiler_classes()
allowed_keys = list(fcompiler.fcompiler_class.keys())
nv = ov = s[len(v):].lower()
if ov not in allowed_keys:
vmap = {} # XXX
try:
nv = vmap[ov]
except KeyError:
if ov not in vmap.values():
print('Unknown vendor: "%s"' % (s[len(v):]))
nv = ov
i = flib_flags.index(s)
flib_flags[i] = '--fcompiler=' + nv
continue
for s in del_list:
i = flib_flags.index(s)
del flib_flags[i]
assert len(flib_flags) <= 2, repr(flib_flags)
_reg5 = re.compile(r'--(verbose)')
setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in setup_flags]
if '--quiet' in f2py_flags:
setup_flags.append('--quiet')
modulename = 'untitled'
sources = sys.argv[1:]
for optname in ['--include_paths', '--include-paths', '--f2cmap']:
if optname in sys.argv:
i = sys.argv.index(optname)
f2py_flags.extend(sys.argv[i:i + 2])
del sys.argv[i + 1], sys.argv[i]
sources = sys.argv[1:]
if '-m' in sys.argv:
i = sys.argv.index('-m')
modulename = sys.argv[i + 1]
del sys.argv[i + 1], sys.argv[i]
sources = sys.argv[1:]
else:
from numpy.distutils.command.build_src import get_f2py_modulename
pyf_files, sources = filter_files('', '[.]pyf([.]src|)', sources)
sources = pyf_files + sources
for f in pyf_files:
modulename = get_f2py_modulename(f)
if modulename:
break
extra_objects, sources = filter_files('', '[.](o|a|so|dylib)', sources)
include_dirs, sources = filter_files('-I', '', sources, remove_prefix=1)
library_dirs, sources = filter_files('-L', '', sources, remove_prefix=1)
libraries, sources = filter_files('-l', '', sources, remove_prefix=1)
undef_macros, sources = filter_files('-U', '', sources, remove_prefix=1)
define_macros, sources = filter_files('-D', '', sources, remove_prefix=1)
for i in range(len(define_macros)):
name_value = define_macros[i].split('=', 1)
if len(name_value) == 1:
name_value.append(None)
if len(name_value) == 2:
define_macros[i] = tuple(name_value)
else:
print('Invalid use of -D:', name_value)
from numpy.distutils.system_info import get_info
num_info = {}
if num_info:
include_dirs.extend(num_info.get('include_dirs', []))
from numpy.distutils.core import setup, Extension
ext_args = {'name': modulename, 'sources': sources,
'include_dirs': include_dirs,
'library_dirs': library_dirs,
'libraries': libraries,
'define_macros': define_macros,
'undef_macros': undef_macros,
'extra_objects': extra_objects,
'f2py_options': f2py_flags,
}
if sysinfo_flags:
from numpy.distutils.misc_util import dict_append
for n in sysinfo_flags:
i = get_info(n)
if not i:
outmess('No %s resources found in system'
' (try `f2py --help-link`)\n' % (repr(n)))
dict_append(ext_args, **i)
ext = Extension(**ext_args)
sys.argv = [sys.argv[0]] + setup_flags
sys.argv.extend(['build',
'--build-temp', build_dir,
'--build-base', build_dir,
'--build-platlib', '.',
# disable CCompilerOpt
'--disable-optimization'])
if fc_flags:
sys.argv.extend(['config_fc'] + fc_flags)
if flib_flags:
sys.argv.extend(['build_ext'] + flib_flags)
setup(ext_modules=[ext])
if remove_build_dir and os.path.exists(build_dir):
import shutil
outmess('Removing build directory %s\n' % (build_dir))
shutil.rmtree(build_dir)
def main():
if '--help-link' in sys.argv[1:]:
sys.argv.remove('--help-link')
from numpy.distutils.system_info import show_all
show_all()
return
# Probably outdated options that were not working before 1.16
if '--g3-numpy' in sys.argv[1:]:
        sys.stderr.write("G3 f2py support is not implemented yet.\n")
sys.exit(1)
elif '--2e-numeric' in sys.argv[1:]:
sys.argv.remove('--2e-numeric')
elif '--2e-numarray' in sys.argv[1:]:
        # Note that this errors because the -DNUMARRAY argument is
        # not recognized. It is kept only for backward compatibility
        # and for the error message.
sys.argv.append("-DNUMARRAY")
sys.argv.remove('--2e-numarray')
elif '--2e-numpy' in sys.argv[1:]:
sys.argv.remove('--2e-numpy')
else:
pass
if '-c' in sys.argv[1:]:
run_compile()
else:
run_main(sys.argv[1:])
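# Sketch (hypothetical invocations): 'f2py -m mod src.f' goes through
# run_main() and only generates wrapper sources, while 'f2py -c -m mod src.f'
# goes through run_compile() and also builds the extension module.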
| 24,626 | Python | 33.931915 | 100 | 0.556363 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/diagnose.py | #!/usr/bin/env python3
import os
import sys
import tempfile
def run_command(cmd):
print('Running %r:' % (cmd))
os.system(cmd)
print('------')
def run():
_path = os.getcwd()
os.chdir(tempfile.gettempdir())
print('------')
print('os.name=%r' % (os.name))
print('------')
print('sys.platform=%r' % (sys.platform))
print('------')
print('sys.version:')
print(sys.version)
print('------')
print('sys.prefix:')
print(sys.prefix)
print('------')
print('sys.path=%r' % (':'.join(sys.path)))
print('------')
try:
import numpy
has_newnumpy = 1
except ImportError:
print('Failed to import new numpy:', sys.exc_info()[1])
has_newnumpy = 0
try:
from numpy.f2py import f2py2e
has_f2py2e = 1
except ImportError:
print('Failed to import f2py2e:', sys.exc_info()[1])
has_f2py2e = 0
try:
import numpy.distutils
has_numpy_distutils = 2
except ImportError:
try:
import numpy_distutils
has_numpy_distutils = 1
except ImportError:
print('Failed to import numpy_distutils:', sys.exc_info()[1])
has_numpy_distutils = 0
if has_newnumpy:
try:
print('Found new numpy version %r in %s' %
(numpy.__version__, numpy.__file__))
except Exception as msg:
print('error:', msg)
print('------')
if has_f2py2e:
try:
print('Found f2py2e version %r in %s' %
(f2py2e.__version__.version, f2py2e.__file__))
except Exception as msg:
print('error:', msg)
print('------')
if has_numpy_distutils:
try:
if has_numpy_distutils == 2:
print('Found numpy.distutils version %r in %r' % (
numpy.distutils.__version__,
numpy.distutils.__file__))
else:
print('Found numpy_distutils version %r in %r' % (
numpy_distutils.numpy_distutils_version.numpy_distutils_version,
numpy_distutils.__file__))
print('------')
except Exception as msg:
print('error:', msg)
print('------')
try:
if has_numpy_distutils == 1:
print(
'Importing numpy_distutils.command.build_flib ...', end=' ')
import numpy_distutils.command.build_flib as build_flib
print('ok')
print('------')
try:
print(
'Checking availability of supported Fortran compilers:')
for compiler_class in build_flib.all_compilers:
compiler_class(verbose=1).is_available()
print('------')
except Exception as msg:
print('error:', msg)
print('------')
except Exception as msg:
print(
                'error:', msg, '(ignore it, build_flib is obsolete for numpy.distutils 0.2.2 and up)')
print('------')
try:
if has_numpy_distutils == 2:
print('Importing numpy.distutils.fcompiler ...', end=' ')
import numpy.distutils.fcompiler as fcompiler
else:
print('Importing numpy_distutils.fcompiler ...', end=' ')
import numpy_distutils.fcompiler as fcompiler
print('ok')
print('------')
try:
print('Checking availability of supported Fortran compilers:')
fcompiler.show_fcompilers()
print('------')
except Exception as msg:
print('error:', msg)
print('------')
except Exception as msg:
print('error:', msg)
print('------')
try:
if has_numpy_distutils == 2:
print('Importing numpy.distutils.cpuinfo ...', end=' ')
from numpy.distutils.cpuinfo import cpuinfo
print('ok')
print('------')
else:
try:
print(
'Importing numpy_distutils.command.cpuinfo ...', end=' ')
from numpy_distutils.command.cpuinfo import cpuinfo
print('ok')
print('------')
except Exception as msg:
print('error:', msg, '(ignore it)')
print('Importing numpy_distutils.cpuinfo ...', end=' ')
from numpy_distutils.cpuinfo import cpuinfo
print('ok')
print('------')
cpu = cpuinfo()
print('CPU information:', end=' ')
for name in dir(cpuinfo):
if name[0] == '_' and name[1] != '_' and getattr(cpu, name[1:])():
print(name[1:], end=' ')
print('------')
except Exception as msg:
print('error:', msg)
print('------')
os.chdir(_path)
if __name__ == "__main__":
run()
| 5,230 | Python | 32.748387 | 102 | 0.461185 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/cb_rules.py | #!/usr/bin/env python3
"""
Build call-back mechanism for f2py2e.
Copyright 2000 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/07/20 11:27:58 $
Pearu Peterson
"""
from . import __version__
from .auxfuncs import (
applyrules, debugcapi, dictappend, errmess, getargs, hasnote, isarray,
iscomplex, iscomplexarray, iscomplexfunction, isfunction, isintent_c,
isintent_hide, isintent_in, isintent_inout, isintent_nothide,
isintent_out, isoptional, isrequired, isscalar, isstring,
isstringfunction, issubroutine, l_and, l_not, l_or, outmess, replace,
stripcomma, throw_error
)
from . import cfuncs
f2py_version = __version__.version
################## Rules for callback function ##############
cb_routine_rules = {
'cbtypedefs': 'typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);',
'body': """
#begintitle#
typedef struct {
PyObject *capi;
PyTupleObject *args_capi;
int nofargs;
jmp_buf jmpbuf;
} #name#_t;
#if defined(F2PY_THREAD_LOCAL_DECL) && !defined(F2PY_USE_PYTHON_TLS)
static F2PY_THREAD_LOCAL_DECL #name#_t *_active_#name# = NULL;
static #name#_t *swap_active_#name#(#name#_t *ptr) {
#name#_t *prev = _active_#name#;
_active_#name# = ptr;
return prev;
}
static #name#_t *get_active_#name#(void) {
return _active_#name#;
}
#else
static #name#_t *swap_active_#name#(#name#_t *ptr) {
char *key = "__f2py_cb_#name#";
return (#name#_t *)F2PySwapThreadLocalCallbackPtr(key, ptr);
}
static #name#_t *get_active_#name#(void) {
char *key = "__f2py_cb_#name#";
return (#name#_t *)F2PyGetThreadLocalCallbackPtr(key);
}
#endif
/*typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);*/
#static# #rctype# #callbackname# (#optargs##args##strarglens##noargs#) {
#name#_t cb_local = { NULL, NULL, 0 };
#name#_t *cb = NULL;
PyTupleObject *capi_arglist = NULL;
PyObject *capi_return = NULL;
PyObject *capi_tmp = NULL;
PyObject *capi_arglist_list = NULL;
int capi_j,capi_i = 0;
int capi_longjmp_ok = 1;
#decl#
#ifdef F2PY_REPORT_ATEXIT
f2py_cb_start_clock();
#endif
cb = get_active_#name#();
if (cb == NULL) {
capi_longjmp_ok = 0;
cb = &cb_local;
}
capi_arglist = cb->args_capi;
CFUNCSMESS(\"cb:Call-back function #name# (maxnofargs=#maxnofargs#(-#nofoptargs#))\\n\");
CFUNCSMESSPY(\"cb:#name#_capi=\",cb->capi);
if (cb->capi==NULL) {
capi_longjmp_ok = 0;
cb->capi = PyObject_GetAttrString(#modulename#_module,\"#argname#\");
CFUNCSMESSPY(\"cb:#name#_capi=\",cb->capi);
}
if (cb->capi==NULL) {
PyErr_SetString(#modulename#_error,\"cb: Callback #argname# not defined (as an argument or module #modulename# attribute).\\n\");
goto capi_fail;
}
if (F2PyCapsule_Check(cb->capi)) {
#name#_typedef #name#_cptr;
#name#_cptr = F2PyCapsule_AsVoidPtr(cb->capi);
#returncptr#(*#name#_cptr)(#optargs_nm##args_nm##strarglens_nm#);
#return#
}
if (capi_arglist==NULL) {
capi_longjmp_ok = 0;
capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#argname#_extra_args\");
if (capi_tmp) {
capi_arglist = (PyTupleObject *)PySequence_Tuple(capi_tmp);
Py_DECREF(capi_tmp);
if (capi_arglist==NULL) {
PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#argname#_extra_args to tuple.\\n\");
goto capi_fail;
}
} else {
PyErr_Clear();
capi_arglist = (PyTupleObject *)Py_BuildValue(\"()\");
}
}
if (capi_arglist == NULL) {
PyErr_SetString(#modulename#_error,\"Callback #argname# argument list is not set.\\n\");
goto capi_fail;
}
#setdims#
#ifdef PYPY_VERSION
#define CAPI_ARGLIST_SETITEM(idx, value) PyList_SetItem((PyObject *)capi_arglist_list, idx, value)
capi_arglist_list = PySequence_List(capi_arglist);
if (capi_arglist_list == NULL) goto capi_fail;
#else
#define CAPI_ARGLIST_SETITEM(idx, value) PyTuple_SetItem((PyObject *)capi_arglist, idx, value)
#endif
#pyobjfrom#
#undef CAPI_ARGLIST_SETITEM
#ifdef PYPY_VERSION
CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist_list);
#else
CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist);
#endif
CFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\");
#ifdef F2PY_REPORT_ATEXIT
f2py_cb_start_call_clock();
#endif
#ifdef PYPY_VERSION
capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist_list);
Py_DECREF(capi_arglist_list);
capi_arglist_list = NULL;
#else
capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist);
#endif
#ifdef F2PY_REPORT_ATEXIT
f2py_cb_stop_call_clock();
#endif
CFUNCSMESSPY(\"cb:capi_return=\",capi_return);
if (capi_return == NULL) {
fprintf(stderr,\"capi_return is NULL\\n\");
goto capi_fail;
}
if (capi_return == Py_None) {
Py_DECREF(capi_return);
capi_return = Py_BuildValue(\"()\");
}
else if (!PyTuple_Check(capi_return)) {
capi_return = Py_BuildValue(\"(N)\",capi_return);
}
capi_j = PyTuple_Size(capi_return);
capi_i = 0;
#frompyobj#
CFUNCSMESS(\"cb:#name#:successful\\n\");
Py_DECREF(capi_return);
#ifdef F2PY_REPORT_ATEXIT
f2py_cb_stop_clock();
#endif
goto capi_return_pt;
capi_fail:
fprintf(stderr,\"Call-back #name# failed.\\n\");
Py_XDECREF(capi_return);
Py_XDECREF(capi_arglist_list);
if (capi_longjmp_ok) {
longjmp(cb->jmpbuf,-1);
}
capi_return_pt:
;
#return#
}
#endtitle#
""",
'need': ['setjmp.h', 'CFUNCSMESS', 'F2PY_THREAD_LOCAL_DECL'],
'maxnofargs': '#maxnofargs#',
'nofoptargs': '#nofoptargs#',
'docstr': """\
def #argname#(#docsignature#): return #docreturn#\\n\\
#docstrsigns#""",
'latexdocstr': """
{{}\\verb@def #argname#(#latexdocsignature#): return #docreturn#@{}}
#routnote#
#latexdocstrsigns#""",
'docstrshort': 'def #argname#(#docsignature#): return #docreturn#'
}
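# A minimal sketch (hypothetical values) of how the templates above are used:
# applyrules() substitutes every '#key#' marker from a dict built per routine,
# e.g. {'name': 'cb_f_in_m', 'rctype': 'double', 'argname': 'f', ...}, so that
#     'typedef #rctype#(*#name#_typedef)(...)'
# becomes
#     'typedef double(*cb_f_in_m_typedef)(...)'.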
cb_rout_rules = [
{ # Init
'separatorsfor': {'decl': '\n',
'args': ',', 'optargs': '', 'pyobjfrom': '\n', 'freemem': '\n',
'args_td': ',', 'optargs_td': '',
'args_nm': ',', 'optargs_nm': '',
'frompyobj': '\n', 'setdims': '\n',
'docstrsigns': '\\n"\n"',
'latexdocstrsigns': '\n',
'latexdocstrreq': '\n', 'latexdocstropt': '\n',
'latexdocstrout': '\n', 'latexdocstrcbs': '\n',
},
'decl': '/*decl*/', 'pyobjfrom': '/*pyobjfrom*/', 'frompyobj': '/*frompyobj*/',
'args': [], 'optargs': '', 'return': '', 'strarglens': '', 'freemem': '/*freemem*/',
'args_td': [], 'optargs_td': '', 'strarglens_td': '',
'args_nm': [], 'optargs_nm': '', 'strarglens_nm': '',
'noargs': '',
'setdims': '/*setdims*/',
'docstrsigns': '', 'latexdocstrsigns': '',
'docstrreq': ' Required arguments:',
'docstropt': ' Optional arguments:',
'docstrout': ' Return objects:',
'docstrcbs': ' Call-back functions:',
'docreturn': '', 'docsign': '', 'docsignopt': '',
'latexdocstrreq': '\\noindent Required arguments:',
'latexdocstropt': '\\noindent Optional arguments:',
'latexdocstrout': '\\noindent Return objects:',
'latexdocstrcbs': '\\noindent Call-back functions:',
'routnote': {hasnote: '--- #note#', l_not(hasnote): ''},
}, { # Function
'decl': ' #ctype# return_value = 0;',
'frompyobj': [
{debugcapi: ' CFUNCSMESS("cb:Getting return_value->");'},
'''\
if (capi_j>capi_i) {
GETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,
"#ctype#_from_pyobj failed in converting return_value of"
" call-back function #name# to C #ctype#\\n");
} else {
fprintf(stderr,"Warning: call-back function #name# did not provide"
" return value (index=%d, type=#ctype#)\\n",capi_i);
}''',
{debugcapi:
' fprintf(stderr,"#showvalueformat#.\\n",return_value);'}
],
'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, 'GETSCALARFROMPYTUPLE'],
'return': ' return return_value;',
'_check': l_and(isfunction, l_not(isstringfunction), l_not(iscomplexfunction))
},
{ # String function
'pyobjfrom': {debugcapi: ' fprintf(stderr,"debug-capi:cb:#name#:%d:\\n",return_value_len);'},
'args': '#ctype# return_value,int return_value_len',
'args_nm': 'return_value,&return_value_len',
'args_td': '#ctype# ,int',
'frompyobj': [
{debugcapi: ' CFUNCSMESS("cb:Getting return_value->\\"");'},
"""\
if (capi_j>capi_i) {
GETSTRFROMPYTUPLE(capi_return,capi_i++,return_value,return_value_len);
} else {
fprintf(stderr,"Warning: call-back function #name# did not provide"
" return value (index=%d, type=#ctype#)\\n",capi_i);
}""",
{debugcapi:
' fprintf(stderr,"#showvalueformat#\\".\\n",return_value);'}
],
'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'},
'string.h', 'GETSTRFROMPYTUPLE'],
'return': 'return;',
'_check': isstringfunction
},
{ # Complex function
'optargs': """
#ifndef F2PY_CB_RETURNCOMPLEX
#ctype# *return_value
#endif
""",
'optargs_nm': """
#ifndef F2PY_CB_RETURNCOMPLEX
return_value
#endif
""",
'optargs_td': """
#ifndef F2PY_CB_RETURNCOMPLEX
#ctype# *
#endif
""",
'decl': """
#ifdef F2PY_CB_RETURNCOMPLEX
#ctype# return_value = {0, 0};
#endif
""",
'frompyobj': [
{debugcapi: ' CFUNCSMESS("cb:Getting return_value->");'},
"""\
if (capi_j>capi_i) {
#ifdef F2PY_CB_RETURNCOMPLEX
GETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,
\"#ctype#_from_pyobj failed in converting return_value of call-back\"
\" function #name# to C #ctype#\\n\");
#else
GETSCALARFROMPYTUPLE(capi_return,capi_i++,return_value,#ctype#,
\"#ctype#_from_pyobj failed in converting return_value of call-back\"
\" function #name# to C #ctype#\\n\");
#endif
} else {
fprintf(stderr,
\"Warning: call-back function #name# did not provide\"
\" return value (index=%d, type=#ctype#)\\n\",capi_i);
}""",
{debugcapi: """\
#ifdef F2PY_CB_RETURNCOMPLEX
fprintf(stderr,\"#showvalueformat#.\\n\",(return_value).r,(return_value).i);
#else
fprintf(stderr,\"#showvalueformat#.\\n\",(*return_value).r,(*return_value).i);
#endif
"""}
],
'return': """
#ifdef F2PY_CB_RETURNCOMPLEX
return return_value;
#else
return;
#endif
""",
'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'},
'string.h', 'GETSCALARFROMPYTUPLE', '#ctype#'],
'_check': iscomplexfunction
},
{'docstrout': ' #pydocsignout#',
'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}',
{hasnote: '--- #note#'}],
'docreturn': '#rname#,',
'_check': isfunction},
{'_check': issubroutine, 'return': 'return;'}
]
cb_arg_rules = [
{ # Doc
'docstropt': {l_and(isoptional, isintent_nothide): ' #pydocsign#'},
'docstrreq': {l_and(isrequired, isintent_nothide): ' #pydocsign#'},
'docstrout': {isintent_out: ' #pydocsignout#'},
'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote: '--- #note#'}]},
'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote: '--- #note#'}]},
'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}',
{l_and(hasnote, isintent_hide): '--- #note#',
l_and(hasnote, isintent_nothide): '--- See above.'}]},
'docsign': {l_and(isrequired, isintent_nothide): '#varname#,'},
'docsignopt': {l_and(isoptional, isintent_nothide): '#varname#,'},
'depend': ''
},
{
'args': {
l_and(isscalar, isintent_c): '#ctype# #varname_i#',
l_and(isscalar, l_not(isintent_c)): '#ctype# *#varname_i#_cb_capi',
isarray: '#ctype# *#varname_i#',
isstring: '#ctype# #varname_i#'
},
'args_nm': {
l_and(isscalar, isintent_c): '#varname_i#',
l_and(isscalar, l_not(isintent_c)): '#varname_i#_cb_capi',
isarray: '#varname_i#',
isstring: '#varname_i#'
},
'args_td': {
l_and(isscalar, isintent_c): '#ctype#',
l_and(isscalar, l_not(isintent_c)): '#ctype# *',
isarray: '#ctype# *',
isstring: '#ctype#'
},
'need': {l_or(isscalar, isarray, isstring): '#ctype#'},
# untested with multiple args
'strarglens': {isstring: ',int #varname_i#_cb_len'},
'strarglens_td': {isstring: ',int'}, # untested with multiple args
# untested with multiple args
'strarglens_nm': {isstring: ',#varname_i#_cb_len'},
},
{ # Scalars
'decl': {l_not(isintent_c): ' #ctype# #varname_i#=(*#varname_i#_cb_capi);'},
'error': {l_and(isintent_c, isintent_out,
throw_error('intent(c,out) is forbidden for callback scalar arguments')):
''},
'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->");'},
{isintent_out:
' if (capi_j>capi_i)\n GETSCALARFROMPYTUPLE(capi_return,capi_i++,#varname_i#_cb_capi,#ctype#,"#ctype#_from_pyobj failed in converting argument #varname# of call-back function #name# to C #ctype#\\n");'},
{l_and(debugcapi, l_and(l_not(iscomplex), isintent_c)):
' fprintf(stderr,"#showvalueformat#.\\n",#varname_i#);'},
{l_and(debugcapi, l_and(l_not(iscomplex), l_not( isintent_c))):
' fprintf(stderr,"#showvalueformat#.\\n",*#varname_i#_cb_capi);'},
{l_and(debugcapi, l_and(iscomplex, isintent_c)):
' fprintf(stderr,"#showvalueformat#.\\n",(#varname_i#).r,(#varname_i#).i);'},
{l_and(debugcapi, l_and(iscomplex, l_not( isintent_c))):
' fprintf(stderr,"#showvalueformat#.\\n",(*#varname_i#_cb_capi).r,(*#varname_i#_cb_capi).i);'},
],
'need': [{isintent_out: ['#ctype#_from_pyobj', 'GETSCALARFROMPYTUPLE']},
{debugcapi: 'CFUNCSMESS'}],
'_check': isscalar
}, {
'pyobjfrom': [{isintent_in: """\
if (cb->nofargs>capi_i)
if (CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1(#varname_i#)))
goto capi_fail;"""},
{isintent_inout: """\
if (cb->nofargs>capi_i)
if (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#_cb_capi)))
goto capi_fail;"""}],
'need': [{isintent_in: 'pyobj_from_#ctype#1'},
{isintent_inout: 'pyarr_from_p_#ctype#1'},
{iscomplex: '#ctype#'}],
'_check': l_and(isscalar, isintent_nothide),
'_optional': ''
}, { # String
'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->\\"");'},
""" if (capi_j>capi_i)
GETSTRFROMPYTUPLE(capi_return,capi_i++,#varname_i#,#varname_i#_cb_len);""",
{debugcapi:
' fprintf(stderr,"#showvalueformat#\\":%d:.\\n",#varname_i#,#varname_i#_cb_len);'},
],
'need': ['#ctype#', 'GETSTRFROMPYTUPLE',
{debugcapi: 'CFUNCSMESS'}, 'string.h'],
'_check': l_and(isstring, isintent_out)
}, {
'pyobjfrom': [{debugcapi: ' fprintf(stderr,"debug-capi:cb:#varname#=\\"#showvalueformat#\\":%d:\\n",#varname_i#,#varname_i#_cb_len);'},
{isintent_in: """\
if (cb->nofargs>capi_i)
if (CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1size(#varname_i#,#varname_i#_cb_len)))
goto capi_fail;"""},
{isintent_inout: """\
if (cb->nofargs>capi_i) {
int #varname_i#_cb_dims[] = {#varname_i#_cb_len};
if (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#,#varname_i#_cb_dims)))
goto capi_fail;
}"""}],
'need': [{isintent_in: 'pyobj_from_#ctype#1size'},
{isintent_inout: 'pyarr_from_p_#ctype#1'}],
'_check': l_and(isstring, isintent_nothide),
'_optional': ''
},
# Array ...
{
'decl': ' npy_intp #varname_i#_Dims[#rank#] = {#rank*[-1]#};',
'setdims': ' #cbsetdims#;',
'_check': isarray,
'_depend': ''
},
{
'pyobjfrom': [{debugcapi: ' fprintf(stderr,"debug-capi:cb:#varname#\\n");'},
{isintent_c: """\
if (cb->nofargs>capi_i) {
int itemsize_ = #atype# == NPY_STRING ? 1 : 0;
/*XXX: Hmm, what will destroy this array??? */
PyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,itemsize_,NPY_ARRAY_CARRAY,NULL);
""",
l_not(isintent_c): """\
if (cb->nofargs>capi_i) {
int itemsize_ = #atype# == NPY_STRING ? 1 : 0;
/*XXX: Hmm, what will destroy this array??? */
PyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,itemsize_,NPY_ARRAY_FARRAY,NULL);
""",
},
"""
if (tmp_arr==NULL)
goto capi_fail;
if (CAPI_ARGLIST_SETITEM(capi_i++,(PyObject *)tmp_arr))
goto capi_fail;
}"""],
'_check': l_and(isarray, isintent_nothide, l_or(isintent_in, isintent_inout)),
'_optional': '',
}, {
'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->");'},
""" if (capi_j>capi_i) {
PyArrayObject *rv_cb_arr = NULL;
if ((capi_tmp = PyTuple_GetItem(capi_return,capi_i++))==NULL) goto capi_fail;
rv_cb_arr = array_from_pyobj(#atype#,#varname_i#_Dims,#rank#,F2PY_INTENT_IN""",
{isintent_c: '|F2PY_INTENT_C'},
""",capi_tmp);
if (rv_cb_arr == NULL) {
fprintf(stderr,\"rv_cb_arr is NULL\\n\");
goto capi_fail;
}
MEMCOPY(#varname_i#,PyArray_DATA(rv_cb_arr),PyArray_NBYTES(rv_cb_arr));
if (capi_tmp != (PyObject *)rv_cb_arr) {
Py_DECREF(rv_cb_arr);
}
}""",
{debugcapi: ' fprintf(stderr,"<-.\\n");'},
],
'need': ['MEMCOPY', {iscomplexarray: '#ctype#'}],
'_check': l_and(isarray, isintent_out)
}, {
'docreturn': '#varname#,',
'_check': isintent_out
}
]
################## Build call-back module #############
cb_map = {}
def buildcallbacks(m):
cb_map[m['name']] = []
for bi in m['body']:
if bi['block'] == 'interface':
for b in bi['body']:
if b:
buildcallback(b, m['name'])
else:
errmess('warning: empty body for %s\n' % (m['name']))
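# Sketch (hypothetical module): after buildcallbacks() has processed a module
# 'm' whose interface declares an external routine 'f', cb_map holds roughly
#     {'m': [['f', 'cb_f_in_m']]}
# pairing each callback name with the name of its generated C wrapper.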
def buildcallback(rout, um):
from . import capi_maps
outmess(' Constructing call-back function "cb_%s_in_%s"\n' %
(rout['name'], um))
args, depargs = getargs(rout)
capi_maps.depargs = depargs
var = rout['vars']
vrd = capi_maps.cb_routsign2map(rout, um)
rd = dictappend({}, vrd)
cb_map[um].append([rout['name'], rd['name']])
for r in cb_rout_rules:
if ('_check' in r and r['_check'](rout)) or ('_check' not in r):
ar = applyrules(r, vrd, rout)
rd = dictappend(rd, ar)
savevrd = {}
for i, a in enumerate(args):
vrd = capi_maps.cb_sign2map(a, var[a], index=i)
savevrd[a] = vrd
for r in cb_arg_rules:
if '_depend' in r:
continue
if '_optional' in r and isoptional(var[a]):
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar = applyrules(r, vrd, var[a])
rd = dictappend(rd, ar)
if '_break' in r:
break
for a in args:
vrd = savevrd[a]
for r in cb_arg_rules:
if '_depend' in r:
continue
if ('_optional' not in r) or ('_optional' in r and isrequired(var[a])):
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar = applyrules(r, vrd, var[a])
rd = dictappend(rd, ar)
if '_break' in r:
break
for a in depargs:
vrd = savevrd[a]
for r in cb_arg_rules:
if '_depend' not in r:
continue
if '_optional' in r:
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar = applyrules(r, vrd, var[a])
rd = dictappend(rd, ar)
if '_break' in r:
break
if 'args' in rd and 'optargs' in rd:
if isinstance(rd['optargs'], list):
rd['optargs'] = rd['optargs'] + ["""
#ifndef F2PY_CB_RETURNCOMPLEX
,
#endif
"""]
rd['optargs_nm'] = rd['optargs_nm'] + ["""
#ifndef F2PY_CB_RETURNCOMPLEX
,
#endif
"""]
rd['optargs_td'] = rd['optargs_td'] + ["""
#ifndef F2PY_CB_RETURNCOMPLEX
,
#endif
"""]
if isinstance(rd['docreturn'], list):
rd['docreturn'] = stripcomma(
replace('#docreturn#', {'docreturn': rd['docreturn']}))
optargs = stripcomma(replace('#docsignopt#',
{'docsignopt': rd['docsignopt']}
))
if optargs == '':
rd['docsignature'] = stripcomma(
replace('#docsign#', {'docsign': rd['docsign']}))
else:
rd['docsignature'] = replace('#docsign#[#docsignopt#]',
{'docsign': rd['docsign'],
'docsignopt': optargs,
})
rd['latexdocsignature'] = rd['docsignature'].replace('_', '\\_')
rd['latexdocsignature'] = rd['latexdocsignature'].replace(',', ', ')
rd['docstrsigns'] = []
rd['latexdocstrsigns'] = []
for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']:
if k in rd and isinstance(rd[k], list):
rd['docstrsigns'] = rd['docstrsigns'] + rd[k]
k = 'latex' + k
if k in rd and isinstance(rd[k], list):
rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\
['\\begin{description}'] + rd[k][1:] +\
['\\end{description}']
if 'args' not in rd:
rd['args'] = ''
rd['args_td'] = ''
rd['args_nm'] = ''
if not (rd.get('args') or rd.get('optargs') or rd.get('strarglens')):
rd['noargs'] = 'void'
ar = applyrules(cb_routine_rules, rd)
cfuncs.callbacks[rd['name']] = ar['body']
if isinstance(ar['need'], str):
ar['need'] = [ar['need']]
if 'need' in rd:
for t in cfuncs.typedefs.keys():
if t in rd['need']:
ar['need'].append(t)
cfuncs.typedefs_generated[rd['name'] + '_typedef'] = ar['cbtypedefs']
ar['need'].append(rd['name'] + '_typedef')
cfuncs.needs[rd['name']] = ar['need']
capi_maps.lcb2_map[rd['name']] = {'maxnofargs': ar['maxnofargs'],
'nofoptargs': ar['nofoptargs'],
'docstr': ar['docstr'],
'latexdocstr': ar['latexdocstr'],
'argname': rd['argname']
}
outmess(' %s\n' % (ar['docstrshort']))
return
################## Build call-back function #############
| 24,854 | Python | 37.775351 | 236 | 0.520158 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/auxfuncs.py | #!/usr/bin/env python3
"""
Auxiliary functions for f2py2e.
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy (BSD style) LICENSE.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/07/24 19:01:55 $
Pearu Peterson
"""
import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
__all__ = [
'applyrules', 'debugcapi', 'dictappend', 'errmess', 'gentitle',
'getargs2', 'getcallprotoargument', 'getcallstatement',
'getfortranname', 'getpymethoddef', 'getrestdoc', 'getusercode',
'getusercode1', 'hasbody', 'hascallstatement', 'hascommon',
'hasexternals', 'hasinitvalue', 'hasnote', 'hasresultnote',
'isallocatable', 'isarray', 'isarrayofstrings', 'iscomplex',
'iscomplexarray', 'iscomplexfunction', 'iscomplexfunction_warn',
'isdouble', 'isdummyroutine', 'isexternal', 'isfunction',
'isfunction_wrap', 'isint1array', 'isinteger', 'isintent_aux',
'isintent_c', 'isintent_callback', 'isintent_copy', 'isintent_dict',
'isintent_hide', 'isintent_in', 'isintent_inout', 'isintent_inplace',
'isintent_nothide', 'isintent_out', 'isintent_overwrite', 'islogical',
'islogicalfunction', 'islong_complex', 'islong_double',
'islong_doublefunction', 'islong_long', 'islong_longfunction',
'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isrequired',
'isroutine', 'isscalar', 'issigned_long_longarray', 'isstring',
'isstringarray', 'isstringfunction', 'issubroutine',
'issubroutine_wrap', 'isthreadsafe', 'isunsigned', 'isunsigned_char',
'isunsigned_chararray', 'isunsigned_long_long',
'isunsigned_long_longarray', 'isunsigned_short',
'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess',
'replace', 'show', 'stripcomma', 'throw_error',
]
f2py_version = __version__.version
errmess = sys.stderr.write
show = pprint.pprint
options = {}
debugoptions = []
wrapfuncs = 1
def outmess(t):
if options.get('verbose', 1):
sys.stdout.write(t)
def debugcapi(var):
return 'capi' in debugoptions
def _isstring(var):
return 'typespec' in var and var['typespec'] == 'character' and \
not isexternal(var)
def isstring(var):
return _isstring(var) and not isarray(var)
def ischaracter(var):
return isstring(var) and 'charselector' not in var
def isstringarray(var):
return isarray(var) and _isstring(var)
def isarrayofstrings(var):
# leaving out '*' for now so that `character*(*) a(m)` and `character
# a(m,*)` are treated differently. Luckily `character**` is illegal.
return isstringarray(var) and var['dimension'][-1] == '(*)'
def isarray(var):
return 'dimension' in var and not isexternal(var)
def isscalar(var):
return not (isarray(var) or isstring(var) or isexternal(var))
def iscomplex(var):
return isscalar(var) and \
var.get('typespec') in ['complex', 'double complex']
def islogical(var):
return isscalar(var) and var.get('typespec') == 'logical'
def isinteger(var):
return isscalar(var) and var.get('typespec') == 'integer'
def isreal(var):
return isscalar(var) and var.get('typespec') == 'real'
def get_kind(var):
try:
return var['kindselector']['*']
except KeyError:
try:
return var['kindselector']['kind']
except KeyError:
pass
def islong_long(var):
if not isscalar(var):
return 0
if var.get('typespec') not in ['integer', 'logical']:
return 0
return get_kind(var) == '8'
def isunsigned_char(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var) == '-1'
def isunsigned_short(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var) == '-2'
def isunsigned(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var) == '-4'
def isunsigned_long_long(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var) == '-8'
def isdouble(var):
if not isscalar(var):
return 0
if not var.get('typespec') == 'real':
return 0
return get_kind(var) == '8'
def islong_double(var):
if not isscalar(var):
return 0
if not var.get('typespec') == 'real':
return 0
return get_kind(var) == '16'
def islong_complex(var):
if not iscomplex(var):
return 0
return get_kind(var) == '32'
def iscomplexarray(var):
return isarray(var) and \
var.get('typespec') in ['complex', 'double complex']
def isint1array(var):
return isarray(var) and var.get('typespec') == 'integer' \
and get_kind(var) == '1'
def isunsigned_chararray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var) == '-1'
def isunsigned_shortarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var) == '-2'
def isunsignedarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var) == '-4'
def isunsigned_long_longarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var) == '-8'
def issigned_chararray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var) == '1'
def issigned_shortarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var) == '2'
def issigned_array(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var) == '4'
def issigned_long_longarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var) == '8'
def isallocatable(var):
return 'attrspec' in var and 'allocatable' in var['attrspec']
def ismutable(var):
    return 'dimension' in var and not isstring(var)
def ismoduleroutine(rout):
return 'modulename' in rout
def ismodule(rout):
return 'block' in rout and 'module' == rout['block']
def isfunction(rout):
return 'block' in rout and 'function' == rout['block']
def isfunction_wrap(rout):
if isintent_c(rout):
return 0
return wrapfuncs and isfunction(rout) and (not isexternal(rout))
def issubroutine(rout):
return 'block' in rout and 'subroutine' == rout['block']
def issubroutine_wrap(rout):
if isintent_c(rout):
return 0
return issubroutine(rout) and hasassumedshape(rout)
def hasassumedshape(rout):
if rout.get('hasassumedshape'):
return True
for a in rout['args']:
for d in rout['vars'].get(a, {}).get('dimension', []):
if d == ':':
rout['hasassumedshape'] = True
return True
return False
def requiresf90wrapper(rout):
return ismoduleroutine(rout) or hasassumedshape(rout)
def isroutine(rout):
return isfunction(rout) or issubroutine(rout)
def islogicalfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a = rout['result']
else:
a = rout['name']
if a in rout['vars']:
return islogical(rout['vars'][a])
return 0
def islong_longfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a = rout['result']
else:
a = rout['name']
if a in rout['vars']:
return islong_long(rout['vars'][a])
return 0
def islong_doublefunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a = rout['result']
else:
a = rout['name']
if a in rout['vars']:
return islong_double(rout['vars'][a])
return 0
def iscomplexfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a = rout['result']
else:
a = rout['name']
if a in rout['vars']:
return iscomplex(rout['vars'][a])
return 0
def iscomplexfunction_warn(rout):
if iscomplexfunction(rout):
outmess("""\
**************************************************************
Warning: code with a function returning complex value
may not work correctly with your Fortran compiler.
When using GNU gcc/g77 compilers, codes should work
correctly for callbacks with:
f2py -c -DF2PY_CB_RETURNCOMPLEX
**************************************************************\n""")
return 1
return 0
def isstringfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a = rout['result']
else:
a = rout['name']
if a in rout['vars']:
return isstring(rout['vars'][a])
return 0
def hasexternals(rout):
return 'externals' in rout and rout['externals']
def isthreadsafe(rout):
return 'f2pyenhancements' in rout and \
'threadsafe' in rout['f2pyenhancements']
def hasvariables(rout):
return 'vars' in rout and rout['vars']
def isoptional(var):
return ('attrspec' in var and 'optional' in var['attrspec'] and
'required' not in var['attrspec']) and isintent_nothide(var)
def isexternal(var):
return 'attrspec' in var and 'external' in var['attrspec']
def isrequired(var):
return not isoptional(var) and isintent_nothide(var)
def isintent_in(var):
if 'intent' not in var:
return 1
if 'hide' in var['intent']:
return 0
if 'inplace' in var['intent']:
return 0
if 'in' in var['intent']:
return 1
if 'out' in var['intent']:
return 0
if 'inout' in var['intent']:
return 0
if 'outin' in var['intent']:
return 0
return 1
def isintent_inout(var):
return ('intent' in var and ('inout' in var['intent'] or
'outin' in var['intent']) and 'in' not in var['intent'] and
'hide' not in var['intent'] and 'inplace' not in var['intent'])
def isintent_out(var):
return 'out' in var.get('intent', [])
def isintent_hide(var):
return ('intent' in var and ('hide' in var['intent'] or
('out' in var['intent'] and 'in' not in var['intent'] and
(not l_or(isintent_inout, isintent_inplace)(var)))))
def isintent_nothide(var):
return not isintent_hide(var)
def isintent_c(var):
return 'c' in var.get('intent', [])
def isintent_cache(var):
return 'cache' in var.get('intent', [])
def isintent_copy(var):
return 'copy' in var.get('intent', [])
def isintent_overwrite(var):
return 'overwrite' in var.get('intent', [])
def isintent_callback(var):
return 'callback' in var.get('intent', [])
def isintent_inplace(var):
return 'inplace' in var.get('intent', [])
def isintent_aux(var):
return 'aux' in var.get('intent', [])
def isintent_aligned4(var):
return 'aligned4' in var.get('intent', [])
def isintent_aligned8(var):
return 'aligned8' in var.get('intent', [])
def isintent_aligned16(var):
return 'aligned16' in var.get('intent', [])
isintent_dict = {isintent_in: 'INTENT_IN', isintent_inout: 'INTENT_INOUT',
isintent_out: 'INTENT_OUT', isintent_hide: 'INTENT_HIDE',
isintent_cache: 'INTENT_CACHE',
isintent_c: 'INTENT_C', isoptional: 'OPTIONAL',
isintent_inplace: 'INTENT_INPLACE',
isintent_aligned4: 'INTENT_ALIGNED4',
isintent_aligned8: 'INTENT_ALIGNED8',
isintent_aligned16: 'INTENT_ALIGNED16',
}
def isprivate(var):
return 'attrspec' in var and 'private' in var['attrspec']
def hasinitvalue(var):
return '=' in var
def hasinitvalueasstring(var):
if not hasinitvalue(var):
return 0
return var['='][0] in ['"', "'"]
def hasnote(var):
return 'note' in var
def hasresultnote(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a = rout['result']
else:
a = rout['name']
if a in rout['vars']:
return hasnote(rout['vars'][a])
return 0
def hascommon(rout):
return 'common' in rout
def containscommon(rout):
if hascommon(rout):
return 1
if hasbody(rout):
for b in rout['body']:
if containscommon(b):
return 1
return 0
def containsmodule(block):
if ismodule(block):
return 1
if not hasbody(block):
return 0
for b in block['body']:
if containsmodule(b):
return 1
return 0
def hasbody(rout):
return 'body' in rout
def hascallstatement(rout):
return getcallstatement(rout) is not None
def istrue(var):
return 1
def isfalse(var):
return 0
class F2PYError(Exception):
pass
class throw_error:
def __init__(self, mess):
self.mess = mess
def __call__(self, var):
mess = '\n\n var = %s\n Message: %s\n' % (var, self.mess)
raise F2PYError(mess)
def l_and(*f):
l, l2 = 'lambda v', []
for i in range(len(f)):
l = '%s,f%d=f[%d]' % (l, i, i)
l2.append('f%d(v)' % (i))
return eval('%s:%s' % (l, ' and '.join(l2)))
def l_or(*f):
l, l2 = 'lambda v', []
for i in range(len(f)):
l = '%s,f%d=f[%d]' % (l, i, i)
l2.append('f%d(v)' % (i))
return eval('%s:%s' % (l, ' or '.join(l2)))
def l_not(f):
return eval('lambda v,f=f:not f(v)')
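# Sketch of the combinators above (hypothetical var dict): the eval-built
# lambdas bind f0..fN as default arguments, so each returned predicate keeps
# its own copies of the input predicates.  For example:
#     l_and(isscalar, isintent_in)({'typespec': 'integer'})  # -> truthy
#     l_not(isarray)({'typespec': 'real'})                   # -> True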
def isdummyroutine(rout):
try:
return rout['f2pyenhancements']['fortranname'] == ''
except KeyError:
return 0
def getfortranname(rout):
try:
name = rout['f2pyenhancements']['fortranname']
if name == '':
raise KeyError
if not name:
errmess('Failed to use fortranname from %s\n' %
(rout['f2pyenhancements']))
raise KeyError
except KeyError:
name = rout['name']
return name
def getmultilineblock(rout, blockname, comment=1, counter=0):
try:
r = rout['f2pyenhancements'].get(blockname)
except KeyError:
return
if not r:
return
if counter > 0 and isinstance(r, str):
return
if isinstance(r, list):
if counter >= len(r):
return
r = r[counter]
if r[:3] == "'''":
if comment:
r = '\t/* start ' + blockname + \
' multiline (' + repr(counter) + ') */\n' + r[3:]
else:
r = r[3:]
if r[-3:] == "'''":
if comment:
r = r[:-3] + '\n\t/* end multiline (' + repr(counter) + ')*/'
else:
r = r[:-3]
else:
errmess("%s multiline block should end with `'''`: %s\n"
% (blockname, repr(r)))
return r
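# Sketch (hypothetical .pyf content): for a routine carrying
#     callstatement '''(*f2py_func)(&n, x);'''
# getmultilineblock(rout, 'callstatement') returns the block body with the
# triple quotes replaced by '/* start ... */' and '/* end ... */' C comments
# (or the bare body when comment=0).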
def getcallstatement(rout):
return getmultilineblock(rout, 'callstatement')
def getcallprotoargument(rout, cb_map={}):
r = getmultilineblock(rout, 'callprotoargument', comment=0)
if r:
return r
if hascallstatement(rout):
outmess(
'warning: callstatement is defined without callprotoargument\n')
return
from .capi_maps import getctype
arg_types, arg_types2 = [], []
if l_and(isstringfunction, l_not(isfunction_wrap))(rout):
arg_types.extend(['char*', 'size_t'])
for n in rout['args']:
var = rout['vars'][n]
if isintent_callback(var):
continue
if n in cb_map:
ctype = cb_map[n] + '_typedef'
else:
ctype = getctype(var)
if l_and(isintent_c, l_or(isscalar, iscomplex))(var):
pass
elif isstring(var):
pass
else:
ctype = ctype + '*'
if isstring(var) or isarrayofstrings(var):
arg_types2.append('size_t')
arg_types.append(ctype)
proto_args = ','.join(arg_types + arg_types2)
if not proto_args:
proto_args = 'void'
return proto_args
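# Sketch (hypothetical routine): for 'subroutine s(n, x)' with 'integer n' and
# 'real*8 x(n)', the computed prototype argument string is 'int*,double*';
# intent(c) scalars drop the '*', and character arguments contribute trailing
# 'size_t' length entries appended after the value arguments.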
def getusercode(rout):
return getmultilineblock(rout, 'usercode')
def getusercode1(rout):
return getmultilineblock(rout, 'usercode', counter=1)
def getpymethoddef(rout):
return getmultilineblock(rout, 'pymethoddef')
def getargs(rout):
sortargs, args = [], []
if 'args' in rout:
args = rout['args']
if 'sortvars' in rout:
for a in rout['sortvars']:
if a in args:
sortargs.append(a)
for a in args:
if a not in sortargs:
sortargs.append(a)
else:
sortargs = rout['args']
return args, sortargs
def getargs2(rout):
sortargs, args = [], rout.get('args', [])
auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a])
and a not in args]
args = auxvars + args
if 'sortvars' in rout:
for a in rout['sortvars']:
if a in args:
sortargs.append(a)
for a in args:
if a not in sortargs:
sortargs.append(a)
else:
sortargs = auxvars + rout['args']
return args, sortargs
def getrestdoc(rout):
if 'f2pymultilines' not in rout:
return None
k = None
if rout['block'] == 'python module':
k = rout['block'], rout['name']
return rout['f2pymultilines'].get(k, None)
def gentitle(name):
l = (80 - len(name) - 6) // 2
return '/*%s %s %s*/' % (l * '*', name, l * '*')
def flatlist(l):
if isinstance(l, list):
return reduce(lambda x, y, f=flatlist: x + f(y), l, [])
return [l]
def stripcomma(s):
if s and s[-1] == ',':
return s[:-1]
return s
def replace(str, d, defaultsep=''):
if isinstance(d, list):
return [replace(str, _m, defaultsep) for _m in d]
if isinstance(str, list):
return [replace(_m, d, defaultsep) for _m in str]
for k in 2 * list(d.keys()):
if k == 'separatorsfor':
continue
if 'separatorsfor' in d and k in d['separatorsfor']:
sep = d['separatorsfor'][k]
else:
sep = defaultsep
if isinstance(d[k], list):
str = str.replace('#%s#' % (k), sep.join(flatlist(d[k])))
else:
str = str.replace('#%s#' % (k), d[k])
return str
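# Sketch (hypothetical values):
#     replace('#ctype# #varname#;', {'ctype': 'double', 'varname': 'x'})
# yields 'double x;'.  List values are flattened and joined with the
# separator from d['separatorsfor'][key] (default '') before substitution;
# the keys are walked twice so markers introduced by one substitution can
# still be resolved.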
def dictappend(rd, ar):
if isinstance(ar, list):
for a in ar:
rd = dictappend(rd, a)
return rd
for k in ar.keys():
if k[0] == '_':
continue
if k in rd:
if isinstance(rd[k], str):
rd[k] = [rd[k]]
if isinstance(rd[k], list):
if isinstance(ar[k], list):
rd[k] = rd[k] + ar[k]
else:
rd[k].append(ar[k])
elif isinstance(rd[k], dict):
if isinstance(ar[k], dict):
if k == 'separatorsfor':
for k1 in ar[k].keys():
if k1 not in rd[k]:
rd[k][k1] = ar[k][k1]
else:
rd[k] = dictappend(rd[k], ar[k])
else:
rd[k] = ar[k]
return rd
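# Sketch (hypothetical dicts):
#     dictappend({'decl': 'int i;'}, {'decl': 'int j;'})
# -> {'decl': ['int i;', 'int j;']}.  String values are promoted to lists,
# nested dicts are merged recursively, and keys of the second argument that
# start with '_' are ignored.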
def applyrules(rules, d, var={}):
ret = {}
if isinstance(rules, list):
for r in rules:
rr = applyrules(r, d, var)
ret = dictappend(ret, rr)
if '_break' in rr:
break
return ret
if '_check' in rules and (not rules['_check'](var)):
return ret
if 'need' in rules:
res = applyrules({'needs': rules['need']}, d, var)
if 'needs' in res:
cfuncs.append_needs(res['needs'])
for k in rules.keys():
if k == 'separatorsfor':
ret[k] = rules[k]
continue
if isinstance(rules[k], str):
ret[k] = replace(rules[k], d)
elif isinstance(rules[k], list):
ret[k] = []
for i in rules[k]:
ar = applyrules({k: i}, d, var)
if k in ar:
ret[k].append(ar[k])
elif k[0] == '_':
continue
elif isinstance(rules[k], dict):
ret[k] = []
for k1 in rules[k].keys():
if isinstance(k1, types.FunctionType) and k1(var):
if isinstance(rules[k][k1], list):
for i in rules[k][k1]:
if isinstance(i, dict):
res = applyrules({'supertext': i}, d, var)
if 'supertext' in res:
i = res['supertext']
else:
i = ''
ret[k].append(replace(i, d))
else:
i = rules[k][k1]
if isinstance(i, dict):
res = applyrules({'supertext': i}, d)
if 'supertext' in res:
i = res['supertext']
else:
i = ''
ret[k].append(replace(i, d))
else:
errmess('applyrules: ignoring rule %s.\n' % repr(rules[k]))
if isinstance(ret[k], list):
if len(ret[k]) == 1:
ret[k] = ret[k][0]
if ret[k] == []:
del ret[k]
return ret
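# Sketch (hypothetical rule): given some variable dict `v` that satisfies
# isscalar,
#     applyrules({'decl': '    #ctype# #varname#;', '_check': isscalar},
#                {'ctype': 'int', 'varname': 'n'}, v)
# returns {'decl': '    int n;'}; if the '_check' predicate fails, an empty
# dict is returned instead.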
| 21,779 | Python | 24.384615 | 78 | 0.549933 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/__main__.py | # See:
# https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e
from numpy.f2py.f2py2e import main
main()
| 130 | Python | 20.83333 | 79 | 0.753846 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/crackfortran.py | #!/usr/bin/env python3
"""
crackfortran --- read fortran (77,90) code and extract declaration information.
Copyright 1999-2004 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/09/27 07:13:49 $
Pearu Peterson
Usage of crackfortran:
======================
Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h <pyffilename>
-m <module name for f77 routines>,--ignore-contains
Functions: crackfortran, crack2fortran
The following Fortran statements/constructions are supported
(or will be if needed):
block data,byte,call,character,common,complex,contains,data,
dimension,double complex,double precision,end,external,function,
implicit,integer,intent,interface,intrinsic,
logical,module,optional,parameter,private,public,
program,real,(sequence?),subroutine,type,use,virtual,
include,pythonmodule
Note: 'virtual' is mapped to 'dimension'.
Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is a minor bug).
Note: code after 'contains' will be ignored until its scope ends.
Note: 'common' statement is extended: dimensions are moved to variable definitions
Note: f2py directive: <commentchar>f2py<line> is read as <line>
Note: 'pythonmodule' is introduced to represent a Python module
Usage:
`postlist=crackfortran(files)`
`postlist` contains declaration information read from the list of files `files`.
`crack2fortran(postlist)` returns a fortran code to be saved to pyf-file
`postlist` has the following structure:
*** it is a list of dictionaries containing `blocks':
B = {'block','body','vars','parent_block'[,'name','prefix','args','result',
'implicit','externals','interfaced','common','sortvars',
'commonvars','note']}
B['block'] = 'interface' | 'function' | 'subroutine' | 'module' |
'program' | 'block data' | 'type' | 'pythonmodule' |
'abstract interface'
B['body'] --- list containing `subblocks' with the same structure as `blocks'
B['parent_block'] --- dictionary of a parent block:
C['body'][<index>]['parent_block'] is C
B['vars'] --- dictionary of variable definitions
B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first)
B['name'] --- name of the block (not if B['block']=='interface')
B['prefix'] --- prefix string (only if B['block']=='function')
B['args'] --- list of argument names if B['block']== 'function' | 'subroutine'
B['result'] --- name of the return value (only if B['block']=='function')
B['implicit'] --- dictionary {'a':<variable definition>,'b':...} | None
B['externals'] --- list of variables being external
B['interfaced'] --- list of variables being external and defined
B['common'] --- dictionary of common blocks (list of objects)
B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions)
B['from'] --- string showing the 'parents' of the current block
B['use'] --- dictionary of modules used in current block:
{<modulename>:{['only':<0|1>],['map':{<local_name1>:<use_name1>,...}]}}
B['note'] --- list of LaTeX comments on the block
B['f2pyenhancements'] --- optional dictionary
{'threadsafe':'','fortranname':<name>,
'callstatement':<C-expr>|<multi-line block>,
'callprotoargument':<C-expr-list>,
'usercode':<multi-line block>|<list of multi-line blocks>,
'pymethoddef:<multi-line block>'
}
B['entry'] --- dictionary {entryname:argslist,..}
B['varnames'] --- list of variable names given in the order of reading the
Fortran code, useful for derived types.
B['saved_interface'] --- string of the scanned routine signature; defines the explicit interface
*** Variable definition is a dictionary
D = B['vars'][<variable name>] =
{'typespec'[,'attrspec','kindselector','charselector','=','typename']}
D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' |
'double precision' | 'integer' | 'logical' | 'real' | 'type'
D['attrspec'] --- list of attributes (e.g. 'dimension(<arrayspec>)',
'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)',
'optional','required', etc)
K = D['kindselector'] = {['*','kind']} (only if D['typespec'] =
'complex' | 'integer' | 'logical' | 'real' )
C = D['charselector'] = {['*','len','kind']}
(only if D['typespec']=='character')
D['='] --- initialization expression string
D['typename'] --- name of the type if D['typespec']=='type'
D['dimension'] --- list of dimension bounds
D['intent'] --- list of intent specifications
D['depend'] --- list of variable names on which the current variable depends
D['check'] --- list of C-expressions; if a C-expr returns zero, an exception is raised
D['note'] --- list of LaTeX comments on the variable
*** Meaning of kind/char selectors (a few examples):
D['typespec']*K['*']
D['typespec'](kind=K['kind'])
character*C['*']
character(len=C['len'],kind=C['kind'])
(see also fortran type declaration statement formats below)
Fortran 90 type declaration statement format (F77 is subset of F90)
====================================================================
(Main source: IBM XL Fortran 5.1 Language Reference Manual)
type declaration = <typespec> [[<attrspec>]::] <entitydecl>
<typespec> = byte |
character[<charselector>] |
complex[<kindselector>] |
double complex |
double precision |
integer[<kindselector>] |
logical[<kindselector>] |
real[<kindselector>] |
type(<typename>)
<charselector> = * <charlen> |
([len=]<len>[,[kind=]<kind>]) |
(kind=<kind>[,len=<len>])
<kindselector> = * <intlen> |
([kind=]<kind>)
<attrspec> = comma separated list of attributes.
Only the following attributes are used in
building up the interface:
external
(parameter --- affects '=' key)
optional
intent
Other attributes are ignored.
<intentspec> = in | out | inout
<arrayspec> = comma separated list of dimension bounds.
<entitydecl> = <name> [[*<charlen>][(<arrayspec>)] | [(<arrayspec>)]*<charlen>]
[/<init_expr>/ | =<init_expr>] [,<entitydecl>]
In addition, the following attributes are used: check,depend,note
TODO:
* Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)'
-> 'real x(2)')
The above may be solved by creating appropriate preprocessor program, for example.
"""
import sys
import string
import fileinput
import re
import os
import copy
import platform
from . import __version__
# The environment provided by auxfuncs.py is needed for some calls to eval.
# As the needed functions cannot be determined by static inspection of the
# code, it is safest to use import * pending a major refactoring of f2py.
from .auxfuncs import *
from . import symbolic
f2py_version = __version__.version
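# A minimal illustration (hypothetical input) of the structures documented in
# the module docstring above.  For a Fortran file containing
#
#     subroutine foo(a)
#     integer a
#     end
#
# crackfortran(['foo.f']) returns, roughly,
#
#     [{'block': 'subroutine', 'name': 'foo', 'args': ['a'],
#       'vars': {'a': {'typespec': 'integer'}}, 'body': [], ...}]
#
# and crack2fortran() on such a list emits the corresponding .pyf signature.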
# Global flags:
strictf77 = 1 # Ignore `!' comments unless line[0]=='!'
sourcecodeform = 'fix' # 'fix','free'
quiet = 0 # Be verbose if 0 (Obsolete: not used any more)
verbose = 1 # Be quiet if 0, extra verbose if > 1.
tabchar = 4 * ' '
pyffilename = ''
f77modulename = ''
skipemptyends = 0 # for old F77 programs without 'program' statement
ignorecontains = 1
dolowercase = 1
debug = []
# Global variables
beginpattern = ''
currentfilename = ''
expectbegin = 1
f90modulevars = {}
filepositiontext = ''
gotnextfile = 1
groupcache = None
groupcounter = 0
grouplist = {groupcounter: []}
groupname = ''
include_paths = []
neededmodule = -1
onlyfuncs = []
previous_context = None
skipblocksuntil = -1
skipfuncs = []
skipfunctions = []
usermodules = []
def reset_global_f2py_vars():
global groupcounter, grouplist, neededmodule, expectbegin
global skipblocksuntil, usermodules, f90modulevars, gotnextfile
global filepositiontext, currentfilename, skipfunctions, skipfuncs
global onlyfuncs, include_paths, previous_context
global strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename
global f77modulename, skipemptyends, ignorecontains, dolowercase, debug
# flags
strictf77 = 1
sourcecodeform = 'fix'
quiet = 0
verbose = 1
tabchar = 4 * ' '
pyffilename = ''
f77modulename = ''
skipemptyends = 0
ignorecontains = 1
dolowercase = 1
debug = []
# variables
groupcounter = 0
grouplist = {groupcounter: []}
neededmodule = -1
expectbegin = 1
skipblocksuntil = -1
usermodules = []
f90modulevars = {}
gotnextfile = 1
filepositiontext = ''
currentfilename = ''
skipfunctions = []
skipfuncs = []
onlyfuncs = []
include_paths = []
previous_context = None
def outmess(line, flag=1):
global filepositiontext
if not verbose:
return
if not quiet:
if flag:
sys.stdout.write(filepositiontext)
sys.stdout.write(line)
re._MAXCACHE = 50
defaultimplicitrules = {}
for c in "abcdefghopqrstuvwxyz$_":
defaultimplicitrules[c] = {'typespec': 'real'}
for c in "ijklmn":
defaultimplicitrules[c] = {'typespec': 'integer'}
badnames = {}
invbadnames = {}
for n in ['int', 'double', 'float', 'char', 'short', 'long', 'void', 'case', 'while',
'return', 'signed', 'unsigned', 'if', 'for', 'typedef', 'sizeof', 'union',
'struct', 'static', 'register', 'new', 'break', 'do', 'goto', 'switch',
'continue', 'else', 'inline', 'extern', 'delete', 'const', 'auto',
'len', 'rank', 'shape', 'index', 'slen', 'size', '_i',
'max', 'min',
'flen', 'fshape',
'string', 'complex_double', 'float_double', 'stdin', 'stderr', 'stdout',
'type', 'default']:
badnames[n] = n + '_bn'
invbadnames[n + '_bn'] = n
def rmbadname1(name):
if name in badnames:
errmess('rmbadname1: Replacing "%s" with "%s".\n' %
(name, badnames[name]))
return badnames[name]
return name
def rmbadname(names):
return [rmbadname1(_m) for _m in names]
def undo_rmbadname1(name):
if name in invbadnames:
errmess('undo_rmbadname1: Replacing "%s" with "%s".\n'
% (name, invbadnames[name]))
return invbadnames[name]
return name
def undo_rmbadname(names):
return [undo_rmbadname1(_m) for _m in names]
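# Sketch: rmbadname1('int') -> 'int_bn' (with a warning), since a Fortran
# variable named 'int' would clash with a C keyword in the generated wrapper;
# undo_rmbadname1('int_bn') restores the original name for user-facing output.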
def getextension(name):
i = name.rfind('.')
if i == -1:
return ''
if '\\' in name[i:]:
return ''
if '/' in name[i:]:
return ''
return name[i + 1:]
is_f_file = re.compile(r'.*\.(for|ftn|f77|f)\Z', re.I).match
_has_f_header = re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search
_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search
_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search
_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match
def is_free_format(file):
"""Check if file is in free format Fortran."""
# f90 allows both fixed and free format, assuming fixed unless
# signs of free format are detected.
result = 0
with open(file, 'r') as f:
line = f.readline()
n = 15 # the number of non-comment lines to scan for hints
if _has_f_header(line):
n = 0
elif _has_f90_header(line):
n = 0
result = 1
while n > 0 and line:
if line[0] != '!' and line.strip():
n -= 1
if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&':
result = 1
break
line = f.readline()
return result
# Read fortran (77,90) code
def readfortrancode(ffile, dowithline=show, istop=1):
"""
    Read Fortran code from files and
    1) Get rid of comments, line continuations, and empty lines; lowercase
       the text (unless lowercasing is disabled via dolowercase).
    2) Call dowithline(line) on every resulting statement line.
    3) Recursively call itself when the statement \"include '<filename>'\" is met.
"""
global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77
global beginpattern, quiet, verbose, dolowercase, include_paths
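    # Sketch (hypothetical call): readfortrancode(['a.f'], dowithline=collect)
    # invokes collect() once per cleaned statement, with comments stripped,
    # continuation lines already joined, and 'cf2py'/'!f2py' directives
    # inlined into the code.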
if not istop:
saveglobals = gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
beginpattern, quiet, verbose, dolowercase
if ffile == []:
return
localdolowercase = dolowercase
# cont: set to True when the content of the last line read
# indicates statement continuation
cont = False
finalline = ''
ll = ''
includeline = re.compile(
r'\s*include\s*(\'|")(?P<name>[^\'"]*)(\'|")', re.I)
cont1 = re.compile(r'(?P<line>.*)&\s*\Z')
cont2 = re.compile(r'(\s*&|)(?P<line>.*)')
mline_mark = re.compile(r".*?'''")
if istop:
dowithline('', -1)
ll, l1 = '', ''
spacedigits = [' '] + [str(_m) for _m in range(10)]
filepositiontext = ''
fin = fileinput.FileInput(ffile)
while True:
l = fin.readline()
if not l:
break
if fin.isfirstline():
filepositiontext = ''
currentfilename = fin.filename()
gotnextfile = 1
l1 = l
strictf77 = 0
sourcecodeform = 'fix'
ext = os.path.splitext(currentfilename)[1]
if is_f_file(currentfilename) and \
not (_has_f90_header(l) or _has_fix_header(l)):
strictf77 = 1
elif is_free_format(currentfilename) and not _has_fix_header(l):
sourcecodeform = 'free'
if strictf77:
beginpattern = beginpattern77
else:
beginpattern = beginpattern90
outmess('\tReading file %s (format:%s%s)\n'
% (repr(currentfilename), sourcecodeform,
strictf77 and ',strict' or ''))
l = l.expandtabs().replace('\xa0', ' ')
# Get rid of newline characters
while not l == '':
if l[-1] not in "\n\r\f":
break
l = l[:-1]
if not strictf77:
(l, rl) = split_by_unquoted(l, '!')
l += ' '
if rl[:5].lower() == '!f2py': # f2py directive
l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!')
if l.strip() == '': # Skip empty line
if sourcecodeform == 'free':
# In free form, a statement continues in the next line
# that is not a comment line [3.3.2.4^1], lines with
# blanks are comment lines [3.3.2.3^1]. Hence, the
# line continuation flag must retain its state.
pass
else:
# In fixed form, statement continuation is determined
# by a non-blank character at the 6-th position. Empty
# line indicates a start of a new statement
# [3.3.3.3^1]. Hence, the line continuation flag must
# be reset.
cont = False
continue
if sourcecodeform == 'fix':
if l[0] in ['*', 'c', '!', 'C', '#']:
if l[1:5].lower() == 'f2py': # f2py directive
l = ' ' + l[5:]
else: # Skip comment line
cont = False
continue
elif strictf77:
if len(l) > 72:
l = l[:72]
if not (l[0] in spacedigits):
                raise Exception('readfortrancode: Found non-(space,digit) char '
                                'in the first column.\n\tAre you sure that '
                                'this code is in fixed form?\n\tline=%s' % repr(l))
if (not cont or strictf77) and (len(l) > 5 and not l[5] == ' '):
# Continuation of a previous line
ll = ll + l[6:]
finalline = ''
origfinalline = ''
else:
if not strictf77:
# F90 continuation
r = cont1.match(l)
if r:
l = r.group('line') # Continuation follows ..
if cont:
ll = ll + cont2.match(l).group('line')
finalline = ''
origfinalline = ''
else:
# clean up line beginning from possible digits.
l = ' ' + l[5:]
if localdolowercase:
finalline = ll.lower()
else:
finalline = ll
origfinalline = ll
ll = l
cont = (r is not None)
else:
# clean up line beginning from possible digits.
l = ' ' + l[5:]
if localdolowercase:
finalline = ll.lower()
else:
finalline = ll
origfinalline = ll
ll = l
elif sourcecodeform == 'free':
if not cont and ext == '.pyf' and mline_mark.match(l):
l = l + '\n'
while True:
lc = fin.readline()
if not lc:
errmess(
'Unexpected end of file when reading multiline\n')
break
l = l + lc
if mline_mark.match(lc):
break
l = l.rstrip()
r = cont1.match(l)
if r:
l = r.group('line') # Continuation follows ..
if cont:
ll = ll + cont2.match(l).group('line')
finalline = ''
origfinalline = ''
else:
if localdolowercase:
finalline = ll.lower()
else:
finalline = ll
origfinalline = ll
ll = l
cont = (r is not None)
else:
raise ValueError(
"Flag sourcecodeform must be either 'fix' or 'free': %s" % repr(sourcecodeform))
filepositiontext = 'Line #%d in %s:"%s"\n\t' % (
fin.filelineno() - 1, currentfilename, l1)
m = includeline.match(origfinalline)
if m:
fn = m.group('name')
if os.path.isfile(fn):
readfortrancode(fn, dowithline=dowithline, istop=0)
else:
include_dirs = [
os.path.dirname(currentfilename)] + include_paths
foundfile = 0
for inc_dir in include_dirs:
fn1 = os.path.join(inc_dir, fn)
if os.path.isfile(fn1):
foundfile = 1
readfortrancode(fn1, dowithline=dowithline, istop=0)
break
if not foundfile:
outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % (
repr(fn), os.pathsep.join(include_dirs)))
else:
dowithline(finalline)
l1 = ll
if localdolowercase:
finalline = ll.lower()
else:
finalline = ll
origfinalline = ll
filepositiontext = 'Line #%d in %s:"%s"\n\t' % (
fin.filelineno() - 1, currentfilename, l1)
m = includeline.match(origfinalline)
if m:
fn = m.group('name')
if os.path.isfile(fn):
readfortrancode(fn, dowithline=dowithline, istop=0)
else:
include_dirs = [os.path.dirname(currentfilename)] + include_paths
foundfile = 0
for inc_dir in include_dirs:
fn1 = os.path.join(inc_dir, fn)
if os.path.isfile(fn1):
foundfile = 1
readfortrancode(fn1, dowithline=dowithline, istop=0)
break
if not foundfile:
outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % (
repr(fn), os.pathsep.join(include_dirs)))
else:
dowithline(finalline)
filepositiontext = ''
fin.close()
if istop:
dowithline('', 1)
else:
gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
beginpattern, quiet, verbose, dolowercase = saveglobals
# Crack line
beforethisafter = r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))' + \
r'\s*(?P<this>(\b(%s)\b))' + \
r'\s*(?P<after>%s)\s*\Z'
##
fortrantypes = r'character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte'
typespattern = re.compile(
beforethisafter % ('', fortrantypes, fortrantypes, '.*'), re.I), 'type'
typespattern4implicit = re.compile(beforethisafter % (
'', fortrantypes + '|static|automatic|undefined', fortrantypes + '|static|automatic|undefined', '.*'), re.I)
#
functionpattern = re.compile(beforethisafter % (
r'([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin'
subroutinepattern = re.compile(beforethisafter % (
r'[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin'
# modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin'
#
groupbegins77 = r'program|block\s*data'
beginpattern77 = re.compile(
beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin'
groupbegins90 = groupbegins77 + \
r'|module(?!\s*procedure)|python\s*module|(abstract|)\s*interface|' + \
r'type(?!\s*\()'
beginpattern90 = re.compile(
beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin'
groupends = (r'end|endprogram|endblockdata|endmodule|endpythonmodule|'
r'endinterface|endsubroutine|endfunction')
endpattern = re.compile(
beforethisafter % ('', groupends, groupends, r'.*'), re.I), 'end'
endifs = r'end\s*(if|do|where|select|while|forall|associate|block|' + \
r'critical|enum|team)'
endifpattern = re.compile(
beforethisafter % (r'[\w]*?', endifs, endifs, r'[\w\s]*'), re.I), 'endif'
#
moduleprocedures = r'module\s*procedure'
moduleprocedurepattern = re.compile(
beforethisafter % ('', moduleprocedures, moduleprocedures, r'.*'), re.I), \
'moduleprocedure'
implicitpattern = re.compile(
beforethisafter % ('', 'implicit', 'implicit', '.*'), re.I), 'implicit'
dimensionpattern = re.compile(beforethisafter % (
'', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension'
externalpattern = re.compile(
beforethisafter % ('', 'external', 'external', '.*'), re.I), 'external'
optionalpattern = re.compile(
beforethisafter % ('', 'optional', 'optional', '.*'), re.I), 'optional'
requiredpattern = re.compile(
beforethisafter % ('', 'required', 'required', '.*'), re.I), 'required'
publicpattern = re.compile(
beforethisafter % ('', 'public', 'public', '.*'), re.I), 'public'
privatepattern = re.compile(
beforethisafter % ('', 'private', 'private', '.*'), re.I), 'private'
intrinsicpattern = re.compile(
beforethisafter % ('', 'intrinsic', 'intrinsic', '.*'), re.I), 'intrinsic'
intentpattern = re.compile(beforethisafter % (
'', 'intent|depend|note|check', 'intent|depend|note|check', r'\s*\(.*?\).*'), re.I), 'intent'
parameterpattern = re.compile(
beforethisafter % ('', 'parameter', 'parameter', r'\s*\(.*'), re.I), 'parameter'
datapattern = re.compile(
beforethisafter % ('', 'data', 'data', '.*'), re.I), 'data'
callpattern = re.compile(
beforethisafter % ('', 'call', 'call', '.*'), re.I), 'call'
entrypattern = re.compile(
beforethisafter % ('', 'entry', 'entry', '.*'), re.I), 'entry'
callfunpattern = re.compile(
beforethisafter % ('', 'callfun', 'callfun', '.*'), re.I), 'callfun'
commonpattern = re.compile(
beforethisafter % ('', 'common', 'common', '.*'), re.I), 'common'
usepattern = re.compile(
beforethisafter % ('', 'use', 'use', '.*'), re.I), 'use'
containspattern = re.compile(
beforethisafter % ('', 'contains', 'contains', ''), re.I), 'contains'
formatpattern = re.compile(
beforethisafter % ('', 'format', 'format', '.*'), re.I), 'format'
# Non-fortran and f2py-specific statements
f2pyenhancementspattern = re.compile(beforethisafter % ('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef',
'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I | re.S), 'f2pyenhancements'
multilinepattern = re.compile(
r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z", re.S), 'multiline'
##
def split_by_unquoted(line, characters):
"""
    Split the line into (line[:i], line[i:]), where i is the index of
    the first occurrence of one of the characters not within quotes,
    or len(line) if no such index exists.
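
    For example (illustrative, matching the regex below):

    >>> split_by_unquoted("a = 'x!y' ! comment", '!')
    ("a = 'x!y' ", '! comment')
    >>> split_by_unquoted('no match here', '!')
    ('no match here', '')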
"""
assert not (set('"\'') & set(characters)), "cannot split by unquoted quotes"
r = re.compile(
r"\A(?P<before>({single_quoted}|{double_quoted}|{not_quoted})*)"
r"(?P<after>{char}.*)\Z".format(
not_quoted="[^\"'{}]".format(re.escape(characters)),
char="[{}]".format(re.escape(characters)),
single_quoted=r"('([^'\\]|(\\.))*')",
double_quoted=r'("([^"\\]|(\\.))*")'))
m = r.match(line)
if m:
d = m.groupdict()
return (d["before"], d["after"])
return (line, "")
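# Flatten an argument list: parentheses and any commas nested inside them
# become underscores, so that each argument reads as a single token.
# Illustrative example:  _simplifyargs('a,b(c)') -> 'a,b_c_'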
def _simplifyargs(argsline):
a = []
for n in markoutercomma(argsline).split('@,@'):
for r in '(),':
n = n.replace(r, '_')
a.append(n)
return ','.join(a)
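# Matches an assignment prefix such as 'y = f(x)', capturing 'y' as 'result'.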
crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+\w*\b)\s*=.*', re.I)
def crackline(line, reset=0):
"""
reset=-1 --- initialize
reset=0 --- crack the line
reset=1 --- final check if mismatch of blocks occurred
Cracked data is saved in grouplist[0].
"""
global beginpattern, groupcounter, groupname, groupcache, grouplist
global filepositiontext, currentfilename, neededmodule, expectbegin
global skipblocksuntil, skipemptyends, previous_context, gotnextfile
_, has_semicolon = split_by_unquoted(line, ";")
if has_semicolon and not (f2pyenhancementspattern[0].match(line) or
multilinepattern[0].match(line)):
# XXX: non-zero reset values need testing
assert reset == 0, repr(reset)
# split line on unquoted semicolons
line, semicolon_line = split_by_unquoted(line, ";")
while semicolon_line:
crackline(line, reset)
line, semicolon_line = split_by_unquoted(semicolon_line[1:], ";")
crackline(line, reset)
return
if reset < 0:
groupcounter = 0
groupname = {groupcounter: ''}
groupcache = {groupcounter: {}}
grouplist = {groupcounter: []}
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['vars'] = {}
groupcache[groupcounter]['block'] = ''
groupcache[groupcounter]['name'] = ''
neededmodule = -1
skipblocksuntil = -1
return
if reset > 0:
fl = 0
if f77modulename and neededmodule == groupcounter:
fl = 2
while groupcounter > fl:
outmess('crackline: groupcounter=%s groupname=%s\n' %
(repr(groupcounter), repr(groupname)))
outmess(
'crackline: Mismatch of blocks encountered. Trying to fix it by assuming "end" statement.\n')
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1
if f77modulename and neededmodule == groupcounter:
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1 # end interface
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1 # end module
neededmodule = -1
return
if line == '':
return
flag = 0
for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern,
requiredpattern,
parameterpattern, datapattern, publicpattern, privatepattern,
intrinsicpattern,
endifpattern, endpattern,
formatpattern,
beginpattern, functionpattern, subroutinepattern,
implicitpattern, typespattern, commonpattern,
callpattern, usepattern, containspattern,
entrypattern,
f2pyenhancementspattern,
multilinepattern,
moduleprocedurepattern
]:
m = pat[0].match(line)
if m:
break
flag = flag + 1
if not m:
re_1 = crackline_re_1
if 0 <= skipblocksuntil <= groupcounter:
return
if 'externals' in groupcache[groupcounter]:
for name in groupcache[groupcounter]['externals']:
if name in invbadnames:
name = invbadnames[name]
if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']:
continue
m1 = re.match(
r'(?P<before>[^"]*)\b%s\b\s*@\(@(?P<args>[^@]*)@\)@.*\Z' % name, markouterparen(line), re.I)
if m1:
m2 = re_1.match(m1.group('before'))
a = _simplifyargs(m1.group('args'))
if m2:
line = 'callfun %s(%s) result (%s)' % (
name, a, m2.group('result'))
else:
line = 'callfun %s(%s)' % (name, a)
m = callfunpattern[0].match(line)
if not m:
outmess(
'crackline: could not resolve function call for line=%s.\n' % repr(line))
return
analyzeline(m, 'callfun', line)
return
if verbose > 1 or (verbose == 1 and currentfilename.lower().endswith('.pyf')):
previous_context = None
outmess('crackline:%d: No pattern for line\n' % (groupcounter))
return
elif pat[1] == 'end':
if 0 <= skipblocksuntil < groupcounter:
groupcounter = groupcounter - 1
if skipblocksuntil <= groupcounter:
return
if groupcounter <= 0:
raise Exception('crackline: groupcounter(=%s) is nonpositive. '
'Check the blocks.'
% (groupcounter))
m1 = beginpattern[0].match((line))
if (m1) and (not m1.group('this') == groupname[groupcounter]):
raise Exception('crackline: End group %s does not match with '
'previous Begin group %s\n\t%s' %
(repr(m1.group('this')), repr(groupname[groupcounter]),
filepositiontext)
)
if skipblocksuntil == groupcounter:
skipblocksuntil = -1
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1
if not skipemptyends:
expectbegin = 1
elif pat[1] == 'begin':
if 0 <= skipblocksuntil <= groupcounter:
groupcounter = groupcounter + 1
return
gotnextfile = 0
analyzeline(m, pat[1], line)
expectbegin = 0
elif pat[1] == 'endif':
pass
elif pat[1] == 'moduleprocedure':
analyzeline(m, pat[1], line)
elif pat[1] == 'contains':
if ignorecontains:
return
if 0 <= skipblocksuntil <= groupcounter:
return
skipblocksuntil = groupcounter
else:
if 0 <= skipblocksuntil <= groupcounter:
return
analyzeline(m, pat[1], line)
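# Mark only the outermost parentheses with '@(@'/'@)@' so that they can be
# matched unambiguously later on.  Illustrative example:
#   markouterparen('f(x, g(y))') -> 'f@(@x, g(y)@)@'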
def markouterparen(line):
l = ''
f = 0
for c in line:
if c == '(':
f = f + 1
if f == 1:
l = l + '@(@'
continue
elif c == ')':
f = f - 1
if f == 0:
l = l + '@)@'
continue
l = l + c
return l
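# Mark commas that sit at parenthesis depth zero (and outside quotes) with
# '@,@' so that argument lists can be split safely.  Illustrative example:
#   markoutercomma('a,b(c,d),e') -> 'a@,@b(c,d)@,@e'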
def markoutercomma(line, comma=','):
l = ''
f = 0
before, after = split_by_unquoted(line, comma + '()')
l += before
while after:
if (after[0] == comma) and (f == 0):
l += '@' + comma + '@'
else:
l += after[0]
if after[0] == '(':
f += 1
elif after[0] == ')':
f -= 1
before, after = split_by_unquoted(after[1:], comma + '()')
l += before
assert not f, repr((f, line, l))
return l
def unmarkouterparen(line):
r = line.replace('@(@', '(').replace('@)@', ')')
return r
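# Merge the declaration dictionary decl2 into decl.  With force set, scalar
# entries from decl2 (typespec, '=', typename) overwrite existing values;
# attrspec and the kind/char selectors are merged via the set* helpers below.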
def appenddecl(decl, decl2, force=1):
if not decl:
decl = {}
if not decl2:
return decl
if decl is decl2:
return decl
for k in list(decl2.keys()):
if k == 'typespec':
if force or k not in decl:
decl[k] = decl2[k]
elif k == 'attrspec':
for l in decl2[k]:
decl = setattrspec(decl, l, force)
elif k == 'kindselector':
decl = setkindselector(decl, decl2[k], force)
elif k == 'charselector':
decl = setcharselector(decl, decl2[k], force)
elif k in ['=', 'typename']:
if force or k not in decl:
decl[k] = decl2[k]
elif k == 'note':
pass
elif k in ['intent', 'check', 'dimension', 'optional',
'required', 'depend']:
errmess('appenddecl: "%s" not implemented.\n' % k)
else:
raise Exception('appenddecl: Unknown variable definition key: ' +
str(k))
return decl
selectpattern = re.compile(
r'\s*(?P<this>(@\(@.*?@\)@|\*[\d*]+|\*\s*@\(@.*?@\)@|))(?P<after>.*)\Z', re.I)
typedefpattern = re.compile(
r'(?:,(?P<attributes>[\w(),]+))?(::)?(?P<name>\b[a-z$_][\w$]*\b)'
r'(?:\((?P<params>[\w,]*)\))?\Z', re.I)
nameargspattern = re.compile(
r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>.*)\s*@\)@))*\s*\Z', re.I)
operatorpattern = re.compile(
r'\s*(?P<scheme>(operator|assignment))'
r'@\(@\s*(?P<name>[^)]+)\s*@\)@\s*\Z', re.I)
callnameargspattern = re.compile(
r'\s*(?P<name>\b[\w$]+\b)\s*@\(@\s*(?P<args>.*)\s*@\)@\s*\Z', re.I)
real16pattern = re.compile(
r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)')
real8pattern = re.compile(
r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))')
_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I)
def _is_intent_callback(vdecl):
for a in vdecl.get('attrspec', []):
if _intentcallbackpattern.match(a):
return 1
return 0
def _resolvetypedefpattern(line):
line = ''.join(line.split()) # removes whitespace
m1 = typedefpattern.match(line)
if m1:
attrs = m1.group('attributes')
attrs = [a.lower() for a in attrs.split(',')] if attrs else []
return m1.group('name'), attrs, m1.group('params')
return None, [], None
def _resolvenameargspattern(line):
line = markouterparen(line)
m1 = nameargspattern.match(line)
if m1:
return m1.group('name'), m1.group('args'), m1.group('result'), m1.group('bind')
m1 = operatorpattern.match(line)
if m1:
name = m1.group('scheme') + '(' + m1.group('name') + ')'
return name, [], None, None
m1 = callnameargspattern.match(line)
if m1:
return m1.group('name'), m1.group('args'), None, None
return None, [], None, None
def analyzeline(m, case, line):
global groupcounter, groupname, groupcache, grouplist, filepositiontext
global currentfilename, f77modulename, neededinterface, neededmodule
global expectbegin, gotnextfile, previous_context
block = m.group('this')
if case != 'multiline':
previous_context = None
if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \
and not skipemptyends and groupcounter < 1:
newname = os.path.basename(currentfilename).split('.')[0]
outmess(
'analyzeline: no group yet. Creating program group with name "%s".\n' % newname)
gotnextfile = 0
groupcounter = groupcounter + 1
groupname[groupcounter] = 'program'
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['vars'] = {}
groupcache[groupcounter]['block'] = 'program'
groupcache[groupcounter]['name'] = newname
groupcache[groupcounter]['from'] = 'fromsky'
expectbegin = 0
if case in ['begin', 'call', 'callfun']:
# Crack line => block,name,args,result
block = block.lower()
if re.match(r'block\s*data', block, re.I):
block = 'block data'
elif re.match(r'python\s*module', block, re.I):
block = 'python module'
elif re.match(r'abstract\s*interface', block, re.I):
block = 'abstract interface'
if block == 'type':
name, attrs, _ = _resolvetypedefpattern(m.group('after'))
groupcache[groupcounter]['vars'][name] = dict(attrspec = attrs)
args = []
result = None
else:
name, args, result, _ = _resolvenameargspattern(m.group('after'))
if name is None:
if block == 'block data':
name = '_BLOCK_DATA_'
else:
name = ''
if block not in ['interface', 'block data', 'abstract interface']:
outmess('analyzeline: No name/args pattern found for line.\n')
previous_context = (block, name, groupcounter)
if args:
args = rmbadname([x.strip()
for x in markoutercomma(args).split('@,@')])
else:
args = []
if '' in args:
while '' in args:
args.remove('')
outmess(
'analyzeline: argument list is malformed (missing argument).\n')
# end of crack line => block,name,args,result
needmodule = 0
needinterface = 0
if case in ['call', 'callfun']:
needinterface = 1
if 'args' not in groupcache[groupcounter]:
return
if name not in groupcache[groupcounter]['args']:
return
for it in grouplist[groupcounter]:
if it['name'] == name:
return
if name in groupcache[groupcounter]['interfaced']:
return
block = {'call': 'subroutine', 'callfun': 'function'}[case]
if f77modulename and neededmodule == -1 and groupcounter <= 1:
neededmodule = groupcounter + 2
needmodule = 1
if block not in ['interface', 'abstract interface']:
needinterface = 1
# Create new block(s)
groupcounter = groupcounter + 1
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
if needmodule:
if verbose > 1:
outmess('analyzeline: Creating module block %s\n' %
repr(f77modulename), 0)
groupname[groupcounter] = 'module'
groupcache[groupcounter]['block'] = 'python module'
groupcache[groupcounter]['name'] = f77modulename
groupcache[groupcounter]['from'] = ''
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['interfaced'] = []
groupcache[groupcounter]['vars'] = {}
groupcounter = groupcounter + 1
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
if needinterface:
if verbose > 1:
outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % (
groupcounter), 0)
groupname[groupcounter] = 'interface'
groupcache[groupcounter]['block'] = 'interface'
groupcache[groupcounter]['name'] = 'unknown_interface'
groupcache[groupcounter]['from'] = '%s:%s' % (
groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name'])
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['interfaced'] = []
groupcache[groupcounter]['vars'] = {}
groupcounter = groupcounter + 1
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
groupname[groupcounter] = block
groupcache[groupcounter]['block'] = block
if not name:
name = 'unknown_' + block.replace(' ', '_')
groupcache[groupcounter]['prefix'] = m.group('before')
groupcache[groupcounter]['name'] = rmbadname1(name)
groupcache[groupcounter]['result'] = result
if groupcounter == 1:
groupcache[groupcounter]['from'] = currentfilename
else:
if f77modulename and groupcounter == 3:
groupcache[groupcounter]['from'] = '%s:%s' % (
groupcache[groupcounter - 1]['from'], currentfilename)
else:
groupcache[groupcounter]['from'] = '%s:%s' % (
groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name'])
for k in list(groupcache[groupcounter].keys()):
if not groupcache[groupcounter][k]:
del groupcache[groupcounter][k]
groupcache[groupcounter]['args'] = args
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['interfaced'] = []
groupcache[groupcounter]['vars'] = {}
groupcache[groupcounter]['entry'] = {}
# end of creation
if block == 'type':
groupcache[groupcounter]['varnames'] = []
if case in ['call', 'callfun']: # set parents variables
if name not in groupcache[groupcounter - 2]['externals']:
groupcache[groupcounter - 2]['externals'].append(name)
groupcache[groupcounter]['vars'] = copy.deepcopy(
groupcache[groupcounter - 2]['vars'])
try:
del groupcache[groupcounter]['vars'][name][
groupcache[groupcounter]['vars'][name]['attrspec'].index('external')]
except Exception:
pass
if block in ['function', 'subroutine']: # set global attributes
try:
groupcache[groupcounter]['vars'][name] = appenddecl(
groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars'][''])
except Exception:
pass
if case == 'callfun': # return type
if result and result in groupcache[groupcounter]['vars']:
if not name == result:
groupcache[groupcounter]['vars'][name] = appenddecl(
groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result])
# if groupcounter>1: # name is interfaced
try:
groupcache[groupcounter - 2]['interfaced'].append(name)
except Exception:
pass
if block == 'function':
t = typespattern[0].match(m.group('before') + ' ' + name)
if t:
typespec, selector, attr, edecl = cracktypespec0(
t.group('this'), t.group('after'))
updatevars(typespec, selector, attr, edecl)
if case in ['call', 'callfun']:
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1 # end routine
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1 # end interface
elif case == 'entry':
name, args, result, bind = _resolvenameargspattern(m.group('after'))
if name is not None:
if args:
args = rmbadname([x.strip()
for x in markoutercomma(args).split('@,@')])
else:
args = []
assert result is None, repr(result)
groupcache[groupcounter]['entry'][name] = args
previous_context = ('entry', name, groupcounter)
elif case == 'type':
typespec, selector, attr, edecl = cracktypespec0(
block, m.group('after'))
last_name = updatevars(typespec, selector, attr, edecl)
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrinsic']:
edecl = groupcache[groupcounter]['vars']
ll = m.group('after').strip()
i = ll.find('::')
if i < 0 and case == 'intent':
i = markouterparen(ll).find('@)@') - 2
ll = ll[:i + 1] + '::' + ll[i + 1:]
i = ll.find('::')
if ll[i:] == '::' and 'args' in groupcache[groupcounter]:
outmess('All arguments will have attribute %s%s\n' %
(m.group('this'), ll[:i]))
ll = ll + ','.join(groupcache[groupcounter]['args'])
if i < 0:
i = 0
pl = ''
else:
pl = ll[:i].strip()
ll = ll[i + 2:]
ch = markoutercomma(pl).split('@,@')
if len(ch) > 1:
pl = ch[0]
outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % (
','.join(ch[1:])))
last_name = None
for e in [x.strip() for x in markoutercomma(ll).split('@,@')]:
m1 = namepattern.match(e)
if not m1:
if case in ['public', 'private']:
k = ''
else:
print(m.groupdict())
outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n' % (
case, repr(e)))
continue
else:
k = rmbadname1(m1.group('name'))
if case in ['public', 'private'] and \
(k == 'operator' or k == 'assignment'):
k += m1.group('after')
if k not in edecl:
edecl[k] = {}
if case == 'dimension':
ap = case + m1.group('after')
if case == 'intent':
ap = m.group('this') + pl
if _intentcallbackpattern.match(ap):
if k not in groupcache[groupcounter]['args']:
if groupcounter > 1:
if '__user__' not in groupcache[groupcounter - 2]['name']:
                                    outmess(
                                        'analyzeline: missing __user__ module (this may be harmless)\n')
# fixes ticket 1693
if k != groupcache[groupcounter]['name']:
outmess('analyzeline: appending intent(callback) %s'
' to %s arguments\n' % (k, groupcache[groupcounter]['name']))
groupcache[groupcounter]['args'].append(k)
else:
errmess(
'analyzeline: intent(callback) %s is ignored\n' % (k))
else:
errmess('analyzeline: intent(callback) %s is already'
' in argument list\n' % (k))
if case in ['optional', 'required', 'public', 'external', 'private', 'intrinsic']:
ap = case
if 'attrspec' in edecl[k]:
edecl[k]['attrspec'].append(ap)
else:
edecl[k]['attrspec'] = [ap]
if case == 'external':
if groupcache[groupcounter]['block'] == 'program':
outmess('analyzeline: ignoring program arguments\n')
continue
if k not in groupcache[groupcounter]['args']:
continue
if 'externals' not in groupcache[groupcounter]:
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['externals'].append(k)
last_name = k
groupcache[groupcounter]['vars'] = edecl
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case == 'moduleprocedure':
groupcache[groupcounter]['implementedby'] = \
[x.strip() for x in m.group('after').split(',')]
elif case == 'parameter':
edecl = groupcache[groupcounter]['vars']
ll = m.group('after').strip()[1:-1]
last_name = None
for e in markoutercomma(ll).split('@,@'):
try:
k, initexpr = [x.strip() for x in e.split('=')]
except Exception:
outmess(
'analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n' % (e, ll))
continue
params = get_parameters(edecl)
k = rmbadname1(k)
if k not in edecl:
edecl[k] = {}
if '=' in edecl[k] and (not edecl[k]['='] == initexpr):
outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n' % (
k, edecl[k]['='], initexpr))
t = determineexprtype(initexpr, params)
if t:
if t.get('typespec') == 'real':
tt = list(initexpr)
for m in real16pattern.finditer(initexpr):
tt[m.start():m.end()] = list(
initexpr[m.start():m.end()].lower().replace('d', 'e'))
initexpr = ''.join(tt)
elif t.get('typespec') == 'complex':
initexpr = initexpr[1:].lower().replace('d', 'e').\
replace(',', '+1j*(')
try:
v = eval(initexpr, {}, params)
except (SyntaxError, NameError, TypeError) as msg:
errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n'
% (initexpr, msg))
continue
edecl[k]['='] = repr(v)
if 'attrspec' in edecl[k]:
edecl[k]['attrspec'].append('parameter')
else:
edecl[k]['attrspec'] = ['parameter']
last_name = k
groupcache[groupcounter]['vars'] = edecl
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case == 'implicit':
if m.group('after').strip().lower() == 'none':
groupcache[groupcounter]['implicit'] = None
elif m.group('after'):
if 'implicit' in groupcache[groupcounter]:
impl = groupcache[groupcounter]['implicit']
else:
impl = {}
if impl is None:
outmess(
'analyzeline: Overwriting earlier "implicit none" statement.\n')
impl = {}
for e in markoutercomma(m.group('after')).split('@,@'):
decl = {}
m1 = re.match(
r'\s*(?P<this>.*?)\s*(\(\s*(?P<after>[a-z-, ]+)\s*\)\s*|)\Z', e, re.I)
if not m1:
outmess(
'analyzeline: could not extract info of implicit statement part "%s"\n' % (e))
continue
m2 = typespattern4implicit.match(m1.group('this'))
if not m2:
outmess(
'analyzeline: could not extract types pattern of implicit statement part "%s"\n' % (e))
continue
typespec, selector, attr, edecl = cracktypespec0(
m2.group('this'), m2.group('after'))
kindselect, charselect, typename = cracktypespec(
typespec, selector)
decl['typespec'] = typespec
decl['kindselector'] = kindselect
decl['charselector'] = charselect
decl['typename'] = typename
for k in list(decl.keys()):
if not decl[k]:
del decl[k]
for r in markoutercomma(m1.group('after')).split('@,@'):
if '-' in r:
try:
begc, endc = [x.strip() for x in r.split('-')]
except Exception:
outmess(
'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement\n' % r)
continue
else:
begc = endc = r.strip()
if not len(begc) == len(endc) == 1:
outmess(
'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement (2)\n' % r)
continue
for o in range(ord(begc), ord(endc) + 1):
impl[chr(o)] = decl
groupcache[groupcounter]['implicit'] = impl
elif case == 'data':
ll = []
dl = ''
il = ''
f = 0
fc = 1
inp = 0
for c in m.group('after'):
if not inp:
if c == "'":
fc = not fc
if c == '/' and fc:
f = f + 1
continue
if c == '(':
inp = inp + 1
elif c == ')':
inp = inp - 1
if f == 0:
dl = dl + c
elif f == 1:
il = il + c
elif f == 2:
dl = dl.strip()
if dl.startswith(','):
dl = dl[1:].strip()
ll.append([dl, il])
dl = c
il = ''
f = 0
if f == 2:
dl = dl.strip()
if dl.startswith(','):
dl = dl[1:].strip()
ll.append([dl, il])
vars = {}
if 'vars' in groupcache[groupcounter]:
vars = groupcache[groupcounter]['vars']
last_name = None
for l in ll:
l = [x.strip() for x in l]
if l[0][0] == ',':
l[0] = l[0][1:]
if l[0][0] == '(':
outmess(
'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % l[0])
continue
i = 0
j = 0
llen = len(l[1])
for v in rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')]):
if v[0] == '(':
outmess(
'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % v)
# XXX: subsequent init expressions may get wrong values.
# Ignoring since data statements are irrelevant for
# wrapping.
continue
fc = 0
while (i < llen) and (fc or not l[1][i] == ','):
if l[1][i] == "'":
fc = not fc
i = i + 1
i = i + 1
if v not in vars:
vars[v] = {}
if '=' in vars[v] and not vars[v]['='] == l[1][j:i - 1]:
outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n' % (
v, vars[v]['='], l[1][j:i - 1]))
vars[v]['='] = l[1][j:i - 1]
j = i
last_name = v
groupcache[groupcounter]['vars'] = vars
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case == 'common':
line = m.group('after').strip()
if not line[0] == '/':
line = '//' + line
cl = []
f = 0
bn = ''
ol = ''
for c in line:
if c == '/':
f = f + 1
continue
if f >= 3:
bn = bn.strip()
if not bn:
bn = '_BLNK_'
cl.append([bn, ol])
f = f - 2
bn = ''
ol = ''
if f % 2:
bn = bn + c
else:
ol = ol + c
bn = bn.strip()
if not bn:
bn = '_BLNK_'
cl.append([bn, ol])
commonkey = {}
if 'common' in groupcache[groupcounter]:
commonkey = groupcache[groupcounter]['common']
for c in cl:
if c[0] not in commonkey:
commonkey[c[0]] = []
for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]:
if i:
commonkey[c[0]].append(i)
groupcache[groupcounter]['common'] = commonkey
previous_context = ('common', bn, groupcounter)
elif case == 'use':
m1 = re.match(
r'\A\s*(?P<name>\b\w+\b)\s*((,(\s*\bonly\b\s*:|(?P<notonly>))\s*(?P<list>.*))|)\s*\Z', m.group('after'), re.I)
if m1:
mm = m1.groupdict()
if 'use' not in groupcache[groupcounter]:
groupcache[groupcounter]['use'] = {}
name = m1.group('name')
groupcache[groupcounter]['use'][name] = {}
isonly = 0
if 'list' in mm and mm['list'] is not None:
if 'notonly' in mm and mm['notonly'] is None:
isonly = 1
groupcache[groupcounter]['use'][name]['only'] = isonly
ll = [x.strip() for x in mm['list'].split(',')]
rl = {}
for l in ll:
if '=' in l:
m2 = re.match(
r'\A\s*(?P<local>\b\w+\b)\s*=\s*>\s*(?P<use>\b\w+\b)\s*\Z', l, re.I)
if m2:
rl[m2.group('local').strip()] = m2.group(
'use').strip()
else:
outmess(
                                'analyzeline: No local=>use pattern found in %s\n' % repr(l))
else:
rl[l] = l
groupcache[groupcounter]['use'][name]['map'] = rl
else:
pass
else:
print(m.groupdict())
outmess('analyzeline: Could not crack the use statement.\n')
elif case in ['f2pyenhancements']:
if 'f2pyenhancements' not in groupcache[groupcounter]:
groupcache[groupcounter]['f2pyenhancements'] = {}
d = groupcache[groupcounter]['f2pyenhancements']
if m.group('this') == 'usercode' and 'usercode' in d:
if isinstance(d['usercode'], str):
d['usercode'] = [d['usercode']]
d['usercode'].append(m.group('after'))
else:
d[m.group('this')] = m.group('after')
elif case == 'multiline':
if previous_context is None:
if verbose:
outmess('analyzeline: No context for multiline block.\n')
return
gc = groupcounter
appendmultiline(groupcache[gc],
previous_context[:2],
m.group('this'))
else:
if verbose > 1:
print(m.groupdict())
outmess('analyzeline: No code implemented for line.\n')
def appendmultiline(group, context_name, ml):
if 'f2pymultilines' not in group:
group['f2pymultilines'] = {}
d = group['f2pymultilines']
if context_name not in d:
d[context_name] = []
d[context_name].append(ml)
return
def cracktypespec0(typespec, ll):
selector = None
attr = None
if re.match(r'double\s*complex', typespec, re.I):
typespec = 'double complex'
elif re.match(r'double\s*precision', typespec, re.I):
typespec = 'double precision'
else:
typespec = typespec.strip().lower()
m1 = selectpattern.match(markouterparen(ll))
if not m1:
outmess(
'cracktypespec0: no kind/char_selector pattern found for line.\n')
return
d = m1.groupdict()
for k in list(d.keys()):
d[k] = unmarkouterparen(d[k])
if typespec in ['complex', 'integer', 'logical', 'real', 'character', 'type']:
selector = d['this']
ll = d['after']
i = ll.find('::')
if i >= 0:
attr = ll[:i].strip()
ll = ll[i + 2:]
return typespec, selector, attr, ll
#####
namepattern = re.compile(r'\s*(?P<name>\b\w+\b)\s*(?P<after>.*)\s*\Z', re.I)
kindselector = re.compile(
r'\s*(\(\s*(kind\s*=)?\s*(?P<kind>.*)\s*\)|\*\s*(?P<kind2>.*?))\s*\Z', re.I)
charselector = re.compile(
r'\s*(\((?P<lenkind>.*)\)|\*\s*(?P<charlen>.*))\s*\Z', re.I)
lenkindpattern = re.compile(
r'\s*(kind\s*=\s*(?P<kind>.*?)\s*(@,@\s*len\s*=\s*(?P<len>.*)|)|(len\s*=\s*|)(?P<len2>.*?)\s*(@,@\s*(kind\s*=\s*|)(?P<kind2>.*)|))\s*\Z', re.I)
lenarraypattern = re.compile(
r'\s*(@\(@\s*(?!/)\s*(?P<array>.*?)\s*@\)@\s*\*\s*(?P<len>.*?)|(\*\s*(?P<len2>.*?)|)\s*(@\(@\s*(?!/)\s*(?P<array2>.*?)\s*@\)@|))\s*(=\s*(?P<init>.*?)|(@\(@|)/\s*(?P<init2>.*?)\s*/(@\)@|)|)\s*\Z', re.I)
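# Drop spaces adjacent to operators, brackets or '=' signs while keeping
# spaces that separate plain words.  Illustrative examples:
#   removespaces('a + b')  -> 'a+b'
#   removespaces('real x') -> 'real x'   (word-separating space is kept)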
def removespaces(expr):
expr = expr.strip()
if len(expr) <= 1:
return expr
expr2 = expr[0]
for i in range(1, len(expr) - 1):
if (expr[i] == ' ' and
((expr[i + 1] in "()[]{}=+-/* ") or
(expr[i - 1] in "()[]{}=+-/* "))):
continue
expr2 = expr2 + expr[i]
expr2 = expr2 + expr[-1]
return expr2
def markinnerspaces(line):
"""
    Replace all spaces in the input variable line that are enclosed in
    quotation marks with the token "@_@".
    For instance, for the input "a 'b c'" the function returns "a 'b@_@c'".
Parameters
----------
line : str
Returns
-------
str
"""
fragment = ''
inside = False
current_quote = None
escaped = ''
for c in line:
if escaped == '\\' and c in ['\\', '\'', '"']:
fragment += c
escaped = c
continue
if not inside and c in ['\'', '"']:
current_quote = c
if c == current_quote:
inside = not inside
elif c == ' ' and inside:
fragment += '@_@'
continue
fragment += c
escaped = c # reset to non-backslash
return fragment
def updatevars(typespec, selector, attrspec, entitydecl):
global groupcache, groupcounter
last_name = None
kindselect, charselect, typename = cracktypespec(typespec, selector)
if attrspec:
attrspec = [x.strip() for x in markoutercomma(attrspec).split('@,@')]
l = []
c = re.compile(r'(?P<start>[a-zA-Z]+)')
for a in attrspec:
if not a:
continue
m = c.match(a)
if m:
s = m.group('start').lower()
a = s + a[len(s):]
l.append(a)
attrspec = l
el = [x.strip() for x in markoutercomma(entitydecl).split('@,@')]
el1 = []
for e in el:
for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split('@ @')]:
if e1:
el1.append(e1.replace('@_@', ' '))
for e in el1:
m = namepattern.match(e)
if not m:
outmess(
'updatevars: no name pattern found for entity=%s. Skipping.\n' % (repr(e)))
continue
ename = rmbadname1(m.group('name'))
edecl = {}
if ename in groupcache[groupcounter]['vars']:
edecl = groupcache[groupcounter]['vars'][ename].copy()
not_has_typespec = 'typespec' not in edecl
if not_has_typespec:
edecl['typespec'] = typespec
elif typespec and (not typespec == edecl['typespec']):
outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % (
ename, edecl['typespec'], typespec))
if 'kindselector' not in edecl:
edecl['kindselector'] = copy.copy(kindselect)
elif kindselect:
for k in list(kindselect.keys()):
if k in edecl['kindselector'] and (not kindselect[k] == edecl['kindselector'][k]):
outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (
k, ename, edecl['kindselector'][k], kindselect[k]))
else:
edecl['kindselector'][k] = copy.copy(kindselect[k])
if 'charselector' not in edecl and charselect:
if not_has_typespec:
edecl['charselector'] = charselect
else:
errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n'
% (ename, charselect))
elif charselect:
for k in list(charselect.keys()):
if k in edecl['charselector'] and (not charselect[k] == edecl['charselector'][k]):
outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (
k, ename, edecl['charselector'][k], charselect[k]))
else:
edecl['charselector'][k] = copy.copy(charselect[k])
if 'typename' not in edecl:
edecl['typename'] = typename
elif typename and (not edecl['typename'] == typename):
outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". Ignoring.\n' % (
ename, edecl['typename'], typename))
if 'attrspec' not in edecl:
edecl['attrspec'] = copy.copy(attrspec)
elif attrspec:
for a in attrspec:
if a not in edecl['attrspec']:
edecl['attrspec'].append(a)
else:
edecl['typespec'] = copy.copy(typespec)
edecl['kindselector'] = copy.copy(kindselect)
edecl['charselector'] = copy.copy(charselect)
edecl['typename'] = typename
edecl['attrspec'] = copy.copy(attrspec)
if 'external' in (edecl.get('attrspec') or []) and e in groupcache[groupcounter]['args']:
if 'externals' not in groupcache[groupcounter]:
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['externals'].append(e)
if m.group('after'):
m1 = lenarraypattern.match(markouterparen(m.group('after')))
if m1:
d1 = m1.groupdict()
for lk in ['len', 'array', 'init']:
if d1[lk + '2'] is not None:
d1[lk] = d1[lk + '2']
del d1[lk + '2']
for k in list(d1.keys()):
if d1[k] is not None:
d1[k] = unmarkouterparen(d1[k])
else:
del d1[k]
if 'len' in d1 and 'array' in d1:
if d1['len'] == '':
d1['len'] = d1['array']
del d1['array']
else:
d1['array'] = d1['array'] + ',' + d1['len']
del d1['len']
errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % (
typespec, e, typespec, ename, d1['array']))
if 'array' in d1:
dm = 'dimension(%s)' % d1['array']
if 'attrspec' not in edecl or (not edecl['attrspec']):
edecl['attrspec'] = [dm]
else:
edecl['attrspec'].append(dm)
for dm1 in edecl['attrspec']:
if dm1[:9] == 'dimension' and dm1 != dm:
del edecl['attrspec'][-1]
errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n'
% (ename, dm1, dm))
break
if 'len' in d1:
if typespec in ['complex', 'integer', 'logical', 'real']:
if ('kindselector' not in edecl) or (not edecl['kindselector']):
edecl['kindselector'] = {}
edecl['kindselector']['*'] = d1['len']
elif typespec == 'character':
if ('charselector' not in edecl) or (not edecl['charselector']):
edecl['charselector'] = {}
if 'len' in edecl['charselector']:
del edecl['charselector']['len']
edecl['charselector']['*'] = d1['len']
if 'init' in d1:
if '=' in edecl and (not edecl['='] == d1['init']):
outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % (
ename, edecl['='], d1['init']))
else:
edecl['='] = d1['init']
else:
outmess('updatevars: could not crack entity declaration "%s". Ignoring.\n' % (
ename + m.group('after')))
for k in list(edecl.keys()):
if not edecl[k]:
del edecl[k]
groupcache[groupcounter]['vars'][ename] = edecl
if 'varnames' in groupcache[groupcounter]:
groupcache[groupcounter]['varnames'].append(ename)
last_name = ename
return last_name
def cracktypespec(typespec, selector):
kindselect = None
charselect = None
typename = None
if selector:
if typespec in ['complex', 'integer', 'logical', 'real']:
kindselect = kindselector.match(selector)
if not kindselect:
outmess(
'cracktypespec: no kindselector pattern found for %s\n' % (repr(selector)))
return
kindselect = kindselect.groupdict()
kindselect['*'] = kindselect['kind2']
del kindselect['kind2']
for k in list(kindselect.keys()):
if not kindselect[k]:
del kindselect[k]
for k, i in list(kindselect.items()):
kindselect[k] = rmbadname1(i)
elif typespec == 'character':
charselect = charselector.match(selector)
if not charselect:
outmess(
'cracktypespec: no charselector pattern found for %s\n' % (repr(selector)))
return
charselect = charselect.groupdict()
charselect['*'] = charselect['charlen']
del charselect['charlen']
if charselect['lenkind']:
lenkind = lenkindpattern.match(
markoutercomma(charselect['lenkind']))
lenkind = lenkind.groupdict()
for lk in ['len', 'kind']:
if lenkind[lk + '2']:
lenkind[lk] = lenkind[lk + '2']
charselect[lk] = lenkind[lk]
del lenkind[lk + '2']
del charselect['lenkind']
for k in list(charselect.keys()):
if not charselect[k]:
del charselect[k]
for k, i in list(charselect.items()):
charselect[k] = rmbadname1(i)
elif typespec == 'type':
typename = re.match(r'\s*\(\s*(?P<name>\w+)\s*\)', selector, re.I)
if typename:
typename = typename.group('name')
else:
outmess('cracktypespec: no typename found in %s\n' %
(repr(typespec + selector)))
else:
outmess('cracktypespec: no selector used for %s\n' %
(repr(selector)))
return kindselect, charselect, typename
######
def setattrspec(decl, attr, force=0):
if not decl:
decl = {}
if not attr:
return decl
if 'attrspec' not in decl:
decl['attrspec'] = [attr]
return decl
if force:
decl['attrspec'].append(attr)
if attr in decl['attrspec']:
return decl
if attr == 'static' and 'automatic' not in decl['attrspec']:
decl['attrspec'].append(attr)
elif attr == 'automatic' and 'static' not in decl['attrspec']:
decl['attrspec'].append(attr)
elif attr == 'public':
if 'private' not in decl['attrspec']:
decl['attrspec'].append(attr)
elif attr == 'private':
if 'public' not in decl['attrspec']:
decl['attrspec'].append(attr)
else:
decl['attrspec'].append(attr)
return decl
def setkindselector(decl, sel, force=0):
if not decl:
decl = {}
if not sel:
return decl
if 'kindselector' not in decl:
decl['kindselector'] = sel
return decl
for k in list(sel.keys()):
if force or k not in decl['kindselector']:
decl['kindselector'][k] = sel[k]
return decl
def setcharselector(decl, sel, force=0):
if not decl:
decl = {}
if not sel:
return decl
if 'charselector' not in decl:
decl['charselector'] = sel
return decl
for k in list(sel.keys()):
if force or k not in decl['charselector']:
decl['charselector'][k] = sel[k]
return decl
def getblockname(block, unknown='unknown'):
if 'name' in block:
return block['name']
return unknown
# post processing
def setmesstext(block):
global filepositiontext
try:
filepositiontext = 'In: %s:%s\n' % (block['from'], block['name'])
except Exception:
pass
def get_usedict(block):
usedict = {}
if 'parent_block' in block:
usedict = get_usedict(block['parent_block'])
if 'use' in block:
usedict.update(block['use'])
return usedict
def get_useparameters(block, param_map=None):
global f90modulevars
if param_map is None:
param_map = {}
usedict = get_usedict(block)
if not usedict:
return param_map
for usename, mapping in list(usedict.items()):
usename = usename.lower()
if usename not in f90modulevars:
outmess('get_useparameters: no module %s info used by %s\n' %
(usename, block.get('name')))
continue
mvars = f90modulevars[usename]
params = get_parameters(mvars)
if not params:
continue
# XXX: apply mapping
if mapping:
            errmess('get_useparameters: mapping for %s not implemented.\n' % (mapping))
for k, v in list(params.items()):
if k in param_map:
outmess('get_useparameters: overriding parameter %s with'
' value from module %s\n' % (repr(k), repr(usename)))
param_map[k] = v
return param_map
def postcrack2(block, tab='', param_map=None):
global f90modulevars
if not f90modulevars:
return block
if isinstance(block, list):
ret = [postcrack2(g, tab=tab + '\t', param_map=param_map)
for g in block]
return ret
setmesstext(block)
outmess('%sBlock: %s\n' % (tab, block['name']), 0)
if param_map is None:
param_map = get_useparameters(block)
if param_map is not None and 'vars' in block:
vars = block['vars']
for n in list(vars.keys()):
var = vars[n]
if 'kindselector' in var:
kind = var['kindselector']
if 'kind' in kind:
val = kind['kind']
if val in param_map:
kind['kind'] = param_map[val]
new_body = [postcrack2(b, tab=tab + '\t', param_map=param_map)
for b in block['body']]
block['body'] = new_body
return block
def postcrack(block, args=None, tab=''):
"""
TODO:
function return values
determine expression types if in argument list
"""
global usermodules, onlyfunctions
if isinstance(block, list):
gret = []
uret = []
for g in block:
setmesstext(g)
g = postcrack(g, tab=tab + '\t')
# sort user routines to appear first
if 'name' in g and '__user__' in g['name']:
uret.append(g)
else:
gret.append(g)
return uret + gret
setmesstext(block)
    if not isinstance(block, dict) or 'block' not in block:
raise Exception('postcrack: Expected block dictionary instead of ' +
str(block))
if 'name' in block and not block['name'] == 'unknown_interface':
outmess('%sBlock: %s\n' % (tab, block['name']), 0)
block = analyzeargs(block)
block = analyzecommon(block)
block['vars'] = analyzevars(block)
block['sortvars'] = sortvarnames(block['vars'])
if 'args' in block and block['args']:
args = block['args']
block['body'] = analyzebody(block, args, tab=tab)
userisdefined = []
if 'use' in block:
useblock = block['use']
for k in list(useblock.keys()):
if '__user__' in k:
userisdefined.append(k)
else:
useblock = {}
name = ''
if 'name' in block:
name = block['name']
# and not userisdefined: # Build a __user__ module
if 'externals' in block and block['externals']:
interfaced = []
if 'interfaced' in block:
interfaced = block['interfaced']
mvars = copy.copy(block['vars'])
if name:
mname = name + '__user__routines'
else:
mname = 'unknown__user__routines'
if mname in userisdefined:
i = 1
while '%s_%i' % (mname, i) in userisdefined:
i = i + 1
mname = '%s_%i' % (mname, i)
interface = {'block': 'interface', 'body': [],
'vars': {}, 'name': name + '_user_interface'}
for e in block['externals']:
if e in interfaced:
edef = []
j = -1
for b in block['body']:
j = j + 1
if b['block'] == 'interface':
i = -1
for bb in b['body']:
i = i + 1
if 'name' in bb and bb['name'] == e:
edef = copy.copy(bb)
del b['body'][i]
break
if edef:
if not b['body']:
del block['body'][j]
del interfaced[interfaced.index(e)]
break
interface['body'].append(edef)
else:
if e in mvars and not isexternal(mvars[e]):
interface['vars'][e] = mvars[e]
if interface['vars'] or interface['body']:
block['interfaced'] = interfaced
mblock = {'block': 'python module', 'body': [
interface], 'vars': {}, 'name': mname, 'interfaced': block['externals']}
useblock[mname] = {}
usermodules.append(mblock)
if useblock:
block['use'] = useblock
return block
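# Order variable names so that names without a 'depend' attribute come first
# and each dependent name follows its dependencies.  Illustrative example:
#   sortvarnames({'a': {'depend': ['n']}, 'n': {}}) -> ['n', 'a']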
def sortvarnames(vars):
indep = []
dep = []
for v in list(vars.keys()):
if 'depend' in vars[v] and vars[v]['depend']:
dep.append(v)
else:
indep.append(v)
n = len(dep)
i = 0
while dep: # XXX: How to catch dependence cycles correctly?
v = dep[0]
fl = 0
for w in dep[1:]:
if w in vars[v]['depend']:
fl = 1
break
if fl:
dep = dep[1:] + [v]
i = i + 1
if i > n:
errmess('sortvarnames: failed to compute dependencies because'
' of cyclic dependencies between '
+ ', '.join(dep) + '\n')
indep = indep + dep
break
else:
indep.append(v)
dep = dep[1:]
n = len(dep)
i = 0
return indep
def analyzecommon(block):
if not hascommon(block):
return block
commonvars = []
for k in list(block['common'].keys()):
comvars = []
for e in block['common'][k]:
m = re.match(
r'\A\s*\b(?P<name>.*?)\b\s*(\((?P<dims>.*?)\)|)\s*\Z', e, re.I)
if m:
dims = []
if m.group('dims'):
dims = [x.strip()
for x in markoutercomma(m.group('dims')).split('@,@')]
n = rmbadname1(m.group('name').strip())
if n in block['vars']:
if 'attrspec' in block['vars'][n]:
block['vars'][n]['attrspec'].append(
'dimension(%s)' % (','.join(dims)))
else:
block['vars'][n]['attrspec'] = [
'dimension(%s)' % (','.join(dims))]
else:
if dims:
block['vars'][n] = {
'attrspec': ['dimension(%s)' % (','.join(dims))]}
else:
block['vars'][n] = {}
if n not in commonvars:
commonvars.append(n)
else:
n = e
errmess(
'analyzecommon: failed to extract "<name>[(<dims>)]" from "%s" in common /%s/.\n' % (e, k))
comvars.append(n)
block['common'][k] = comvars
if 'commonvars' not in block:
block['commonvars'] = commonvars
else:
block['commonvars'] = block['commonvars'] + commonvars
return block
def analyzebody(block, args, tab=''):
global usermodules, skipfuncs, onlyfuncs, f90modulevars
setmesstext(block)
body = []
for b in block['body']:
b['parent_block'] = block
if b['block'] in ['function', 'subroutine']:
if args is not None and b['name'] not in args:
continue
else:
as_ = b['args']
if b['name'] in skipfuncs:
continue
if onlyfuncs and b['name'] not in onlyfuncs:
continue
b['saved_interface'] = crack2fortrangen(
b, '\n' + ' ' * 6, as_interface=True)
else:
as_ = args
b = postcrack(b, as_, tab=tab + '\t')
if b['block'] in ['interface', 'abstract interface'] and \
not b['body'] and not b.get('implementedby'):
if 'f2pyenhancements' not in b:
continue
if b['block'].replace(' ', '') == 'pythonmodule':
usermodules.append(b)
else:
if b['block'] == 'module':
f90modulevars[b['name']] = b['vars']
body.append(b)
return body
def buildimplicitrules(block):
setmesstext(block)
implicitrules = defaultimplicitrules
attrrules = {}
if 'implicit' in block:
if block['implicit'] is None:
implicitrules = None
if verbose > 1:
outmess(
'buildimplicitrules: no implicit rules for routine %s.\n' % repr(block['name']))
else:
for k in list(block['implicit'].keys()):
if block['implicit'][k].get('typespec') not in ['static', 'automatic']:
implicitrules[k] = block['implicit'][k]
else:
attrrules[k] = block['implicit'][k]['typespec']
return implicitrules, attrrules
def myeval(e, g=None, l=None):
""" Like `eval` but returns only integers and floats """
r = eval(e, g, l)
if type(r) in [int, float]:
return r
raise ValueError('r=%r' % (r))
getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I)
def getlincoef(e, xset): # e = a*x+b ; x in xset
"""
Obtain ``a`` and ``b`` when ``e == "a*x+b"``, where ``x`` is a symbol in
xset.
>>> getlincoef('2*x + 1', {'x'})
(2, 1, 'x')
>>> getlincoef('3*x + x*2 + 2 + 1', {'x'})
(5, 3, 'x')
>>> getlincoef('0', {'x'})
(0, 0, None)
>>> getlincoef('0*x', {'x'})
(0, 0, 'x')
>>> getlincoef('x*x', {'x'})
(None, None, None)
This can be tricked by sufficiently complex expressions
>>> getlincoef('(x - 0.5)*(x - 1.5)*(x - 1)*x + 2*x + 3', {'x'})
(2.0, 3.0, 'x')
"""
try:
c = int(myeval(e, {}, {}))
return 0, c, None
except Exception:
pass
if getlincoef_re_1.match(e):
return 1, 0, e
len_e = len(e)
for x in xset:
if len(x) > len_e:
continue
if re.search(r'\w\s*\([^)]*\b' + x + r'\b', e):
# skip function calls having x as an argument, e.g max(1, x)
continue
re_1 = re.compile(r'(?P<before>.*?)\b' + x + r'\b(?P<after>.*)', re.I)
m = re_1.match(e)
if m:
try:
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s' % (
m1.group('before'), 0, m1.group('after'))
m1 = re_1.match(ee)
b = myeval(ee, {}, {})
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s' % (
m1.group('before'), 1, m1.group('after'))
m1 = re_1.match(ee)
a = myeval(ee, {}, {}) - b
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s' % (
m1.group('before'), 0.5, m1.group('after'))
m1 = re_1.match(ee)
c = myeval(ee, {}, {})
# computing another point to be sure that expression is linear
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s' % (
m1.group('before'), 1.5, m1.group('after'))
m1 = re_1.match(ee)
c2 = myeval(ee, {}, {})
if (a * 0.5 + b == c and a * 1.5 + b == c2):
return a, b, x
except Exception:
pass
break
return None, None, None
word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I)
def _get_depend_dict(name, vars, deps):
if name in vars:
words = vars[name].get('depend', [])
if '=' in vars[name] and not isstring(vars[name]):
for word in word_pattern.findall(vars[name]['=']):
# The word_pattern may return values that are not
# only variables, they can be string content for instance
if word not in words and word in vars and word != name:
words.append(word)
for word in words[:]:
for w in deps.get(word, []) \
or _get_depend_dict(word, vars, deps):
if w not in words:
words.append(w)
else:
outmess('_get_depend_dict: no dependence info for %s\n' % (repr(name)))
words = []
deps[name] = words
return words
def _calc_depend_dict(vars):
names = list(vars.keys())
depend_dict = {}
for n in names:
_get_depend_dict(n, vars, depend_dict)
return depend_dict
def get_sorted_names(vars):
"""
"""
depend_dict = _calc_depend_dict(vars)
names = []
for name in list(depend_dict.keys()):
if not depend_dict[name]:
names.append(name)
del depend_dict[name]
while depend_dict:
for name, lst in list(depend_dict.items()):
new_lst = [n for n in lst if n in depend_dict]
if not new_lst:
names.append(name)
del depend_dict[name]
else:
depend_dict[name] = new_lst
return [name for name in names if name in vars]
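# Crude stand-in for Fortran's KIND() intrinsic (see the XXX note below).
# Illustrative values, assuming the real16/real8 literal patterns above:
#   _kind_func('8.0d0') -> 8    (a 'd' exponent marks double precision)
#   _kind_func('1.5e0') -> 4    (an 'e' exponent marks single precision)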
def _kind_func(string):
# XXX: return something sensible.
if string[0] in "'\"":
string = string[1:-1]
if real16pattern.match(string):
return 8
elif real8pattern.match(string):
return 4
return 'kind(' + string + ')'
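# Approximates SELECTED_INT_KIND via the power-of-two thresholds below.
# Illustrative values:
#   _selected_int_kind_func(2)  -> 1    (10**2  <= 2**8)
#   _selected_int_kind_func(5)  -> 4    (10**5  <= 2**32)
#   _selected_int_kind_func(18) -> 8    (10**18 <= 2**63)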
def _selected_int_kind_func(r):
# XXX: This should be processor dependent
m = 10 ** r
if m <= 2 ** 8:
return 1
if m <= 2 ** 16:
return 2
if m <= 2 ** 32:
return 4
if m <= 2 ** 63:
return 8
if m <= 2 ** 128:
return 16
return -1
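# Approximates SELECTED_REAL_KIND; the 10-byte extended kind is assumed to
# exist only on x86-family machines.  Illustrative values:
#   _selected_real_kind_func(6)  -> 4
#   _selected_real_kind_func(15) -> 8
#   _selected_real_kind_func(18) -> 10 on x86, 16 on e.g. aarch64/power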
def _selected_real_kind_func(p, r=0, radix=0):
# XXX: This should be processor dependent
# This is only good for 0 <= p <= 20
if p < 7:
return 4
if p < 16:
return 8
machine = platform.machine().lower()
if machine.startswith(('aarch64', 'power', 'ppc', 'riscv', 's390x', 'sparc')):
if p <= 20:
return 16
else:
if p < 19:
return 10
elif p <= 20:
return 16
return -1
def get_parameters(vars, global_params={}):
params = copy.copy(global_params)
g_params = copy.copy(global_params)
for name, func in [('kind', _kind_func),
('selected_int_kind', _selected_int_kind_func),
('selected_real_kind', _selected_real_kind_func), ]:
if name not in g_params:
g_params[name] = func
param_names = []
for n in get_sorted_names(vars):
if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']:
param_names.append(n)
kind_re = re.compile(r'\bkind\s*\(\s*(?P<value>.*)\s*\)', re.I)
selected_int_kind_re = re.compile(
r'\bselected_int_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
selected_kind_re = re.compile(
r'\bselected_(int|real)_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
for n in param_names:
if '=' in vars[n]:
v = vars[n]['=']
if islogical(vars[n]):
v = v.lower()
for repl in [
('.false.', 'False'),
('.true.', 'True'),
# TODO: test .eq., .neq., etc replacements.
]:
v = v.replace(*repl)
v = kind_re.sub(r'kind("\1")', v)
v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v)
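            # e.g. 'kind(0.d0)' becomes 'kind("0.d0")' so that _kind_func
            # receives the literal as a string when v is eval'd below.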
# We need to act according to the data.
# The easy case is if the data has a kind-specifier,
# then we may easily remove those specifiers.
# However, it may be that the user uses other specifiers...(!)
is_replaced = False
if 'kindselector' in vars[n]:
if 'kind' in vars[n]['kindselector']:
orig_v_len = len(v)
v = v.replace('_' + vars[n]['kindselector']['kind'], '')
# Again, this will be true if even a single specifier
# has been replaced, see comment above.
is_replaced = len(v) < orig_v_len
if not is_replaced:
if not selected_kind_re.match(v):
v_ = v.split('_')
# In case there are additive parameters
if len(v_) > 1:
v = ''.join(v_[:-1]).lower().replace(v_[-1].lower(), '')
# Currently this will not work for complex numbers.
# There is missing code for extracting a complex number,
# which may be defined in either of these:
# a) (Re, Im)
# b) cmplx(Re, Im)
# c) dcmplx(Re, Im)
# d) cmplx(Re, Im, <prec>)
if isdouble(vars[n]):
tt = list(v)
for m in real16pattern.finditer(v):
tt[m.start():m.end()] = list(
v[m.start():m.end()].lower().replace('d', 'e'))
v = ''.join(tt)
elif iscomplex(vars[n]):
outmess(f'get_parameters[TODO]: '
f'implement evaluation of complex expression {v}\n')
# Handle _dp for gh-6624
# Also fixes gh-20460
if real16pattern.search(v):
v = 8
elif real8pattern.search(v):
v = 4
try:
params[n] = eval(v, g_params, params)
except Exception as msg:
params[n] = v
outmess('get_parameters: got "%s" on %s\n' % (msg, repr(v)))
if isstring(vars[n]) and isinstance(params[n], int):
params[n] = chr(params[n])
nl = n.lower()
if nl != n:
params[nl] = params[n]
else:
print(vars[n])
outmess(
                'get_parameters: parameter %s does not have a value?!\n' % (repr(n)))
return params
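# Sketch of the result (illustrative): for a module containing
#     integer, parameter :: n = 3
#     integer, parameter :: m = 2*n
# the returned dict maps 'n' -> 3 and 'm' -> 6; each value is eval'ed
# with the previously collected parameters (and the kind helpers above)
# in scope.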
def _eval_length(length, params):
if length in ['(:)', '(*)', '*']:
return '(*)'
return _eval_scalar(length, params)
_is_kind_number = re.compile(r'\d+_').match
def _eval_scalar(value, params):
if _is_kind_number(value):
value = value.split('_')[0]
try:
value = eval(value, {}, params)
value = (repr if isinstance(value, str) else str)(value)
except (NameError, SyntaxError, TypeError):
return value
except Exception as msg:
errmess('"%s" in evaluating %r '
'(available names: %s)\n'
% (msg, value, list(params.keys())))
return value
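# For instance (illustration only): _eval_scalar('2*n', {'n': 3}) returns
# '6', while an expression with an unknown name such as '2*m' is returned
# unchanged for the Fortran side to resolve.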
def analyzevars(block):
global f90modulevars
setmesstext(block)
implicitrules, attrrules = buildimplicitrules(block)
vars = copy.copy(block['vars'])
if block['block'] == 'function' and block['name'] not in vars:
vars[block['name']] = {}
if '' in block['vars']:
del vars['']
if 'attrspec' in block['vars']['']:
gen = block['vars']['']['attrspec']
for n in list(vars.keys()):
for k in ['public', 'private']:
if k in gen:
vars[n] = setattrspec(vars[n], k)
svars = []
args = block['args']
for a in args:
try:
vars[a]
svars.append(a)
except KeyError:
pass
for n in list(vars.keys()):
if n not in args:
svars.append(n)
params = get_parameters(vars, get_useparameters(block))
dep_matches = {}
name_match = re.compile(r'[A-Za-z][\w$]*').match
for v in list(vars.keys()):
m = name_match(v)
if m:
n = v[m.start():m.end()]
try:
dep_matches[n]
except KeyError:
dep_matches[n] = re.compile(r'.*\b%s\b' % (v), re.I).match
for n in svars:
if n[0] in list(attrrules.keys()):
vars[n] = setattrspec(vars[n], attrrules[n[0]])
if 'typespec' not in vars[n]:
if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']):
if implicitrules:
ln0 = n[0].lower()
for k in list(implicitrules[ln0].keys()):
if k == 'typespec' and implicitrules[ln0][k] == 'undefined':
continue
if k not in vars[n]:
vars[n][k] = implicitrules[ln0][k]
elif k == 'attrspec':
for l in implicitrules[ln0][k]:
vars[n] = setattrspec(vars[n], l)
elif n in block['args']:
outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n' % (
repr(n), block['name']))
if 'charselector' in vars[n]:
if 'len' in vars[n]['charselector']:
l = vars[n]['charselector']['len']
try:
l = str(eval(l, {}, params))
except Exception:
pass
vars[n]['charselector']['len'] = l
if 'kindselector' in vars[n]:
if 'kind' in vars[n]['kindselector']:
l = vars[n]['kindselector']['kind']
try:
l = str(eval(l, {}, params))
except Exception:
pass
vars[n]['kindselector']['kind'] = l
dimension_exprs = {}
if 'attrspec' in vars[n]:
attr = vars[n]['attrspec']
attr.reverse()
vars[n]['attrspec'] = []
dim, intent, depend, check, note = None, None, None, None, None
for a in attr:
if a[:9] == 'dimension':
dim = (a[9:].strip())[1:-1]
elif a[:6] == 'intent':
intent = (a[6:].strip())[1:-1]
elif a[:6] == 'depend':
depend = (a[6:].strip())[1:-1]
elif a[:5] == 'check':
check = (a[5:].strip())[1:-1]
elif a[:4] == 'note':
note = (a[4:].strip())[1:-1]
else:
vars[n] = setattrspec(vars[n], a)
if intent:
if 'intent' not in vars[n]:
vars[n]['intent'] = []
for c in [x.strip() for x in markoutercomma(intent).split('@,@')]:
# Remove spaces so that 'in out' becomes 'inout'
tmp = c.replace(' ', '')
if tmp not in vars[n]['intent']:
vars[n]['intent'].append(tmp)
intent = None
if note:
note = note.replace('\\n\\n', '\n\n')
note = note.replace('\\n ', '\n')
if 'note' not in vars[n]:
vars[n]['note'] = [note]
else:
vars[n]['note'].append(note)
note = None
if depend is not None:
if 'depend' not in vars[n]:
vars[n]['depend'] = []
for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]):
if c not in vars[n]['depend']:
vars[n]['depend'].append(c)
depend = None
if check is not None:
if 'check' not in vars[n]:
vars[n]['check'] = []
for c in [x.strip() for x in markoutercomma(check).split('@,@')]:
if c not in vars[n]['check']:
vars[n]['check'].append(c)
check = None
if dim and 'dimension' not in vars[n]:
vars[n]['dimension'] = []
for d in rmbadname([x.strip() for x in markoutercomma(dim).split('@,@')]):
star = ':' if d == ':' else '*'
# Evaluate `d` with respect to params
if d in params:
d = str(params[d])
for p in params:
re_1 = re.compile(r'(?P<before>.*?)\b' + p + r'\b(?P<after>.*)', re.I)
m = re_1.match(d)
while m:
d = m.group('before') + \
str(params[p]) + m.group('after')
m = re_1.match(d)
if d == star:
dl = [star]
else:
dl = markoutercomma(d, ':').split('@:@')
if len(dl) == 2 and '*' in dl: # e.g. dimension(5:*)
dl = ['*']
d = '*'
if len(dl) == 1 and dl[0] != star:
dl = ['1', dl[0]]
if len(dl) == 2:
d1, d2 = map(symbolic.Expr.parse, dl)
dsize = d2 - d1 + 1
d = dsize.tostring(language=symbolic.Language.C)
# find variables v that define d as a linear
# function, `d == a * v + b`, and store
# coefficients a and b for further analysis.
solver_and_deps = {}
for v in block['vars']:
s = symbolic.as_symbol(v)
if dsize.contains(s):
try:
a, b = dsize.linear_solve(s)
def solve_v(s, a=a, b=b):
return (s - b) / a
all_symbols = set(a.symbols())
all_symbols.update(b.symbols())
except RuntimeError as msg:
# d is not a linear function of v,
# however, if v can be determined
# from d using other means,
# implement the corresponding
# solve_v function here.
solve_v = None
all_symbols = set(dsize.symbols())
v_deps = set(
s.data for s in all_symbols
if s.data in vars)
solver_and_deps[v] = solve_v, list(v_deps)
# Note that dsize may contain symbols that are
# not defined in block['vars']. Here we assume
# these correspond to Fortran/C intrinsic
# functions or that are defined by other
# means. We'll let the compiler validate the
# definiteness of such symbols.
dimension_exprs[d] = solver_and_deps
vars[n]['dimension'].append(d)
if 'dimension' in vars[n]:
if isstringarray(vars[n]):
if 'charselector' in vars[n]:
d = vars[n]['charselector']
if '*' in d:
d = d['*']
errmess('analyzevars: character array "character*%s %s(%s)" is considered as "character %s(%s)"; "intent(c)" is forced.\n'
% (d, n,
','.join(vars[n]['dimension']),
n, ','.join(vars[n]['dimension'] + [d])))
vars[n]['dimension'].append(d)
del vars[n]['charselector']
if 'intent' not in vars[n]:
vars[n]['intent'] = []
if 'c' not in vars[n]['intent']:
vars[n]['intent'].append('c')
else:
errmess(
"analyzevars: charselector=%r unhandled.\n" % (d))
if 'check' not in vars[n] and 'args' in block and n in block['args']:
# n is an argument that has no checks defined. Here we
# generate some consistency checks for n, and when n is an
# array, generate checks for its dimensions and construct
# initialization expressions.
n_deps = vars[n].get('depend', [])
n_checks = []
n_is_input = l_or(isintent_in, isintent_inout,
isintent_inplace)(vars[n])
if isarray(vars[n]): # n is array
for i, d in enumerate(vars[n]['dimension']):
coeffs_and_deps = dimension_exprs.get(d)
if coeffs_and_deps is None:
# d is `:` or `*` or a constant expression
pass
elif n_is_input:
# n is an input array argument and its shape
# may define variables used in dimension
# specifications.
for v, (solver, deps) in coeffs_and_deps.items():
def compute_deps(v, deps):
for v1 in coeffs_and_deps.get(v, [None, []])[1]:
if v1 not in deps:
deps.add(v1)
compute_deps(v1, deps)
all_deps = set()
compute_deps(v, all_deps)
if ((v in n_deps
or '=' in vars[v]
or 'depend' in vars[v])):
# Skip a variable that
# - n depends on
# - has user-defined initialization expression
# - has user-defined dependencies
continue
if solver is not None and v not in all_deps:
# v can be solved from d, hence, we
# make it an optional argument with
# initialization expression:
is_required = False
init = solver(symbolic.as_symbol(
f'shape({n}, {i})'))
init = init.tostring(
language=symbolic.Language.C)
vars[v]['='] = init
# n needs to be initialized before v. So,
# making v dependent on n and on any
# variables in solver or d.
vars[v]['depend'] = [n] + deps
if 'check' not in vars[v]:
# add check only when no
# user-specified checks exist
vars[v]['check'] = [
f'shape({n}, {i}) == {d}']
else:
# d is a non-linear function on v,
# hence, v must be a required input
# argument that n will depend on
is_required = True
if 'intent' not in vars[v]:
vars[v]['intent'] = []
if 'in' not in vars[v]['intent']:
vars[v]['intent'].append('in')
# v needs to be initialized before n
n_deps.append(v)
n_checks.append(
f'shape({n}, {i}) == {d}')
v_attr = vars[v].get('attrspec', [])
if not ('optional' in v_attr
or 'required' in v_attr):
v_attr.append(
'required' if is_required else 'optional')
if v_attr:
vars[v]['attrspec'] = v_attr
if coeffs_and_deps is not None:
# extend v dependencies with ones specified in attrspec
for v, (solver, deps) in coeffs_and_deps.items():
v_deps = vars[v].get('depend', [])
for aa in vars[v].get('attrspec', []):
if aa.startswith('depend'):
aa = ''.join(aa.split())
v_deps.extend(aa[7:-1].split(','))
if v_deps:
vars[v]['depend'] = list(set(v_deps))
if n not in v_deps:
n_deps.append(v)
elif isstring(vars[n]):
if 'charselector' in vars[n]:
if '*' in vars[n]['charselector']:
length = _eval_length(vars[n]['charselector']['*'],
params)
vars[n]['charselector']['*'] = length
elif 'len' in vars[n]['charselector']:
length = _eval_length(vars[n]['charselector']['len'],
params)
del vars[n]['charselector']['len']
vars[n]['charselector']['*'] = length
if n_checks:
vars[n]['check'] = n_checks
if n_deps:
vars[n]['depend'] = list(set(n_deps))
if '=' in vars[n]:
if 'attrspec' not in vars[n]:
vars[n]['attrspec'] = []
if ('optional' not in vars[n]['attrspec']) and \
('required' not in vars[n]['attrspec']):
vars[n]['attrspec'].append('optional')
if 'depend' not in vars[n]:
vars[n]['depend'] = []
for v, m in list(dep_matches.items()):
if m(vars[n]['=']):
vars[n]['depend'].append(v)
if not vars[n]['depend']:
del vars[n]['depend']
if isscalar(vars[n]):
vars[n]['='] = _eval_scalar(vars[n]['='], params)
for n in list(vars.keys()):
if n == block['name']: # n is block name
if 'note' in vars[n]:
block['note'] = vars[n]['note']
if block['block'] == 'function':
if 'result' in block and block['result'] in vars:
vars[n] = appenddecl(vars[n], vars[block['result']])
if 'prefix' in block:
pr = block['prefix']
pr1 = pr.replace('pure', '')
ispure = (not pr == pr1)
pr = pr1.replace('recursive', '')
isrec = (not pr == pr1)
m = typespattern[0].match(pr)
if m:
typespec, selector, attr, edecl = cracktypespec0(
m.group('this'), m.group('after'))
kindselect, charselect, typename = cracktypespec(
typespec, selector)
vars[n]['typespec'] = typespec
if kindselect:
if 'kind' in kindselect:
try:
kindselect['kind'] = eval(
kindselect['kind'], {}, params)
except Exception:
pass
vars[n]['kindselector'] = kindselect
if charselect:
vars[n]['charselector'] = charselect
if typename:
vars[n]['typename'] = typename
if ispure:
vars[n] = setattrspec(vars[n], 'pure')
if isrec:
vars[n] = setattrspec(vars[n], 'recursive')
else:
outmess(
                            'analyzevars: prefix (%s) was not used\n' % repr(block['prefix']))
if not block['block'] in ['module', 'pythonmodule', 'python module', 'block data']:
if 'commonvars' in block:
neededvars = copy.copy(block['args'] + block['commonvars'])
else:
neededvars = copy.copy(block['args'])
for n in list(vars.keys()):
if l_or(isintent_callback, isintent_aux)(vars[n]):
neededvars.append(n)
if 'entry' in block:
neededvars.extend(list(block['entry'].keys()))
for k in list(block['entry'].keys()):
for n in block['entry'][k]:
if n not in neededvars:
neededvars.append(n)
if block['block'] == 'function':
if 'result' in block:
neededvars.append(block['result'])
else:
neededvars.append(block['name'])
if block['block'] in ['subroutine', 'function']:
name = block['name']
if name in vars and 'intent' in vars[name]:
block['intent'] = vars[name]['intent']
if block['block'] == 'type':
neededvars.extend(list(vars.keys()))
for n in list(vars.keys()):
if n not in neededvars:
del vars[n]
return vars
analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I)
def expr2name(a, block, args=[]):
orig_a = a
a_is_expr = not analyzeargs_re_1.match(a)
if a_is_expr: # `a` is an expression
implicitrules, attrrules = buildimplicitrules(block)
at = determineexprtype(a, block['vars'], implicitrules)
na = 'e_'
for c in a:
c = c.lower()
if c not in string.ascii_lowercase + string.digits:
c = '_'
na = na + c
if na[-1] == '_':
na = na + 'e'
else:
na = na + '_e'
a = na
while a in block['vars'] or a in block['args']:
a = a + 'r'
if a in args:
k = 1
while a + str(k) in args:
k = k + 1
a = a + str(k)
if a_is_expr:
block['vars'][a] = at
else:
if a not in block['vars']:
if orig_a in block['vars']:
block['vars'][a] = block['vars'][orig_a]
else:
block['vars'][a] = {}
if 'externals' in block and orig_a in block['externals'] + block['interfaced']:
block['vars'][a] = setattrspec(block['vars'][a], 'external')
return a
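# Illustration with a hypothetical block: passing the expression 'a+b'
# mangles it into a generated name such as 'e_a_b_e', registers the
# inferred type in block['vars'], and returns the new name for use in
# the argument list.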
def analyzeargs(block):
setmesstext(block)
implicitrules, _ = buildimplicitrules(block)
if 'args' not in block:
block['args'] = []
args = []
for a in block['args']:
a = expr2name(a, block, args)
args.append(a)
block['args'] = args
if 'entry' in block:
for k, args1 in list(block['entry'].items()):
for a in args1:
if a not in block['vars']:
block['vars'][a] = {}
for b in block['body']:
if b['name'] in args:
if 'externals' not in block:
block['externals'] = []
if b['name'] not in block['externals']:
block['externals'].append(b['name'])
if 'result' in block and block['result'] not in block['vars']:
block['vars'][block['result']] = {}
return block
determineexprtype_re_1 = re.compile(r'\A\(.+?,.+?\)\Z', re.I)
determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P<name>\w+)|)\Z', re.I)
determineexprtype_re_3 = re.compile(
r'\A[+-]?[\d.]+[-\d+de.]*(_(?P<name>\w+)|)\Z', re.I)
determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I)
determineexprtype_re_5 = re.compile(r'\A(?P<name>\w+)\s*\(.*?\)\s*\Z', re.I)
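# In order, the five patterns above recognise: complex literals such as
# '(1., 2.)', integer literals with an optional kind suffix ('42_i8'),
# real literals ('1.5d0_dp'), parenthesised expressions, and call-like
# expressions of the form 'name(...)'.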
def _ensure_exprdict(r):
if isinstance(r, int):
return {'typespec': 'integer'}
if isinstance(r, float):
return {'typespec': 'real'}
if isinstance(r, complex):
return {'typespec': 'complex'}
if isinstance(r, dict):
return r
raise AssertionError(repr(r))
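# e.g. _ensure_exprdict(1) -> {'typespec': 'integer'} and
# _ensure_exprdict(1.5) -> {'typespec': 'real'}; an existing dict passes
# through unchanged, and anything else is a hard error.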
def determineexprtype(expr, vars, rules={}):
if expr in vars:
return _ensure_exprdict(vars[expr])
expr = expr.strip()
if determineexprtype_re_1.match(expr):
return {'typespec': 'complex'}
m = determineexprtype_re_2.match(expr)
if m:
if 'name' in m.groupdict() and m.group('name'):
outmess(
'determineexprtype: selected kind types not supported (%s)\n' % repr(expr))
return {'typespec': 'integer'}
m = determineexprtype_re_3.match(expr)
if m:
if 'name' in m.groupdict() and m.group('name'):
outmess(
'determineexprtype: selected kind types not supported (%s)\n' % repr(expr))
return {'typespec': 'real'}
for op in ['+', '-', '*', '/']:
for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@' + op + '@')]:
if e in vars:
return _ensure_exprdict(vars[e])
t = {}
if determineexprtype_re_4.match(expr): # in parenthesis
t = determineexprtype(expr[1:-1], vars, rules)
else:
m = determineexprtype_re_5.match(expr)
if m:
rn = m.group('name')
t = determineexprtype(m.group('name'), vars, rules)
if t and 'attrspec' in t:
del t['attrspec']
if not t:
if rn[0] in rules:
return _ensure_exprdict(rules[rn[0]])
if expr[0] in '\'"':
return {'typespec': 'character', 'charselector': {'*': '*'}}
if not t:
outmess(
            'determineexprtype: could not determine the type of expression %s.\n' % (repr(expr)))
return t
######
def crack2fortrangen(block, tab='\n', as_interface=False):
global skipfuncs, onlyfuncs
setmesstext(block)
ret = ''
if isinstance(block, list):
for g in block:
if g and g['block'] in ['function', 'subroutine']:
if g['name'] in skipfuncs:
continue
if onlyfuncs and g['name'] not in onlyfuncs:
continue
ret = ret + crack2fortrangen(g, tab, as_interface=as_interface)
return ret
prefix = ''
name = ''
args = ''
blocktype = block['block']
if blocktype == 'program':
return ''
argsl = []
if 'name' in block:
name = block['name']
if 'args' in block:
vars = block['vars']
for a in block['args']:
a = expr2name(a, block, argsl)
if not isintent_callback(vars[a]):
argsl.append(a)
if block['block'] == 'function' or argsl:
args = '(%s)' % ','.join(argsl)
f2pyenhancements = ''
if 'f2pyenhancements' in block:
for k in list(block['f2pyenhancements'].keys()):
f2pyenhancements = '%s%s%s %s' % (
f2pyenhancements, tab + tabchar, k, block['f2pyenhancements'][k])
intent_lst = block.get('intent', [])[:]
if blocktype == 'function' and 'callback' in intent_lst:
intent_lst.remove('callback')
if intent_lst:
f2pyenhancements = '%s%sintent(%s) %s' %\
(f2pyenhancements, tab + tabchar,
','.join(intent_lst), name)
use = ''
if 'use' in block:
use = use2fortran(block['use'], tab + tabchar)
common = ''
if 'common' in block:
common = common2fortran(block['common'], tab + tabchar)
if name == 'unknown_interface':
name = ''
result = ''
if 'result' in block:
result = ' result (%s)' % block['result']
if block['result'] not in argsl:
argsl.append(block['result'])
body = crack2fortrangen(block['body'], tab + tabchar, as_interface=as_interface)
vars = vars2fortran(
block, block['vars'], argsl, tab + tabchar, as_interface=as_interface)
mess = ''
if 'from' in block and not as_interface:
mess = '! in %s' % block['from']
if 'entry' in block:
entry_stmts = ''
for k, i in list(block['entry'].items()):
entry_stmts = '%s%sentry %s(%s)' \
% (entry_stmts, tab + tabchar, k, ','.join(i))
body = body + entry_stmts
if blocktype == 'block data' and name == '_BLOCK_DATA_':
name = ''
ret = '%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s' % (
tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name)
return ret
def common2fortran(common, tab=''):
ret = ''
for k in list(common.keys()):
if k == '_BLNK_':
ret = '%s%scommon %s' % (ret, tab, ','.join(common[k]))
else:
ret = '%s%scommon /%s/ %s' % (ret, tab, k, ','.join(common[k]))
return ret
def use2fortran(use, tab=''):
ret = ''
for m in list(use.keys()):
ret = '%s%suse %s,' % (ret, tab, m)
if use[m] == {}:
if ret and ret[-1] == ',':
ret = ret[:-1]
continue
if 'only' in use[m] and use[m]['only']:
ret = '%s only:' % (ret)
if 'map' in use[m] and use[m]['map']:
c = ' '
for k in list(use[m]['map'].keys()):
if k == use[m]['map'][k]:
ret = '%s%s%s' % (ret, c, k)
c = ','
else:
ret = '%s%s%s=>%s' % (ret, c, k, use[m]['map'][k])
c = ','
if ret and ret[-1] == ',':
ret = ret[:-1]
return ret
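# A small example (illustrative): use2fortran({'mod': {'map': {'a': 'b'}}})
# renders 'use mod, a=>b', while an entry with an empty dict renders a
# bare 'use mod'.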
def true_intent_list(var):
lst = var['intent']
ret = []
for intent in lst:
try:
f = globals()['isintent_%s' % intent]
except KeyError:
pass
else:
if f(var):
ret.append(intent)
return ret
def vars2fortran(block, vars, args, tab='', as_interface=False):
"""
TODO:
public sub
...
"""
setmesstext(block)
ret = ''
nout = []
for a in args:
if a in block['vars']:
nout.append(a)
if 'commonvars' in block:
for a in block['commonvars']:
if a in vars:
if a not in nout:
nout.append(a)
else:
errmess(
'vars2fortran: Confused?!: "%s" is not defined in vars.\n' % a)
if 'varnames' in block:
nout.extend(block['varnames'])
if not as_interface:
for a in list(vars.keys()):
if a not in nout:
nout.append(a)
for a in nout:
if 'depend' in vars[a]:
for d in vars[a]['depend']:
if d in vars and 'depend' in vars[d] and a in vars[d]['depend']:
errmess(
'vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n' % (a, d))
if 'externals' in block and a in block['externals']:
if isintent_callback(vars[a]):
ret = '%s%sintent(callback) %s' % (ret, tab, a)
ret = '%s%sexternal %s' % (ret, tab, a)
if isoptional(vars[a]):
ret = '%s%soptional %s' % (ret, tab, a)
if a in vars and 'typespec' not in vars[a]:
continue
cont = 1
for b in block['body']:
if a == b['name'] and b['block'] == 'function':
cont = 0
break
if cont:
continue
if a not in vars:
show(vars)
outmess('vars2fortran: No definition for argument "%s".\n' % a)
continue
if a == block['name']:
if block['block'] != 'function' or block.get('result'):
# 1) skip declaring a variable that name matches with
# subroutine name
# 2) skip declaring function when its type is
# declared via `result` construction
continue
if 'typespec' not in vars[a]:
if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']:
if a in args:
ret = '%s%sexternal %s' % (ret, tab, a)
continue
show(vars[a])
outmess('vars2fortran: No typespec for argument "%s".\n' % a)
continue
vardef = vars[a]['typespec']
if vardef == 'type' and 'typename' in vars[a]:
vardef = '%s(%s)' % (vardef, vars[a]['typename'])
selector = {}
if 'kindselector' in vars[a]:
selector = vars[a]['kindselector']
elif 'charselector' in vars[a]:
selector = vars[a]['charselector']
if '*' in selector:
if selector['*'] in ['*', ':']:
vardef = '%s*(%s)' % (vardef, selector['*'])
else:
vardef = '%s*%s' % (vardef, selector['*'])
else:
if 'len' in selector:
vardef = '%s(len=%s' % (vardef, selector['len'])
if 'kind' in selector:
vardef = '%s,kind=%s)' % (vardef, selector['kind'])
else:
vardef = '%s)' % (vardef)
elif 'kind' in selector:
vardef = '%s(kind=%s)' % (vardef, selector['kind'])
c = ' '
if 'attrspec' in vars[a]:
attr = [l for l in vars[a]['attrspec']
if l not in ['external']]
if attr:
vardef = '%s, %s' % (vardef, ','.join(attr))
c = ','
if 'dimension' in vars[a]:
vardef = '%s%sdimension(%s)' % (
vardef, c, ','.join(vars[a]['dimension']))
c = ','
if 'intent' in vars[a]:
lst = true_intent_list(vars[a])
if lst:
vardef = '%s%sintent(%s)' % (vardef, c, ','.join(lst))
c = ','
if 'check' in vars[a]:
vardef = '%s%scheck(%s)' % (vardef, c, ','.join(vars[a]['check']))
c = ','
if 'depend' in vars[a]:
vardef = '%s%sdepend(%s)' % (
vardef, c, ','.join(vars[a]['depend']))
c = ','
if '=' in vars[a]:
v = vars[a]['=']
if vars[a]['typespec'] in ['complex', 'double complex']:
try:
v = eval(v)
v = '(%s,%s)' % (v.real, v.imag)
except Exception:
pass
vardef = '%s :: %s=%s' % (vardef, a, v)
else:
vardef = '%s :: %s' % (vardef, a)
ret = '%s%s%s' % (ret, tab, vardef)
return ret
######
def crackfortran(files):
global usermodules
outmess('Reading fortran codes...\n', 0)
readfortrancode(files, crackline)
outmess('Post-processing...\n', 0)
usermodules = []
postlist = postcrack(grouplist[0])
outmess('Post-processing (stage 2)...\n', 0)
postlist = postcrack2(postlist)
return usermodules + postlist
def crack2fortran(block):
global f2py_version
pyf = crack2fortrangen(block) + '\n'
header = """! -*- f90 -*-
! Note: the context of this file is case sensitive.
"""
footer = """
! This file was auto-generated with f2py (version:%s).
! See:
! https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e
""" % (f2py_version)
return header + pyf + footer
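# Typical driver usage (a sketch of the flow exercised in __main__ below):
#     postlist = crackfortran(['foo.f90'])
#     with open('foo.pyf', 'w') as f:
#         f.write(crack2fortran(postlist))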
if __name__ == "__main__":
files = []
funcs = []
f = 1
f2 = 0
f3 = 0
showblocklist = 0
for l in sys.argv[1:]:
if l == '':
pass
elif l[0] == ':':
f = 0
elif l == '-quiet':
quiet = 1
verbose = 0
elif l == '-verbose':
verbose = 2
quiet = 0
elif l == '-fix':
if strictf77:
outmess(
'Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0)
skipemptyends = 1
sourcecodeform = 'fix'
elif l == '-skipemptyends':
skipemptyends = 1
elif l == '--ignore-contains':
ignorecontains = 1
elif l == '-f77':
strictf77 = 1
sourcecodeform = 'fix'
elif l == '-f90':
strictf77 = 0
sourcecodeform = 'free'
skipemptyends = 1
elif l == '-h':
f2 = 1
elif l == '-show':
showblocklist = 1
elif l == '-m':
f3 = 1
elif l[0] == '-':
errmess('Unknown option %s\n' % repr(l))
elif f2:
f2 = 0
pyffilename = l
elif f3:
f3 = 0
f77modulename = l
elif f:
try:
open(l).close()
files.append(l)
except OSError as detail:
errmess(f'OSError: {detail!s}\n')
else:
funcs.append(l)
if not strictf77 and f77modulename and not skipemptyends:
outmess("""\
Warning: You have specified a module name for non Fortran 77 code
that should not need one (except if you are scanning F90 code
for non-module blocks, but then you should use the flag -skipemptyends
and also make sure that the files do not contain programs without a program statement).
""", 0)
postlist = crackfortran(files)
if pyffilename:
outmess('Writing fortran code to file %s\n' % repr(pyffilename), 0)
pyf = crack2fortran(postlist)
with open(pyffilename, 'w') as f:
f.write(pyf)
if showblocklist:
show(postlist)
| 132,025 | Python | 38.316855 | 205 | 0.47872 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/use_rules.py | #!/usr/bin/env python3
"""
Build the 'use data from other modules' mechanism for f2py2e.
Unfinished.
Copyright 2000 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2000/09/10 12:35:43 $
Pearu Peterson
"""
__version__ = "$Revision: 1.3 $"[10:-1]
f2py_version = 'See `f2py -v`'
from .auxfuncs import (
applyrules, dictappend, gentitle, hasnote, outmess
)
usemodule_rules = {
'body': """
#begintitle#
static char doc_#apiname#[] = \"\\\nVariable wrapper signature:\\n\\
\t #name# = get_#name#()\\n\\
Arguments:\\n\\
#docstr#\";
extern F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#);
static PyObject *#apiname#(PyObject *capi_self, PyObject *capi_args) {
/*#decl#*/
\tif (!PyArg_ParseTuple(capi_args, \"\")) goto capi_fail;
printf(\"c: %d\\n\",F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#));
\treturn Py_BuildValue(\"\");
capi_fail:
\treturn NULL;
}
""",
'method': '\t{\"get_#name#\",#apiname#,METH_VARARGS|METH_KEYWORDS,doc_#apiname#},',
'need': ['F_MODFUNC']
}
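# The '#apiname#', '#name#', '#usemodulename#', etc. placeholders in the
# rules above are substituted by applyrules() using the dictionaries
# assembled in buildusevar() below.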
################
def buildusevars(m, r):
ret = {}
outmess(
'\t\tBuilding use variable hooks for module "%s" (feature only for F90/F95)...\n' % (m['name']))
varsmap = {}
revmap = {}
if 'map' in r:
for k in r['map'].keys():
if r['map'][k] in revmap:
outmess('\t\t\tVariable "%s<=%s" is already mapped by "%s". Skipping.\n' % (
r['map'][k], k, revmap[r['map'][k]]))
else:
revmap[r['map'][k]] = k
if 'only' in r and r['only']:
for v in r['map'].keys():
if r['map'][v] in m['vars']:
if revmap[r['map'][v]] == v:
varsmap[v] = r['map'][v]
else:
outmess('\t\t\tIgnoring map "%s=>%s". See above.\n' %
(v, r['map'][v]))
else:
outmess(
'\t\t\tNo definition for variable "%s=>%s". Skipping.\n' % (v, r['map'][v]))
else:
for v in m['vars'].keys():
if v in revmap:
varsmap[v] = revmap[v]
else:
varsmap[v] = v
for v in varsmap.keys():
ret = dictappend(ret, buildusevar(v, varsmap[v], m['vars'], m['name']))
return ret
def buildusevar(name, realname, vars, usemodulename):
outmess('\t\t\tConstructing wrapper function for variable "%s=>%s"...\n' % (
name, realname))
ret = {}
vrd = {'name': name,
'realname': realname,
'REALNAME': realname.upper(),
'usemodulename': usemodulename,
'USEMODULENAME': usemodulename.upper(),
'texname': name.replace('_', '\\_'),
'begintitle': gentitle('%s=>%s' % (name, realname)),
'endtitle': gentitle('end of %s=>%s' % (name, realname)),
'apiname': '#modulename#_use_%s_from_%s' % (realname, usemodulename)
}
nummap = {0: 'Ro', 1: 'Ri', 2: 'Rii', 3: 'Riii', 4: 'Riv',
5: 'Rv', 6: 'Rvi', 7: 'Rvii', 8: 'Rviii', 9: 'Rix'}
vrd['texnamename'] = name
for i in nummap.keys():
vrd['texnamename'] = vrd['texnamename'].replace(repr(i), nummap[i])
if hasnote(vars[realname]):
vrd['note'] = vars[realname]['note']
rd = dictappend({}, vrd)
print(name, realname, vars[realname])
ret = applyrules(usemodule_rules, rd)
return ret
| 3,587 | Python | 30.473684 | 104 | 0.537218 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_common.py | import os
import sys
import pytest
import numpy as np
from . import util
class TestCommonBlock(util.F2PyTest):
sources = [util.getpath("tests", "src", "common", "block.f")]
@pytest.mark.skipif(sys.platform == "win32",
reason="Fails with MinGW64 Gfortran (Issue #9673)")
def test_common_block(self):
self.module.initcb()
assert self.module.block.long_bn == np.array(1.0, dtype=np.float64)
assert self.module.block.string_bn == np.array("2", dtype="|S1")
assert self.module.block.ok == np.array(3, dtype=np.int32)
| 584 | Python | 29.789472 | 75 | 0.640411 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_string.py | import os
import pytest
import textwrap
import numpy as np
from . import util
class TestString(util.F2PyTest):
sources = [util.getpath("tests", "src", "string", "char.f90")]
@pytest.mark.slow
def test_char(self):
strings = np.array(["ab", "cd", "ef"], dtype="c").T
inp, out = self.module.char_test.change_strings(
strings, strings.shape[1])
assert inp == pytest.approx(strings)
expected = strings.copy()
expected[1, :] = "AAA"
assert out == pytest.approx(expected)
class TestDocStringArguments(util.F2PyTest):
sources = [util.getpath("tests", "src", "string", "string.f")]
def test_example(self):
a = np.array(b"123\0\0")
b = np.array(b"123\0\0")
c = np.array(b"123")
d = np.array(b"123")
self.module.foo(a, b, c, d)
assert a.tobytes() == b"123\0\0"
assert b.tobytes() == b"B23\0\0"
assert c.tobytes() == b"123"
assert d.tobytes() == b"D23"
class TestFixedString(util.F2PyTest):
sources = [util.getpath("tests", "src", "string", "fixed_string.f90")]
@staticmethod
def _sint(s, start=0, end=None):
"""Return the content of a string buffer as integer value.
For example:
_sint('1234') -> 4321
_sint('123A') -> 17321
"""
if isinstance(s, np.ndarray):
s = s.tobytes()
elif isinstance(s, str):
s = s.encode()
assert isinstance(s, bytes)
if end is None:
end = len(s)
i = 0
for j in range(start, min(end, len(s))):
i += s[j] * 10**j
return i
def _get_input(self, intent="in"):
if intent in ["in"]:
yield ""
yield "1"
yield "1234"
yield "12345"
yield b""
yield b"\0"
yield b"1"
yield b"\01"
yield b"1\0"
yield b"1234"
yield b"12345"
yield np.ndarray((), np.bytes_, buffer=b"") # array(b'', dtype='|S0')
yield np.array(b"") # array(b'', dtype='|S1')
yield np.array(b"\0")
yield np.array(b"1")
yield np.array(b"1\0")
yield np.array(b"\01")
yield np.array(b"1234")
yield np.array(b"123\0")
yield np.array(b"12345")
def test_intent_in(self):
for s in self._get_input():
r = self.module.test_in_bytes4(s)
# also checks that s is not changed inplace
expected = self._sint(s, end=4)
assert r == expected, s
def test_intent_inout(self):
for s in self._get_input(intent="inout"):
rest = self._sint(s, start=4)
r = self.module.test_inout_bytes4(s)
expected = self._sint(s, end=4)
assert r == expected
# check that the rest of input string is preserved
assert rest == self._sint(s, start=4)
| 2,962 | Python | 28.336633 | 78 | 0.516205 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/util.py | """
Utility functions for
- building and importing modules on test time, using a temporary location
- detecting if compilers are present
- determining paths to tests
"""
import os
import sys
import subprocess
import tempfile
import shutil
import atexit
import textwrap
import re
import pytest
import contextlib
import numpy
from pathlib import Path
from numpy.compat import asbytes, asstr
from numpy.testing import temppath
from importlib import import_module
#
# Maintaining a temporary module directory
#
_module_dir = None
_module_num = 5403
def _cleanup():
global _module_dir
if _module_dir is not None:
try:
sys.path.remove(_module_dir)
except ValueError:
pass
try:
shutil.rmtree(_module_dir)
except OSError:
pass
_module_dir = None
def get_module_dir():
global _module_dir
if _module_dir is None:
_module_dir = tempfile.mkdtemp()
atexit.register(_cleanup)
if _module_dir not in sys.path:
sys.path.insert(0, _module_dir)
return _module_dir
def get_temp_module_name():
# Assume single-threaded, and the module dir usable only by this thread
global _module_num
d = get_module_dir()
name = "_test_ext_module_%d" % _module_num
_module_num += 1
if name in sys.modules:
# this should not be possible, but check anyway
raise RuntimeError("Temporary module name already in use.")
return name
def _memoize(func):
memo = {}
def wrapper(*a, **kw):
key = repr((a, kw))
if key not in memo:
try:
memo[key] = func(*a, **kw)
except Exception as e:
memo[key] = e
raise
ret = memo[key]
if isinstance(ret, Exception):
raise ret
return ret
wrapper.__name__ = func.__name__
return wrapper
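# _memoize caches per-argument results (and exceptions, which are
# re-raised on later calls), so repeated test classes requesting the same
# sources/options reuse the already-built extension module.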
#
# Building modules
#
@_memoize
def build_module(source_files, options=[], skip=[], only=[], module_name=None):
"""
Compile and import a f2py module, built from the given files.
"""
code = f"import sys; sys.path = {sys.path!r}; import numpy.f2py; numpy.f2py.main()"
d = get_module_dir()
# Copy files
dst_sources = []
f2py_sources = []
for fn in source_files:
if not os.path.isfile(fn):
raise RuntimeError("%s is not a file" % fn)
dst = os.path.join(d, os.path.basename(fn))
shutil.copyfile(fn, dst)
dst_sources.append(dst)
base, ext = os.path.splitext(dst)
if ext in (".f90", ".f", ".c", ".pyf"):
f2py_sources.append(dst)
# Prepare options
if module_name is None:
module_name = get_temp_module_name()
f2py_opts = ["-c", "-m", module_name] + options + f2py_sources
if skip:
f2py_opts += ["skip:"] + skip
if only:
f2py_opts += ["only:"] + only
# Build
cwd = os.getcwd()
try:
os.chdir(d)
cmd = [sys.executable, "-c", code] + f2py_opts
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = p.communicate()
if p.returncode != 0:
raise RuntimeError("Running f2py failed: %s\n%s" %
(cmd[4:], asstr(out)))
finally:
os.chdir(cwd)
# Partial cleanup
for fn in dst_sources:
os.unlink(fn)
# Import
return import_module(module_name)
@_memoize
def build_code(source_code,
options=[],
skip=[],
only=[],
suffix=None,
module_name=None):
"""
Compile and import Fortran code using f2py.
"""
if suffix is None:
suffix = ".f"
with temppath(suffix=suffix) as path:
with open(path, "w") as f:
f.write(source_code)
return build_module([path],
options=options,
skip=skip,
only=only,
module_name=module_name)
#
# Check if compilers are available at all...
#
_compiler_status = None
def _get_compiler_status():
global _compiler_status
if _compiler_status is not None:
return _compiler_status
_compiler_status = (False, False, False)
# XXX: this is really ugly. But I don't know how to invoke Distutils
# in a safer way...
code = textwrap.dedent(f"""\
import os
import sys
sys.path = {repr(sys.path)}
def configuration(parent_name='',top_path=None):
global config
from numpy.distutils.misc_util import Configuration
config = Configuration('', parent_name, top_path)
return config
from numpy.distutils.core import setup
setup(configuration=configuration)
config_cmd = config.get_config_cmd()
have_c = config_cmd.try_compile('void foo() {{}}')
print('COMPILERS:%%d,%%d,%%d' %% (have_c,
config.have_f77c(),
config.have_f90c()))
sys.exit(99)
""")
code = code % dict(syspath=repr(sys.path))
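    # The '%'-formatting pass is still required: it collapses the escaped
    # '%%' sequences in the f-string template above into literal '%'.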
tmpdir = tempfile.mkdtemp()
try:
script = os.path.join(tmpdir, "setup.py")
with open(script, "w") as f:
f.write(code)
cmd = [sys.executable, "setup.py", "config"]
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmpdir)
out, err = p.communicate()
finally:
shutil.rmtree(tmpdir)
m = re.search(br"COMPILERS:(\d+),(\d+),(\d+)", out)
if m:
_compiler_status = (
bool(int(m.group(1))),
bool(int(m.group(2))),
bool(int(m.group(3))),
)
# Finished
return _compiler_status
def has_c_compiler():
return _get_compiler_status()[0]
def has_f77_compiler():
return _get_compiler_status()[1]
def has_f90_compiler():
return _get_compiler_status()[2]
#
# Building with distutils
#
@_memoize
def build_module_distutils(source_files, config_code, module_name, **kw):
"""
Build a module via distutils and import it.
"""
d = get_module_dir()
# Copy files
dst_sources = []
for fn in source_files:
if not os.path.isfile(fn):
raise RuntimeError("%s is not a file" % fn)
dst = os.path.join(d, os.path.basename(fn))
shutil.copyfile(fn, dst)
dst_sources.append(dst)
# Build script
config_code = textwrap.dedent(config_code).replace("\n", "\n ")
code = fr"""
import os
import sys
sys.path = {repr(sys.path)}
def configuration(parent_name='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('', parent_name, top_path)
{config_code}
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(configuration=configuration)
"""
script = os.path.join(d, get_temp_module_name() + ".py")
dst_sources.append(script)
with open(script, "wb") as f:
f.write(asbytes(code))
# Build
cwd = os.getcwd()
try:
os.chdir(d)
cmd = [sys.executable, script, "build_ext", "-i"]
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = p.communicate()
if p.returncode != 0:
raise RuntimeError("Running distutils build failed: %s\n%s" %
(cmd[4:], asstr(out)))
finally:
os.chdir(cwd)
# Partial cleanup
for fn in dst_sources:
os.unlink(fn)
# Import
__import__(module_name)
return sys.modules[module_name]
#
# Unittest convenience
#
class F2PyTest:
code = None
sources = None
options = []
skip = []
only = []
suffix = ".f"
module = None
module_name = None
def setup_method(self):
if sys.platform == "win32":
pytest.skip("Fails with MinGW64 Gfortran (Issue #9673)")
if self.module is not None:
return
# Check compiler availability first
if not has_c_compiler():
pytest.skip("No C compiler available")
codes = []
if self.sources:
codes.extend(self.sources)
if self.code is not None:
codes.append(self.suffix)
needs_f77 = False
needs_f90 = False
needs_pyf = False
for fn in codes:
if str(fn).endswith(".f"):
needs_f77 = True
elif str(fn).endswith(".f90"):
needs_f90 = True
elif str(fn).endswith(".pyf"):
needs_pyf = True
if needs_f77 and not has_f77_compiler():
pytest.skip("No Fortran 77 compiler available")
if needs_f90 and not has_f90_compiler():
pytest.skip("No Fortran 90 compiler available")
if needs_pyf and not (has_f90_compiler() or has_f77_compiler()):
pytest.skip("No Fortran compiler available")
# Build the module
if self.code is not None:
self.module = build_code(
self.code,
options=self.options,
skip=self.skip,
only=self.only,
suffix=self.suffix,
module_name=self.module_name,
)
if self.sources is not None:
self.module = build_module(
self.sources,
options=self.options,
skip=self.skip,
only=self.only,
module_name=self.module_name,
)
#
# Helper functions
#
def getpath(*a):
# Package root
d = Path(numpy.f2py.__file__).parent.resolve()
return d.joinpath(*a)
@contextlib.contextmanager
def switchdir(path):
curpath = Path.cwd()
os.chdir(path)
try:
yield
finally:
os.chdir(curpath)
| 10,196 | Python | 23.810219 | 87 | 0.543743 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_f2cmap.py | from . import util
import numpy as np
class TestF2Cmap(util.F2PyTest):
sources = [
util.getpath("tests", "src", "f2cmap", "isoFortranEnvMap.f90"),
util.getpath("tests", "src", "f2cmap", ".f2py_f2cmap")
]
# gh-15095
def test_long_long_map(self):
inp = np.ones(3)
out = self.module.func1(inp)
exp_out = 3
assert out == exp_out
| 391 | Python | 23.499999 | 71 | 0.57289 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_semicolon_split.py | import platform
import pytest
import numpy as np
from . import util
@pytest.mark.skipif(
platform.system() == "Darwin",
reason="Prone to error when run with numpy/f2py/tests on mac os, "
"but not when run in isolation",
)
@pytest.mark.skipif(
np.dtype(np.intp).itemsize < 8,
reason="32-bit builds are buggy"
)
class TestMultiline(util.F2PyTest):
suffix = ".pyf"
module_name = "multiline"
code = f"""
python module {module_name}
usercode '''
void foo(int* x) {{
char dummy = ';';
*x = 42;
}}
'''
interface
subroutine foo(x)
intent(c) foo
integer intent(out) :: x
end subroutine foo
end interface
end python module {module_name}
"""
def test_multiline(self):
assert self.module.foo() == 42
@pytest.mark.skipif(
platform.system() == "Darwin",
reason="Prone to error when run with numpy/f2py/tests on mac os, "
"but not when run in isolation",
)
@pytest.mark.skipif(
np.dtype(np.intp).itemsize < 8,
reason="32-bit builds are buggy"
)
class TestCallstatement(util.F2PyTest):
suffix = ".pyf"
module_name = "callstatement"
code = f"""
python module {module_name}
usercode '''
void foo(int* x) {{
}}
'''
interface
subroutine foo(x)
intent(c) foo
integer intent(out) :: x
callprotoargument int*
callstatement {{ &
; &
x = 42; &
}}
end subroutine foo
end interface
end python module {module_name}
"""
def test_callstatement(self):
assert self.module.foo() == 42
| 1,635 | Python | 20.813333 | 70 | 0.585321 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_abstract_interface.py | from pathlib import Path
import textwrap
from . import util
from numpy.f2py import crackfortran
class TestAbstractInterface(util.F2PyTest):
sources = [util.getpath("tests", "src", "abstract_interface", "foo.f90")]
skip = ["add1", "add2"]
def test_abstract_interface(self):
assert self.module.ops_module.foo(3, 5) == (8, 13)
def test_parse_abstract_interface(self):
# Test gh18403
fpath = util.getpath("tests", "src", "abstract_interface",
"gh18403_mod.f90")
mod = crackfortran.crackfortran([str(fpath)])
assert len(mod) == 1
assert len(mod[0]["body"]) == 1
assert mod[0]["body"][0]["block"] == "abstract interface"
| 721 | Python | 30.391303 | 77 | 0.61165 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_return_complex.py | import pytest
from numpy import array
from . import util
class TestReturnComplex(util.F2PyTest):
def check_function(self, t, tname):
if tname in ["t0", "t8", "s0", "s8"]:
err = 1e-5
else:
err = 0.0
assert abs(t(234j) - 234.0j) <= err
assert abs(t(234.6) - 234.6) <= err
assert abs(t(234) - 234.0) <= err
assert abs(t(234.6 + 3j) - (234.6 + 3j)) <= err
# assert abs(t('234')-234.)<=err
# assert abs(t('234.6')-234.6)<=err
assert abs(t(-234) + 234.0) <= err
assert abs(t([234]) - 234.0) <= err
assert abs(t((234, )) - 234.0) <= err
assert abs(t(array(234)) - 234.0) <= err
assert abs(t(array(23 + 4j, "F")) - (23 + 4j)) <= err
assert abs(t(array([234])) - 234.0) <= err
assert abs(t(array([[234]])) - 234.0) <= err
assert abs(t(array([234], "b")) + 22.0) <= err
assert abs(t(array([234], "h")) - 234.0) <= err
assert abs(t(array([234], "i")) - 234.0) <= err
assert abs(t(array([234], "l")) - 234.0) <= err
assert abs(t(array([234], "q")) - 234.0) <= err
assert abs(t(array([234], "f")) - 234.0) <= err
assert abs(t(array([234], "d")) - 234.0) <= err
assert abs(t(array([234 + 3j], "F")) - (234 + 3j)) <= err
assert abs(t(array([234], "D")) - 234.0) <= err
# pytest.raises(TypeError, t, array([234], 'a1'))
pytest.raises(TypeError, t, "abc")
pytest.raises(IndexError, t, [])
pytest.raises(IndexError, t, ())
pytest.raises(TypeError, t, t)
pytest.raises(TypeError, t, {})
try:
r = t(10**400)
assert repr(r) in ["(inf+0j)", "(Infinity+0j)"]
except OverflowError:
pass
class TestFReturnComplex(TestReturnComplex):
sources = [
util.getpath("tests", "src", "return_complex", "foo77.f"),
util.getpath("tests", "src", "return_complex", "foo90.f90"),
]
@pytest.mark.parametrize("name", "t0,t8,t16,td,s0,s8,s16,sd".split(","))
def test_all_f77(self, name):
self.check_function(getattr(self.module, name), name)
@pytest.mark.parametrize("name", "t0,t8,t16,td,s0,s8,s16,sd".split(","))
def test_all_f90(self, name):
self.check_function(getattr(self.module.f90_return_complex, name),
name)
| 2,390 | Python | 35.227272 | 76 | 0.517155 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_return_integer.py | import pytest
from numpy import array
from . import util
class TestReturnInteger(util.F2PyTest):
def check_function(self, t, tname):
assert t(123) == 123
assert t(123.6) == 123
assert t("123") == 123
assert t(-123) == -123
assert t([123]) == 123
assert t((123, )) == 123
assert t(array(123)) == 123
assert t(array([123])) == 123
assert t(array([[123]])) == 123
assert t(array([123], "b")) == 123
assert t(array([123], "h")) == 123
assert t(array([123], "i")) == 123
assert t(array([123], "l")) == 123
assert t(array([123], "B")) == 123
assert t(array([123], "f")) == 123
assert t(array([123], "d")) == 123
# pytest.raises(ValueError, t, array([123],'S3'))
pytest.raises(ValueError, t, "abc")
pytest.raises(IndexError, t, [])
pytest.raises(IndexError, t, ())
pytest.raises(Exception, t, t)
pytest.raises(Exception, t, {})
if tname in ["t8", "s8"]:
pytest.raises(OverflowError, t, 100000000000000000000000)
pytest.raises(OverflowError, t, 10000000011111111111111.23)
class TestFReturnInteger(TestReturnInteger):
sources = [
util.getpath("tests", "src", "return_integer", "foo77.f"),
util.getpath("tests", "src", "return_integer", "foo90.f90"),
]
@pytest.mark.parametrize("name",
"t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","))
def test_all_f77(self, name):
self.check_function(getattr(self.module, name), name)
@pytest.mark.parametrize("name",
"t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","))
def test_all_f90(self, name):
self.check_function(getattr(self.module.f90_return_integer, name),
name)
| 1,850 | Python | 32.053571 | 74 | 0.542162 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_regression.py | import os
import pytest
import numpy as np
from . import util
class TestIntentInOut(util.F2PyTest):
# Check that intent(in out) translates as intent(inout)
sources = [util.getpath("tests", "src", "regression", "inout.f90")]
@pytest.mark.slow
def test_inout(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.float32)[::2]
pytest.raises(ValueError, self.module.foo, x)
# check values with contiguous array
x = np.arange(3, dtype=np.float32)
self.module.foo(x)
assert np.allclose(x, [3, 1, 2])
class TestNegativeBounds(util.F2PyTest):
# Check that negative bounds work correctly
sources = [util.getpath("tests", "src", "negative_bounds", "issue_20853.f90")]
@pytest.mark.slow
def test_negbound(self):
xvec = np.arange(12)
xlow = -6
xhigh = 4
# Calculate the upper bound,
# Keeping the 1 index in mind
def ubound(xl, xh):
return xh - xl + 1
rval = self.module.foo(is_=xlow, ie_=xhigh,
arr=xvec[:ubound(xlow, xhigh)])
expval = np.arange(11, dtype = np.float32)
assert np.allclose(rval, expval)
class TestNumpyVersionAttribute(util.F2PyTest):
    # Check that the attribute __f2py_numpy_version__ is present
    # in the compiled module and that it has the value np.__version__.
sources = [util.getpath("tests", "src", "regression", "inout.f90")]
@pytest.mark.slow
def test_numpy_version_attribute(self):
# Check that self.module has an attribute named "__f2py_numpy_version__"
assert hasattr(self.module, "__f2py_numpy_version__")
# Check that the attribute __f2py_numpy_version__ is a string
assert isinstance(self.module.__f2py_numpy_version__, str)
# Check that __f2py_numpy_version__ has the value numpy.__version__
assert np.__version__ == self.module.__f2py_numpy_version__
def test_include_path():
incdir = np.f2py.get_include()
fnames_in_dir = os.listdir(incdir)
for fname in ("fortranobject.c", "fortranobject.h"):
assert fname in fnames_in_dir
| 2,157 | Python | 31.208955 | 82 | 0.629578 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_parameter.py | import os
import pytest
import numpy as np
from . import util
class TestParameters(util.F2PyTest):
    # Check that Fortran PARAMETER constants are evaluated and folded correctly
sources = [
util.getpath("tests", "src", "parameter", "constant_real.f90"),
util.getpath("tests", "src", "parameter", "constant_integer.f90"),
util.getpath("tests", "src", "parameter", "constant_both.f90"),
util.getpath("tests", "src", "parameter", "constant_compound.f90"),
util.getpath("tests", "src", "parameter", "constant_non_compound.f90"),
]
@pytest.mark.slow
def test_constant_real_single(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.float32)[::2]
pytest.raises(ValueError, self.module.foo_single, x)
# check values with contiguous array
x = np.arange(3, dtype=np.float32)
self.module.foo_single(x)
assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2])
@pytest.mark.slow
def test_constant_real_double(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.float64)[::2]
pytest.raises(ValueError, self.module.foo_double, x)
# check values with contiguous array
x = np.arange(3, dtype=np.float64)
self.module.foo_double(x)
assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2])
@pytest.mark.slow
def test_constant_compound_int(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.int32)[::2]
pytest.raises(ValueError, self.module.foo_compound_int, x)
# check values with contiguous array
x = np.arange(3, dtype=np.int32)
self.module.foo_compound_int(x)
assert np.allclose(x, [0 + 1 + 2 * 6, 1, 2])
@pytest.mark.slow
def test_constant_non_compound_int(self):
# check values
x = np.arange(4, dtype=np.int32)
self.module.foo_non_compound_int(x)
assert np.allclose(x, [0 + 1 + 2 + 3 * 4, 1, 2, 3])
@pytest.mark.slow
def test_constant_integer_int(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.int32)[::2]
pytest.raises(ValueError, self.module.foo_int, x)
# check values with contiguous array
x = np.arange(3, dtype=np.int32)
self.module.foo_int(x)
assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2])
@pytest.mark.slow
def test_constant_integer_long(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.int64)[::2]
pytest.raises(ValueError, self.module.foo_long, x)
# check values with contiguous array
x = np.arange(3, dtype=np.int64)
self.module.foo_long(x)
assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2])
@pytest.mark.slow
def test_constant_both(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.float64)[::2]
pytest.raises(ValueError, self.module.foo, x)
# check values with contiguous array
x = np.arange(3, dtype=np.float64)
self.module.foo(x)
assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3])
@pytest.mark.slow
def test_constant_no(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.float64)[::2]
pytest.raises(ValueError, self.module.foo_no, x)
# check values with contiguous array
x = np.arange(3, dtype=np.float64)
self.module.foo_no(x)
assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3])
@pytest.mark.slow
def test_constant_sum(self):
# non-contiguous should raise error
x = np.arange(6, dtype=np.float64)[::2]
pytest.raises(ValueError, self.module.foo_sum, x)
# check values with contiguous array
x = np.arange(3, dtype=np.float64)
self.module.foo_sum(x)
assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3])
| 3,941 | Python | 33.884955 | 79 | 0.59325 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_kind.py | import os
import pytest
from numpy.f2py.crackfortran import (
_selected_int_kind_func as selected_int_kind,
_selected_real_kind_func as selected_real_kind,
)
from . import util
class TestKind(util.F2PyTest):
sources = [util.getpath("tests", "src", "kind", "foo.f90")]
def test_all(self):
selectedrealkind = self.module.selectedrealkind
selectedintkind = self.module.selectedintkind
for i in range(40):
assert selectedintkind(i) == selected_int_kind(
i
), f"selectedintkind({i}): expected {selected_int_kind(i)!r} but got {selectedintkind(i)!r}"
for i in range(20):
assert selectedrealkind(i) == selected_real_kind(
i
), f"selectedrealkind({i}): expected {selected_real_kind(i)!r} but got {selectedrealkind(i)!r}"
| 847 | Python | 30.407406 | 107 | 0.62928 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_crackfortran.py | import pytest
import numpy as np
from numpy.f2py.crackfortran import markinnerspaces
from . import util
from numpy.f2py import crackfortran
import textwrap
class TestNoSpace(util.F2PyTest):
# issue gh-15035: add handling for endsubroutine, endfunction with no space
# between "end" and the block name
sources = [util.getpath("tests", "src", "crackfortran", "gh15035.f")]
def test_module(self):
k = np.array([1, 2, 3], dtype=np.float64)
w = np.array([1, 2, 3], dtype=np.float64)
self.module.subb(k)
assert np.allclose(k, w + 1)
self.module.subc([w, k])
assert np.allclose(k, w + 1)
assert self.module.t0(23) == b"2"
class TestPublicPrivate:
def test_defaultPrivate(self):
fpath = util.getpath("tests", "src", "crackfortran", "privatemod.f90")
mod = crackfortran.crackfortran([str(fpath)])
assert len(mod) == 1
mod = mod[0]
assert "private" in mod["vars"]["a"]["attrspec"]
assert "public" not in mod["vars"]["a"]["attrspec"]
assert "private" in mod["vars"]["b"]["attrspec"]
assert "public" not in mod["vars"]["b"]["attrspec"]
assert "private" not in mod["vars"]["seta"]["attrspec"]
assert "public" in mod["vars"]["seta"]["attrspec"]
def test_defaultPublic(self, tmp_path):
fpath = util.getpath("tests", "src", "crackfortran", "publicmod.f90")
mod = crackfortran.crackfortran([str(fpath)])
assert len(mod) == 1
mod = mod[0]
assert "private" in mod["vars"]["a"]["attrspec"]
assert "public" not in mod["vars"]["a"]["attrspec"]
assert "private" not in mod["vars"]["seta"]["attrspec"]
assert "public" in mod["vars"]["seta"]["attrspec"]
def test_access_type(self, tmp_path):
fpath = util.getpath("tests", "src", "crackfortran", "accesstype.f90")
mod = crackfortran.crackfortran([str(fpath)])
assert len(mod) == 1
tt = mod[0]['vars']
assert set(tt['a']['attrspec']) == {'private', 'bind(c)'}
assert set(tt['b_']['attrspec']) == {'public', 'bind(c)'}
assert set(tt['c']['attrspec']) == {'public'}
class TestModuleProcedure():
def test_moduleOperators(self, tmp_path):
fpath = util.getpath("tests", "src", "crackfortran", "operators.f90")
mod = crackfortran.crackfortran([str(fpath)])
assert len(mod) == 1
mod = mod[0]
assert "body" in mod and len(mod["body"]) == 9
assert mod["body"][1]["name"] == "operator(.item.)"
assert "implementedby" in mod["body"][1]
assert mod["body"][1]["implementedby"] == \
["item_int", "item_real"]
assert mod["body"][2]["name"] == "operator(==)"
assert "implementedby" in mod["body"][2]
assert mod["body"][2]["implementedby"] == ["items_are_equal"]
assert mod["body"][3]["name"] == "assignment(=)"
assert "implementedby" in mod["body"][3]
assert mod["body"][3]["implementedby"] == \
["get_int", "get_real"]
class TestExternal(util.F2PyTest):
# issue gh-17859: add external attribute support
sources = [util.getpath("tests", "src", "crackfortran", "gh17859.f")]
def test_external_as_statement(self):
def incr(x):
return x + 123
r = self.module.external_as_statement(incr)
assert r == 123
def test_external_as_attribute(self):
def incr(x):
return x + 123
r = self.module.external_as_attribute(incr)
assert r == 123
class TestCrackFortran(util.F2PyTest):
# gh-2848: commented lines between parameters in subroutine parameter lists
sources = [util.getpath("tests", "src", "crackfortran", "gh2848.f90")]
def test_gh2848(self):
r = self.module.gh2848(1, 2)
assert r == (1, 2)
class TestMarkinnerspaces:
# gh-14118: markinnerspaces does not handle multiple quotations
def test_do_not_touch_normal_spaces(self):
test_list = ["a ", " a", "a b c", "'abcdefghij'"]
for i in test_list:
assert markinnerspaces(i) == i
def test_one_relevant_space(self):
assert markinnerspaces("a 'b c' \\' \\'") == "a 'b@_@c' \\' \\'"
assert markinnerspaces(r'a "b c" \" \"') == r'a "b@_@c" \" \"'
def test_ignore_inner_quotes(self):
assert markinnerspaces("a 'b c\" \" d' e") == "a 'b@_@c\"@_@\"@_@d' e"
assert markinnerspaces("a \"b c' ' d\" e") == "a \"b@_@c'@_@'@_@d\" e"
def test_multiple_relevant_spaces(self):
assert markinnerspaces("a 'b c' 'd e'") == "a 'b@_@c' 'd@_@e'"
assert markinnerspaces(r'a "b c" "d e"') == r'a "b@_@c" "d@_@e"'
class TestDimSpec(util.F2PyTest):
"""This test suite tests various expressions that are used as dimension
specifications.
    There exist two use cases where analyzing dimension
    specifications is important.
In the first case, the size of output arrays must be defined based
on the inputs to a Fortran function. Because Fortran supports
arbitrary bases for indexing, for instance, `arr(lower:upper)`,
f2py has to evaluate an expression `upper - lower + 1` where
`lower` and `upper` are arbitrary expressions of input parameters.
The evaluation is performed in C, so f2py has to translate Fortran
expressions to valid C expressions (an alternative approach is
that a developer specifies the corresponding C expressions in a
.pyf file).
    In the second case, the user provides an input array with a given
    size, but some hidden parameters used in the dimension specifications
    need to be determined based on the input array size. This is a
harder problem because f2py has to solve the inverse problem: find
a parameter `p` such that `upper(p) - lower(p) + 1` equals to the
size of input array. In the case when this equation cannot be
    solved (e.g. because the input array size is wrong), f2py raises an
    error before calling the Fortran function (which otherwise would
    likely crash the Python process when the size of the input arrays is
    wrong). f2py currently supports this case only when the equation
    is linear with respect to the unknown parameter.
"""
suffix = ".f90"
code_template = textwrap.dedent("""
function get_arr_size_{count}(a, n) result (length)
integer, intent(in) :: n
integer, dimension({dimspec}), intent(out) :: a
integer length
length = size(a)
end function
subroutine get_inv_arr_size_{count}(a, n)
integer :: n
! the value of n is computed in f2py wrapper
!f2py intent(out) n
integer, dimension({dimspec}), intent(in) :: a
if (a({first}).gt.0) then
print*, "a=", a
endif
end subroutine
""")
linear_dimspecs = [
"n", "2*n", "2:n", "n/2", "5 - n/2", "3*n:20", "n*(n+1):n*(n+5)",
"2*n, n"
]
nonlinear_dimspecs = ["2*n:3*n*n+2*n"]
all_dimspecs = linear_dimspecs + nonlinear_dimspecs
code = ""
for count, dimspec in enumerate(all_dimspecs):
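        # Extract the lower bound of each dimension (defaulting to 1); these
        # become the "first" indices probed inside get_inv_arr_size_{count}.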
lst = [(d.split(":")[0] if ":" in d else "1") for d in dimspec.split(',')]
code += code_template.format(
count=count,
dimspec=dimspec,
first=", ".join(lst),
)
@pytest.mark.parametrize("dimspec", all_dimspecs)
def test_array_size(self, dimspec):
count = self.all_dimspecs.index(dimspec)
get_arr_size = getattr(self.module, f"get_arr_size_{count}")
for n in [1, 2, 3, 4, 5]:
sz, a = get_arr_size(n)
assert a.size == sz
@pytest.mark.parametrize("dimspec", all_dimspecs)
def test_inv_array_size(self, dimspec):
count = self.all_dimspecs.index(dimspec)
get_arr_size = getattr(self.module, f"get_arr_size_{count}")
get_inv_arr_size = getattr(self.module, f"get_inv_arr_size_{count}")
for n in [1, 2, 3, 4, 5]:
sz, a = get_arr_size(n)
if dimspec in self.nonlinear_dimspecs:
                # one must specify n as input; the call will ensure
                # that a and n are compatible:
n1 = get_inv_arr_size(a, n)
else:
# in case of linear dependence, n can be determined
# from the shape of a:
n1 = get_inv_arr_size(a)
            # n1 may differ from n (for instance, when the size of `a`
            # depends only on some fraction of `n`), but it must produce
            # an array of the same size
sz1, _ = get_arr_size(n1)
assert sz == sz1, (n, n1, sz, sz1)
class TestModuleDeclaration:
def test_dependencies(self, tmp_path):
fpath = util.getpath("tests", "src", "crackfortran", "foo_deps.f90")
mod = crackfortran.crackfortran([str(fpath)])
assert len(mod) == 1
assert mod[0]["vars"]["abar"]["="] == "bar('abar')"
| 8,934 | Python | 37.183761 | 82 | 0.591336 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_symbolic.py | import pytest
from numpy.f2py.symbolic import (
Expr,
Op,
ArithOp,
Language,
as_symbol,
as_number,
as_string,
as_array,
as_complex,
as_terms,
as_factors,
eliminate_quotes,
insert_quotes,
fromstring,
as_expr,
as_apply,
as_numer_denom,
as_ternary,
as_ref,
as_deref,
normalize,
as_eq,
as_ne,
as_lt,
as_gt,
as_le,
as_ge,
)
from . import util
class TestSymbolic(util.F2PyTest):
def test_eliminate_quotes(self):
def worker(s):
r, d = eliminate_quotes(s)
s1 = insert_quotes(r, d)
assert s1 == s
for kind in ["", "mykind_"]:
worker(kind + '"1234" // "ABCD"')
worker(kind + '"1234" // ' + kind + '"ABCD"')
worker(kind + "\"1234\" // 'ABCD'")
worker(kind + '"1234" // ' + kind + "'ABCD'")
worker(kind + '"1\\"2\'AB\'34"')
worker("a = " + kind + "'1\\'2\"AB\"34'")
def test_sanity(self):
x = as_symbol("x")
y = as_symbol("y")
z = as_symbol("z")
assert x.op == Op.SYMBOL
assert repr(x) == "Expr(Op.SYMBOL, 'x')"
assert x == x
assert x != y
assert hash(x) is not None
n = as_number(123)
m = as_number(456)
assert n.op == Op.INTEGER
assert repr(n) == "Expr(Op.INTEGER, (123, 4))"
assert n == n
assert n != m
assert hash(n) is not None
fn = as_number(12.3)
fm = as_number(45.6)
assert fn.op == Op.REAL
assert repr(fn) == "Expr(Op.REAL, (12.3, 4))"
assert fn == fn
assert fn != fm
assert hash(fn) is not None
c = as_complex(1, 2)
c2 = as_complex(3, 4)
assert c.op == Op.COMPLEX
assert repr(c) == ("Expr(Op.COMPLEX, (Expr(Op.INTEGER, (1, 4)),"
" Expr(Op.INTEGER, (2, 4))))")
assert c == c
assert c != c2
assert hash(c) is not None
s = as_string("'123'")
s2 = as_string('"ABC"')
assert s.op == Op.STRING
assert repr(s) == "Expr(Op.STRING, (\"'123'\", 1))", repr(s)
assert s == s
assert s != s2
a = as_array((n, m))
b = as_array((n, ))
assert a.op == Op.ARRAY
assert repr(a) == ("Expr(Op.ARRAY, (Expr(Op.INTEGER, (123, 4)),"
" Expr(Op.INTEGER, (456, 4))))")
assert a == a
assert a != b
t = as_terms(x)
u = as_terms(y)
assert t.op == Op.TERMS
assert repr(t) == "Expr(Op.TERMS, {Expr(Op.SYMBOL, 'x'): 1})"
assert t == t
assert t != u
assert hash(t) is not None
v = as_factors(x)
w = as_factors(y)
assert v.op == Op.FACTORS
assert repr(v) == "Expr(Op.FACTORS, {Expr(Op.SYMBOL, 'x'): 1})"
assert v == v
assert w != v
assert hash(v) is not None
t = as_ternary(x, y, z)
u = as_ternary(x, z, y)
assert t.op == Op.TERNARY
assert t == t
assert t != u
assert hash(t) is not None
e = as_eq(x, y)
f = as_lt(x, y)
assert e.op == Op.RELATIONAL
assert e == e
assert e != f
assert hash(e) is not None
def test_tostring_fortran(self):
x = as_symbol("x")
y = as_symbol("y")
z = as_symbol("z")
n = as_number(123)
m = as_number(456)
a = as_array((n, m))
c = as_complex(n, m)
assert str(x) == "x"
assert str(n) == "123"
assert str(a) == "[123, 456]"
assert str(c) == "(123, 456)"
assert str(Expr(Op.TERMS, {x: 1})) == "x"
assert str(Expr(Op.TERMS, {x: 2})) == "2 * x"
assert str(Expr(Op.TERMS, {x: -1})) == "-x"
assert str(Expr(Op.TERMS, {x: -2})) == "-2 * x"
assert str(Expr(Op.TERMS, {x: 1, y: 1})) == "x + y"
assert str(Expr(Op.TERMS, {x: -1, y: -1})) == "-x - y"
assert str(Expr(Op.TERMS, {x: 2, y: 3})) == "2 * x + 3 * y"
assert str(Expr(Op.TERMS, {x: -2, y: 3})) == "-2 * x + 3 * y"
assert str(Expr(Op.TERMS, {x: 2, y: -3})) == "2 * x - 3 * y"
assert str(Expr(Op.FACTORS, {x: 1})) == "x"
assert str(Expr(Op.FACTORS, {x: 2})) == "x ** 2"
assert str(Expr(Op.FACTORS, {x: -1})) == "x ** -1"
assert str(Expr(Op.FACTORS, {x: -2})) == "x ** -2"
assert str(Expr(Op.FACTORS, {x: 1, y: 1})) == "x * y"
assert str(Expr(Op.FACTORS, {x: 2, y: 3})) == "x ** 2 * y ** 3"
v = Expr(Op.FACTORS, {x: 2, Expr(Op.TERMS, {x: 1, y: 1}): 3})
assert str(v) == "x ** 2 * (x + y) ** 3", str(v)
v = Expr(Op.FACTORS, {x: 2, Expr(Op.FACTORS, {x: 1, y: 1}): 3})
assert str(v) == "x ** 2 * (x * y) ** 3", str(v)
assert str(Expr(Op.APPLY, ("f", (), {}))) == "f()"
assert str(Expr(Op.APPLY, ("f", (x, ), {}))) == "f(x)"
assert str(Expr(Op.APPLY, ("f", (x, y), {}))) == "f(x, y)"
assert str(Expr(Op.INDEXING, ("f", x))) == "f[x]"
assert str(as_ternary(x, y, z)) == "merge(y, z, x)"
assert str(as_eq(x, y)) == "x .eq. y"
assert str(as_ne(x, y)) == "x .ne. y"
assert str(as_lt(x, y)) == "x .lt. y"
assert str(as_le(x, y)) == "x .le. y"
assert str(as_gt(x, y)) == "x .gt. y"
assert str(as_ge(x, y)) == "x .ge. y"
def test_tostring_c(self):
language = Language.C
x = as_symbol("x")
y = as_symbol("y")
z = as_symbol("z")
n = as_number(123)
assert Expr(Op.FACTORS, {x: 2}).tostring(language=language) == "x * x"
assert (Expr(Op.FACTORS, {
x + y: 2
}).tostring(language=language) == "(x + y) * (x + y)")
assert Expr(Op.FACTORS, {
x: 12
}).tostring(language=language) == "pow(x, 12)"
assert as_apply(ArithOp.DIV, x,
y).tostring(language=language) == "x / y"
assert (as_apply(ArithOp.DIV, x,
x + y).tostring(language=language) == "x / (x + y)")
assert (as_apply(ArithOp.DIV, x - y, x +
y).tostring(language=language) == "(x - y) / (x + y)")
assert (x + (x - y) / (x + y) +
n).tostring(language=language) == "123 + x + (x - y) / (x + y)"
assert as_ternary(x, y, z).tostring(language=language) == "(x?y:z)"
assert as_eq(x, y).tostring(language=language) == "x == y"
assert as_ne(x, y).tostring(language=language) == "x != y"
assert as_lt(x, y).tostring(language=language) == "x < y"
assert as_le(x, y).tostring(language=language) == "x <= y"
assert as_gt(x, y).tostring(language=language) == "x > y"
assert as_ge(x, y).tostring(language=language) == "x >= y"
def test_operations(self):
x = as_symbol("x")
y = as_symbol("y")
z = as_symbol("z")
assert x + x == Expr(Op.TERMS, {x: 2})
assert x - x == Expr(Op.INTEGER, (0, 4))
assert x + y == Expr(Op.TERMS, {x: 1, y: 1})
assert x - y == Expr(Op.TERMS, {x: 1, y: -1})
assert x * x == Expr(Op.FACTORS, {x: 2})
assert x * y == Expr(Op.FACTORS, {x: 1, y: 1})
assert +x == x
assert -x == Expr(Op.TERMS, {x: -1}), repr(-x)
assert 2 * x == Expr(Op.TERMS, {x: 2})
assert 2 + x == Expr(Op.TERMS, {x: 1, as_number(1): 2})
assert 2 * x + 3 * y == Expr(Op.TERMS, {x: 2, y: 3})
assert (x + y) * 2 == Expr(Op.TERMS, {x: 2, y: 2})
assert x**2 == Expr(Op.FACTORS, {x: 2})
assert (x + y)**2 == Expr(
Op.TERMS,
{
Expr(Op.FACTORS, {x: 2}): 1,
Expr(Op.FACTORS, {y: 2}): 1,
Expr(Op.FACTORS, {
x: 1,
y: 1
}): 2,
},
)
assert (x + y) * x == x**2 + x * y
assert (x + y)**2 == x**2 + 2 * x * y + y**2
assert (x + y)**2 + (x - y)**2 == 2 * x**2 + 2 * y**2
assert (x + y) * z == x * z + y * z
assert z * (x + y) == x * z + y * z
assert (x / 2) == as_apply(ArithOp.DIV, x, as_number(2))
assert (2 * x / 2) == x
assert (3 * x / 2) == as_apply(ArithOp.DIV, 3 * x, as_number(2))
assert (4 * x / 2) == 2 * x
assert (5 * x / 2) == as_apply(ArithOp.DIV, 5 * x, as_number(2))
assert (6 * x / 2) == 3 * x
assert ((3 * 5) * x / 6) == as_apply(ArithOp.DIV, 5 * x, as_number(2))
assert (30 * x**2 * y**4 / (24 * x**3 * y**3)) == as_apply(
ArithOp.DIV, 5 * y, 4 * x)
assert ((15 * x / 6) / 5) == as_apply(ArithOp.DIV, x,
as_number(2)), (15 * x / 6) / 5
assert (x / (5 / x)) == as_apply(ArithOp.DIV, x**2, as_number(5))
assert (x / 2.0) == Expr(Op.TERMS, {x: 0.5})
s = as_string('"ABC"')
t = as_string('"123"')
assert s // t == Expr(Op.STRING, ('"ABC123"', 1))
assert s // x == Expr(Op.CONCAT, (s, x))
assert x // s == Expr(Op.CONCAT, (x, s))
c = as_complex(1.0, 2.0)
assert -c == as_complex(-1.0, -2.0)
assert c + c == as_expr((1 + 2j) * 2)
assert c * c == as_expr((1 + 2j)**2)
def test_substitute(self):
x = as_symbol("x")
y = as_symbol("y")
z = as_symbol("z")
a = as_array((x, y))
assert x.substitute({x: y}) == y
assert (x + y).substitute({x: z}) == y + z
assert (x * y).substitute({x: z}) == y * z
assert (x**4).substitute({x: z}) == z**4
assert (x / y).substitute({x: z}) == z / y
assert x.substitute({x: y + z}) == y + z
assert a.substitute({x: y + z}) == as_array((y + z, y))
assert as_ternary(x, y,
z).substitute({x: y + z}) == as_ternary(y + z, y, z)
assert as_eq(x, y).substitute({x: y + z}) == as_eq(y + z, y)
def test_fromstring(self):
x = as_symbol("x")
y = as_symbol("y")
z = as_symbol("z")
f = as_symbol("f")
s = as_string('"ABC"')
t = as_string('"123"')
a = as_array((x, y))
assert fromstring("x") == x
assert fromstring("+ x") == x
assert fromstring("- x") == -x
assert fromstring("x + y") == x + y
assert fromstring("x + 1") == x + 1
assert fromstring("x * y") == x * y
assert fromstring("x * 2") == x * 2
assert fromstring("x / y") == x / y
assert fromstring("x ** 2", language=Language.Python) == x**2
assert fromstring("x ** 2 ** 3", language=Language.Python) == x**2**3
assert fromstring("(x + y) * z") == (x + y) * z
assert fromstring("f(x)") == f(x)
assert fromstring("f(x,y)") == f(x, y)
assert fromstring("f[x]") == f[x]
assert fromstring("f[x][y]") == f[x][y]
assert fromstring('"ABC"') == s
assert (normalize(
fromstring('"ABC" // "123" ',
language=Language.Fortran)) == s // t)
assert fromstring('f("ABC")') == f(s)
assert fromstring('MYSTRKIND_"ABC"') == as_string('"ABC"', "MYSTRKIND")
assert fromstring("(/x, y/)") == a, fromstring("(/x, y/)")
assert fromstring("f((/x, y/))") == f(a)
assert fromstring("(/(x+y)*z/)") == as_array(((x + y) * z, ))
assert fromstring("123") == as_number(123)
assert fromstring("123_2") == as_number(123, 2)
assert fromstring("123_myintkind") == as_number(123, "myintkind")
assert fromstring("123.0") == as_number(123.0, 4)
assert fromstring("123.0_4") == as_number(123.0, 4)
assert fromstring("123.0_8") == as_number(123.0, 8)
assert fromstring("123.0e0") == as_number(123.0, 4)
assert fromstring("123.0d0") == as_number(123.0, 8)
assert fromstring("123d0") == as_number(123.0, 8)
assert fromstring("123e-0") == as_number(123.0, 4)
assert fromstring("123d+0") == as_number(123.0, 8)
assert fromstring("123.0_myrealkind") == as_number(123.0, "myrealkind")
assert fromstring("3E4") == as_number(30000.0, 4)
assert fromstring("(1, 2)") == as_complex(1, 2)
assert fromstring("(1e2, PI)") == as_complex(as_number(100.0),
as_symbol("PI"))
assert fromstring("[1, 2]") == as_array((as_number(1), as_number(2)))
assert fromstring("POINT(x, y=1)") == as_apply(as_symbol("POINT"),
x,
y=as_number(1))
assert fromstring(
'PERSON(name="John", age=50, shape=(/34, 23/))') == as_apply(
as_symbol("PERSON"),
name=as_string('"John"'),
age=as_number(50),
shape=as_array((as_number(34), as_number(23))),
)
assert fromstring("x?y:z") == as_ternary(x, y, z)
assert fromstring("*x") == as_deref(x)
assert fromstring("**x") == as_deref(as_deref(x))
assert fromstring("&x") == as_ref(x)
assert fromstring("(*x) * (*y)") == as_deref(x) * as_deref(y)
assert fromstring("(*x) * *y") == as_deref(x) * as_deref(y)
assert fromstring("*x * *y") == as_deref(x) * as_deref(y)
assert fromstring("*x**y") == as_deref(x) * as_deref(y)
assert fromstring("x == y") == as_eq(x, y)
assert fromstring("x != y") == as_ne(x, y)
assert fromstring("x < y") == as_lt(x, y)
assert fromstring("x > y") == as_gt(x, y)
assert fromstring("x <= y") == as_le(x, y)
assert fromstring("x >= y") == as_ge(x, y)
assert fromstring("x .eq. y", language=Language.Fortran) == as_eq(x, y)
assert fromstring("x .ne. y", language=Language.Fortran) == as_ne(x, y)
assert fromstring("x .lt. y", language=Language.Fortran) == as_lt(x, y)
assert fromstring("x .gt. y", language=Language.Fortran) == as_gt(x, y)
assert fromstring("x .le. y", language=Language.Fortran) == as_le(x, y)
assert fromstring("x .ge. y", language=Language.Fortran) == as_ge(x, y)
def test_traverse(self):
x = as_symbol("x")
y = as_symbol("y")
z = as_symbol("z")
f = as_symbol("f")
# Use traverse to substitute a symbol
def replace_visit(s, r=z):
if s == x:
return r
assert x.traverse(replace_visit) == z
assert y.traverse(replace_visit) == y
assert z.traverse(replace_visit) == z
assert (f(y)).traverse(replace_visit) == f(y)
assert (f(x)).traverse(replace_visit) == f(z)
assert (f[y]).traverse(replace_visit) == f[y]
assert (f[z]).traverse(replace_visit) == f[z]
assert (x + y + z).traverse(replace_visit) == (2 * z + y)
assert (x +
f(y, x - z)).traverse(replace_visit) == (z +
f(y, as_number(0)))
assert as_eq(x, y).traverse(replace_visit) == as_eq(z, y)
# Use traverse to collect symbols, method 1
function_symbols = set()
symbols = set()
def collect_symbols(s):
if s.op is Op.APPLY:
oper = s.data[0]
function_symbols.add(oper)
if oper in symbols:
symbols.remove(oper)
elif s.op is Op.SYMBOL and s not in function_symbols:
symbols.add(s)
(x + f(y, x - z)).traverse(collect_symbols)
assert function_symbols == {f}
assert symbols == {x, y, z}
# Use traverse to collect symbols, method 2
def collect_symbols2(expr, symbols):
if expr.op is Op.SYMBOL:
symbols.add(expr)
symbols = set()
(x + f(y, x - z)).traverse(collect_symbols2, symbols)
assert symbols == {x, y, z, f}
# Use traverse to partially collect symbols
def collect_symbols3(expr, symbols):
if expr.op is Op.APPLY:
# skip traversing function calls
return expr
if expr.op is Op.SYMBOL:
symbols.add(expr)
symbols = set()
(x + f(y, x - z)).traverse(collect_symbols3, symbols)
assert symbols == {x}
def test_linear_solve(self):
x = as_symbol("x")
y = as_symbol("y")
z = as_symbol("z")
assert x.linear_solve(x) == (as_number(1), as_number(0))
assert (x + 1).linear_solve(x) == (as_number(1), as_number(1))
assert (2 * x).linear_solve(x) == (as_number(2), as_number(0))
assert (2 * x + 3).linear_solve(x) == (as_number(2), as_number(3))
assert as_number(3).linear_solve(x) == (as_number(0), as_number(3))
assert y.linear_solve(x) == (as_number(0), y)
assert (y * z).linear_solve(x) == (as_number(0), y * z)
assert (x + y).linear_solve(x) == (as_number(1), y)
assert (z * x + y).linear_solve(x) == (z, y)
assert ((z + y) * x + y).linear_solve(x) == (z + y, y)
assert (z * y * x + y).linear_solve(x) == (z * y, y)
pytest.raises(RuntimeError, lambda: (x * x).linear_solve(x))
def test_as_numer_denom(self):
x = as_symbol("x")
y = as_symbol("y")
n = as_number(123)
assert as_numer_denom(x) == (x, as_number(1))
assert as_numer_denom(x / n) == (x, n)
assert as_numer_denom(n / x) == (n, x)
assert as_numer_denom(x / y) == (x, y)
assert as_numer_denom(x * y) == (x * y, as_number(1))
assert as_numer_denom(n + x / y) == (x + n * y, y)
assert as_numer_denom(n + x / (y - x / n)) == (y * n**2, y * n - x)
def test_polynomial_atoms(self):
x = as_symbol("x")
y = as_symbol("y")
n = as_number(123)
assert x.polynomial_atoms() == {x}
assert n.polynomial_atoms() == set()
assert (y[x]).polynomial_atoms() == {y[x]}
assert (y(x)).polynomial_atoms() == {y(x)}
assert (y(x) + x).polynomial_atoms() == {y(x), x}
assert (y(x) * x[y]).polynomial_atoms() == {y(x), x[y]}
assert (y(x)**x).polynomial_atoms() == {y(x)}
| 18,341 | Python | 36.054545 | 79 | 0.464969 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_quoted_character.py | """See https://github.com/numpy/numpy/pull/10676.
"""
import sys
import pytest
from . import util
class TestQuotedCharacter(util.F2PyTest):
sources = [util.getpath("tests", "src", "quoted_character", "foo.f")]
@pytest.mark.skipif(sys.platform == "win32",
reason="Fails with MinGW64 Gfortran (Issue #9673)")
def test_quoted_character(self):
assert self.module.foo() == (b"'", b'"', b";", b"!", b"(", b")")
| 454 | Python | 25.764704 | 75 | 0.603524 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_return_character.py | import pytest
from numpy import array
from . import util
import platform
IS_S390X = platform.machine() == "s390x"
class TestReturnCharacter(util.F2PyTest):
def check_function(self, t, tname):
if tname in ["t0", "t1", "s0", "s1"]:
assert t(23) == b"2"
r = t("ab")
assert r == b"a"
r = t(array("ab"))
assert r == b"a"
r = t(array(77, "u1"))
assert r == b"M"
elif tname in ["ts", "ss"]:
assert t(23) == b"23"
assert t("123456789abcdef") == b"123456789a"
elif tname in ["t5", "s5"]:
assert t(23) == b"23"
assert t("ab") == b"ab"
assert t("123456789abcdef") == b"12345"
else:
raise NotImplementedError
class TestFReturnCharacter(TestReturnCharacter):
sources = [
util.getpath("tests", "src", "return_character", "foo77.f"),
util.getpath("tests", "src", "return_character", "foo90.f90"),
]
@pytest.mark.xfail(IS_S390X, reason="callback returns ' '")
@pytest.mark.parametrize("name", "t0,t1,t5,s0,s1,s5,ss".split(","))
def test_all_f77(self, name):
self.check_function(getattr(self.module, name), name)
@pytest.mark.xfail(IS_S390X, reason="callback returns ' '")
@pytest.mark.parametrize("name", "t0,t1,t5,ts,s0,s1,s5,ss".split(","))
def test_all_f90(self, name):
self.check_function(getattr(self.module.f90_return_char, name), name)
| 1,491 | Python | 31.434782 | 77 | 0.557344 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_f2py2e.py | import textwrap, re, sys, subprocess, shlex
from pathlib import Path
from collections import namedtuple
import pytest
from . import util
from numpy.f2py.f2py2e import main as f2pycli
#########################
# CLI utils and classes #
#########################
PPaths = namedtuple("PPaths", "finp, f90inp, pyf, wrap77, wrap90, cmodf")
def get_io_paths(fname_inp, mname="untitled"):
"""Takes in a temporary file for testing and returns the expected output and input paths
    Here the expected output is essentially any one of the possible
    generated files.
    .. note::
        Since this does not actually run f2py, none of these are guaranteed
        to exist, and module names are typically incorrect.
Parameters
----------
fname_inp : str
The input filename
mname : str, optional
The name of the module, untitled by default
Returns
-------
genp : NamedTuple PPaths
The possible paths which are generated, not all of which exist
"""
bpath = Path(fname_inp)
return PPaths(
finp=bpath.with_suffix(".f"),
f90inp=bpath.with_suffix(".f90"),
pyf=bpath.with_suffix(".pyf"),
wrap77=bpath.with_name(f"{mname}-f2pywrappers.f"),
wrap90=bpath.with_name(f"{mname}-f2pywrappers2.f90"),
cmodf=bpath.with_name(f"{mname}module.c"),
)
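# A minimal usage sketch of the helper above (the file name is hypothetical);
# none of the returned paths are guaranteed to exist:
def _example_io_paths():
    paths = get_io_paths("/tmp/foo.f90", mname="blah")
    # The generated C wrapper for module "blah" would be named blahmodule.c
    assert paths.cmodf.name == "blahmodule.c"
    return paths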
##########################
# CLI Fixtures and Tests #
##########################
@pytest.fixture(scope="session")
def hello_world_f90(tmpdir_factory):
"""Generates a single f90 file for testing"""
fdat = util.getpath("tests", "src", "cli", "hiworld.f90").read_text()
fn = tmpdir_factory.getbasetemp() / "hello.f90"
fn.write_text(fdat, encoding="ascii")
return fn
@pytest.fixture(scope="session")
def hello_world_f77(tmpdir_factory):
"""Generates a single f77 file for testing"""
fdat = util.getpath("tests", "src", "cli", "hi77.f").read_text()
fn = tmpdir_factory.getbasetemp() / "hello.f"
fn.write_text(fdat, encoding="ascii")
return fn
@pytest.fixture(scope="session")
def retreal_f77(tmpdir_factory):
"""Generates a single f77 file for testing"""
fdat = util.getpath("tests", "src", "return_real", "foo77.f").read_text()
fn = tmpdir_factory.getbasetemp() / "foo.f"
fn.write_text(fdat, encoding="ascii")
return fn
def test_gen_pyf(capfd, hello_world_f90, monkeypatch):
"""Ensures that a signature file is generated via the CLI
CLI :: -h
"""
ipath = Path(hello_world_f90)
opath = Path(hello_world_f90).stem + ".pyf"
monkeypatch.setattr(sys, "argv", f'f2py -h {opath} {ipath}'.split())
with util.switchdir(ipath.parent):
f2pycli() # Generate wrappers
out, _ = capfd.readouterr()
assert "Saving signatures to file" in out
assert Path(f'{opath}').exists()
def test_gen_pyf_stdout(capfd, hello_world_f90, monkeypatch):
"""Ensures that a signature file can be dumped to stdout
CLI :: -h
"""
ipath = Path(hello_world_f90)
monkeypatch.setattr(sys, "argv", f'f2py -h stdout {ipath}'.split())
with util.switchdir(ipath.parent):
f2pycli()
out, _ = capfd.readouterr()
assert "Saving signatures to file" in out
def test_gen_pyf_no_overwrite(capfd, hello_world_f90, monkeypatch):
"""Ensures that the CLI refuses to overwrite signature files
CLI :: -h without --overwrite-signature
"""
ipath = Path(hello_world_f90)
monkeypatch.setattr(sys, "argv", f'f2py -h faker.pyf {ipath}'.split())
with util.switchdir(ipath.parent):
Path("faker.pyf").write_text("Fake news", encoding="ascii")
with pytest.raises(SystemExit):
f2pycli() # Refuse to overwrite
_, err = capfd.readouterr()
assert "Use --overwrite-signature to overwrite" in err
@pytest.mark.xfail
def test_f2py_skip(capfd, retreal_f77, monkeypatch):
"""Tests that functions can be skipped
CLI :: skip:
"""
foutl = get_io_paths(retreal_f77, mname="test")
ipath = foutl.finp
toskip = "t0 t4 t8 sd s8 s4"
remaining = "td s0"
monkeypatch.setattr(
sys, "argv",
f'f2py {ipath} -m test skip: {toskip}'.split())
with util.switchdir(ipath.parent):
f2pycli()
out, err = capfd.readouterr()
for skey in toskip.split():
assert (
f'buildmodule: Could not found the body of interfaced routine "{skey}". Skipping.'
in err)
for rkey in remaining.split():
assert f'Constructing wrapper function "{rkey}"' in out
def test_f2py_only(capfd, retreal_f77, monkeypatch):
"""Test that functions can be kept by only:
CLI :: only:
"""
foutl = get_io_paths(retreal_f77, mname="test")
ipath = foutl.finp
toskip = "t0 t4 t8 sd s8 s4"
tokeep = "td s0"
monkeypatch.setattr(
sys, "argv",
f'f2py {ipath} -m test only: {tokeep}'.split())
with util.switchdir(ipath.parent):
f2pycli()
out, err = capfd.readouterr()
for skey in toskip.split():
assert (
f'buildmodule: Could not find the body of interfaced routine "{skey}". Skipping.'
in err)
for rkey in tokeep.split():
assert f'Constructing wrapper function "{rkey}"' in out
def test_file_processing_switch(capfd, hello_world_f90, retreal_f77,
monkeypatch):
"""Tests that it is possible to return to file processing mode
CLI :: :
BUG: numpy-gh #20520
"""
foutl = get_io_paths(retreal_f77, mname="test")
ipath = foutl.finp
toskip = "t0 t4 t8 sd s8 s4"
ipath2 = Path(hello_world_f90)
tokeep = "td s0 hi" # hi is in ipath2
mname = "blah"
monkeypatch.setattr(
sys,
"argv",
f'f2py {ipath} -m {mname} only: {tokeep} : {ipath2}'.split(
),
)
with util.switchdir(ipath.parent):
f2pycli()
out, err = capfd.readouterr()
for skey in toskip.split():
assert (
f'buildmodule: Could not find the body of interfaced routine "{skey}". Skipping.'
in err)
for rkey in tokeep.split():
assert f'Constructing wrapper function "{rkey}"' in out
def test_mod_gen_f77(capfd, hello_world_f90, monkeypatch):
"""Checks the generation of files based on a module name
CLI :: -m
"""
MNAME = "hi"
foutl = get_io_paths(hello_world_f90, mname=MNAME)
ipath = foutl.f90inp
monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m {MNAME}'.split())
with util.switchdir(ipath.parent):
f2pycli()
# Always generate C module
assert Path.exists(foutl.cmodf)
# File contains a function, check for F77 wrappers
assert Path.exists(foutl.wrap77)
def test_lower_cmod(capfd, hello_world_f77, monkeypatch):
"""Lowers cases by flag or when -h is present
CLI :: --[no-]lower
"""
foutl = get_io_paths(hello_world_f77, mname="test")
ipath = foutl.finp
capshi = re.compile(r"HI\(\)")
capslo = re.compile(r"hi\(\)")
# Case I: --lower is passed
monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m test --lower'.split())
with util.switchdir(ipath.parent):
f2pycli()
out, _ = capfd.readouterr()
assert capslo.search(out) is not None
assert capshi.search(out) is None
# Case II: --no-lower is passed
monkeypatch.setattr(sys, "argv",
f'f2py {ipath} -m test --no-lower'.split())
with util.switchdir(ipath.parent):
f2pycli()
out, _ = capfd.readouterr()
assert capslo.search(out) is None
assert capshi.search(out) is not None
def test_lower_sig(capfd, hello_world_f77, monkeypatch):
"""Lowers cases in signature files by flag or when -h is present
CLI :: --[no-]lower -h
"""
foutl = get_io_paths(hello_world_f77, mname="test")
ipath = foutl.finp
# Signature files
capshi = re.compile(r"Block: HI")
capslo = re.compile(r"Block: hi")
# Case I: --lower is implied by -h
# TODO: Clean up to prevent passing --overwrite-signature
monkeypatch.setattr(
sys,
"argv",
f'f2py {ipath} -h {foutl.pyf} -m test --overwrite-signature'.split(),
)
with util.switchdir(ipath.parent):
f2pycli()
out, _ = capfd.readouterr()
assert capslo.search(out) is not None
assert capshi.search(out) is None
# Case II: --no-lower overrides -h
monkeypatch.setattr(
sys,
"argv",
f'f2py {ipath} -h {foutl.pyf} -m test --overwrite-signature --no-lower'
.split(),
)
with util.switchdir(ipath.parent):
f2pycli()
out, _ = capfd.readouterr()
assert capslo.search(out) is None
assert capshi.search(out) is not None
def test_build_dir(capfd, hello_world_f90, monkeypatch):
"""Ensures that the build directory can be specified
CLI :: --build-dir
"""
ipath = Path(hello_world_f90)
mname = "blah"
odir = "tttmp"
monkeypatch.setattr(sys, "argv",
f'f2py -m {mname} {ipath} --build-dir {odir}'.split())
with util.switchdir(ipath.parent):
f2pycli()
out, _ = capfd.readouterr()
assert f"Wrote C/API module \"{mname}\"" in out
def test_overwrite(capfd, hello_world_f90, monkeypatch):
"""Ensures that the build directory can be specified
CLI :: --overwrite-signature
"""
ipath = Path(hello_world_f90)
monkeypatch.setattr(
sys, "argv",
f'f2py -h faker.pyf {ipath} --overwrite-signature'.split())
with util.switchdir(ipath.parent):
Path("faker.pyf").write_text("Fake news", encoding="ascii")
f2pycli()
out, _ = capfd.readouterr()
assert "Saving signatures to file" in out
def test_latexdoc(capfd, hello_world_f90, monkeypatch):
"""Ensures that TeX documentation is written out
CLI :: --latex-doc
"""
ipath = Path(hello_world_f90)
mname = "blah"
monkeypatch.setattr(sys, "argv",
f'f2py -m {mname} {ipath} --latex-doc'.split())
with util.switchdir(ipath.parent):
f2pycli()
out, _ = capfd.readouterr()
assert "Documentation is saved to file" in out
with Path(f"{mname}module.tex").open() as otex:
assert "\\documentclass" in otex.read()
def test_nolatexdoc(capfd, hello_world_f90, monkeypatch):
"""Ensures that TeX documentation is written out
CLI :: --no-latex-doc
"""
ipath = Path(hello_world_f90)
mname = "blah"
monkeypatch.setattr(sys, "argv",
f'f2py -m {mname} {ipath} --no-latex-doc'.split())
with util.switchdir(ipath.parent):
f2pycli()
out, _ = capfd.readouterr()
assert "Documentation is saved to file" not in out
def test_shortlatex(capfd, hello_world_f90, monkeypatch):
"""Ensures that truncated documentation is written out
TODO: Test to ensure this has no effect without --latex-doc
CLI :: --latex-doc --short-latex
"""
ipath = Path(hello_world_f90)
mname = "blah"
monkeypatch.setattr(
sys,
"argv",
f'f2py -m {mname} {ipath} --latex-doc --short-latex'.split(),
)
with util.switchdir(ipath.parent):
f2pycli()
out, _ = capfd.readouterr()
assert "Documentation is saved to file" in out
with Path(f"./{mname}module.tex").open() as otex:
assert "\\documentclass" not in otex.read()
def test_restdoc(capfd, hello_world_f90, monkeypatch):
"""Ensures that RsT documentation is written out
CLI :: --rest-doc
"""
ipath = Path(hello_world_f90)
mname = "blah"
monkeypatch.setattr(sys, "argv",
f'f2py -m {mname} {ipath} --rest-doc'.split())
with util.switchdir(ipath.parent):
f2pycli()
out, _ = capfd.readouterr()
assert "ReST Documentation is saved to file" in out
with Path(f"./{mname}module.rest").open() as orst:
assert r".. -*- rest -*-" in orst.read()
def test_norestexdoc(capfd, hello_world_f90, monkeypatch):
"""Ensures that TeX documentation is written out
CLI :: --no-rest-doc
"""
ipath = Path(hello_world_f90)
mname = "blah"
monkeypatch.setattr(sys, "argv",
f'f2py -m {mname} {ipath} --no-rest-doc'.split())
with util.switchdir(ipath.parent):
f2pycli()
out, _ = capfd.readouterr()
assert "ReST Documentation is saved to file" not in out
def test_debugcapi(capfd, hello_world_f90, monkeypatch):
"""Ensures that debugging wrappers are written
CLI :: --debug-capi
"""
ipath = Path(hello_world_f90)
mname = "blah"
monkeypatch.setattr(sys, "argv",
f'f2py -m {mname} {ipath} --debug-capi'.split())
with util.switchdir(ipath.parent):
f2pycli()
with Path(f"./{mname}module.c").open() as ocmod:
assert r"#define DEBUGCFUNCS" in ocmod.read()
@pytest.mark.xfail(reason="Consistently fails on CI.")
def test_debugcapi_bld(hello_world_f90, monkeypatch):
"""Ensures that debugging wrappers work
CLI :: --debug-capi -c
"""
ipath = Path(hello_world_f90)
mname = "blah"
monkeypatch.setattr(sys, "argv",
f'f2py -m {mname} {ipath} -c --debug-capi'.split())
with util.switchdir(ipath.parent):
f2pycli()
cmd_run = shlex.split("python3 -c \"import blah; blah.hi()\"")
rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8')
eout = ' Hello World\n'
eerr = textwrap.dedent("""\
debug-capi:Python C/API function blah.hi()
debug-capi:float hi=:output,hidden,scalar
debug-capi:hi=0
debug-capi:Fortran subroutine `f2pywraphi(&hi)'
debug-capi:hi=0
debug-capi:Building return value.
debug-capi:Python C/API function blah.hi: successful.
debug-capi:Freeing memory.
""")
assert rout.stdout == eout
assert rout.stderr == eerr
def test_wrapfunc_def(capfd, hello_world_f90, monkeypatch):
"""Ensures that fortran subroutine wrappers for F77 are included by default
CLI :: --[no]-wrap-functions
"""
# Implied
ipath = Path(hello_world_f90)
mname = "blah"
monkeypatch.setattr(sys, "argv", f'f2py -m {mname} {ipath}'.split())
with util.switchdir(ipath.parent):
f2pycli()
out, _ = capfd.readouterr()
assert r"Fortran 77 wrappers are saved to" in out
# Explicit
monkeypatch.setattr(sys, "argv",
f'f2py -m {mname} {ipath} --wrap-functions'.split())
with util.switchdir(ipath.parent):
f2pycli()
out, _ = capfd.readouterr()
assert r"Fortran 77 wrappers are saved to" in out
def test_nowrapfunc(capfd, hello_world_f90, monkeypatch):
"""Ensures that fortran subroutine wrappers for F77 can be disabled
CLI :: --no-wrap-functions
"""
ipath = Path(hello_world_f90)
mname = "blah"
monkeypatch.setattr(sys, "argv",
f'f2py -m {mname} {ipath} --no-wrap-functions'.split())
with util.switchdir(ipath.parent):
f2pycli()
out, _ = capfd.readouterr()
assert r"Fortran 77 wrappers are saved to" not in out
def test_inclheader(capfd, hello_world_f90, monkeypatch):
"""Add to the include directories
CLI :: -include
TODO: Document this in the help string
"""
ipath = Path(hello_world_f90)
mname = "blah"
monkeypatch.setattr(
sys,
"argv",
f'f2py -m {mname} {ipath} -include<stdbool.h> -include<stdio.h> '.
split(),
)
with util.switchdir(ipath.parent):
f2pycli()
with Path(f"./{mname}module.c").open() as ocmod:
ocmr = ocmod.read()
assert "#include <stdbool.h>" in ocmr
assert "#include <stdio.h>" in ocmr
def test_inclpath():
"""Add to the include directories
CLI :: --include-paths
"""
# TODO: populate
pass
def test_hlink():
"""Add to the include directories
CLI :: --help-link
"""
# TODO: populate
pass
def test_f2cmap():
"""Check that Fortran-to-Python KIND specs can be passed
CLI :: --f2cmap
"""
# TODO: populate
pass
def test_quiet(capfd, hello_world_f90, monkeypatch):
"""Reduce verbosity
CLI :: --quiet
"""
ipath = Path(hello_world_f90)
monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} --quiet'.split())
with util.switchdir(ipath.parent):
f2pycli()
out, _ = capfd.readouterr()
assert len(out) == 0
def test_verbose(capfd, hello_world_f90, monkeypatch):
"""Increase verbosity
CLI :: --verbose
"""
ipath = Path(hello_world_f90)
monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} --verbose'.split())
with util.switchdir(ipath.parent):
f2pycli()
out, _ = capfd.readouterr()
assert "analyzeline" in out
def test_version(capfd, monkeypatch):
"""Ensure version
CLI :: -v
"""
monkeypatch.setattr(sys, "argv", 'f2py -v'.split())
# TODO: f2py2e should not call sys.exit() after printing the version
with pytest.raises(SystemExit):
f2pycli()
out, _ = capfd.readouterr()
import numpy as np
assert np.__version__ == out.strip()
@pytest.mark.xfail(reason="Consistently fails on CI.")
def test_npdistop(hello_world_f90, monkeypatch):
"""
CLI :: -c
"""
ipath = Path(hello_world_f90)
monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c'.split())
with util.switchdir(ipath.parent):
f2pycli()
cmd_run = shlex.split("python -c \"import blah; blah.hi()\"")
rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8')
eout = ' Hello World\n'
assert rout.stdout == eout
# Numpy distutils flags
# TODO: These should be tested separately
def test_npd_fcompiler():
"""
CLI :: -c --fcompiler
"""
# TODO: populate
pass
def test_npd_compiler():
"""
CLI :: -c --compiler
"""
# TODO: populate
pass
def test_npd_help_fcompiler():
"""
CLI :: -c --help-fcompiler
"""
# TODO: populate
pass
def test_npd_f77exec():
"""
CLI :: -c --f77exec
"""
# TODO: populate
pass
def test_npd_f90exec():
"""
CLI :: -c --f90exec
"""
# TODO: populate
pass
def test_npd_f77flags():
"""
CLI :: -c --f77flags
"""
# TODO: populate
pass
def test_npd_f90flags():
"""
CLI :: -c --f90flags
"""
# TODO: populate
pass
def test_npd_opt():
"""
CLI :: -c --opt
"""
# TODO: populate
pass
def test_npd_arch():
"""
CLI :: -c --arch
"""
# TODO: populate
pass
def test_npd_noopt():
"""
CLI :: -c --noopt
"""
# TODO: populate
pass
def test_npd_noarch():
"""
CLI :: -c --noarch
"""
# TODO: populate
pass
def test_npd_debug():
"""
CLI :: -c --debug
"""
# TODO: populate
pass
def test_npd_link_auto():
"""
CLI :: -c --link-<resource>
"""
# TODO: populate
pass
def test_npd_lib():
"""
CLI :: -c -L/path/to/lib/ -l<libname>
"""
# TODO: populate
pass
def test_npd_define():
"""
CLI :: -D<define>
"""
# TODO: populate
pass
def test_npd_undefine():
"""
CLI :: -U<name>
"""
# TODO: populate
pass
def test_npd_incl():
"""
CLI :: -I/path/to/include/
"""
# TODO: populate
pass
def test_npd_linker():
"""
CLI :: <filename>.o <filename>.so <filename>.a
"""
# TODO: populate
pass
| 19,766 | Python | 25.391188 | 98 | 0.591622 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_return_logical.py | import pytest
from numpy import array
from . import util
class TestReturnLogical(util.F2PyTest):
def check_function(self, t):
assert t(True) == 1
assert t(False) == 0
assert t(0) == 0
assert t(None) == 0
assert t(0.0) == 0
assert t(0j) == 0
assert t(1j) == 1
assert t(234) == 1
assert t(234.6) == 1
assert t(234.6 + 3j) == 1
assert t("234") == 1
assert t("aaa") == 1
assert t("") == 0
assert t([]) == 0
assert t(()) == 0
assert t({}) == 0
assert t(t) == 1
assert t(-234) == 1
assert t(10**100) == 1
assert t([234]) == 1
assert t((234, )) == 1
assert t(array(234)) == 1
assert t(array([234])) == 1
assert t(array([[234]])) == 1
assert t(array([234], "b")) == 1
assert t(array([234], "h")) == 1
assert t(array([234], "i")) == 1
assert t(array([234], "l")) == 1
assert t(array([234], "f")) == 1
assert t(array([234], "d")) == 1
assert t(array([234 + 3j], "F")) == 1
assert t(array([234], "D")) == 1
assert t(array(0)) == 0
assert t(array([0])) == 0
assert t(array([[0]])) == 0
assert t(array([0j])) == 0
assert t(array([1])) == 1
pytest.raises(ValueError, t, array([0, 0]))
class TestFReturnLogical(TestReturnLogical):
sources = [
util.getpath("tests", "src", "return_logical", "foo77.f"),
util.getpath("tests", "src", "return_logical", "foo90.f90"),
]
@pytest.mark.slow
@pytest.mark.parametrize("name", "t0,t1,t2,t4,s0,s1,s2,s4".split(","))
def test_all_f77(self, name):
self.check_function(getattr(self.module, name))
@pytest.mark.slow
@pytest.mark.parametrize("name",
"t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","))
def test_all_f90(self, name):
self.check_function(getattr(self.module.f90_return_logical, name))
| 2,017 | Python | 30.046153 | 74 | 0.492315 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_array_from_pyobj.py | import os
import sys
import copy
import platform
import pytest
import numpy as np
from numpy.core.multiarray import typeinfo
from . import util
wrap = None
def setup_module():
"""
Build the required testing extension module
"""
global wrap
# Check compiler availability first
if not util.has_c_compiler():
pytest.skip("No C compiler available")
if wrap is None:
config_code = """
config.add_extension('test_array_from_pyobj_ext',
sources=['wrapmodule.c', 'fortranobject.c'],
define_macros=[])
"""
d = os.path.dirname(__file__)
src = [
util.getpath("tests", "src", "array_from_pyobj", "wrapmodule.c"),
util.getpath("src", "fortranobject.c"),
util.getpath("src", "fortranobject.h"),
]
wrap = util.build_module_distutils(src, config_code,
"test_array_from_pyobj_ext")
def flags_info(arr):
flags = wrap.array_attrs(arr)[6]
return flags2names(flags)
def flags2names(flags):
info = []
for flagname in [
"CONTIGUOUS",
"FORTRAN",
"OWNDATA",
"ENSURECOPY",
"ENSUREARRAY",
"ALIGNED",
"NOTSWAPPED",
"WRITEABLE",
"WRITEBACKIFCOPY",
"BEHAVED",
"BEHAVED_RO",
"CARRAY",
"FARRAY",
]:
if abs(flags) & getattr(wrap, flagname, 0):
info.append(flagname)
return info
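# Illustrative note: ``wrap.array_attrs(arr)[6]`` is the array's flags word,
# and flags2names maps it back to the subset of the names above that are
# set; e.g. flags_info on a fresh C-contiguous array would be expected to
# include 'CONTIGUOUS', 'ALIGNED' and 'WRITEABLE' (the exact output depends
# on the array and the compiled wrap module).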
class Intent:
def __init__(self, intent_list=[]):
self.intent_list = intent_list[:]
flags = 0
for i in intent_list:
if i == "optional":
flags |= wrap.F2PY_OPTIONAL
else:
flags |= getattr(wrap, "F2PY_INTENT_" + i.upper())
self.flags = flags
def __getattr__(self, name):
name = name.lower()
if name == "in_":
name = "in"
return self.__class__(self.intent_list + [name])
def __str__(self):
return "intent(%s)" % (",".join(self.intent_list))
def __repr__(self):
return "Intent(%r)" % (self.intent_list)
def is_intent(self, *names):
for name in names:
if name not in self.intent_list:
return False
return True
def is_intent_exact(self, *names):
return len(self.intent_list) == len(names) and self.is_intent(*names)
intent = Intent()
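# Intent builds the f2py intent flags word by attribute chaining; an
# illustrative sketch (it requires the compiled ``wrap`` module for the
# flag constants):
#
#     spec = intent.in_.c.copy      # Intent(['in', 'c', 'copy'])
#     spec.flags                    # bitwise OR of F2PY_INTENT_* values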
_type_names = [
"BOOL",
"BYTE",
"UBYTE",
"SHORT",
"USHORT",
"INT",
"UINT",
"LONG",
"ULONG",
"LONGLONG",
"ULONGLONG",
"FLOAT",
"DOUBLE",
"CFLOAT",
]
_cast_dict = {"BOOL": ["BOOL"]}
_cast_dict["BYTE"] = _cast_dict["BOOL"] + ["BYTE"]
_cast_dict["UBYTE"] = _cast_dict["BOOL"] + ["UBYTE"]
_cast_dict["BYTE"] = ["BYTE"]
_cast_dict["UBYTE"] = ["UBYTE"]
_cast_dict["SHORT"] = _cast_dict["BYTE"] + ["UBYTE", "SHORT"]
_cast_dict["USHORT"] = _cast_dict["UBYTE"] + ["BYTE", "USHORT"]
_cast_dict["INT"] = _cast_dict["SHORT"] + ["USHORT", "INT"]
_cast_dict["UINT"] = _cast_dict["USHORT"] + ["SHORT", "UINT"]
_cast_dict["LONG"] = _cast_dict["INT"] + ["LONG"]
_cast_dict["ULONG"] = _cast_dict["UINT"] + ["ULONG"]
_cast_dict["LONGLONG"] = _cast_dict["LONG"] + ["LONGLONG"]
_cast_dict["ULONGLONG"] = _cast_dict["ULONG"] + ["ULONGLONG"]
_cast_dict["FLOAT"] = _cast_dict["SHORT"] + ["USHORT", "FLOAT"]
_cast_dict["DOUBLE"] = _cast_dict["INT"] + ["UINT", "FLOAT", "DOUBLE"]
_cast_dict["CFLOAT"] = _cast_dict["FLOAT"] + ["CFLOAT"]
# 32 bit system malloc typically does not provide the alignment required by
# 16 byte long double types. This means the inout intent cannot be satisfied,
# and several tests fail as the alignment flag can be randomly true or false.
# When numpy gains an aligned allocator, the tests could be enabled again.
#
# Furthermore, on macOS ARM64, LONGDOUBLE is an alias for DOUBLE.
if ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8)
and sys.platform != "win32"
and (platform.system(), platform.processor()) != ("Darwin", "arm")):
_type_names.extend(["LONGDOUBLE", "CDOUBLE", "CLONGDOUBLE"])
_cast_dict["LONGDOUBLE"] = _cast_dict["LONG"] + [
"ULONG",
"FLOAT",
"DOUBLE",
"LONGDOUBLE",
]
_cast_dict["CLONGDOUBLE"] = _cast_dict["LONGDOUBLE"] + [
"CFLOAT",
"CDOUBLE",
"CLONGDOUBLE",
]
_cast_dict["CDOUBLE"] = _cast_dict["DOUBLE"] + ["CFLOAT", "CDOUBLE"]
class Type:
_type_cache = {}
def __new__(cls, name):
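        # Accept either a type-name string or a np.dtype; for a dtype, the
        # canonical name is recovered by reverse lookup in typeinfo.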
if isinstance(name, np.dtype):
dtype0 = name
name = None
for n, i in typeinfo.items():
if not isinstance(i, type) and dtype0.type is i.type:
name = n
break
obj = cls._type_cache.get(name.upper(), None)
if obj is not None:
return obj
obj = object.__new__(cls)
obj._init(name)
cls._type_cache[name.upper()] = obj
return obj
def _init(self, name):
self.NAME = name.upper()
info = typeinfo[self.NAME]
self.type_num = getattr(wrap, "NPY_" + self.NAME)
assert self.type_num == info.num
self.dtype = np.dtype(info.type)
self.type = info.type
self.elsize = info.bits / 8
self.dtypechar = info.char
def cast_types(self):
return [self.__class__(_m) for _m in _cast_dict[self.NAME]]
def all_types(self):
return [self.__class__(_m) for _m in _type_names]
def smaller_types(self):
bits = typeinfo[self.NAME].alignment
types = []
for name in _type_names:
if typeinfo[name].alignment < bits:
types.append(Type(name))
return types
def equal_types(self):
bits = typeinfo[self.NAME].alignment
types = []
for name in _type_names:
if name == self.NAME:
continue
if typeinfo[name].alignment == bits:
types.append(Type(name))
return types
def larger_types(self):
bits = typeinfo[self.NAME].alignment
types = []
for name in _type_names:
if typeinfo[name].alignment > bits:
types.append(Type(name))
return types
class Array:
def __init__(self, typ, dims, intent, obj):
self.type = typ
self.dims = dims
self.intent = intent
self.obj_copy = copy.deepcopy(obj)
self.obj = obj
# arr.dtypechar may be different from typ.dtypechar
self.arr = wrap.call(typ.type_num, dims, intent.flags, obj)
assert isinstance(self.arr, np.ndarray)
self.arr_attr = wrap.array_attrs(self.arr)
if len(dims) > 1:
if self.intent.is_intent("c"):
assert (intent.flags & wrap.F2PY_INTENT_C)
assert not self.arr.flags["FORTRAN"]
assert self.arr.flags["CONTIGUOUS"]
assert (not self.arr_attr[6] & wrap.FORTRAN)
else:
assert (not intent.flags & wrap.F2PY_INTENT_C)
assert self.arr.flags["FORTRAN"]
assert not self.arr.flags["CONTIGUOUS"]
assert (self.arr_attr[6] & wrap.FORTRAN)
if obj is None:
self.pyarr = None
self.pyarr_attr = None
return
if intent.is_intent("cache"):
assert isinstance(obj, np.ndarray), repr(type(obj))
self.pyarr = np.array(obj).reshape(*dims).copy()
else:
self.pyarr = np.array(
np.array(obj, dtype=typ.dtypechar).reshape(*dims),
order=self.intent.is_intent("c") and "C" or "F",
)
assert self.pyarr.dtype == typ
self.pyarr.setflags(write=self.arr.flags["WRITEABLE"])
assert self.pyarr.flags["OWNDATA"], (obj, intent)
self.pyarr_attr = wrap.array_attrs(self.pyarr)
if len(dims) > 1:
if self.intent.is_intent("c"):
assert not self.pyarr.flags["FORTRAN"]
assert self.pyarr.flags["CONTIGUOUS"]
assert (not self.pyarr_attr[6] & wrap.FORTRAN)
else:
assert self.pyarr.flags["FORTRAN"]
assert not self.pyarr.flags["CONTIGUOUS"]
assert (self.pyarr_attr[6] & wrap.FORTRAN)
assert self.arr_attr[1] == self.pyarr_attr[1] # nd
assert self.arr_attr[2] == self.pyarr_attr[2] # dimensions
if self.arr_attr[1] <= 1:
assert self.arr_attr[3] == self.pyarr_attr[3], repr((
self.arr_attr[3],
self.pyarr_attr[3],
self.arr.tobytes(),
self.pyarr.tobytes(),
)) # strides
assert self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:] # descr
assert self.arr_attr[6] == self.pyarr_attr[6], repr((
self.arr_attr[6],
self.pyarr_attr[6],
flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]),
flags2names(self.arr_attr[6]),
intent,
)) # flags
if intent.is_intent("cache"):
assert self.arr_attr[5][3] >= self.type.elsize
else:
assert self.arr_attr[5][3] == self.type.elsize
assert (self.arr_equal(self.pyarr, self.arr))
if isinstance(self.obj, np.ndarray):
if typ.elsize == Type(obj.dtype).elsize:
if not intent.is_intent("copy") and self.arr_attr[1] <= 1:
assert self.has_shared_memory()
def arr_equal(self, arr1, arr2):
if arr1.shape != arr2.shape:
return False
return (arr1 == arr2).all()
def __str__(self):
return str(self.arr)
def has_shared_memory(self):
"""Check that created array shares data with input array."""
if self.obj is self.arr:
return True
if not isinstance(self.obj, np.ndarray):
return False
obj_attr = wrap.array_attrs(self.obj)
return obj_attr[0] == self.arr_attr[0]
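# has_shared_memory compares the raw data pointers (array_attrs()[0]) of the
# input object and the wrapped result; an equivalent pure-numpy check would
# be (illustrative sketch):
#
#     obj.__array_interface__["data"][0] == arr.__array_interface__["data"][0]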
class TestIntent:
def test_in_out(self):
assert str(intent.in_.out) == "intent(in,out)"
assert intent.in_.c.is_intent("c")
assert not intent.in_.c.is_intent_exact("c")
assert intent.in_.c.is_intent_exact("c", "in")
assert intent.in_.c.is_intent_exact("in", "c")
assert not intent.in_.is_intent("c")
class TestSharedMemory:
num2seq = [1, 2]
num23seq = [[1, 2, 3], [4, 5, 6]]
@pytest.fixture(autouse=True, scope="class", params=_type_names)
def setup_type(self, request):
request.cls.type = Type(request.param)
request.cls.array = lambda self, dims, intent, obj: Array(
Type(request.param), dims, intent, obj)
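    # The fixture above rebinds ``self.array`` so that every test in this
    # class runs once per entry in _type_names.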
def test_in_from_2seq(self):
a = self.array([2], intent.in_, self.num2seq)
assert not a.has_shared_memory()
def test_in_from_2casttype(self):
for t in self.type.cast_types():
obj = np.array(self.num2seq, dtype=t.dtype)
a = self.array([len(self.num2seq)], intent.in_, obj)
if t.elsize == self.type.elsize:
assert a.has_shared_memory(), repr((self.type.dtype, t.dtype))
else:
assert not a.has_shared_memory()
@pytest.mark.parametrize("write", ["w", "ro"])
@pytest.mark.parametrize("order", ["C", "F"])
@pytest.mark.parametrize("inp", ["2seq", "23seq"])
def test_in_nocopy(self, write, order, inp):
"""Test if intent(in) array can be passed without copies"""
seq = getattr(self, "num" + inp)
obj = np.array(seq, dtype=self.type.dtype, order=order)
obj.setflags(write=(write == "w"))
a = self.array(obj.shape,
((order == "C" and intent.in_.c) or intent.in_), obj)
assert a.has_shared_memory()
def test_inout_2seq(self):
obj = np.array(self.num2seq, dtype=self.type.dtype)
a = self.array([len(self.num2seq)], intent.inout, obj)
assert a.has_shared_memory()
try:
a = self.array([2], intent.in_.inout, self.num2seq)
except TypeError as msg:
if not str(msg).startswith(
"failed to initialize intent(inout|inplace|cache) array"):
raise
else:
raise SystemError("intent(inout) should have failed on sequence")
def test_f_inout_23seq(self):
obj = np.array(self.num23seq, dtype=self.type.dtype, order="F")
shape = (len(self.num23seq), len(self.num23seq[0]))
a = self.array(shape, intent.in_.inout, obj)
assert a.has_shared_memory()
obj = np.array(self.num23seq, dtype=self.type.dtype, order="C")
shape = (len(self.num23seq), len(self.num23seq[0]))
try:
a = self.array(shape, intent.in_.inout, obj)
except ValueError as msg:
if not str(msg).startswith(
"failed to initialize intent(inout) array"):
raise
else:
raise SystemError(
"intent(inout) should have failed on improper array")
def test_c_inout_23seq(self):
obj = np.array(self.num23seq, dtype=self.type.dtype)
shape = (len(self.num23seq), len(self.num23seq[0]))
a = self.array(shape, intent.in_.c.inout, obj)
assert a.has_shared_memory()
def test_in_copy_from_2casttype(self):
for t in self.type.cast_types():
obj = np.array(self.num2seq, dtype=t.dtype)
a = self.array([len(self.num2seq)], intent.in_.copy, obj)
assert not a.has_shared_memory()
def test_c_in_from_23seq(self):
a = self.array(
[len(self.num23seq), len(self.num23seq[0])], intent.in_,
self.num23seq)
assert not a.has_shared_memory()
def test_in_from_23casttype(self):
for t in self.type.cast_types():
obj = np.array(self.num23seq, dtype=t.dtype)
a = self.array(
[len(self.num23seq), len(self.num23seq[0])], intent.in_, obj)
assert not a.has_shared_memory()
def test_f_in_from_23casttype(self):
for t in self.type.cast_types():
obj = np.array(self.num23seq, dtype=t.dtype, order="F")
a = self.array(
[len(self.num23seq), len(self.num23seq[0])], intent.in_, obj)
if t.elsize == self.type.elsize:
assert a.has_shared_memory()
else:
assert not a.has_shared_memory()
def test_c_in_from_23casttype(self):
for t in self.type.cast_types():
obj = np.array(self.num23seq, dtype=t.dtype)
a = self.array(
[len(self.num23seq), len(self.num23seq[0])], intent.in_.c, obj)
if t.elsize == self.type.elsize:
assert a.has_shared_memory()
else:
assert not a.has_shared_memory()
def test_f_copy_in_from_23casttype(self):
for t in self.type.cast_types():
obj = np.array(self.num23seq, dtype=t.dtype, order="F")
a = self.array(
[len(self.num23seq), len(self.num23seq[0])], intent.in_.copy,
obj)
assert not a.has_shared_memory()
def test_c_copy_in_from_23casttype(self):
for t in self.type.cast_types():
obj = np.array(self.num23seq, dtype=t.dtype)
a = self.array(
[len(self.num23seq), len(self.num23seq[0])], intent.in_.c.copy,
obj)
assert not a.has_shared_memory()
def test_in_cache_from_2casttype(self):
for t in self.type.all_types():
if t.elsize != self.type.elsize:
continue
obj = np.array(self.num2seq, dtype=t.dtype)
shape = (len(self.num2seq), )
a = self.array(shape, intent.in_.c.cache, obj)
assert a.has_shared_memory()
a = self.array(shape, intent.in_.cache, obj)
assert a.has_shared_memory()
obj = np.array(self.num2seq, dtype=t.dtype, order="F")
a = self.array(shape, intent.in_.c.cache, obj)
assert a.has_shared_memory()
a = self.array(shape, intent.in_.cache, obj)
assert a.has_shared_memory(), repr(t.dtype)
try:
a = self.array(shape, intent.in_.cache, obj[::-1])
except ValueError as msg:
if not str(msg).startswith(
"failed to initialize intent(cache) array"):
raise
else:
raise SystemError(
"intent(cache) should have failed on multisegmented array")
def test_in_cache_from_2casttype_failure(self):
for t in self.type.all_types():
if t.elsize >= self.type.elsize:
continue
obj = np.array(self.num2seq, dtype=t.dtype)
shape = (len(self.num2seq), )
try:
                self.array(shape, intent.in_.cache, obj)  # expected to fail
except ValueError as msg:
if not str(msg).startswith(
"failed to initialize intent(cache) array"):
raise
else:
raise SystemError(
"intent(cache) should have failed on smaller array")
def test_cache_hidden(self):
shape = (2, )
a = self.array(shape, intent.cache.hide, None)
assert a.arr.shape == shape
shape = (2, 3)
a = self.array(shape, intent.cache.hide, None)
assert a.arr.shape == shape
shape = (-1, 3)
try:
a = self.array(shape, intent.cache.hide, None)
except ValueError as msg:
if not str(msg).startswith(
"failed to create intent(cache|hide)|optional array"):
raise
else:
raise SystemError(
"intent(cache) should have failed on undefined dimensions")
def test_hidden(self):
shape = (2, )
a = self.array(shape, intent.hide, None)
assert a.arr.shape == shape
assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))
shape = (2, 3)
a = self.array(shape, intent.hide, None)
assert a.arr.shape == shape
assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))
assert a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"]
shape = (2, 3)
a = self.array(shape, intent.c.hide, None)
assert a.arr.shape == shape
assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))
assert not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"]
shape = (-1, 3)
try:
a = self.array(shape, intent.hide, None)
except ValueError as msg:
if not str(msg).startswith(
"failed to create intent(cache|hide)|optional array"):
raise
else:
raise SystemError(
"intent(hide) should have failed on undefined dimensions")
def test_optional_none(self):
shape = (2, )
a = self.array(shape, intent.optional, None)
assert a.arr.shape == shape
assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))
shape = (2, 3)
a = self.array(shape, intent.optional, None)
assert a.arr.shape == shape
assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))
assert a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"]
shape = (2, 3)
a = self.array(shape, intent.c.optional, None)
assert a.arr.shape == shape
assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))
assert not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"]
def test_optional_from_2seq(self):
obj = self.num2seq
shape = (len(obj), )
a = self.array(shape, intent.optional, obj)
assert a.arr.shape == shape
assert not a.has_shared_memory()
def test_optional_from_23seq(self):
obj = self.num23seq
shape = (len(obj), len(obj[0]))
a = self.array(shape, intent.optional, obj)
assert a.arr.shape == shape
assert not a.has_shared_memory()
a = self.array(shape, intent.optional.c, obj)
assert a.arr.shape == shape
assert not a.has_shared_memory()
def test_inplace(self):
obj = np.array(self.num23seq, dtype=self.type.dtype)
assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"]
shape = obj.shape
a = self.array(shape, intent.inplace, obj)
assert obj[1][2] == a.arr[1][2], repr((obj, a.arr))
a.arr[1][2] = 54
assert obj[1][2] == a.arr[1][2] == np.array(54, dtype=self.type.dtype)
assert a.arr is obj
assert obj.flags["FORTRAN"] # obj attributes are changed inplace!
assert not obj.flags["CONTIGUOUS"]
def test_inplace_from_casttype(self):
for t in self.type.cast_types():
if t is self.type:
continue
obj = np.array(self.num23seq, dtype=t.dtype)
assert obj.dtype.type == t.type
assert obj.dtype.type is not self.type.type
assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"]
shape = obj.shape
a = self.array(shape, intent.inplace, obj)
assert obj[1][2] == a.arr[1][2], repr((obj, a.arr))
a.arr[1][2] = 54
assert obj[1][2] == a.arr[1][2] == np.array(54,
dtype=self.type.dtype)
assert a.arr is obj
assert obj.flags["FORTRAN"] # obj attributes changed inplace!
assert not obj.flags["CONTIGUOUS"]
assert obj.dtype.type is self.type.type # obj changed inplace!
| 22,071 | Python | 34.146497 | 79 | 0.54438 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_callback.py | import math
import textwrap
import sys
import pytest
import threading
import traceback
import time
import numpy as np
from numpy.testing import IS_PYPY
from . import util
class TestF77Callback(util.F2PyTest):
sources = [util.getpath("tests", "src", "callback", "foo.f")]
@pytest.mark.parametrize("name", "t,t2".split(","))
def test_all(self, name):
self.check_function(name)
@pytest.mark.xfail(IS_PYPY,
reason="PyPy cannot modify tp_doc after PyType_Ready")
def test_docstring(self):
expected = textwrap.dedent("""\
a = t(fun,[fun_extra_args])
Wrapper for ``t``.
Parameters
----------
fun : call-back function
Other Parameters
----------------
fun_extra_args : input tuple, optional
Default: ()
Returns
-------
a : int
Notes
-----
Call-back functions::
def fun(): return a
Return objects:
a : int
""")
assert self.module.t.__doc__ == expected
def check_function(self, name):
t = getattr(self.module, name)
r = t(lambda: 4)
assert r == 4
r = t(lambda a: 5, fun_extra_args=(6, ))
assert r == 5
r = t(lambda a: a, fun_extra_args=(6, ))
assert r == 6
r = t(lambda a: 5 + a, fun_extra_args=(7, ))
assert r == 12
r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi, ))
assert r == 180
r = t(math.degrees, fun_extra_args=(math.pi, ))
assert r == 180
r = t(self.module.func, fun_extra_args=(6, ))
assert r == 17
r = t(self.module.func0)
assert r == 11
r = t(self.module.func0._cpointer)
assert r == 11
class A:
def __call__(self):
return 7
def mth(self):
return 9
a = A()
r = t(a)
assert r == 7
r = t(a.mth)
assert r == 9
@pytest.mark.skipif(sys.platform == "win32",
reason="Fails with MinGW64 Gfortran (Issue #9673)")
def test_string_callback(self):
def callback(code):
if code == "r":
return 0
else:
return 1
f = getattr(self.module, "string_callback")
r = f(callback)
assert r == 0
@pytest.mark.skipif(sys.platform == "win32",
reason="Fails with MinGW64 Gfortran (Issue #9673)")
def test_string_callback_array(self):
# See gh-10027
cu = np.zeros((1, 8), "S1")
def callback(cu, lencu):
if cu.shape != (lencu, 8):
return 1
if cu.dtype != "S1":
return 2
if not np.all(cu == b""):
return 3
return 0
f = getattr(self.module, "string_callback_array")
res = f(callback, cu, len(cu))
assert res == 0
def test_threadsafety(self):
# Segfaults if the callback handling is not threadsafe
errors = []
def cb():
# Sleep here to make it more likely for another thread
# to call their callback at the same time.
time.sleep(1e-3)
# Check reentrancy
r = self.module.t(lambda: 123)
assert r == 123
return 42
def runner(name):
try:
for j in range(50):
r = self.module.t(cb)
assert r == 42
self.check_function(name)
except Exception:
errors.append(traceback.format_exc())
threads = [
threading.Thread(target=runner, args=(arg, ))
for arg in ("t", "t2") for n in range(20)
]
for t in threads:
t.start()
for t in threads:
t.join()
errors = "\n\n".join(errors)
if errors:
raise AssertionError(errors)
def test_hidden_callback(self):
try:
self.module.hidden_callback(2)
except Exception as msg:
assert str(msg).startswith("Callback global_f not defined")
try:
self.module.hidden_callback2(2)
except Exception as msg:
assert str(msg).startswith("cb: Callback global_f not defined")
self.module.global_f = lambda x: x + 1
r = self.module.hidden_callback(2)
assert r == 3
self.module.global_f = lambda x: x + 2
r = self.module.hidden_callback(2)
assert r == 4
del self.module.global_f
try:
self.module.hidden_callback(2)
except Exception as msg:
assert str(msg).startswith("Callback global_f not defined")
self.module.global_f = lambda x=0: x + 3
r = self.module.hidden_callback(2)
assert r == 5
# reproducer of gh18341
r = self.module.hidden_callback2(2)
assert r == 3
class TestF77CallbackPythonTLS(TestF77Callback):
"""
Callback tests using Python thread-local storage instead of
compiler-provided
"""
options = ["-DF2PY_USE_PYTHON_TLS"]
class TestF90Callback(util.F2PyTest):
sources = [util.getpath("tests", "src", "callback", "gh17797.f90")]
def test_gh17797(self):
def incr(x):
return x + 123
y = np.array([1, 2, 3], dtype=np.int64)
r = self.module.gh17797(incr, y)
assert r == 123 + 1 + 2 + 3
class TestGH18335(util.F2PyTest):
"""The reproduction of the reported issue requires specific input that
extensions may break the issue conditions, so the reproducer is
implemented as a separate test class. Do not extend this test with
other tests!
"""
sources = [util.getpath("tests", "src", "callback", "gh18335.f90")]
def test_gh18335(self):
def foo(x):
x[0] += 1
y = np.array([1, 2, 3], dtype=np.int8)
r = self.module.gh18335(foo)
assert r == 123 + 1
| 6,087 | Python | 25.585153 | 77 | 0.518975 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_return_real.py | import platform
import pytest
import numpy as np
from numpy import array
from . import util
class TestReturnReal(util.F2PyTest):
def check_function(self, t, tname):
if tname in ["t0", "t4", "s0", "s4"]:
err = 1e-5
else:
err = 0.0
assert abs(t(234) - 234.0) <= err
assert abs(t(234.6) - 234.6) <= err
assert abs(t("234") - 234) <= err
assert abs(t("234.6") - 234.6) <= err
assert abs(t(-234) + 234) <= err
assert abs(t([234]) - 234) <= err
assert abs(t((234, )) - 234.0) <= err
assert abs(t(array(234)) - 234.0) <= err
assert abs(t(array([234])) - 234.0) <= err
assert abs(t(array([[234]])) - 234.0) <= err
assert abs(t(array([234], "b")) + 22) <= err
assert abs(t(array([234], "h")) - 234.0) <= err
assert abs(t(array([234], "i")) - 234.0) <= err
assert abs(t(array([234], "l")) - 234.0) <= err
assert abs(t(array([234], "B")) - 234.0) <= err
assert abs(t(array([234], "f")) - 234.0) <= err
assert abs(t(array([234], "d")) - 234.0) <= err
if tname in ["t0", "t4", "s0", "s4"]:
assert t(1e200) == t(1e300) # inf
# pytest.raises(ValueError, t, array([234], 'S1'))
pytest.raises(ValueError, t, "abc")
pytest.raises(IndexError, t, [])
pytest.raises(IndexError, t, ())
pytest.raises(Exception, t, t)
pytest.raises(Exception, t, {})
try:
r = t(10**400)
assert repr(r) in ["inf", "Infinity"]
except OverflowError:
pass
@pytest.mark.skipif(
platform.system() == "Darwin",
reason="Prone to error when run with numpy/f2py/tests on mac os, "
"but not when run in isolation",
)
@pytest.mark.skipif(
np.dtype(np.intp).itemsize < 8,
reason="32-bit builds are buggy"
)
class TestCReturnReal(TestReturnReal):
suffix = ".pyf"
module_name = "c_ext_return_real"
code = """
python module c_ext_return_real
usercode \'\'\'
float t4(float value) { return value; }
void s4(float *t4, float value) { *t4 = value; }
double t8(double value) { return value; }
void s8(double *t8, double value) { *t8 = value; }
\'\'\'
interface
function t4(value)
real*4 intent(c) :: t4,value
end
function t8(value)
real*8 intent(c) :: t8,value
end
subroutine s4(t4,value)
intent(c) s4
real*4 intent(out) :: t4
real*4 intent(c) :: value
end
subroutine s8(t8,value)
intent(c) s8
real*8 intent(out) :: t8
real*8 intent(c) :: value
end
end interface
end python module c_ext_return_real
"""
@pytest.mark.parametrize("name", "t4,t8,s4,s8".split(","))
def test_all(self, name):
self.check_function(getattr(self.module, name), name)
class TestFReturnReal(TestReturnReal):
sources = [
util.getpath("tests", "src", "return_real", "foo77.f"),
util.getpath("tests", "src", "return_real", "foo90.f90"),
]
@pytest.mark.parametrize("name", "t0,t4,t8,td,s0,s4,s8,sd".split(","))
def test_all_f77(self, name):
self.check_function(getattr(self.module, name), name)
@pytest.mark.parametrize("name", "t0,t4,t8,td,s0,s4,s8,sd".split(","))
def test_all_f90(self, name):
self.check_function(getattr(self.module.f90_return_real, name), name)
| 3,346 | Python | 29.427272 | 77 | 0.566348 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_mixed.py | import os
import textwrap
import pytest
from numpy.testing import IS_PYPY
from . import util
class TestMixed(util.F2PyTest):
sources = [
util.getpath("tests", "src", "mixed", "foo.f"),
util.getpath("tests", "src", "mixed", "foo_fixed.f90"),
util.getpath("tests", "src", "mixed", "foo_free.f90"),
]
def test_all(self):
assert self.module.bar11() == 11
assert self.module.foo_fixed.bar12() == 12
assert self.module.foo_free.bar13() == 13
@pytest.mark.xfail(IS_PYPY,
reason="PyPy cannot modify tp_doc after PyType_Ready")
def test_docstring(self):
expected = textwrap.dedent("""\
a = bar11()
Wrapper for ``bar11``.
Returns
-------
a : int
""")
assert self.module.bar11.__doc__ == expected
| 848 | Python | 23.970588 | 77 | 0.558962 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_block_docstring.py | import sys
import pytest
from . import util
from numpy.testing import IS_PYPY
class TestBlockDocString(util.F2PyTest):
sources = [util.getpath("tests", "src", "block_docstring", "foo.f")]
@pytest.mark.skipif(sys.platform == "win32",
reason="Fails with MinGW64 Gfortran (Issue #9673)")
@pytest.mark.xfail(IS_PYPY,
reason="PyPy cannot modify tp_doc after PyType_Ready")
def test_block_docstring(self):
expected = "bar : 'i'-array(2,3)\n"
assert self.module.block.__doc__ == expected
| 564 | Python | 30.388887 | 77 | 0.62766 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_assumed_shape.py | import os
import pytest
import tempfile
from . import util
class TestAssumedShapeSumExample(util.F2PyTest):
sources = [
util.getpath("tests", "src", "assumed_shape", "foo_free.f90"),
util.getpath("tests", "src", "assumed_shape", "foo_use.f90"),
util.getpath("tests", "src", "assumed_shape", "precision.f90"),
util.getpath("tests", "src", "assumed_shape", "foo_mod.f90"),
util.getpath("tests", "src", "assumed_shape", ".f2py_f2cmap"),
]
@pytest.mark.slow
def test_all(self):
r = self.module.fsum([1, 2])
assert r == 3
r = self.module.sum([1, 2])
assert r == 3
r = self.module.sum_with_use([1, 2])
assert r == 3
r = self.module.mod.sum([1, 2])
assert r == 3
r = self.module.mod.fsum([1, 2])
assert r == 3
class TestF2cmapOption(TestAssumedShapeSumExample):
def setup_method(self):
# Use a custom file name for .f2py_f2cmap
self.sources = list(self.sources)
f2cmap_src = self.sources.pop(-1)
self.f2cmap_file = tempfile.NamedTemporaryFile(delete=False)
with open(f2cmap_src, "rb") as f:
self.f2cmap_file.write(f.read())
self.f2cmap_file.close()
self.sources.append(self.f2cmap_file.name)
self.options = ["--f2cmap", self.f2cmap_file.name]
super().setup_method()
def teardown_method(self):
os.unlink(self.f2cmap_file.name)
| 1,466 | Python | 28.339999 | 71 | 0.587312 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_compile_function.py | """See https://github.com/numpy/numpy/pull/11937.
"""
import sys
import os
import uuid
from importlib import import_module
import pytest
import numpy.f2py
from . import util
def setup_module():
if not util.has_c_compiler():
pytest.skip("Needs C compiler")
if not util.has_f77_compiler():
pytest.skip("Needs FORTRAN 77 compiler")
# extra_args can be a list (since gh-11937) or string.
# also test absence of extra_args
@pytest.mark.parametrize("extra_args",
[["--noopt", "--debug"], "--noopt --debug", ""])
@pytest.mark.leaks_references(reason="Imported module seems never deleted.")
def test_f2py_init_compile(extra_args):
# flush through the f2py __init__ compile() function code path as a
# crude test for input handling following migration from
# exec_command() to subprocess.check_output() in gh-11937
# the Fortran 77 syntax requires 6 spaces before any commands, but
    # more space may be added.
fsource = """
integer function foo()
foo = 10 + 5
return
end
"""
# use various helper functions in util.py to enable robust build /
# compile and reimport cycle in test suite
moddir = util.get_module_dir()
modname = util.get_temp_module_name()
cwd = os.getcwd()
target = os.path.join(moddir, str(uuid.uuid4()) + ".f")
# try running compile() with and without a source_fn provided so
# that the code path where a temporary file for writing Fortran
# source is created is also explored
for source_fn in [target, None]:
# mimic the path changing behavior used by build_module() in
# util.py, but don't actually use build_module() because it has
# its own invocation of subprocess that circumvents the
# f2py.compile code block under test
with util.switchdir(moddir):
ret_val = numpy.f2py.compile(fsource,
modulename=modname,
extra_args=extra_args,
source_fn=source_fn)
# check for compile success return value
assert ret_val == 0
# we are not currently able to import the Python-Fortran
# interface module on Windows / Appveyor, even though we do get
# successful compilation on that platform with Python 3.x
if sys.platform != "win32":
# check for sensible result of Fortran function; that means
# we can import the module name in Python and retrieve the
# result of the sum operation
return_check = import_module(modname)
calc_result = return_check.foo()
assert calc_result == 15
            # Removal from sys.modules is not strictly necessary; even with
            # removal, the module (dict) stays alive.
del sys.modules[modname]
def test_f2py_init_compile_failure():
# verify an appropriate integer status value returned by
# f2py.compile() when invalid Fortran is provided
ret_val = numpy.f2py.compile(b"invalid")
assert ret_val == 1
def test_f2py_init_compile_bad_cmd():
# verify that usage of invalid command in f2py.compile() returns
# status value of 127 for historic consistency with exec_command()
# error handling
# patch the sys Python exe path temporarily to induce an OSError
    # downstream. NOTE: how bad of an idea is this patching?
try:
temp = sys.executable
sys.executable = "does not exist"
# the OSError should take precedence over invalid Fortran
ret_val = numpy.f2py.compile(b"invalid")
assert ret_val == 127
finally:
sys.executable = temp
@pytest.mark.parametrize(
"fsource",
[
"program test_f2py\nend program test_f2py",
b"program test_f2py\nend program test_f2py",
],
)
def test_compile_from_strings(tmpdir, fsource):
# Make sure we can compile str and bytes gh-12796
with util.switchdir(tmpdir):
ret_val = numpy.f2py.compile(fsource,
modulename="test_compile_from_strings",
extension=".f90")
assert ret_val == 0
| 4,186 | Python | 34.483051 | 76 | 0.632824 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_size.py | import os
import pytest
import numpy as np
from . import util
class TestSizeSumExample(util.F2PyTest):
sources = [util.getpath("tests", "src", "size", "foo.f90")]
@pytest.mark.slow
def test_all(self):
r = self.module.foo([[]])
assert r == [0]
r = self.module.foo([[1, 2]])
assert r == [3]
r = self.module.foo([[1, 2], [3, 4]])
assert np.allclose(r, [3, 7])
r = self.module.foo([[1, 2], [3, 4], [5, 6]])
assert np.allclose(r, [3, 7, 11])
@pytest.mark.slow
def test_transpose(self):
r = self.module.trans([[]])
assert np.allclose(r.T, np.array([[]]))
r = self.module.trans([[1, 2]])
assert np.allclose(r, [[1.], [2.]])
r = self.module.trans([[1, 2, 3], [4, 5, 6]])
assert np.allclose(r, [[1, 4], [2, 5], [3, 6]])
@pytest.mark.slow
def test_flatten(self):
r = self.module.flatten([[]])
assert np.allclose(r, [])
r = self.module.flatten([[1, 2]])
assert np.allclose(r, [1, 2])
r = self.module.flatten([[1, 2, 3], [4, 5, 6]])
assert np.allclose(r, [1, 2, 3, 4, 5, 6])
| 1,164 | Python | 24.326086 | 63 | 0.5 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_module_doc.py | import os
import sys
import pytest
import textwrap
from . import util
from numpy.testing import IS_PYPY
class TestModuleDocString(util.F2PyTest):
sources = [
util.getpath("tests", "src", "module_data",
"module_data_docstring.f90")
]
@pytest.mark.skipif(sys.platform == "win32",
reason="Fails with MinGW64 Gfortran (Issue #9673)")
@pytest.mark.xfail(IS_PYPY,
reason="PyPy cannot modify tp_doc after PyType_Ready")
def test_module_docstring(self):
assert self.module.mod.__doc__ == textwrap.dedent("""\
i : 'i'-scalar
x : 'i'-array(4)
a : 'f'-array(2,3)
b : 'f'-array(-1,-1), not allocated\x00
foo()\n
Wrapper for ``foo``.\n\n""")
| 863 | Python | 29.857142 | 77 | 0.514484 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/mingw32ccompiler.py | """
Support code for building Python extensions on Windows.
# NT stuff
# 1. Make sure libpython<version>.a exists for gcc. If not, build it.
# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
# 3. Force windows to use g77
"""
import os
import platform
import sys
import subprocess
import re
import textwrap
# Overwrite certain distutils.ccompiler functions:
import numpy.distutils.ccompiler # noqa: F401
from numpy.distutils import log
# NT stuff
# 1. Make sure libpython<version>.a exists for gcc. If not, build it.
# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
# --> this is done in numpy/distutils/ccompiler.py
# 3. Force windows to use g77
import distutils.cygwinccompiler
from distutils.unixccompiler import UnixCCompiler
from distutils.msvccompiler import get_build_version as get_build_msvc_version
from distutils.errors import UnknownFileError
from numpy.distutils.misc_util import (msvc_runtime_library,
msvc_runtime_version,
msvc_runtime_major,
get_build_architecture)
def get_msvcr_replacement():
"""Replacement for outdated version of get_msvcr from cygwinccompiler"""
msvcr = msvc_runtime_library()
return [] if msvcr is None else [msvcr]
# Useful to generate table of symbols from a dll
_START = re.compile(r'\[Ordinal/Name Pointer\] Table')
_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)')
# the same as cygwin plus some additional parameters
class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler):
""" A modified MingW32 compiler compatible with an MSVC built Python.
"""
compiler_type = 'mingw32'
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose,
dry_run, force)
# **changes: eric jones 4/11/01
# 1. Check for import library on Windows. Build if it doesn't exist.
build_import_library()
# Check for custom msvc runtime library on Windows. Build if it doesn't exist.
msvcr_success = build_msvcr_library()
msvcr_dbg_success = build_msvcr_library(debug=True)
if msvcr_success or msvcr_dbg_success:
# add preprocessor statement for using customized msvcr lib
self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR')
# Define the MSVC version as hint for MinGW
msvcr_version = msvc_runtime_version()
if msvcr_version:
self.define_macro('__MSVCRT_VERSION__', '0x%04i' % msvcr_version)
# MS_WIN64 should be defined when building for amd64 on windows,
# but python headers define it only for MS compilers, which has all
# kind of bad consequences, like using Py_ModuleInit4 instead of
# Py_ModuleInit4_64, etc... So we add it here
if get_build_architecture() == 'AMD64':
self.set_executables(
compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall',
compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall '
'-Wstrict-prototypes',
linker_exe='gcc -g',
linker_so='gcc -g -shared')
else:
self.set_executables(
compiler='gcc -O2 -Wall',
compiler_so='gcc -O2 -Wall -Wstrict-prototypes',
linker_exe='g++ ',
linker_so='g++ -shared')
# added for python2.3 support
# we can't pass it through set_executables because pre 2.2 would fail
self.compiler_cxx = ['g++']
# Maybe we should also append -mthreads, but then the finished dlls
# need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support
# thread-safe exception handling on `Mingw32')
# no additional libraries needed
#self.dll_libraries=[]
return
# __init__ ()
def link(self,
target_desc,
objects,
output_filename,
output_dir,
libraries,
library_dirs,
runtime_library_dirs,
export_symbols = None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# Include the appropriate MSVC runtime library if Python was built
# with MSVC >= 7.0 (MinGW standard is msvcrt)
runtime_library = msvc_runtime_library()
if runtime_library:
if not libraries:
libraries = []
libraries.append(runtime_library)
args = (self,
target_desc,
objects,
output_filename,
output_dir,
libraries,
library_dirs,
runtime_library_dirs,
None, #export_symbols, we do this in our def-file
debug,
extra_preargs,
extra_postargs,
build_temp,
target_lang)
func = UnixCCompiler.link
func(*args[:func.__code__.co_argcount])
return
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
# added these lines to strip off windows drive letters
# without it, .o files are placed next to .c files
# instead of the build directory
drv, base = os.path.splitdrive(base)
if drv:
base = base[1:]
if ext not in (self.src_extensions + ['.rc', '.res']):
raise UnknownFileError(
"unknown file type '%s' (from '%s')" % \
(ext, src_name))
if strip_dir:
base = os.path.basename (base)
if ext == '.res' or ext == '.rc':
# these need to be compiled to object files
obj_names.append (os.path.join (output_dir,
base + ext + self.obj_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
def find_python_dll():
# We can't do much here:
# - find it in the virtualenv (sys.prefix)
# - find it in python main dir (sys.base_prefix, if in a virtualenv)
# - sys.real_prefix is main dir for virtualenvs in Python 2.7
# - in system32,
    # - otherwise (SxS), I don't know how to get it.
stems = [sys.prefix]
if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix:
stems.append(sys.base_prefix)
elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix:
stems.append(sys.real_prefix)
sub_dirs = ['', 'lib', 'bin']
# generate possible combinations of directory trees and sub-directories
lib_dirs = []
for stem in stems:
for folder in sub_dirs:
lib_dirs.append(os.path.join(stem, folder))
# add system directory as well
if 'SYSTEMROOT' in os.environ:
lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'System32'))
# search in the file system for possible candidates
major_version, minor_version = tuple(sys.version_info[:2])
implementation = platform.python_implementation()
if implementation == 'CPython':
dllname = f'python{major_version}{minor_version}.dll'
elif implementation == 'PyPy':
dllname = f'libpypy{major_version}-c.dll'
else:
dllname = f'Unknown platform {implementation}'
print("Looking for %s" % dllname)
for folder in lib_dirs:
dll = os.path.join(folder, dllname)
if os.path.exists(dll):
return dll
raise ValueError("%s not found in %s" % (dllname, lib_dirs))
def dump_table(dll):
st = subprocess.check_output(["objdump.exe", "-p", dll])
return st.split(b'\n')
def generate_def(dll, dfile):
"""Given a dll file location, get all its exported symbols and dump them
into the given def file.
The .def file will be overwritten"""
dump = dump_table(dll)
for i in range(len(dump)):
if _START.match(dump[i].decode()):
break
else:
raise ValueError("Symbol table not found")
syms = []
for j in range(i+1, len(dump)):
m = _TABLE.match(dump[j].decode())
if m:
syms.append((int(m.group(1).strip()), m.group(2)))
else:
break
if len(syms) == 0:
log.warn('No symbols found in %s' % dll)
with open(dfile, 'w') as d:
d.write('LIBRARY %s\n' % os.path.basename(dll))
d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n')
d.write(';DATA PRELOAD SINGLE\n')
d.write('\nEXPORTS\n')
for s in syms:
#d.write('@%d %s\n' % (s[0], s[1]))
d.write('%s\n' % s[1])
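# Illustrative sketch of the output (hypothetical dll and symbols): for a
# 'python39.dll' exporting foo and bar, generate_def() writes a .def file
# shaped like:
#
#     LIBRARY python39.dll
#     ;CODE PRELOAD MOVEABLE DISCARDABLE
#     ;DATA PRELOAD SINGLE
#
#     EXPORTS
#     foo
#     bar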
def find_dll(dll_name):
arch = {'AMD64' : 'amd64',
'Intel' : 'x86'}[get_build_architecture()]
def _find_dll_in_winsxs(dll_name):
# Walk through the WinSxS directory to find the dll.
winsxs_path = os.path.join(os.environ.get('WINDIR', r'C:\WINDOWS'),
'winsxs')
if not os.path.exists(winsxs_path):
return None
for root, dirs, files in os.walk(winsxs_path):
if dll_name in files and arch in root:
return os.path.join(root, dll_name)
return None
def _find_dll_in_path(dll_name):
# First, look in the Python directory, then scan PATH for
# the given dll name.
for path in [sys.prefix] + os.environ['PATH'].split(';'):
filepath = os.path.join(path, dll_name)
if os.path.exists(filepath):
return os.path.abspath(filepath)
return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name)
def build_msvcr_library(debug=False):
if os.name != 'nt':
return False
# If the version number is None, then we couldn't find the MSVC runtime at
    # all, because we are running on a Python distribution which is custom
# compiled; trust that the compiler is the same as the one available to us
# now, and that it is capable of linking with the correct runtime without
# any extra options.
msvcr_ver = msvc_runtime_major()
if msvcr_ver is None:
log.debug('Skip building import library: '
'Runtime is not compiled with MSVC')
return False
# Skip using a custom library for versions < MSVC 8.0
if msvcr_ver < 80:
log.debug('Skip building msvcr library:'
' custom functionality not present')
return False
msvcr_name = msvc_runtime_library()
if debug:
msvcr_name += 'd'
# Skip if custom library already exists
out_name = "lib%s.a" % msvcr_name
out_file = os.path.join(sys.prefix, 'libs', out_name)
if os.path.isfile(out_file):
log.debug('Skip building msvcr library: "%s" exists' %
(out_file,))
return True
# Find the msvcr dll
msvcr_dll_name = msvcr_name + '.dll'
dll_file = find_dll(msvcr_dll_name)
if not dll_file:
log.warn('Cannot build msvcr library: "%s" not found' %
msvcr_dll_name)
return False
def_name = "lib%s.def" % msvcr_name
def_file = os.path.join(sys.prefix, 'libs', def_name)
log.info('Building msvcr library: "%s" (from %s)' \
% (out_file, dll_file))
# Generate a symbol definition file from the msvcr dll
generate_def(dll_file, def_file)
# Create a custom mingw library for the given symbol definitions
cmd = ['dlltool', '-d', def_file, '-l', out_file]
retcode = subprocess.call(cmd)
# Clean up symbol definitions
os.remove(def_file)
return (not retcode)
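# Illustrative sketch (hypothetical runtime version): for msvcr90 the call
# above amounts to running
#
#     dlltool -d <sys.prefix>/libs/libmsvcr90.def -l <sys.prefix>/libs/libmsvcr90.a
#
# i.e. dlltool turns the generated symbol definitions into a MinGW import
# library that gcc can link against.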
def build_import_library():
if os.name != 'nt':
return
arch = get_build_architecture()
if arch == 'AMD64':
return _build_import_library_amd64()
elif arch == 'Intel':
return _build_import_library_x86()
else:
raise ValueError("Unhandled arch %s" % arch)
def _check_for_import_lib():
"""Check if an import library for the Python runtime already exists."""
major_version, minor_version = tuple(sys.version_info[:2])
# patterns for the file name of the library itself
patterns = ['libpython%d%d.a',
'libpython%d%d.dll.a',
'libpython%d.%d.dll.a']
# directory trees that may contain the library
stems = [sys.prefix]
if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix:
stems.append(sys.base_prefix)
elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix:
stems.append(sys.real_prefix)
# possible subdirectories within those trees where it is placed
sub_dirs = ['libs', 'lib']
# generate a list of candidate locations
candidates = []
for pat in patterns:
filename = pat % (major_version, minor_version)
for stem_dir in stems:
for folder in sub_dirs:
candidates.append(os.path.join(stem_dir, folder, filename))
# test the filesystem to see if we can find any of these
for fullname in candidates:
if os.path.isfile(fullname):
# already exists, in location given
return (True, fullname)
# needs to be built, preferred location given first
return (False, candidates[0])
def _build_import_library_amd64():
out_exists, out_file = _check_for_import_lib()
if out_exists:
log.debug('Skip building import library: "%s" exists', out_file)
return
# get the runtime dll for which we are building import library
dll_file = find_python_dll()
log.info('Building import library (arch=AMD64): "%s" (from %s)' %
(out_file, dll_file))
# generate symbol list from this library
def_name = "python%d%d.def" % tuple(sys.version_info[:2])
def_file = os.path.join(sys.prefix, 'libs', def_name)
generate_def(dll_file, def_file)
# generate import library from this symbol list
cmd = ['dlltool', '-d', def_file, '-l', out_file]
subprocess.check_call(cmd)
def _build_import_library_x86():
""" Build the import libraries for Mingw32-gcc on Windows
"""
out_exists, out_file = _check_for_import_lib()
if out_exists:
log.debug('Skip building import library: "%s" exists', out_file)
return
lib_name = "python%d%d.lib" % tuple(sys.version_info[:2])
lib_file = os.path.join(sys.prefix, 'libs', lib_name)
if not os.path.isfile(lib_file):
# didn't find library file in virtualenv, try base distribution, too,
# and use that instead if found there. for Python 2.7 venvs, the base
# directory is in attribute real_prefix instead of base_prefix.
if hasattr(sys, 'base_prefix'):
base_lib = os.path.join(sys.base_prefix, 'libs', lib_name)
elif hasattr(sys, 'real_prefix'):
base_lib = os.path.join(sys.real_prefix, 'libs', lib_name)
else:
base_lib = '' # os.path.isfile('') == False
if os.path.isfile(base_lib):
lib_file = base_lib
else:
log.warn('Cannot build import library: "%s" not found', lib_file)
return
log.info('Building import library (ARCH=x86): "%s"', out_file)
from numpy.distutils import lib2def
def_name = "python%d%d.def" % tuple(sys.version_info[:2])
def_file = os.path.join(sys.prefix, 'libs', def_name)
nm_output = lib2def.getnm(
lib2def.DEFAULT_NM + [lib_file], shell=False)
dlist, flist = lib2def.parse_nm(nm_output)
with open(def_file, 'w') as fid:
lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, fid)
dll_name = find_python_dll ()
cmd = ["dlltool",
"--dllname", dll_name,
"--def", def_file,
"--output-lib", out_file]
status = subprocess.check_output(cmd)
if status:
log.warn('Failed to build import library for gcc. Linking will fail.')
return
#=====================================
# Dealing with Visual Studio MANIFESTS
#=====================================
# Functions to deal with visual studio manifests. Manifest are a mechanism to
# enforce strong DLL versioning on windows, and has nothing to do with
# distutils MANIFEST. manifests are XML files with version info, and used by
# the OS loader; they are necessary when linking against a DLL not in the
# system path; in particular, official python 2.6 binary is built against the
# MS runtime 9 (the one from VS 2008), which is not available on most windows
# systems; python 2.6 installer does install it in the Win SxS (Side by side)
# directory, but this requires the manifest for this to work. This is a big
# mess, thanks MS for a wonderful system.
# XXX: ideally, we should use exactly the same version as used by python. I
# submitted a patch to get this version, but it was only included for python
# 2.6.1 and above. So for versions below, we use a "best guess".
_MSVCRVER_TO_FULLVER = {}
if sys.platform == 'win32':
try:
import msvcrt
# I took one version in my SxS directory: no idea if it is the good
# one, and we can't retrieve it from python
_MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42"
_MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8"
# Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0
# on Windows XP:
_MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460"
crt_ver = getattr(msvcrt, 'CRT_ASSEMBLY_VERSION', None)
if crt_ver is not None: # Available at least back to Python 3.3
maj, min = re.match(r'(\d+)\.(\d)', crt_ver).groups()
_MSVCRVER_TO_FULLVER[maj + min] = crt_ver
del maj, min
del crt_ver
except ImportError:
# If we are here, means python was not built with MSVC. Not sure what
# to do in that case: manifest building will fail, but it should not be
# used in that case anyway
log.warn('Cannot import msvcrt: using manifest will not be possible')
def msvc_manifest_xml(maj, min):
"""Given a major and minor version of the MSVCR, returns the
corresponding XML file."""
try:
fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)]
except KeyError:
raise ValueError("Version %d,%d of MSVCRT not supported yet" %
(maj, min)) from None
# Don't be fooled, it looks like an XML, but it is not. In particular, it
# should not have any space before starting, and its size should be
# divisible by 4, most likely for alignment constraints when the xml is
# embedded in the binary...
# This template was copied directly from the python 2.6 binary (using
# strings.exe from mingw on python.exe).
template = textwrap.dedent("""\
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity type="win32" name="Microsoft.VC%(maj)d%(min)d.CRT" version="%(fullver)s" processorArchitecture="*" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity>
</dependentAssembly>
</dependency>
</assembly>""")
return template % {'fullver': fullver, 'maj': maj, 'min': min}
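# Illustrative example (values taken from the _MSVCRVER_TO_FULLVER table
# above): a call such as
#
#     msvc_manifest_xml(9, 0)
#
# looks up full version '9.0.21022.8' and emits a manifest depending on
# the 'Microsoft.VC90.CRT' assembly.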
def manifest_rc(name, type='dll'):
"""Return the rc file used to generate the res file which will be embedded
as manifest for given manifest file name, of given type ('dll' or
'exe').
Parameters
----------
name : str
name of the manifest file to embed
type : str {'dll', 'exe'}
type of the binary which will embed the manifest
"""
if type == 'dll':
rctype = 2
elif type == 'exe':
rctype = 1
else:
raise ValueError("Type %s not supported" % type)
return """\
#include "winuser.h"
%d RT_MANIFEST %s""" % (rctype, name)
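# Illustrative example: manifest_rc('foo.manifest', 'dll') returns
#
#     #include "winuser.h"
#     2 RT_MANIFEST foo.manifest
#
# which the resource compiler turns into the .res file that gets embedded
# as the manifest of the binary.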
def check_embedded_msvcr_match_linked(msver):
"""msver is the ms runtime version used for the MANIFEST."""
# check msvcr major version are the same for linking and
# embedding
maj = msvc_runtime_major()
if maj:
        if maj != int(msver):
            raise ValueError(
                  "Discrepancy between linked msvcr "
                  "(%d) and the one about to be embedded "
                  "(%d)" % (maj, int(msver)))
def configtest_name(config):
base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c"))
return os.path.splitext(base)[0]
def manifest_name(config):
    # Get configtest name (including suffix)
root = configtest_name(config)
exext = config.compiler.exe_extension
return root + exext + ".manifest"
def rc_name(config):
# Get configtest name (including suffix)
root = configtest_name(config)
return root + ".rc"
def generate_manifest(config):
msver = get_build_msvc_version()
if msver is not None:
if msver >= 8:
check_embedded_msvcr_match_linked(msver)
ma_str, mi_str = str(msver).split('.')
# Write the manifest file
manxml = msvc_manifest_xml(int(ma_str), int(mi_str))
with open(manifest_name(config), "w") as man:
config.temp_files.append(manifest_name(config))
man.write(manxml)
| 22,284 | Python | 36.39094 | 184 | 0.593969 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/unixccompiler.py | """
unixccompiler - can handle very long argument lists for ar.
"""
import os
import sys
import subprocess
import shlex
from distutils.errors import CompileError, DistutilsExecError, LibError
from distutils.unixccompiler import UnixCCompiler
from numpy.distutils.ccompiler import replace_method
from numpy.distutils.misc_util import _commandline_dep_string
from numpy.distutils import log
# Note that UnixCCompiler._compile appeared in Python 2.3
def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
"""Compile a single source files with a Unix-style compiler."""
# HP ad-hoc fix, see ticket 1383
ccomp = self.compiler_so
if ccomp[0] == 'aCC':
# remove flags that will trigger ANSI-C mode for aCC
if '-Ae' in ccomp:
ccomp.remove('-Ae')
if '-Aa' in ccomp:
ccomp.remove('-Aa')
# add flags for (almost) sane C++ handling
ccomp += ['-AA']
self.compiler_so = ccomp
# ensure OPT environment variable is read
if 'OPT' in os.environ:
# XXX who uses this?
from sysconfig import get_config_vars
opt = shlex.join(shlex.split(os.environ['OPT']))
gcv_opt = shlex.join(shlex.split(get_config_vars('OPT')[0]))
ccomp_s = shlex.join(self.compiler_so)
if opt not in ccomp_s:
ccomp_s = ccomp_s.replace(gcv_opt, opt)
self.compiler_so = shlex.split(ccomp_s)
llink_s = shlex.join(self.linker_so)
if opt not in llink_s:
self.linker_so = self.linker_so + shlex.split(opt)
display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src)
# gcc style automatic dependencies, outputs a makefile (-MF) that lists
# all headers needed by a c file as a side effect of compilation (-MMD)
if getattr(self, '_auto_depends', False):
deps = ['-MMD', '-MF', obj + '.d']
else:
deps = []
try:
self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + deps +
extra_postargs, display = display)
except DistutilsExecError as e:
msg = str(e)
raise CompileError(msg) from None
# add commandline flags to dependency file
if deps:
# After running the compiler, the file created will be in EBCDIC
# but will not be tagged as such. This tags it so the file does not
# have multiple different encodings being written to it
if sys.platform == 'zos':
subprocess.check_output(['chtag', '-tc', 'IBM1047', obj + '.d'])
with open(obj + '.d', 'a') as f:
f.write(_commandline_dep_string(cc_args, extra_postargs, pp_opts))
replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile)
def UnixCCompiler_create_static_lib(self, objects, output_libname,
output_dir=None, debug=0, target_lang=None):
"""
Build a static library in a separate sub-process.
Parameters
----------
objects : list or tuple of str
List of paths to object files used to build the static library.
output_libname : str
The library name as an absolute or relative (if `output_dir` is used)
path.
output_dir : str, optional
The path to the output directory. Default is None, in which case
        the ``output_dir`` attribute of the UnixCCompiler instance is used.
debug : bool, optional
This parameter is not used.
target_lang : str, optional
This parameter is not used.
Returns
-------
None
"""
objects, output_dir = self._fix_object_args(objects, output_dir)
output_filename = \
self.library_filename(output_libname, output_dir=output_dir)
if self._need_link(objects, output_filename):
try:
# previous .a may be screwed up; best to remove it first
# and recreate.
# Also, ar on OS X doesn't handle updating universal archives
os.unlink(output_filename)
except OSError:
pass
self.mkpath(os.path.dirname(output_filename))
tmp_objects = objects + self.objects
while tmp_objects:
objects = tmp_objects[:50]
tmp_objects = tmp_objects[50:]
display = '%s: adding %d object files to %s' % (
os.path.basename(self.archiver[0]),
len(objects), output_filename)
self.spawn(self.archiver + [output_filename] + objects,
display = display)
        # Not many Unices require ranlib anymore -- SunOS 4.x is, I
        # think, the only major Unix that does.
# platform intelligence here to skip ranlib if it's not
# needed -- or maybe Python's configure script took care of
# it for us, hence the check for leading colon.
if self.ranlib:
display = '%s:@ %s' % (os.path.basename(self.ranlib[0]),
output_filename)
try:
self.spawn(self.ranlib + [output_filename],
display = display)
except DistutilsExecError as e:
msg = str(e)
raise LibError(msg) from None
else:
log.debug("skipping %s (up-to-date)", output_filename)
return
replace_method(UnixCCompiler, 'create_static_lib',
UnixCCompiler_create_static_lib)
| 5,426 | Python | 37.21831 | 82 | 0.60247 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/exec_command.py | """
exec_command
Implements exec_command function that is (almost) equivalent to
commands.getstatusoutput function but on NT, DOS systems the
returned status is actually correct (though, the returned status
values may be different by a factor). In addition, exec_command
takes keyword arguments for (re-)defining environment variables.
Provides functions:
exec_command --- execute command in a specified directory and
in the modified environment.
find_executable --- locate a command using info from environment
variable PATH. Equivalent to posix `which`
command.
Author: Pearu Peterson <[email protected]>
Created: 11 January 2003
Requires: Python 2.x
Successfully tested on:
======== ============ =================================================
os.name sys.platform comments
======== ============ =================================================
posix linux2 Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3
PyCrust 0.9.3, Idle 1.0.2
posix linux2 Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2
posix sunos5 SunOS 5.9, Python 2.2, 2.3.2
posix darwin Darwin 7.2.0, Python 2.3
nt win32 Windows Me
Python 2.3(EE), Idle 1.0, PyCrust 0.7.2
Python 2.1.1 Idle 0.8
nt win32 Windows 98, Python 2.1.1. Idle 0.8
nt win32 Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests
fail i.e. redefining environment variables may
not work. FIXED: don't use cygwin echo!
Comment: also `cmd /c echo` will not work
but redefining environment variables do work.
posix cygwin Cygwin 98-4.10, Python 2.3.3(cygming special)
nt win32 Windows XP, Python 2.3.3
======== ============ =================================================
Known bugs:
 * Tests that send messages to stderr fail when executed from an MSYS
   prompt because the messages are lost at some point.
"""
__all__ = ['exec_command', 'find_executable']
import os
import sys
import subprocess
import locale
import warnings
from numpy.distutils.misc_util import is_sequence, make_temp_file
from numpy.distutils import log
def filepath_from_subprocess_output(output):
"""
Convert `bytes` in the encoding used by a subprocess into a filesystem-appropriate `str`.
Inherited from `exec_command`, and possibly incorrect.
"""
mylocale = locale.getpreferredencoding(False)
if mylocale is None:
mylocale = 'ascii'
output = output.decode(mylocale, errors='replace')
output = output.replace('\r\n', '\n')
# Another historical oddity
if output[-1:] == '\n':
output = output[:-1]
return output
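# Illustrative example: b'build\\lib.win32\r\n' decoded under a Windows
# locale becomes 'build\\lib.win32' -- CRLFs are normalized and a single
# trailing newline is stripped.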
def forward_bytes_to_stdout(val):
"""
Forward bytes from a subprocess call to the console, without attempting to
decode them.
The assumption is that the subprocess call already returned bytes in
a suitable encoding.
"""
if hasattr(sys.stdout, 'buffer'):
# use the underlying binary output if there is one
sys.stdout.buffer.write(val)
elif hasattr(sys.stdout, 'encoding'):
# round-trip the encoding if necessary
sys.stdout.write(val.decode(sys.stdout.encoding))
else:
# make a best-guess at the encoding
sys.stdout.write(val.decode('utf8', errors='replace'))
def temp_file_name():
# 2019-01-30, 1.17
warnings.warn('temp_file_name is deprecated since NumPy v1.17, use '
'tempfile.mkstemp instead', DeprecationWarning, stacklevel=1)
fo, name = make_temp_file()
fo.close()
return name
def get_pythonexe():
pythonexe = sys.executable
if os.name in ['nt', 'dos']:
fdir, fn = os.path.split(pythonexe)
fn = fn.upper().replace('PYTHONW', 'PYTHON')
pythonexe = os.path.join(fdir, fn)
assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,)
return pythonexe
def find_executable(exe, path=None, _cache={}):
"""Return full path of a executable or None.
Symbolic links are not followed.
"""
key = exe, path
try:
return _cache[key]
except KeyError:
pass
log.debug('find_executable(%r)' % exe)
orig_exe = exe
if path is None:
path = os.environ.get('PATH', os.defpath)
if os.name=='posix':
realpath = os.path.realpath
else:
realpath = lambda a:a
if exe.startswith('"'):
exe = exe[1:-1]
suffixes = ['']
if os.name in ['nt', 'dos', 'os2']:
fn, ext = os.path.splitext(exe)
extra_suffixes = ['.exe', '.com', '.bat']
if ext.lower() not in extra_suffixes:
suffixes = extra_suffixes
if os.path.isabs(exe):
paths = ['']
else:
paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ]
for path in paths:
fn = os.path.join(path, exe)
for s in suffixes:
f_ext = fn+s
if not os.path.islink(f_ext):
f_ext = realpath(f_ext)
if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK):
log.info('Found executable %s' % f_ext)
_cache[key] = f_ext
return f_ext
log.warn('Could not locate executable %s' % orig_exe)
return None
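# Illustrative usage sketch (paths are hypothetical):
#
#     gcc = find_executable('gcc')                   # e.g. '/usr/bin/gcc' or None
#     gcc = find_executable('gcc', path='/opt/bin')  # restrict the search path
#
# Results are memoized in the _cache dict keyed on (exe, path).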
############################################################
def _preserve_environment( names ):
log.debug('_preserve_environment(%r)' % (names))
env = {name: os.environ.get(name) for name in names}
return env
def _update_environment( **env ):
log.debug('_update_environment(...)')
for name, value in env.items():
os.environ[name] = value or ''
def exec_command(command, execute_in='', use_shell=None, use_tee=None,
_with_python = 1, **env ):
"""
Return (status,output) of executed command.
.. deprecated:: 1.17
Use subprocess.Popen instead
Parameters
----------
command : str
A concatenated string of executable and arguments.
execute_in : str
Before running command ``cd execute_in`` and after ``cd -``.
use_shell : {bool, None}, optional
If True, execute ``sh -c command``. Default None (True)
use_tee : {bool, None}, optional
If True use tee. Default None (True)
Returns
-------
res : str
Both stdout and stderr messages.
Notes
-----
On NT, DOS systems the returned status is correct for external commands.
Wild cards will not work for non-posix systems or when use_shell=0.
"""
# 2019-01-30, 1.17
warnings.warn('exec_command is deprecated since NumPy v1.17, use '
'subprocess.Popen instead', DeprecationWarning, stacklevel=1)
log.debug('exec_command(%r,%s)' % (command,
','.join(['%s=%r'%kv for kv in env.items()])))
if use_tee is None:
use_tee = os.name=='posix'
if use_shell is None:
use_shell = os.name=='posix'
execute_in = os.path.abspath(execute_in)
oldcwd = os.path.abspath(os.getcwd())
if __name__[-12:] == 'exec_command':
exec_dir = os.path.dirname(os.path.abspath(__file__))
elif os.path.isfile('exec_command.py'):
exec_dir = os.path.abspath('.')
else:
exec_dir = os.path.abspath(sys.argv[0])
if os.path.isfile(exec_dir):
exec_dir = os.path.dirname(exec_dir)
if oldcwd!=execute_in:
os.chdir(execute_in)
log.debug('New cwd: %s' % execute_in)
else:
log.debug('Retaining cwd: %s' % oldcwd)
oldenv = _preserve_environment( list(env.keys()) )
_update_environment( **env )
try:
st = _exec_command(command,
use_shell=use_shell,
use_tee=use_tee,
**env)
finally:
if oldcwd!=execute_in:
os.chdir(oldcwd)
log.debug('Restored cwd to %s' % oldcwd)
_update_environment(**oldenv)
return st
def _exec_command(command, use_shell=None, use_tee = None, **env):
"""
Internal workhorse for exec_command().
"""
if use_shell is None:
use_shell = os.name=='posix'
if use_tee is None:
use_tee = os.name=='posix'
if os.name == 'posix' and use_shell:
# On POSIX, subprocess always uses /bin/sh, override
sh = os.environ.get('SHELL', '/bin/sh')
if is_sequence(command):
command = [sh, '-c', ' '.join(command)]
else:
command = [sh, '-c', command]
use_shell = False
elif os.name == 'nt' and is_sequence(command):
# On Windows, join the string for CreateProcess() ourselves as
# subprocess does it a bit differently
command = ' '.join(_quote_arg(arg) for arg in command)
# Inherit environment by default
env = env or None
try:
# universal_newlines is set to False so that communicate()
# will return bytes. We need to decode the output ourselves
# so that Python will not raise a UnicodeDecodeError when
# it encounters an invalid character; rather, we simply replace it
proc = subprocess.Popen(command, shell=use_shell, env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=False)
except OSError:
# Return 127, as os.spawn*() and /bin/sh do
return 127, ''
text, err = proc.communicate()
mylocale = locale.getpreferredencoding(False)
if mylocale is None:
mylocale = 'ascii'
text = text.decode(mylocale, errors='replace')
text = text.replace('\r\n', '\n')
# Another historical oddity
if text[-1:] == '\n':
text = text[:-1]
if use_tee and text:
print(text)
return proc.returncode, text
def _quote_arg(arg):
"""
Quote the argument for safe use in a shell command line.
"""
    # If there is a quote in the string, assume relevant parts of the
# string are already quoted (e.g. '-I"C:\\Program Files\\..."')
if '"' not in arg and ' ' in arg:
return '"%s"' % arg
return arg
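# Illustrative examples of the quoting rule above:
#
#     _quote_arg('C:\\Program Files\\foo')   # -> '"C:\\Program Files\\foo"'
#     _quote_arg('-I"C:\\Program Files"')    # unchanged, already quoted
#     _quote_arg('plain')                    # unchanged, no space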
############################################################
| 10,343 | Python | 31.630915 | 93 | 0.571788 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/pathccompiler.py | from distutils.unixccompiler import UnixCCompiler
class PathScaleCCompiler(UnixCCompiler):
"""
    PathScale compiler compatible with a gcc-built Python.
"""
compiler_type = 'pathcc'
cc_exe = 'pathcc'
cxx_exe = 'pathCC'
def __init__ (self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__ (self, verbose, dry_run, force)
cc_compiler = self.cc_exe
cxx_compiler = self.cxx_exe
self.set_executables(compiler=cc_compiler,
compiler_so=cc_compiler,
compiler_cxx=cxx_compiler,
linker_exe=cc_compiler,
linker_so=cc_compiler + ' -shared')
| 713 | Python | 31.454544 | 64 | 0.562412 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/log.py | # Colored log
import sys
from distutils.log import * # noqa: F403
from distutils.log import Log as old_Log
from distutils.log import _global_log
from numpy.distutils.misc_util import (red_text, default_text, cyan_text,
green_text, is_sequence, is_string)
def _fix_args(args,flag=1):
if is_string(args):
return args.replace('%', '%%')
if flag and is_sequence(args):
return tuple([_fix_args(a, flag=0) for a in args])
return args
class Log(old_Log):
def _log(self, level, msg, args):
if level >= self.threshold:
if args:
msg = msg % _fix_args(args)
if 0:
if msg.startswith('copying ') and msg.find(' -> ') != -1:
return
if msg.startswith('byte-compiling '):
return
print(_global_color_map[level](msg))
sys.stdout.flush()
def good(self, msg, *args):
"""
If we log WARN messages, log this message as a 'nice' anti-warn
message.
"""
if WARN >= self.threshold:
if args:
print(green_text(msg % _fix_args(args)))
else:
print(green_text(msg))
sys.stdout.flush()
_global_log.__class__ = Log
good = _global_log.good
def set_threshold(level, force=False):
prev_level = _global_log.threshold
if prev_level > DEBUG or force:
# If we're running at DEBUG, don't change the threshold, as there's
# likely a good reason why we're running at this level.
_global_log.threshold = level
if level <= DEBUG:
info('set_threshold: setting threshold to DEBUG level,'
' it can be changed only with force argument')
else:
info('set_threshold: not changing threshold from DEBUG level'
' %s to %s' % (prev_level, level))
return prev_level
def get_threshold():
return _global_log.threshold
def set_verbosity(v, force=False):
prev_level = _global_log.threshold
if v < 0:
set_threshold(ERROR, force)
elif v == 0:
set_threshold(WARN, force)
elif v == 1:
set_threshold(INFO, force)
elif v >= 2:
set_threshold(DEBUG, force)
return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level, 1)
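# Illustrative mapping (follows directly from the branches above):
#
#     set_verbosity(-1)  # threshold = ERROR
#     set_verbosity(0)   # threshold = WARN   (the default set below)
#     set_verbosity(1)   # threshold = INFO
#     set_verbosity(2)   # threshold = DEBUG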
_global_color_map = {
DEBUG:cyan_text,
INFO:default_text,
WARN:red_text,
ERROR:red_text,
FATAL:red_text
}
# don't use INFO,.. flags in set_verbosity, these flags are for set_threshold.
set_verbosity(0, force=True)
_error = error
_warn = warn
_info = info
_debug = debug
def error(msg, *a, **kw):
_error(f"ERROR: {msg}", *a, **kw)
def warn(msg, *a, **kw):
_warn(f"WARN: {msg}", *a, **kw)
def info(msg, *a, **kw):
_info(f"INFO: {msg}", *a, **kw)
def debug(msg, *a, **kw):
_debug(f"DEBUG: {msg}", *a, **kw)
| 2,879 | Python | 24.714285 | 78 | 0.5686 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/npy_pkg_config.py | import sys
import re
import os
from configparser import RawConfigParser
__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet',
'read_config', 'parse_flags']
_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}')
class FormatError(OSError):
"""
Exception thrown when there is a problem parsing a configuration file.
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class PkgNotFound(OSError):
"""Exception raised when a package can not be located."""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
def parse_flags(line):
"""
Parse a line from a config file containing compile flags.
Parameters
----------
line : str
A single line containing one or more compile flags.
Returns
-------
d : dict
Dictionary of parsed flags, split into relevant categories.
These categories are the keys of `d`:
* 'include_dirs'
* 'library_dirs'
* 'libraries'
* 'macros'
* 'ignored'
"""
d = {'include_dirs': [], 'library_dirs': [], 'libraries': [],
'macros': [], 'ignored': []}
flags = (' ' + line).split(' -')
for flag in flags:
flag = '-' + flag
if len(flag) > 0:
if flag.startswith('-I'):
d['include_dirs'].append(flag[2:].strip())
elif flag.startswith('-L'):
d['library_dirs'].append(flag[2:].strip())
elif flag.startswith('-l'):
d['libraries'].append(flag[2:].strip())
elif flag.startswith('-D'):
d['macros'].append(flag[2:].strip())
else:
d['ignored'].append(flag)
return d
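# Illustrative example (hypothetical flags): a line such as
# '-I/usr/include -L/usr/lib -lm -DNDEBUG -pthread' parses to
# include_dirs=['/usr/include'], library_dirs=['/usr/lib'],
# libraries=['m'], macros=['NDEBUG'], while unrecognized flags like
# '-pthread' land in 'ignored'.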
def _escape_backslash(val):
return val.replace('\\', '\\\\')
class LibraryInfo:
"""
Object containing build information about a library.
Parameters
----------
name : str
The library name.
description : str
Description of the library.
version : str
Version string.
sections : dict
The sections of the configuration file for the library. The keys are
the section headers, the values the text under each header.
vars : class instance
A `VariableSet` instance, which contains ``(name, value)`` pairs for
variables defined in the configuration file for the library.
requires : sequence, optional
The required libraries for the library to be installed.
Notes
-----
All input parameters (except "sections" which is a method) are available as
attributes of the same name.
"""
def __init__(self, name, description, version, sections, vars, requires=None):
self.name = name
self.description = description
if requires:
self.requires = requires
else:
self.requires = []
self.version = version
self._sections = sections
self.vars = vars
def sections(self):
"""
Return the section headers of the config file.
Parameters
----------
None
Returns
-------
keys : list of str
The list of section headers.
"""
return list(self._sections.keys())
def cflags(self, section="default"):
val = self.vars.interpolate(self._sections[section]['cflags'])
return _escape_backslash(val)
def libs(self, section="default"):
val = self.vars.interpolate(self._sections[section]['libs'])
return _escape_backslash(val)
def __str__(self):
m = ['Name: %s' % self.name, 'Description: %s' % self.description]
if self.requires:
            m.append('Requires: %s' % ",".join(self.requires))
        else:
            m.append('Requires:')
m.append('Version: %s' % self.version)
return "\n".join(m)
class VariableSet:
"""
Container object for the variables defined in a config file.
`VariableSet` can be used as a plain dictionary, with the variable names
as keys.
Parameters
----------
d : dict
Dict of items in the "variables" section of the configuration file.
"""
def __init__(self, d):
        self._raw_data = dict(d)
self._re = {}
self._re_sub = {}
self._init_parse()
def _init_parse(self):
for k, v in self._raw_data.items():
self._init_parse_var(k, v)
def _init_parse_var(self, name, value):
self._re[name] = re.compile(r'\$\{%s\}' % name)
self._re_sub[name] = value
def interpolate(self, value):
# Brute force: we keep interpolating until there is no '${var}' anymore
# or until interpolated string is equal to input string
def _interpolate(value):
for k in self._re.keys():
value = self._re[k].sub(self._re_sub[k], value)
return value
while _VAR.search(value):
nvalue = _interpolate(value)
if nvalue == value:
break
value = nvalue
return value
def variables(self):
"""
Return the list of variable names.
Parameters
----------
None
Returns
-------
names : list of str
The names of all variables in the `VariableSet` instance.
"""
return list(self._raw_data.keys())
# Emulate a dict to set/get variables values
def __getitem__(self, name):
return self._raw_data[name]
def __setitem__(self, name, value):
self._raw_data[name] = value
self._init_parse_var(name, value)
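# Illustrative sketch of nested interpolation (hypothetical variables):
#
#     vs = VariableSet({'prefix': '/usr', 'libdir': '${prefix}/lib'})
#     vs.interpolate('-L${libdir} -lnpymath')   # -> '-L/usr/lib -lnpymath'
#
# interpolate() keeps substituting until no ${var} remains or the string
# stops changing.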
def parse_meta(config):
if not config.has_section('meta'):
raise FormatError("No meta section found !")
d = dict(config.items('meta'))
for k in ['name', 'description', 'version']:
        if k not in d:
raise FormatError("Option %s (section [meta]) is mandatory, "
"but not found" % k)
    if 'requires' not in d:
d['requires'] = []
return d
def parse_variables(config):
if not config.has_section('variables'):
raise FormatError("No variables section found !")
d = {}
for name, value in config.items("variables"):
d[name] = value
return VariableSet(d)
def parse_sections(config):
    # Collect every section other than 'meta' and 'variables' into a dict
    # mapping section name to its (option, value) pairs.
    secs = [s for s in config.sections() if s not in ['meta', 'variables']]
    return {s: dict(config.items(s)) for s in secs}
def pkg_to_filename(pkg_name):
return "%s.ini" % pkg_name
def parse_config(filename, dirs=None):
if dirs:
filenames = [os.path.join(d, filename) for d in dirs]
else:
filenames = [filename]
config = RawConfigParser()
n = config.read(filenames)
    if not n:
raise PkgNotFound("Could not find file(s) %s" % str(filenames))
# Parse meta and variables sections
meta = parse_meta(config)
vars = {}
if config.has_section('variables'):
for name, value in config.items("variables"):
vars[name] = _escape_backslash(value)
# Parse "normal" sections
    secs = [s for s in config.sections() if s not in ['meta', 'variables']]
sections = {}
requires = {}
for s in secs:
d = {}
if config.has_option(s, "requires"):
requires[s] = config.get(s, 'requires')
for name, value in config.items(s):
d[name] = value
sections[s] = d
return meta, vars, sections, requires
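# Illustrative .ini layout consumed by parse_config (hypothetical content,
# modeled on the npymath example in the read_config docstring below):
#
#     [meta]
#     name = npymath
#     description = Portable, core math library
#     version = 0.1
#
#     [variables]
#     prefix = /usr
#     libdir = ${prefix}/lib
#
#     [default]
#     cflags = -I${prefix}/include
#     libs = -L${libdir} -lnpymath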
def _read_config_imp(filenames, dirs=None):
def _read_config(f):
meta, vars, sections, reqs = parse_config(f, dirs)
# recursively add sections and variables of required libraries
for rname, rvalue in reqs.items():
nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue))
# Update var dict for variables not in 'top' config file
for k, v in nvars.items():
if not k in vars:
vars[k] = v
# Update sec dict
for oname, ovalue in nsections[rname].items():
if ovalue:
sections[rname][oname] += ' %s' % ovalue
return meta, vars, sections, reqs
meta, vars, sections, reqs = _read_config(filenames)
# FIXME: document this. If pkgname is defined in the variables section, and
# there is no pkgdir variable defined, pkgdir is automatically defined to
# the path of pkgname. This requires the package to be imported to work
    if 'pkgdir' not in vars and "pkgname" in vars:
        pkgname = vars["pkgname"]
        if pkgname not in sys.modules:
raise ValueError("You should import %s to get information on %s" %
(pkgname, meta["name"]))
mod = sys.modules[pkgname]
vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__))
return LibraryInfo(name=meta["name"], description=meta["description"],
version=meta["version"], sections=sections, vars=VariableSet(vars))
# Trivial cache for LibraryInfo instance creation. To be really efficient,
# the cache should be handled in read_config, since the same file can be
# parsed many times outside LibraryInfo creation, but I doubt this will be
# a problem in practice
_CACHE = {}
def read_config(pkgname, dirs=None):
"""
Return library info for a package from its configuration file.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
If given, should be a sequence of directories - usually including
the NumPy base directory - where to look for npy-pkg-config files.
Returns
-------
pkginfo : class instance
The `LibraryInfo` instance containing the build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
misc_util.get_info, misc_util.get_pkg_info
Examples
--------
>>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath')
>>> type(npymath_info)
<class 'numpy.distutils.npy_pkg_config.LibraryInfo'>
>>> print(npymath_info)
Name: npymath
Description: Portable, core math library implementing C99 standard
Requires:
Version: 0.1 #random
"""
try:
return _CACHE[pkgname]
except KeyError:
v = _read_config_imp(pkg_to_filename(pkgname), dirs)
_CACHE[pkgname] = v
return v
# TODO:
# - implements version comparison (modversion + atleast)
# pkg-config simple emulator - useful for debugging, and maybe later to query
# the system
if __name__ == '__main__':
from optparse import OptionParser
import glob
parser = OptionParser()
parser.add_option("--cflags", dest="cflags", action="store_true",
help="output all preprocessor and compiler flags")
parser.add_option("--libs", dest="libs", action="store_true",
help="output all linker flags")
parser.add_option("--use-section", dest="section",
help="use this section instead of default for options")
parser.add_option("--version", dest="version", action="store_true",
help="output version")
parser.add_option("--atleast-version", dest="min_version",
help="Minimal version")
parser.add_option("--list-all", dest="list_all", action="store_true",
help="Minimal version")
parser.add_option("--define-variable", dest="define_variable",
help="Replace variable with the given value")
(options, args) = parser.parse_args(sys.argv)
if len(args) < 2:
raise ValueError("Expect package name on the command line:")
if options.list_all:
files = glob.glob("*.ini")
        for f in files:
            # read_config expects a package name, not a filename, so
            # strip the .ini extension first.
            info = read_config(os.path.splitext(f)[0])
            print("%s\t%s - %s" % (info.name, info.name, info.description))
pkg_name = args[1]
d = os.environ.get('NPY_PKG_CONFIG_PATH')
if d:
info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d])
else:
info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.'])
if options.section:
section = options.section
else:
section = "default"
if options.define_variable:
m = re.search(r'([\S]+)=([\S]+)', options.define_variable)
if not m:
raise ValueError("--define-variable option should be of "
"the form --define-variable=foo=bar")
else:
name = m.group(1)
value = m.group(2)
info.vars[name] = value
if options.cflags:
print(info.cflags(section))
if options.libs:
print(info.libs(section))
if options.version:
print(info.version)
if options.min_version:
print(info.version >= options.min_version)
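    # Example invocations of this debugging CLI (assuming an npymath.ini
    # is discoverable via NPY_PKG_CONFIG_PATH or the default search paths):
    #   python npy_pkg_config.py --cflags npymath
    #   python npy_pkg_config.py --libs --use-section=default npymath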
| 12,972 | Python | 28.618721 | 82 | 0.577629 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/numpy_distribution.py | # XXX: Handle setuptools ?
from distutils.core import Distribution
# This class is used because we add new files (sconscripts, and so on) with the
# scons command
class NumpyDistribution(Distribution):
def __init__(self, attrs = None):
# A list of (sconscripts, pre_hook, post_hook, src, parent_names)
self.scons_data = []
# A list of installable libraries
self.installed_libraries = []
# A dict of pkg_config files to generate/install
self.installed_pkg_config = {}
Distribution.__init__(self, attrs)
def has_scons_scripts(self):
return bool(self.scons_data)
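# Minimal usage sketch (illustrative): pass this class to distutils via the
# ``distclass`` keyword so that setup() builds a NumpyDistribution instead
# of the stock Distribution:
#
#   from distutils.core import setup
#   from numpy.distutils.numpy_distribution import NumpyDistribution
#   setup(name='example', version='0.1', distclass=NumpyDistribution)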
| 634 | Python | 34.277776 | 79 | 0.660883 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/system_info.py | #!/usr/bin/env python3
"""
This file defines a set of system_info classes for getting
information about various resources (libraries, library directories,
include directories, etc.) in the system. Usage:
info_dict = get_info(<name>)
where <name> is a string 'atlas','x11','fftw','lapack','blas',
'lapack_src', 'blas_src', etc. For a complete list of allowed names,
see the definition of get_info() function below.
Returned info_dict is a dictionary which is compatible with
distutils.setup keyword arguments. If info_dict == {}, then the
asked resource is not available (system_info could not find it).
Several *_info classes specify an environment variable to specify
the locations of software. When setting the corresponding environment
variable to 'None' then the software will be ignored, even when it
is available in system.
Global parameters:
system_info.search_static_first - search static libraries (.a)
in precedence to shared ones (.so, .sl) if enabled.
system_info.verbosity - output the results to stdout if enabled.
The file 'site.cfg' is looked for in
1) Directory of main setup.py file being run.
2) Home directory of user running the setup.py file as ~/.numpy-site.cfg
3) System wide directory (location of this file...)
The first one found is used to get system configuration options. The
format is that used by ConfigParser (i.e., Windows .INI style). The
section ALL is not intended for general use.
Appropriate defaults are used if nothing is specified.
The order of finding the locations of resources is the following:
1. environment variable
2. section in site.cfg
3. DEFAULT section in site.cfg
4. System default search paths (see ``default_*`` variables below).
Only the first complete match is returned.
Currently, the following classes are available, along with their section names:
Numeric_info:Numeric
_numpy_info:Numeric
_pkg_config_info:None
accelerate_info:accelerate
agg2_info:agg2
amd_info:amd
atlas_3_10_blas_info:atlas
atlas_3_10_blas_threads_info:atlas
atlas_3_10_info:atlas
atlas_3_10_threads_info:atlas
atlas_blas_info:atlas
atlas_blas_threads_info:atlas
atlas_info:atlas
atlas_threads_info:atlas
blas64__opt_info:ALL # usage recommended (general ILP64 BLAS, 64_ symbol suffix)
blas_ilp64_opt_info:ALL # usage recommended (general ILP64 BLAS)
blas_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 BLAS, no symbol suffix)
blas_info:blas
blas_mkl_info:mkl
blas_opt_info:ALL # usage recommended
blas_src_info:blas_src
blis_info:blis
boost_python_info:boost_python
dfftw_info:fftw
dfftw_threads_info:fftw
djbfft_info:djbfft
f2py_info:ALL
fft_opt_info:ALL
fftw2_info:fftw
fftw3_info:fftw3
fftw_info:fftw
fftw_threads_info:fftw
flame_info:flame
freetype2_info:freetype2
gdk_2_info:gdk_2
gdk_info:gdk
gdk_pixbuf_2_info:gdk_pixbuf_2
gdk_pixbuf_xlib_2_info:gdk_pixbuf_xlib_2
gdk_x11_2_info:gdk_x11_2
gtkp_2_info:gtkp_2
gtkp_x11_2_info:gtkp_x11_2
lapack64__opt_info:ALL # usage recommended (general ILP64 LAPACK, 64_ symbol suffix)
lapack_atlas_3_10_info:atlas
lapack_atlas_3_10_threads_info:atlas
lapack_atlas_info:atlas
lapack_atlas_threads_info:atlas
lapack_ilp64_opt_info:ALL # usage recommended (general ILP64 LAPACK)
lapack_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 LAPACK, no symbol suffix)
lapack_info:lapack
lapack_mkl_info:mkl
lapack_opt_info:ALL # usage recommended
lapack_src_info:lapack_src
mkl_info:mkl
numarray_info:numarray
numerix_info:numerix
numpy_info:numpy
openblas64__info:openblas64_
openblas64__lapack_info:openblas64_
openblas_clapack_info:openblas
openblas_ilp64_info:openblas_ilp64
openblas_ilp64_lapack_info:openblas_ilp64
openblas_info:openblas
openblas_lapack_info:openblas
sfftw_info:fftw
sfftw_threads_info:fftw
system_info:ALL
umfpack_info:umfpack
wx_info:wx
x11_info:x11
xft_info:xft
Note that blas_opt_info and lapack_opt_info honor the NPY_BLAS_ORDER
and NPY_LAPACK_ORDER environment variables to determine the order in which
specific BLAS and LAPACK libraries are searched for.
This search (or autodetection) can be bypassed by defining the environment
variables NPY_BLAS_LIBS and NPY_LAPACK_LIBS, which should then contain the
exact linker flags to use (language will be set to F77). This is useful,
for instance, when building against Netlib BLAS/LAPACK or stub files, in
order to be able to switch BLAS and LAPACK implementations at runtime. If
using this to build NumPy itself, it is recommended to also define
NPY_CBLAS_LIBS (assuming your BLAS library has a CBLAS interface) to enable
CBLAS usage for matrix multiplication (which is unoptimized otherwise).
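For example, to prefer OpenBLAS during the search, or to bypass the search
entirely with explicit linker flags (the paths below are illustrative):
  NPY_BLAS_ORDER=openblas NPY_LAPACK_ORDER=openblas python setup.py build
  NPY_BLAS_LIBS="-L/opt/blas -lblas" \
  NPY_LAPACK_LIBS="-L/opt/lapack -llapack" python setup.py build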
Example:
----------
[DEFAULT]
# default section
library_dirs = /usr/lib:/usr/local/lib:/opt/lib
include_dirs = /usr/include:/usr/local/include:/opt/include
src_dirs = /usr/local/src:/opt/src
# search static libraries (.a) in preference to shared ones (.so)
search_static_first = 0
[fftw]
libraries = rfftw, fftw
[atlas]
library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas
# for overriding the names of the atlas libraries
libraries = lapack, f77blas, cblas, atlas
[x11]
library_dirs = /usr/X11R6/lib
include_dirs = /usr/X11R6/include
----------
Note that the ``libraries`` key is the default setting for libraries.
Authors:
Pearu Peterson <[email protected]>, February 2002
David M. Cooke <[email protected]>, April 2002
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
"""
import sys
import os
import re
import copy
import warnings
import subprocess
import textwrap
from glob import glob
from functools import reduce
from configparser import NoOptionError
from configparser import RawConfigParser as ConfigParser
# It seems that some people are importing ConfigParser from here, so it is
# good to keep its class name. Use of RawConfigParser is needed in
# order to be able to load path names with percent signs in them, like
# `feature%2Fcool`, which is common in git-flow branch names.
from distutils.errors import DistutilsError
from distutils.dist import Distribution
import sysconfig
from numpy.distutils import log
from distutils.util import get_platform
from numpy.distutils.exec_command import (
find_executable, filepath_from_subprocess_output,
)
from numpy.distutils.misc_util import (is_sequence, is_string,
get_shared_lib_extension)
from numpy.distutils.command.config import config as cmd_config
from numpy.distutils import customized_ccompiler as _customized_ccompiler
from numpy.distutils import _shell_utils
import distutils.ccompiler
import tempfile
import shutil
__all__ = ['system_info']
# Determine number of bits
import platform
_bits = {'32bit': 32, '64bit': 64}
platform_bits = _bits[platform.architecture()[0]]
global_compiler = None
def customized_ccompiler():
global global_compiler
if not global_compiler:
global_compiler = _customized_ccompiler()
return global_compiler
def _c_string_literal(s):
"""
Convert a python string into a literal suitable for inclusion into C code
"""
# only these three characters are forbidden in C strings
s = s.replace('\\', r'\\')
s = s.replace('"', r'\"')
s = s.replace('\n', r'\n')
return '"{}"'.format(s)
def libpaths(paths, bits):
"""Return a list of library paths valid on 32 or 64 bit systems.
Inputs:
paths : sequence
A sequence of strings (typically paths)
bits : int
An integer, the only valid values are 32 or 64. A ValueError exception
is raised otherwise.
Examples:
Consider a list of directories
>>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib']
For a 32-bit platform, this is already valid:
>>> np.distutils.system_info.libpaths(paths,32)
['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib']
    On 64 bits, a '64'-suffixed variant of each path is added in front of it:
>>> np.distutils.system_info.libpaths(paths,64)
['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib',
'/usr/lib64', '/usr/lib']
"""
if bits not in (32, 64):
raise ValueError("Invalid bit size in libpaths: 32 or 64 only")
# Handle 32bit case
if bits == 32:
return paths
# Handle 64bit case
out = []
for p in paths:
out.extend([p + '64', p])
return out
if sys.platform == 'win32':
default_lib_dirs = ['C:\\',
os.path.join(sysconfig.get_config_var('exec_prefix'),
'libs')]
default_runtime_dirs = []
default_include_dirs = []
default_src_dirs = ['.']
default_x11_lib_dirs = []
default_x11_include_dirs = []
_include_dirs = [
'include',
'include/suitesparse',
]
_lib_dirs = [
'lib',
]
_include_dirs = [d.replace('/', os.sep) for d in _include_dirs]
_lib_dirs = [d.replace('/', os.sep) for d in _lib_dirs]
def add_system_root(library_root):
"""Add a package manager root to the include directories"""
global default_lib_dirs
global default_include_dirs
library_root = os.path.normpath(library_root)
default_lib_dirs.extend(
os.path.join(library_root, d) for d in _lib_dirs)
default_include_dirs.extend(
os.path.join(library_root, d) for d in _include_dirs)
# VCpkg is the de-facto package manager on windows for C/C++
# libraries. If it is on the PATH, then we append its paths here.
vcpkg = shutil.which('vcpkg')
if vcpkg:
vcpkg_dir = os.path.dirname(vcpkg)
if platform.architecture()[0] == '32bit':
specifier = 'x86'
else:
specifier = 'x64'
vcpkg_installed = os.path.join(vcpkg_dir, 'installed')
for vcpkg_root in [
os.path.join(vcpkg_installed, specifier + '-windows'),
os.path.join(vcpkg_installed, specifier + '-windows-static'),
]:
add_system_root(vcpkg_root)
# Conda is another popular package manager that provides libraries
conda = shutil.which('conda')
if conda:
conda_dir = os.path.dirname(conda)
add_system_root(os.path.join(conda_dir, '..', 'Library'))
add_system_root(os.path.join(conda_dir, 'Library'))
else:
default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib',
'/opt/local/lib', '/sw/lib'], platform_bits)
default_runtime_dirs = []
default_include_dirs = ['/usr/local/include',
'/opt/include',
# path of umfpack under macports
'/opt/local/include/ufsparse',
'/opt/local/include', '/sw/include',
'/usr/include/suitesparse']
default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src']
default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib',
'/usr/lib'], platform_bits)
default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include']
if os.path.exists('/usr/lib/X11'):
globbed_x11_dir = glob('/usr/lib/*/libX11.so')
if globbed_x11_dir:
x11_so_dir = os.path.split(globbed_x11_dir[0])[0]
default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11'])
default_x11_include_dirs.extend(['/usr/lib/X11/include',
'/usr/include/X11'])
with open(os.devnull, 'w') as tmp:
try:
p = subprocess.Popen(["gcc", "-print-multiarch"], stdout=subprocess.PIPE,
stderr=tmp)
except (OSError, DistutilsError):
# OSError if gcc is not installed, or SandboxViolation (DistutilsError
# subclass) if an old setuptools bug is triggered (see gh-3160).
pass
else:
triplet = str(p.communicate()[0].decode().strip())
if p.returncode == 0:
# gcc supports the "-print-multiarch" option
default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)]
default_lib_dirs += [os.path.join("/usr/lib/", triplet)]
if os.path.join(sys.prefix, 'lib') not in default_lib_dirs:
default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib'))
default_include_dirs.append(os.path.join(sys.prefix, 'include'))
default_src_dirs.append(os.path.join(sys.prefix, 'src'))
default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)]
default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)]
default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)]
default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)]
so_ext = get_shared_lib_extension()
def get_standard_file(fname):
"""Returns a list of files named 'fname' from
1) System-wide directory (directory-location of this module)
2) Users HOME directory (os.environ['HOME'])
3) Local directory
"""
# System-wide file
filenames = []
try:
f = __file__
except NameError:
f = sys.argv[0]
sysfile = os.path.join(os.path.split(os.path.abspath(f))[0],
fname)
if os.path.isfile(sysfile):
filenames.append(sysfile)
# Home directory
# And look for the user config file
try:
f = os.path.expanduser('~')
except KeyError:
pass
else:
user_file = os.path.join(f, fname)
if os.path.isfile(user_file):
filenames.append(user_file)
# Local file
if os.path.isfile(fname):
filenames.append(os.path.abspath(fname))
return filenames
def _parse_env_order(base_order, env):
""" Parse an environment variable `env` by splitting with "," and only returning elements from `base_order`
This method will sequence the environment variable and check for their
individual elements in `base_order`.
The items in the environment variable may be negated via '^item' or '!itema,itemb'.
It must start with ^/! to negate all options.
Raises
------
ValueError: for mixed negated and non-negated orders or multiple negated orders
Parameters
----------
base_order : list of str
the base list of orders
env : str
the environment variable to be parsed, if none is found, `base_order` is returned
Returns
-------
allow_order : list of str
allowed orders in lower-case
unknown_order : list of str
for values not overlapping with `base_order`
"""
order_str = os.environ.get(env, None)
# ensure all base-orders are lower-case (for easier comparison)
base_order = [order.lower() for order in base_order]
if order_str is None:
return base_order, []
neg = order_str.startswith('^') or order_str.startswith('!')
# Check format
order_str_l = list(order_str)
sum_neg = order_str_l.count('^') + order_str_l.count('!')
if neg:
if sum_neg > 1:
raise ValueError(f"Environment variable '{env}' may only contain a single (prefixed) negation: {order_str}")
# remove prefix
order_str = order_str[1:]
elif sum_neg > 0:
raise ValueError(f"Environment variable '{env}' may not mix negated an non-negated items: {order_str}")
# Split and lower case
orders = order_str.lower().split(',')
# to inform callee about non-overlapping elements
unknown_order = []
# if negated, we have to remove from the order
if neg:
allow_order = base_order.copy()
for order in orders:
if not order:
continue
if order not in base_order:
unknown_order.append(order)
continue
if order in allow_order:
allow_order.remove(order)
else:
allow_order = []
for order in orders:
if not order:
continue
if order not in base_order:
unknown_order.append(order)
continue
if order not in allow_order:
allow_order.append(order)
return allow_order, unknown_order
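# Illustration (hypothetical values): with base_order ['mkl', 'openblas',
# 'atlas'] and the environment variable:
#   unset            -> (['mkl', 'openblas', 'atlas'], [])
#   'openblas,mkl'   -> (['openblas', 'mkl'], [])
#   '^atlas'         -> (['mkl', 'openblas'], [])
#   'openblas,foo'   -> (['openblas'], ['foo'])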
def get_info(name, notfound_action=0):
"""
notfound_action:
0 - do nothing
1 - display warning message
2 - raise error
"""
cl = {'armpl': armpl_info,
'blas_armpl': blas_armpl_info,
'lapack_armpl': lapack_armpl_info,
'fftw3_armpl': fftw3_armpl_info,
'atlas': atlas_info, # use lapack_opt or blas_opt instead
'atlas_threads': atlas_threads_info, # ditto
'atlas_blas': atlas_blas_info,
'atlas_blas_threads': atlas_blas_threads_info,
'lapack_atlas': lapack_atlas_info, # use lapack_opt instead
'lapack_atlas_threads': lapack_atlas_threads_info, # ditto
'atlas_3_10': atlas_3_10_info, # use lapack_opt or blas_opt instead
'atlas_3_10_threads': atlas_3_10_threads_info, # ditto
'atlas_3_10_blas': atlas_3_10_blas_info,
'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info,
'lapack_atlas_3_10': lapack_atlas_3_10_info, # use lapack_opt instead
'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info, # ditto
'flame': flame_info, # use lapack_opt instead
'mkl': mkl_info,
# openblas which may or may not have embedded lapack
'openblas': openblas_info, # use blas_opt instead
# openblas with embedded lapack
'openblas_lapack': openblas_lapack_info, # use blas_opt instead
'openblas_clapack': openblas_clapack_info, # use blas_opt instead
'blis': blis_info, # use blas_opt instead
'lapack_mkl': lapack_mkl_info, # use lapack_opt instead
'blas_mkl': blas_mkl_info, # use blas_opt instead
'accelerate': accelerate_info, # use blas_opt instead
'openblas64_': openblas64__info,
'openblas64__lapack': openblas64__lapack_info,
'openblas_ilp64': openblas_ilp64_info,
'openblas_ilp64_lapack': openblas_ilp64_lapack_info,
'x11': x11_info,
'fft_opt': fft_opt_info,
'fftw': fftw_info,
'fftw2': fftw2_info,
'fftw3': fftw3_info,
'dfftw': dfftw_info,
'sfftw': sfftw_info,
'fftw_threads': fftw_threads_info,
'dfftw_threads': dfftw_threads_info,
'sfftw_threads': sfftw_threads_info,
'djbfft': djbfft_info,
'blas': blas_info, # use blas_opt instead
'lapack': lapack_info, # use lapack_opt instead
'lapack_src': lapack_src_info,
'blas_src': blas_src_info,
'numpy': numpy_info,
'f2py': f2py_info,
'Numeric': Numeric_info,
'numeric': Numeric_info,
'numarray': numarray_info,
'numerix': numerix_info,
'lapack_opt': lapack_opt_info,
'lapack_ilp64_opt': lapack_ilp64_opt_info,
'lapack_ilp64_plain_opt': lapack_ilp64_plain_opt_info,
'lapack64__opt': lapack64__opt_info,
'blas_opt': blas_opt_info,
'blas_ilp64_opt': blas_ilp64_opt_info,
'blas_ilp64_plain_opt': blas_ilp64_plain_opt_info,
'blas64__opt': blas64__opt_info,
'boost_python': boost_python_info,
'agg2': agg2_info,
'wx': wx_info,
'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info,
'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info,
'gdk_pixbuf_2': gdk_pixbuf_2_info,
'gdk-pixbuf-2.0': gdk_pixbuf_2_info,
'gdk': gdk_info,
'gdk_2': gdk_2_info,
'gdk-2.0': gdk_2_info,
'gdk_x11_2': gdk_x11_2_info,
'gdk-x11-2.0': gdk_x11_2_info,
'gtkp_x11_2': gtkp_x11_2_info,
'gtk+-x11-2.0': gtkp_x11_2_info,
'gtkp_2': gtkp_2_info,
'gtk+-2.0': gtkp_2_info,
'xft': xft_info,
'freetype2': freetype2_info,
'umfpack': umfpack_info,
'amd': amd_info,
}.get(name.lower(), system_info)
return cl().get_info(notfound_action)
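# For example (illustrative):
#   get_info('lapack_opt')                     # -> {} if nothing was found
#   get_info('lapack_opt', notfound_action=1)  # warns if nothing was found
#   get_info('lapack_opt', notfound_action=2)  # raises LapackNotFoundError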
class NotFoundError(DistutilsError):
"""Some third-party program or library is not found."""
class AliasedOptionError(DistutilsError):
"""
    Aliased options in config files must not be used simultaneously.
In section '{section}' we found multiple appearances of options {options}."""
class AtlasNotFoundError(NotFoundError):
"""
Atlas (http://github.com/math-atlas/math-atlas) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [atlas]) or by setting
the ATLAS environment variable."""
class FlameNotFoundError(NotFoundError):
"""
FLAME (http://www.cs.utexas.edu/~flame/web/) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [flame])."""
class LapackNotFoundError(NotFoundError):
"""
Lapack (http://www.netlib.org/lapack/) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [lapack]) or by setting
the LAPACK environment variable."""
class LapackSrcNotFoundError(LapackNotFoundError):
"""
Lapack (http://www.netlib.org/lapack/) sources not found.
Directories to search for the sources can be specified in the
numpy/distutils/site.cfg file (section [lapack_src]) or by setting
the LAPACK_SRC environment variable."""
class LapackILP64NotFoundError(NotFoundError):
"""
64-bit Lapack libraries not found.
Known libraries in numpy/distutils/site.cfg file are:
openblas64_, openblas_ilp64
"""
class BlasOptNotFoundError(NotFoundError):
"""
    Optimized (vendor) BLAS libraries were not found.
    Falling back to the netlib BLAS library, which has worse performance.
    Better performance can usually be gained by switching to an optimized
    BLAS library."""
class BlasNotFoundError(NotFoundError):
"""
Blas (http://www.netlib.org/blas/) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [blas]) or by setting
the BLAS environment variable."""
class BlasILP64NotFoundError(NotFoundError):
"""
64-bit Blas libraries not found.
Known libraries in numpy/distutils/site.cfg file are:
openblas64_, openblas_ilp64
"""
class BlasSrcNotFoundError(BlasNotFoundError):
"""
Blas (http://www.netlib.org/blas/) sources not found.
Directories to search for the sources can be specified in the
numpy/distutils/site.cfg file (section [blas_src]) or by setting
the BLAS_SRC environment variable."""
class FFTWNotFoundError(NotFoundError):
"""
FFTW (http://www.fftw.org/) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [fftw]) or by setting
the FFTW environment variable."""
class DJBFFTNotFoundError(NotFoundError):
"""
DJBFFT (https://cr.yp.to/djbfft.html) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [djbfft]) or by setting
the DJBFFT environment variable."""
class NumericNotFoundError(NotFoundError):
"""
Numeric (https://www.numpy.org/) module not found.
Get it from above location, install it, and retry setup.py."""
class X11NotFoundError(NotFoundError):
"""X11 libraries not found."""
class UmfpackNotFoundError(NotFoundError):
"""
UMFPACK sparse solver (https://www.cise.ufl.edu/research/sparse/umfpack/)
not found. Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [umfpack]) or by setting
the UMFPACK environment variable."""
class system_info:
""" get_info() is the only public method. Don't use others.
"""
dir_env_var = None
# XXX: search_static_first is disabled by default, may disappear in
# future unless it is proved to be useful.
search_static_first = 0
# The base-class section name is a random word "ALL" and is not really
# intended for general use. It cannot be None nor can it be DEFAULT as
# these break the ConfigParser. See gh-15338
section = 'ALL'
saved_results = {}
notfounderror = NotFoundError
def __init__(self,
default_lib_dirs=default_lib_dirs,
default_include_dirs=default_include_dirs,
):
self.__class__.info = {}
self.local_prefixes = []
defaults = {'library_dirs': os.pathsep.join(default_lib_dirs),
'include_dirs': os.pathsep.join(default_include_dirs),
'runtime_library_dirs': os.pathsep.join(default_runtime_dirs),
'rpath': '',
'src_dirs': os.pathsep.join(default_src_dirs),
'search_static_first': str(self.search_static_first),
'extra_compile_args': '', 'extra_link_args': ''}
self.cp = ConfigParser(defaults)
self.files = []
self.files.extend(get_standard_file('.numpy-site.cfg'))
self.files.extend(get_standard_file('site.cfg'))
self.parse_config_files()
if self.section is not None:
self.search_static_first = self.cp.getboolean(
self.section, 'search_static_first')
assert isinstance(self.search_static_first, int)
def parse_config_files(self):
self.cp.read(self.files)
if not self.cp.has_section(self.section):
if self.section is not None:
self.cp.add_section(self.section)
def calc_libraries_info(self):
libs = self.get_libraries()
dirs = self.get_lib_dirs()
# The extensions use runtime_library_dirs
r_dirs = self.get_runtime_lib_dirs()
        # Plain distutils uses 'rpath'; we simply append those entries to
        # runtime_library_dirs as though they were one setting
r_dirs.extend(self.get_runtime_lib_dirs(key='rpath'))
info = {}
for lib in libs:
i = self.check_libs(dirs, [lib])
if i is not None:
dict_append(info, **i)
else:
log.info('Library %s was not found. Ignoring' % (lib))
if r_dirs:
i = self.check_libs(r_dirs, [lib])
if i is not None:
                    # The libraries were found via the runtime search path,
                    # but check_libs reports them under 'library_dirs';
                    # move them to 'runtime_library_dirs' and drop the
                    # duplicate 'libraries' entry.
del i['libraries']
i['runtime_library_dirs'] = i.pop('library_dirs')
dict_append(info, **i)
else:
log.info('Runtime library %s was not found. Ignoring' % (lib))
return info
def set_info(self, **info):
if info:
lib_info = self.calc_libraries_info()
dict_append(info, **lib_info)
# Update extra information
extra_info = self.calc_extra_info()
dict_append(info, **extra_info)
self.saved_results[self.__class__.__name__] = info
def get_option_single(self, *options):
""" Ensure that only one of `options` are found in the section
Parameters
----------
*options : list of str
a list of options to be found in the section (``self.section``)
Returns
-------
str :
the option that is uniquely found in the section
Raises
------
AliasedOptionError :
in case more than one of the options are found
"""
found = [self.cp.has_option(self.section, opt) for opt in options]
if sum(found) == 1:
return options[found.index(True)]
elif sum(found) == 0:
            # nothing was found; fall back to the first option
return options[0]
# Else we have more than 1 key found
if AliasedOptionError.__doc__ is None:
raise AliasedOptionError()
raise AliasedOptionError(AliasedOptionError.__doc__.format(
section=self.section, options='[{}]'.format(', '.join(options))))
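    # For example (illustrative): if a site.cfg [mkl] section defined both
    # ``mkl_libs`` and its alias ``libraries``, then
    # get_option_single('mkl_libs', 'libraries') would raise
    # AliasedOptionError; with at most one of them present, that option
    # (or the first one as a default) is returned.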
def has_info(self):
return self.__class__.__name__ in self.saved_results
def calc_extra_info(self):
""" Updates the information in the current information with
respect to these flags:
extra_compile_args
extra_link_args
"""
info = {}
for key in ['extra_compile_args', 'extra_link_args']:
# Get values
opt = self.cp.get(self.section, key)
opt = _shell_utils.NativeParser.split(opt)
if opt:
tmp = {key: opt}
dict_append(info, **tmp)
return info
def get_info(self, notfound_action=0):
""" Return a dictionary with items that are compatible
with numpy.distutils.setup keyword arguments.
"""
flag = 0
if not self.has_info():
flag = 1
log.info(self.__class__.__name__ + ':')
if hasattr(self, 'calc_info'):
self.calc_info()
if notfound_action:
if not self.has_info():
if notfound_action == 1:
warnings.warn(self.notfounderror.__doc__, stacklevel=2)
elif notfound_action == 2:
raise self.notfounderror(self.notfounderror.__doc__)
else:
raise ValueError(repr(notfound_action))
if not self.has_info():
log.info(' NOT AVAILABLE')
self.set_info()
else:
log.info(' FOUND:')
res = self.saved_results.get(self.__class__.__name__)
if log.get_threshold() <= log.INFO and flag:
for k, v in res.items():
v = str(v)
if k in ['sources', 'libraries'] and len(v) > 270:
v = v[:120] + '...\n...\n...' + v[-120:]
log.info(' %s = %s', k, v)
log.info('')
return copy.deepcopy(res)
def get_paths(self, section, key):
dirs = self.cp.get(section, key).split(os.pathsep)
env_var = self.dir_env_var
if env_var:
if is_sequence(env_var):
e0 = env_var[-1]
for e in env_var:
if e in os.environ:
e0 = e
break
if not env_var[0] == e0:
log.info('Setting %s=%s' % (env_var[0], e0))
env_var = e0
if env_var and env_var in os.environ:
d = os.environ[env_var]
if d == 'None':
log.info('Disabled %s: %s',
self.__class__.__name__, '(%s is None)'
% (env_var,))
return []
if os.path.isfile(d):
dirs = [os.path.dirname(d)] + dirs
l = getattr(self, '_lib_names', [])
if len(l) == 1:
b = os.path.basename(d)
b = os.path.splitext(b)[0]
if b[:3] == 'lib':
log.info('Replacing _lib_names[0]==%r with %r' \
% (self._lib_names[0], b[3:]))
self._lib_names[0] = b[3:]
else:
ds = d.split(os.pathsep)
ds2 = []
for d in ds:
if os.path.isdir(d):
ds2.append(d)
for dd in ['include', 'lib']:
d1 = os.path.join(d, dd)
if os.path.isdir(d1):
ds2.append(d1)
dirs = ds2 + dirs
default_dirs = self.cp.get(self.section, key).split(os.pathsep)
dirs.extend(default_dirs)
ret = []
for d in dirs:
if len(d) > 0 and not os.path.isdir(d):
warnings.warn('Specified path %s is invalid.' % d, stacklevel=2)
continue
if d not in ret:
ret.append(d)
log.debug('( %s = %s )', key, ':'.join(ret))
return ret
def get_lib_dirs(self, key='library_dirs'):
return self.get_paths(self.section, key)
def get_runtime_lib_dirs(self, key='runtime_library_dirs'):
path = self.get_paths(self.section, key)
if path == ['']:
path = []
return path
def get_include_dirs(self, key='include_dirs'):
return self.get_paths(self.section, key)
def get_src_dirs(self, key='src_dirs'):
return self.get_paths(self.section, key)
def get_libs(self, key, default):
try:
libs = self.cp.get(self.section, key)
except NoOptionError:
if not default:
return []
if is_string(default):
return [default]
return default
return [b for b in [a.strip() for a in libs.split(',')] if b]
def get_libraries(self, key='libraries'):
if hasattr(self, '_lib_names'):
return self.get_libs(key, default=self._lib_names)
else:
return self.get_libs(key, '')
def library_extensions(self):
c = customized_ccompiler()
static_exts = []
if c.compiler_type != 'msvc':
# MSVC doesn't understand binutils
static_exts.append('.a')
if sys.platform == 'win32':
static_exts.append('.lib') # .lib is used by MSVC and others
if self.search_static_first:
exts = static_exts + [so_ext]
else:
exts = [so_ext] + static_exts
if sys.platform == 'cygwin':
exts.append('.dll.a')
if sys.platform == 'darwin':
exts.append('.dylib')
return exts
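    # For example (assuming get_shared_lib_extension() returns '.so'), on
    # Linux with the default search_static_first = 0 this yields
    # ['.so', '.a'], i.e. shared libraries are preferred over static ones;
    # on macOS '.dylib' is appended as well.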
def check_libs(self, lib_dirs, libs, opt_libs=[]):
"""If static or shared libraries are available then return
their info dictionary.
Checks for all libraries as shared libraries first, then
static (or vice versa if self.search_static_first is True).
"""
exts = self.library_extensions()
info = None
for ext in exts:
info = self._check_libs(lib_dirs, libs, opt_libs, [ext])
if info is not None:
break
if not info:
log.info(' libraries %s not found in %s', ','.join(libs),
lib_dirs)
return info
def check_libs2(self, lib_dirs, libs, opt_libs=[]):
"""If static or shared libraries are available then return
their info dictionary.
Checks each library for shared or static.
"""
exts = self.library_extensions()
info = self._check_libs(lib_dirs, libs, opt_libs, exts)
if not info:
log.info(' libraries %s not found in %s', ','.join(libs),
lib_dirs)
return info
def _find_lib(self, lib_dir, lib, exts):
assert is_string(lib_dir)
# under windows first try without 'lib' prefix
if sys.platform == 'win32':
lib_prefixes = ['', 'lib']
else:
lib_prefixes = ['lib']
# for each library name, see if we can find a file for it.
for ext in exts:
for prefix in lib_prefixes:
p = self.combine_paths(lib_dir, prefix + lib + ext)
if p:
break
if p:
assert len(p) == 1
# ??? splitext on p[0] would do this for cygwin
# doesn't seem correct
if ext == '.dll.a':
lib += '.dll'
if ext == '.lib':
lib = prefix + lib
return lib
return False
def _find_libs(self, lib_dirs, libs, exts):
# make sure we preserve the order of libs, as it can be important
found_dirs, found_libs = [], []
for lib in libs:
for lib_dir in lib_dirs:
found_lib = self._find_lib(lib_dir, lib, exts)
if found_lib:
found_libs.append(found_lib)
if lib_dir not in found_dirs:
found_dirs.append(lib_dir)
break
return found_dirs, found_libs
def _check_libs(self, lib_dirs, libs, opt_libs, exts):
"""Find mandatory and optional libs in expected paths.
Missing optional libraries are silently forgotten.
"""
if not is_sequence(lib_dirs):
lib_dirs = [lib_dirs]
# First, try to find the mandatory libraries
found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts)
if len(found_libs) > 0 and len(found_libs) == len(libs):
# Now, check for optional libraries
opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts)
found_libs.extend(opt_found_libs)
for lib_dir in opt_found_dirs:
if lib_dir not in found_dirs:
found_dirs.append(lib_dir)
info = {'libraries': found_libs, 'library_dirs': found_dirs}
return info
else:
return None
def combine_paths(self, *args):
"""Return a list of existing paths composed by all combinations
of items from the arguments.
"""
return combine_paths(*args)
class fft_opt_info(system_info):
def calc_info(self):
info = {}
fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw')
djbfft_info = get_info('djbfft')
if fftw_info:
dict_append(info, **fftw_info)
if djbfft_info:
dict_append(info, **djbfft_info)
self.set_info(**info)
return
class fftw_info(system_info):
#variables to override
section = 'fftw'
dir_env_var = 'FFTW'
notfounderror = FFTWNotFoundError
ver_info = [{'name':'fftw3',
'libs':['fftw3'],
'includes':['fftw3.h'],
'macros':[('SCIPY_FFTW3_H', None)]},
{'name':'fftw2',
'libs':['rfftw', 'fftw'],
'includes':['fftw.h', 'rfftw.h'],
'macros':[('SCIPY_FFTW_H', None)]}]
def calc_ver_info(self, ver_param):
"""Returns True on successful version detection, else False"""
lib_dirs = self.get_lib_dirs()
incl_dirs = self.get_include_dirs()
opt = self.get_option_single(self.section + '_libs', 'libraries')
libs = self.get_libs(opt, ver_param['libs'])
info = self.check_libs(lib_dirs, libs)
if info is not None:
flag = 0
for d in incl_dirs:
if len(self.combine_paths(d, ver_param['includes'])) \
== len(ver_param['includes']):
dict_append(info, include_dirs=[d])
flag = 1
break
if flag:
dict_append(info, define_macros=ver_param['macros'])
else:
info = None
if info is not None:
self.set_info(**info)
return True
else:
log.info(' %s not found' % (ver_param['name']))
return False
def calc_info(self):
for i in self.ver_info:
if self.calc_ver_info(i):
break
class fftw2_info(fftw_info):
#variables to override
section = 'fftw'
dir_env_var = 'FFTW'
notfounderror = FFTWNotFoundError
ver_info = [{'name':'fftw2',
'libs':['rfftw', 'fftw'],
'includes':['fftw.h', 'rfftw.h'],
'macros':[('SCIPY_FFTW_H', None)]}
]
class fftw3_info(fftw_info):
#variables to override
section = 'fftw3'
dir_env_var = 'FFTW3'
notfounderror = FFTWNotFoundError
ver_info = [{'name':'fftw3',
'libs':['fftw3'],
'includes':['fftw3.h'],
'macros':[('SCIPY_FFTW3_H', None)]},
]
class fftw3_armpl_info(fftw_info):
section = 'fftw3'
dir_env_var = 'ARMPL_DIR'
notfounderror = FFTWNotFoundError
ver_info = [{'name': 'fftw3',
'libs': ['armpl_lp64_mp'],
'includes': ['fftw3.h'],
'macros': [('SCIPY_FFTW3_H', None)]}]
class dfftw_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'dfftw',
'libs':['drfftw', 'dfftw'],
'includes':['dfftw.h', 'drfftw.h'],
'macros':[('SCIPY_DFFTW_H', None)]}]
class sfftw_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'sfftw',
'libs':['srfftw', 'sfftw'],
'includes':['sfftw.h', 'srfftw.h'],
'macros':[('SCIPY_SFFTW_H', None)]}]
class fftw_threads_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'fftw threads',
'libs':['rfftw_threads', 'fftw_threads'],
'includes':['fftw_threads.h', 'rfftw_threads.h'],
'macros':[('SCIPY_FFTW_THREADS_H', None)]}]
class dfftw_threads_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'dfftw threads',
'libs':['drfftw_threads', 'dfftw_threads'],
'includes':['dfftw_threads.h', 'drfftw_threads.h'],
'macros':[('SCIPY_DFFTW_THREADS_H', None)]}]
class sfftw_threads_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'sfftw threads',
'libs':['srfftw_threads', 'sfftw_threads'],
'includes':['sfftw_threads.h', 'srfftw_threads.h'],
'macros':[('SCIPY_SFFTW_THREADS_H', None)]}]
class djbfft_info(system_info):
section = 'djbfft'
dir_env_var = 'DJBFFT'
notfounderror = DJBFFTNotFoundError
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend(self.combine_paths(d, ['djbfft']) + [d])
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
lib_dirs = self.get_lib_dirs()
incl_dirs = self.get_include_dirs()
info = None
for d in lib_dirs:
p = self.combine_paths(d, ['djbfft.a'])
if p:
info = {'extra_objects': p}
break
p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext])
if p:
info = {'libraries': ['djbfft'], 'library_dirs': [d]}
break
if info is None:
return
for d in incl_dirs:
if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2:
dict_append(info, include_dirs=[d],
define_macros=[('SCIPY_DJBFFT_H', None)])
self.set_info(**info)
return
return
class mkl_info(system_info):
section = 'mkl'
dir_env_var = 'MKLROOT'
_lib_mkl = ['mkl_rt']
def get_mkl_rootdir(self):
mklroot = os.environ.get('MKLROOT', None)
if mklroot is not None:
return mklroot
paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep)
ld_so_conf = '/etc/ld.so.conf'
if os.path.isfile(ld_so_conf):
with open(ld_so_conf, 'r') as f:
for d in f:
d = d.strip()
if d:
paths.append(d)
intel_mkl_dirs = []
for path in paths:
path_atoms = path.split(os.sep)
for m in path_atoms:
if m.startswith('mkl'):
d = os.sep.join(path_atoms[:path_atoms.index(m) + 2])
intel_mkl_dirs.append(d)
break
for d in paths:
dirs = glob(os.path.join(d, 'mkl', '*'))
dirs += glob(os.path.join(d, 'mkl*'))
for sub_dir in dirs:
if os.path.isdir(os.path.join(sub_dir, 'lib')):
return sub_dir
return None
def __init__(self):
mklroot = self.get_mkl_rootdir()
if mklroot is None:
system_info.__init__(self)
else:
from .cpuinfo import cpu
if cpu.is_Itanium():
plt = '64'
elif cpu.is_Intel() and cpu.is_64bit():
plt = 'intel64'
else:
plt = '32'
system_info.__init__(
self,
default_lib_dirs=[os.path.join(mklroot, 'lib', plt)],
default_include_dirs=[os.path.join(mklroot, 'include')])
def calc_info(self):
lib_dirs = self.get_lib_dirs()
incl_dirs = self.get_include_dirs()
opt = self.get_option_single('mkl_libs', 'libraries')
mkl_libs = self.get_libs(opt, self._lib_mkl)
info = self.check_libs2(lib_dirs, mkl_libs)
if info is None:
return
dict_append(info,
define_macros=[('SCIPY_MKL_H', None),
('HAVE_CBLAS', None)],
include_dirs=incl_dirs)
if sys.platform == 'win32':
pass # win32 has no pthread library
else:
dict_append(info, libraries=['pthread'])
self.set_info(**info)
class lapack_mkl_info(mkl_info):
pass
class blas_mkl_info(mkl_info):
pass
class armpl_info(system_info):
section = 'armpl'
dir_env_var = 'ARMPL_DIR'
_lib_armpl = ['armpl_lp64_mp']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
incl_dirs = self.get_include_dirs()
armpl_libs = self.get_libs('armpl_libs', self._lib_armpl)
info = self.check_libs2(lib_dirs, armpl_libs)
if info is None:
return
dict_append(info,
define_macros=[('SCIPY_MKL_H', None),
('HAVE_CBLAS', None)],
include_dirs=incl_dirs)
self.set_info(**info)
class lapack_armpl_info(armpl_info):
pass
class blas_armpl_info(armpl_info):
pass
class atlas_info(system_info):
section = 'atlas'
dir_env_var = 'ATLAS'
_lib_names = ['f77blas', 'cblas']
if sys.platform[:7] == 'freebsd':
_lib_atlas = ['atlas_r']
_lib_lapack = ['alapack_r']
else:
_lib_atlas = ['atlas']
_lib_lapack = ['lapack']
notfounderror = AtlasNotFoundError
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*',
'sse', '3dnow', 'sse2']) + [d])
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
lib_dirs = self.get_lib_dirs()
info = {}
opt = self.get_option_single('atlas_libs', 'libraries')
atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas)
lapack_libs = self.get_libs('lapack_libs', self._lib_lapack)
atlas = None
lapack = None
atlas_1 = None
for d in lib_dirs:
atlas = self.check_libs2(d, atlas_libs, [])
if atlas is not None:
lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*'])
lapack = self.check_libs2(lib_dirs2, lapack_libs, [])
if lapack is not None:
break
if atlas:
atlas_1 = atlas
log.info(self.__class__)
if atlas is None:
atlas = atlas_1
if atlas is None:
return
include_dirs = self.get_include_dirs()
h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
h = h[0]
if h:
h = os.path.dirname(h)
dict_append(info, include_dirs=[h])
info['language'] = 'c'
if lapack is not None:
dict_append(info, **lapack)
dict_append(info, **atlas)
elif 'lapack_atlas' in atlas['libraries']:
dict_append(info, **atlas)
dict_append(info,
define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)])
self.set_info(**info)
return
else:
dict_append(info, **atlas)
dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)])
message = textwrap.dedent("""
*********************************************************************
Could not find lapack library within the ATLAS installation.
*********************************************************************
""")
warnings.warn(message, stacklevel=2)
self.set_info(**info)
return
# Check if lapack library is complete, only warn if it is not.
lapack_dir = lapack['library_dirs'][0]
lapack_name = lapack['libraries'][0]
lapack_lib = None
lib_prefixes = ['lib']
if sys.platform == 'win32':
lib_prefixes.append('')
for e in self.library_extensions():
for prefix in lib_prefixes:
fn = os.path.join(lapack_dir, prefix + lapack_name + e)
if os.path.exists(fn):
lapack_lib = fn
break
if lapack_lib:
break
if lapack_lib is not None:
sz = os.stat(lapack_lib)[6]
if sz <= 4000 * 1024:
message = textwrap.dedent("""
*********************************************************************
Lapack library (from ATLAS) is probably incomplete:
size of %s is %sk (expected >4000k)
Follow the instructions in the KNOWN PROBLEMS section of the file
numpy/INSTALL.txt.
*********************************************************************
""") % (lapack_lib, sz / 1024)
warnings.warn(message, stacklevel=2)
else:
info['language'] = 'f77'
atlas_version, atlas_extra_info = get_atlas_version(**atlas)
dict_append(info, **atlas_extra_info)
self.set_info(**info)
class atlas_blas_info(atlas_info):
_lib_names = ['f77blas', 'cblas']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
info = {}
opt = self.get_option_single('atlas_libs', 'libraries')
atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas)
atlas = self.check_libs2(lib_dirs, atlas_libs, [])
if atlas is None:
return
include_dirs = self.get_include_dirs()
h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
h = h[0]
if h:
h = os.path.dirname(h)
dict_append(info, include_dirs=[h])
info['language'] = 'c'
info['define_macros'] = [('HAVE_CBLAS', None)]
atlas_version, atlas_extra_info = get_atlas_version(**atlas)
dict_append(atlas, **atlas_extra_info)
dict_append(info, **atlas)
self.set_info(**info)
return
class atlas_threads_info(atlas_info):
dir_env_var = ['PTATLAS', 'ATLAS']
_lib_names = ['ptf77blas', 'ptcblas']
class atlas_blas_threads_info(atlas_blas_info):
dir_env_var = ['PTATLAS', 'ATLAS']
_lib_names = ['ptf77blas', 'ptcblas']
class lapack_atlas_info(atlas_info):
_lib_names = ['lapack_atlas'] + atlas_info._lib_names
class lapack_atlas_threads_info(atlas_threads_info):
_lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names
class atlas_3_10_info(atlas_info):
_lib_names = ['satlas']
_lib_atlas = _lib_names
_lib_lapack = _lib_names
class atlas_3_10_blas_info(atlas_3_10_info):
_lib_names = ['satlas']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
info = {}
opt = self.get_option_single('atlas_lib', 'libraries')
atlas_libs = self.get_libs(opt, self._lib_names)
atlas = self.check_libs2(lib_dirs, atlas_libs, [])
if atlas is None:
return
include_dirs = self.get_include_dirs()
h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
h = h[0]
if h:
h = os.path.dirname(h)
dict_append(info, include_dirs=[h])
info['language'] = 'c'
info['define_macros'] = [('HAVE_CBLAS', None)]
atlas_version, atlas_extra_info = get_atlas_version(**atlas)
dict_append(atlas, **atlas_extra_info)
dict_append(info, **atlas)
self.set_info(**info)
return
class atlas_3_10_threads_info(atlas_3_10_info):
dir_env_var = ['PTATLAS', 'ATLAS']
_lib_names = ['tatlas']
_lib_atlas = _lib_names
_lib_lapack = _lib_names
class atlas_3_10_blas_threads_info(atlas_3_10_blas_info):
dir_env_var = ['PTATLAS', 'ATLAS']
_lib_names = ['tatlas']
class lapack_atlas_3_10_info(atlas_3_10_info):
pass
class lapack_atlas_3_10_threads_info(atlas_3_10_threads_info):
pass
class lapack_info(system_info):
section = 'lapack'
dir_env_var = 'LAPACK'
_lib_names = ['lapack']
notfounderror = LapackNotFoundError
def calc_info(self):
lib_dirs = self.get_lib_dirs()
opt = self.get_option_single('lapack_libs', 'libraries')
lapack_libs = self.get_libs(opt, self._lib_names)
info = self.check_libs(lib_dirs, lapack_libs, [])
if info is None:
return
info['language'] = 'f77'
self.set_info(**info)
class lapack_src_info(system_info):
# LAPACK_SRC is deprecated, please do not use this!
# Build or install a BLAS library via your package manager or from
# source separately.
section = 'lapack_src'
dir_env_var = 'LAPACK_SRC'
notfounderror = LapackSrcNotFoundError
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'dgesv.f')):
src_dir = d
break
if not src_dir:
            #XXX: Get sources from netlib. Maybe ask first.
return
# The following is extracted from LAPACK-3.0/SRC/Makefile.
# Added missing names from lapack-lite-3.1.1/SRC/Makefile
# while keeping removed names for Lapack-3.0 compatibility.
allaux = '''
ilaenv ieeeck lsame lsamen xerbla
iparmq
''' # *.f
laux = '''
bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1
laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2
lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre
larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4
lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1
lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf
stebz stedc steqr sterf
larra larrc larrd larr larrk larrj larrr laneg laisnan isnan
lazq3 lazq4
''' # [s|d]*.f
lasrc = '''
gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak
gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv
gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2
geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd
gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal
gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd
ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein
hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0
lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb
lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp
laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv
lartv larz larzb larzt laswp lasyf latbs latdf latps latrd
latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv
pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2
potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri
pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs
spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv
sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2
tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs
trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs
tzrqf tzrzf
lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5
''' # [s|c|d|z]*.f
sd_lasrc = '''
laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l
org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr
orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3
ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx
sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd
stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd
sygvx sytd2 sytrd
''' # [s|d]*.f
cz_lasrc = '''
bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev
heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv
hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd
hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf
hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7
laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe
laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv
spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq
ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2
unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr
''' # [c|z]*.f
#######
sclaux = laux + ' econd ' # s*.f
dzlaux = laux + ' secnd ' # d*.f
slasrc = lasrc + sd_lasrc # s*.f
dlasrc = lasrc + sd_lasrc # d*.f
clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f
zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f
oclasrc = ' icmax1 scsum1 ' # *.f
ozlasrc = ' izmax1 dzsum1 ' # *.f
sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \
+ ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \
+ ['c%s.f' % f for f in (clasrc).split()] \
+ ['z%s.f' % f for f in (zlasrc).split()] \
+ ['%s.f' % f for f in (allaux + oclasrc + ozlasrc).split()]
sources = [os.path.join(src_dir, f) for f in sources]
# Lapack 3.1:
src_dir2 = os.path.join(src_dir, '..', 'INSTALL')
sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz']
# Lapack 3.2.1:
sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz']
sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz']
sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz']
        # Should we check the actual existence of the source files here?
        # Yes -- the file listing differs between the 3.0 and 3.1 versions.
sources = [f for f in sources if os.path.isfile(f)]
info = {'sources': sources, 'language': 'f77'}
self.set_info(**info)
atlas_version_c_text = r'''
/* This file is generated from numpy/distutils/system_info.py */
void ATL_buildinfo(void);
int main(void) {
ATL_buildinfo();
return 0;
}
'''
_cached_atlas_version = {}
def get_atlas_version(**config):
libraries = config.get('libraries', [])
library_dirs = config.get('library_dirs', [])
key = (tuple(libraries), tuple(library_dirs))
if key in _cached_atlas_version:
return _cached_atlas_version[key]
c = cmd_config(Distribution())
atlas_version = None
info = {}
try:
s, o = c.get_output(atlas_version_c_text,
libraries=libraries, library_dirs=library_dirs,
)
if s and re.search(r'undefined reference to `_gfortran', o, re.M):
s, o = c.get_output(atlas_version_c_text,
libraries=libraries + ['gfortran'],
library_dirs=library_dirs,
)
if not s:
warnings.warn(textwrap.dedent("""
*****************************************************
Linkage with ATLAS requires gfortran. Use
python setup.py config_fc --fcompiler=gnu95 ...
when building extension libraries that use ATLAS.
Make sure that -lgfortran is used for C++ extensions.
*****************************************************
"""), stacklevel=2)
dict_append(info, language='f90',
define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)])
except Exception: # failed to get version from file -- maybe on Windows
# look at directory name
for o in library_dirs:
m = re.search(r'ATLAS_(?P<version>\d+[.]\d+[.]\d+)_', o)
if m:
atlas_version = m.group('version')
if atlas_version is not None:
break
# final choice --- look at ATLAS_VERSION environment
# variable
if atlas_version is None:
atlas_version = os.environ.get('ATLAS_VERSION', None)
if atlas_version:
dict_append(info, define_macros=[(
'ATLAS_INFO', _c_string_literal(atlas_version))
])
else:
dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)])
return atlas_version or '?.?.?', info
if not s:
m = re.search(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)', o)
if m:
atlas_version = m.group('version')
if atlas_version is None:
if re.search(r'undefined symbol: ATL_buildinfo', o, re.M):
atlas_version = '3.2.1_pre3.3.6'
else:
log.info('Status: %d', s)
log.info('Output: %s', o)
elif atlas_version == '3.2.1_pre3.3.6':
dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)])
else:
dict_append(info, define_macros=[(
'ATLAS_INFO', _c_string_literal(atlas_version))
])
result = _cached_atlas_version[key] = atlas_version, info
return result
class lapack_opt_info(system_info):
notfounderror = LapackNotFoundError
# List of all known LAPACK libraries, in the default order
lapack_order = ['armpl', 'mkl', 'openblas', 'flame',
'accelerate', 'atlas', 'lapack']
order_env_var_name = 'NPY_LAPACK_ORDER'
def _calc_info_armpl(self):
info = get_info('lapack_armpl')
if info:
self.set_info(**info)
return True
return False
def _calc_info_mkl(self):
info = get_info('lapack_mkl')
if info:
self.set_info(**info)
return True
return False
def _calc_info_openblas(self):
info = get_info('openblas_lapack')
if info:
self.set_info(**info)
return True
info = get_info('openblas_clapack')
if info:
self.set_info(**info)
return True
return False
def _calc_info_flame(self):
info = get_info('flame')
if info:
self.set_info(**info)
return True
return False
def _calc_info_atlas(self):
info = get_info('atlas_3_10_threads')
if not info:
info = get_info('atlas_3_10')
if not info:
info = get_info('atlas_threads')
if not info:
info = get_info('atlas')
if info:
# Figure out if ATLAS has lapack...
# If not we need the lapack library, but not BLAS!
l = info.get('define_macros', [])
if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \
or ('ATLAS_WITHOUT_LAPACK', None) in l:
                # Get LAPACK (with possible warnings).
                # If it is not found we don't accept anything, since this
                # ATLAS build cannot be used without a separate LAPACK!
lapack_info = self._get_info_lapack()
if not lapack_info:
return False
dict_append(info, **lapack_info)
self.set_info(**info)
return True
return False
def _calc_info_accelerate(self):
info = get_info('accelerate')
if info:
self.set_info(**info)
return True
return False
def _get_info_blas(self):
# Default to get the optimized BLAS implementation
info = get_info('blas_opt')
if not info:
warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3)
info_src = get_info('blas_src')
if not info_src:
warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3)
return {}
dict_append(info, libraries=[('fblas_src', info_src)])
return info
def _get_info_lapack(self):
info = get_info('lapack')
if not info:
warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=3)
info_src = get_info('lapack_src')
if not info_src:
warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=3)
return {}
dict_append(info, libraries=[('flapack_src', info_src)])
return info
def _calc_info_lapack(self):
info = self._get_info_lapack()
if info:
info_blas = self._get_info_blas()
dict_append(info, **info_blas)
dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])
self.set_info(**info)
return True
return False
def _calc_info_from_envvar(self):
info = {}
info['language'] = 'f77'
info['libraries'] = []
info['include_dirs'] = []
info['define_macros'] = []
info['extra_link_args'] = os.environ['NPY_LAPACK_LIBS'].split()
self.set_info(**info)
return True
def _calc_info(self, name):
return getattr(self, '_calc_info_{}'.format(name))()
def calc_info(self):
lapack_order, unknown_order = _parse_env_order(self.lapack_order, self.order_env_var_name)
if len(unknown_order) > 0:
raise ValueError("lapack_opt_info user defined "
"LAPACK order has unacceptable "
"values: {}".format(unknown_order))
if 'NPY_LAPACK_LIBS' in os.environ:
# Bypass autodetection, set language to F77 and use env var linker
# flags directly
self._calc_info_from_envvar()
return
for lapack in lapack_order:
if self._calc_info(lapack):
return
if 'lapack' not in lapack_order:
# Since the user may request *not* to use any library, we still need
# to raise warnings to signal missing packages!
warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=2)
warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=2)
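# Illustrative (shell) usage of the two override hooks above; the values
# shown are hypothetical:
#   NPY_LAPACK_ORDER=openblas,atlas python setup.py build    # restrict search
#   NPY_LAPACK_LIBS="-L/opt/lapack -llapack" python setup.py build  # no search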
class _ilp64_opt_info_mixin:
symbol_suffix = None
symbol_prefix = None
def _check_info(self, info):
macros = dict(info.get('define_macros', []))
prefix = macros.get('BLAS_SYMBOL_PREFIX', '')
suffix = macros.get('BLAS_SYMBOL_SUFFIX', '')
if self.symbol_prefix not in (None, prefix):
return False
if self.symbol_suffix not in (None, suffix):
return False
return bool(info)
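# For example (hypothetical info dicts), with symbol_prefix='' and
# symbol_suffix='64_' as in lapack64__opt_info / blas64__opt_info below,
# _check_info returns:
#   {'define_macros': [('BLAS_SYMBOL_SUFFIX', '64_')]}  -> True
#   {'define_macros': [('BLAS_SYMBOL_SUFFIX', '_64')]}  -> False
#   {}                                                  -> False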
class lapack_ilp64_opt_info(lapack_opt_info, _ilp64_opt_info_mixin):
notfounderror = LapackILP64NotFoundError
lapack_order = ['openblas64_', 'openblas_ilp64']
order_env_var_name = 'NPY_LAPACK_ILP64_ORDER'
def _calc_info(self, name):
info = get_info(name + '_lapack')
if self._check_info(info):
self.set_info(**info)
return True
return False
class lapack_ilp64_plain_opt_info(lapack_ilp64_opt_info):
    # Same as lapack_ilp64_opt_info, but with fixed (empty) symbol prefix/suffix
symbol_prefix = ''
symbol_suffix = ''
class lapack64__opt_info(lapack_ilp64_opt_info):
symbol_prefix = ''
symbol_suffix = '64_'
class blas_opt_info(system_info):
notfounderror = BlasNotFoundError
# List of all known BLAS libraries, in the default order
blas_order = ['armpl', 'mkl', 'blis', 'openblas',
'accelerate', 'atlas', 'blas']
order_env_var_name = 'NPY_BLAS_ORDER'
def _calc_info_armpl(self):
info = get_info('blas_armpl')
if info:
self.set_info(**info)
return True
return False
def _calc_info_mkl(self):
info = get_info('blas_mkl')
if info:
self.set_info(**info)
return True
return False
def _calc_info_blis(self):
info = get_info('blis')
if info:
self.set_info(**info)
return True
return False
def _calc_info_openblas(self):
info = get_info('openblas')
if info:
self.set_info(**info)
return True
return False
def _calc_info_atlas(self):
info = get_info('atlas_3_10_blas_threads')
if not info:
info = get_info('atlas_3_10_blas')
if not info:
info = get_info('atlas_blas_threads')
if not info:
info = get_info('atlas_blas')
if info:
self.set_info(**info)
return True
return False
def _calc_info_accelerate(self):
info = get_info('accelerate')
if info:
self.set_info(**info)
return True
return False
def _calc_info_blas(self):
# Warn about a non-optimized BLAS library
warnings.warn(BlasOptNotFoundError.__doc__ or '', stacklevel=3)
info = {}
dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])
blas = get_info('blas')
if blas:
dict_append(info, **blas)
else:
# Not even BLAS was found!
warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3)
blas_src = get_info('blas_src')
if not blas_src:
warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3)
return False
dict_append(info, libraries=[('fblas_src', blas_src)])
self.set_info(**info)
return True
def _calc_info_from_envvar(self):
info = {}
info['language'] = 'f77'
info['libraries'] = []
info['include_dirs'] = []
info['define_macros'] = []
info['extra_link_args'] = os.environ['NPY_BLAS_LIBS'].split()
if 'NPY_CBLAS_LIBS' in os.environ:
info['define_macros'].append(('HAVE_CBLAS', None))
info['extra_link_args'].extend(
os.environ['NPY_CBLAS_LIBS'].split())
self.set_info(**info)
return True
def _calc_info(self, name):
return getattr(self, '_calc_info_{}'.format(name))()
def calc_info(self):
blas_order, unknown_order = _parse_env_order(self.blas_order, self.order_env_var_name)
        if len(unknown_order) > 0:
            raise ValueError("blas_opt_info user defined BLAS order "
                             "has unacceptable values: {}"
                             .format(unknown_order))
if 'NPY_BLAS_LIBS' in os.environ:
# Bypass autodetection, set language to F77 and use env var linker
# flags directly
self._calc_info_from_envvar()
return
for blas in blas_order:
if self._calc_info(blas):
return
if 'blas' not in blas_order:
# Since the user may request *not* to use any library, we still need
# to raise warnings to signal missing packages!
warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=2)
warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=2)
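# Illustrative (shell) usage, mirroring the LAPACK variables above; the
# flags are hypothetical:
#   NPY_BLAS_ORDER=openblas,blas python setup.py build
#   NPY_BLAS_LIBS="-lblas" NPY_CBLAS_LIBS="-lcblas" python setup.py build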
class blas_ilp64_opt_info(blas_opt_info, _ilp64_opt_info_mixin):
notfounderror = BlasILP64NotFoundError
blas_order = ['openblas64_', 'openblas_ilp64']
order_env_var_name = 'NPY_BLAS_ILP64_ORDER'
def _calc_info(self, name):
info = get_info(name)
if self._check_info(info):
self.set_info(**info)
return True
return False
class blas_ilp64_plain_opt_info(blas_ilp64_opt_info):
symbol_prefix = ''
symbol_suffix = ''
class blas64__opt_info(blas_ilp64_opt_info):
symbol_prefix = ''
symbol_suffix = '64_'
class cblas_info(system_info):
section = 'cblas'
dir_env_var = 'CBLAS'
# No default as it's used only in blas_info
_lib_names = []
notfounderror = BlasNotFoundError
class blas_info(system_info):
section = 'blas'
dir_env_var = 'BLAS'
_lib_names = ['blas']
notfounderror = BlasNotFoundError
def calc_info(self):
lib_dirs = self.get_lib_dirs()
opt = self.get_option_single('blas_libs', 'libraries')
blas_libs = self.get_libs(opt, self._lib_names)
info = self.check_libs(lib_dirs, blas_libs, [])
if info is None:
return
else:
info['include_dirs'] = self.get_include_dirs()
if platform.system() == 'Windows':
# The check for windows is needed because get_cblas_libs uses the
# same compiler that was used to compile Python and msvc is
# often not installed when mingw is being used. This rough
            # treatment is not desirable, but Windows is tricky.
info['language'] = 'f77' # XXX: is it generally true?
        # If cblas libraries are given as an option, use those
cblas_info_obj = cblas_info()
cblas_opt = cblas_info_obj.get_option_single('cblas_libs', 'libraries')
cblas_libs = cblas_info_obj.get_libs(cblas_opt, None)
if cblas_libs:
info['libraries'] = cblas_libs + blas_libs
info['define_macros'] = [('HAVE_CBLAS', None)]
else:
lib = self.get_cblas_libs(info)
if lib is not None:
info['language'] = 'c'
info['libraries'] = lib
info['define_macros'] = [('HAVE_CBLAS', None)]
self.set_info(**info)
def get_cblas_libs(self, info):
""" Check whether we can link with CBLAS interface
This method will search through several combinations of libraries
to check whether CBLAS is present:
1. Libraries in ``info['libraries']``, as is
2. As 1. but also explicitly adding ``'cblas'`` as a library
3. As 1. but also explicitly adding ``'blas'`` as a library
4. Check only library ``'cblas'``
5. Check only library ``'blas'``
Parameters
----------
info : dict
system information dictionary for compilation and linking
Returns
-------
libraries : list of str or None
a list of libraries that enables the use of CBLAS interface.
Returns None if not found or a compilation error occurs.
Since 1.17 returns a list.
"""
# primitive cblas check by looking for the header and trying to link
# cblas or blas
c = customized_ccompiler()
tmpdir = tempfile.mkdtemp()
s = textwrap.dedent("""\
#include <cblas.h>
int main(int argc, const char *argv[])
{
double a[4] = {1,2,3,4};
double b[4] = {5,6,7,8};
return cblas_ddot(4, a, 1, b, 1) > 10;
}""")
src = os.path.join(tmpdir, 'source.c')
try:
with open(src, 'wt') as f:
f.write(s)
try:
# check we can compile (find headers)
obj = c.compile([src], output_dir=tmpdir,
include_dirs=self.get_include_dirs())
except (distutils.ccompiler.CompileError, distutils.ccompiler.LinkError):
return None
# check we can link (find library)
# some systems have separate cblas and blas libs.
for libs in [info['libraries'], ['cblas'] + info['libraries'],
['blas'] + info['libraries'], ['cblas'], ['blas']]:
try:
c.link_executable(obj, os.path.join(tmpdir, "a.out"),
libraries=libs,
library_dirs=info['library_dirs'],
extra_postargs=info.get('extra_link_args', []))
return libs
except distutils.ccompiler.LinkError:
pass
finally:
shutil.rmtree(tmpdir)
return None
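# With info['libraries'] == ['blas'], the loop above attempts to link the
# probe against, in order: ['blas'], ['cblas', 'blas'], ['blas', 'blas'],
# ['cblas'] and ['blas'], returning the first combination that links
# (illustrative; the outcome is system-dependent).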
class openblas_info(blas_info):
section = 'openblas'
dir_env_var = 'OPENBLAS'
_lib_names = ['openblas']
_require_symbols = []
notfounderror = BlasNotFoundError
@property
def symbol_prefix(self):
try:
return self.cp.get(self.section, 'symbol_prefix')
except NoOptionError:
return ''
@property
def symbol_suffix(self):
try:
return self.cp.get(self.section, 'symbol_suffix')
except NoOptionError:
return ''
def _calc_info(self):
c = customized_ccompiler()
lib_dirs = self.get_lib_dirs()
# Prefer to use libraries over openblas_libs
opt = self.get_option_single('openblas_libs', 'libraries')
openblas_libs = self.get_libs(opt, self._lib_names)
info = self.check_libs(lib_dirs, openblas_libs, [])
if c.compiler_type == "msvc" and info is None:
from numpy.distutils.fcompiler import new_fcompiler
f = new_fcompiler(c_compiler=c)
if f and f.compiler_type == 'gnu95':
# Try gfortran-compatible library files
info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs)
# Skip lapack check, we'd need build_ext to do it
skip_symbol_check = True
elif info:
skip_symbol_check = False
info['language'] = 'c'
if info is None:
return None
# Add extra info for OpenBLAS
extra_info = self.calc_extra_info()
dict_append(info, **extra_info)
if not (skip_symbol_check or self.check_symbols(info)):
return None
info['define_macros'] = [('HAVE_CBLAS', None)]
if self.symbol_prefix:
info['define_macros'] += [('BLAS_SYMBOL_PREFIX', self.symbol_prefix)]
if self.symbol_suffix:
info['define_macros'] += [('BLAS_SYMBOL_SUFFIX', self.symbol_suffix)]
return info
def calc_info(self):
info = self._calc_info()
if info is not None:
self.set_info(**info)
def check_msvc_gfortran_libs(self, library_dirs, libraries):
# First, find the full path to each library directory
library_paths = []
for library in libraries:
for library_dir in library_dirs:
                # MinGW static library extension is .a
fullpath = os.path.join(library_dir, library + '.a')
if os.path.isfile(fullpath):
library_paths.append(fullpath)
break
else:
return None
        # Generate a numpy.distutils "virtual" static library: the
        # .fobjects / .cobjects pair written below is picked up by
        # numpy.distutils' build commands as a list of prebuilt objects
        # to link in place of a real archive.
basename = self.__class__.__name__
tmpdir = os.path.join(os.getcwd(), 'build', basename)
if not os.path.isdir(tmpdir):
os.makedirs(tmpdir)
info = {'library_dirs': [tmpdir],
'libraries': [basename],
'language': 'f77'}
fake_lib_file = os.path.join(tmpdir, basename + '.fobjects')
fake_clib_file = os.path.join(tmpdir, basename + '.cobjects')
with open(fake_lib_file, 'w') as f:
f.write("\n".join(library_paths))
with open(fake_clib_file, 'w') as f:
pass
return info
def check_symbols(self, info):
res = False
c = customized_ccompiler()
tmpdir = tempfile.mkdtemp()
prototypes = "\n".join("void %s%s%s();" % (self.symbol_prefix,
symbol_name,
self.symbol_suffix)
for symbol_name in self._require_symbols)
calls = "\n".join("%s%s%s();" % (self.symbol_prefix,
symbol_name,
self.symbol_suffix)
for symbol_name in self._require_symbols)
s = textwrap.dedent("""\
%(prototypes)s
int main(int argc, const char *argv[])
{
%(calls)s
return 0;
}""") % dict(prototypes=prototypes, calls=calls)
src = os.path.join(tmpdir, 'source.c')
out = os.path.join(tmpdir, 'a.out')
# Add the additional "extra" arguments
        extra_args = info.get('extra_link_args', [])
try:
with open(src, 'wt') as f:
f.write(s)
obj = c.compile([src], output_dir=tmpdir)
try:
c.link_executable(obj, out, libraries=info['libraries'],
library_dirs=info['library_dirs'],
extra_postargs=extra_args)
res = True
except distutils.ccompiler.LinkError:
res = False
finally:
shutil.rmtree(tmpdir)
return res
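# For _require_symbols = ['zungqr_'] with empty prefix/suffix, the probe
# program generated above is simply (illustrative):
#   void zungqr_();
#   int main(int argc, const char *argv[]) { zungqr_(); return 0; }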
class openblas_lapack_info(openblas_info):
section = 'openblas'
dir_env_var = 'OPENBLAS'
_lib_names = ['openblas']
_require_symbols = ['zungqr_']
notfounderror = BlasNotFoundError
class openblas_clapack_info(openblas_lapack_info):
_lib_names = ['openblas', 'lapack']
class openblas_ilp64_info(openblas_info):
section = 'openblas_ilp64'
dir_env_var = 'OPENBLAS_ILP64'
_lib_names = ['openblas64']
_require_symbols = ['dgemm_', 'cblas_dgemm']
notfounderror = BlasILP64NotFoundError
def _calc_info(self):
info = super()._calc_info()
if info is not None:
info['define_macros'] += [('HAVE_BLAS_ILP64', None)]
return info
class openblas_ilp64_lapack_info(openblas_ilp64_info):
_require_symbols = ['dgemm_', 'cblas_dgemm', 'zungqr_', 'LAPACKE_zungqr']
def _calc_info(self):
info = super()._calc_info()
if info:
info['define_macros'] += [('HAVE_LAPACKE', None)]
return info
class openblas64__info(openblas_ilp64_info):
# ILP64 Openblas, with default symbol suffix
section = 'openblas64_'
dir_env_var = 'OPENBLAS64_'
_lib_names = ['openblas64_']
symbol_suffix = '64_'
symbol_prefix = ''
class openblas64__lapack_info(openblas_ilp64_lapack_info, openblas64__info):
pass
class blis_info(blas_info):
section = 'blis'
dir_env_var = 'BLIS'
_lib_names = ['blis']
notfounderror = BlasNotFoundError
def calc_info(self):
lib_dirs = self.get_lib_dirs()
opt = self.get_option_single('blis_libs', 'libraries')
blis_libs = self.get_libs(opt, self._lib_names)
info = self.check_libs2(lib_dirs, blis_libs, [])
if info is None:
return
# Add include dirs
incl_dirs = self.get_include_dirs()
dict_append(info,
language='c',
define_macros=[('HAVE_CBLAS', None)],
include_dirs=incl_dirs)
self.set_info(**info)
class flame_info(system_info):
""" Usage of libflame for LAPACK operations
This requires libflame to be compiled with lapack wrappers:
./configure --enable-lapack2flame ...
Be aware that libflame 5.1.0 has some missing names in the shared library, so
if you have problems, try the static flame library.
"""
section = 'flame'
_lib_names = ['flame']
notfounderror = FlameNotFoundError
def check_embedded_lapack(self, info):
""" libflame does not necessarily have a wrapper for fortran LAPACK, we need to check """
c = customized_ccompiler()
tmpdir = tempfile.mkdtemp()
s = textwrap.dedent("""\
void zungqr_();
int main(int argc, const char *argv[])
{
zungqr_();
return 0;
}""")
src = os.path.join(tmpdir, 'source.c')
out = os.path.join(tmpdir, 'a.out')
# Add the additional "extra" arguments
extra_args = info.get('extra_link_args', [])
try:
with open(src, 'wt') as f:
f.write(s)
obj = c.compile([src], output_dir=tmpdir)
try:
c.link_executable(obj, out, libraries=info['libraries'],
library_dirs=info['library_dirs'],
extra_postargs=extra_args)
return True
except distutils.ccompiler.LinkError:
return False
finally:
shutil.rmtree(tmpdir)
def calc_info(self):
lib_dirs = self.get_lib_dirs()
flame_libs = self.get_libs('libraries', self._lib_names)
info = self.check_libs2(lib_dirs, flame_libs, [])
if info is None:
return
# Add the extra flag args to info
extra_info = self.calc_extra_info()
dict_append(info, **extra_info)
if self.check_embedded_lapack(info):
# check if the user has supplied all information required
self.set_info(**info)
else:
            # Try to get the BLAS lib to see if we can get it to work
blas_info = get_info('blas_opt')
if not blas_info:
                # since we already failed once, this isn't going to work either
return
# Now we need to merge the two dictionaries
for key in blas_info:
if isinstance(blas_info[key], list):
info[key] = info.get(key, []) + blas_info[key]
elif isinstance(blas_info[key], tuple):
info[key] = info.get(key, ()) + blas_info[key]
else:
info[key] = info.get(key, '') + blas_info[key]
# Now check again
if self.check_embedded_lapack(info):
self.set_info(**info)
class accelerate_info(system_info):
section = 'accelerate'
_lib_names = ['accelerate', 'veclib']
notfounderror = BlasNotFoundError
def calc_info(self):
        # Make it possible to enable/disable this from the config file
        # or an environment variable
libraries = os.environ.get('ACCELERATE')
if libraries:
libraries = [libraries]
else:
libraries = self.get_libs('libraries', self._lib_names)
libraries = [lib.strip().lower() for lib in libraries]
if (sys.platform == 'darwin' and
not os.getenv('_PYTHON_HOST_PLATFORM', None)):
# Use the system BLAS from Accelerate or vecLib under OSX
args = []
link_args = []
if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
'x86_64' in get_platform() or \
'i386' in platform.platform():
intel = 1
else:
intel = 0
if (os.path.exists('/System/Library/Frameworks'
'/Accelerate.framework/') and
'accelerate' in libraries):
if intel:
args.extend(['-msse3'])
args.extend([
'-I/System/Library/Frameworks/vecLib.framework/Headers'])
link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
elif (os.path.exists('/System/Library/Frameworks'
'/vecLib.framework/') and
'veclib' in libraries):
if intel:
args.extend(['-msse3'])
args.extend([
'-I/System/Library/Frameworks/vecLib.framework/Headers'])
link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
if args:
self.set_info(extra_compile_args=args,
extra_link_args=link_args,
define_macros=[('NO_ATLAS_INFO', 3),
('HAVE_CBLAS', None)])
return
class blas_src_info(system_info):
# BLAS_SRC is deprecated, please do not use this!
# Build or install a BLAS library via your package manager or from
# source separately.
section = 'blas_src'
dir_env_var = 'BLAS_SRC'
notfounderror = BlasSrcNotFoundError
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['blas']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'daxpy.f')):
src_dir = d
break
if not src_dir:
            # XXX: Get sources from netlib. Maybe ask first.
return
blas1 = '''
caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot
dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2
srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg
dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax
snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap
scabs1
'''
blas2 = '''
cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv
chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv
dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv
sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger
stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc
zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2
ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv
'''
blas3 = '''
cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k
dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm
ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm
'''
        sources = [os.path.join(src_dir, f + '.f')
                   for f in (blas1 + blas2 + blas3).split()]
        # XXX: should we check the actual existence of the source files here?
sources = [f for f in sources if os.path.isfile(f)]
info = {'sources': sources, 'language': 'f77'}
self.set_info(**info)
class x11_info(system_info):
section = 'x11'
notfounderror = X11NotFoundError
_lib_names = ['X11']
def __init__(self):
system_info.__init__(self,
default_lib_dirs=default_x11_lib_dirs,
default_include_dirs=default_x11_include_dirs)
def calc_info(self):
if sys.platform in ['win32']:
return
lib_dirs = self.get_lib_dirs()
include_dirs = self.get_include_dirs()
opt = self.get_option_single('x11_libs', 'libraries')
x11_libs = self.get_libs(opt, self._lib_names)
info = self.check_libs(lib_dirs, x11_libs, [])
if info is None:
return
inc_dir = None
for d in include_dirs:
if self.combine_paths(d, 'X11/X.h'):
inc_dir = d
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir])
self.set_info(**info)
class _numpy_info(system_info):
section = 'Numeric'
modulename = 'Numeric'
notfounderror = NumericNotFoundError
def __init__(self):
include_dirs = []
try:
module = __import__(self.modulename)
prefix = []
for name in module.__file__.split(os.sep):
if name == 'lib':
break
prefix.append(name)
# Ask numpy for its own include path before attempting
# anything else
try:
include_dirs.append(getattr(module, 'get_include')())
except AttributeError:
pass
include_dirs.append(sysconfig.get_path('include'))
except ImportError:
pass
py_incl_dir = sysconfig.get_path('include')
include_dirs.append(py_incl_dir)
py_pincl_dir = sysconfig.get_path('platinclude')
if py_pincl_dir not in include_dirs:
include_dirs.append(py_pincl_dir)
for d in default_include_dirs:
d = os.path.join(d, os.path.basename(py_incl_dir))
if d not in include_dirs:
include_dirs.append(d)
system_info.__init__(self,
default_lib_dirs=[],
default_include_dirs=include_dirs)
def calc_info(self):
try:
module = __import__(self.modulename)
except ImportError:
return
info = {}
macros = []
for v in ['__version__', 'version']:
vrs = getattr(module, v, None)
if vrs is None:
continue
macros = [(self.modulename.upper() + '_VERSION',
_c_string_literal(vrs)),
(self.modulename.upper(), None)]
break
dict_append(info, define_macros=macros)
include_dirs = self.get_include_dirs()
inc_dir = None
for d in include_dirs:
if self.combine_paths(d,
os.path.join(self.modulename,
'arrayobject.h')):
inc_dir = d
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir])
if info:
self.set_info(**info)
return
class numarray_info(_numpy_info):
section = 'numarray'
modulename = 'numarray'
class Numeric_info(_numpy_info):
section = 'Numeric'
modulename = 'Numeric'
class numpy_info(_numpy_info):
section = 'numpy'
modulename = 'numpy'
class numerix_info(system_info):
section = 'numerix'
def calc_info(self):
which = None, None
if os.getenv("NUMERIX"):
which = os.getenv("NUMERIX"), "environment var"
# If all the above fail, default to numpy.
if which[0] is None:
which = "numpy", "defaulted"
try:
import numpy # noqa: F401
which = "numpy", "defaulted"
except ImportError as e:
msg1 = str(e)
try:
import Numeric # noqa: F401
which = "numeric", "defaulted"
except ImportError as e:
msg2 = str(e)
try:
import numarray # noqa: F401
which = "numarray", "defaulted"
except ImportError as e:
msg3 = str(e)
log.info(msg1)
log.info(msg2)
log.info(msg3)
which = which[0].strip().lower(), which[1]
if which[0] not in ["numeric", "numarray", "numpy"]:
raise ValueError("numerix selector must be either 'Numeric' "
"or 'numarray' or 'numpy' but the value obtained"
" from the %s was '%s'." % (which[1], which[0]))
os.environ['NUMERIX'] = which[0]
self.set_info(**get_info(which[0]))
class f2py_info(system_info):
def calc_info(self):
try:
import numpy.f2py as f2py
except ImportError:
return
f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src')
self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')],
include_dirs=[f2py_dir])
return
class boost_python_info(system_info):
section = 'boost_python'
dir_env_var = 'BOOST'
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['boost*']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'libs', 'python', 'src',
'module.cpp')):
src_dir = d
break
if not src_dir:
return
py_incl_dirs = [sysconfig.get_path('include')]
py_pincl_dir = sysconfig.get_path('platinclude')
if py_pincl_dir not in py_incl_dirs:
py_incl_dirs.append(py_pincl_dir)
srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src')
bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp'))
bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp'))
info = {'libraries': [('boost_python_src',
{'include_dirs': [src_dir] + py_incl_dirs,
'sources':bpl_srcs}
)],
'include_dirs': [src_dir],
}
if info:
self.set_info(**info)
return
class agg2_info(system_info):
section = 'agg2'
dir_env_var = 'AGG2'
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['agg2*']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')):
src_dir = d
break
if not src_dir:
return
if sys.platform == 'win32':
agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform',
'win32', 'agg_win32_bmp.cpp'))
else:
agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp'))
agg2_srcs += [os.path.join(src_dir, 'src', 'platform',
'X11',
'agg_platform_support.cpp')]
info = {'libraries':
[('agg2_src',
{'sources': agg2_srcs,
'include_dirs': [os.path.join(src_dir, 'include')],
}
)],
'include_dirs': [os.path.join(src_dir, 'include')],
}
if info:
self.set_info(**info)
return
class _pkg_config_info(system_info):
section = None
config_env_var = 'PKG_CONFIG'
default_config_exe = 'pkg-config'
append_config_exe = ''
version_macro_name = None
release_macro_name = None
version_flag = '--modversion'
cflags_flag = '--cflags'
def get_config_exe(self):
if self.config_env_var in os.environ:
return os.environ[self.config_env_var]
return self.default_config_exe
def get_config_output(self, config_exe, option):
cmd = config_exe + ' ' + self.append_config_exe + ' ' + option
try:
o = subprocess.check_output(cmd)
except (OSError, subprocess.CalledProcessError):
pass
else:
o = filepath_from_subprocess_output(o)
return o
def calc_info(self):
config_exe = find_executable(self.get_config_exe())
if not config_exe:
            log.warn('File not found: %s. Cannot determine %s info.'
                     % (self.get_config_exe(), self.section))
return
info = {}
macros = []
libraries = []
library_dirs = []
include_dirs = []
extra_link_args = []
extra_compile_args = []
version = self.get_config_output(config_exe, self.version_flag)
if version:
macros.append((self.__class__.__name__.split('.')[-1].upper(),
_c_string_literal(version)))
if self.version_macro_name:
macros.append((self.version_macro_name + '_%s'
% (version.replace('.', '_')), None))
if self.release_macro_name:
release = self.get_config_output(config_exe, '--release')
if release:
macros.append((self.release_macro_name + '_%s'
% (release.replace('.', '_')), None))
opts = self.get_config_output(config_exe, '--libs')
if opts:
for opt in opts.split():
if opt[:2] == '-l':
libraries.append(opt[2:])
elif opt[:2] == '-L':
library_dirs.append(opt[2:])
else:
extra_link_args.append(opt)
opts = self.get_config_output(config_exe, self.cflags_flag)
if opts:
for opt in opts.split():
if opt[:2] == '-I':
include_dirs.append(opt[2:])
elif opt[:2] == '-D':
if '=' in opt:
n, v = opt[2:].split('=')
macros.append((n, v))
else:
macros.append((opt[2:], None))
else:
extra_compile_args.append(opt)
if macros:
dict_append(info, define_macros=macros)
if libraries:
dict_append(info, libraries=libraries)
if library_dirs:
dict_append(info, library_dirs=library_dirs)
if include_dirs:
dict_append(info, include_dirs=include_dirs)
if extra_link_args:
dict_append(info, extra_link_args=extra_link_args)
if extra_compile_args:
dict_append(info, extra_compile_args=extra_compile_args)
if info:
self.set_info(**info)
return
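# As a concrete illustration, if ``pkg-config freetype2 --libs`` prints
# ``-L/usr/lib -lfreetype -Wl,-rpath,/usr/lib``, the parsing loop above
# yields library_dirs=['/usr/lib'], libraries=['freetype'] and
# extra_link_args=['-Wl,-rpath,/usr/lib'] (actual output is system-dependent).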
class wx_info(_pkg_config_info):
section = 'wx'
config_env_var = 'WX_CONFIG'
default_config_exe = 'wx-config'
append_config_exe = ''
version_macro_name = 'WX_VERSION'
release_macro_name = 'WX_RELEASE'
version_flag = '--version'
cflags_flag = '--cxxflags'
class gdk_pixbuf_xlib_2_info(_pkg_config_info):
section = 'gdk_pixbuf_xlib_2'
append_config_exe = 'gdk-pixbuf-xlib-2.0'
version_macro_name = 'GDK_PIXBUF_XLIB_VERSION'
class gdk_pixbuf_2_info(_pkg_config_info):
section = 'gdk_pixbuf_2'
append_config_exe = 'gdk-pixbuf-2.0'
version_macro_name = 'GDK_PIXBUF_VERSION'
class gdk_x11_2_info(_pkg_config_info):
section = 'gdk_x11_2'
append_config_exe = 'gdk-x11-2.0'
version_macro_name = 'GDK_X11_VERSION'
class gdk_2_info(_pkg_config_info):
section = 'gdk_2'
append_config_exe = 'gdk-2.0'
version_macro_name = 'GDK_VERSION'
class gdk_info(_pkg_config_info):
section = 'gdk'
append_config_exe = 'gdk'
version_macro_name = 'GDK_VERSION'
class gtkp_x11_2_info(_pkg_config_info):
section = 'gtkp_x11_2'
append_config_exe = 'gtk+-x11-2.0'
version_macro_name = 'GTK_X11_VERSION'
class gtkp_2_info(_pkg_config_info):
section = 'gtkp_2'
append_config_exe = 'gtk+-2.0'
version_macro_name = 'GTK_VERSION'
class xft_info(_pkg_config_info):
section = 'xft'
append_config_exe = 'xft'
version_macro_name = 'XFT_VERSION'
class freetype2_info(_pkg_config_info):
section = 'freetype2'
append_config_exe = 'freetype2'
version_macro_name = 'FREETYPE2_VERSION'
class amd_info(system_info):
section = 'amd'
dir_env_var = 'AMD'
_lib_names = ['amd']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
opt = self.get_option_single('amd_libs', 'libraries')
amd_libs = self.get_libs(opt, self._lib_names)
info = self.check_libs(lib_dirs, amd_libs, [])
if info is None:
return
include_dirs = self.get_include_dirs()
inc_dir = None
for d in include_dirs:
p = self.combine_paths(d, 'amd.h')
if p:
inc_dir = os.path.dirname(p[0])
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir],
define_macros=[('SCIPY_AMD_H', None)],
swig_opts=['-I' + inc_dir])
self.set_info(**info)
return
class umfpack_info(system_info):
section = 'umfpack'
dir_env_var = 'UMFPACK'
notfounderror = UmfpackNotFoundError
_lib_names = ['umfpack']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
opt = self.get_option_single('umfpack_libs', 'libraries')
umfpack_libs = self.get_libs(opt, self._lib_names)
info = self.check_libs(lib_dirs, umfpack_libs, [])
if info is None:
return
include_dirs = self.get_include_dirs()
inc_dir = None
for d in include_dirs:
p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h')
if p:
inc_dir = os.path.dirname(p[0])
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir],
define_macros=[('SCIPY_UMFPACK_H', None)],
swig_opts=['-I' + inc_dir])
dict_append(info, **get_info('amd'))
self.set_info(**info)
return
def combine_paths(*args, **kws):
""" Return a list of existing paths composed by all combinations of
items from arguments.
"""
r = []
for a in args:
if not a:
continue
if is_string(a):
a = [a]
r.append(a)
args = r
if not args:
return []
if len(args) == 1:
result = reduce(lambda a, b: a + b, map(glob, args[0]), [])
elif len(args) == 2:
result = []
for a0 in args[0]:
for a1 in args[1]:
result.extend(glob(os.path.join(a0, a1)))
else:
        # Pass the combined matches back in as a single list argument so
        # they pair with args[2], instead of being cross-joined with each
        # other by the two-argument branch above.
        result = combine_paths(combine_paths(args[0], args[1]), *args[2:])
log.debug('(paths: %s)', ','.join(result))
return result
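# Illustrative use (the result depends on what exists on disk):
#   combine_paths('/usr', ['lib', 'lib64'], 'atlas*')
# globs '/usr/lib/atlas*' and '/usr/lib64/atlas*' and returns only the
# matches that actually exist.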
language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3}
inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'}
def dict_append(d, **kws):
languages = []
for k, v in kws.items():
if k == 'language':
languages.append(v)
continue
if k in d:
if k in ['library_dirs', 'include_dirs',
'extra_compile_args', 'extra_link_args',
'runtime_library_dirs', 'define_macros']:
                for vv in v:
                    if vv not in d[k]:
                        d[k].append(vv)
else:
d[k].extend(v)
else:
d[k] = v
if languages:
        d['language'] = inv_language_map[
            max(language_map.get(lang, 0) for lang in languages)]
return
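# Illustrative merge (hypothetical values):
#   d = {'libraries': ['lapack'], 'library_dirs': ['/usr/lib']}
#   dict_append(d, libraries=['blas'], library_dirs=['/usr/lib'],
#               language='f77')
#   # now d['libraries'] == ['lapack', 'blas'], d['library_dirs'] stays
#   # de-duplicated as ['/usr/lib'] and d['language'] == 'f77'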
def parseCmdLine(argv=(None,)):
import optparse
parser = optparse.OptionParser("usage: %prog [-v] [info objs]")
parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
default=False,
help='be verbose and print more messages')
opts, args = parser.parse_args(args=argv[1:])
return opts, args
def show_all(argv=None):
import inspect
if argv is None:
argv = sys.argv
opts, args = parseCmdLine(argv)
if opts.verbose:
log.set_threshold(log.DEBUG)
else:
log.set_threshold(log.INFO)
show_only = []
for n in args:
        if not n.endswith('_info'):
            n = n + '_info'
show_only.append(n)
show_all = not show_only
_gdict_ = globals().copy()
for name, c in _gdict_.items():
if not inspect.isclass(c):
continue
if not issubclass(c, system_info) or c is system_info:
continue
if not show_all:
if name not in show_only:
continue
            show_only.remove(name)
conf = c()
conf.verbosity = 2
# we don't need the result, but we want
# the side effect of printing diagnostics
conf.get_info()
if show_only:
log.info('Info classes not defined: %s', ','.join(show_only))
if __name__ == "__main__":
show_all()
| 111,028 | Python | 33.991806 | 120 | 0.549348 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/msvc9compiler.py | import os
from distutils.msvc9compiler import MSVCCompiler as _MSVCCompiler
from .system_info import platform_bits
def _merge(old, new):
"""Concatenate two environment paths avoiding repeats.
Here `old` is the environment string before the base class initialize
function is called and `new` is the string after the call. The new string
will be a fixed string if it is not obtained from the current environment,
or the same as the old string if obtained from the same environment. The aim
    here is to avoid appending the new string when it is already contained in
    the old one, so as to limit the growth of the environment string.
Parameters
----------
old : string
Previous environment string.
new : string
New environment string.
Returns
-------
ret : string
Updated environment string.
"""
if not old:
return new
if new in old:
return old
# Neither new nor old is empty. Give old priority.
return ';'.join([old, new])
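# Illustrative behaviour (paths hypothetical):
#   _merge('', 'C:\\vc\\lib')                    -> 'C:\\vc\\lib'
#   _merge('C:\\vc\\lib;C:\\sdk', 'C:\\vc\\lib') -> 'C:\\vc\\lib;C:\\sdk'
#   _merge('C:\\vc\\lib', 'C:\\sdk')             -> 'C:\\vc\\lib;C:\\sdk'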
class MSVCCompiler(_MSVCCompiler):
def __init__(self, verbose=0, dry_run=0, force=0):
_MSVCCompiler.__init__(self, verbose, dry_run, force)
def initialize(self, plat_name=None):
# The 'lib' and 'include' variables may be overwritten
# by MSVCCompiler.initialize, so save them for later merge.
environ_lib = os.getenv('lib')
environ_include = os.getenv('include')
_MSVCCompiler.initialize(self, plat_name)
# Merge current and previous values of 'lib' and 'include'
os.environ['lib'] = _merge(environ_lib, os.environ['lib'])
os.environ['include'] = _merge(environ_include, os.environ['include'])
# msvc9 building for 32 bits requires SSE2 to work around a
# compiler bug.
if platform_bits == 32:
self.compile_options += ['/arch:SSE2']
self.compile_options_debug += ['/arch:SSE2']
def manifest_setup_ldargs(self, output_filename, build_temp, ld_args):
ld_args.append('/MANIFEST')
_MSVCCompiler.manifest_setup_ldargs(self, output_filename,
build_temp, ld_args)
| 2,192 | Python | 33.265624 | 80 | 0.641423 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/extension.py | """distutils.extension
Provides the Extension class, used to describe C/C++ extension
modules in setup scripts.
Overridden to support f2py.
"""
import re
from distutils.extension import Extension as old_Extension
cxx_ext_re = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match
fortran_pyf_ext_re = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match
class Extension(old_Extension):
"""
Parameters
----------
name : str
Extension name.
sources : list of str
List of source file locations relative to the top directory of
the package.
extra_compile_args : list of str
Extra command line arguments to pass to the compiler.
extra_f77_compile_args : list of str
Extra command line arguments to pass to the fortran77 compiler.
extra_f90_compile_args : list of str
Extra command line arguments to pass to the fortran90 compiler.
"""
def __init__(
self, name, sources,
include_dirs=None,
define_macros=None,
undef_macros=None,
library_dirs=None,
libraries=None,
runtime_library_dirs=None,
extra_objects=None,
extra_compile_args=None,
extra_link_args=None,
export_symbols=None,
swig_opts=None,
depends=None,
language=None,
f2py_options=None,
module_dirs=None,
extra_c_compile_args=None,
extra_cxx_compile_args=None,
extra_f77_compile_args=None,
extra_f90_compile_args=None,):
old_Extension.__init__(
self, name, [],
include_dirs=include_dirs,
define_macros=define_macros,
undef_macros=undef_macros,
library_dirs=library_dirs,
libraries=libraries,
runtime_library_dirs=runtime_library_dirs,
extra_objects=extra_objects,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
export_symbols=export_symbols)
# Avoid assert statements checking that sources contains strings:
self.sources = sources
# Python 2.4 distutils new features
self.swig_opts = swig_opts or []
# swig_opts is assumed to be a list. Here we handle the case where it
# is specified as a string instead.
if isinstance(self.swig_opts, str):
import warnings
msg = "swig_opts is specified as a string instead of a list"
warnings.warn(msg, SyntaxWarning, stacklevel=2)
self.swig_opts = self.swig_opts.split()
# Python 2.3 distutils new features
self.depends = depends or []
self.language = language
# numpy_distutils features
self.f2py_options = f2py_options or []
self.module_dirs = module_dirs or []
self.extra_c_compile_args = extra_c_compile_args or []
self.extra_cxx_compile_args = extra_cxx_compile_args or []
self.extra_f77_compile_args = extra_f77_compile_args or []
self.extra_f90_compile_args = extra_f90_compile_args or []
return
def has_cxx_sources(self):
for source in self.sources:
if cxx_ext_re(str(source)):
return True
return False
def has_f2py_sources(self):
for source in self.sources:
if fortran_pyf_ext_re(source):
return True
return False
# class Extension
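# Minimal illustrative setup.py usage (module and source names are
# hypothetical; the .pyf file is an f2py signature file):
#   from numpy.distutils.core import setup
#   from numpy.distutils.extension import Extension
#   ext = Extension('spam', ['spam.pyf', 'spam_src.f'],
#                   extra_f77_compile_args=['-O2'])
#   setup(name='spam', ext_modules=[ext])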
| 3,568 | Python | 32.046296 | 81 | 0.589406 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/cpuinfo.py | #!/usr/bin/env python3
"""
cpuinfo
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Pearu Peterson
"""
__all__ = ['cpu']
import os
import platform
import re
import sys
import types
import warnings
from subprocess import getstatusoutput
def getoutput(cmd, successful_status=(0,), stacklevel=1):
try:
status, output = getstatusoutput(cmd)
except OSError as e:
warnings.warn(str(e), UserWarning, stacklevel=stacklevel)
return False, ""
if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status:
return True, output
return False, output
def command_info(successful_status=(0,), stacklevel=1, **kw):
info = {}
for key in kw:
ok, output = getoutput(kw[key], successful_status=successful_status,
stacklevel=stacklevel+1)
if ok:
info[key] = output.strip()
return info
def command_by_line(cmd, successful_status=(0,), stacklevel=1):
ok, output = getoutput(cmd, successful_status=successful_status,
stacklevel=stacklevel+1)
if not ok:
return
for line in output.splitlines():
yield line.strip()
def key_value_from_command(cmd, sep, successful_status=(0,),
stacklevel=1):
d = {}
for line in command_by_line(cmd, successful_status=successful_status,
stacklevel=stacklevel+1):
l = [s.strip() for s in line.split(sep, 1)]
if len(l) == 2:
d[l[0]] = l[1]
return d
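# E.g. (illustrative) ``key_value_from_command('sysctl hw', sep='=')``
# turns output lines like ``hw.ncpu = 8`` into {'hw.ncpu': '8'}.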
class CPUInfoBase:
"""Holds CPU information and provides methods for requiring
the availability of various CPU features.
"""
def _try_call(self, func):
try:
return func()
except Exception:
pass
def __getattr__(self, name):
if not name.startswith('_'):
if hasattr(self, '_'+name):
attr = getattr(self, '_'+name)
if isinstance(attr, types.MethodType):
                    return lambda func=self._try_call, attr=attr: func(attr)
                else:
                    return lambda: None
raise AttributeError(name)
def _getNCPUs(self):
return 1
def __get_nbits(self):
abits = platform.architecture()[0]
nbits = re.compile(r'(\d+)bit').search(abits).group(1)
return nbits
def _is_32bit(self):
return self.__get_nbits() == '32'
def _is_64bit(self):
return self.__get_nbits() == '64'
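# The __getattr__ hook above exposes every ``_is_XXX``/``_has_XXX`` method
# as ``cpu.is_XXX()``/``cpu.has_XXX()``, wrapped in _try_call so a failing
# probe returns None instead of raising. Illustrative calls:
#   cpu.is_64bit()   # True on a 64-bit interpreter
#   cpu.is_Intel()   # True/False, or None if the info lookup fails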
class LinuxCPUInfo(CPUInfoBase):
info = None
def __init__(self):
if self.info is not None:
return
info = [ {} ]
ok, output = getoutput('uname -m')
if ok:
info[0]['uname_m'] = output.strip()
try:
fo = open('/proc/cpuinfo')
except OSError as e:
warnings.warn(str(e), UserWarning, stacklevel=2)
else:
for line in fo:
name_value = [s.strip() for s in line.split(':', 1)]
if len(name_value) != 2:
continue
name, value = name_value
if not info or name in info[-1]: # next processor
info.append({})
info[-1][name] = value
fo.close()
self.__class__.info = info
def _not_impl(self): pass
# Athlon
def _is_AMD(self):
return self.info[0]['vendor_id']=='AuthenticAMD'
def _is_AthlonK6_2(self):
return self._is_AMD() and self.info[0]['model'] == '2'
def _is_AthlonK6_3(self):
return self._is_AMD() and self.info[0]['model'] == '3'
def _is_AthlonK6(self):
return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None
def _is_AthlonK7(self):
return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None
def _is_AthlonMP(self):
return re.match(r'.*?Athlon\(tm\) MP\b',
self.info[0]['model name']) is not None
def _is_AMD64(self):
        # the /proc/cpuinfo key is 'cpu family', not 'family'
        return self.is_AMD() and self.info[0]['cpu family'] == '15'
def _is_Athlon64(self):
return re.match(r'.*?Athlon\(tm\) 64\b',
self.info[0]['model name']) is not None
def _is_AthlonHX(self):
return re.match(r'.*?Athlon HX\b',
self.info[0]['model name']) is not None
def _is_Opteron(self):
return re.match(r'.*?Opteron\b',
self.info[0]['model name']) is not None
def _is_Hammer(self):
return re.match(r'.*?Hammer\b',
self.info[0]['model name']) is not None
# Alpha
def _is_Alpha(self):
return self.info[0]['cpu']=='Alpha'
def _is_EV4(self):
return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4'
def _is_EV5(self):
return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5'
def _is_EV56(self):
return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56'
def _is_PCA56(self):
return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56'
# Intel
#XXX
_is_i386 = _not_impl
def _is_Intel(self):
return self.info[0]['vendor_id']=='GenuineIntel'
def _is_i486(self):
return self.info[0]['cpu']=='i486'
def _is_i586(self):
return self.is_Intel() and self.info[0]['cpu family'] == '5'
def _is_i686(self):
return self.is_Intel() and self.info[0]['cpu family'] == '6'
def _is_Celeron(self):
return re.match(r'.*?Celeron',
self.info[0]['model name']) is not None
def _is_Pentium(self):
return re.match(r'.*?Pentium',
self.info[0]['model name']) is not None
def _is_PentiumII(self):
return re.match(r'.*?Pentium.*?II\b',
self.info[0]['model name']) is not None
def _is_PentiumPro(self):
return re.match(r'.*?PentiumPro\b',
self.info[0]['model name']) is not None
def _is_PentiumMMX(self):
return re.match(r'.*?Pentium.*?MMX\b',
self.info[0]['model name']) is not None
def _is_PentiumIII(self):
return re.match(r'.*?Pentium.*?III\b',
self.info[0]['model name']) is not None
def _is_PentiumIV(self):
return re.match(r'.*?Pentium.*?(IV|4)\b',
self.info[0]['model name']) is not None
def _is_PentiumM(self):
return re.match(r'.*?Pentium.*?M\b',
self.info[0]['model name']) is not None
def _is_Prescott(self):
return self.is_PentiumIV() and self.has_sse3()
def _is_Nocona(self):
return (self.is_Intel()
and (self.info[0]['cpu family'] == '6'
or self.info[0]['cpu family'] == '15')
and (self.has_sse3() and not self.has_ssse3())
and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None)
def _is_Core2(self):
return (self.is_64bit() and self.is_Intel() and
re.match(r'.*?Core\(TM\)2\b',
self.info[0]['model name']) is not None)
def _is_Itanium(self):
return re.match(r'.*?Itanium\b',
self.info[0]['family']) is not None
def _is_XEON(self):
return re.match(r'.*?XEON\b',
self.info[0]['model name'], re.IGNORECASE) is not None
_is_Xeon = _is_XEON
# Varia
def _is_singleCPU(self):
return len(self.info) == 1
def _getNCPUs(self):
return len(self.info)
def _has_fdiv_bug(self):
return self.info[0]['fdiv_bug']=='yes'
def _has_f00f_bug(self):
return self.info[0]['f00f_bug']=='yes'
def _has_mmx(self):
return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None
def _has_sse(self):
return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None
def _has_sse2(self):
return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None
def _has_sse3(self):
return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None
def _has_ssse3(self):
return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is not None
def _has_3dnow(self):
return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None
def _has_3dnowext(self):
return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None
class IRIXCPUInfo(CPUInfoBase):
info = None
def __init__(self):
if self.info is not None:
return
info = key_value_from_command('sysconf', sep=' ',
successful_status=(0, 1))
self.__class__.info = info
def _not_impl(self): pass
def _is_singleCPU(self):
return self.info.get('NUM_PROCESSORS') == '1'
def _getNCPUs(self):
return int(self.info.get('NUM_PROCESSORS', 1))
def __cputype(self, n):
return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n)
def _is_r2000(self): return self.__cputype(2000)
def _is_r3000(self): return self.__cputype(3000)
def _is_r3900(self): return self.__cputype(3900)
def _is_r4000(self): return self.__cputype(4000)
def _is_r4100(self): return self.__cputype(4100)
def _is_r4300(self): return self.__cputype(4300)
def _is_r4400(self): return self.__cputype(4400)
def _is_r4600(self): return self.__cputype(4600)
def _is_r4650(self): return self.__cputype(4650)
def _is_r5000(self): return self.__cputype(5000)
def _is_r6000(self): return self.__cputype(6000)
def _is_r8000(self): return self.__cputype(8000)
def _is_r10000(self): return self.__cputype(10000)
def _is_r12000(self): return self.__cputype(12000)
def _is_rorion(self): return self.__cputype('orion')
def get_ip(self):
try: return self.info.get('MACHINE')
except Exception: pass
def __machine(self, n):
return self.info.get('MACHINE').lower() == 'ip%s' % (n)
def _is_IP19(self): return self.__machine(19)
def _is_IP20(self): return self.__machine(20)
def _is_IP21(self): return self.__machine(21)
def _is_IP22(self): return self.__machine(22)
def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000()
def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000()
def _is_IP24(self): return self.__machine(24)
def _is_IP25(self): return self.__machine(25)
def _is_IP26(self): return self.__machine(26)
def _is_IP27(self): return self.__machine(27)
def _is_IP28(self): return self.__machine(28)
def _is_IP30(self): return self.__machine(30)
def _is_IP32(self): return self.__machine(32)
def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000()
def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000()
class DarwinCPUInfo(CPUInfoBase):
info = None
def __init__(self):
if self.info is not None:
return
info = command_info(arch='arch',
machine='machine')
info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=')
self.__class__.info = info
def _not_impl(self): pass
def _getNCPUs(self):
return int(self.info['sysctl_hw'].get('hw.ncpu', 1))
def _is_Power_Macintosh(self):
return self.info['sysctl_hw']['hw.machine']=='Power Macintosh'
def _is_i386(self):
return self.info['arch']=='i386'
def _is_ppc(self):
return self.info['arch']=='ppc'
def __machine(self, n):
return self.info['machine'] == 'ppc%s'%n
def _is_ppc601(self): return self.__machine(601)
def _is_ppc602(self): return self.__machine(602)
def _is_ppc603(self): return self.__machine(603)
def _is_ppc603e(self): return self.__machine('603e')
def _is_ppc604(self): return self.__machine(604)
def _is_ppc604e(self): return self.__machine('604e')
def _is_ppc620(self): return self.__machine(620)
def _is_ppc630(self): return self.__machine(630)
def _is_ppc740(self): return self.__machine(740)
def _is_ppc7400(self): return self.__machine(7400)
def _is_ppc7450(self): return self.__machine(7450)
def _is_ppc750(self): return self.__machine(750)
def _is_ppc403(self): return self.__machine(403)
def _is_ppc505(self): return self.__machine(505)
def _is_ppc801(self): return self.__machine(801)
def _is_ppc821(self): return self.__machine(821)
def _is_ppc823(self): return self.__machine(823)
def _is_ppc860(self): return self.__machine(860)
class SunOSCPUInfo(CPUInfoBase):
info = None
def __init__(self):
if self.info is not None:
return
info = command_info(arch='arch',
mach='mach',
uname_i='uname_i',
isainfo_b='isainfo -b',
isainfo_n='isainfo -n',
)
info['uname_X'] = key_value_from_command('uname -X', sep='=')
for line in command_by_line('psrinfo -v 0'):
m = re.match(r'\s*The (?P<p>[\w\d]+) processor operates at', line)
if m:
info['processor'] = m.group('p')
break
self.__class__.info = info
def _not_impl(self): pass
def _is_i386(self):
return self.info['isainfo_n']=='i386'
def _is_sparc(self):
return self.info['isainfo_n']=='sparc'
def _is_sparcv9(self):
return self.info['isainfo_n']=='sparcv9'
def _getNCPUs(self):
return int(self.info['uname_X'].get('NumCPU', 1))
def _is_sun4(self):
return self.info['arch']=='sun4'
def _is_SUNW(self):
return re.match(r'SUNW', self.info['uname_i']) is not None
def _is_sparcstation5(self):
return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None
def _is_ultra1(self):
return re.match(r'.*Ultra-1', self.info['uname_i']) is not None
def _is_ultra250(self):
return re.match(r'.*Ultra-250', self.info['uname_i']) is not None
def _is_ultra2(self):
return re.match(r'.*Ultra-2', self.info['uname_i']) is not None
def _is_ultra30(self):
return re.match(r'.*Ultra-30', self.info['uname_i']) is not None
def _is_ultra4(self):
return re.match(r'.*Ultra-4', self.info['uname_i']) is not None
def _is_ultra5_10(self):
return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None
def _is_ultra5(self):
return re.match(r'.*Ultra-5', self.info['uname_i']) is not None
def _is_ultra60(self):
return re.match(r'.*Ultra-60', self.info['uname_i']) is not None
def _is_ultra80(self):
return re.match(r'.*Ultra-80', self.info['uname_i']) is not None
    def _is_ultraenterprise(self):
return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None
    def _is_ultraenterprise10k(self):
return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None
def _is_sunfire(self):
return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None
def _is_ultra(self):
return re.match(r'.*Ultra', self.info['uname_i']) is not None
def _is_cpusparcv7(self):
return self.info['processor']=='sparcv7'
def _is_cpusparcv8(self):
return self.info['processor']=='sparcv8'
def _is_cpusparcv9(self):
return self.info['processor']=='sparcv9'
class Win32CPUInfo(CPUInfoBase):
info = None
pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor"
# XXX: what does the value of
# HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0
# mean?
def __init__(self):
if self.info is not None:
return
info = []
try:
#XXX: Bad style to use so long `try:...except:...`. Fix it!
import winreg
prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"
r"\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE)
chnd=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey)
pnum=0
while True:
try:
proc=winreg.EnumKey(chnd, pnum)
except winreg.error:
break
else:
pnum+=1
info.append({"Processor":proc})
phnd=winreg.OpenKey(chnd, proc)
pidx=0
while True:
try:
name, value, vtpe=winreg.EnumValue(phnd, pidx)
except winreg.error:
break
else:
pidx=pidx+1
info[-1][name]=value
if name=="Identifier":
srch=prgx.search(value)
if srch:
info[-1]["Family"]=int(srch.group("FML"))
info[-1]["Model"]=int(srch.group("MDL"))
info[-1]["Stepping"]=int(srch.group("STP"))
except Exception as e:
print(e, '(ignoring)')
self.__class__.info = info
def _not_impl(self): pass
# Athlon
def _is_AMD(self):
return self.info[0]['VendorIdentifier']=='AuthenticAMD'
def _is_Am486(self):
return self.is_AMD() and self.info[0]['Family']==4
def _is_Am5x86(self):
return self.is_AMD() and self.info[0]['Family']==4
def _is_AMDK5(self):
return self.is_AMD() and self.info[0]['Family']==5 \
and self.info[0]['Model'] in [0, 1, 2, 3]
def _is_AMDK6(self):
return self.is_AMD() and self.info[0]['Family']==5 \
and self.info[0]['Model'] in [6, 7]
def _is_AMDK6_2(self):
return self.is_AMD() and self.info[0]['Family']==5 \
and self.info[0]['Model']==8
def _is_AMDK6_3(self):
return self.is_AMD() and self.info[0]['Family']==5 \
and self.info[0]['Model']==9
def _is_AMDK7(self):
return self.is_AMD() and self.info[0]['Family'] == 6
# To reliably distinguish between the different types of AMD64 chips
    # (Athlon64, Opteron, Athlon64 X2, Sempron, Turion 64, etc.) would
# require looking at the 'brand' from cpuid
def _is_AMD64(self):
return self.is_AMD() and self.info[0]['Family'] == 15
# Intel
def _is_Intel(self):
return self.info[0]['VendorIdentifier']=='GenuineIntel'
def _is_i386(self):
return self.info[0]['Family']==3
def _is_i486(self):
return self.info[0]['Family']==4
def _is_i586(self):
return self.is_Intel() and self.info[0]['Family']==5
def _is_i686(self):
return self.is_Intel() and self.info[0]['Family']==6
def _is_Pentium(self):
return self.is_Intel() and self.info[0]['Family']==5
def _is_PentiumMMX(self):
return self.is_Intel() and self.info[0]['Family']==5 \
and self.info[0]['Model']==4
def _is_PentiumPro(self):
return self.is_Intel() and self.info[0]['Family']==6 \
and self.info[0]['Model']==1
def _is_PentiumII(self):
return self.is_Intel() and self.info[0]['Family']==6 \
and self.info[0]['Model'] in [3, 5, 6]
def _is_PentiumIII(self):
return self.is_Intel() and self.info[0]['Family']==6 \
and self.info[0]['Model'] in [7, 8, 9, 10, 11]
def _is_PentiumIV(self):
return self.is_Intel() and self.info[0]['Family']==15
def _is_PentiumM(self):
return self.is_Intel() and self.info[0]['Family'] == 6 \
and self.info[0]['Model'] in [9, 13, 14]
def _is_Core2(self):
return self.is_Intel() and self.info[0]['Family'] == 6 \
and self.info[0]['Model'] in [15, 16, 17]
# Varia
def _is_singleCPU(self):
return len(self.info) == 1
def _getNCPUs(self):
return len(self.info)
def _has_mmx(self):
if self.is_Intel():
return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \
or (self.info[0]['Family'] in [6, 15])
elif self.is_AMD():
return self.info[0]['Family'] in [5, 6, 15]
else:
return False
def _has_sse(self):
if self.is_Intel():
return ((self.info[0]['Family']==6 and
self.info[0]['Model'] in [7, 8, 9, 10, 11])
or self.info[0]['Family']==15)
elif self.is_AMD():
return ((self.info[0]['Family']==6 and
self.info[0]['Model'] in [6, 7, 8, 10])
or self.info[0]['Family']==15)
else:
return False
def _has_sse2(self):
if self.is_Intel():
            # the probe method is _is_PentiumIV, so the dynamic name is
            # is_PentiumIV; is_Pentium4 would raise AttributeError
            return self.is_PentiumIV() or self.is_PentiumM() \
or self.is_Core2()
elif self.is_AMD():
return self.is_AMD64()
else:
return False
def _has_3dnow(self):
return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15]
def _has_3dnowext(self):
return self.is_AMD() and self.info[0]['Family'] in [6, 15]
if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?)
cpuinfo = LinuxCPUInfo
elif sys.platform.startswith('irix'):
cpuinfo = IRIXCPUInfo
elif sys.platform == 'darwin':
cpuinfo = DarwinCPUInfo
elif sys.platform.startswith('sunos'):
cpuinfo = SunOSCPUInfo
elif sys.platform.startswith('win32'):
cpuinfo = Win32CPUInfo
elif sys.platform.startswith('cygwin'):
cpuinfo = LinuxCPUInfo
# XXX: other OSes, e.g. use os.uname on other unices.
else:
cpuinfo = CPUInfoBase
cpu = cpuinfo()
#if __name__ == "__main__":
#
# cpu.is_blaa()
# cpu.is_Intel()
# cpu.is_Alpha()
#
# print('CPU information:'),
# for name in dir(cpuinfo):
# if name[0]=='_' and name[1]!='_':
# r = getattr(cpu,name[1:])()
# if r:
# if r!=1:
# print('%s=%s' %(name[1:],r))
# else:
# print(name[1:]),
# print()
| 22,639 | Python | 32.099415 | 86 | 0.543708 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/_shell_utils.py | """
Helper functions for interacting with the shell, and consuming shell-style
parameters provided in config files.
"""
import os
import shlex
import subprocess
try:
from shlex import quote
except ImportError:
from pipes import quote
__all__ = ['WindowsParser', 'PosixParser', 'NativeParser']
class CommandLineParser:
"""
An object that knows how to split and join command-line arguments.
It must be true that ``argv == split(join(argv))`` for all ``argv``.
The reverse neednt be true - `join(split(cmd))` may result in the addition
or removal of unnecessary escaping.
"""
@staticmethod
def join(argv):
""" Join a list of arguments into a command line string """
raise NotImplementedError
@staticmethod
def split(cmd):
""" Split a command line string into a list of arguments """
raise NotImplementedError
class WindowsParser:
"""
The parsing behavior used by `subprocess.call("string")` on Windows, which
matches the Microsoft C/C++ runtime.
Note that this is _not_ the behavior of cmd.
"""
@staticmethod
def join(argv):
# note that list2cmdline is specific to the windows syntax
return subprocess.list2cmdline(argv)
@staticmethod
def split(cmd):
import ctypes # guarded import for systems without ctypes
try:
ctypes.windll
except AttributeError:
raise NotImplementedError
# Windows has special parsing rules for the executable (no quotes),
        # which we do not care about: insert a dummy element
if not cmd:
return []
cmd = 'dummy ' + cmd
CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
CommandLineToArgvW.restype = ctypes.POINTER(ctypes.c_wchar_p)
CommandLineToArgvW.argtypes = (ctypes.c_wchar_p, ctypes.POINTER(ctypes.c_int))
nargs = ctypes.c_int()
lpargs = CommandLineToArgvW(cmd, ctypes.byref(nargs))
args = [lpargs[i] for i in range(nargs.value)]
assert not ctypes.windll.kernel32.LocalFree(lpargs)
# strip the element we inserted
assert args[0] == "dummy"
return args[1:]
class PosixParser:
"""
The parsing behavior used by `subprocess.call("string", shell=True)` on Posix.
"""
@staticmethod
def join(argv):
return ' '.join(quote(arg) for arg in argv)
@staticmethod
def split(cmd):
return shlex.split(cmd, posix=True)
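# The round-trip invariant from CommandLineParser holds for both parsers,
# e.g. (illustrative):
#   argv = ['gcc', '-I', 'dir with spaces']
#   PosixParser.split(PosixParser.join(argv)) == argv   # True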
if os.name == 'nt':
NativeParser = WindowsParser
elif os.name == 'posix':
NativeParser = PosixParser
| 2,613 | Python | 27.413043 | 86 | 0.652889 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/__init__.py | """
An enhanced distutils, providing support for Fortran compilers, for BLAS,
LAPACK and other common libraries for numerical computing, and more.
Public submodules are::
misc_util
system_info
cpu_info
log
exec_command
For details, please see the *Packaging* and *NumPy Distutils User Guide*
sections of the NumPy Reference Guide.
For configuring the preference for and location of libraries like BLAS and
LAPACK, and for setting include paths and similar build options, please see
``site.cfg.example`` in the root of the NumPy repository or sdist.
"""
import warnings
# Must import local ccompiler ASAP in order to get
# customized CCompiler.spawn effective.
from . import ccompiler
from . import unixccompiler
from .npy_pkg_config import *
warnings.warn("\n\n"
" `numpy.distutils` is deprecated since NumPy 1.23.0, as a result\n"
" of the deprecation of `distutils` itself. It will be removed for\n"
" Python >= 3.12. For older Python versions it will remain present.\n"
" It is recommended to use `setuptools < 60.0` for those Python versions.\n"
" For more details, see:\n"
" https://numpy.org/devdocs/reference/distutils_status_migration.html \n\n",
DeprecationWarning, stacklevel=2
)
del warnings
# If numpy is installed, add distutils.test()
try:
from . import __config__
# Normally numpy is installed if the above import works, but an interrupted
# in-place build could also have left a __config__.py. In that case the
# next import may still fail, so keep it inside the try block.
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
except ImportError:
pass
def customized_fcompiler(plat=None, compiler=None):
from numpy.distutils.fcompiler import new_fcompiler
c = new_fcompiler(plat=plat, compiler=compiler)
c.customize()
return c
def customized_ccompiler(plat=None, compiler=None, verbose=1):
c = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose)
c.customize('')
return c
| 2,074 | Python | 30.923076 | 83 | 0.725651 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/intelccompiler.py | import platform
from distutils.unixccompiler import UnixCCompiler
from numpy.distutils.exec_command import find_executable
from numpy.distutils.ccompiler import simple_version_match
if platform.system() == 'Windows':
from numpy.distutils.msvc9compiler import MSVCCompiler
class IntelCCompiler(UnixCCompiler):
"""A modified Intel compiler compatible with a GCC-built Python."""
compiler_type = 'intel'
cc_exe = 'icc'
    cc_args = '-fPIC'  # flag spelling matches IntelEM64TCCompiler below
def __init__(self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__(self, verbose, dry_run, force)
v = self.get_version()
mpopt = 'openmp' if v and v < '15' else 'qopenmp'
self.cc_exe = ('icc -fPIC -fp-model strict -O3 '
'-fomit-frame-pointer -{}').format(mpopt)
compiler = self.cc_exe
if platform.system() == 'Darwin':
shared_flag = '-Wl,-undefined,dynamic_lookup'
else:
shared_flag = '-shared'
self.set_executables(compiler=compiler,
compiler_so=compiler,
compiler_cxx=compiler,
archiver='xiar' + ' cru',
linker_exe=compiler + ' -shared-intel',
linker_so=compiler + ' ' + shared_flag +
' -shared-intel')
class IntelItaniumCCompiler(IntelCCompiler):
compiler_type = 'intele'
# On Itanium, the Intel Compiler used to be called ecc, let's search for
# it (now it's also icc, so ecc is last in the search).
for cc_exe in map(find_executable, ['icc', 'ecc']):
if cc_exe:
break
class IntelEM64TCCompiler(UnixCCompiler):
"""
A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python.
"""
compiler_type = 'intelem'
cc_exe = 'icc -m64'
cc_args = '-fPIC'
def __init__(self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__(self, verbose, dry_run, force)
v = self.get_version()
mpopt = 'openmp' if v and v < '15' else 'qopenmp'
self.cc_exe = ('icc -std=c99 -m64 -fPIC -fp-model strict -O3 '
'-fomit-frame-pointer -{}').format(mpopt)
compiler = self.cc_exe
if platform.system() == 'Darwin':
shared_flag = '-Wl,-undefined,dynamic_lookup'
else:
shared_flag = '-shared'
self.set_executables(compiler=compiler,
compiler_so=compiler,
compiler_cxx=compiler,
archiver='xiar' + ' cru',
linker_exe=compiler + ' -shared-intel',
linker_so=compiler + ' ' + shared_flag +
' -shared-intel')
if platform.system() == 'Windows':
class IntelCCompilerW(MSVCCompiler):
"""
A modified Intel compiler compatible with an MSVC-built Python.
"""
compiler_type = 'intelw'
compiler_cxx = 'icl'
def __init__(self, verbose=0, dry_run=0, force=0):
MSVCCompiler.__init__(self, verbose, dry_run, force)
version_match = simple_version_match(start=r'Intel\(R\).*?32,')
self.__version = version_match
def initialize(self, plat_name=None):
MSVCCompiler.initialize(self, plat_name)
self.cc = self.find_exe('icl.exe')
self.lib = self.find_exe('xilib')
self.linker = self.find_exe('xilink')
self.compile_options = ['/nologo', '/O3', '/MD', '/W3',
'/Qstd=c99']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3',
'/Qstd=c99', '/Z7', '/D_DEBUG']
class IntelEM64TCCompilerW(IntelCCompilerW):
"""
A modified Intel x86_64 compiler compatible with
a 64bit MSVC-built Python.
"""
compiler_type = 'intelemw'
def __init__(self, verbose=0, dry_run=0, force=0):
MSVCCompiler.__init__(self, verbose, dry_run, force)
version_match = simple_version_match(start=r'Intel\(R\).*?64,')
self.__version = version_match
| 4,234 | Python | 36.8125 | 78 | 0.541096 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/core.py | import sys
from distutils.core import Distribution
if 'setuptools' in sys.modules:
have_setuptools = True
from setuptools import setup as old_setup
# easy_install imports math, it may be picked up from cwd
from setuptools.command import easy_install
try:
# very old versions of setuptools don't have this
from setuptools.command import bdist_egg
except ImportError:
have_setuptools = False
else:
from distutils.core import setup as old_setup
have_setuptools = False
import warnings
import distutils.core
import distutils.dist
from numpy.distutils.extension import Extension # noqa: F401
from numpy.distutils.numpy_distribution import NumpyDistribution
from numpy.distutils.command import config, config_compiler, \
build, build_py, build_ext, build_clib, build_src, build_scripts, \
sdist, install_data, install_headers, install, bdist_rpm, \
install_clib
from numpy.distutils.misc_util import is_sequence, is_string
numpy_cmdclass = {'build': build.build,
'build_src': build_src.build_src,
'build_scripts': build_scripts.build_scripts,
'config_cc': config_compiler.config_cc,
'config_fc': config_compiler.config_fc,
'config': config.config,
'build_ext': build_ext.build_ext,
'build_py': build_py.build_py,
'build_clib': build_clib.build_clib,
'sdist': sdist.sdist,
'install_data': install_data.install_data,
'install_headers': install_headers.install_headers,
'install_clib': install_clib.install_clib,
'install': install.install,
'bdist_rpm': bdist_rpm.bdist_rpm,
}
if have_setuptools:
# Use our own versions of develop and egg_info to ensure that build_src is
# handled appropriately.
from numpy.distutils.command import develop, egg_info
numpy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg
numpy_cmdclass['develop'] = develop.develop
numpy_cmdclass['easy_install'] = easy_install.easy_install
numpy_cmdclass['egg_info'] = egg_info.egg_info
def _dict_append(d, **kws):
for k, v in kws.items():
if k not in d:
d[k] = v
continue
dv = d[k]
if isinstance(dv, tuple):
d[k] = dv + tuple(v)
elif isinstance(dv, list):
d[k] = dv + list(v)
elif isinstance(dv, dict):
_dict_append(dv, **v)
elif is_string(dv):
d[k] = dv + v
else:
raise TypeError(repr(type(dv)))
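# A small illustration (editor's sketch) of the merge rules above: lists
# and tuples are concatenated, dicts are merged recursively, and strings
# are concatenated; any other value type raises TypeError.
#
#     >>> d = {'libraries': ['m'], 'package_dir': {'a': 'src/a'}}
#     >>> _dict_append(d, libraries=['npymath'], package_dir={'b': 'src/b'})
#     >>> d['libraries']
#     ['m', 'npymath']
#     >>> sorted(d['package_dir'])
#     ['a', 'b']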
def _command_line_ok(_cache=None):
""" Return True if command line does not contain any
help or display requests.
"""
if _cache:
return _cache[0]
elif _cache is None:
_cache = []
ok = True
display_opts = ['--'+n for n in Distribution.display_option_names]
for o in Distribution.display_options:
if o[1]:
display_opts.append('-'+o[1])
for arg in sys.argv:
if arg.startswith('--help') or arg=='-h' or arg in display_opts:
ok = False
break
_cache.append(ok)
return ok
def get_distribution(always=False):
dist = distutils.core._setup_distribution
# XXX Hack to get numpy installable with easy_install.
    # The problem is easy_install runs its own setup(), which
# sets up distutils.core._setup_distribution. However,
# when our setup() runs, that gets overwritten and lost.
# We can't use isinstance, as the DistributionWithoutHelpCommands
# class is local to a function in setuptools.command.easy_install
if dist is not None and \
'DistributionWithoutHelpCommands' in repr(dist):
dist = None
if always and dist is None:
dist = NumpyDistribution()
return dist
def setup(**attr):
cmdclass = numpy_cmdclass.copy()
new_attr = attr.copy()
if 'cmdclass' in new_attr:
cmdclass.update(new_attr['cmdclass'])
new_attr['cmdclass'] = cmdclass
if 'configuration' in new_attr:
        # Avoid calling configuration() if there are any errors
        # or help requests on the command line.
configuration = new_attr.pop('configuration')
old_dist = distutils.core._setup_distribution
old_stop = distutils.core._setup_stop_after
distutils.core._setup_distribution = None
distutils.core._setup_stop_after = "commandline"
try:
dist = setup(**new_attr)
finally:
distutils.core._setup_distribution = old_dist
distutils.core._setup_stop_after = old_stop
if dist.help or not _command_line_ok():
# probably displayed help, skip running any commands
return dist
# create setup dictionary and append to new_attr
config = configuration()
if hasattr(config, 'todict'):
config = config.todict()
_dict_append(new_attr, **config)
# Move extension source libraries to libraries
libraries = []
for ext in new_attr.get('ext_modules', []):
new_libraries = []
for item in ext.libraries:
if is_sequence(item):
lib_name, build_info = item
_check_append_ext_library(libraries, lib_name, build_info)
new_libraries.append(lib_name)
elif is_string(item):
new_libraries.append(item)
else:
raise TypeError("invalid description of extension module "
"library %r" % (item,))
ext.libraries = new_libraries
if libraries:
if 'libraries' not in new_attr:
new_attr['libraries'] = []
for item in libraries:
_check_append_library(new_attr['libraries'], item)
# sources in ext_modules or libraries may contain header files
if ('ext_modules' in new_attr or 'libraries' in new_attr) \
and 'headers' not in new_attr:
new_attr['headers'] = []
# Use our custom NumpyDistribution class instead of distutils' one
new_attr['distclass'] = NumpyDistribution
return old_setup(**new_attr)
def _check_append_library(libraries, item):
for libitem in libraries:
if is_sequence(libitem):
if is_sequence(item):
if item[0]==libitem[0]:
if item[1] is libitem[1]:
return
warnings.warn("[0] libraries list contains %r with"
" different build_info" % (item[0],),
stacklevel=2)
break
else:
if item==libitem[0]:
warnings.warn("[1] libraries list contains %r with"
" no build_info" % (item[0],),
stacklevel=2)
break
else:
if is_sequence(item):
if item[0]==libitem:
warnings.warn("[2] libraries list contains %r with"
" no build_info" % (item[0],),
stacklevel=2)
break
else:
if item==libitem:
return
libraries.append(item)
def _check_append_ext_library(libraries, lib_name, build_info):
for item in libraries:
if is_sequence(item):
if item[0]==lib_name:
if item[1] is build_info:
return
warnings.warn("[3] libraries list contains %r with"
" different build_info" % (lib_name,),
stacklevel=2)
break
elif item==lib_name:
warnings.warn("[4] libraries list contains %r with"
" no build_info" % (lib_name,),
stacklevel=2)
break
libraries.append((lib_name, build_info))
| 8,173 | Python | 36.842592 | 78 | 0.558913 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/setup.py | #!/usr/bin/env python3
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('distutils', parent_package, top_path)
config.add_subpackage('command')
config.add_subpackage('fcompiler')
config.add_subpackage('tests')
config.add_data_files('site.cfg')
config.add_data_files('mingw/gfortran_vs2003_hack.c')
config.add_data_dir('checks')
config.add_data_files('*.pyi')
config.make_config_py()
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
| 634 | Python | 34.277776 | 65 | 0.695584 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/misc_util.py | import os
import re
import sys
import copy
import glob
import atexit
import tempfile
import subprocess
import shutil
import multiprocessing
import textwrap
import importlib.util
from threading import local as tlocal
from functools import reduce
import distutils
from distutils.errors import DistutilsError
# stores temporary directory of each thread to only create one per thread
_tdata = tlocal()
# store all created temporary directories so they can be deleted on exit
_tmpdirs = []
def clean_up_temporary_directory():
if _tmpdirs is not None:
for d in _tmpdirs:
try:
shutil.rmtree(d)
except OSError:
pass
atexit.register(clean_up_temporary_directory)
__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
'dict_append', 'appendpath', 'generate_config_py',
'get_cmd', 'allpath', 'get_mathlibs',
'terminal_has_colors', 'red_text', 'green_text', 'yellow_text',
'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings',
'has_f_sources', 'has_cxx_sources', 'filter_sources',
'get_dependencies', 'is_local_src_dir', 'get_ext_source_files',
'get_script_files', 'get_lib_source_files', 'get_data_files',
'dot_join', 'get_frame', 'minrelpath', 'njoin',
'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',
'get_build_architecture', 'get_info', 'get_pkg_info',
'get_num_build_jobs', 'sanitize_cxx_flags',
'exec_mod_from_location']
class InstallableLib:
"""
Container to hold information on an installable library.
Parameters
----------
name : str
Name of the installed library.
build_info : dict
Dictionary holding build information.
target_dir : str
Absolute path specifying where to install the library.
See Also
--------
Configuration.add_installed_library
Notes
-----
The three parameters are stored as attributes with the same names.
"""
def __init__(self, name, build_info, target_dir):
self.name = name
self.build_info = build_info
self.target_dir = target_dir
def get_num_build_jobs():
"""
Get number of parallel build jobs set by the --parallel command line
argument of setup.py
If the command did not receive a setting the environment variable
NPY_NUM_BUILD_JOBS is checked. If that is unset, return the number of
processors on the system, with a maximum of 8 (to prevent
    overloading the system if there are a lot of CPUs).
Returns
-------
out : int
number of parallel jobs that can be run
"""
from numpy.distutils.core import get_distribution
try:
cpu_count = len(os.sched_getaffinity(0))
except AttributeError:
cpu_count = multiprocessing.cpu_count()
cpu_count = min(cpu_count, 8)
envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", cpu_count))
dist = get_distribution()
# may be None during configuration
if dist is None:
return envjobs
# any of these three may have the job set, take the largest
cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None),
getattr(dist.get_command_obj('build_ext'), 'parallel', None),
getattr(dist.get_command_obj('build_clib'), 'parallel', None))
if all(x is None for x in cmdattr):
return envjobs
else:
return max(x for x in cmdattr if x is not None)
def quote_args(args):
"""Quote list of arguments.
.. deprecated:: 1.22.
"""
import warnings
warnings.warn('"quote_args" is deprecated.',
DeprecationWarning, stacklevel=2)
    # don't use _nt_quote_args as it does not check whether
    # args items already have quotes or not.
args = list(args)
for i in range(len(args)):
a = args[i]
if ' ' in a and a[0] not in '"\'':
args[i] = '"%s"' % (a)
return args
def allpath(name):
"Convert a /-separated pathname to one using the OS's path separator."
split = name.split('/')
return os.path.join(*split)
def rel_path(path, parent_path):
"""Return path relative to parent_path."""
# Use realpath to avoid issues with symlinked dirs (see gh-7707)
pd = os.path.realpath(os.path.abspath(parent_path))
apath = os.path.realpath(os.path.abspath(path))
if len(apath) < len(pd):
return path
if apath == pd:
return ''
if pd == apath[:len(pd)]:
assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)]))
path = apath[len(pd)+1:]
return path
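# For instance (editor's sketch, assuming a POSIX host with no symlinks in
# the paths involved):
#
#     >>> rel_path('/usr/src/numpy/core', '/usr/src')
#     'numpy/core'
#     >>> rel_path('/etc', '/usr/src')    # not below parent_path: unchanged
#     '/etc'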
def get_path_from_frame(frame, parent_path=None):
"""Return path of the module given a frame object from the call stack.
Returned path is relative to parent_path when given,
otherwise it is absolute path.
"""
# First, try to find if the file name is in the frame.
try:
caller_file = eval('__file__', frame.f_globals, frame.f_locals)
d = os.path.dirname(os.path.abspath(caller_file))
except NameError:
# __file__ is not defined, so let's try __name__. We try this second
# because setuptools spoofs __name__ to be '__main__' even though
# sys.modules['__main__'] might be something else, like easy_install(1).
caller_name = eval('__name__', frame.f_globals, frame.f_locals)
__import__(caller_name)
mod = sys.modules[caller_name]
if hasattr(mod, '__file__'):
d = os.path.dirname(os.path.abspath(mod.__file__))
else:
# we're probably running setup.py as execfile("setup.py")
# (likely we're building an egg)
d = os.path.abspath('.')
if parent_path is not None:
d = rel_path(d, parent_path)
return d or '.'
def njoin(*path):
"""Join two or more pathname components +
- convert a /-separated pathname to one using the OS's path separator.
- resolve `..` and `.` from path.
Either passing n arguments as in njoin('a','b'), or a sequence
of n names as in njoin(['a','b']) is handled, or a mixture of such arguments.
"""
paths = []
for p in path:
if is_sequence(p):
# njoin(['a', 'b'], 'c')
paths.append(njoin(*p))
else:
assert is_string(p)
paths.append(p)
path = paths
if not path:
# njoin()
joined = ''
else:
# njoin('a', 'b')
joined = os.path.join(*path)
if os.path.sep != '/':
joined = joined.replace('/', os.path.sep)
return minrelpath(joined)
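# Examples (editor's sketch; output shown with POSIX separators):
#
#     >>> njoin('a', ['b', 'c'], '..', 'd')
#     'a/b/d'
#     >>> njoin('a/b', './c')
#     'a/b/c'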
def get_mathlibs(path=None):
"""Return the MATHLIB line from numpyconfig.h
"""
if path is not None:
config_file = os.path.join(path, '_numpyconfig.h')
else:
# Look for the file in each of the numpy include directories.
dirs = get_numpy_include_dirs()
for path in dirs:
fn = os.path.join(path, '_numpyconfig.h')
if os.path.exists(fn):
config_file = fn
break
else:
raise DistutilsError('_numpyconfig.h not found in numpy include '
'dirs %r' % (dirs,))
with open(config_file) as fid:
mathlibs = []
s = '#define MATHLIB'
for line in fid:
if line.startswith(s):
value = line[len(s):].strip()
if value:
mathlibs.extend(value.split(','))
return mathlibs
def minrelpath(path):
"""Resolve `..` and '.' from path.
"""
if not is_string(path):
return path
if '.' not in path:
return path
l = path.split(os.sep)
while l:
try:
i = l.index('.', 1)
except ValueError:
break
del l[i]
j = 1
while l:
try:
i = l.index('..', j)
except ValueError:
break
if l[i-1]=='..':
j += 1
else:
del l[i], l[i-1]
j = 1
if not l:
return ''
return os.sep.join(l)
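# For example (editor's sketch, POSIX separators):
#
#     >>> minrelpath('a/b/../c/./d')
#     'a/c/d'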
def sorted_glob(fileglob):
"""sorts output of python glob for https://bugs.python.org/issue30461
to allow extensions to have reproducible build results"""
return sorted(glob.glob(fileglob))
def _fix_paths(paths, local_path, include_non_existing):
assert is_sequence(paths), repr(type(paths))
new_paths = []
assert not is_string(paths), repr(paths)
for n in paths:
if is_string(n):
if '*' in n or '?' in n:
p = sorted_glob(n)
p2 = sorted_glob(njoin(local_path, n))
if p2:
new_paths.extend(p2)
elif p:
new_paths.extend(p)
else:
if include_non_existing:
new_paths.append(n)
print('could not resolve pattern in %r: %r' %
(local_path, n))
else:
n2 = njoin(local_path, n)
if os.path.exists(n2):
new_paths.append(n2)
else:
if os.path.exists(n):
new_paths.append(n)
elif include_non_existing:
new_paths.append(n)
if not os.path.exists(n):
print('non-existing path in %r: %r' %
(local_path, n))
elif is_sequence(n):
new_paths.extend(_fix_paths(n, local_path, include_non_existing))
else:
new_paths.append(n)
return [minrelpath(p) for p in new_paths]
def gpaths(paths, local_path='', include_non_existing=True):
"""Apply glob to paths and prepend local_path if needed.
"""
if is_string(paths):
paths = (paths,)
return _fix_paths(paths, local_path, include_non_existing)
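# Example (editor's sketch; the directory layout is hypothetical): glob
# patterns are tried both as given and relative to local_path, and the
# local_path matches win when both exist.
#
#     >>> gpaths('src/*.c', local_path='mypkg')           # doctest: +SKIP
#     ['mypkg/src/spam.c', 'mypkg/src/eggs.c']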
def make_temp_file(suffix='', prefix='', text=True):
if not hasattr(_tdata, 'tempdir'):
_tdata.tempdir = tempfile.mkdtemp()
_tmpdirs.append(_tdata.tempdir)
fid, name = tempfile.mkstemp(suffix=suffix,
prefix=prefix,
dir=_tdata.tempdir,
text=text)
fo = os.fdopen(fid, 'w')
return fo, name
# Hooks for colored terminal output.
# See also https://web.archive.org/web/20100314204946/http://www.livinglogic.de/Python/ansistyle
def terminal_has_colors():
if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ:
# Avoid importing curses that causes illegal operation
# with a message:
# PYTHON2 caused an invalid page fault in
# module CYGNURSES7.DLL as 015f:18bbfc28
# Details: Python 2.3.3 [GCC 3.3.1 (cygming special)]
# ssh to Win32 machine from debian
# curses.version is 2.2
# CYGWIN_98-4.10, release 1.5.7(0.109/3/2))
return 0
if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
try:
import curses
curses.setupterm()
if (curses.tigetnum("colors") >= 0
and curses.tigetnum("pairs") >= 0
and ((curses.tigetstr("setf") is not None
and curses.tigetstr("setb") is not None)
or (curses.tigetstr("setaf") is not None
and curses.tigetstr("setab") is not None)
or curses.tigetstr("scp") is not None)):
return 1
except Exception:
pass
return 0
if terminal_has_colors():
_colour_codes = dict(black=0, red=1, green=2, yellow=3,
blue=4, magenta=5, cyan=6, white=7, default=9)
def colour_text(s, fg=None, bg=None, bold=False):
seq = []
if bold:
seq.append('1')
if fg:
fgcode = 30 + _colour_codes.get(fg.lower(), 0)
seq.append(str(fgcode))
if bg:
            bgcode = 40 + _colour_codes.get(bg.lower(), 7)
seq.append(str(bgcode))
if seq:
return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s)
else:
return s
else:
def colour_text(s, fg=None, bg=None):
return s
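# Either way (editor's note), callers get back a printable string; on a
# colour-capable terminal the text is wrapped in ANSI escape codes:
#
#     >>> colour_text('ok', 'green')      # doctest: +SKIP
#     '\x1b[32mok\x1b[0m'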
def default_text(s):
return colour_text(s, 'default')
def red_text(s):
return colour_text(s, 'red')
def green_text(s):
return colour_text(s, 'green')
def yellow_text(s):
return colour_text(s, 'yellow')
def cyan_text(s):
return colour_text(s, 'cyan')
def blue_text(s):
return colour_text(s, 'blue')
#########################
def cyg2win32(path: str) -> str:
"""Convert a path from Cygwin-native to Windows-native.
Uses the cygpath utility (part of the Base install) to do the
actual conversion. Falls back to returning the original path if
this fails.
Handles the default ``/cygdrive`` mount prefix as well as the
``/proc/cygdrive`` portable prefix, custom cygdrive prefixes such
as ``/`` or ``/mnt``, and absolute paths such as ``/usr/src/`` or
``/home/username``
Parameters
----------
path : str
The path to convert
Returns
-------
converted_path : str
The converted path
Notes
-----
Documentation for cygpath utility:
https://cygwin.com/cygwin-ug-net/cygpath.html
Documentation for the C function it wraps:
https://cygwin.com/cygwin-api/func-cygwin-conv-path.html
"""
if sys.platform != "cygwin":
return path
return subprocess.check_output(
["/usr/bin/cygpath", "--windows", path], universal_newlines=True
)
def mingw32():
"""Return true when using mingw32 environment.
"""
if sys.platform=='win32':
if os.environ.get('OSTYPE', '')=='msys':
return True
if os.environ.get('MSYSTEM', '')=='MINGW32':
return True
return False
def msvc_runtime_version():
"Return version of MSVC runtime library, as defined by __MSC_VER__ macro"
msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
msc_ver = int(sys.version[msc_pos+6:msc_pos+10])
else:
msc_ver = None
return msc_ver
def msvc_runtime_library():
"Return name of MSVC runtime library if Python was built with MSVC >= 7"
    ver = msvc_runtime_major()
if ver:
if ver < 140:
return "msvcr%i" % ver
else:
return "vcruntime%i" % ver
else:
return None
def msvc_runtime_major():
"Return major version of MSVC runtime coded like get_build_msvc_version"
major = {1300: 70, # MSVC 7.0
1310: 71, # MSVC 7.1
1400: 80, # MSVC 8
1500: 90, # MSVC 9 (aka 2008)
1600: 100, # MSVC 10 (aka 2010)
1900: 140, # MSVC 14 (aka 2015)
}.get(msvc_runtime_version(), None)
return major
#########################
#XXX need support for .C that is also C++
cxx_ext_match = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match
fortran_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f)\Z', re.I).match
f90_ext_match = re.compile(r'.*\.(f90|f95)\Z', re.I).match
f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)', re.I).match
def _get_f90_modules(source):
"""Return a list of Fortran f90 module names that
given source file defines.
"""
if not f90_ext_match(source):
return []
modules = []
with open(source, 'r') as f:
for line in f:
m = f90_module_name_match(line)
if m:
name = m.group('name')
modules.append(name)
# break # XXX can we assume that there is one module per file?
return modules
def is_string(s):
return isinstance(s, str)
def all_strings(lst):
"""Return True if all items in lst are string objects. """
for item in lst:
if not is_string(item):
return False
return True
def is_sequence(seq):
if is_string(seq):
return False
try:
len(seq)
except Exception:
return False
return True
def is_glob_pattern(s):
return is_string(s) and ('*' in s or '?' in s)
def as_list(seq):
if is_sequence(seq):
return list(seq)
else:
return [seq]
def get_language(sources):
# not used in numpy/scipy packages, use build_ext.detect_language instead
"""Determine language value (c,f77,f90) from sources """
language = None
for source in sources:
if isinstance(source, str):
if f90_ext_match(source):
language = 'f90'
break
elif fortran_ext_match(source):
language = 'f77'
return language
def has_f_sources(sources):
"""Return True if sources contains Fortran files """
for source in sources:
if fortran_ext_match(source):
return True
return False
def has_cxx_sources(sources):
"""Return True if sources contains C++ files """
for source in sources:
if cxx_ext_match(source):
return True
return False
def filter_sources(sources):
"""Return four lists of filenames containing
C, C++, Fortran, and Fortran 90 module sources,
respectively.
"""
c_sources = []
cxx_sources = []
f_sources = []
fmodule_sources = []
for source in sources:
if fortran_ext_match(source):
modules = _get_f90_modules(source)
if modules:
fmodule_sources.append(source)
else:
f_sources.append(source)
elif cxx_ext_match(source):
cxx_sources.append(source)
else:
c_sources.append(source)
return c_sources, cxx_sources, f_sources, fmodule_sources
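# For instance (editor's sketch): note that .f90/.f95 files are opened and
# scanned for ``module`` statements, so they must exist on disk, while the
# other classifications work on the file name alone.
#
#     >>> filter_sources(['a.c', 'b.cxx', 'c.f'])
#     (['a.c'], ['b.cxx'], ['c.f'], [])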
def _get_headers(directory_list):
# get *.h files from list of directories
headers = []
for d in directory_list:
head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files??
headers.extend(head)
return headers
def _get_directories(list_of_sources):
# get unique directories from list of sources.
direcs = []
for f in list_of_sources:
d = os.path.split(f)
if d[0] != '' and not d[0] in direcs:
direcs.append(d[0])
return direcs
def _commandline_dep_string(cc_args, extra_postargs, pp_opts):
"""
Return commandline representation used to determine if a file needs
to be recompiled
"""
cmdline = 'commandline: '
cmdline += ' '.join(cc_args)
cmdline += ' '.join(extra_postargs)
cmdline += ' '.join(pp_opts) + '\n'
return cmdline
def get_dependencies(sources):
#XXX scan sources for include statements
return _get_headers(_get_directories(sources))
def is_local_src_dir(directory):
"""Return true if directory is local directory.
"""
if not is_string(directory):
return False
abs_dir = os.path.abspath(directory)
c = os.path.commonprefix([os.getcwd(), abs_dir])
new_dir = abs_dir[len(c):].split(os.sep)
if new_dir and not new_dir[0]:
new_dir = new_dir[1:]
if new_dir and new_dir[0]=='build':
return False
new_dir = os.sep.join(new_dir)
return os.path.isdir(new_dir)
def general_source_files(top_path):
pruned_directories = {'CVS':1, '.svn':1, 'build':1}
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for f in filenames:
if not prune_file_pat.search(f):
yield os.path.join(dirpath, f)
def general_source_directories_files(top_path):
"""Return a directory name relative to top_path and
files contained.
"""
pruned_directories = ['CVS', '.svn', 'build']
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for d in dirnames:
dpath = os.path.join(dirpath, d)
rpath = rel_path(dpath, top_path)
files = []
for f in os.listdir(dpath):
fn = os.path.join(dpath, f)
if os.path.isfile(fn) and not prune_file_pat.search(fn):
files.append(fn)
yield rpath, files
dpath = top_path
rpath = rel_path(dpath, top_path)
filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \
if not prune_file_pat.search(f)]
files = [f for f in filenames if os.path.isfile(f)]
yield rpath, files
def get_ext_source_files(ext):
# Get sources and any include files in the same directory.
filenames = []
sources = [_m for _m in ext.sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
for d in ext.depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.append(d)
return filenames
def get_script_files(scripts):
scripts = [_m for _m in scripts if is_string(_m)]
return scripts
def get_lib_source_files(lib):
filenames = []
sources = lib[1].get('sources', [])
sources = [_m for _m in sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
depends = lib[1].get('depends', [])
for d in depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.append(d)
return filenames
def get_shared_lib_extension(is_python_ext=False):
"""Return the correct file extension for shared libraries.
Parameters
----------
is_python_ext : bool, optional
Whether the shared library is a Python extension. Default is False.
Returns
-------
so_ext : str
The shared library extension.
Notes
-----
For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X,
and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on
POSIX systems according to PEP 3149.
"""
confvars = distutils.sysconfig.get_config_vars()
so_ext = confvars.get('EXT_SUFFIX', '')
if not is_python_ext:
# hardcode known values, config vars (including SHLIB_SUFFIX) are
# unreliable (see #3182)
# darwin, windows and debug linux are wrong in 3.3.1 and older
if (sys.platform.startswith('linux') or
sys.platform.startswith('gnukfreebsd')):
so_ext = '.so'
elif sys.platform.startswith('darwin'):
so_ext = '.dylib'
elif sys.platform.startswith('win'):
so_ext = '.dll'
else:
# fall back to config vars for unknown platforms
# fix long extension for Python >=3.2, see PEP 3149.
if 'SOABI' in confvars:
# Does nothing unless SOABI config var exists
so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1)
return so_ext
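# For example (editor's sketch; the exact tagged suffix depends on the
# interpreter, the value shown is a plausible CPython one):
#
#     >>> get_shared_lib_extension(is_python_ext=True)    # doctest: +SKIP
#     '.cpython-39-x86_64-linux-gnu.so'
#     >>> get_shared_lib_extension()                      # doctest: +SKIP
#     '.so'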
def get_data_files(data):
if is_string(data):
return [data]
sources = data[1]
filenames = []
for s in sources:
if hasattr(s, '__call__'):
continue
if is_local_src_dir(s):
filenames.extend(list(general_source_files(s)))
elif is_string(s):
if os.path.isfile(s):
filenames.append(s)
else:
print('Not existing data file:', s)
else:
raise TypeError(repr(s))
return filenames
def dot_join(*args):
return '.'.join([a for a in args if a])
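# Empty components are dropped before joining (editor's note):
#
#     >>> dot_join('numpy', '', 'distutils')
#     'numpy.distutils'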
def get_frame(level=0):
"""Return frame object from call stack with given level.
"""
try:
return sys._getframe(level+1)
except AttributeError:
frame = sys.exc_info()[2].tb_frame
for _ in range(level+1):
frame = frame.f_back
return frame
######################
class Configuration:
_list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
'libraries', 'headers', 'scripts', 'py_modules',
'installed_libraries', 'define_macros']
_dict_keys = ['package_dir', 'installed_pkg_config']
_extra_keys = ['name', 'version']
numpy_include_dirs = []
def __init__(self,
package_name=None,
parent_name=None,
top_path=None,
package_path=None,
caller_level=1,
setup_name='setup.py',
**attrs):
"""Construct configuration instance of a package.
package_name -- name of the package
Ex.: 'distutils'
parent_name -- name of the parent package
Ex.: 'numpy'
top_path -- directory of the toplevel package
Ex.: the directory where the numpy package source sits
package_path -- directory of package. Will be computed by magic from the
directory of the caller module if not specified
Ex.: the directory where numpy.distutils is
caller_level -- frame level to caller namespace, internal parameter.
"""
self.name = dot_join(parent_name, package_name)
self.version = None
caller_frame = get_frame(caller_level)
self.local_path = get_path_from_frame(caller_frame, top_path)
# local_path -- directory of a file (usually setup.py) that
# defines a configuration() function.
if top_path is None:
top_path = self.local_path
self.local_path = ''
if package_path is None:
package_path = self.local_path
elif os.path.isdir(njoin(self.local_path, package_path)):
package_path = njoin(self.local_path, package_path)
if not os.path.isdir(package_path or '.'):
raise ValueError("%r is not a directory" % (package_path,))
self.top_path = top_path
self.package_path = package_path
# this is the relative path in the installed package
self.path_in_package = os.path.join(*self.name.split('.'))
self.list_keys = self._list_keys[:]
self.dict_keys = self._dict_keys[:]
for n in self.list_keys:
v = copy.copy(attrs.get(n, []))
setattr(self, n, as_list(v))
for n in self.dict_keys:
v = copy.copy(attrs.get(n, {}))
setattr(self, n, v)
known_keys = self.list_keys + self.dict_keys
self.extra_keys = self._extra_keys[:]
for n in attrs.keys():
if n in known_keys:
continue
a = attrs[n]
setattr(self, n, a)
if isinstance(a, list):
self.list_keys.append(n)
elif isinstance(a, dict):
self.dict_keys.append(n)
else:
self.extra_keys.append(n)
if os.path.exists(njoin(package_path, '__init__.py')):
self.packages.append(self.name)
self.package_dir[self.name] = package_path
self.options = dict(
ignore_setup_xxx_py = False,
assume_default_configuration = False,
delegate_options_to_subpackages = False,
quiet = False,
)
caller_instance = None
for i in range(1, 3):
try:
f = get_frame(i)
except ValueError:
break
try:
caller_instance = eval('self', f.f_globals, f.f_locals)
break
except NameError:
pass
if isinstance(caller_instance, self.__class__):
if caller_instance.options['delegate_options_to_subpackages']:
self.set_options(**caller_instance.options)
self.setup_name = setup_name
def todict(self):
"""
Return a dictionary compatible with the keyword arguments of distutils
setup function.
Examples
--------
>>> setup(**config.todict()) #doctest: +SKIP
"""
self._optimize_data_files()
d = {}
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for n in known_keys:
a = getattr(self, n)
if a:
d[n] = a
return d
def info(self, message):
if not self.options['quiet']:
print(message)
def warn(self, message):
sys.stderr.write('Warning: %s\n' % (message,))
def set_options(self, **options):
"""
Configure Configuration instance.
The following options are available:
- ignore_setup_xxx_py
- assume_default_configuration
- delegate_options_to_subpackages
- quiet
"""
for key, value in options.items():
if key in self.options:
self.options[key] = value
else:
raise ValueError('Unknown option: '+key)
def get_distribution(self):
"""Return the distutils distribution object for self."""
from numpy.distutils.core import get_distribution
return get_distribution()
def _wildcard_get_subpackage(self, subpackage_name,
parent_name,
caller_level = 1):
l = subpackage_name.split('.')
subpackage_path = njoin([self.local_path]+l)
dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)]
config_list = []
for d in dirs:
if not os.path.isfile(njoin(d, '__init__.py')):
continue
if 'build' in d.split(os.sep):
continue
n = '.'.join(d.split(os.sep)[-len(l):])
c = self.get_subpackage(n,
parent_name = parent_name,
caller_level = caller_level+1)
config_list.extend(c)
return config_list
def _get_configuration_from_setup_py(self, setup_py,
subpackage_name,
subpackage_path,
parent_name,
caller_level = 1):
# In case setup_py imports local modules:
sys.path.insert(0, os.path.dirname(setup_py))
try:
setup_name = os.path.splitext(os.path.basename(setup_py))[0]
n = dot_join(self.name, subpackage_name, setup_name)
setup_module = exec_mod_from_location(
'_'.join(n.split('.')), setup_py)
if not hasattr(setup_module, 'configuration'):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
'(%s does not define configuration())'\
% (setup_module))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
caller_level = caller_level + 1)
else:
pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1]))
args = (pn,)
if setup_module.configuration.__code__.co_argcount > 1:
args = args + (self.top_path,)
config = setup_module.configuration(*args)
if config.name!=dot_join(parent_name, subpackage_name):
self.warn('Subpackage %r configuration returned as %r' % \
(dot_join(parent_name, subpackage_name), config.name))
finally:
del sys.path[0]
return config
def get_subpackage(self,subpackage_name,
subpackage_path=None,
parent_name=None,
caller_level = 1):
"""Return list of subpackage configurations.
Parameters
----------
subpackage_name : str or None
Name of the subpackage to get the configuration. '*' in
subpackage_name is handled as a wildcard.
subpackage_path : str
If None, then the path is assumed to be the local path plus the
subpackage_name. If a setup.py file is not found in the
subpackage_path, then a default configuration is used.
parent_name : str
Parent name.
"""
if subpackage_name is None:
if subpackage_path is None:
raise ValueError(
"either subpackage_name or subpackage_path must be specified")
subpackage_name = os.path.basename(subpackage_path)
# handle wildcards
l = subpackage_name.split('.')
if subpackage_path is None and '*' in subpackage_name:
return self._wildcard_get_subpackage(subpackage_name,
parent_name,
caller_level = caller_level+1)
assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name))
if subpackage_path is None:
subpackage_path = njoin([self.local_path] + l)
else:
subpackage_path = njoin([subpackage_path] + l[:-1])
subpackage_path = self.paths([subpackage_path])[0]
setup_py = njoin(subpackage_path, self.setup_name)
if not self.options['ignore_setup_xxx_py']:
if not os.path.isfile(setup_py):
setup_py = njoin(subpackage_path,
'setup_%s.py' % (subpackage_name))
if not os.path.isfile(setup_py):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
'(%s/{setup_%s,setup}.py was not found)' \
% (os.path.dirname(setup_py), subpackage_name))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
caller_level = caller_level+1)
else:
config = self._get_configuration_from_setup_py(
setup_py,
subpackage_name,
subpackage_path,
parent_name,
caller_level = caller_level + 1)
if config:
return [config]
else:
return []
def add_subpackage(self,subpackage_name,
subpackage_path=None,
standalone = False):
"""Add a sub-package to the current Configuration instance.
This is useful in a setup.py script for adding sub-packages to a
package.
Parameters
----------
subpackage_name : str
name of the subpackage
subpackage_path : str
            if given, the subpackage path such that the subpackage is in
            subpackage_path / subpackage_name. If None, the subpackage is
            assumed to be located in the local path / subpackage_name.
standalone : bool
"""
if standalone:
parent_name = None
else:
parent_name = self.name
config_list = self.get_subpackage(subpackage_name, subpackage_path,
parent_name = parent_name,
caller_level = 2)
if not config_list:
self.warn('No configuration returned, assuming unavailable.')
for config in config_list:
d = config
if isinstance(config, Configuration):
d = config.todict()
assert isinstance(d, dict), repr(type(d))
self.info('Appending %s configuration to %s' \
% (d.get('name'), self.name))
self.dict_append(**d)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a subpackage '+ subpackage_name)
def add_data_dir(self, data_path):
"""Recursively add files under data_path to data_files list.
Recursively add files under data_path to the list of data_files to be
installed (and distributed). The data_path can be either a relative
path-name, or an absolute path-name, or a 2-tuple where the first
argument shows where in the install directory the data directory
should be installed to.
Parameters
----------
data_path : seq or str
Argument can be either
* 2-sequence (<datadir suffix>, <path to data directory>)
* path to data directory where python datadir suffix defaults
to package dir.
Notes
-----
Rules for installation paths::
foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
(gun, foo/bar) -> parent/gun
foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
(gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
(gun/*, foo/*) -> parent/gun/a, parent/gun/b
/foo/bar -> (bar, /foo/bar) -> parent/bar
(gun, /foo/bar) -> parent/gun
(fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
Examples
--------
For example suppose the source directory contains fun/foo.dat and
fun/bar/car.dat:
>>> self.add_data_dir('fun') #doctest: +SKIP
>>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP
>>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP
Will install data-files to the locations::
<package install directory>/
fun/
foo.dat
bar/
car.dat
sun/
foo.dat
bar/
car.dat
gun/
foo.dat
car.dat
"""
if is_sequence(data_path):
d, data_path = data_path
else:
d = None
if is_sequence(data_path):
[self.add_data_dir((d, p)) for p in data_path]
return
if not is_string(data_path):
raise TypeError("not a string: %r" % (data_path,))
if d is None:
if os.path.isabs(data_path):
return self.add_data_dir((os.path.basename(data_path), data_path))
return self.add_data_dir((data_path, data_path))
paths = self.paths(data_path, include_non_existing=False)
if is_glob_pattern(data_path):
if is_glob_pattern(d):
pattern_list = allpath(d).split(os.sep)
pattern_list.reverse()
# /a/*//b/ -> /a/*/b
rl = list(range(len(pattern_list)-1)); rl.reverse()
for i in rl:
if not pattern_list[i]:
del pattern_list[i]
#
for path in paths:
if not os.path.isdir(path):
print('Not a directory, skipping', path)
continue
rpath = rel_path(path, self.local_path)
path_list = rpath.split(os.sep)
path_list.reverse()
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
if i>=len(path_list):
raise ValueError('cannot fill pattern %r with %r' \
% (d, path))
target_list.append(path_list[i])
else:
assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath))
target_list.append(s)
i += 1
if path_list[i:]:
self.warn('mismatch of pattern_list=%s and path_list=%s'\
% (pattern_list, path_list))
target_list.reverse()
self.add_data_dir((os.sep.join(target_list), path))
else:
for path in paths:
self.add_data_dir((d, path))
return
assert not is_glob_pattern(d), repr(d)
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
for path in paths:
for d1, f in list(general_source_directories_files(path)):
target_path = os.path.join(self.path_in_package, d, d1)
data_files.append((target_path, f))
def _optimize_data_files(self):
data_dict = {}
for p, files in self.data_files:
if p not in data_dict:
data_dict[p] = set()
for f in files:
data_dict[p].add(f)
self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()]
def add_data_files(self,*files):
"""Add data files to configuration data_files.
Parameters
----------
files : sequence
Argument(s) can be either
* 2-sequence (<datadir prefix>,<path to data file(s)>)
* paths to data files where python datadir prefix defaults
to package dir.
Notes
-----
The form of each element of the files sequence is very flexible
allowing many combinations of where to get the files from the package
and where they should ultimately be installed on the system. The most
basic usage is for an element of the files argument sequence to be a
simple filename. This will cause that file from the local path to be
installed to the installation path of the self.name package (package
path). The file argument can also be a relative path in which case the
entire relative path will be installed into the package directory.
Finally, the file can be an absolute path name in which case the file
will be found at the absolute path name but installed to the package
path.
This basic behavior can be augmented by passing a 2-tuple in as the
file argument. The first element of the tuple should specify the
relative path (under the package install directory) where the
remaining sequence of files should be installed to (it has nothing to
do with the file-names in the source distribution). The second element
of the tuple is the sequence of files that should be installed. The
files in this sequence can be filenames, relative paths, or absolute
paths. For absolute paths the file will be installed in the top-level
package installation directory (regardless of the first argument).
Filenames and relative path names will be installed in the package
install directory under the path name given as the first element of
the tuple.
Rules for installation paths:
#. file.txt -> (., file.txt)-> parent/file.txt
#. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
#. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
#. ``*``.txt -> parent/a.txt, parent/b.txt
        #. foo/``*``.txt -> parent/foo/a.txt, parent/foo/b.txt
#. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt
#. (sun, file.txt) -> parent/sun/file.txt
#. (sun, bar/file.txt) -> parent/sun/file.txt
#. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
#. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt
An additional feature is that the path to a data-file can actually be
a function that takes no arguments and returns the actual path(s) to
the data-files. This is useful when the data files are generated while
building the package.
Examples
--------
Add files to the list of data_files to be included with the package.
>>> self.add_data_files('foo.dat',
... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
... 'bar/cat.dat',
... '/full/path/to/can.dat') #doctest: +SKIP
will install these data files to::
<package install directory>/
foo.dat
fun/
gun.dat
nun/
pun.dat
sun.dat
             bar/
               cat.dat
can.dat
        where <package install directory> is the package (or sub-package)
        directory such as '/usr/lib/python2.4/site-packages/mypackage'
        ('C:\\Python2.4\\Lib\\site-packages\\mypackage') or
        '/usr/lib/python2.4/site-packages/mypackage/mysubpackage'
        ('C:\\Python2.4\\Lib\\site-packages\\mypackage\\mysubpackage').
"""
if len(files)>1:
for f in files:
self.add_data_files(f)
return
assert len(files)==1
if is_sequence(files[0]):
d, files = files[0]
else:
d = None
if is_string(files):
filepat = files
elif is_sequence(files):
if len(files)==1:
filepat = files[0]
else:
for f in files:
self.add_data_files((d, f))
return
else:
raise TypeError(repr(type(files)))
if d is None:
if hasattr(filepat, '__call__'):
d = ''
elif os.path.isabs(filepat):
d = ''
else:
d = os.path.dirname(filepat)
self.add_data_files((d, files))
return
paths = self.paths(filepat, include_non_existing=False)
if is_glob_pattern(filepat):
if is_glob_pattern(d):
pattern_list = d.split(os.sep)
pattern_list.reverse()
for path in paths:
path_list = path.split(os.sep)
path_list.reverse()
path_list.pop() # filename
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
target_list.append(path_list[i])
i += 1
else:
target_list.append(s)
target_list.reverse()
self.add_data_files((os.sep.join(target_list), path))
else:
self.add_data_files((d, paths))
return
assert not is_glob_pattern(d), repr((d, filepat))
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
data_files.append((os.path.join(self.path_in_package, d), paths))
### XXX Implement add_py_modules
def add_define_macros(self, macros):
"""Add define macros to configuration
Add the given sequence of macro name and value duples to the beginning
of the define_macros list This list will be visible to all extension
modules of the current package.
"""
dist = self.get_distribution()
if dist is not None:
if not hasattr(dist, 'define_macros'):
dist.define_macros = []
dist.define_macros.extend(macros)
else:
self.define_macros.extend(macros)
def add_include_dirs(self,*paths):
"""Add paths to configuration include directories.
Add the given sequence of paths to the beginning of the include_dirs
list. This list will be visible to all extension modules of the
current package.
"""
include_dirs = self.paths(paths)
dist = self.get_distribution()
if dist is not None:
if dist.include_dirs is None:
dist.include_dirs = []
dist.include_dirs.extend(include_dirs)
else:
self.include_dirs.extend(include_dirs)
def add_headers(self,*files):
"""Add installable headers to configuration.
Add the given sequence of files to the beginning of the headers list.
        By default, headers will be installed under the
        <python-include>/<self.name.replace('.','/')>/ directory. If an item
        of files is a tuple, then its first argument specifies the actual
        installation location relative to the <python-include> path.
Parameters
----------
files : str or seq
Argument(s) can be either:
* 2-sequence (<includedir suffix>,<path to header file(s)>)
* path(s) to header file(s) where python includedir suffix will
default to package name.
"""
headers = []
for path in files:
if is_string(path):
[headers.append((self.name, p)) for p in self.paths(path)]
else:
if not isinstance(path, (tuple, list)) or len(path) != 2:
raise TypeError(repr(path))
[headers.append((path[0], p)) for p in self.paths(path[1])]
dist = self.get_distribution()
if dist is not None:
if dist.headers is None:
dist.headers = []
dist.headers.extend(headers)
else:
self.headers.extend(headers)
def paths(self,*paths,**kws):
"""Apply glob to paths and prepend local_path if needed.
Applies glob.glob(...) to each path in the sequence (if needed) and
pre-pends the local_path if needed. Because this is called on all
source lists, this allows wildcard characters to be specified in lists
        of sources for extension modules, libraries, and scripts, and allows
        path-names to be relative to the source directory.
"""
include_non_existing = kws.get('include_non_existing', True)
return gpaths(paths,
local_path = self.local_path,
include_non_existing=include_non_existing)
def _fix_paths_dict(self, kw):
for k in kw.keys():
v = kw[k]
if k in ['sources', 'depends', 'include_dirs', 'library_dirs',
'module_dirs', 'extra_objects']:
new_v = self.paths(v)
kw[k] = new_v
def add_extension(self,name,sources,**kw):
"""Add extension to configuration.
Create and add an Extension instance to the ext_modules list. This
method also takes the following optional keyword arguments that are
passed on to the Extension constructor.
Parameters
----------
name : str
name of the extension
sources : seq
list of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
include_dirs :
define_macros :
undef_macros :
library_dirs :
libraries :
runtime_library_dirs :
extra_objects :
extra_compile_args :
extra_link_args :
extra_f77_compile_args :
extra_f90_compile_args :
export_symbols :
swig_opts :
depends :
The depends list contains paths to files or directories that the
sources of the extension module depend on. If any path in the
depends list is newer than the extension module, then the module
will be rebuilt.
language :
f2py_options :
module_dirs :
extra_info : dict or list
dict or list of dict of keywords to be appended to keywords.
Notes
-----
The self.paths(...) method is applied to all lists that may contain
paths.
"""
ext_args = copy.copy(kw)
ext_args['name'] = dot_join(self.name, name)
ext_args['sources'] = sources
if 'extra_info' in ext_args:
extra_info = ext_args['extra_info']
del ext_args['extra_info']
if isinstance(extra_info, dict):
extra_info = [extra_info]
for info in extra_info:
assert isinstance(info, dict), repr(info)
dict_append(ext_args,**info)
self._fix_paths_dict(ext_args)
# Resolve out-of-tree dependencies
libraries = ext_args.get('libraries', [])
libnames = []
ext_args['libraries'] = []
for libname in libraries:
if isinstance(libname, tuple):
self._fix_paths_dict(libname[1])
# Handle library names of the form libname@relative/path/to/library
if '@' in libname:
lname, lpath = libname.split('@', 1)
lpath = os.path.abspath(njoin(self.local_path, lpath))
if os.path.isdir(lpath):
c = self.get_subpackage(None, lpath,
caller_level = 2)
if isinstance(c, Configuration):
c = c.todict()
for l in [l[0] for l in c.get('libraries', [])]:
llname = l.split('__OF__', 1)[0]
if llname == lname:
c.pop('name', None)
dict_append(ext_args,**c)
break
continue
libnames.append(libname)
ext_args['libraries'] = libnames + ext_args['libraries']
ext_args['define_macros'] = \
self.define_macros + ext_args.get('define_macros', [])
from numpy.distutils.core import Extension
ext = Extension(**ext_args)
self.ext_modules.append(ext)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add an extension '+name)
return ext
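    # Usage sketch (editor's addition; the extension, source, and library
    # names below are hypothetical):
    #
    #     >>> config.add_extension('fastmod',
    #     ...                      sources=['fastmodmodule.c'],
    #     ...                      libraries=['npymath'])      # doctest: +SKIP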
def add_library(self,name,sources,**build_info):
"""
Add library to configuration.
Parameters
----------
name : str
Name of the extension.
sources : sequence
List of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
build_info : dict, optional
The following keys are allowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* extra_f77_compile_args
* extra_f90_compile_args
* f2py_options
* language
"""
self._add_library(name, sources, None, build_info)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a library '+ name)
def _add_library(self, name, sources, install_dir, build_info):
"""Common implementation for add_library and add_installed_library. Do
not use directly"""
build_info = copy.copy(build_info)
build_info['sources'] = sources
        # Sometimes, depends is not set to an empty list by default, and if
        # depends is not given to add_library, distutils barfs (#1134)
        if 'depends' not in build_info:
            build_info['depends'] = []
        self._fix_paths_dict(build_info)
        # Add to the libraries list so that it is built with build_clib
self.libraries.append((name, build_info))
def add_installed_library(self, name, sources, install_dir, build_info=None):
"""
Similar to add_library, but the specified library is installed.
Most C libraries used with `distutils` are only used to build python
extensions, but libraries built through this method will be installed
so that they can be reused by third-party packages.
Parameters
----------
name : str
Name of the installed library.
sources : sequence
List of the library's source files. See `add_library` for details.
install_dir : str
Path to install the library, relative to the current sub-package.
build_info : dict, optional
The following keys are allowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* extra_f77_compile_args
* extra_f90_compile_args
* f2py_options
* language
Returns
-------
None
See Also
--------
add_library, add_npy_pkg_config, get_info
Notes
-----
The best way to encode the options required to link against the specified
C libraries is to use a "libname.ini" file, and use `get_info` to
retrieve the required options (see `add_npy_pkg_config` for more
information).
"""
if not build_info:
build_info = {}
install_dir = os.path.join(self.package_path, install_dir)
self._add_library(name, sources, install_dir, build_info)
self.installed_libraries.append(InstallableLib(name, build_info, install_dir))
def add_npy_pkg_config(self, template, install_dir, subst_dict=None):
"""
Generate and install a npy-pkg config file from a template.
The config file generated from `template` is installed in the
given install directory, using `subst_dict` for variable substitution.
Parameters
----------
template : str
            The path of the template, relative to the current package path.
install_dir : str
            Where to install the npy-pkg config file, relative to the current
            package path.
subst_dict : dict, optional
If given, any string of the form ``@key@`` will be replaced by
``subst_dict[key]`` in the template file when installed. The install
prefix is always available through the variable ``@prefix@``, since the
install prefix is not easy to get reliably from setup.py.
        See Also
--------
add_installed_library, get_info
Notes
-----
        This works for both standard installs and in-place builds, i.e. the
        ``@prefix@`` refers to the source directory for in-place builds.
Examples
--------
::
config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar})
Assuming the foo.ini.in file has the following content::
[meta]
Name=@foo@
Version=1.0
Description=dummy description
[default]
Cflags=-I@prefix@/include
Libs=
The generated file will have the following content::
[meta]
Name=bar
Version=1.0
Description=dummy description
[default]
Cflags=-Iprefix_dir/include
Libs=
and will be installed as foo.ini in the 'lib' subpath.
When cross-compiling with numpy distutils, it might be necessary to
use modified npy-pkg-config files. Using the default/generated files
will link with the host libraries (i.e. libnpymath.a). For
        cross-compilation you of course need to link with target libraries,
while using the host Python installation.
You can copy out the numpy/core/lib/npy-pkg-config directory, add a
pkgdir value to the .ini files and set NPY_PKG_CONFIG_PATH environment
variable to point to the directory with the modified npy-pkg-config
files.
Example npymath.ini modified for cross-compilation::
[meta]
Name=npymath
Description=Portable, core math library implementing C99 standard
Version=0.1
[variables]
pkgname=numpy.core
pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/core
prefix=${pkgdir}
libdir=${prefix}/lib
includedir=${prefix}/include
[default]
Libs=-L${libdir} -lnpymath
Cflags=-I${includedir}
Requires=mlib
[msvc]
Libs=/LIBPATH:${libdir} npymath.lib
Cflags=/INCLUDE:${includedir}
Requires=mlib
"""
if subst_dict is None:
subst_dict = {}
template = os.path.join(self.package_path, template)
if self.name in self.installed_pkg_config:
self.installed_pkg_config[self.name].append((template, install_dir,
subst_dict))
else:
self.installed_pkg_config[self.name] = [(template, install_dir,
subst_dict)]
def add_scripts(self,*files):
"""Add scripts to configuration.
Add the sequence of files to the beginning of the scripts list.
Scripts will be installed under the <prefix>/bin/ directory.
"""
scripts = self.paths(files)
dist = self.get_distribution()
if dist is not None:
if dist.scripts is None:
dist.scripts = []
dist.scripts.extend(scripts)
else:
self.scripts.extend(scripts)
    def dict_append(self, **options):
        # note: the keyword-arguments dict is named `options` to avoid
        # shadowing the built-in `dict`
        for key in self.list_keys:
            a = getattr(self, key)
            a.extend(options.get(key, []))
        for key in self.dict_keys:
            a = getattr(self, key)
            a.update(options.get(key, {}))
        known_keys = self.list_keys + self.dict_keys + self.extra_keys
        for key in options.keys():
            if key not in known_keys:
                a = getattr(self, key, None)
                if a and a == options[key]: continue
                self.warn('Inheriting attribute %r=%r from %r' \
                          % (key, options[key], options.get('name', '?')))
                setattr(self, key, options[key])
                self.extra_keys.append(key)
            elif key in self.extra_keys:
                self.info('Ignoring attempt to set %r (from %r to %r)' \
                          % (key, getattr(self, key), options[key]))
            elif key in known_keys:
                # key is already processed above
                pass
            else:
                raise ValueError("Don't know about key=%r" % (key))
def __str__(self):
from pprint import pformat
known_keys = self.list_keys + self.dict_keys + self.extra_keys
s = '<'+5*'-' + '\n'
s += 'Configuration of '+self.name+':\n'
known_keys.sort()
for k in known_keys:
a = getattr(self, k, None)
if a:
s += '%s = %s\n' % (k, pformat(a))
s += 5*'-' + '>'
return s
def get_config_cmd(self):
"""
Returns the numpy.distutils config command instance.
"""
cmd = get_cmd('config')
cmd.ensure_finalized()
cmd.dump_source = 0
cmd.noisy = 0
old_path = os.environ.get('PATH')
if old_path:
path = os.pathsep.join(['.', old_path])
os.environ['PATH'] = path
return cmd
def get_build_temp_dir(self):
"""
Return a path to a temporary directory where temporary files should be
placed.
"""
cmd = get_cmd('build')
cmd.ensure_finalized()
return cmd.build_temp
def have_f77c(self):
"""Check for availability of Fortran 77 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
        True if a Fortran 77 compiler is available (i.e., a simple Fortran 77
        program could be compiled successfully).
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77')
return flag
def have_f90c(self):
"""Check for availability of Fortran 90 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
        True if a Fortran 90 compiler is available (i.e., a simple Fortran 90
        program could be compiled successfully).
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')
return flag
def append_to(self, extlib):
"""Append libraries, include_dirs to extension or library item.
"""
if is_sequence(extlib):
lib_name, build_info = extlib
dict_append(build_info,
libraries=self.libraries,
include_dirs=self.include_dirs)
else:
from numpy.distutils.core import Extension
assert isinstance(extlib, Extension), repr(extlib)
extlib.libraries.extend(self.libraries)
extlib.include_dirs.extend(self.include_dirs)
def _get_svn_revision(self, path):
"""Return path's SVN revision number.
"""
try:
output = subprocess.check_output(['svnversion'], cwd=path)
except (subprocess.CalledProcessError, OSError):
pass
else:
m = re.match(rb'(?P<revision>\d+)', output)
if m:
return int(m.group('revision'))
if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
entries = njoin(path, '_svn', 'entries')
else:
entries = njoin(path, '.svn', 'entries')
if os.path.isfile(entries):
with open(entries) as f:
fstr = f.read()
if fstr[:5] == '<?xml': # pre 1.4
m = re.search(r'revision="(?P<revision>\d+)"', fstr)
if m:
return int(m.group('revision'))
            else:  # non-xml entries file --- the revision follows a 'dir' entry
m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
if m:
return int(m.group('revision'))
return None
def _get_hg_revision(self, path):
"""Return path's Mercurial revision number.
"""
try:
output = subprocess.check_output(
['hg', 'identify', '--num'], cwd=path)
except (subprocess.CalledProcessError, OSError):
pass
else:
m = re.match(rb'(?P<revision>\d+)', output)
if m:
return int(m.group('revision'))
branch_fn = njoin(path, '.hg', 'branch')
branch_cache_fn = njoin(path, '.hg', 'branch.cache')
        # guard against a missing branch.cache file, which would otherwise
        # raise FileNotFoundError below
        if os.path.isfile(branch_fn) and os.path.isfile(branch_cache_fn):
            branch0 = None
            with open(branch_fn) as f:
                # the '.hg/branch' file holds the name of the active branch
                revision0 = f.read().strip()
            branch_map = {}
            with open(branch_cache_fn, 'r') as f:
for line in f:
branch1, revision1 = line.split()[:2]
if revision1==revision0:
branch0 = branch1
try:
revision1 = int(revision1)
except ValueError:
continue
branch_map[branch1] = revision1
return branch_map.get(branch0)
return None
def get_version(self, version_file=None, version_variable=None):
"""Try to get version string of a package.
Return a version string of the current package or None if the version
information could not be detected.
Notes
-----
This method scans files named
__version__.py, <packagename>_version.py, version.py, and
__svn_version__.py for string variables version, __version__, and
<packagename>_version, until a version number is found.
"""
version = getattr(self, 'version', None)
if version is not None:
return version
# Get version from version file.
if version_file is None:
files = ['__version__.py',
self.name.split('.')[-1]+'_version.py',
'version.py',
'__svn_version__.py',
'__hg_version__.py']
else:
files = [version_file]
if version_variable is None:
version_vars = ['version',
'__version__',
self.name.split('.')[-1]+'_version']
else:
version_vars = [version_variable]
for f in files:
fn = njoin(self.local_path, f)
if os.path.isfile(fn):
name = os.path.splitext(os.path.basename(fn))[0]
n = dot_join(self.name, name)
try:
version_module = exec_mod_from_location(
'_'.join(n.split('.')), fn)
except ImportError as e:
self.warn(str(e))
version_module = None
if version_module is None:
continue
for a in version_vars:
version = getattr(version_module, a, None)
if version is not None:
break
                # Fall back to a versioneer-style get_versions(), if present
try:
version = version_module.get_versions()['version']
except AttributeError:
pass
if version is not None:
break
if version is not None:
self.version = version
return version
# Get version as SVN or Mercurial revision number
revision = self._get_svn_revision(self.local_path)
if revision is None:
revision = self._get_hg_revision(self.local_path)
if revision is not None:
version = str(revision)
self.version = version
return version
    def make_svn_version_py(self, delete=True):
        """Appends a data function to the data_files list that will generate
        a __svn_version__.py file in the current package directory.
Generate package __svn_version__.py file from SVN revision number,
it will be removed after python exits but will be available
when sdist, etc commands are executed.
Notes
-----
If __svn_version__.py existed before, nothing is done.
This is
intended for working with source directories that are in an SVN
repository.
"""
target = njoin(self.local_path, '__svn_version__.py')
revision = self._get_svn_revision(self.local_path)
if os.path.isfile(target) or revision is None:
return
else:
def generate_svn_version_py():
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target, version))
with open(target, 'w') as f:
f.write('version = %r\n' % (version))
                def rm_file(f=target, p=self.info):
                    if delete:
                        try:
                            os.remove(f)
                            p('removed ' + f)
                        except OSError:
                            pass
                        try:
                            os.remove(f + 'c')
                            p('removed ' + f + 'c')
                        except OSError:
                            pass
atexit.register(rm_file)
return target
self.add_data_files(('', generate_svn_version_py()))
    def make_hg_version_py(self, delete=True):
        """Appends a data function to the data_files list that will generate
        a __hg_version__.py file in the current package directory.
Generate package __hg_version__.py file from Mercurial revision,
it will be removed after python exits but will be available
when sdist, etc commands are executed.
Notes
-----
If __hg_version__.py existed before, nothing is done.
This is intended for working with source directories that are
        in a Mercurial repository.
"""
target = njoin(self.local_path, '__hg_version__.py')
revision = self._get_hg_revision(self.local_path)
if os.path.isfile(target) or revision is None:
return
else:
def generate_hg_version_py():
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target, version))
with open(target, 'w') as f:
f.write('version = %r\n' % (version))
                def rm_file(f=target, p=self.info):
                    if delete:
                        try:
                            os.remove(f)
                            p('removed ' + f)
                        except OSError:
                            pass
                        try:
                            os.remove(f + 'c')
                            p('removed ' + f + 'c')
                        except OSError:
                            pass
atexit.register(rm_file)
return target
self.add_data_files(('', generate_hg_version_py()))
def make_config_py(self,name='__config__'):
"""Generate package __config__.py file containing system_info
information used during building the package.
This file is installed to the
package installation directory.
"""
self.py_modules.append((self.name, name, generate_config_py))
def get_info(self,*names):
"""Get resources information.
Return information (from system_info.get_info) for all of the names in
the argument list in a single dictionary.
"""
from .system_info import get_info, dict_append
info_dict = {}
for a in names:
dict_append(info_dict,**get_info(a))
return info_dict
def get_cmd(cmdname, _cache={}):
if cmdname not in _cache:
import distutils.core
dist = distutils.core._setup_distribution
if dist is None:
from distutils.errors import DistutilsInternalError
raise DistutilsInternalError(
'setup distribution instance not initialized')
cmd = dist.get_command_obj(cmdname)
_cache[cmdname] = cmd
return _cache[cmdname]
def get_numpy_include_dirs():
# numpy_include_dirs are set by numpy/core/setup.py, otherwise []
include_dirs = Configuration.numpy_include_dirs[:]
if not include_dirs:
import numpy
include_dirs = [ numpy.get_include() ]
# else running numpy/core/setup.py
return include_dirs
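
# A hedged illustration of the fallback above (illustrative only; the
# MISC_UTIL_DEMO guard is an assumption, and the check assumes no include
# dirs were registered by numpy/core/setup.py):
if os.environ.get('MISC_UTIL_DEMO'):
    import numpy
    assert get_numpy_include_dirs() == [numpy.get_include()]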
def get_npy_pkg_dir():
    Return the path of the npy-pkg-config directory.
If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that
is returned. Otherwise, a path inside the location of the numpy module is
returned.
The NPY_PKG_CONFIG_PATH can be useful when cross-compiling, maintaining
customized npy-pkg-config .ini files for the cross-compilation
environment, and using them when cross-compiling.
"""
d = os.environ.get('NPY_PKG_CONFIG_PATH')
if d is not None:
return d
spec = importlib.util.find_spec('numpy')
d = os.path.join(os.path.dirname(spec.origin),
'core', 'lib', 'npy-pkg-config')
return d
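
# A hedged sketch of the NPY_PKG_CONFIG_PATH override described above
# (illustrative only; the demo guard and the example path are assumptions):
if os.environ.get('MISC_UTIL_DEMO'):
    os.environ['NPY_PKG_CONFIG_PATH'] = '/tmp/my-npy-pkg-config'
    assert get_npy_pkg_dir() == '/tmp/my-npy-pkg-config'
    del os.environ['NPY_PKG_CONFIG_PATH']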
def get_pkg_info(pkgname, dirs=None):
"""
Return library info for the given package.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
        If given, should be a sequence of additional directories in which to
        look for npy-pkg-config files. Those directories are searched prior
        to the NumPy directory.
Returns
-------
pkginfo : class instance
The `LibraryInfo` instance containing the build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
Configuration.add_npy_pkg_config, Configuration.add_installed_library,
get_info
"""
from numpy.distutils.npy_pkg_config import read_config
if dirs:
dirs.append(get_npy_pkg_dir())
else:
dirs = [get_npy_pkg_dir()]
return read_config(pkgname, dirs)
def get_info(pkgname, dirs=None):
"""
Return an info dict for a given C library.
The info dict contains the necessary options to use the C library.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
        If given, should be a sequence of additional directories in which to
        look for npy-pkg-config files. Those directories are searched prior
        to the NumPy directory.
Returns
-------
info : dict
The dictionary with build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
Configuration.add_npy_pkg_config, Configuration.add_installed_library,
get_pkg_info
Examples
--------
To get the necessary information for the npymath library from NumPy:
>>> npymath_info = np.distutils.misc_util.get_info('npymath')
>>> npymath_info #doctest: +SKIP
{'define_macros': [], 'libraries': ['npymath'], 'library_dirs':
['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']}
This info dict can then be used as input to a `Configuration` instance::
config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info)
"""
from numpy.distutils.npy_pkg_config import parse_flags
pkg_info = get_pkg_info(pkgname, dirs)
# Translate LibraryInfo instance into a build_info dict
info = parse_flags(pkg_info.cflags())
for k, v in parse_flags(pkg_info.libs()).items():
info[k].extend(v)
    # add_extension's extra_info argument expects 'define_macros', not 'macros'
info['define_macros'] = info['macros']
del info['macros']
del info['ignored']
return info
def is_bootstrapping():
import builtins
try:
builtins.__NUMPY_SETUP__
return True
except AttributeError:
return False
#########################
def default_config_dict(name = None, parent_name = None, local_path=None):
"""Return a configuration dictionary for usage in
configuration() function defined in file setup_<name>.py.
"""
import warnings
warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\
'deprecated default_config_dict(%r,%r,%r)'
% (name, parent_name, local_path,
name, parent_name, local_path,
), stacklevel=2)
c = Configuration(name, parent_name, local_path)
return c.todict()
def dict_append(d, **kws):
for k, v in kws.items():
if k in d:
ov = d[k]
if isinstance(ov, str):
d[k] = v
else:
d[k].extend(v)
else:
d[k] = v
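
# A minimal sketch of dict_append's merge semantics (illustrative values;
# the MISC_UTIL_DEMO guard is an assumption): list values are extended,
# string values are overwritten, and new keys are simply added.
if os.environ.get('MISC_UTIL_DEMO'):
    _d = {'libraries': ['npymath'], 'name': 'foo'}
    dict_append(_d, libraries=['m'], name='bar', define_macros=[('X', 1)])
    assert _d == {'libraries': ['npymath', 'm'], 'name': 'bar',
                  'define_macros': [('X', 1)]}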
def appendpath(prefix, path):
    """Append `path` to `prefix`; if `path` is absolute, the leading
    components it shares with `prefix` are dropped before joining."""
if os.path.sep != '/':
prefix = prefix.replace('/', os.path.sep)
path = path.replace('/', os.path.sep)
drive = ''
if os.path.isabs(path):
drive = os.path.splitdrive(prefix)[0]
absprefix = os.path.splitdrive(os.path.abspath(prefix))[1]
pathdrive, path = os.path.splitdrive(path)
d = os.path.commonprefix([absprefix, path])
if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \
or os.path.join(path[:len(d)], path[len(d):]) != path:
# Handle invalid paths
d = os.path.dirname(d)
subpath = path[len(d):]
if os.path.isabs(subpath):
subpath = subpath[1:]
else:
subpath = path
return os.path.normpath(njoin(drive + prefix, subpath))
def generate_config_py(target):
"""Generate config.py file containing system_info information
used during building the package.
Usage:
config['py_modules'].append((packagename, '__config__',generate_config_py))
"""
from numpy.distutils.system_info import system_info
from distutils.dir_util import mkpath
mkpath(os.path.dirname(target))
with open(target, 'w') as f:
f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0])))
f.write('# It contains system_info results at the time of building this package.\n')
f.write('__all__ = ["get_info","show"]\n\n')
# For gfortran+msvc combination, extra shared libraries may exist
f.write(textwrap.dedent("""
import os
import sys
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
os.add_dll_directory(extra_dll_dir)
"""))
for k, i in system_info.saved_results.items():
f.write('%s=%r\n' % (k, i))
f.write(textwrap.dedent(r'''
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
"""
Show libraries in the system on which NumPy was built.
Print information about various resources (libraries, library
directories, include directories, etc.) in the system on which
NumPy was built.
See Also
--------
get_include : Returns the directory containing NumPy C
header files.
Notes
-----
1. Classes specifying the information to be printed are defined
in the `numpy.distutils.system_info` module.
Information may include:
* ``language``: language used to write the libraries (mostly
C or f77)
* ``libraries``: names of libraries found in the system
* ``library_dirs``: directories containing the libraries
* ``include_dirs``: directories containing library header files
* ``src_dirs``: directories containing library source files
* ``define_macros``: preprocessor macros used by
``distutils.setup``
* ``baseline``: minimum CPU features required
* ``found``: dispatched features supported in the system
* ``not found``: dispatched features that are not supported
in the system
2. NumPy BLAS/LAPACK Installation Notes
Installing a numpy wheel (``pip install numpy`` or force it
via ``pip install numpy --only-binary :numpy: numpy``) includes
an OpenBLAS implementation of the BLAS and LAPACK linear algebra
APIs. In this case, ``library_dirs`` reports the original build
time configuration as compiled with gcc/gfortran; at run time
the OpenBLAS library is in
``site-packages/numpy.libs/`` (linux), or
``site-packages/numpy/.dylibs/`` (macOS), or
``site-packages/numpy/.libs/`` (windows).
Installing numpy from source
(``pip install numpy --no-binary numpy``) searches for BLAS and
LAPACK dynamic link libraries at build time as influenced by
environment variables NPY_BLAS_LIBS, NPY_CBLAS_LIBS, and
NPY_LAPACK_LIBS; or NPY_BLAS_ORDER and NPY_LAPACK_ORDER;
or the optional file ``~/.numpy-site.cfg``.
NumPy remembers those locations and expects to load the same
libraries at run-time.
In NumPy 1.21+ on macOS, 'accelerate' (Apple's Accelerate BLAS
library) is in the default build-time search order after
'openblas'.
Examples
--------
>>> import numpy as np
>>> np.show_config()
blas_opt_info:
language = c
define_macros = [('HAVE_CBLAS', None)]
libraries = ['openblas', 'openblas']
library_dirs = ['/usr/local/lib']
"""
from numpy.core._multiarray_umath import (
__cpu_features__, __cpu_baseline__, __cpu_dispatch__
)
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
features_found, features_not_found = [], []
for feature in __cpu_dispatch__:
if __cpu_features__[feature]:
features_found.append(feature)
else:
features_not_found.append(feature)
print("Supported SIMD extensions in this NumPy install:")
print(" baseline = %s" % (','.join(__cpu_baseline__)))
print(" found = %s" % (','.join(features_found)))
print(" not found = %s" % (','.join(features_not_found)))
'''))
return target
def msvc_version(compiler):
"""Return version major and minor of compiler instance if it is
MSVC, raise an exception otherwise."""
if not compiler.compiler_type == "msvc":
raise ValueError("Compiler instance is not msvc (%s)"\
% compiler.compiler_type)
return compiler._MSVCCompiler__version
def get_build_architecture():
# Importing distutils.msvccompiler triggers a warning on non-Windows
# systems, so delay the import to here.
from distutils.msvccompiler import get_build_architecture
return get_build_architecture()
_cxx_ignore_flags = {'-Werror=implicit-function-declaration', '-std=c99'}
def sanitize_cxx_flags(cxxflags):
'''
Some flags are valid for C but not C++. Prune them.
'''
return [flag for flag in cxxflags if flag not in _cxx_ignore_flags]
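
# A quick illustration of the pruning above (illustrative only; the
# MISC_UTIL_DEMO guard is an assumption):
if os.environ.get('MISC_UTIL_DEMO'):
    assert sanitize_cxx_flags(['-O3', '-std=c99']) == ['-O3']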
def exec_mod_from_location(modname, modfile):
'''
Use importlib machinery to import a module `modname` from the file
`modfile`. Depending on the `spec.loader`, the module may not be
registered in sys.modules.
'''
spec = importlib.util.spec_from_file_location(modname, modfile)
foo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(foo)
return foo
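
# A hedged sketch of exec_mod_from_location (illustrative only; the demo
# guard and the throwaway module are assumptions):
if os.environ.get('MISC_UTIL_DEMO'):
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as fh:
        fh.write('answer = 42\n')
    mod = exec_mod_from_location('_demo_mod', fh.name)
    assert mod.answer == 42
    os.remove(fh.name)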
| 89,383 | Python | 34.839615 | 102 | 0.544824 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/conv_template.py | #!/usr/bin/env python3
"""
takes templated file .xxx.src and produces .xxx file where .xxx is
.i or .c or .h, using the following template rules
/**begin repeat -- on a line by itself marks the start of a repeated code
segment
/**end repeat**/ -- on a line by itself marks its end
After the /**begin repeat and before the */, all the named templates are placed;
these should all have the same number of replacements
Repeat blocks can be nested, with each nested block labeled with its depth,
i.e.
/**begin repeat1
*....
*/
/**end repeat1**/
When using nested loops, you can optionally exclude particular
combinations of the variables using (inside the comment portion of the inner loop):
:exclude: var1=value1, var2=value2, ...
This will exclude the pattern where var1 is value1 and var2 is value2 when
the result is being generated.
In the main body each replace will use one entry from the list of named replacements
Note that all #..# forms in a block must have the same number of
comma-separated entries.
Example:
An input file containing
/**begin repeat
* #a = 1,2,3#
* #b = 1,2,3#
*/
/**begin repeat1
* #c = ted, jim#
*/
@a@, @b@, @c@
/**end repeat1**/
/**end repeat**/
produces
#line 1 "template.c.src"
/*
*********************************************************************
** This file was autogenerated from a template DO NOT EDIT!!**
** Changes should be made to the original source (.src) file **
*********************************************************************
*/
#line 9
1, 1, ted
#line 9
1, 1, jim
#line 9
2, 2, ted
#line 9
2, 2, jim
#line 9
3, 3, ted
#line 9
3, 3, jim
"""
__all__ = ['process_str', 'process_file']
import os
import sys
import re
# names for replacement that are already global.
global_names = {}
# header placed at the front of head processed file
header =\
"""
/*
*****************************************************************************
** This file was autogenerated from a template DO NOT EDIT!!!! **
** Changes should be made to the original source (.src) file **
*****************************************************************************
*/
"""
# Parse string for repeat loops
def parse_structure(astr, level):
"""
    The returned line number is from the beginning of the string, starting
    at zero. Returns an empty list if no loops are found.
"""
if level == 0 :
loopbeg = "/**begin repeat"
loopend = "/**end repeat**/"
else :
loopbeg = "/**begin repeat%d" % level
loopend = "/**end repeat%d**/" % level
ind = 0
line = 0
spanlist = []
while True:
start = astr.find(loopbeg, ind)
if start == -1:
break
start2 = astr.find("*/", start)
start2 = astr.find("\n", start2)
fini1 = astr.find(loopend, start2)
fini2 = astr.find("\n", fini1)
line += astr.count("\n", ind, start2+1)
spanlist.append((start, start2+1, fini1, fini2+1, line))
line += astr.count("\n", start2+1, fini2)
ind = fini2
spanlist.sort()
return spanlist
def paren_repl(obj):
torep = obj.group(1)
numrep = obj.group(2)
return ','.join([torep]*int(numrep))
parenrep = re.compile(r"\(([^)]*)\)\*(\d+)")
plainrep = re.compile(r"([^*]+)\*(\d+)")
def parse_values(astr):
# replaces all occurrences of '(a,b,c)*4' in astr
    # with 'a,b,c,a,b,c,a,b,c,a,b,c'. Empty parentheses generate
# empty values, i.e., ()*4 yields ',,,'. The result is
# split at ',' and a list of values returned.
astr = parenrep.sub(paren_repl, astr)
# replaces occurrences of xxx*3 with xxx, xxx, xxx
astr = ','.join([plainrep.sub(paren_repl, x.strip())
for x in astr.split(',')])
return astr.split(',')
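
# A quick check of the expansion rules described above (illustrative
# values; the CONV_TEMPLATE_DEMO guard is an assumption):
if os.environ.get('CONV_TEMPLATE_DEMO'):
    assert parse_values('(a,b)*2') == ['a', 'b', 'a', 'b']
    assert parse_values('xxx*3') == ['xxx', 'xxx', 'xxx']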
stripast = re.compile(r"\n\s*\*?")
named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#")
exclude_vars_re = re.compile(r"(\w*)=(\w*)")
exclude_re = re.compile(":exclude:")
def parse_loop_header(loophead):
    """Find all named replacements in the header.
Returns a list of dictionaries, one for each loop iteration,
where each key is a name to be substituted and the corresponding
value is the replacement string.
Also return a list of exclusions. The exclusions are dictionaries
of key value pairs. There can be more than one exclusion.
    [{'var1':'value1', 'var2':'value2'[, ...]}, ...]
"""
# Strip out '\n' and leading '*', if any, in continuation lines.
    # This should not affect code previous to this change as
# continuation lines were not allowed.
loophead = stripast.sub("", loophead)
# parse out the names and lists of values
names = []
reps = named_re.findall(loophead)
nsub = None
for rep in reps:
name = rep[0]
vals = parse_values(rep[1])
size = len(vals)
if nsub is None :
nsub = size
elif nsub != size :
msg = "Mismatch in number of values, %d != %d\n%s = %s"
raise ValueError(msg % (nsub, size, name, vals))
names.append((name, vals))
# Find any exclude variables
excludes = []
for obj in exclude_re.finditer(loophead):
span = obj.span()
# find next newline
endline = loophead.find('\n', span[1])
substr = loophead[span[1]:endline]
ex_names = exclude_vars_re.findall(substr)
excludes.append(dict(ex_names))
# generate list of dictionaries, one for each template iteration
dlist = []
if nsub is None :
raise ValueError("No substitution variables found")
for i in range(nsub):
tmp = {name: vals[i] for name, vals in names}
dlist.append(tmp)
return dlist
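
# A minimal sketch of the header parsing above (illustrative values; the
# CONV_TEMPLATE_DEMO guard is an assumption): one dict per loop iteration.
if os.environ.get('CONV_TEMPLATE_DEMO'):
    assert parse_loop_header('#a = 1, 2#') == [{'a': '1'}, {'a': '2'}]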
replace_re = re.compile(r"@(\w+)@")
def parse_string(astr, env, level, line) :
lineno = "#line %d\n" % line
# local function for string replacement, uses env
def replace(match):
name = match.group(1)
try :
val = env[name]
except KeyError:
msg = 'line %d: no definition of key "%s"'%(line, name)
raise ValueError(msg) from None
return val
code = [lineno]
struct = parse_structure(astr, level)
if struct :
# recurse over inner loops
oldend = 0
newlevel = level + 1
for sub in struct:
pref = astr[oldend:sub[0]]
head = astr[sub[0]:sub[1]]
text = astr[sub[1]:sub[2]]
oldend = sub[3]
newline = line + sub[4]
code.append(replace_re.sub(replace, pref))
try :
envlist = parse_loop_header(head)
except ValueError as e:
msg = "line %d: %s" % (newline, e)
raise ValueError(msg)
for newenv in envlist :
newenv.update(env)
newcode = parse_string(text, newenv, newlevel, newline)
code.extend(newcode)
suff = astr[oldend:]
code.append(replace_re.sub(replace, suff))
else :
# replace keys
code.append(replace_re.sub(replace, astr))
code.append('\n')
return ''.join(code)
def process_str(astr):
code = [header]
    code.append(parse_string(astr, global_names, 0, 1))
return ''.join(code)
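
# A hedged end-to-end sketch of a repeat block (illustrative only; the
# CONV_TEMPLATE_DEMO guard is an assumption):
if os.environ.get('CONV_TEMPLATE_DEMO'):
    _src = ("/**begin repeat\n"
            " * #a = 1,2#\n"
            " */\n"
            "@a@\n"
            "/**end repeat**/\n")
    _out = process_str(_src)
    # each loop iteration emits the body with @a@ substituted
    assert '\n1\n' in _out and '\n2\n' in _out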
include_src_re = re.compile(r"(\n|\A)#include\s*['\"]"
r"(?P<name>[\w\d./\\]+[.]src)['\"]", re.I)
def resolve_includes(source):
d = os.path.dirname(source)
with open(source) as fid:
lines = []
for line in fid:
m = include_src_re.match(line)
if m:
fn = m.group('name')
if not os.path.isabs(fn):
fn = os.path.join(d, fn)
if os.path.isfile(fn):
lines.extend(resolve_includes(fn))
else:
lines.append(line)
else:
lines.append(line)
return lines
def process_file(source):
lines = resolve_includes(source)
sourcefile = os.path.normcase(source).replace("\\", "\\\\")
try:
code = process_str(''.join(lines))
except ValueError as e:
raise ValueError('In "%s" loop at %s' % (sourcefile, e)) from None
return '#line 1 "%s"\n%s' % (sourcefile, code)
def unique_key(adict):
# this obtains a unique key given a dictionary
# currently it works by appending together n of the letters of the
# current keys and increasing n until a unique key is found
# -- not particularly quick
allkeys = list(adict.keys())
done = False
n = 1
while not done:
newkey = "".join([x[:n] for x in allkeys])
if newkey in allkeys:
n += 1
else:
done = True
return newkey
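
# A tiny illustration of the key construction above (illustrative values;
# the CONV_TEMPLATE_DEMO guard is an assumption):
if os.environ.get('CONV_TEMPLATE_DEMO'):
    # one letter from each existing key already yields a unique name here
    assert unique_key({'ab': 1, 'cd': 2}) == 'ac'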
def main():
try:
file = sys.argv[1]
except IndexError:
fid = sys.stdin
outfile = sys.stdout
else:
fid = open(file, 'r')
(base, ext) = os.path.splitext(file)
newname = base
outfile = open(newname, 'w')
allstr = fid.read()
try:
writestr = process_str(allstr)
except ValueError as e:
raise ValueError("In %s loop at %s" % (file, e)) from None
outfile.write(writestr)
if __name__ == "__main__":
main()
| 9,536 | Python | 27.9 | 84 | 0.543624 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/from_template.py | #!/usr/bin/env python3
"""
process_file(filename)
takes templated file .xxx.src and produces .xxx file where .xxx
is .pyf .f90 or .f using the following template rules:
'<..>' denotes a template.
All function and subroutine blocks in a source file with names that
contain '<..>' will be replicated according to the rules in '<..>'.
The number of comma-separated words in '<..>' will determine the number of
replicates.
'<..>' may have two different forms, named and short. For example,
named:
<p=d,s,z,c> where anywhere inside a block '<p>' will be replaced with
'd', 's', 'z', and 'c' for each replicate of the block.
<_c> is already defined: <_c=s,d,c,z>
<_t> is already defined: <_t=real,double precision,complex,double complex>
short:
<s,d,c,z>, a short form of the named, useful when no <p> appears inside
a block.
In general, '<..>' contains a comma-separated list of arbitrary
expressions. If one of these expressions must contain a comma, '<', or '>',
prepend it with a backslash.
If an expression matches '\\<index>' then it will be replaced
by the <index>-th expression.
Note that all '<..>' forms in a block must have the same number of
comma-separated entries.
Predefined named template rules:
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ftypereal=real,double precision,\\0,\\1>
<ctype=float,double,complex_float,complex_double>
<ctypereal=float,double,\\0,\\1>
"""
__all__ = ['process_str', 'process_file']
import os
import sys
import re
routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b', re.I)
routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I)
function_start_re = re.compile(r'\n (\$|\*)\s*function\b', re.I)
def parse_structure(astr):
    """ Return a list of tuples, one for each function or subroutine;
    each tuple holds the start and end of the block to be
    expanded.
"""
spanlist = []
ind = 0
while True:
m = routine_start_re.search(astr, ind)
if m is None:
break
start = m.start()
if function_start_re.match(astr, start, m.end()):
while True:
i = astr.rfind('\n', ind, start)
if i==-1:
break
start = i
if astr[i:i+7]!='\n $':
break
start += 1
m = routine_end_re.search(astr, m.end())
ind = end = m and m.end()-1 or len(astr)
spanlist.append((start, end))
return spanlist
template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
list_re = re.compile(r"<\s*((.*?))\s*>")
def find_repl_patterns(astr):
reps = named_re.findall(astr)
names = {}
for rep in reps:
name = rep[0].strip() or unique_key(names)
repl = rep[1].replace(r'\,', '@comma@')
thelist = conv(repl)
names[name] = thelist
return names
def find_and_remove_repl_patterns(astr):
names = find_repl_patterns(astr)
astr = re.subn(named_re, '', astr)[0]
return astr, names
item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
def conv(astr):
    items = [x.strip() for x in astr.split(',')]
    for i in range(len(items)):
        m = item_re.match(items[i])
        if m:
            j = int(m.group('index'))
            items[i] = items[j]
    return ','.join(items)
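
# A quick check of the backslash-index rule described in the module
# docstring (illustrative values; the FROM_TEMPLATE_DEMO guard is an
# assumption):
if os.environ.get('FROM_TEMPLATE_DEMO'):
    assert conv(r'real, double precision, \0, \1') == \
        'real,double precision,real,double precision'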
def unique_key(adict):
""" Obtain a unique key given a dictionary."""
allkeys = list(adict.keys())
done = False
n = 1
while not done:
newkey = '__l%s' % (n)
if newkey in allkeys:
n += 1
else:
done = True
return newkey
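
# A tiny illustration of unique_key (illustrative; the FROM_TEMPLATE_DEMO
# guard is an assumption):
if os.environ.get('FROM_TEMPLATE_DEMO'):
    assert unique_key({}) == '__l1'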
template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
def expand_sub(substr, names):
substr = substr.replace(r'\>', '@rightarrow@')
substr = substr.replace(r'\<', '@leftarrow@')
lnames = find_repl_patterns(substr)
substr = named_re.sub(r"<\1>", substr) # get rid of definition templates
def listrepl(mobj):
thelist = conv(mobj.group(1).replace(r'\,', '@comma@'))
if template_name_re.match(thelist):
return "<%s>" % (thelist)
name = None
for key in lnames.keys(): # see if list is already in dictionary
if lnames[key] == thelist:
name = key
if name is None: # this list is not in the dictionary yet
name = unique_key(lnames)
lnames[name] = thelist
return "<%s>" % name
substr = list_re.sub(listrepl, substr) # convert all lists to named templates
# newnames are constructed as needed
numsubs = None
base_rule = None
rules = {}
for r in template_re.findall(substr):
if r not in rules:
thelist = lnames.get(r, names.get(r, None))
if thelist is None:
raise ValueError('No replicates found for <%s>' % (r))
if r not in names and not thelist.startswith('_'):
names[r] = thelist
rule = [i.replace('@comma@', ',') for i in thelist.split(',')]
num = len(rule)
if numsubs is None:
numsubs = num
rules[r] = rule
base_rule = r
elif num == numsubs:
rules[r] = rule
else:
print("Mismatch in number of replacements (base <%s=%s>)"
" for <%s=%s>. Ignoring." %
(base_rule, ','.join(rules[base_rule]), r, thelist))
if not rules:
return substr
def namerepl(mobj):
name = mobj.group(1)
return rules.get(name, (k+1)*[name])[k]
newstr = ''
for k in range(numsubs):
newstr += template_re.sub(namerepl, substr) + '\n\n'
newstr = newstr.replace('@rightarrow@', '>')
newstr = newstr.replace('@leftarrow@', '<')
return newstr
def process_str(allstr):
newstr = allstr
writestr = ''
struct = parse_structure(newstr)
oldend = 0
names = {}
names.update(_special_names)
for sub in struct:
cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]])
writestr += cleanedstr
names.update(defs)
writestr += expand_sub(newstr[sub[0]:sub[1]], names)
oldend = sub[1]
writestr += newstr[oldend:]
return writestr
include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+\.src)['\"]", re.I)
def resolve_includes(source):
d = os.path.dirname(source)
with open(source) as fid:
lines = []
for line in fid:
m = include_src_re.match(line)
if m:
fn = m.group('name')
if not os.path.isabs(fn):
fn = os.path.join(d, fn)
if os.path.isfile(fn):
lines.extend(resolve_includes(fn))
else:
lines.append(line)
else:
lines.append(line)
return lines
def process_file(source):
lines = resolve_includes(source)
return process_str(''.join(lines))
_special_names = find_repl_patterns('''
<_c=s,d,c,z>
<_t=real,double precision,complex,double complex>
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ctype=float,double,complex_float,complex_double>
<ftypereal=real,double precision,\\0,\\1>
<ctypereal=float,double,\\0,\\1>
''')
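
# A hedged end-to-end sketch of template expansion (illustrative only; the
# FROM_TEMPLATE_DEMO guard is an assumption):
if os.environ.get('FROM_TEMPLATE_DEMO'):
    _src = ("      subroutine <prefix>copy(x)\n"
            "      <ftype> x\n"
            "      end subroutine <prefix>copy\n")
    _out = process_str(_src)
    # the predefined <prefix>/<ftype> rules yield four replicates
    assert 'subroutine scopy' in _out
    assert 'double complex x' in _out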
def main():
try:
file = sys.argv[1]
except IndexError:
fid = sys.stdin
outfile = sys.stdout
else:
fid = open(file, 'r')
(base, ext) = os.path.splitext(file)
newname = base
outfile = open(newname, 'w')
allstr = fid.read()
writestr = process_str(allstr)
outfile.write(writestr)
if __name__ == "__main__":
main()
| 7,913 | Python | 29.206107 | 94 | 0.560091 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/ccompiler_opt.py | """Provides the `CCompilerOpt` class, used for handling the CPU/hardware
optimization, starting from parsing the command arguments, to managing the
relation between the CPU baseline and dispatch-able features,
also generating the required C headers, and ending with compiling
the sources with the proper compiler flags.
`CCompilerOpt` doesn't provide runtime detection for the CPU features,
instead only focuses on the compiler side, but it creates abstract C headers
that can be used later for the final runtime dispatching process."""
import atexit
import inspect
import os
import pprint
import re
import subprocess
import textwrap
# These flags are used to compile any C++ source within Numpy.
# They are chosen to have very few runtime dependencies.
NPY_CXX_FLAGS = [
'-std=c++11', # Minimal standard version
'-D__STDC_VERSION__=0', # for compatibility with C headers
'-fno-exceptions', # no exception support
'-fno-rtti'] # no runtime type information
class _Config:
    """An abstract class that holds all configurable attributes of
    `CCompilerOpt`; these class attributes can be used to change its
    default behavior in order to fit other requirements.
Attributes
----------
conf_nocache : bool
Set True to disable memory and file cache.
Default is False.
conf_noopt : bool
        Set True to force the optimization to be disabled;
        in this case `CCompilerOpt` still generates all
        expected headers in order to 'not' break the build.
        Default is False.
conf_cache_factors : list
        Add extra factors to the primary caching factors. The caching factors
        are used to determine whether anything has changed that requires the
        cache to be discarded and rebuilt. The primary factors are the
        arguments of `CCompilerOpt` and the `CCompiler`'s properties (type,
        flags, etc). Default is a list of two items: the time of last
        modification of `ccompiler_opt` and the value of attribute "conf_noopt"
    conf_tmp_path : str
        The path of the temporary directory. Default is an auto-created
        temporary directory via ``tempfile.mkdtemp()``.
conf_check_path : str
        The path of the testing files. Each added CPU feature must have a
        **C** source file that contains at least one intrinsic or instruction
        related to this feature, so it can be tested against the compiler.
        Default is ``./distutils/checks``.
conf_target_groups : dict
Extra tokens that can be reached from dispatch-able sources through
the special mark ``@targets``. Default is an empty dictionary.
**Notes**:
- case-insensitive for tokens and group names
        - the sign '#' must be placed at the beginning of the group name and only within ``@targets``
**Example**:
.. code-block:: console
$ "@targets #avx_group other_tokens" > group_inside.c
>>> CCompilerOpt.conf_target_groups["avx_group"] = \\
"$werror $maxopt avx2 avx512f avx512_skx"
>>> cco = CCompilerOpt(cc_instance)
>>> cco.try_dispatch(["group_inside.c"])
conf_c_prefix : str
The prefix of public C definitions. Default is ``"NPY_"``.
conf_c_prefix_ : str
The prefix of internal C definitions. Default is ``"NPY__"``.
conf_cc_flags : dict
Nested dictionaries defining several compiler flags
that linked to some major functions, the main key
represent the compiler name and sub-keys represent
flags names. Default is already covers all supported
**C** compilers.
Sub-keys explained as follows:
"native": str or None
            used by the argument option `native` to detect
            support for the current machine via the compiler.
"werror": str or None
            used to treat warnings as errors when testing CPU features
            against the compiler and also for the target policy `$werror`
            via dispatch-able sources.
"maxopt": str or None
            used for the target policy '$maxopt'; the value should
            contain the maximum acceptable optimization level for the
            compiler, e.g. `'-O3'` in gcc.
**Notes**:
* case-sensitive for compiler names and flags
* use space to separate multiple flags
        * any flag will be tested against the compiler and will be skipped
          if it's not applicable.
conf_min_features : dict
        A dictionary that defines the CPU features used for the
        argument option `'min'`; each key represents a CPU architecture
        name, e.g. `'x86'`. Default values provide the best effort
        on a wide range of user platforms.
**Note**: case-sensitive for architecture names.
conf_features : dict
        Nested dictionaries used for identifying the CPU features.
        The primary key is a feature name or a group name
        that gathers several features. Default values cover all
        supported features but leave out the major options like "flags";
        those undefined options are handled by the method
        `conf_features_partial()`. The default value covers almost all CPU
        features for *X86*, *IBM/Power64* and *ARM 7/8*.
        Sub-keys explained as follows:
        "implies" : str or list, optional
            List of CPU feature names to be implied by it;
            each feature name must be defined within `conf_features`.
            Default is None.
"flags": str or list, optional
List of compiler flags. Default is None.
"detect": str or list, optional
            List of CPU feature names that are required to be detected
            at runtime. By default, it's the feature name, or the features
            in "group" if that is specified.
"implies_detect": bool, optional
            If True, the "detect" lists of all implied features will be
            combined. Default is True; see `feature_detect()`.
"group": str or list, optional
Same as "implies" but doesn't require the feature name to be
defined within `conf_features`.
"interest": int, required
a key for sorting CPU features
"headers": str or list, optional
intrinsics C header file
"disable": str, optional
            force-disable the feature; the string value should contain the
            reason for disabling it.
"autovec": bool or None, optional
            True or False to declare whether the CPU feature can be
            auto-vectorized by the compiler.
            By default (None), it is treated as True if the feature contains
            at least one applicable flag; see `feature_can_autovec()`.
"extra_checks": str or list, optional
Extra test case names for the CPU feature that need to be tested
against the compiler.
Each test case must have a C file named ``extra_xxxx.c``, where
``xxxx`` is the case name in lower case, under 'conf_check_path'.
It should contain at least one intrinsic or function related to the test case.
            If the compiler is able to successfully compile the C file then `CCompilerOpt`
will add a C ``#define`` for it into the main dispatch header, e.g.
``#define {conf_c_prefix}_XXXX`` where ``XXXX`` is the case name in upper case.
**NOTES**:
        * space can be used as a separator with options that support "str or list"
        * case-sensitive for all values, and feature names must be in upper-case
        * if flags aren't applicable, they will be skipped rather than
          disabling the CPU feature
        * the CPU feature will be disabled if the compiler fails to compile
          the test file
"""
conf_nocache = False
conf_noopt = False
conf_cache_factors = None
conf_tmp_path = None
conf_check_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "checks"
)
conf_target_groups = {}
conf_c_prefix = 'NPY_'
conf_c_prefix_ = 'NPY__'
conf_cc_flags = dict(
gcc = dict(
# native should always fail on arm and ppc64,
# native usually works only with x86
native = '-march=native',
opt = '-O3',
werror = '-Werror',
),
clang = dict(
native = '-march=native',
opt = "-O3",
# One of the following flags needs to be applicable for Clang to
# guarantee the sanity of the testing process, however in certain
# cases `-Werror` gets skipped during the availability test due to
# "unused arguments" warnings.
# see https://github.com/numpy/numpy/issues/19624
werror = '-Werror=switch -Werror',
),
icc = dict(
native = '-xHost',
opt = '-O3',
werror = '-Werror',
),
iccw = dict(
native = '/QxHost',
opt = '/O3',
werror = '/Werror',
),
msvc = dict(
native = None,
opt = '/O2',
werror = '/WX',
)
)
conf_min_features = dict(
x86 = "SSE SSE2",
x64 = "SSE SSE2 SSE3",
ppc64 = '', # play it safe
ppc64le = "VSX VSX2",
s390x = '',
armhf = '', # play it safe
aarch64 = "NEON NEON_FP16 NEON_VFPV4 ASIMD"
)
conf_features = dict(
# X86
SSE = dict(
interest=1, headers="xmmintrin.h",
# enabling SSE without SSE2 is useless also
# it's non-optional for x86_64
implies="SSE2"
),
SSE2 = dict(interest=2, implies="SSE", headers="emmintrin.h"),
SSE3 = dict(interest=3, implies="SSE2", headers="pmmintrin.h"),
SSSE3 = dict(interest=4, implies="SSE3", headers="tmmintrin.h"),
SSE41 = dict(interest=5, implies="SSSE3", headers="smmintrin.h"),
POPCNT = dict(interest=6, implies="SSE41", headers="popcntintrin.h"),
SSE42 = dict(interest=7, implies="POPCNT"),
AVX = dict(
interest=8, implies="SSE42", headers="immintrin.h",
implies_detect=False
),
XOP = dict(interest=9, implies="AVX", headers="x86intrin.h"),
FMA4 = dict(interest=10, implies="AVX", headers="x86intrin.h"),
F16C = dict(interest=11, implies="AVX"),
FMA3 = dict(interest=12, implies="F16C"),
AVX2 = dict(interest=13, implies="F16C"),
AVX512F = dict(
interest=20, implies="FMA3 AVX2", implies_detect=False,
extra_checks="AVX512F_REDUCE"
),
AVX512CD = dict(interest=21, implies="AVX512F"),
AVX512_KNL = dict(
interest=40, implies="AVX512CD", group="AVX512ER AVX512PF",
detect="AVX512_KNL", implies_detect=False
),
AVX512_KNM = dict(
interest=41, implies="AVX512_KNL",
group="AVX5124FMAPS AVX5124VNNIW AVX512VPOPCNTDQ",
detect="AVX512_KNM", implies_detect=False
),
AVX512_SKX = dict(
interest=42, implies="AVX512CD", group="AVX512VL AVX512BW AVX512DQ",
detect="AVX512_SKX", implies_detect=False,
extra_checks="AVX512BW_MASK AVX512DQ_MASK"
),
AVX512_CLX = dict(
interest=43, implies="AVX512_SKX", group="AVX512VNNI",
detect="AVX512_CLX"
),
AVX512_CNL = dict(
interest=44, implies="AVX512_SKX", group="AVX512IFMA AVX512VBMI",
detect="AVX512_CNL", implies_detect=False
),
AVX512_ICL = dict(
interest=45, implies="AVX512_CLX AVX512_CNL",
group="AVX512VBMI2 AVX512BITALG AVX512VPOPCNTDQ",
detect="AVX512_ICL", implies_detect=False
),
# IBM/Power
## Power7/ISA 2.06
VSX = dict(interest=1, headers="altivec.h", extra_checks="VSX_ASM"),
## Power8/ISA 2.07
VSX2 = dict(interest=2, implies="VSX", implies_detect=False),
## Power9/ISA 3.00
VSX3 = dict(interest=3, implies="VSX2", implies_detect=False),
## Power10/ISA 3.1
VSX4 = dict(interest=4, implies="VSX3", implies_detect=False,
extra_checks="VSX4_MMA"),
# IBM/Z
## VX(z13) support
VX = dict(interest=1, headers="vecintrin.h"),
## Vector-Enhancements Facility
VXE = dict(interest=2, implies="VX", implies_detect=False),
## Vector-Enhancements Facility 2
VXE2 = dict(interest=3, implies="VXE", implies_detect=False),
# ARM
NEON = dict(interest=1, headers="arm_neon.h"),
NEON_FP16 = dict(interest=2, implies="NEON"),
## FMA
NEON_VFPV4 = dict(interest=3, implies="NEON_FP16"),
## Advanced SIMD
ASIMD = dict(interest=4, implies="NEON_FP16 NEON_VFPV4", implies_detect=False),
## ARMv8.2 half-precision & vector arithm
ASIMDHP = dict(interest=5, implies="ASIMD"),
## ARMv8.2 dot product
ASIMDDP = dict(interest=6, implies="ASIMD"),
## ARMv8.2 Single & half-precision Multiply
ASIMDFHM = dict(interest=7, implies="ASIMDHP"),
)
    def conf_features_partial(self):
        """Return a dictionary of the CPU features supported by the platform,
        and accumulate the rest of the undefined options in `conf_features`.
        The returned dict follows the same rules and notes as the
        class attribute `conf_features`, and it also overrides
        any options that have been set in 'conf_features'.
"""
if self.cc_noopt:
# optimization is disabled
return {}
on_x86 = self.cc_on_x86 or self.cc_on_x64
is_unix = self.cc_is_gcc or self.cc_is_clang
if on_x86 and is_unix: return dict(
SSE = dict(flags="-msse"),
SSE2 = dict(flags="-msse2"),
SSE3 = dict(flags="-msse3"),
SSSE3 = dict(flags="-mssse3"),
SSE41 = dict(flags="-msse4.1"),
POPCNT = dict(flags="-mpopcnt"),
SSE42 = dict(flags="-msse4.2"),
AVX = dict(flags="-mavx"),
F16C = dict(flags="-mf16c"),
XOP = dict(flags="-mxop"),
FMA4 = dict(flags="-mfma4"),
FMA3 = dict(flags="-mfma"),
AVX2 = dict(flags="-mavx2"),
AVX512F = dict(flags="-mavx512f -mno-mmx"),
AVX512CD = dict(flags="-mavx512cd"),
AVX512_KNL = dict(flags="-mavx512er -mavx512pf"),
AVX512_KNM = dict(
flags="-mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq"
),
AVX512_SKX = dict(flags="-mavx512vl -mavx512bw -mavx512dq"),
AVX512_CLX = dict(flags="-mavx512vnni"),
AVX512_CNL = dict(flags="-mavx512ifma -mavx512vbmi"),
AVX512_ICL = dict(
flags="-mavx512vbmi2 -mavx512bitalg -mavx512vpopcntdq"
)
)
if on_x86 and self.cc_is_icc: return dict(
SSE = dict(flags="-msse"),
SSE2 = dict(flags="-msse2"),
SSE3 = dict(flags="-msse3"),
SSSE3 = dict(flags="-mssse3"),
SSE41 = dict(flags="-msse4.1"),
POPCNT = {},
SSE42 = dict(flags="-msse4.2"),
AVX = dict(flags="-mavx"),
F16C = {},
XOP = dict(disable="Intel Compiler doesn't support it"),
FMA4 = dict(disable="Intel Compiler doesn't support it"),
# Intel Compiler doesn't support AVX2 or FMA3 independently
FMA3 = dict(
implies="F16C AVX2", flags="-march=core-avx2"
),
AVX2 = dict(implies="FMA3", flags="-march=core-avx2"),
# Intel Compiler doesn't support AVX512F or AVX512CD independently
AVX512F = dict(
implies="AVX2 AVX512CD", flags="-march=common-avx512"
),
AVX512CD = dict(
implies="AVX2 AVX512F", flags="-march=common-avx512"
),
AVX512_KNL = dict(flags="-xKNL"),
AVX512_KNM = dict(flags="-xKNM"),
AVX512_SKX = dict(flags="-xSKYLAKE-AVX512"),
AVX512_CLX = dict(flags="-xCASCADELAKE"),
AVX512_CNL = dict(flags="-xCANNONLAKE"),
AVX512_ICL = dict(flags="-xICELAKE-CLIENT"),
)
if on_x86 and self.cc_is_iccw: return dict(
SSE = dict(flags="/arch:SSE"),
SSE2 = dict(flags="/arch:SSE2"),
SSE3 = dict(flags="/arch:SSE3"),
SSSE3 = dict(flags="/arch:SSSE3"),
SSE41 = dict(flags="/arch:SSE4.1"),
POPCNT = {},
SSE42 = dict(flags="/arch:SSE4.2"),
AVX = dict(flags="/arch:AVX"),
F16C = {},
XOP = dict(disable="Intel Compiler doesn't support it"),
FMA4 = dict(disable="Intel Compiler doesn't support it"),
# Intel Compiler doesn't support FMA3 or AVX2 independently
FMA3 = dict(
implies="F16C AVX2", flags="/arch:CORE-AVX2"
),
AVX2 = dict(
implies="FMA3", flags="/arch:CORE-AVX2"
),
# Intel Compiler doesn't support AVX512F or AVX512CD independently
AVX512F = dict(
implies="AVX2 AVX512CD", flags="/Qx:COMMON-AVX512"
),
AVX512CD = dict(
implies="AVX2 AVX512F", flags="/Qx:COMMON-AVX512"
),
AVX512_KNL = dict(flags="/Qx:KNL"),
AVX512_KNM = dict(flags="/Qx:KNM"),
AVX512_SKX = dict(flags="/Qx:SKYLAKE-AVX512"),
AVX512_CLX = dict(flags="/Qx:CASCADELAKE"),
AVX512_CNL = dict(flags="/Qx:CANNONLAKE"),
AVX512_ICL = dict(flags="/Qx:ICELAKE-CLIENT")
)
if on_x86 and self.cc_is_msvc: return dict(
SSE = dict(flags="/arch:SSE") if self.cc_on_x86 else {},
SSE2 = dict(flags="/arch:SSE2") if self.cc_on_x86 else {},
SSE3 = {},
SSSE3 = {},
SSE41 = {},
POPCNT = dict(headers="nmmintrin.h"),
SSE42 = {},
AVX = dict(flags="/arch:AVX"),
F16C = {},
XOP = dict(headers="ammintrin.h"),
FMA4 = dict(headers="ammintrin.h"),
# MSVC doesn't support FMA3 or AVX2 independently
FMA3 = dict(
implies="F16C AVX2", flags="/arch:AVX2"
),
AVX2 = dict(
implies="F16C FMA3", flags="/arch:AVX2"
),
# MSVC doesn't support AVX512F or AVX512CD independently,
# always generate instructions belong to (VL/VW/DQ)
AVX512F = dict(
implies="AVX2 AVX512CD AVX512_SKX", flags="/arch:AVX512"
),
AVX512CD = dict(
implies="AVX512F AVX512_SKX", flags="/arch:AVX512"
),
AVX512_KNL = dict(
disable="MSVC compiler doesn't support it"
),
AVX512_KNM = dict(
disable="MSVC compiler doesn't support it"
),
AVX512_SKX = dict(flags="/arch:AVX512"),
AVX512_CLX = {},
AVX512_CNL = {},
AVX512_ICL = {}
)
on_power = self.cc_on_ppc64le or self.cc_on_ppc64
if on_power:
partial = dict(
VSX = dict(
implies=("VSX2" if self.cc_on_ppc64le else ""),
flags="-mvsx"
),
VSX2 = dict(
flags="-mcpu=power8", implies_detect=False
),
VSX3 = dict(
flags="-mcpu=power9 -mtune=power9", implies_detect=False
),
VSX4 = dict(
flags="-mcpu=power10 -mtune=power10", implies_detect=False
)
)
if self.cc_is_clang:
partial["VSX"]["flags"] = "-maltivec -mvsx"
partial["VSX2"]["flags"] = "-mpower8-vector"
partial["VSX3"]["flags"] = "-mpower9-vector"
partial["VSX4"]["flags"] = "-mpower10-vector"
return partial
on_zarch = self.cc_on_s390x
if on_zarch:
partial = dict(
VX = dict(
flags="-march=arch11 -mzvector"
),
VXE = dict(
flags="-march=arch12", implies_detect=False
),
VXE2 = dict(
flags="-march=arch13", implies_detect=False
)
)
return partial
if self.cc_on_aarch64 and is_unix: return dict(
NEON = dict(
implies="NEON_FP16 NEON_VFPV4 ASIMD", autovec=True
),
NEON_FP16 = dict(
implies="NEON NEON_VFPV4 ASIMD", autovec=True
),
NEON_VFPV4 = dict(
implies="NEON NEON_FP16 ASIMD", autovec=True
),
ASIMD = dict(
implies="NEON NEON_FP16 NEON_VFPV4", autovec=True
),
ASIMDHP = dict(
flags="-march=armv8.2-a+fp16"
),
ASIMDDP = dict(
flags="-march=armv8.2-a+dotprod"
),
ASIMDFHM = dict(
flags="-march=armv8.2-a+fp16fml"
),
)
if self.cc_on_armhf and is_unix: return dict(
NEON = dict(
flags="-mfpu=neon"
),
NEON_FP16 = dict(
flags="-mfpu=neon-fp16 -mfp16-format=ieee"
),
NEON_VFPV4 = dict(
flags="-mfpu=neon-vfpv4",
),
ASIMD = dict(
flags="-mfpu=neon-fp-armv8 -march=armv8-a+simd",
),
ASIMDHP = dict(
flags="-march=armv8.2-a+fp16"
),
ASIMDDP = dict(
flags="-march=armv8.2-a+dotprod",
),
ASIMDFHM = dict(
flags="-march=armv8.2-a+fp16fml"
)
)
# TODO: ARM MSVC
return {}
def __init__(self):
if self.conf_tmp_path is None:
import shutil
import tempfile
tmp = tempfile.mkdtemp()
def rm_temp():
try:
shutil.rmtree(tmp)
except OSError:
pass
atexit.register(rm_temp)
self.conf_tmp_path = tmp
if self.conf_cache_factors is None:
self.conf_cache_factors = [
os.path.getmtime(__file__),
self.conf_nocache
]
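
# A hedged illustration of how the feature table in _Config chains
# implications (illustrative only; the CCOMPILER_OPT_DEMO guard is an
# assumption, not an existing convention):
if os.environ.get('CCOMPILER_OPT_DEMO'):
    # SSE4.2 implies POPCNT, which in turn implies SSE4.1, and so on
    assert _Config.conf_features['SSE42']['implies'] == 'POPCNT'
    assert _Config.conf_features['POPCNT']['implies'] == 'SSE41'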
class _Distutils:
    """A helper class that provides a collection of fundamental methods
    implemented on top of Python and NumPy Distutils.
    The idea behind this class is to gather all the methods that may need
    to be overridden when 'CCompilerOpt' is reused in an environment
    different from NumPy's.
Parameters
----------
ccompiler : `CCompiler`
        The compiler instance returned by `distutils.ccompiler.new_compiler()`.
"""
def __init__(self, ccompiler):
self._ccompiler = ccompiler
def dist_compile(self, sources, flags, ccompiler=None, **kwargs):
"""Wrap CCompiler.compile()"""
assert(isinstance(sources, list))
assert(isinstance(flags, list))
flags = kwargs.pop("extra_postargs", []) + flags
if not ccompiler:
ccompiler = self._ccompiler
return ccompiler.compile(sources, extra_postargs=flags, **kwargs)
    def dist_test(self, source, flags, macros=[]):
        """Return True if 'CCompiler.compile()' is able to compile
        a source file with certain flags.
"""
assert(isinstance(source, str))
from distutils.errors import CompileError
        cc = self._ccompiler
bk_spawn = getattr(cc, 'spawn', None)
if bk_spawn:
cc_type = getattr(self._ccompiler, "compiler_type", "")
if cc_type in ("msvc",):
setattr(cc, 'spawn', self._dist_test_spawn_paths)
else:
setattr(cc, 'spawn', self._dist_test_spawn)
test = False
try:
self.dist_compile(
[source], flags, macros=macros, output_dir=self.conf_tmp_path
)
test = True
except CompileError as e:
self.dist_log(str(e), stderr=True)
if bk_spawn:
setattr(cc, 'spawn', bk_spawn)
return test
def dist_info(self):
"""
Return a tuple containing info about (platform, compiler, extra_args),
required by the abstract class '_CCompiler' for discovering the
platform environment. This is also used as a cache factor in order
to detect any changes happening from outside.
"""
if hasattr(self, "_dist_info"):
return self._dist_info
cc_type = getattr(self._ccompiler, "compiler_type", '')
if cc_type in ("intelem", "intelemw"):
platform = "x86_64"
elif cc_type in ("intel", "intelw", "intele"):
platform = "x86"
else:
from distutils.util import get_platform
platform = get_platform()
cc_info = getattr(self._ccompiler, "compiler", getattr(self._ccompiler, "compiler_so", ''))
if not cc_type or cc_type == "unix":
if hasattr(cc_info, "__iter__"):
compiler = cc_info[0]
else:
compiler = str(cc_info)
else:
compiler = cc_type
if hasattr(cc_info, "__iter__") and len(cc_info) > 1:
extra_args = ' '.join(cc_info[1:])
else:
extra_args = os.environ.get("CFLAGS", "")
extra_args += os.environ.get("CPPFLAGS", "")
self._dist_info = (platform, compiler, extra_args)
return self._dist_info
@staticmethod
def dist_error(*args):
"""Raise a compiler error"""
from distutils.errors import CompileError
raise CompileError(_Distutils._dist_str(*args))
@staticmethod
def dist_fatal(*args):
"""Raise a distutils error"""
from distutils.errors import DistutilsError
raise DistutilsError(_Distutils._dist_str(*args))
@staticmethod
def dist_log(*args, stderr=False):
"""Print a console message"""
from numpy.distutils import log
out = _Distutils._dist_str(*args)
if stderr:
log.warn(out)
else:
log.info(out)
@staticmethod
def dist_load_module(name, path):
"""Load a module from file, required by the abstract class '_Cache'."""
from .misc_util import exec_mod_from_location
try:
return exec_mod_from_location(name, path)
except Exception as e:
_Distutils.dist_log(e, stderr=True)
return None
@staticmethod
def _dist_str(*args):
"""Return a string to print by log and errors."""
def to_str(arg):
if not isinstance(arg, str) and hasattr(arg, '__iter__'):
ret = []
for a in arg:
ret.append(to_str(a))
return '('+ ' '.join(ret) + ')'
return str(arg)
stack = inspect.stack()[2]
start = "CCompilerOpt.%s[%d] : " % (stack.function, stack.lineno)
out = ' '.join([
to_str(a)
for a in (*args,)
])
return start + out
def _dist_test_spawn_paths(self, cmd, display=None):
"""
Fix the MSVC SDK ENV path, the same way distutils does;
without it we get: c1: fatal error C1356: unable to find mspdbcore.dll
"""
if not hasattr(self._ccompiler, "_paths"):
self._dist_test_spawn(cmd)
return
old_path = os.getenv("path")
try:
os.environ["path"] = self._ccompiler._paths
self._dist_test_spawn(cmd)
finally:
os.environ["path"] = old_path
_dist_warn_regex = re.compile(
# intel and msvc compilers don't raise
# fatal errors when flags are wrong or unsupported
".*("
"warning D9002|" # msvc, it should be work with any language.
"invalid argument for option" # intel
").*"
)
@staticmethod
def _dist_test_spawn(cmd, display=None):
try:
o = subprocess.check_output(cmd, stderr=subprocess.STDOUT,
universal_newlines=True)
if o and re.match(_Distutils._dist_warn_regex, o):
_Distutils.dist_error(
"Flags in command", cmd ,"aren't supported by the compiler"
", output -> \n%s" % o
)
except subprocess.CalledProcessError as exc:
o = exc.output
s = exc.returncode
except OSError as e:
o = e
s = 127
else:
return None
_Distutils.dist_error(
"Command", cmd, "failed with exit status %d output -> \n%s" % (
s, o
))
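# A hedged sketch of the technique `_dist_test_spawn` uses above: run the
# compiler, capture combined stdout/stderr, and treat the "soft" warnings
# that MSVC and Intel emit for unknown flags as failures. `probe_command`
# is an illustrative name, not part of the NumPy Distutils API.
def probe_command(cmd, warn_regex=r".*(warning D9002|invalid argument for option).*"):
    import re
    import subprocess
    try:
        out = subprocess.check_output(
            cmd, stderr=subprocess.STDOUT, universal_newlines=True
        )
    except (subprocess.CalledProcessError, OSError):
        return False  # hard failure: non-zero exit status or missing command
    # zero exit status, but the output may still flag an unsupported option
    return not (out and re.match(warn_regex, out))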
_share_cache = {}
class _Cache:
"""An abstract class handles caching functionality, provides two
levels of caching, in-memory by share instances attributes among
each other and by store attributes into files.
**Note**:
any attributes that start with ``_`` or ``conf_`` will be ignored.
Parameters
----------
cache_path : str or None
The path of cache file, if None then cache in file will disabled.
*factors :
The caching factors that need to utilize next to `conf_cache_factors`.
Attributes
----------
cache_private : set
Hold the attributes that need be skipped from "in-memory cache".
cache_infile : bool
Utilized during initializing this class, to determine if the cache was able
to loaded from the specified cache path in 'cache_path'.
"""
# skip attributes from cache
_cache_ignore = re.compile("^(_|conf_)")
def __init__(self, cache_path=None, *factors):
self.cache_me = {}
self.cache_private = set()
self.cache_infile = False
self._cache_path = None
if self.conf_nocache:
self.dist_log("cache is disabled by `Config`")
return
self._cache_hash = self.cache_hash(*factors, *self.conf_cache_factors)
self._cache_path = cache_path
if cache_path:
if os.path.exists(cache_path):
self.dist_log("load cache from file ->", cache_path)
cache_mod = self.dist_load_module("cache", cache_path)
if not cache_mod:
self.dist_log(
"unable to load the cache file as a module",
stderr=True
)
elif not hasattr(cache_mod, "hash") or \
not hasattr(cache_mod, "data"):
self.dist_log("invalid cache file", stderr=True)
elif self._cache_hash == cache_mod.hash:
self.dist_log("hit the file cache")
for attr, val in cache_mod.data.items():
setattr(self, attr, val)
self.cache_infile = True
else:
self.dist_log("miss the file cache")
if not self.cache_infile:
other_cache = _share_cache.get(self._cache_hash)
if other_cache:
self.dist_log("hit the memory cache")
for attr, val in other_cache.__dict__.items():
if attr in other_cache.cache_private or \
re.match(self._cache_ignore, attr):
continue
setattr(self, attr, val)
_share_cache[self._cache_hash] = self
atexit.register(self.cache_flush)
def __del__(self):
for h, o in _share_cache.items():
if o == self:
_share_cache.pop(h)
break
def cache_flush(self):
"""
Force update the cache.
"""
if not self._cache_path:
return
# TODO: don't write if the cache doesn't change
self.dist_log("write cache to path ->", self._cache_path)
cdict = self.__dict__.copy()
for attr in self.__dict__.keys():
if re.match(self._cache_ignore, attr):
cdict.pop(attr)
d = os.path.dirname(self._cache_path)
if not os.path.exists(d):
os.makedirs(d)
repr_dict = pprint.pformat(cdict, compact=True)
with open(self._cache_path, "w") as f:
f.write(textwrap.dedent("""\
# AUTOGENERATED DON'T EDIT
# Please make changes to the code generator \
(distutils/ccompiler_opt.py)
hash = {}
data = \\
""").format(self._cache_hash))
f.write(repr_dict)
def cache_hash(self, *factors):
# is there a built-in non-crypto hash?
# sdbm
chash = 0
for f in factors:
for char in str(f):
chash = ord(char) + (chash << 6) + (chash << 16) - chash
chash &= 0xFFFFFFFF
return chash
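# The loop above implements the classic "sdbm" string hash, folded to 32
# bits. As a standalone function it would look roughly like this
# (illustrative sketch, not part of the API):
#
#     def sdbm_hash(*factors):
#         h = 0
#         for f in factors:
#             for ch in str(f):
#                 h = (ord(ch) + (h << 6) + (h << 16) - h) & 0xFFFFFFFF
#         return h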
@staticmethod
def me(cb):
"""
A static method that can be treated as a decorator to
dynamically cache certain methods.
"""
def cache_wrap_me(self, *args, **kwargs):
# good for normal args
cache_key = str((
cb.__name__, *args, *kwargs.keys(), *kwargs.values()
))
if cache_key in self.cache_me:
return self.cache_me[cache_key]
ccb = cb(self, *args, **kwargs)
self.cache_me[cache_key] = ccb
return ccb
return cache_wrap_me
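# A stripped-down, self-contained variant of the `_Cache.me` decorator above,
# for illustration only; the real decorator stores results in the instance's
# `cache_me` dict so they can take part in the memory/file cache.
def memoize_method(cb):
    def wrapper(self, *args, **kwargs):
        # like `_Cache.me`, key on stringified arguments, which is adequate
        # for the simple str/list/tuple arguments used in this module
        key = str((cb.__name__, *args, *kwargs.keys(), *kwargs.values()))
        cache = getattr(self, "_memo", None)
        if cache is None:
            cache = self._memo = {}
        if key not in cache:
            cache[key] = cb(self, *args, **kwargs)
        return cache[key]
    return wrapper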
class _CCompiler:
"""A helper class for `CCompilerOpt` containing all utilities that
related to the fundamental compiler's functions.
Attributes
----------
cc_on_x86 : bool
True when the target architecture is 32-bit x86
cc_on_x64 : bool
True when the target architecture is 64-bit x86
cc_on_ppc64 : bool
True when the target architecture is 64-bit big-endian powerpc
cc_on_ppc64le : bool
True when the target architecture is 64-bit little-endian powerpc
cc_on_s390x : bool
True when the target architecture is IBM/ZARCH on linux
cc_on_armhf : bool
True when the target architecture is 32-bit ARMv7+
cc_on_aarch64 : bool
True when the target architecture is 64-bit Armv8-a+
cc_on_noarch : bool
True when the target architecture is unknown or not supported
cc_is_gcc : bool
True if the compiler is GNU or
if the compiler is unknown
cc_is_clang : bool
True if the compiler is Clang
cc_is_icc : bool
True if the compiler is Intel compiler (unix like)
cc_is_iccw : bool
True if the compiler is Intel compiler (msvc like)
cc_is_nocc : bool
True if the compiler isn't supported directly.
Note: this causes a fallback to gcc
cc_has_debug : bool
True if the compiler has debug flags
cc_has_native : bool
True if the compiler has native flags
cc_noopt : bool
True if the compiler has definition 'DISABLE_OPT*',
or 'cc_on_noarch' is True
cc_march : str
The target architecture name, or "unknown" if
the architecture isn't supported
cc_name : str
The compiler name, or "unknown" if the compiler isn't supported
cc_flags : dict
Dictionary containing the initialized flags of `_Config.conf_cc_flags`
"""
def __init__(self):
if hasattr(self, "cc_is_cached"):
return
# attr regex compiler-expression
detect_arch = (
("cc_on_x64", ".*(x|x86_|amd)64.*", ""),
("cc_on_x86", ".*(win32|x86|i386|i686).*", ""),
("cc_on_ppc64le", ".*(powerpc|ppc)64(el|le).*", ""),
("cc_on_ppc64", ".*(powerpc|ppc)64.*", ""),
("cc_on_aarch64", ".*(aarch64|arm64).*", ""),
("cc_on_armhf", ".*arm.*", "defined(__ARM_ARCH_7__) || "
"defined(__ARM_ARCH_7A__)"),
("cc_on_s390x", ".*s390x.*", ""),
# undefined platform
("cc_on_noarch", "", ""),
)
detect_compiler = (
("cc_is_gcc", r".*(gcc|gnu\-g).*", ""),
("cc_is_clang", ".*clang.*", ""),
# intel msvc like
("cc_is_iccw", ".*(intelw|intelemw|iccw).*", ""),
("cc_is_icc", ".*(intel|icc).*", ""), # intel unix like
("cc_is_msvc", ".*msvc.*", ""),
# undefined compiler will be treated as gcc
("cc_is_nocc", "", ""),
)
detect_args = (
("cc_has_debug", ".*(O0|Od|ggdb|coverage|debug:full).*", ""),
("cc_has_native", ".*(-march=native|-xHost|/QxHost).*", ""),
# in case the class runs with -DNPY_DISABLE_OPTIMIZATION
("cc_noopt", ".*DISABLE_OPT.*", ""),
)
dist_info = self.dist_info()
platform, compiler_info, extra_args = dist_info
# set False to all attrs
for section in (detect_arch, detect_compiler, detect_args):
for attr, rgex, cexpr in section:
setattr(self, attr, False)
for detect, searchin in ((detect_arch, platform), (detect_compiler, compiler_info)):
for attr, rgex, cexpr in detect:
if rgex and not re.match(rgex, searchin, re.IGNORECASE):
continue
if cexpr and not self.cc_test_cexpr(cexpr):
continue
setattr(self, attr, True)
break
for attr, rgex, cexpr in detect_args:
if rgex and not re.match(rgex, extra_args, re.IGNORECASE):
continue
if cexpr and not self.cc_test_cexpr(cexpr):
continue
setattr(self, attr, True)
if self.cc_on_noarch:
self.dist_log(
"unable to detect CPU architecture which lead to disable the optimization. "
f"check dist_info:<<\n{dist_info}\n>>",
stderr=True
)
self.cc_noopt = True
if self.conf_noopt:
self.dist_log("Optimization is disabled by the Config", stderr=True)
self.cc_noopt = True
if self.cc_is_nocc:
"""
mingw can be treated as a gcc, and also xlc even if it based on clang,
but still has the same gcc optimization flags.
"""
self.dist_log(
"unable to detect compiler type which leads to treating it as GCC. "
"this is a normal behavior if you're using gcc-like compiler such as MinGW or IBM/XLC."
f"check dist_info:<<\n{dist_info}\n>>",
stderr=True
)
self.cc_is_gcc = True
self.cc_march = "unknown"
for arch in ("x86", "x64", "ppc64", "ppc64le",
"armhf", "aarch64", "s390x"):
if getattr(self, "cc_on_" + arch):
self.cc_march = arch
break
self.cc_name = "unknown"
for name in ("gcc", "clang", "iccw", "icc", "msvc"):
if getattr(self, "cc_is_" + name):
self.cc_name = name
break
self.cc_flags = {}
compiler_flags = self.conf_cc_flags.get(self.cc_name)
if compiler_flags is None:
self.dist_fatal(
"undefined flag for compiler '%s', "
"leave an empty dict instead" % self.cc_name
)
for name, flags in compiler_flags.items():
self.cc_flags[name] = nflags = []
if flags:
assert(isinstance(flags, str))
flags = flags.split()
for f in flags:
if self.cc_test_flags([f]):
nflags.append(f)
self.cc_is_cached = True
@_Cache.me
def cc_test_flags(self, flags):
"""
Returns True if the compiler supports 'flags'.
"""
assert(isinstance(flags, list))
self.dist_log("testing flags", flags)
test_path = os.path.join(self.conf_check_path, "test_flags.c")
test = self.dist_test(test_path, flags)
if not test:
self.dist_log("testing failed", stderr=True)
return test
@_Cache.me
def cc_test_cexpr(self, cexpr, flags=[]):
"""
Same as the above but supports compile-time expressions.
"""
self.dist_log("testing compiler expression", cexpr)
test_path = os.path.join(self.conf_tmp_path, "npy_dist_test_cexpr.c")
with open(test_path, "w") as fd:
fd.write(textwrap.dedent(f"""\
#if !({cexpr})
#error "unsupported expression"
#endif
int dummy;
"""))
test = self.dist_test(test_path, flags)
if not test:
self.dist_log("testing failed", stderr=True)
return test
def cc_normalize_flags(self, flags):
"""
Remove the conflicts caused by gathering the implied features' flags.
Parameters
----------
'flags' list, compiler flags
flags should be sorted from the lowest to the highest interest.
Returns
-------
list, filtered from any conflicts.
Examples
--------
>>> self.cc_normalize_flags(['-march=armv8.2-a+fp16', '-march=armv8.2-a+dotprod'])
['-march=armv8.2-a+fp16+dotprod']
>>> self.cc_normalize_flags(
['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-mavx', '-march=core-avx2']
)
['-march=core-avx2']
"""
assert(isinstance(flags, list))
if self.cc_is_gcc or self.cc_is_clang or self.cc_is_icc:
return self._cc_normalize_unix(flags)
if self.cc_is_msvc or self.cc_is_iccw:
return self._cc_normalize_win(flags)
return flags
_cc_normalize_unix_mrgx = re.compile(
# 1- to check the highest of
r"^(-mcpu=|-march=|-x[A-Z0-9\-])"
)
_cc_normalize_unix_frgx = re.compile(
# 2- to remove any flags that start with
# -march, -mcpu, -x(INTEL) and '-m' without '='
r"^(?!(-mcpu=|-march=|-x[A-Z0-9\-]|-m[a-z0-9\-\.]*.$))|"
# exclude:
r"(?:-mzvector)"
)
_cc_normalize_unix_krgx = re.compile(
# 3- keep only the highest of
r"^(-mfpu|-mtune)"
)
_cc_normalize_arch_ver = re.compile(
r"[0-9.]"
)
def _cc_normalize_unix(self, flags):
def ver_flags(f):
# arch ver subflag
# -march=armv8.2-a+fp16fml
tokens = f.split('+')
ver = float('0' + ''.join(
re.findall(self._cc_normalize_arch_ver, tokens[0])
))
return ver, tokens[0], tokens[1:]
if len(flags) <= 1:
return flags
# get the highest matched flag
for i, cur_flag in enumerate(reversed(flags)):
if not re.match(self._cc_normalize_unix_mrgx, cur_flag):
continue
lower_flags = flags[:-(i+1)]
upper_flags = flags[-i:]
filterd = list(filter(
self._cc_normalize_unix_frgx.search, lower_flags
))
# gather subflags
ver, arch, subflags = ver_flags(cur_flag)
if ver > 0 and len(subflags) > 0:
for xflag in lower_flags:
xver, _, xsubflags = ver_flags(xflag)
if ver == xver:
subflags = xsubflags + subflags
cur_flag = arch + '+' + '+'.join(subflags)
flags = filterd + [cur_flag]
if i > 0:
flags += upper_flags
break
# to remove overridable flags
final_flags = []
matched = set()
for f in reversed(flags):
match = re.match(self._cc_normalize_unix_krgx, f)
if not match:
pass
elif match[0] in matched:
continue
else:
matched.add(match[0])
final_flags.insert(0, f)
return final_flags
_cc_normalize_win_frgx = re.compile(
r"^(?!(/arch\:|/Qx\:))"
)
_cc_normalize_win_mrgx = re.compile(
r"^(/arch|/Qx:)"
)
def _cc_normalize_win(self, flags):
for i, f in enumerate(reversed(flags)):
if not re.match(self._cc_normalize_win_mrgx, f):
continue
i += 1
return list(filter(
self._cc_normalize_win_frgx.search, flags[:-i]
)) + flags[-i:]
return flags
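# A small, hedged demonstration of the Windows normalization above: keep
# everything else, but let the *last* /arch: (or Intel /Qx:) flag win.
# The regexes are re-created locally so the snippet stays self-contained.
def _demo_normalize_win(flags):
    import re
    keep_last = re.compile(r"^(/arch|/Qx:)")
    drop_arch = re.compile(r"^(?!(/arch\:|/Qx\:))")
    for i, f in enumerate(reversed(flags)):
        if not re.match(keep_last, f):
            continue
        i += 1
        # strip earlier /arch:-style flags, then append the tail as-is
        return list(filter(drop_arch.search, flags[:-i])) + flags[-i:]
    return flags
# e.g. _demo_normalize_win(["/arch:SSE2", "/fp:fast", "/arch:AVX2"])
# returns ["/fp:fast", "/arch:AVX2"]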
class _Feature:
"""A helper class for `CCompilerOpt` that managing CPU features.
Attributes
----------
feature_supported : dict
Dictionary containing all CPU features supported
by the platform, according to the specified values in attribute
`_Config.conf_features` and `_Config.conf_features_partial()`
feature_min : set
The minimum support of CPU features, according to
the specified values in attribute `_Config.conf_min_features`.
"""
def __init__(self):
if hasattr(self, "feature_is_cached"):
return
self.feature_supported = pfeatures = self.conf_features_partial()
for feature_name in list(pfeatures.keys()):
feature = pfeatures[feature_name]
cfeature = self.conf_features[feature_name]
feature.update({
k:v for k,v in cfeature.items() if k not in feature
})
disabled = feature.get("disable")
if disabled is not None:
pfeatures.pop(feature_name)
self.dist_log(
"feature '%s' is disabled," % feature_name,
disabled, stderr=True
)
continue
# list is used internally for these options
for option in (
"implies", "group", "detect", "headers", "flags", "extra_checks"
) :
oval = feature.get(option)
if isinstance(oval, str):
feature[option] = oval.split()
self.feature_min = set()
min_f = self.conf_min_features.get(self.cc_march, "")
for F in min_f.upper().split():
if F in self.feature_supported:
self.feature_min.add(F)
self.feature_is_cached = True
def feature_names(self, names=None, force_flags=None, macros=[]):
"""
Returns a set of CPU feature names supported by the platform and the **C** compiler.
Parameters
----------
names : sequence or None, optional
Specify certain CPU features to test against the **C** compiler.
If None (default), it will test all currently supported features.
**Note**: feature names must be in upper-case.
force_flags : list or None, optional
If None (default), default compiler flags for every CPU feature will
be used during the test.
macros : list of tuples, optional
A list of C macro definitions.
"""
assert(
names is None or (
not isinstance(names, str) and
hasattr(names, "__iter__")
)
)
assert(force_flags is None or isinstance(force_flags, list))
if names is None:
names = self.feature_supported.keys()
supported_names = set()
for f in names:
if self.feature_is_supported(
f, force_flags=force_flags, macros=macros
):
supported_names.add(f)
return supported_names
def feature_is_exist(self, name):
"""
Returns True if a certain feature exists and is covered within
`_Config.conf_features`.
Parameters
----------
'name': str
feature name in uppercase.
"""
assert(name.isupper())
return name in self.conf_features
def feature_sorted(self, names, reverse=False):
"""
Sort a list of CPU features ordered by the lowest interest.
Parameters
----------
'names': sequence
sequence of supported feature names in uppercase.
'reverse': bool, optional
If true, the sorted features are reversed. (highest interest)
Returns
-------
list, sorted CPU features
"""
def sort_cb(k):
if isinstance(k, str):
return self.feature_supported[k]["interest"]
# multiple features
rank = max([self.feature_supported[f]["interest"] for f in k])
# FIXME: that's not a safe way to increase the rank for
# multi targets
rank += len(k) - 1
return rank
return sorted(names, reverse=reverse, key=sort_cb)
def feature_implies(self, names, keep_origins=False):
"""
Return a set of CPU features that are implied by 'names'
Parameters
----------
names : str or sequence of str
CPU feature name(s) in uppercase.
keep_origins : bool
if False (default) then the returned set will not contain any
features from 'names'. This case happens only when two features
imply each other.
Examples
--------
>>> self.feature_implies("SSE3")
{'SSE', 'SSE2'}
>>> self.feature_implies("SSE2")
{'SSE'}
>>> self.feature_implies("SSE2", keep_origins=True)
# 'SSE2' found here since 'SSE' and 'SSE2' imply each other
{'SSE', 'SSE2'}
"""
def get_implies(name, _caller=set()):
implies = set()
d = self.feature_supported[name]
for i in d.get("implies", []):
implies.add(i)
if i in _caller:
# infinite recursion guard since
# features can imply each other
continue
_caller.add(name)
implies = implies.union(get_implies(i, _caller))
return implies
if isinstance(names, str):
implies = get_implies(names)
names = [names]
else:
assert(hasattr(names, "__iter__"))
implies = set()
for n in names:
implies = implies.union(get_implies(n))
if not keep_origins:
implies.difference_update(names)
return implies
def feature_implies_c(self, names):
"""same as feature_implies() but combining 'names'"""
if isinstance(names, str):
names = set((names,))
else:
names = set(names)
return names.union(self.feature_implies(names))
def feature_ahead(self, names):
"""
Return a list of features in 'names' after removing any
implied features, keeping the origins.
Parameters
----------
'names': sequence
sequence of CPU feature names in uppercase.
Returns
-------
list of CPU features sorted in the same order as 'names'
Examples
--------
>>> self.feature_ahead(["SSE2", "SSE3", "SSE41"])
["SSE41"]
# assume AVX2 and FMA3 imply each other and AVX2
# is the highest interest
>>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"])
["AVX2"]
# assume AVX2 and FMA3 don't imply each other
>>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"])
["AVX2", "FMA3"]
"""
assert(
not isinstance(names, str)
and hasattr(names, '__iter__')
)
implies = self.feature_implies(names, keep_origins=True)
ahead = [n for n in names if n not in implies]
if len(ahead) == 0:
# return the feature with the highest interest
# if all features imply each other
ahead = self.feature_sorted(names, reverse=True)[:1]
return ahead
def feature_untied(self, names):
"""
same as 'feature_ahead()' but it also removes one of any two features
that imply each other, keeping the one with the highest interest.
Parameters
----------
'names': sequence
sequence of CPU feature names in uppercase.
Returns
-------
list of CPU features sorted in the same order as 'names'
Examples
--------
>>> self.feature_untied(["SSE2", "SSE3", "SSE41"])
["SSE2", "SSE3", "SSE41"]
# assume AVX2 and FMA3 implies each other
>>> self.feature_untied(["SSE2", "SSE3", "SSE41", "FMA3", "AVX2"])
["SSE2", "SSE3", "SSE41", "AVX2"]
"""
assert(
not isinstance(names, str)
and hasattr(names, '__iter__')
)
final = []
for n in names:
implies = self.feature_implies(n)
tied = [
nn for nn in final
if nn in implies and n in self.feature_implies(nn)
]
if tied:
tied = self.feature_sorted(tied + [n])
if n not in tied[1:]:
continue
final.remove(tied[:1][0])
final.append(n)
return final
def feature_get_til(self, names, keyisfalse):
"""
same as `feature_implies_c()` but it stops collecting implied
features when a feature's option, provided through the
parameter 'keyisfalse', is False; it also sorts the returned
features.
"""
def til(tnames):
# sort from highest to lowest interest then cut if "key" is False
tnames = self.feature_implies_c(tnames)
tnames = self.feature_sorted(tnames, reverse=True)
for i, n in enumerate(tnames):
if not self.feature_supported[n].get(keyisfalse, True):
tnames = tnames[:i+1]
break
return tnames
if isinstance(names, str) or len(names) <= 1:
names = til(names)
# normalize the sort
names.reverse()
return names
names = self.feature_ahead(names)
names = {t for n in names for t in til(n)}
return self.feature_sorted(names)
def feature_detect(self, names):
"""
Return a list of CPU features that are required to be detected,
sorted from the lowest to the highest interest.
"""
names = self.feature_get_til(names, "implies_detect")
detect = []
for n in names:
d = self.feature_supported[n]
detect += d.get("detect", d.get("group", [n]))
return detect
@_Cache.me
def feature_flags(self, names):
"""
Return a list of CPU features flags sorted from the lowest
to highest interest.
"""
names = self.feature_sorted(self.feature_implies_c(names))
flags = []
for n in names:
d = self.feature_supported[n]
f = d.get("flags", [])
if not f or not self.cc_test_flags(f):
continue
flags += f
return self.cc_normalize_flags(flags)
@_Cache.me
def feature_test(self, name, force_flags=None, macros=[]):
"""
Test a certain CPU feature against the compiler through its own
check file.
Parameters
----------
name : str
Supported CPU feature name.
force_flags : list or None, optional
If None (default), the returned flags from `feature_flags()`
will be used.
macros : list of tuples, optional
A list of C macro definitions.
"""
if force_flags is None:
force_flags = self.feature_flags(name)
self.dist_log(
"testing feature '%s' with flags (%s)" % (
name, ' '.join(force_flags)
))
# Each CPU feature must have C source code containing at
# least one intrinsic or instruction related to this feature.
test_path = os.path.join(
self.conf_check_path, "cpu_%s.c" % name.lower()
)
if not os.path.exists(test_path):
self.dist_fatal("feature test file is not exist", test_path)
test = self.dist_test(
test_path, force_flags + self.cc_flags["werror"], macros=macros
)
if not test:
self.dist_log("testing failed", stderr=True)
return test
@_Cache.me
def feature_is_supported(self, name, force_flags=None, macros=[]):
"""
Check if a certain CPU feature is supported by the platform and compiler.
Parameters
----------
name : str
CPU feature name in uppercase.
force_flags : list or None, optional
If None (default), default compiler flags for every CPU feature will
be used during test.
macros : list of tuples, optional
A list of C macro definitions.
"""
assert(name.isupper())
assert(force_flags is None or isinstance(force_flags, list))
supported = name in self.feature_supported
if supported:
for impl in self.feature_implies(name):
if not self.feature_test(impl, force_flags, macros=macros):
return False
if not self.feature_test(name, force_flags, macros=macros):
return False
return supported
@_Cache.me
def feature_can_autovec(self, name):
"""
check if the feature can be auto-vectorized by the compiler
"""
assert(isinstance(name, str))
d = self.feature_supported[name]
can = d.get("autovec", None)
if can is None:
valid_flags = [
self.cc_test_flags([f]) for f in d.get("flags", [])
]
can = valid_flags and any(valid_flags)
return can
@_Cache.me
def feature_extra_checks(self, name):
"""
Return a list of supported extra checks after testing them against
the compiler.
Parameters
----------
name : str
CPU feature name in uppercase.
"""
assert isinstance(name, str)
d = self.feature_supported[name]
extra_checks = d.get("extra_checks", [])
if not extra_checks:
return []
self.dist_log("Testing extra checks for feature '%s'" % name, extra_checks)
flags = self.feature_flags(name)
available = []
not_available = []
for chk in extra_checks:
test_path = os.path.join(
self.conf_check_path, "extra_%s.c" % chk.lower()
)
if not os.path.exists(test_path):
self.dist_fatal("extra check file does not exist", test_path)
is_supported = self.dist_test(test_path, flags + self.cc_flags["werror"])
if is_supported:
available.append(chk)
else:
not_available.append(chk)
if not_available:
self.dist_log("testing failed for checks", not_available, stderr=True)
return available
def feature_c_preprocessor(self, feature_name, tabs=0):
"""
Generate C preprocessor definitions and include headers of a CPU feature.
Parameters
----------
'feature_name': str
CPU feature name in uppercase.
'tabs': int
if > 0, align the generated strings to the right depending on the number of tabs.
Returns
-------
str, generated C preprocessor
Examples
--------
>>> self.feature_c_preprocessor("SSE3")
/** SSE3 **/
#define NPY_HAVE_SSE3 1
#include <pmmintrin.h>
"""
assert(feature_name.isupper())
feature = self.feature_supported.get(feature_name)
assert(feature is not None)
prepr = [
"/** %s **/" % feature_name,
"#define %sHAVE_%s 1" % (self.conf_c_prefix, feature_name)
]
prepr += [
"#include <%s>" % h for h in feature.get("headers", [])
]
extra_defs = feature.get("group", [])
extra_defs += self.feature_extra_checks(feature_name)
for edef in extra_defs:
# Guard extra definitions in case of duplicate with
# another feature
prepr += [
"#ifndef %sHAVE_%s" % (self.conf_c_prefix, edef),
"\t#define %sHAVE_%s 1" % (self.conf_c_prefix, edef),
"#endif",
]
if tabs > 0:
prepr = [('\t'*tabs) + l for l in prepr]
return '\n'.join(prepr)
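# The implication logic in `_Feature.feature_implies` above is a transitive
# closure over a graph that may contain cycles, since features can imply
# each other. A minimal standalone sketch of the same traversal over a plain
# dict (the graph and helper names are illustrative, not part of the API):
def _implies_closure(graph, name, _seen=None):
    """Return every feature transitively implied by `name`."""
    if _seen is None:
        _seen = set()
    implied = set()
    for dep in graph.get(name, ()):
        implied.add(dep)
        if dep in _seen:
            continue  # cycle guard for SSE/SSE2-style mutual implication
        _seen.add(name)
        implied |= _implies_closure(graph, dep, _seen)
    return implied
# e.g. _implies_closure({"SSE3": ["SSE2"], "SSE2": ["SSE"], "SSE": ["SSE2"]},
#                       "SSE3") == {"SSE", "SSE2"}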
class _Parse:
"""A helper class that parsing main arguments of `CCompilerOpt`,
also parsing configuration statements in dispatch-able sources.
Parameters
----------
cpu_baseline : str or None
minimal set of required CPU features or special options.
cpu_dispatch : str or None
dispatched set of additional CPU features or special options.
Special options can be:
- **MIN**: Enables the minimum CPU features specified via `_Config.conf_min_features`
- **MAX**: Enables all CPU features supported by the compiler and platform.
- **NATIVE**: Enables all CPU features supported by the current machine.
- **NONE**: Enables nothing
- **Operand +/-**: remove or add features, useful with options **MAX**, **MIN** and **NATIVE**.
NOTE: operand + is only added for nominal reasons.
NOTES:
- Case-insensitive among all CPU features and special options.
- Comma or space can be used as a separator.
- If a CPU feature is not supported by the user's platform or compiler,
it will be skipped rather than raising a fatal error.
- Any CPU features specified in 'cpu_dispatch' will be skipped if they're part of the CPU baseline features.
- 'cpu_baseline' force-enables implied features.
Attributes
----------
parse_baseline_names : list
Final CPU baseline feature names (sorted from low to high)
parse_baseline_flags : list
Compiler flags of baseline features
parse_dispatch_names : list
Final CPU dispatch-able feature names (sorted from low to high)
parse_target_groups : dict
Dictionary containing the initialized target groups that are configured
through the class attribute `conf_target_groups`.
The key represents the group name and the value is a tuple
containing three items:
- bool, True if the group has the 'baseline' option.
- list, list of CPU features.
- list, list of extra compiler flags.
"""
def __init__(self, cpu_baseline, cpu_dispatch):
self._parse_policies = dict(
# POLICY NAME, (HAVE, NOT HAVE, [DEB])
KEEP_BASELINE = (
None, self._parse_policy_not_keepbase,
[]
),
KEEP_SORT = (
self._parse_policy_keepsort,
self._parse_policy_not_keepsort,
[]
),
MAXOPT = (
self._parse_policy_maxopt, None,
[]
),
WERROR = (
self._parse_policy_werror, None,
[]
),
AUTOVEC = (
self._parse_policy_autovec, None,
["MAXOPT"]
)
)
if hasattr(self, "parse_is_cached"):
return
self.parse_baseline_names = []
self.parse_baseline_flags = []
self.parse_dispatch_names = []
self.parse_target_groups = {}
if self.cc_noopt:
# skip parsing baseline and dispatch args and keep parsing target groups
cpu_baseline = cpu_dispatch = None
self.dist_log("check requested baseline")
if cpu_baseline is not None:
cpu_baseline = self._parse_arg_features("cpu_baseline", cpu_baseline)
baseline_names = self.feature_names(cpu_baseline)
self.parse_baseline_flags = self.feature_flags(baseline_names)
self.parse_baseline_names = self.feature_sorted(
self.feature_implies_c(baseline_names)
)
self.dist_log("check requested dispatch-able features")
if cpu_dispatch is not None:
cpu_dispatch_ = self._parse_arg_features("cpu_dispatch", cpu_dispatch)
cpu_dispatch = {
f for f in cpu_dispatch_
if f not in self.parse_baseline_names
}
conflict_baseline = cpu_dispatch_.difference(cpu_dispatch)
self.parse_dispatch_names = self.feature_sorted(
self.feature_names(cpu_dispatch)
)
if len(conflict_baseline) > 0:
self.dist_log(
"skip features", conflict_baseline, "since its part of baseline"
)
self.dist_log("initialize targets groups")
for group_name, tokens in self.conf_target_groups.items():
self.dist_log("parse target group", group_name)
GROUP_NAME = group_name.upper()
if not tokens or not tokens.strip():
# allow empty groups, useful in case there's a need
# to disable a certain group, since '_parse_target_tokens()'
# requires at least one valid target
self.parse_target_groups[GROUP_NAME] = (
False, [], []
)
continue
has_baseline, features, extra_flags = \
self._parse_target_tokens(tokens)
self.parse_target_groups[GROUP_NAME] = (
has_baseline, features, extra_flags
)
self.parse_is_cached = True
def parse_targets(self, source):
"""
Fetch and parse the configuration statements required for
defining the targeted CPU features. Statements should be declared
at the top of the source within a **C** comment and start
with the special mark **@targets**.
Configuration statements are a sort of keywords representing
CPU feature names, groups of statements and policies, combined
together to determine the required optimization.
Parameters
----------
source : str
the path of **C** source file.
Returns
-------
- bool, True if group has the 'baseline' option
- list, list of CPU features
- list, list of extra compiler flags
"""
self.dist_log("looking for '@targets' inside -> ", source)
# get lines between /*@targets and */
with open(source) as fd:
tokens = ""
max_to_reach = 1000 # good enough, isn't it?
start_with = "@targets"
start_pos = -1
end_with = "*/"
end_pos = -1
for current_line, line in enumerate(fd):
if current_line == max_to_reach:
self.dist_fatal("reached the max of lines")
break
if start_pos == -1:
start_pos = line.find(start_with)
if start_pos == -1:
continue
start_pos += len(start_with)
tokens += line
end_pos = line.find(end_with)
if end_pos != -1:
end_pos += len(tokens) - len(line)
break
if start_pos == -1:
self.dist_fatal("expected to find '%s' within a C comment" % start_with)
if end_pos == -1:
self.dist_fatal("expected to end with '%s'" % end_with)
tokens = tokens[start_pos:end_pos]
return self._parse_target_tokens(tokens)
_parse_regex_arg = re.compile(r'\s|,|([+-])')
def _parse_arg_features(self, arg_name, req_features):
if not isinstance(req_features, str):
self.dist_fatal("expected a string in '%s'" % arg_name)
final_features = set()
# space and comma can be used as a separator
tokens = list(filter(None, re.split(self._parse_regex_arg, req_features)))
append = True # append is the default
for tok in tokens:
if tok[0] in ("#", "$"):
self.dist_fatal(
arg_name, "target groups and policies "
"aren't allowed from arguments, "
"only from dispatch-able sources"
)
if tok == '+':
append = True
continue
if tok == '-':
append = False
continue
TOK = tok.upper() # we use upper-case internally
features_to = set()
if TOK == "NONE":
pass
elif TOK == "NATIVE":
native = self.cc_flags["native"]
if not native:
self.dist_fatal(arg_name,
"native option isn't supported by the compiler"
)
features_to = self.feature_names(
force_flags=native, macros=[("DETECT_FEATURES", 1)]
)
elif TOK == "MAX":
features_to = self.feature_supported.keys()
elif TOK == "MIN":
features_to = self.feature_min
else:
if TOK in self.feature_supported:
features_to.add(TOK)
else:
if not self.feature_is_exist(TOK):
self.dist_fatal(arg_name,
", '%s' isn't a known feature or option" % tok
)
if append:
final_features = final_features.union(features_to)
else:
final_features = final_features.difference(features_to)
append = True # back to default
return final_features
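# Example of the parsing above (hedged; the feature names are illustrative):
# cpu_baseline="min avx2 -sse41" tokenizes to ['min', 'avx2', '-', 'sse41'];
# MIN seeds the set, AVX2 is unioned in, and the '-' flips the mode so that
# the very next token (SSE41) is subtracted. Note that `append` snaps back
# to the default (add) after every feature token.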
_parse_regex_target = re.compile(r'\s|[*,/]|([()])')
def _parse_target_tokens(self, tokens):
assert(isinstance(tokens, str))
final_targets = [] # to keep it sorted as specified
extra_flags = []
has_baseline = False
skipped = set()
policies = set()
multi_target = None
tokens = list(filter(None, re.split(self._parse_regex_target, tokens)))
if not tokens:
self.dist_fatal("expected one token at least")
for tok in tokens:
TOK = tok.upper()
ch = tok[0]
if ch in ('+', '-'):
self.dist_fatal(
"+/- are 'not' allowed from target's groups or @targets, "
"only from cpu_baseline and cpu_dispatch parms"
)
elif ch == '$':
if multi_target is not None:
self.dist_fatal(
"policies aren't allowed inside multi-target '()'"
", only CPU features"
)
policies.add(self._parse_token_policy(TOK))
elif ch == '#':
if multi_target is not None:
self.dist_fatal(
"target groups aren't allowed inside multi-target '()'"
", only CPU features"
)
has_baseline, final_targets, extra_flags = \
self._parse_token_group(TOK, has_baseline, final_targets, extra_flags)
elif ch == '(':
if multi_target is not None:
self.dist_fatal("unclosed multi-target, missing ')'")
multi_target = set()
elif ch == ')':
if multi_target is None:
self.dist_fatal("multi-target opener '(' wasn't found")
targets = self._parse_multi_target(multi_target)
if targets is None:
skipped.add(tuple(multi_target))
else:
if len(targets) == 1:
targets = targets[0]
if targets and targets not in final_targets:
final_targets.append(targets)
multi_target = None # back to default
else:
if TOK == "BASELINE":
if multi_target is not None:
self.dist_fatal("baseline isn't allowed inside multi-target '()'")
has_baseline = True
continue
if multi_target is not None:
multi_target.add(TOK)
continue
if not self.feature_is_exist(TOK):
self.dist_fatal("invalid target name '%s'" % TOK)
is_enabled = (
TOK in self.parse_baseline_names or
TOK in self.parse_dispatch_names
)
if is_enabled:
if TOK not in final_targets:
final_targets.append(TOK)
continue
skipped.add(TOK)
if multi_target is not None:
self.dist_fatal("unclosed multi-target, missing ')'")
if skipped:
self.dist_log(
"skip targets", skipped,
"not part of baseline or dispatch-able features"
)
final_targets = self.feature_untied(final_targets)
# add policies' dependencies
for p in list(policies):
_, _, deps = self._parse_policies[p]
for d in deps:
if d in policies:
continue
self.dist_log(
"policy '%s' force enables '%s'" % (
p, d
))
policies.add(d)
# apply the policies' filters
for p, (have, nhave, _) in self._parse_policies.items():
func = None
if p in policies:
func = have
self.dist_log("policy '%s' is ON" % p)
else:
func = nhave
if not func:
continue
has_baseline, final_targets, extra_flags = func(
has_baseline, final_targets, extra_flags
)
return has_baseline, final_targets, extra_flags
def _parse_token_policy(self, token):
"""validate policy token"""
if len(token) <= 1 or token[-1:] == token[0]:
self.dist_fatal("'$' must stuck in the begin of policy name")
token = token[1:]
if token not in self._parse_policies:
self.dist_fatal(
"'%s' is an invalid policy name, available policies are" % token,
self._parse_policies.keys()
)
return token
def _parse_token_group(self, token, has_baseline, final_targets, extra_flags):
"""validate group token"""
if len(token) <= 1 or token[-1:] == token[0]:
self.dist_fatal("'#' must stuck in the begin of group name")
token = token[1:]
ghas_baseline, gtargets, gextra_flags = self.parse_target_groups.get(
token, (False, None, [])
)
if gtargets is None:
self.dist_fatal(
"'%s' is an invalid target group name, " % token + \
"available target groups are",
self.parse_target_groups.keys()
)
if ghas_baseline:
has_baseline = True
# always keep sorting as specified
final_targets += [f for f in gtargets if f not in final_targets]
extra_flags += [f for f in gextra_flags if f not in extra_flags]
return has_baseline, final_targets, extra_flags
def _parse_multi_target(self, targets):
"""validate multi targets that defined between parentheses()"""
# remove any implied features and keep the origins
if not targets:
self.dist_fatal("empty multi-target '()'")
if not all([
self.feature_is_exist(tar) for tar in targets
]) :
self.dist_fatal("invalid target name in multi-target", targets)
if not all([
(
tar in self.parse_baseline_names or
tar in self.parse_dispatch_names
)
for tar in targets
]) :
return None
targets = self.feature_ahead(targets)
if not targets:
return None
# force sort multi targets, so it can be comparable
targets = self.feature_sorted(targets)
targets = tuple(targets) # hashable
return targets
def _parse_policy_not_keepbase(self, has_baseline, final_targets, extra_flags):
"""skip all baseline features"""
skipped = []
for tar in final_targets[:]:
is_base = False
if isinstance(tar, str):
is_base = tar in self.parse_baseline_names
else:
# multi targets
is_base = all([
f in self.parse_baseline_names
for f in tar
])
if is_base:
skipped.append(tar)
final_targets.remove(tar)
if skipped:
self.dist_log("skip baseline features", skipped)
return has_baseline, final_targets, extra_flags
def _parse_policy_keepsort(self, has_baseline, final_targets, extra_flags):
"""leave a notice that $keep_sort is on"""
self.dist_log(
"policy 'keep_sort' is on, dispatch-able targets", final_targets, "\n"
"are 'not' sorted depend on the highest interest but"
"as specified in the dispatch-able source or the extra group"
)
return has_baseline, final_targets, extra_flags
def _parse_policy_not_keepsort(self, has_baseline, final_targets, extra_flags):
"""sorted depend on the highest interest"""
final_targets = self.feature_sorted(final_targets, reverse=True)
return has_baseline, final_targets, extra_flags
def _parse_policy_maxopt(self, has_baseline, final_targets, extra_flags):
"""append the compiler optimization flags"""
if self.cc_has_debug:
self.dist_log("debug mode is detected, policy 'maxopt' is skipped.")
elif self.cc_noopt:
self.dist_log("optimization is disabled, policy 'maxopt' is skipped.")
else:
flags = self.cc_flags["opt"]
if not flags:
self.dist_log(
"current compiler doesn't support optimization flags, "
"policy 'maxopt' is skipped", stderr=True
)
else:
extra_flags += flags
return has_baseline, final_targets, extra_flags
def _parse_policy_werror(self, has_baseline, final_targets, extra_flags):
"""force warnings to treated as errors"""
flags = self.cc_flags["werror"]
if not flags:
self.dist_log(
"current compiler doesn't support werror flags, "
"warnings will 'not' treated as errors", stderr=True
)
else:
self.dist_log("compiler warnings are treated as errors")
extra_flags += flags
return has_baseline, final_targets, extra_flags
def _parse_policy_autovec(self, has_baseline, final_targets, extra_flags):
"""skip features that has no auto-vectorized support by compiler"""
skipped = []
for tar in final_targets[:]:
if isinstance(tar, str):
can = self.feature_can_autovec(tar)
else: # multiple target
can = all([
self.feature_can_autovec(t)
for t in tar
])
if not can:
final_targets.remove(tar)
skipped.append(tar)
if skipped:
self.dist_log("skip non auto-vectorized features", skipped)
return has_baseline, final_targets, extra_flags
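# A compact, hedged sketch of how `parse_targets` above pulls the token
# string out of a dispatch-able source: everything between the "@targets"
# mark and the closing "*/" of the leading C comment. The helper name is
# illustrative and it omits the max-line guard used above.
def _extract_targets_tokens(path, start_with="@targets", end_with="*/"):
    tokens = ""
    start_pos = end_pos = -1
    with open(path) as fd:
        for line in fd:
            if start_pos == -1:
                start_pos = line.find(start_with)
                if start_pos == -1:
                    continue
                start_pos += len(start_with)
            tokens += line
            end_pos = line.find(end_with)
            if end_pos != -1:
                end_pos += len(tokens) - len(line)
                break
    if start_pos == -1 or end_pos == -1:
        raise ValueError("no '@targets ... */' comment found in %s" % path)
    return tokens[start_pos:end_pos]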
class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse):
"""
A helper class for `CCompiler` that aims to provide extra build options
to effectively control compiler optimizations that are directly
related to CPU features.
"""
def __init__(self, ccompiler, cpu_baseline="min", cpu_dispatch="max", cache_path=None):
_Config.__init__(self)
_Distutils.__init__(self, ccompiler)
_Cache.__init__(self, cache_path, self.dist_info(), cpu_baseline, cpu_dispatch)
_CCompiler.__init__(self)
_Feature.__init__(self)
if not self.cc_noopt and self.cc_has_native:
self.dist_log(
"native flag is specified through environment variables. "
"force cpu-baseline='native'"
)
cpu_baseline = "native"
_Parse.__init__(self, cpu_baseline, cpu_dispatch)
# keep the requested features untouched, need it later for report
# and trace purposes
self._requested_baseline = cpu_baseline
self._requested_dispatch = cpu_dispatch
# key is the dispatch-able source and value is a tuple
# containing two items (has_baseline[boolean], dispatched-features[list])
self.sources_status = getattr(self, "sources_status", {})
# every instance should have a separate one
self.cache_private.add("sources_status")
# set it at the end to make sure the cache writing is done after
# initializing this class
self.hit_cache = hasattr(self, "hit_cache")
def is_cached(self):
"""
Returns True if the class was loaded from the cache file
"""
return self.cache_infile and self.hit_cache
def cpu_baseline_flags(self):
"""
Returns a list of final CPU baseline compiler flags
"""
return self.parse_baseline_flags
def cpu_baseline_names(self):
"""
return a list of final CPU baseline feature names
"""
return self.parse_baseline_names
def cpu_dispatch_names(self):
"""
return a list of final CPU dispatch feature names
"""
return self.parse_dispatch_names
def try_dispatch(self, sources, src_dir=None, ccompiler=None, **kwargs):
"""
Compile one or more dispatch-able sources and generate object files;
also generate the abstract C config headers and macros that are
used later for the final runtime dispatching process.
The mechanism behind it is to take each source file specified
in 'sources' and branch it into several files depending on the
special configuration statements that must be declared at the
top of each source, which contain the targeted CPU features,
then compile every branched source with the proper compiler flags.
Parameters
----------
sources : list
Must be a list of dispatch-able sources file paths,
and configuration statements must be declared inside
each file.
src_dir : str
Path of parent directory for the generated headers and wrapped sources.
If None (default) the files will be generated in-place.
ccompiler : CCompiler
Distutils `CCompiler` instance to be used for compilation.
If None (default), the provided instance during the initialization
will be used instead.
**kwargs : any
Arguments to pass on to the `CCompiler.compile()`
Returns
-------
list : generated object files
Raises
------
CompileError
Raised by `CCompiler.compile()` on compiling failure.
DistutilsError
Raised for some errors while checking the sanity of the configuration statements.
See Also
--------
parse_targets :
Parsing the configuration statements of dispatch-able sources.
"""
to_compile = {}
baseline_flags = self.cpu_baseline_flags()
include_dirs = kwargs.setdefault("include_dirs", [])
for src in sources:
output_dir = os.path.dirname(src)
if src_dir:
if not output_dir.startswith(src_dir):
output_dir = os.path.join(src_dir, output_dir)
if output_dir not in include_dirs:
# To allow including the generated config header(*.dispatch.h)
# by the dispatch-able sources
include_dirs.append(output_dir)
has_baseline, targets, extra_flags = self.parse_targets(src)
nochange = self._generate_config(output_dir, src, targets, has_baseline)
for tar in targets:
tar_src = self._wrap_target(output_dir, src, tar, nochange=nochange)
flags = tuple(extra_flags + self.feature_flags(tar))
to_compile.setdefault(flags, []).append(tar_src)
if has_baseline:
flags = tuple(extra_flags + baseline_flags)
to_compile.setdefault(flags, []).append(src)
self.sources_status[src] = (has_baseline, targets)
# For these reasons, the sources are compiled in a separate loop:
# - Gathering all sources with the same flags to benefit from
# the parallel compiling as much as possible.
# - To generate all config headers of the dispatchable sources,
# before the compilation, in case there are dependency relationships
# among them.
objects = []
for flags, srcs in to_compile.items():
objects += self.dist_compile(
srcs, list(flags), ccompiler=ccompiler, **kwargs
)
return objects
def generate_dispatch_header(self, header_path):
"""
Generate the dispatch header which contains the #definitions and headers
for platform-specific instruction-sets for the enabled CPU baseline and
dispatch-able features.
It's highly recommended to take a look at the generated header
and also at the generated source files via `try_dispatch()`
in order to get the full picture.
"""
self.dist_log("generate CPU dispatch header: (%s)" % header_path)
baseline_names = self.cpu_baseline_names()
dispatch_names = self.cpu_dispatch_names()
baseline_len = len(baseline_names)
dispatch_len = len(dispatch_names)
header_dir = os.path.dirname(header_path)
if not os.path.exists(header_dir):
self.dist_log(
f"dispatch header dir {header_dir} does not exist, creating it",
stderr=True
)
os.makedirs(header_dir)
with open(header_path, 'w') as f:
baseline_calls = ' \\\n'.join([
(
"\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))"
) % (self.conf_c_prefix, f)
for f in baseline_names
])
dispatch_calls = ' \\\n'.join([
(
"\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))"
) % (self.conf_c_prefix, f)
for f in dispatch_names
])
f.write(textwrap.dedent("""\
/*
* AUTOGENERATED DON'T EDIT
* Please make changes to the code generator (distutils/ccompiler_opt.py)
*/
#define {pfx}WITH_CPU_BASELINE "{baseline_str}"
#define {pfx}WITH_CPU_DISPATCH "{dispatch_str}"
#define {pfx}WITH_CPU_BASELINE_N {baseline_len}
#define {pfx}WITH_CPU_DISPATCH_N {dispatch_len}
#define {pfx}WITH_CPU_EXPAND_(X) X
#define {pfx}WITH_CPU_BASELINE_CALL(MACRO_TO_CALL, ...) \\
{baseline_calls}
#define {pfx}WITH_CPU_DISPATCH_CALL(MACRO_TO_CALL, ...) \\
{dispatch_calls}
""").format(
pfx=self.conf_c_prefix, baseline_str=" ".join(baseline_names),
dispatch_str=" ".join(dispatch_names), baseline_len=baseline_len,
dispatch_len=dispatch_len, baseline_calls=baseline_calls,
dispatch_calls=dispatch_calls
))
baseline_pre = ''
for name in baseline_names:
baseline_pre += self.feature_c_preprocessor(name, tabs=1) + '\n'
dispatch_pre = ''
for name in dispatch_names:
dispatch_pre += textwrap.dedent("""\
#ifdef {pfx}CPU_TARGET_{name}
{pre}
#endif /*{pfx}CPU_TARGET_{name}*/
""").format(
pfx=self.conf_c_prefix_, name=name, pre=self.feature_c_preprocessor(
name, tabs=1
))
f.write(textwrap.dedent("""\
/******* baseline features *******/
{baseline_pre}
/******* dispatch features *******/
{dispatch_pre}
""").format(
pfx=self.conf_c_prefix_, baseline_pre=baseline_pre,
dispatch_pre=dispatch_pre
))
def report(self, full=False):
report = []
platform_rows = []
baseline_rows = []
dispatch_rows = []
report.append(("Platform", platform_rows))
report.append(("", ""))
report.append(("CPU baseline", baseline_rows))
report.append(("", ""))
report.append(("CPU dispatch", dispatch_rows))
########## platform ##########
platform_rows.append(("Architecture", (
"unsupported" if self.cc_on_noarch else self.cc_march)
))
platform_rows.append(("Compiler", (
"unix-like" if self.cc_is_nocc else self.cc_name)
))
########## baseline ##########
if self.cc_noopt:
baseline_rows.append(("Requested", "optimization disabled"))
else:
baseline_rows.append(("Requested", repr(self._requested_baseline)))
baseline_names = self.cpu_baseline_names()
baseline_rows.append((
"Enabled", (' '.join(baseline_names) if baseline_names else "none")
))
baseline_flags = self.cpu_baseline_flags()
baseline_rows.append((
"Flags", (' '.join(baseline_flags) if baseline_flags else "none")
))
extra_checks = []
for name in baseline_names:
extra_checks += self.feature_extra_checks(name)
baseline_rows.append((
"Extra checks", (' '.join(extra_checks) if extra_checks else "none")
))
########## dispatch ##########
if self.cc_noopt:
baseline_rows.append(("Requested", "optimization disabled"))
else:
dispatch_rows.append(("Requested", repr(self._requested_dispatch)))
dispatch_names = self.cpu_dispatch_names()
dispatch_rows.append((
"Enabled", (' '.join(dispatch_names) if dispatch_names else "none")
))
########## Generated ##########
# TODO:
# - collect object names from 'try_dispatch()'
# then get the size of each object and print it
# - give more details about the features that not
# generated due compiler support
# - find a better output design.
#
target_sources = {}
for source, (_, targets) in self.sources_status.items():
for tar in targets:
target_sources.setdefault(tar, []).append(source)
if not full or not target_sources:
generated = ""
for tar in self.feature_sorted(target_sources):
sources = target_sources[tar]
name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar)
generated += name + "[%d] " % len(sources)
dispatch_rows.append(("Generated", generated[:-1] if generated else "none"))
else:
dispatch_rows.append(("Generated", ''))
for tar in self.feature_sorted(target_sources):
sources = target_sources[tar]
pretty_name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar)
flags = ' '.join(self.feature_flags(tar))
implies = ' '.join(self.feature_sorted(self.feature_implies(tar)))
detect = ' '.join(self.feature_detect(tar))
extra_checks = []
for name in ((tar,) if isinstance(tar, str) else tar):
extra_checks += self.feature_extra_checks(name)
extra_checks = (' '.join(extra_checks) if extra_checks else "none")
dispatch_rows.append(('', ''))
dispatch_rows.append((pretty_name, implies))
dispatch_rows.append(("Flags", flags))
dispatch_rows.append(("Extra checks", extra_checks))
dispatch_rows.append(("Detect", detect))
for src in sources:
dispatch_rows.append(("", src))
###############################
# TODO: add support for 'markdown' format
text = []
secs_len = [len(secs) for secs, _ in report]
cols_len = [len(col) for _, rows in report for col, _ in rows]
tab = ' ' * 2
pad = max(max(secs_len), max(cols_len))
for sec, rows in report:
if not sec:
text.append("") # empty line
continue
sec += ' ' * (pad - len(sec))
text.append(sec + tab + ': ')
for col, val in rows:
col += ' ' * (pad - len(col))
text.append(tab + col + ': ' + val)
return '\n'.join(text)
def _wrap_target(self, output_dir, dispatch_src, target, nochange=False):
assert(isinstance(target, (str, tuple)))
if isinstance(target, str):
ext_name = target_name = target
else:
# multi-target
ext_name = '.'.join(target)
target_name = '__'.join(target)
wrap_path = os.path.join(output_dir, os.path.basename(dispatch_src))
wrap_path = "{0}.{2}{1}".format(*os.path.splitext(wrap_path), ext_name.lower())
if nochange and os.path.exists(wrap_path):
return wrap_path
self.dist_log("wrap dispatch-able target -> ", wrap_path)
# sorting for readability
features = self.feature_sorted(self.feature_implies_c(target))
target_join = "#define %sCPU_TARGET_" % self.conf_c_prefix_
target_defs = [target_join + f for f in features]
target_defs = '\n'.join(target_defs)
with open(wrap_path, "w") as fd:
fd.write(textwrap.dedent("""\
/**
* AUTOGENERATED DON'T EDIT
* Please make changes to the code generator \
(distutils/ccompiler_opt.py)
*/
#define {pfx}CPU_TARGET_MODE
#define {pfx}CPU_TARGET_CURRENT {target_name}
{target_defs}
#include "{path}"
""").format(
pfx=self.conf_c_prefix_, target_name=target_name,
path=os.path.abspath(dispatch_src), target_defs=target_defs
))
return wrap_path
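# For a dispatch-able source "math_ops.dispatch.c" and the multi-target
# ("AVX2", "FMA3"), the wrapper written above would be named
# "math_ops.dispatch.avx2.fma3.c" and would look roughly like this
# (illustrative; the actual prefix comes from `conf_c_prefix_`):
#
#     #define NPY__CPU_TARGET_MODE
#     #define NPY__CPU_TARGET_CURRENT AVX2__FMA3
#     #define NPY__CPU_TARGET_SSE
#     ...
#     #define NPY__CPU_TARGET_AVX2
#     #include "/abs/path/to/math_ops.dispatch.c"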
def _generate_config(self, output_dir, dispatch_src, targets, has_baseline=False):
config_path = os.path.basename(dispatch_src)
config_path = os.path.splitext(config_path)[0] + '.h'
config_path = os.path.join(output_dir, config_path)
# check if targets didn't change to avoid recompiling
cache_hash = self.cache_hash(targets, has_baseline)
try:
with open(config_path) as f:
last_hash = f.readline().split("cache_hash:")
if len(last_hash) == 2 and int(last_hash[1]) == cache_hash:
return True
except OSError:
pass
os.makedirs(os.path.dirname(config_path), exist_ok=True)
self.dist_log("generate dispatched config -> ", config_path)
dispatch_calls = []
for tar in targets:
if isinstance(tar, str):
target_name = tar
else: # multi target
target_name = '__'.join([t for t in tar])
req_detect = self.feature_detect(tar)
req_detect = '&&'.join([
"CHK(%s)" % f for f in req_detect
])
dispatch_calls.append(
"\t%sCPU_DISPATCH_EXPAND_(CB((%s), %s, __VA_ARGS__))" % (
self.conf_c_prefix_, req_detect, target_name
))
dispatch_calls = ' \\\n'.join(dispatch_calls)
if has_baseline:
baseline_calls = (
"\t%sCPU_DISPATCH_EXPAND_(CB(__VA_ARGS__))"
) % self.conf_c_prefix_
else:
baseline_calls = ''
with open(config_path, "w") as fd:
fd.write(textwrap.dedent("""\
// cache_hash:{cache_hash}
/**
* AUTOGENERATED DON'T EDIT
* Please make changes to the code generator (distutils/ccompiler_opt.py)
*/
#ifndef {pfx}CPU_DISPATCH_EXPAND_
#define {pfx}CPU_DISPATCH_EXPAND_(X) X
#endif
#undef {pfx}CPU_DISPATCH_BASELINE_CALL
#undef {pfx}CPU_DISPATCH_CALL
#define {pfx}CPU_DISPATCH_BASELINE_CALL(CB, ...) \\
{baseline_calls}
#define {pfx}CPU_DISPATCH_CALL(CHK, CB, ...) \\
{dispatch_calls}
""").format(
pfx=self.conf_c_prefix_, baseline_calls=baseline_calls,
dispatch_calls=dispatch_calls, cache_hash=cache_hash
))
return False
def new_ccompiler_opt(compiler, dispatch_hpath, **kwargs):
"""
Create a new instance of 'CCompilerOpt' and generate the dispatch header
which contains the #definitions and headers of platform-specific instruction-sets for
the enabled CPU baseline and dispatch-able features.
Parameters
----------
compiler : CCompiler instance
dispatch_hpath : str
path of the dispatch header
**kwargs: passed as-is to `CCompilerOpt(...)`
Returns
-------
new instance of CCompilerOpt
"""
opt = CCompilerOpt(compiler, **kwargs)
if not os.path.exists(dispatch_hpath) or not opt.is_cached():
opt.generate_dispatch_header(dispatch_hpath)
return opt
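# Hedged usage sketch of the factory above; the paths and source names are
# illustrative, and compiler setup details are omitted:
#
#     from distutils.ccompiler import new_compiler
#     cc = new_compiler()
#     opt = new_ccompiler_opt(cc, "build/_cpu_dispatch.h",
#                             cpu_baseline="min", cpu_dispatch="max")
#     objects = opt.try_dispatch(["src/math_ops.dispatch.c"])
#     print(opt.report(full=True))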
| 99,751 | Python | 36.557229 | 107 | 0.53427 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/ccompiler.py | import os
import re
import sys
import shlex
import time
import subprocess
from copy import copy
from distutils import ccompiler
from distutils.ccompiler import (
compiler_class, gen_lib_options, get_default_compiler, new_compiler,
CCompiler
)
from distutils.errors import (
DistutilsExecError, DistutilsModuleError, DistutilsPlatformError,
CompileError, UnknownFileError
)
from distutils.sysconfig import customize_compiler
from distutils.version import LooseVersion
from numpy.distutils import log
from numpy.distutils.exec_command import (
filepath_from_subprocess_output, forward_bytes_to_stdout
)
from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \
get_num_build_jobs, \
_commandline_dep_string, \
sanitize_cxx_flags
# globals for parallel build management
import threading
_job_semaphore = None
_global_lock = threading.Lock()
_processing_files = set()
def _needs_build(obj, cc_args, extra_postargs, pp_opts):
"""
Check if an object needs to be rebuilt based on its dependencies
Parameters
----------
obj : str
object file
Returns
-------
bool
"""
# defined in unixcompiler.py
dep_file = obj + '.d'
if not os.path.exists(dep_file):
return True
# dep_file is a makefile containing 'object: dependencies'
# formatted like posix shell (spaces escaped, \ line continuations)
# the last line contains the compiler commandline arguments as some
# projects may compile an extension multiple times with different
# arguments
with open(dep_file, "r") as f:
lines = f.readlines()
cmdline = _commandline_dep_string(cc_args, extra_postargs, pp_opts)
last_cmdline = lines[-1]
if last_cmdline != cmdline:
return True
contents = ''.join(lines[:-1])
deps = [x for x in shlex.split(contents, posix=True)
if x != "\n" and not x.endswith(":")]
try:
t_obj = os.stat(obj).st_mtime
# check if any of the dependencies is newer than the object
# the dependencies includes the source used to create the object
for f in deps:
if os.stat(f).st_mtime > t_obj:
return True
except OSError:
# no object counts as newer (shouldn't happen if dep_file exists)
return True
return False
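# Illustrative layout of the '.d' file `_needs_build` reads above (the exact
# command-line marker on the last line is produced by
# `_commandline_dep_string`):
#
#     build/foo.o: \
#      src/foo.c \
#      src/foo.h
#     <compiler command line recorded at compile time>
#
# If the recorded command line differs from the current one, or any listed
# dependency is newer than the object file, the object is rebuilt.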
def replace_method(klass, method_name, func):
# Py3k does not have unbound method anymore, MethodType does not work
m = lambda self, *args, **kw: func(self, *args, **kw)
setattr(klass, method_name, m)
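# A tiny, hedged demo of `replace_method`; the class and method names are
# made up for illustration:
#
#     class Greeter:
#         def hello(self):
#             return "hi"
#
#     replace_method(Greeter, 'hello', lambda self: "HI")
#     assert Greeter().hello() == "HI"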
######################################################################
## Methods that subclasses may redefine. But don't call these methods,
## they are private to the CCompiler class and may return unexpected
## results if used elsewhere. So, you have been warned..
def CCompiler_find_executables(self):
"""
Does nothing here, but is called by the get_version method and can be
overridden by subclasses. In particular it is redefined in the `FCompiler`
class where more documentation can be found.
"""
pass
replace_method(CCompiler, 'find_executables', CCompiler_find_executables)
# Using customized CCompiler.spawn.
def CCompiler_spawn(self, cmd, display=None, env=None):
"""
Execute a command in a sub-process.
Parameters
----------
cmd : str
The command to execute.
display : str or sequence of str, optional
The text to add to the log file kept by `numpy.distutils`.
If not given, `display` is equal to `cmd`.
    env : dict, optional
        Environment variables to use in place of the current environment.
Returns
-------
None
Raises
------
DistutilsExecError
If the command failed, i.e. the exit status was not 0.
"""
env = env if env is not None else dict(os.environ)
if display is None:
display = cmd
if is_sequence(display):
display = ' '.join(list(display))
log.info(display)
try:
if self.verbose:
subprocess.check_output(cmd, env=env)
else:
subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)
except subprocess.CalledProcessError as exc:
o = exc.output
s = exc.returncode
except OSError as e:
# OSError doesn't have the same hooks for the exception
# output, but exec_command() historically would use an
# empty string for EnvironmentError (base class for
# OSError)
# o = b''
        # but an empty string would leave the end-user lost in translation!
o = f"\n\n{e}\n\n\n"
try:
o = o.encode(sys.stdout.encoding)
except AttributeError:
o = o.encode('utf8')
# status previously used by exec_command() for parent
# of OSError
s = 127
else:
        # return early on success so that any caught exception falls
        # through to the error-handling code after the try / except block
return None
if is_sequence(cmd):
cmd = ' '.join(list(cmd))
if self.verbose:
forward_bytes_to_stdout(o)
if re.search(b'Too many open files', o):
msg = '\nTry rerunning setup command until build succeeds.'
else:
msg = ''
raise DistutilsExecError('Command "%s" failed with exit status %d%s' %
(cmd, s, msg))
replace_method(CCompiler, 'spawn', CCompiler_spawn)
def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
"""
Return the name of the object files for the given source files.
Parameters
----------
source_filenames : list of str
The list of paths to source files. Paths can be either relative or
absolute, this is handled transparently.
strip_dir : bool, optional
Whether to strip the directory from the returned paths. If True,
        only the file name, prepended by `output_dir`, is returned.
        Default is False.
output_dir : str, optional
If given, this path is prepended to the returned paths to the
object files.
Returns
-------
obj_names : list of str
The list of paths to the object files corresponding to the source
files in `source_filenames`.
"""
if output_dir is None:
output_dir = ''
obj_names = []
for src_name in source_filenames:
base, ext = os.path.splitext(os.path.normpath(src_name))
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if base.startswith('..'):
# Resolve starting relative path components, middle ones
# (if any) have been handled by os.path.normpath above.
i = base.rfind('..')+2
d = base[:i]
d = os.path.basename(os.path.abspath(d))
base = d + base[i:]
if ext not in self.src_extensions:
raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name))
if strip_dir:
base = os.path.basename(base)
obj_name = os.path.join(output_dir, base + self.obj_extension)
obj_names.append(obj_name)
return obj_names
replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames)
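
# Illustrative behavior (a minimal sketch; assumes a unix-like compiler whose
# obj_extension is '.o'):
#
#     compiler.object_filenames(['pkg/mod.c', 'pkg/helper.c'],
#                               output_dir='build/temp')
#     # -> ['build/temp/pkg/mod.o', 'build/temp/pkg/helper.o']
#     compiler.object_filenames(['pkg/mod.c'], strip_dir=1)
#     # -> ['mod.o']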
def CCompiler_compile(self, sources, output_dir=None, macros=None,
include_dirs=None, debug=0, extra_preargs=None,
extra_postargs=None, depends=None):
"""
Compile one or more source files.
Please refer to the Python distutils API reference for more details.
Parameters
----------
sources : list of str
A list of filenames
output_dir : str, optional
Path to the output directory.
macros : list of tuples
A list of macro definitions.
include_dirs : list of str, optional
The directories to add to the default include file search path for
this compilation only.
debug : bool, optional
Whether or not to output debug symbols in or alongside the object
file(s).
    extra_preargs, extra_postargs : list of str, optional
Extra pre- and post-arguments.
depends : list of str, optional
A list of file names that all targets depend on.
Returns
-------
objects : list of str
A list of object file names, one per source file `sources`.
Raises
------
CompileError
If compilation fails.
"""
global _job_semaphore
jobs = get_num_build_jobs()
# setup semaphore to not exceed number of compile jobs when parallelized at
# extension level (python >= 3.5)
with _global_lock:
if _job_semaphore is None:
_job_semaphore = threading.Semaphore(jobs)
if not sources:
return []
from numpy.distutils.fcompiler import (FCompiler, is_f_file,
has_f90_header)
if isinstance(self, FCompiler):
display = []
for fc in ['f77', 'f90', 'fix']:
fcomp = getattr(self, 'compiler_'+fc)
if fcomp is None:
continue
display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp)))
display = '\n'.join(display)
else:
ccomp = self.compiler_so
display = "C compiler: %s\n" % (' '.join(ccomp),)
log.info(display)
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
display = "compile options: '%s'" % (' '.join(cc_args))
if extra_postargs:
display += "\nextra options: '%s'" % (' '.join(extra_postargs))
log.info(display)
def single_compile(args):
obj, (src, ext) = args
if not _needs_build(obj, cc_args, extra_postargs, pp_opts):
return
# check if we are currently already processing the same object
# happens when using the same source in multiple extensions
while True:
# need explicit lock as there is no atomic check and add with GIL
with _global_lock:
# file not being worked on, start working
if obj not in _processing_files:
_processing_files.add(obj)
break
# wait for the processing to end
time.sleep(0.1)
try:
            # acquire a slot from the job semaphore and build
with _job_semaphore:
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
finally:
# register being done processing
with _global_lock:
_processing_files.remove(obj)
if isinstance(self, FCompiler):
objects_to_build = list(build.keys())
f77_objects, other_objects = [], []
for obj in objects:
if obj in objects_to_build:
src, ext = build[obj]
                if self.compiler_type == 'absoft':
obj = cyg2win32(obj)
src = cyg2win32(src)
if is_f_file(src) and not has_f90_header(src):
f77_objects.append((obj, (src, ext)))
else:
other_objects.append((obj, (src, ext)))
# f77 objects can be built in parallel
build_items = f77_objects
        # build f90 modules serially; module files are generated during
        # compilation and may be used by files later in the list, so the
        # ordering is important
for o in other_objects:
single_compile(o)
else:
build_items = build.items()
if len(build) > 1 and jobs > 1:
# build parallel
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(jobs) as pool:
res = pool.map(single_compile, build_items)
list(res) # access result to raise errors
else:
# build serial
for o in build_items:
single_compile(o)
# Return *all* object filenames, not just the ones we just built.
return objects
replace_method(CCompiler, 'compile', CCompiler_compile)
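
# Note: the degree of parallelism used by CCompiler_compile comes from
# get_num_build_jobs(). As a sketch of typical usage, the job count can be
# steered through the environment or the build command line:
#
#     NPY_NUM_BUILD_JOBS=4 python setup.py build
#     python setup.py build -j 4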
def CCompiler_customize_cmd(self, cmd, ignore=()):
"""
Customize compiler using distutils command.
Parameters
----------
cmd : class instance
An instance inheriting from `distutils.cmd.Command`.
ignore : sequence of str, optional
List of `CCompiler` commands (without ``'set_'``) that should not be
altered. Strings that are checked for are:
``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs',
'rpath', 'link_objects')``.
Returns
-------
None
"""
log.info('customize %s using %s' % (self.__class__.__name__,
cmd.__class__.__name__))
if hasattr(self, 'compiler') and 'clang' in self.compiler[0]:
        # clang defaults to a non-strict floating-point error model.
# Since NumPy and most Python libs give warnings for these, override:
self.compiler.append('-ftrapping-math')
self.compiler_so.append('-ftrapping-math')
def allow(attr):
return getattr(cmd, attr, None) is not None and attr not in ignore
if allow('include_dirs'):
self.set_include_dirs(cmd.include_dirs)
if allow('define'):
for (name, value) in cmd.define:
self.define_macro(name, value)
if allow('undef'):
for macro in cmd.undef:
self.undefine_macro(macro)
if allow('libraries'):
self.set_libraries(self.libraries + cmd.libraries)
if allow('library_dirs'):
self.set_library_dirs(self.library_dirs + cmd.library_dirs)
if allow('rpath'):
self.set_runtime_library_dirs(cmd.rpath)
if allow('link_objects'):
self.set_link_objects(cmd.link_objects)
replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd)
def _compiler_to_string(compiler):
props = []
mx = 0
keys = list(compiler.executables.keys())
for key in ['version', 'libraries', 'library_dirs',
'object_switch', 'compile_switch',
'include_dirs', 'define', 'undef', 'rpath', 'link_objects']:
if key not in keys:
keys.append(key)
for key in keys:
if hasattr(compiler, key):
v = getattr(compiler, key)
mx = max(mx, len(key))
props.append((key, repr(v)))
fmt = '%-' + repr(mx+1) + 's = %s'
lines = [fmt % prop for prop in props]
return '\n'.join(lines)
def CCompiler_show_customization(self):
"""
Print the compiler customizations to stdout.
Parameters
----------
None
Returns
-------
None
Notes
-----
Printing is only done if the distutils log threshold is < 2.
"""
try:
self.get_version()
except Exception:
pass
    if log._global_log.threshold < 2:
print('*'*80)
print(self.__class__)
print(_compiler_to_string(self))
print('*'*80)
replace_method(CCompiler, 'show_customization', CCompiler_show_customization)
def CCompiler_customize(self, dist, need_cxx=0):
"""
Do any platform-specific customization of a compiler instance.
This method calls `distutils.sysconfig.customize_compiler` for
    platform-specific customization, and optionally removes a flag that
    would otherwise cause spurious warnings when C++ code is being compiled.
Parameters
----------
dist : object
This parameter is not used for anything.
need_cxx : bool, optional
Whether or not C++ has to be compiled. If so (True), the
``"-Wstrict-prototypes"`` option is removed to prevent spurious
warnings. Default is False.
Returns
-------
None
Notes
-----
All the default options used by distutils can be extracted with::
from distutils import sysconfig
sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS',
'CCSHARED', 'LDSHARED', 'SO')
"""
# See FCompiler.customize for suggested usage.
log.info('customize %s' % (self.__class__.__name__))
customize_compiler(self)
if need_cxx:
# In general, distutils uses -Wstrict-prototypes, but this option is
# not valid for C++ code, only for C. Remove it if it's there to
# avoid a spurious warning on every compilation.
try:
self.compiler_so.remove('-Wstrict-prototypes')
except (AttributeError, ValueError):
pass
if hasattr(self, 'compiler') and 'cc' in self.compiler[0]:
if not self.compiler_cxx:
if self.compiler[0].startswith('gcc'):
a, b = 'gcc', 'g++'
else:
a, b = 'cc', 'c++'
self.compiler_cxx = [self.compiler[0].replace(a, b)]\
+ self.compiler[1:]
else:
if hasattr(self, 'compiler'):
log.warn("#### %s #######" % (self.compiler,))
if not hasattr(self, 'compiler_cxx'):
log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__)
# check if compiler supports gcc style automatic dependencies
    # this runs on every extension, so skip the check for known-good compilers
if hasattr(self, 'compiler') and ('gcc' in self.compiler[0] or
'g++' in self.compiler[0] or
'clang' in self.compiler[0]):
self._auto_depends = True
elif os.name == 'posix':
import tempfile
import shutil
tmpdir = tempfile.mkdtemp()
try:
fn = os.path.join(tmpdir, "file.c")
with open(fn, "w") as f:
f.write("int a;\n")
self.compile([fn], output_dir=tmpdir,
extra_preargs=['-MMD', '-MF', fn + '.d'])
self._auto_depends = True
except CompileError:
self._auto_depends = False
finally:
shutil.rmtree(tmpdir)
return
replace_method(CCompiler, 'customize', CCompiler_customize)
def simple_version_match(pat=r'[-.\d]+', ignore='', start=''):
"""
Simple matching of version numbers, for use in CCompiler and FCompiler.
Parameters
----------
pat : str, optional
A regular expression matching version numbers.
Default is ``r'[-.\\d]+'``.
ignore : str, optional
A regular expression matching patterns to skip.
Default is ``''``, in which case nothing is skipped.
start : str, optional
A regular expression matching the start of where to start looking
for version numbers.
Default is ``''``, in which case searching is started at the
beginning of the version string given to `matcher`.
Returns
-------
matcher : callable
A function that is appropriate to use as the ``.version_match``
attribute of a `CCompiler` class. `matcher` takes a single parameter,
a version string.
"""
def matcher(self, version_string):
        # the version string may appear on the second line, so strip
        # newlines first:
version_string = version_string.replace('\n', ' ')
pos = 0
if start:
m = re.match(start, version_string)
if not m:
return None
pos = m.end()
while True:
m = re.search(pat, version_string[pos:])
if not m:
return None
if ignore and re.match(ignore, m.group(0)):
pos = m.end()
continue
break
return m.group(0)
return matcher
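
# Illustrative usage (a minimal sketch; the version strings are made up):
#
#     matcher = simple_version_match(start=r'Compaq Fortran')
#     matcher(None, 'Compaq Fortran V1.0-920')   # -> '1.0-920'
#     matcher(None, 'GNU Fortran 9.4.0')         # -> None (start does not match)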
def CCompiler_get_version(self, force=False, ok_status=[0]):
"""
Return compiler version, or None if compiler is not available.
Parameters
----------
force : bool, optional
If True, force a new determination of the version, even if the
compiler already has a version attribute. Default is False.
ok_status : list of int, optional
The list of status values returned by the version look-up process
for which a version string is returned. If the status value is not
in `ok_status`, None is returned. Default is ``[0]``.
Returns
-------
version : str or None
Version string, in the format of `distutils.version.LooseVersion`.
"""
if not force and hasattr(self, 'version'):
return self.version
self.find_executables()
try:
version_cmd = self.version_cmd
except AttributeError:
return None
if not version_cmd or not version_cmd[0]:
return None
try:
matcher = self.version_match
except AttributeError:
try:
pat = self.version_pattern
except AttributeError:
return None
def matcher(version_string):
m = re.match(pat, version_string)
if not m:
return None
version = m.group('version')
return version
try:
output = subprocess.check_output(version_cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
output = exc.output
status = exc.returncode
except OSError:
# match the historical returns for a parent
# exception class caught by exec_command()
status = 127
output = b''
else:
# output isn't actually a filepath but we do this
# for now to match previous distutils behavior
output = filepath_from_subprocess_output(output)
status = 0
version = None
if status in ok_status:
version = matcher(output)
if version:
version = LooseVersion(version)
self.version = version
return version
replace_method(CCompiler, 'get_version', CCompiler_get_version)
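
# As a sketch, a compiler class typically only needs a 'version_cmd' plus
# either a 'version_pattern' (with a named 'version' group) or a
# 'version_match' callable for get_version() to work; 'mycc' below is a
# hypothetical tool:
#
#     class MyCCompiler(CCompiler):
#         version_cmd = ['mycc', '--version']
#         version_pattern = r'mycc version (?P<version>[\d.]+)'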
def CCompiler_cxx_compiler(self):
"""
Return the C++ compiler.
Parameters
----------
None
Returns
-------
cxx : class instance
The C++ compiler, as a `CCompiler` instance.
"""
if self.compiler_type in ('msvc', 'intelw', 'intelemw'):
return self
cxx = copy(self)
cxx.compiler_cxx = cxx.compiler_cxx
cxx.compiler_so = [cxx.compiler_cxx[0]] + \
sanitize_cxx_flags(cxx.compiler_so[1:])
if (sys.platform.startswith(('aix', 'os400')) and
'ld_so_aix' in cxx.linker_so[0]):
# AIX needs the ld_so_aix script included with Python
cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \
+ cxx.linker_so[2:]
if sys.platform.startswith('os400'):
            # This is required by IBM i 7.4 and previous for PRId64 in printf() calls.
cxx.compiler_so.append('-D__STDC_FORMAT_MACROS')
            # Work around a gcc 10.3 bug that fails to handle TLS initialization.
cxx.compiler_so.append('-fno-extern-tls-init')
cxx.linker_so.append('-fno-extern-tls-init')
else:
cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:]
return cxx
replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler)
compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler',
"Intel C Compiler for 32-bit applications")
compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler',
"Intel C Itanium Compiler for Itanium-based applications")
compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler',
"Intel C Compiler for 64-bit applications")
compiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW',
"Intel C Compiler for 32-bit applications on Windows")
compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW',
"Intel C Compiler for 64-bit applications on Windows")
compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler',
"PathScale Compiler for SiCortex-based applications")
compiler_class['arm'] = ('armccompiler', 'ArmCCompiler',
"Arm C Compiler")
ccompiler._default_compilers += (('linux.*', 'intel'),
('linux.*', 'intele'),
('linux.*', 'intelem'),
('linux.*', 'pathcc'),
('nt', 'intelw'),
('nt', 'intelemw'))
if sys.platform == 'win32':
compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler',
"Mingw32 port of GNU C Compiler for Win32"\
"(for MSC built Python)")
if mingw32():
# On windows platforms, we want to default to mingw32 (gcc)
# because msvc can't build blitz stuff.
log.info('Setting mingw32 as default compiler for nt.')
ccompiler._default_compilers = (('nt', 'mingw32'),) \
+ ccompiler._default_compilers
_distutils_new_compiler = new_compiler
def new_compiler(plat=None,
                 compiler=None,
                 verbose=None,
                 dry_run=0,
                 force=0):
# Try first C compilers from numpy.distutils.
if verbose is None:
verbose = log.get_threshold() <= log.INFO
if plat is None:
plat = os.name
try:
if compiler is None:
compiler = get_default_compiler(plat)
(module_name, class_name, long_description) = compiler_class[compiler]
except KeyError:
msg = "don't know how to compile C/C++ code on platform '%s'" % plat
if compiler is not None:
msg = msg + " with '%s' compiler" % compiler
raise DistutilsPlatformError(msg)
module_name = "numpy.distutils." + module_name
try:
        __import__(module_name)
except ImportError as e:
msg = str(e)
log.info('%s in numpy.distutils; trying from distutils',
str(msg))
module_name = module_name[6:]
try:
__import__(module_name)
except ImportError as e:
msg = str(e)
raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \
module_name)
try:
module = sys.modules[module_name]
klass = vars(module)[class_name]
except KeyError:
raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " +
"in module '%s'") % (class_name, module_name))
compiler = klass(None, dry_run, force)
compiler.verbose = verbose
log.debug('new_compiler returns %s' % (klass))
return compiler
ccompiler.new_compiler = new_compiler
_distutils_gen_lib_options = gen_lib_options
def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries):
# the version of this function provided by CPython allows the following
# to return lists, which are unpacked automatically:
# - compiler.runtime_library_dir_option
# our version extends the behavior to:
# - compiler.library_dir_option
# - compiler.library_option
# - compiler.find_library_file
r = _distutils_gen_lib_options(compiler, library_dirs,
runtime_library_dirs, libraries)
lib_opts = []
for i in r:
if is_sequence(i):
lib_opts.extend(list(i))
else:
lib_opts.append(i)
return lib_opts
ccompiler.gen_lib_options = gen_lib_options
# Also fix up the various compiler modules, which do
# from distutils.ccompiler import gen_lib_options
# Don't bother with mwerks, as we don't support Classic Mac.
for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']:
_m = sys.modules.get('distutils.' + _cc + 'compiler')
if _m is not None:
setattr(_m, 'gen_lib_options', gen_lib_options)
| 28,126 | Python | 33.511656 | 97 | 0.588246 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/lib2def.py | import re
import sys
import subprocess
__doc__ = """This module generates a DEF file from the symbols in
an MSVC-compiled DLL import library. It correctly discriminates between
data and functions. The data is collected from the output of the program
nm(1).
Usage:
python lib2def.py [libname.lib] [output.def]
or
python lib2def.py [libname.lib] > output.def
libname.lib defaults to python<py_ver>.lib and output.def defaults to stdout
Author: Robert Kern <[email protected]>
Last Update: April 30, 1999
"""
__version__ = '0.1a'
py_ver = "%d%d" % tuple(sys.version_info[:2])
DEFAULT_NM = ['nm', '-Cs']
DEF_HEADER = """LIBRARY python%s.dll
;CODE PRELOAD MOVEABLE DISCARDABLE
;DATA PRELOAD SINGLE
EXPORTS
""" % py_ver
# the header of the DEF file
FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE)
DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE)
def parse_cmd():
"""Parses the command-line arguments.
libfile, deffile = parse_cmd()"""
if len(sys.argv) == 3:
if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def':
libfile, deffile = sys.argv[1:]
elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib':
deffile, libfile = sys.argv[1:]
        else:
            print("I'm assuming that your first argument is the library")
            print("and the second is the DEF file.")
            libfile, deffile = sys.argv[1:]
elif len(sys.argv) == 2:
if sys.argv[1][-4:] == '.def':
deffile = sys.argv[1]
libfile = 'python%s.lib' % py_ver
elif sys.argv[1][-4:] == '.lib':
deffile = None
libfile = sys.argv[1]
else:
libfile = 'python%s.lib' % py_ver
deffile = None
return libfile, deffile
def getnm(nm_cmd=['nm', '-Cs', 'python%s.lib' % py_ver], shell=True):
"""Returns the output of nm_cmd via a pipe.
nm_output = getnm(nm_cmd = 'nm -Cs py_lib')"""
p = subprocess.Popen(nm_cmd, shell=shell, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=True)
nm_output, nm_err = p.communicate()
if p.returncode != 0:
raise RuntimeError('failed to run "%s": "%s"' % (
' '.join(nm_cmd), nm_err))
return nm_output
def parse_nm(nm_output):
"""Returns a tuple of lists: dlist for the list of data
symbols and flist for the list of function symbols.
dlist, flist = parse_nm(nm_output)"""
data = DATA_RE.findall(nm_output)
func = FUNC_RE.findall(nm_output)
flist = []
for sym in data:
if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'):
flist.append(sym)
dlist = []
for sym in data:
if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'):
dlist.append(sym)
dlist.sort()
flist.sort()
return dlist, flist
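
# Illustrative input/output (a minimal sketch with made-up nm output,
# assuming a Python 3.9 build so py_ver == '39'):
#
#     nm_output = ("PyList_New in python39.dll\n"
#                  "_imp__PyList_New in python39.dll\n"
#                  "_imp___Py_NoneStruct in python39.dll\n")
#     dlist, flist = parse_nm(nm_output)
#     # dlist -> ['_Py_NoneStruct']   (data symbol)
#     # flist -> ['PyList_New']       (function symbol)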
def output_def(dlist, flist, header, file=sys.stdout):
"""Outputs the final DEF file to a file defaulting to stdout.
output_def(dlist, flist, header, file = sys.stdout)"""
for data_sym in dlist:
header = header + '\t%s DATA\n' % data_sym
header = header + '\n' # blank line
for func_sym in flist:
header = header + '\t%s\n' % func_sym
file.write(header)
if __name__ == '__main__':
libfile, deffile = parse_cmd()
if deffile is None:
deffile = sys.stdout
else:
deffile = open(deffile, 'w')
nm_cmd = DEFAULT_NM + [str(libfile)]
nm_output = getnm(nm_cmd, shell=False)
dlist, flist = parse_nm(nm_output)
output_def(dlist, flist, DEF_HEADER, deffile)
| 3,644 | Python | 30.153846 | 86 | 0.587267 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/armccompiler.py | from __future__ import division, absolute_import, print_function
from distutils.unixccompiler import UnixCCompiler
class ArmCCompiler(UnixCCompiler):
"""
Arm compiler.
"""
compiler_type = 'arm'
cc_exe = 'armclang'
cxx_exe = 'armclang++'
def __init__(self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__(self, verbose, dry_run, force)
cc_compiler = self.cc_exe
cxx_compiler = self.cxx_exe
        self.set_executables(compiler=cc_compiler + ' -O3 -fPIC',
                             compiler_so=cc_compiler + ' -O3 -fPIC',
                             compiler_cxx=cxx_compiler + ' -O3 -fPIC',
                             linker_exe=cc_compiler + ' -lamath',
                             linker_so=cc_compiler + ' -lamath -shared')
| 1,043 | Python | 34.999999 | 79 | 0.422819 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/line_endings.py | """ Functions for converting between DOS and UNIX line endings.
"""
import os
import re
import sys
def dos2unix(file):
"Replace CRLF with LF in argument files. Print names of changed files."
if os.path.isdir(file):
print(file, "Directory!")
return
with open(file, "rb") as fp:
data = fp.read()
    if b'\0' in data:  # the file was read in binary mode, so compare bytes
print(file, "Binary!")
return
    newdata = re.sub(b"\r\n", b"\n", data)
if newdata != data:
print('dos2unix:', file)
with open(file, "wb") as f:
f.write(newdata)
return file
else:
print(file, 'ok')
def dos2unix_one_dir(modified_files, dir_name, file_names):
for file in file_names:
full_path = os.path.join(dir_name, file)
file = dos2unix(full_path)
if file is not None:
modified_files.append(file)
def dos2unix_dir(dir_name):
modified_files = []
    # os.path.walk was removed in Python 3; walk the tree with os.walk instead
    for dir_root, _, file_names in os.walk(dir_name):
        dos2unix_one_dir(modified_files, dir_root, file_names)
return modified_files
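
# Illustrative usage (a minimal sketch; 'src' is a hypothetical directory):
#
#     changed = dos2unix_dir('src')
#     print('dos2unix converted %d files' % len(changed))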
#----------------------------------
def unix2dos(file):
"Replace LF with CRLF in argument files. Print names of changed files."
if os.path.isdir(file):
print(file, "Directory!")
return
with open(file, "rb") as fp:
data = fp.read()
    if b'\0' in data:  # the file was read in binary mode, so compare bytes
print(file, "Binary!")
return
    newdata = re.sub(b"\r\n", b"\n", data)
    newdata = re.sub(b"\n", b"\r\n", newdata)
if newdata != data:
print('unix2dos:', file)
with open(file, "wb") as f:
f.write(newdata)
return file
else:
print(file, 'ok')
def unix2dos_one_dir(modified_files, dir_name, file_names):
for file in file_names:
full_path = os.path.join(dir_name, file)
        file = unix2dos(full_path)
        if file is not None:
            modified_files.append(file)
def unix2dos_dir(dir_name):
modified_files = []
    # os.path.walk was removed in Python 3; walk the tree with os.walk instead
    for dir_root, _, file_names in os.walk(dir_name):
        unix2dos_one_dir(modified_files, dir_root, file_names)
return modified_files
if __name__ == "__main__":
dos2unix_dir(sys.argv[1])
| 2,032 | Python | 25.064102 | 76 | 0.57185 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/compaq.py |
#http://www.compaq.com/fortran/docs/
import os
import sys
from numpy.distutils.fcompiler import FCompiler
from distutils.errors import DistutilsPlatformError
compilers = ['CompaqFCompiler']
if os.name != 'posix' or sys.platform[:6] == 'cygwin':
# Otherwise we'd get a false positive on posix systems with
# case-insensitive filesystems (like darwin), because we'll pick
# up /bin/df
compilers.append('CompaqVisualFCompiler')
class CompaqFCompiler(FCompiler):
compiler_type = 'compaq'
description = 'Compaq Fortran Compiler'
version_pattern = r'Compaq Fortran (?P<version>[^\s]*).*'
if sys.platform[:5]=='linux':
fc_exe = 'fort'
else:
fc_exe = 'f90'
executables = {
'version_cmd' : ['<F90>', "-version"],
'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"],
'compiler_fix' : [fc_exe, "-fixed"],
'compiler_f90' : [fc_exe],
'linker_so' : ['<F90>'],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
module_dir_switch = '-module ' # not tested
module_include_switch = '-I'
def get_flags(self):
return ['-assume no2underscore', '-nomixed_str_len_arg']
def get_flags_debug(self):
return ['-g', '-check bounds']
def get_flags_opt(self):
return ['-O4', '-align dcommons', '-assume bigarrays',
'-assume nozsize', '-math_library fast']
def get_flags_arch(self):
return ['-arch host', '-tune host']
def get_flags_linker_so(self):
if sys.platform[:5]=='linux':
return ['-shared']
return ['-shared', '-Wl,-expect_unresolved,*']
class CompaqVisualFCompiler(FCompiler):
compiler_type = 'compaqv'
description = 'DIGITAL or Compaq Visual Fortran Compiler'
version_pattern = (r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler'
r' Version (?P<version>[^\s]*).*')
compile_switch = '/compile_only'
object_switch = '/object:'
library_switch = '/OUT:' #No space after /OUT:!
static_lib_extension = ".lib"
static_lib_format = "%s%s"
module_dir_switch = '/module:'
module_include_switch = '/I'
ar_exe = 'lib.exe'
fc_exe = 'DF'
if sys.platform=='win32':
from numpy.distutils.msvccompiler import MSVCCompiler
try:
m = MSVCCompiler()
m.initialize()
ar_exe = m.lib
except DistutilsPlatformError:
pass
except AttributeError as e:
if '_MSVCCompiler__root' in str(e):
                print('Ignoring "%s" (I think it is a msvccompiler.py bug)' % (e))
else:
raise
except OSError as e:
if not "vcvarsall.bat" in str(e):
print("Unexpected OSError in", __file__)
raise
except ValueError as e:
if not "'path'" in str(e):
print("Unexpected ValueError in", __file__)
raise
executables = {
'version_cmd' : ['<F90>', "/what"],
'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"],
'compiler_fix' : [fc_exe, "/fixed"],
'compiler_f90' : [fc_exe],
'linker_so' : ['<F90>'],
'archiver' : [ar_exe, "/OUT:"],
'ranlib' : None
}
def get_flags(self):
return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)',
'/names:lowercase', '/assume:underscore']
def get_flags_opt(self):
return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast']
def get_flags_arch(self):
return ['/threads']
def get_flags_debug(self):
return ['/debug']
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils import customized_fcompiler
print(customized_fcompiler(compiler='compaq').get_version())
| 3,903 | Python | 31.264463 | 81 | 0.555214 |